am d58b6cd1: am cbc90453: am 14958e21: Merge "audioflinger: fix issue with camcorder and A2DP" into ics-mr1

* commit 'd58b6cd1e3fdf3deb5147daec556fe424a568732':
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index ee458f1..b81fe86 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -27,7 +27,8 @@
 #include <camera/ICameraRecordingProxyListener.h>
 #include <camera/ICameraService.h>
 
-#include <surfaceflinger/Surface.h>
+#include <gui/ISurfaceTexture.h>
+#include <gui/Surface.h>
 
 namespace android {
 
@@ -115,13 +116,13 @@
     return cs->getCameraInfo(cameraId, cameraInfo);
 }
 
-sp<Camera> Camera::connect(int cameraId)
+sp<Camera> Camera::connect(int cameraId, bool force, bool keep)
 {
     ALOGV("connect");
     sp<Camera> c = new Camera();
     const sp<ICameraService>& cs = getCameraService();
     if (cs != 0) {
-        c->mCamera = cs->connect(c, cameraId);
+        c->mCamera = cs->connect(c, cameraId, force, keep);
     }
     if (c->mCamera != 0) {
         c->mCamera->asBinder()->linkToDeath(c);
diff --git a/camera/ICamera.cpp b/camera/ICamera.cpp
index 70f5dbc..8d8408c 100644
--- a/camera/ICamera.cpp
+++ b/camera/ICamera.cpp
@@ -22,6 +22,8 @@
 #include <sys/types.h>
 #include <binder/Parcel.h>
 #include <camera/ICamera.h>
+#include <gui/ISurfaceTexture.h>
+#include <gui/Surface.h>
 
 namespace android {
 
diff --git a/camera/ICameraService.cpp b/camera/ICameraService.cpp
index 85f1a29..c74298a 100644
--- a/camera/ICameraService.cpp
+++ b/camera/ICameraService.cpp
@@ -56,12 +56,15 @@
     }
 
     // connect to camera service
-    virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient, int cameraId)
+    virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient, int cameraId,
+                                bool force, bool keep)
     {
         Parcel data, reply;
         data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
         data.writeStrongBinder(cameraClient->asBinder());
         data.writeInt32(cameraId);
+        data.writeInt32(force);
+        data.writeInt32(keep);
         remote()->transact(BnCameraService::CONNECT, data, &reply);
         return interface_cast<ICamera>(reply.readStrongBinder());
     }
@@ -93,7 +96,10 @@
         case CONNECT: {
             CHECK_INTERFACE(ICameraService, data, reply);
             sp<ICameraClient> cameraClient = interface_cast<ICameraClient>(data.readStrongBinder());
-            sp<ICamera> camera = connect(cameraClient, data.readInt32());
+            const int cameraId = data.readInt32();
+            const int force = data.readInt32();
+            const int keep = data.readInt32();
+            sp<ICamera> camera = connect(cameraClient, cameraId, force, keep);
             reply->writeStrongBinder(camera->asBinder());
             return NO_ERROR;
         } break;
@@ -105,4 +111,3 @@
 // ----------------------------------------------------------------------------
 
 }; // namespace android
-
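
Note: Camera::connect() now carries two extra flags over the CONNECT
transaction; the proxy writes them as int32s right after the camera id and
the stub reads them back in the same order before calling through. A minimal
caller-side sketch (hypothetical call site, not part of this change; the
semantics of force/keep are defined by the camera service, not by this hunk):

    sp<Camera> camera = Camera::connect(
            0 /* cameraId */, false /* force */, false /* keep */);
    if (camera == NULL) {
        // connection refused or camera service unavailable
    }
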
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index e9642f7..30be7fa 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -7,16 +7,16 @@
 	SineSource.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright libmedia libutils libbinder libstagefright_foundation \
+	libstagefright libmedia libmedia_native libutils libbinder libstagefright_foundation \
         libskia libgui
 
 LOCAL_C_INCLUDES:= \
 	$(JNI_H_INCLUDE) \
 	frameworks/base/media/libstagefright \
 	frameworks/base/media/libstagefright/include \
-	$(TOP)/frameworks/base/include/media/stagefright/openmax \
-        external/skia/include/core \
-        external/skia/include/images \
+	$(TOP)/frameworks/native/include/media/openmax \
+	external/skia/include/core \
+	external/skia/include/images \
 
 LOCAL_CFLAGS += -Wno-multichar
 
@@ -35,12 +35,12 @@
         record.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright liblog libutils libbinder
+	libstagefright liblog libutils libbinder libstagefright_foundation
 
 LOCAL_C_INCLUDES:= \
 	$(JNI_H_INCLUDE) \
 	frameworks/base/media/libstagefright \
-	$(TOP)/frameworks/base/include/media/stagefright/openmax
+	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar
 
@@ -59,12 +59,12 @@
         recordvideo.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright liblog libutils libbinder
+	libstagefright liblog libutils libbinder libstagefright_foundation
 
 LOCAL_C_INCLUDES:= \
 	$(JNI_H_INCLUDE) \
 	frameworks/base/media/libstagefright \
-	$(TOP)/frameworks/base/include/media/stagefright/openmax
+	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar
 
@@ -84,12 +84,12 @@
         audioloop.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright liblog libutils libbinder
+	libstagefright liblog libutils libbinder libstagefright_foundation
 
 LOCAL_C_INCLUDES:= \
 	$(JNI_H_INCLUDE) \
 	frameworks/base/media/libstagefright \
-	$(TOP)/frameworks/base/include/media/stagefright/openmax
+	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar
 
@@ -108,12 +108,12 @@
 
 LOCAL_SHARED_LIBRARIES := \
 	libstagefright liblog libutils libbinder libgui \
-        libstagefright_foundation libmedia
+        libstagefright_foundation libmedia libmedia_native
 
 LOCAL_C_INCLUDES:= \
 	$(JNI_H_INCLUDE) \
 	frameworks/base/media/libstagefright \
-	$(TOP)/frameworks/base/include/media/stagefright/openmax
+	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar
 
@@ -132,12 +132,12 @@
 
 LOCAL_SHARED_LIBRARIES := \
 	libstagefright liblog libutils libbinder libstagefright_foundation \
-        libmedia libgui libcutils libui
+        libmedia libmedia_native libgui libcutils libui
 
 LOCAL_C_INCLUDES:= \
 	$(JNI_H_INCLUDE) \
 	frameworks/base/media/libstagefright \
-	$(TOP)/frameworks/base/include/media/stagefright/openmax
+	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar
 
@@ -147,4 +147,28 @@
 
 include $(BUILD_EXECUTABLE)
 
+################################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:=               \
+        codec.cpp               \
+        SimplePlayer.cpp        \
+
+LOCAL_SHARED_LIBRARIES := \
+	libstagefright liblog libutils libbinder libstagefright_foundation \
+        libmedia libmedia_native libgui libcutils libui
+
+LOCAL_C_INCLUDES:= \
+	$(JNI_H_INCLUDE) \
+	frameworks/base/media/libstagefright \
+	$(TOP)/frameworks/native/include/media/openmax
+
+LOCAL_CFLAGS += -Wno-multichar
+
+LOCAL_MODULE_TAGS := debug
+
+LOCAL_MODULE:= codec
+
+include $(BUILD_EXECUTABLE)
 
diff --git a/cmds/stagefright/SimplePlayer.cpp b/cmds/stagefright/SimplePlayer.cpp
new file mode 100644
index 0000000..0cfeb3e
--- /dev/null
+++ b/cmds/stagefright/SimplePlayer.cpp
@@ -0,0 +1,645 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SimplePlayer"
+#include <utils/Log.h>
+
+#include "SimplePlayer.h"
+
+#include <gui/SurfaceTextureClient.h>
+#include <media/AudioTrack.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/NativeWindowWrapper.h>
+#include <media/stagefright/NuMediaExtractor.h>
+
+namespace android {
+
+SimplePlayer::SimplePlayer()
+    : mState(UNINITIALIZED),
+      mDoMoreStuffGeneration(0),
+      mStartTimeRealUs(-1ll) {
+}
+
+SimplePlayer::~SimplePlayer() {
+}
+
+// static
+status_t PostAndAwaitResponse(
+        const sp<AMessage> &msg, sp<AMessage> *response) {
+    status_t err = msg->postAndAwaitResponse(response);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (!(*response)->findInt32("err", &err)) {
+        err = OK;
+    }
+
+    return err;
+}
+status_t SimplePlayer::setDataSource(const char *path) {
+    sp<AMessage> msg = new AMessage(kWhatSetDataSource, id());
+    msg->setString("path", path);
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t SimplePlayer::setSurface(const sp<ISurfaceTexture> &surfaceTexture) {
+    sp<AMessage> msg = new AMessage(kWhatSetSurface, id());
+
+    sp<SurfaceTextureClient> surfaceTextureClient;
+    if (surfaceTexture != NULL) {
+        surfaceTextureClient = new SurfaceTextureClient(surfaceTexture);
+    }
+
+    msg->setObject(
+            "native-window", new NativeWindowWrapper(surfaceTextureClient));
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t SimplePlayer::prepare() {
+    sp<AMessage> msg = new AMessage(kWhatPrepare, id());
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t SimplePlayer::start() {
+    sp<AMessage> msg = new AMessage(kWhatStart, id());
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t SimplePlayer::stop() {
+    sp<AMessage> msg = new AMessage(kWhatStop, id());
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t SimplePlayer::reset() {
+    sp<AMessage> msg = new AMessage(kWhatReset, id());
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+void SimplePlayer::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatSetDataSource:
+        {
+            status_t err = OK;
+            if (mState != UNINITIALIZED) {
+                err = INVALID_OPERATION;
+            } else {
+                CHECK(msg->findString("path", &mPath));
+                mState = UNPREPARED;
+            }
+
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatSetSurface:
+        {
+            status_t err;
+            if (mState != UNPREPARED) {
+                err = INVALID_OPERATION;
+            } else {
+                sp<RefBase> obj;
+                CHECK(msg->findObject("native-window", &obj));
+
+                mNativeWindow = static_cast<NativeWindowWrapper *>(obj.get());
+
+                err = OK;
+            }
+
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatPrepare:
+        {
+            status_t err;
+            if (mState != UNPREPARED) {
+                err = INVALID_OPERATION;
+            } else {
+                err = onPrepare();
+
+                if (err == OK) {
+                    mState = STOPPED;
+                }
+            }
+
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatStart:
+        {
+            status_t err = OK;
+
+            if (mState == UNPREPARED) {
+                err = onPrepare();
+
+                if (err == OK) {
+                    mState = STOPPED;
+                }
+            }
+
+            if (err == OK) {
+                if (mState != STOPPED) {
+                    err = INVALID_OPERATION;
+                } else {
+                    err = onStart();
+
+                    if (err == OK) {
+                        mState = STARTED;
+                    }
+                }
+            }
+
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatStop:
+        {
+            status_t err;
+
+            if (mState != STARTED) {
+                err = INVALID_OPERATION;
+            } else {
+                err = onStop();
+
+                if (err == OK) {
+                    mState = STOPPED;
+                }
+            }
+
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatReset:
+        {
+            status_t err = OK;
+
+            if (mState == STARTED) {
+                CHECK_EQ(onStop(), (status_t)OK);
+                mState = STOPPED;
+            }
+
+            if (mState == STOPPED) {
+                err = onReset();
+                mState = UNINITIALIZED;
+            }
+
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatDoMoreStuff:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+
+            if (generation != mDoMoreStuffGeneration) {
+                break;
+            }
+
+            status_t err = onDoMoreStuff();
+
+            if (err == OK) {
+                msg->post(10000ll);
+            }
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+status_t SimplePlayer::onPrepare() {
+    CHECK_EQ(mState, UNPREPARED);
+
+    mExtractor = new NuMediaExtractor;
+
+    status_t err = mExtractor->setDataSource(mPath.c_str());
+
+    if (err != OK) {
+        mExtractor.clear();
+        return err;
+    }
+
+    if (mCodecLooper == NULL) {
+        mCodecLooper = new ALooper;
+        mCodecLooper->start();
+    }
+
+    bool haveAudio = false;
+    bool haveVideo = false;
+    for (size_t i = 0; i < mExtractor->countTracks(); ++i) {
+        sp<AMessage> format;
+        status_t err = mExtractor->getTrackFormat(i, &format);
+        CHECK_EQ(err, (status_t)OK);
+
+        AString mime;
+        CHECK(format->findString("mime", &mime));
+
+        if (!haveAudio && !strncasecmp(mime.c_str(), "audio/", 6)) {
+            haveAudio = true;
+        } else if (!haveVideo && !strncasecmp(mime.c_str(), "video/", 6)) {
+            haveVideo = true;
+        } else {
+            continue;
+        }
+
+        err = mExtractor->selectTrack(i);
+        CHECK_EQ(err, (status_t)OK);
+
+        CodecState *state =
+            &mStateByTrackIndex.editValueAt(
+                    mStateByTrackIndex.add(i, CodecState()));
+
+        state->mNumFramesWritten = 0;
+        state->mCodec = MediaCodec::CreateByType(
+                mCodecLooper, mime.c_str(), false /* encoder */);
+
+        CHECK(state->mCodec != NULL);
+
+        err = state->mCodec->configure(
+                format, mNativeWindow->getSurfaceTextureClient(),
+                0 /* flags */);
+
+        CHECK_EQ(err, (status_t)OK);
+
+        size_t j = 0;
+        sp<ABuffer> buffer;
+        while (format->findBuffer(StringPrintf("csd-%d", j).c_str(), &buffer)) {
+            state->mCSD.push_back(buffer);
+
+            ++j;
+        }
+    }
+
+    for (size_t i = 0; i < mStateByTrackIndex.size(); ++i) {
+        CodecState *state = &mStateByTrackIndex.editValueAt(i);
+
+        status_t err = state->mCodec->start();
+        CHECK_EQ(err, (status_t)OK);
+
+        err = state->mCodec->getInputBuffers(&state->mBuffers[0]);
+        CHECK_EQ(err, (status_t)OK);
+
+        err = state->mCodec->getOutputBuffers(&state->mBuffers[1]);
+        CHECK_EQ(err, (status_t)OK);
+
+        for (size_t j = 0; j < state->mCSD.size(); ++j) {
+            const sp<ABuffer> &srcBuffer = state->mCSD.itemAt(j);
+
+            size_t index;
+            err = state->mCodec->dequeueInputBuffer(&index, -1ll);
+            CHECK_EQ(err, (status_t)OK);
+
+            const sp<ABuffer> &dstBuffer = state->mBuffers[0].itemAt(index);
+
+            CHECK_LE(srcBuffer->size(), dstBuffer->capacity());
+            dstBuffer->setRange(0, srcBuffer->size());
+            memcpy(dstBuffer->data(), srcBuffer->data(), srcBuffer->size());
+
+            err = state->mCodec->queueInputBuffer(
+                    index,
+                    0,
+                    dstBuffer->size(),
+                    0ll,
+                    MediaCodec::BUFFER_FLAG_CODECCONFIG);
+            CHECK_EQ(err, (status_t)OK);
+        }
+    }
+
+    return OK;
+}
+
+status_t SimplePlayer::onStart() {
+    CHECK_EQ(mState, STOPPED);
+
+    mStartTimeRealUs = -1ll;
+
+    sp<AMessage> msg = new AMessage(kWhatDoMoreStuff, id());
+    msg->setInt32("generation", ++mDoMoreStuffGeneration);
+    msg->post();
+
+    return OK;
+}
+
+status_t SimplePlayer::onStop() {
+    CHECK_EQ(mState, STARTED);
+
+    ++mDoMoreStuffGeneration;
+
+    return OK;
+}
+
+status_t SimplePlayer::onReset() {
+    CHECK_EQ(mState, STOPPED);
+
+    for (size_t i = 0; i < mStateByTrackIndex.size(); ++i) {
+        CodecState *state = &mStateByTrackIndex.editValueAt(i);
+
+        CHECK_EQ(state->mCodec->release(), (status_t)OK);
+    }
+
+    mStartTimeRealUs = -1ll;
+
+    mStateByTrackIndex.clear();
+    mCodecLooper.clear();
+    mExtractor.clear();
+    mNativeWindow.clear();
+    mPath.clear();
+
+    return OK;
+}
+
+status_t SimplePlayer::onDoMoreStuff() {
+    ALOGV("onDoMoreStuff");
+    for (size_t i = 0; i < mStateByTrackIndex.size(); ++i) {
+        CodecState *state = &mStateByTrackIndex.editValueAt(i);
+
+        status_t err;
+        do {
+            size_t index;
+            err = state->mCodec->dequeueInputBuffer(&index);
+
+            if (err == OK) {
+                ALOGV("dequeued input buffer on track %d",
+                      mStateByTrackIndex.keyAt(i));
+
+                state->mAvailInputBufferIndices.push_back(index);
+            } else {
+                ALOGV("dequeueInputBuffer on track %d returned %d",
+                      mStateByTrackIndex.keyAt(i), err);
+            }
+        } while (err == OK);
+
+        do {
+            BufferInfo info;
+            err = state->mCodec->dequeueOutputBuffer(
+                    &info.mIndex,
+                    &info.mOffset,
+                    &info.mSize,
+                    &info.mPresentationTimeUs,
+                    &info.mFlags);
+
+            if (err == OK) {
+                ALOGV("dequeued output buffer on track %d",
+                      mStateByTrackIndex.keyAt(i));
+
+                state->mAvailOutputBufferInfos.push_back(info);
+            } else if (err == INFO_FORMAT_CHANGED) {
+                err = onOutputFormatChanged(mStateByTrackIndex.keyAt(i), state);
+                CHECK_EQ(err, (status_t)OK);
+            } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
+                err = state->mCodec->getOutputBuffers(&state->mBuffers[1]);
+                CHECK_EQ(err, (status_t)OK);
+            } else {
+                ALOGV("dequeueOutputBuffer on track %d returned %d",
+                      mStateByTrackIndex.keyAt(i), err);
+            }
+        } while (err == OK
+                || err == INFO_FORMAT_CHANGED
+                || err == INFO_OUTPUT_BUFFERS_CHANGED);
+    }
+
+    for (;;) {
+        size_t trackIndex;
+        status_t err = mExtractor->getSampleTrackIndex(&trackIndex);
+
+        if (err != OK) {
+            ALOGI("encountered input EOS.");
+            break;
+        } else {
+            CodecState *state = &mStateByTrackIndex.editValueFor(trackIndex);
+
+            if (state->mAvailInputBufferIndices.empty()) {
+                break;
+            }
+
+            size_t index = *state->mAvailInputBufferIndices.begin();
+            state->mAvailInputBufferIndices.erase(
+                    state->mAvailInputBufferIndices.begin());
+
+            const sp<ABuffer> &dstBuffer =
+                state->mBuffers[0].itemAt(index);
+
+            err = mExtractor->readSampleData(dstBuffer);
+            CHECK_EQ(err, (status_t)OK);
+
+            int64_t timeUs;
+            CHECK_EQ(mExtractor->getSampleTime(&timeUs), (status_t)OK);
+
+            err = state->mCodec->queueInputBuffer(
+                    index,
+                    dstBuffer->offset(),
+                    dstBuffer->size(),
+                    timeUs,
+                    0);
+            CHECK_EQ(err, (status_t)OK);
+
+            ALOGV("enqueued input data on track %d", trackIndex);
+
+            err = mExtractor->advance();
+            CHECK_EQ(err, (status_t)OK);
+        }
+    }
+
+    int64_t nowUs = ALooper::GetNowUs();
+
+    if (mStartTimeRealUs < 0ll) {
+        mStartTimeRealUs = nowUs + 1000000ll;
+    }
+
+    for (size_t i = 0; i < mStateByTrackIndex.size(); ++i) {
+        CodecState *state = &mStateByTrackIndex.editValueAt(i);
+
+        while (!state->mAvailOutputBufferInfos.empty()) {
+            BufferInfo *info = &*state->mAvailOutputBufferInfos.begin();
+
+            int64_t whenRealUs = info->mPresentationTimeUs + mStartTimeRealUs;
+            int64_t lateByUs = nowUs - whenRealUs;
+
+            if (lateByUs > -10000ll) {
+                bool release = true;
+
+                if (lateByUs > 30000ll) {
+                    ALOGI("track %d buffer late by %lld us, dropping.",
+                          mStateByTrackIndex.keyAt(i), lateByUs);
+                    state->mCodec->releaseOutputBuffer(info->mIndex);
+                } else {
+                    if (state->mAudioTrack != NULL) {
+                        const sp<ABuffer> &srcBuffer =
+                            state->mBuffers[1].itemAt(info->mIndex);
+
+                        renderAudio(state, info, srcBuffer);
+
+                        if (info->mSize > 0) {
+                            release = false;
+                        }
+                    }
+
+                    if (release) {
+                        state->mCodec->renderOutputBufferAndRelease(
+                                info->mIndex);
+                    }
+                }
+
+                if (release) {
+                    state->mAvailOutputBufferInfos.erase(
+                            state->mAvailOutputBufferInfos.begin());
+
+                    info = NULL;
+                } else {
+                    break;
+                }
+            } else {
+                ALOGV("track %d buffer early by %lld us.",
+                      mStateByTrackIndex.keyAt(i), -lateByUs);
+                break;
+            }
+        }
+    }
+
+    return OK;
+}
+
+status_t SimplePlayer::onOutputFormatChanged(
+        size_t trackIndex, CodecState *state) {
+    sp<AMessage> format;
+    status_t err = state->mCodec->getOutputFormat(&format);
+
+    if (err != OK) {
+        return err;
+    }
+
+    AString mime;
+    CHECK(format->findString("mime", &mime));
+
+    if (!strncasecmp(mime.c_str(), "audio/", 6)) {
+        int32_t channelCount;
+        int32_t sampleRate;
+        CHECK(format->findInt32("channel-count", &channelCount));
+        CHECK(format->findInt32("sample-rate", &sampleRate));
+
+        state->mAudioTrack = new AudioTrack(
+                AUDIO_STREAM_MUSIC,
+                sampleRate,
+                AUDIO_FORMAT_PCM_16_BIT,
+                audio_channel_out_mask_from_count(channelCount),
+                0);
+
+        state->mNumFramesWritten = 0;
+    }
+
+    return OK;
+}
+
+void SimplePlayer::renderAudio(
+        CodecState *state, BufferInfo *info, const sp<ABuffer> &buffer) {
+    CHECK(state->mAudioTrack != NULL);
+
+    if (state->mAudioTrack->stopped()) {
+        state->mAudioTrack->start();
+    }
+
+    uint32_t numFramesPlayed;
+    CHECK_EQ(state->mAudioTrack->getPosition(&numFramesPlayed), (status_t)OK);
+
+    uint32_t numFramesAvailableToWrite =
+        state->mAudioTrack->frameCount()
+            - (state->mNumFramesWritten - numFramesPlayed);
+
+    size_t numBytesAvailableToWrite =
+        numFramesAvailableToWrite * state->mAudioTrack->frameSize();
+
+    size_t copy = info->mSize;
+    if (copy > numBytesAvailableToWrite) {
+        copy = numBytesAvailableToWrite;
+    }
+
+    if (copy == 0) {
+        return;
+    }
+
+    int64_t startTimeUs = ALooper::GetNowUs();
+
+    ssize_t nbytes = state->mAudioTrack->write(
+            buffer->base() + info->mOffset, copy);
+
+    CHECK_EQ(nbytes, (ssize_t)copy);
+
+    int64_t delayUs = ALooper::GetNowUs() - startTimeUs;
+
+    uint32_t numFramesWritten = nbytes / state->mAudioTrack->frameSize();
+
+    if (delayUs > 2000ll) {
+        ALOGW("AudioTrack::write took %lld us, numFramesAvailableToWrite=%u, "
+              "numFramesWritten=%u",
+              delayUs, numFramesAvailableToWrite, numFramesWritten);
+    }
+
+    info->mOffset += nbytes;
+    info->mSize -= nbytes;
+
+    state->mNumFramesWritten += numFramesWritten;
+}
+
+}  // namespace android
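
Note on renderAudio() above: the AudioTrack::write() is kept from blocking by
tracking how many frames are still in flight. A worked illustration with
made-up numbers (assuming 16-bit stereo, i.e. frameSize() == 4):

    uint32_t frameCount       = 4096;   // AudioTrack::frameCount()
    uint32_t numFramesWritten = 10000;  // state->mNumFramesWritten
    uint32_t numFramesPlayed  = 7000;   // AudioTrack::getPosition()

    uint32_t framesInFlight  = numFramesWritten - numFramesPlayed;  // 3000
    uint32_t framesWritable  = frameCount - framesInFlight;         // 1096
    size_t   bytesWritable   = framesWritable * 4;                  // 4384

renderAudio() then writes at most min(info->mSize, bytesWritable) bytes,
advances info->mOffset/mSize accordingly, and onDoMoreStuff() only releases
the output buffer back to the codec once mSize has dropped to zero.
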
diff --git a/cmds/stagefright/SimplePlayer.h b/cmds/stagefright/SimplePlayer.h
new file mode 100644
index 0000000..2548252
--- /dev/null
+++ b/cmds/stagefright/SimplePlayer.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/AString.h>
+#include <utils/KeyedVector.h>
+
+namespace android {
+
+struct ABuffer;
+struct ALooper;
+struct AudioTrack;
+struct ISurfaceTexture;
+struct MediaCodec;
+struct NativeWindowWrapper;
+struct NuMediaExtractor;
+
+struct SimplePlayer : public AHandler {
+    SimplePlayer();
+
+    status_t setDataSource(const char *path);
+    status_t setSurface(const sp<ISurfaceTexture> &surfaceTexture);
+    status_t prepare();
+    status_t start();
+    status_t stop();
+    status_t reset();
+
+protected:
+    virtual ~SimplePlayer();
+
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum State {
+        UNINITIALIZED,
+        UNPREPARED,
+        STOPPED,
+        STARTED
+    };
+
+    enum {
+        kWhatSetDataSource,
+        kWhatSetSurface,
+        kWhatPrepare,
+        kWhatStart,
+        kWhatStop,
+        kWhatReset,
+        kWhatDoMoreStuff,
+    };
+
+    struct BufferInfo {
+        size_t mIndex;
+        size_t mOffset;
+        size_t mSize;
+        int64_t mPresentationTimeUs;
+        uint32_t mFlags;
+    };
+
+    struct CodecState
+    {
+        sp<MediaCodec> mCodec;
+        Vector<sp<ABuffer> > mCSD;
+        Vector<sp<ABuffer> > mBuffers[2];
+
+        List<size_t> mAvailInputBufferIndices;
+        List<BufferInfo> mAvailOutputBufferInfos;
+
+        sp<AudioTrack> mAudioTrack;
+        uint32_t mNumFramesWritten;
+    };
+
+    State mState;
+    AString mPath;
+    sp<NativeWindowWrapper> mNativeWindow;
+
+    sp<NuMediaExtractor> mExtractor;
+    sp<ALooper> mCodecLooper;
+    KeyedVector<size_t, CodecState> mStateByTrackIndex;
+    int32_t mDoMoreStuffGeneration;
+
+    int64_t mStartTimeRealUs;
+
+    status_t onPrepare();
+    status_t onStart();
+    status_t onStop();
+    status_t onReset();
+    status_t onDoMoreStuff();
+    status_t onOutputFormatChanged(size_t trackIndex, CodecState *state);
+
+    void renderAudio(
+            CodecState *state, BufferInfo *info, const sp<ABuffer> &buffer);
+
+    DISALLOW_EVIL_CONSTRUCTORS(SimplePlayer);
+};
+
+}  // namespace android
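
Note: SimplePlayer is an AHandler, so it only does work after being registered
on a started ALooper; each public call posts an AMessage to itself and blocks
until the handler replies. A minimal driving sketch, mirroring the playback
path in codec.cpp later in this change (the clip path and the Surface are
illustrative and obtained elsewhere):

    sp<ALooper> looper = new ALooper;
    looper->start();

    sp<SimplePlayer> player = new SimplePlayer;
    looper->registerHandler(player);

    player->setDataSource("/sdcard/clip.mp4");
    player->setSurface(surface->getSurfaceTexture());  // sp<Surface> surface
    player->start();
    sleep(10);       // let it play for a while
    player->stop();
    player->reset();
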
diff --git a/cmds/stagefright/SineSource.cpp b/cmds/stagefright/SineSource.cpp
index 021f636..14b4306 100644
--- a/cmds/stagefright/SineSource.cpp
+++ b/cmds/stagefright/SineSource.cpp
@@ -3,7 +3,7 @@
 #include <math.h>
 
 #include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 
diff --git a/cmds/stagefright/audioloop.cpp b/cmds/stagefright/audioloop.cpp
index 858681f..ed7d6cb 100644
--- a/cmds/stagefright/audioloop.cpp
+++ b/cmds/stagefright/audioloop.cpp
@@ -2,10 +2,10 @@
 
 #include <binder/ProcessState.h>
 #include <media/mediarecorder.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/AMRWriter.h>
 #include <media/stagefright/AudioPlayer.h>
 #include <media/stagefright/AudioSource.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/OMXClient.h>
@@ -24,7 +24,7 @@
     android::ProcessState::self()->startThreadPool();
 
     OMXClient client;
-    CHECK_EQ(client.connect(), OK);
+    CHECK_EQ(client.connect(), (status_t)OK);
 
 #if 0
     sp<MediaSource> source = new SineSource(kSampleRate, kNumChannels);
@@ -32,9 +32,7 @@
     sp<MediaSource> source = new AudioSource(
             AUDIO_SOURCE_DEFAULT,
             kSampleRate,
-            kNumChannels == 1
-                ? AUDIO_CHANNEL_IN_MONO
-                : AUDIO_CHANNEL_IN_STEREO);
+            audio_channel_in_mask_from_count(kNumChannels));
 #endif
 
     sp<MetaData> meta = new MetaData;
@@ -82,7 +80,7 @@
     delete player;
     player = NULL;
 #elif 0
-    CHECK_EQ(decoder->start(), OK);
+    CHECK_EQ(decoder->start(), (status_t)OK);
 
     MediaBuffer *buffer;
     while (decoder->read(&buffer) == OK) {
@@ -95,7 +93,7 @@
         buffer = NULL;
     }
 
-    CHECK_EQ(decoder->stop(), OK);
+    CHECK_EQ(decoder->stop(), (status_t)OK);
 #endif
 #endif
 
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
new file mode 100644
index 0000000..fea62cc
--- /dev/null
+++ b/cmds/stagefright/codec.cpp
@@ -0,0 +1,453 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "codec"
+#include <utils/Log.h>
+
+#include "SimplePlayer.h"
+
+#include <binder/ProcessState.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/NuMediaExtractor.h>
+#include <gui/SurfaceComposerClient.h>
+
+static void usage(const char *me) {
+    fprintf(stderr, "usage: %s [-a] use audio\n"
+                    "\t\t[-v] use video\n"
+                    "\t\t[-p] playback\n"
+                    "\t\t[-S] allocate buffers from a surface\n", me);
+
+    exit(1);
+}
+
+namespace android {
+
+struct CodecState {
+    sp<MediaCodec> mCodec;
+    Vector<sp<ABuffer> > mCSD;
+    size_t mCSDIndex;
+    Vector<sp<ABuffer> > mInBuffers;
+    Vector<sp<ABuffer> > mOutBuffers;
+    bool mSignalledInputEOS;
+    bool mSawOutputEOS;
+    int64_t mNumBuffersDecoded;
+    int64_t mNumBytesDecoded;
+    bool mIsAudio;
+};
+
+}  // namespace android
+
+static int decode(
+        const android::sp<android::ALooper> &looper,
+        const char *path,
+        bool useAudio,
+        bool useVideo,
+        const android::sp<android::Surface> &surface) {
+    using namespace android;
+
+    static int64_t kTimeout = 500ll;
+
+    sp<NuMediaExtractor> extractor = new NuMediaExtractor;
+    if (extractor->setDataSource(path) != OK) {
+        fprintf(stderr, "unable to instantiate extractor.\n");
+        return 1;
+    }
+
+    KeyedVector<size_t, CodecState> stateByTrack;
+
+    bool haveAudio = false;
+    bool haveVideo = false;
+    for (size_t i = 0; i < extractor->countTracks(); ++i) {
+        sp<AMessage> format;
+        status_t err = extractor->getTrackFormat(i, &format);
+        CHECK_EQ(err, (status_t)OK);
+
+        AString mime;
+        CHECK(format->findString("mime", &mime));
+
+        bool isAudio = !strncasecmp(mime.c_str(), "audio/", 6);
+        bool isVideo = !strncasecmp(mime.c_str(), "video/", 6);
+
+        if (useAudio && !haveAudio && isAudio) {
+            haveAudio = true;
+        } else if (useVideo && !haveVideo && isVideo) {
+            haveVideo = true;
+        } else {
+            continue;
+        }
+
+        ALOGV("selecting track %d", i);
+
+        err = extractor->selectTrack(i);
+        CHECK_EQ(err, (status_t)OK);
+
+        CodecState *state =
+            &stateByTrack.editValueAt(stateByTrack.add(i, CodecState()));
+
+        state->mNumBytesDecoded = 0;
+        state->mNumBuffersDecoded = 0;
+        state->mIsAudio = isAudio;
+
+        state->mCodec = MediaCodec::CreateByType(
+                looper, mime.c_str(), false /* encoder */);
+
+        CHECK(state->mCodec != NULL);
+
+        err = state->mCodec->configure(
+                format, isVideo ? surface : NULL, 0 /* flags */);
+
+        CHECK_EQ(err, (status_t)OK);
+
+        size_t j = 0;
+        sp<ABuffer> buffer;
+        while (format->findBuffer(StringPrintf("csd-%d", j).c_str(), &buffer)) {
+            state->mCSD.push_back(buffer);
+
+            ++j;
+        }
+
+        state->mCSDIndex = 0;
+        state->mSignalledInputEOS = false;
+        state->mSawOutputEOS = false;
+
+        ALOGV("got %d pieces of codec specific data.", state->mCSD.size());
+    }
+
+    CHECK(!stateByTrack.isEmpty());
+
+    int64_t startTimeUs = ALooper::GetNowUs();
+
+    for (size_t i = 0; i < stateByTrack.size(); ++i) {
+        CodecState *state = &stateByTrack.editValueAt(i);
+
+        sp<MediaCodec> codec = state->mCodec;
+
+        CHECK_EQ((status_t)OK, codec->start());
+
+        CHECK_EQ((status_t)OK, codec->getInputBuffers(&state->mInBuffers));
+        CHECK_EQ((status_t)OK, codec->getOutputBuffers(&state->mOutBuffers));
+
+        ALOGV("got %d input and %d output buffers",
+              state->mInBuffers.size(), state->mOutBuffers.size());
+
+        while (state->mCSDIndex < state->mCSD.size()) {
+            size_t index;
+            status_t err = codec->dequeueInputBuffer(&index, -1ll);
+            CHECK_EQ(err, (status_t)OK);
+
+            const sp<ABuffer> &srcBuffer =
+                state->mCSD.itemAt(state->mCSDIndex++);
+
+            const sp<ABuffer> &buffer = state->mInBuffers.itemAt(index);
+
+            memcpy(buffer->data(), srcBuffer->data(), srcBuffer->size());
+
+            err = codec->queueInputBuffer(
+                    index,
+                    0 /* offset */,
+                    srcBuffer->size(),
+                    0ll /* timeUs */,
+                    MediaCodec::BUFFER_FLAG_CODECCONFIG);
+
+            CHECK_EQ(err, (status_t)OK);
+        }
+    }
+
+    bool sawInputEOS = false;
+
+    for (;;) {
+        if (!sawInputEOS) {
+            size_t trackIndex;
+            status_t err = extractor->getSampleTrackIndex(&trackIndex);
+
+            if (err != OK) {
+                ALOGV("saw input eos");
+                sawInputEOS = true;
+            } else {
+                CodecState *state = &stateByTrack.editValueFor(trackIndex);
+
+                size_t index;
+                err = state->mCodec->dequeueInputBuffer(&index, kTimeout);
+
+                if (err == OK) {
+                    ALOGV("filling input buffer %d", index);
+
+                    const sp<ABuffer> &buffer = state->mInBuffers.itemAt(index);
+
+                    err = extractor->readSampleData(buffer);
+                    CHECK_EQ(err, (status_t)OK);
+
+                    int64_t timeUs;
+                    err = extractor->getSampleTime(&timeUs);
+                    CHECK_EQ(err, (status_t)OK);
+
+                    err = state->mCodec->queueInputBuffer(
+                            index,
+                            0 /* offset */,
+                            buffer->size(),
+                            timeUs,
+                            0 /* flags */);
+
+                    CHECK_EQ(err, (status_t)OK);
+
+                    extractor->advance();
+                } else {
+                    CHECK_EQ(err, -EAGAIN);
+                }
+            }
+        } else {
+            for (size_t i = 0; i < stateByTrack.size(); ++i) {
+                CodecState *state = &stateByTrack.editValueAt(i);
+
+                if (!state->mSignalledInputEOS) {
+                    size_t index;
+                    status_t err =
+                        state->mCodec->dequeueInputBuffer(&index, kTimeout);
+
+                    if (err == OK) {
+                        ALOGV("signalling input EOS on track %d", i);
+
+                        err = state->mCodec->queueInputBuffer(
+                                index,
+                                0 /* offset */,
+                                0 /* size */,
+                                0ll /* timeUs */,
+                                MediaCodec::BUFFER_FLAG_EOS);
+
+                        CHECK_EQ(err, (status_t)OK);
+
+                        state->mSignalledInputEOS = true;
+                    } else {
+                        CHECK_EQ(err, -EAGAIN);
+                    }
+                }
+            }
+        }
+
+        bool sawOutputEOSOnAllTracks = true;
+        for (size_t i = 0; i < stateByTrack.size(); ++i) {
+            CodecState *state = &stateByTrack.editValueAt(i);
+            if (!state->mSawOutputEOS) {
+                sawOutputEOSOnAllTracks = false;
+                break;
+            }
+        }
+
+        if (sawOutputEOSOnAllTracks) {
+            break;
+        }
+
+        for (size_t i = 0; i < stateByTrack.size(); ++i) {
+            CodecState *state = &stateByTrack.editValueAt(i);
+
+            if (state->mSawOutputEOS) {
+                continue;
+            }
+
+            size_t index;
+            size_t offset;
+            size_t size;
+            int64_t presentationTimeUs;
+            uint32_t flags;
+            status_t err = state->mCodec->dequeueOutputBuffer(
+                    &index, &offset, &size, &presentationTimeUs, &flags,
+                    kTimeout);
+
+            if (err == OK) {
+                ALOGV("draining output buffer %d, time = %lld us",
+                      index, presentationTimeUs);
+
+                ++state->mNumBuffersDecoded;
+                state->mNumBytesDecoded += size;
+
+                err = state->mCodec->releaseOutputBuffer(index);
+                CHECK_EQ(err, (status_t)OK);
+
+                if (flags & MediaCodec::BUFFER_FLAG_EOS) {
+                    ALOGV("reached EOS on output.");
+
+                    state->mSawOutputEOS = true;
+                }
+            } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
+                ALOGV("INFO_OUTPUT_BUFFERS_CHANGED");
+                CHECK_EQ((status_t)OK,
+                         state->mCodec->getOutputBuffers(&state->mOutBuffers));
+
+                ALOGV("got %d output buffers", state->mOutBuffers.size());
+            } else if (err == INFO_FORMAT_CHANGED) {
+                sp<AMessage> format;
+                CHECK_EQ((status_t)OK, state->mCodec->getOutputFormat(&format));
+
+                ALOGV("INFO_FORMAT_CHANGED: %s", format->debugString().c_str());
+            } else {
+                CHECK_EQ(err, -EAGAIN);
+            }
+        }
+    }
+
+    int64_t elapsedTimeUs = ALooper::GetNowUs() - startTimeUs;
+
+    for (size_t i = 0; i < stateByTrack.size(); ++i) {
+        CodecState *state = &stateByTrack.editValueAt(i);
+
+        CHECK_EQ((status_t)OK, state->mCodec->release());
+
+        if (state->mIsAudio) {
+            printf("track %d: %lld bytes received. %.2f KB/sec\n",
+                   i,
+                   state->mNumBytesDecoded,
+                   state->mNumBytesDecoded * 1E6 / 1024 / elapsedTimeUs);
+        } else {
+            printf("track %d: %lld frames decoded, %.2f fps. %lld bytes "
+                   "received. %.2f KB/sec\n",
+                   i,
+                   state->mNumBuffersDecoded,
+                   state->mNumBuffersDecoded * 1E6 / elapsedTimeUs,
+                   state->mNumBytesDecoded,
+                   state->mNumBytesDecoded * 1E6 / 1024 / elapsedTimeUs);
+        }
+    }
+
+    return 0;
+}
+
+int main(int argc, char **argv) {
+    using namespace android;
+
+    const char *me = argv[0];
+
+    bool useAudio = false;
+    bool useVideo = false;
+    bool playback = false;
+    bool useSurface = false;
+
+    int res;
+    while ((res = getopt(argc, argv, "havpS")) >= 0) {
+        switch (res) {
+            case 'a':
+            {
+                useAudio = true;
+                break;
+            }
+
+            case 'v':
+            {
+                useVideo = true;
+                break;
+            }
+
+            case 'p':
+            {
+                playback = true;
+                break;
+            }
+
+            case 'S':
+            {
+                useSurface = true;
+                break;
+            }
+
+            case '?':
+            case 'h':
+            default:
+            {
+                usage(me);
+            }
+        }
+    }
+
+    argc -= optind;
+    argv += optind;
+
+    if (argc != 1) {
+        usage(me);
+    }
+
+    if (!useAudio && !useVideo) {
+        useAudio = useVideo = true;
+    }
+
+    ProcessState::self()->startThreadPool();
+
+    DataSource::RegisterDefaultSniffers();
+
+    sp<ALooper> looper = new ALooper;
+    looper->start();
+
+    sp<SurfaceComposerClient> composerClient;
+    sp<SurfaceControl> control;
+    sp<Surface> surface;
+
+    if (playback || (useSurface && useVideo)) {
+        composerClient = new SurfaceComposerClient;
+        CHECK_EQ(composerClient->initCheck(), (status_t)OK);
+
+        ssize_t displayWidth = composerClient->getDisplayWidth(0);
+        ssize_t displayHeight = composerClient->getDisplayHeight(0);
+
+        ALOGV("display is %ld x %ld\n", displayWidth, displayHeight);
+
+        control = composerClient->createSurface(
+                String8("A Surface"),
+                0,
+                displayWidth,
+                displayHeight,
+                PIXEL_FORMAT_RGB_565,
+                0);
+
+        CHECK(control != NULL);
+        CHECK(control->isValid());
+
+        SurfaceComposerClient::openGlobalTransaction();
+        CHECK_EQ(control->setLayer(INT_MAX), (status_t)OK);
+        CHECK_EQ(control->show(), (status_t)OK);
+        SurfaceComposerClient::closeGlobalTransaction();
+
+        surface = control->getSurface();
+        CHECK(surface != NULL);
+    }
+
+    if (playback) {
+        sp<SimplePlayer> player = new SimplePlayer;
+        looper->registerHandler(player);
+
+        player->setDataSource(argv[0]);
+        player->setSurface(surface->getSurfaceTexture());
+        player->start();
+        sleep(60);
+        player->stop();
+        player->reset();
+    } else {
+        decode(looper, argv[0], useAudio, useVideo, surface);
+    }
+
+    if (playback || (useSurface && useVideo)) {
+        composerClient->dispose();
+    }
+
+    looper->stop();
+
+    return 0;
+}
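
Note on the decode() loop above: with a finite timeout, the dequeue calls
either hand back a buffer index or report why they could not, and -EAGAIN is
the expected "nothing ready yet" outcome. The output-side contract the loop
relies on, restated as a standalone fragment (hedged; 'codec' and 'kTimeout'
as in decode()):

    size_t index, offset, size;
    int64_t presentationTimeUs;
    uint32_t flags;

    status_t err = codec->dequeueOutputBuffer(
            &index, &offset, &size, &presentationTimeUs, &flags, kTimeout);

    if (err == OK) {
        // consume [offset, offset + size), then releaseOutputBuffer(index)
    } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
        // buffer array was reallocated: call getOutputBuffers() again
    } else if (err == INFO_FORMAT_CHANGED) {
        // query getOutputFormat() for the new output format
    } else {
        // -EAGAIN: nothing became available within kTimeout microseconds
    }
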
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index 613435d..45c3f7b 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -17,11 +17,11 @@
 #include "SineSource.h"
 
 #include <binder/ProcessState.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/AudioPlayer.h>
 #include <media/stagefright/CameraSource.h>
 #include <media/stagefright/FileSource.h>
 #include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaExtractor.h>
@@ -38,7 +38,7 @@
 static const int32_t kAudioBitRate = 12200;
 static const int64_t kDurationUs = 10000000LL;  // 10 seconds
 
-#if 1
+#if 0
 class DummySource : public MediaSource {
 
 public:
@@ -183,7 +183,7 @@
         return 1;
     }
     OMXClient client;
-    CHECK_EQ(client.connect(), OK);
+    CHECK_EQ(client.connect(), (status_t)OK);
 
     status_t err = OK;
 
@@ -231,14 +231,14 @@
     sp<MPEG4Writer> writer = new MPEG4Writer("/sdcard/output.mp4");
     writer->addSource(encoder);
     writer->setMaxFileDuration(kDurationUs);
-    CHECK_EQ(OK, writer->start());
+    CHECK_EQ((status_t)OK, writer->start());
     while (!writer->reachedEOS()) {
         fprintf(stderr, ".");
         usleep(100000);
     }
     err = writer->stop();
 #else
-    CHECK_EQ(OK, encoder->start());
+    CHECK_EQ((status_t)OK, encoder->start());
 
     MediaBuffer *buffer;
     while (encoder->read(&buffer) == OK) {
@@ -272,7 +272,7 @@
     for (int i = 0; i < 100; ++i) {
         MediaBuffer *buffer;
         status_t err = source->read(&buffer);
-        CHECK_EQ(err, OK);
+        CHECK_EQ(err, (status_t)OK);
 
         printf("got a frame, data=%p, size=%d\n",
                buffer->data(), buffer->range_length());
@@ -299,7 +299,7 @@
     android::ProcessState::self()->startThreadPool();
 
     OMXClient client;
-    CHECK_EQ(client.connect(), OK);
+    CHECK_EQ(client.connect(), (status_t)OK);
 
     const int32_t kSampleRate = 22050;
     const int32_t kNumChannels = 2;
@@ -318,7 +318,7 @@
 
     sp<MetaData> encMeta = new MetaData;
     encMeta->setCString(kKeyMIMEType,
-            1 ? MEDIA_MIMETYPE_AUDIO_AMR_WB : MEDIA_MIMETYPE_AUDIO_AAC);
+            0 ? MEDIA_MIMETYPE_AUDIO_AMR_WB : MEDIA_MIMETYPE_AUDIO_AAC);
     encMeta->setInt32(kKeySampleRate, kSampleRate);
     encMeta->setInt32(kKeyChannelCount, kNumChannels);
     encMeta->setInt32(kKeyMaxInputSize, 8192);
diff --git a/cmds/stagefright/recordvideo.cpp b/cmds/stagefright/recordvideo.cpp
index c402286..3bd1fe2 100644
--- a/cmds/stagefright/recordvideo.cpp
+++ b/cmds/stagefright/recordvideo.cpp
@@ -17,9 +17,9 @@
 #include "SineSource.h"
 
 #include <binder/ProcessState.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/AudioPlayer.h>
 #include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MPEG4Writer.h>
@@ -243,7 +243,7 @@
     }
 
     OMXClient client;
-    CHECK_EQ(client.connect(), OK);
+    CHECK_EQ(client.connect(), (status_t)OK);
 
     status_t err = OK;
     sp<MediaSource> source =
@@ -283,7 +283,7 @@
     sp<MPEG4Writer> writer = new MPEG4Writer(fileName);
     writer->addSource(encoder);
     int64_t start = systemTime();
-    CHECK_EQ(OK, writer->start());
+    CHECK_EQ((status_t)OK, writer->start());
     while (!writer->reachedEOS()) {
     }
     err = writer->stop();
diff --git a/cmds/stagefright/sf2.cpp b/cmds/stagefright/sf2.cpp
index ae80f88..e47cdc0 100644
--- a/cmds/stagefright/sf2.cpp
+++ b/cmds/stagefright/sf2.cpp
@@ -14,6 +14,10 @@
  * limitations under the License.
  */
 
+//#define LOG_NDEBUG 0
+#define LOG_TAG "sf2"
+#include <utils/Log.h>
+
 #include <binder/ProcessState.h>
 
 #include <media/stagefright/foundation/hexdump.h>
@@ -32,8 +36,7 @@
 #include <media/stagefright/NativeWindowWrapper.h>
 #include <media/stagefright/Utils.h>
 
-#include <surfaceflinger/ISurfaceComposer.h>
-#include <surfaceflinger/SurfaceComposerClient.h>
+#include <gui/SurfaceComposerClient.h>
 
 #include "include/ESDS.h"
 
@@ -198,9 +201,7 @@
 
                     (new AMessage(kWhatSeek, id()))->post(5000000ll);
                 } else if (what == ACodec::kWhatOutputFormatChanged) {
-                } else {
-                    CHECK_EQ(what, (int32_t)ACodec::kWhatShutdownCompleted);
-
+                } else if (what == ACodec::kWhatShutdownCompleted) {
                     mDecodeLooper->unregisterHandler(mCodec->id());
 
                     if (mDecodeLooper != looper()) {
@@ -208,6 +209,12 @@
                     }
 
                     looper()->stop();
+                } else if (what == ACodec::kWhatError) {
+                    ALOGE("something went wrong, codec reported an error.");
+
+                    printf("E\n");
+
+                    (new AMessage(kWhatStop, id()))->post();
                 }
                 break;
             }
@@ -360,7 +367,7 @@
             buffer->meta()->setInt32("csd", true);
             mCSD.push(buffer);
 
-            msg->setObject("csd", buffer);
+            msg->setBuffer("csd", buffer);
         } else if (meta->findData(kKeyESDS, &type, &data, &size)) {
             ESDS esds((const char *)data, size);
             CHECK_EQ(esds.InitCheck(), (status_t)OK);
@@ -410,9 +417,8 @@
             return;
         }
 
-        sp<RefBase> obj;
-        CHECK(msg->findObject("buffer", &obj));
-        sp<ABuffer> outBuffer = static_cast<ABuffer *>(obj.get());
+        sp<ABuffer> outBuffer;
+        CHECK(msg->findBuffer("buffer", &outBuffer));
 
         if (mCSDIndex < mCSD.size()) {
             outBuffer = mCSD.editItemAt(mCSDIndex++);
@@ -511,15 +517,14 @@
             }
         }
 
-        reply->setObject("buffer", outBuffer);
+        reply->setBuffer("buffer", outBuffer);
         reply->post();
     }
 
     void onDrainThisBuffer(const sp<AMessage> &msg) {
-        sp<RefBase> obj;
-        CHECK(msg->findObject("buffer", &obj));
+        sp<ABuffer> buffer;
+        CHECK(msg->findBuffer("buffer", &buffer));
 
-        sp<ABuffer> buffer = static_cast<ABuffer *>(obj.get());
         mTotalBytesReceived += buffer->size();
 
         sp<AMessage> reply;
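
Note: the sf2 changes above move ABuffer payloads from the generic
setObject()/findObject() path to AMessage's typed buffer accessors, which
removes the static_cast at every consumer. Side by side (hedged illustration,
'msg' as in the surrounding code):

    sp<ABuffer> buffer = new ABuffer(1024);

    // old: msg->setObject("buffer", buffer);
    msg->setBuffer("buffer", buffer);

    // old: sp<RefBase> obj; CHECK(msg->findObject("buffer", &obj));
    //      sp<ABuffer> out = static_cast<ABuffer *>(obj.get());
    sp<ABuffer> out;
    CHECK(msg->findBuffer("buffer", &out));
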
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 7cb8f62..dab2e0f 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -55,9 +55,7 @@
 #include <fcntl.h>
 
 #include <gui/SurfaceTextureClient.h>
-
-#include <surfaceflinger/ISurfaceComposer.h>
-#include <surfaceflinger/SurfaceComposerClient.h>
+#include <gui/SurfaceComposerClient.h>
 
 using namespace android;
 
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index 0d6c738..efa1445 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -32,8 +32,7 @@
 
 #include <binder/IServiceManager.h>
 #include <media/IMediaPlayerService.h>
-#include <surfaceflinger/ISurfaceComposer.h>
-#include <surfaceflinger/SurfaceComposerClient.h>
+#include <gui/SurfaceComposerClient.h>
 
 #include <fcntl.h>
 
diff --git a/drm/common/DrmEngineBase.cpp b/drm/common/DrmEngineBase.cpp
index 9b16c36..1c345a2 100644
--- a/drm/common/DrmEngineBase.cpp
+++ b/drm/common/DrmEngineBase.cpp
@@ -120,13 +120,23 @@
 }
 
 status_t DrmEngineBase::openDecryptSession(
-    int uniqueId, DecryptHandle* decryptHandle, int fd, off64_t offset, off64_t length) {
-    return onOpenDecryptSession(uniqueId, decryptHandle, fd, offset, length);
+    int uniqueId, DecryptHandle* decryptHandle,
+    int fd, off64_t offset, off64_t length, const char* mime) {
+
+    if (!mime || mime[0] == '\0') {
+        return onOpenDecryptSession(uniqueId, decryptHandle, fd, offset, length);
+    }
+
+    return onOpenDecryptSession(uniqueId, decryptHandle, fd, offset, length, mime);
 }
 
 status_t DrmEngineBase::openDecryptSession(
-    int uniqueId, DecryptHandle* decryptHandle, const char* uri) {
-    return onOpenDecryptSession(uniqueId, decryptHandle, uri);
+    int uniqueId, DecryptHandle* decryptHandle,
+    const char* uri, const char* mime) {
+    if (!mime || mime[0] == '\0') {
+        return onOpenDecryptSession(uniqueId, decryptHandle, uri);
+    }
+    return onOpenDecryptSession(uniqueId, decryptHandle, uri, mime);
 }
 
 status_t DrmEngineBase::closeDecryptSession(int uniqueId, DecryptHandle* decryptHandle) {
diff --git a/drm/common/IDrmManagerService.cpp b/drm/common/IDrmManagerService.cpp
index 3ed8ade..43f64f2 100644
--- a/drm/common/IDrmManagerService.cpp
+++ b/drm/common/IDrmManagerService.cpp
@@ -600,7 +600,7 @@
 }
 
 DecryptHandle* BpDrmManagerService::openDecryptSession(
-            int uniqueId, int fd, off64_t offset, off64_t length) {
+            int uniqueId, int fd, off64_t offset, off64_t length, const char* mime) {
     ALOGV("Entering BpDrmManagerService::openDecryptSession");
     Parcel data, reply;
 
@@ -609,6 +609,11 @@
     data.writeFileDescriptor(fd);
     data.writeInt64(offset);
     data.writeInt64(length);
+    String8 mimeType;
+    if (mime) {
+        mimeType = mime;
+    }
+    data.writeString8(mimeType);
 
     remote()->transact(OPEN_DECRYPT_SESSION, data, &reply);
 
@@ -620,13 +625,20 @@
     return handle;
 }
 
-DecryptHandle* BpDrmManagerService::openDecryptSession(int uniqueId, const char* uri) {
-    ALOGV("Entering BpDrmManagerService::openDecryptSession");
+DecryptHandle* BpDrmManagerService::openDecryptSession(
+        int uniqueId, const char* uri, const char* mime) {
+
+    ALOGV("Entering BpDrmManagerService::openDecryptSession: mime=%s", mime? mime: "NULL");
     Parcel data, reply;
 
     data.writeInterfaceToken(IDrmManagerService::getInterfaceDescriptor());
     data.writeInt32(uniqueId);
     data.writeString8(String8(uri));
+    String8 mimeType;
+    if (mime) {
+        mimeType = mime;
+    }
+    data.writeString8(mimeType);
 
     remote()->transact(OPEN_DECRYPT_SESSION_FROM_URI, data, &reply);
 
@@ -1265,8 +1277,10 @@
 
         const off64_t offset = data.readInt64();
         const off64_t length = data.readInt64();
+        const String8 mime = data.readString8();
+
         DecryptHandle* handle
-            = openDecryptSession(uniqueId, fd, offset, length);
+            = openDecryptSession(uniqueId, fd, offset, length, mime.string());
 
         if (NULL != handle) {
             writeDecryptHandleToParcelData(handle, reply);
@@ -1283,8 +1297,9 @@
 
         const int uniqueId = data.readInt32();
         const String8 uri = data.readString8();
+        const String8 mime = data.readString8();
 
-        DecryptHandle* handle = openDecryptSession(uniqueId, uri.string());
+        DecryptHandle* handle = openDecryptSession(uniqueId, uri.string(), mime.string());
 
         if (NULL != handle) {
             writeDecryptHandleToParcelData(handle, reply);
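
Note: the proxy now always appends a String8 for the mime type (the empty
string when the caller passed NULL), so the stub's readString8() stays aligned
with the fields written before it; an empty mime then falls back to the legacy
no-mime path in DrmEngineBase. The write/read pair, distilled (hedged; 'data'
and 'mime' as in the surrounding code):

    // proxy side
    String8 mimeType;
    if (mime) {
        mimeType = mime;            // NULL becomes the empty string
    }
    data.writeString8(mimeType);

    // stub side, read in the same order it was written
    const String8 mimeIn = data.readString8();
    // mimeIn.string() is "" when no mime was supplied
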
diff --git a/drm/drmserver/DrmManager.cpp b/drm/drmserver/DrmManager.cpp
index 3abf3d3..999295a 100644
--- a/drm/drmserver/DrmManager.cpp
+++ b/drm/drmserver/DrmManager.cpp
@@ -426,7 +426,9 @@
     return DRM_NO_ERROR;
 }
 
-DecryptHandle* DrmManager::openDecryptSession(int uniqueId, int fd, off64_t offset, off64_t length) {
+DecryptHandle* DrmManager::openDecryptSession(
+        int uniqueId, int fd, off64_t offset, off64_t length, const char* mime) {
+
     Mutex::Autolock _l(mDecryptLock);
     status_t result = DRM_ERROR_CANNOT_HANDLE;
     Vector<String8> plugInIdList = mPlugInManager.getPlugInIdList();
@@ -438,7 +440,7 @@
         for (unsigned int index = 0; index < plugInIdList.size(); index++) {
             String8 plugInId = plugInIdList.itemAt(index);
             IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
-            result = rDrmEngine.openDecryptSession(uniqueId, handle, fd, offset, length);
+            result = rDrmEngine.openDecryptSession(uniqueId, handle, fd, offset, length, mime);
 
             if (DRM_NO_ERROR == result) {
                 ++mDecryptSessionId;
@@ -453,7 +455,8 @@
     return handle;
 }
 
-DecryptHandle* DrmManager::openDecryptSession(int uniqueId, const char* uri) {
+DecryptHandle* DrmManager::openDecryptSession(
+        int uniqueId, const char* uri, const char* mime) {
     Mutex::Autolock _l(mDecryptLock);
     status_t result = DRM_ERROR_CANNOT_HANDLE;
     Vector<String8> plugInIdList = mPlugInManager.getPlugInIdList();
@@ -465,7 +468,7 @@
         for (unsigned int index = 0; index < plugInIdList.size(); index++) {
             String8 plugInId = plugInIdList.itemAt(index);
             IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
-            result = rDrmEngine.openDecryptSession(uniqueId, handle, uri);
+            result = rDrmEngine.openDecryptSession(uniqueId, handle, uri, mime);
 
             if (DRM_NO_ERROR == result) {
                 ++mDecryptSessionId;
diff --git a/drm/drmserver/DrmManagerService.cpp b/drm/drmserver/DrmManagerService.cpp
index df17ac5..8ba0203 100644
--- a/drm/drmserver/DrmManagerService.cpp
+++ b/drm/drmserver/DrmManagerService.cpp
@@ -159,12 +159,18 @@
 status_t DrmManagerService::consumeRights(
             int uniqueId, DecryptHandle* decryptHandle, int action, bool reserve) {
     ALOGV("Entering consumeRights");
+    if (!isProtectedCallAllowed()) {
+        return DRM_ERROR_NO_PERMISSION;
+    }
     return mDrmManager->consumeRights(uniqueId, decryptHandle, action, reserve);
 }
 
 status_t DrmManagerService::setPlaybackStatus(
             int uniqueId, DecryptHandle* decryptHandle, int playbackStatus, int64_t position) {
     ALOGV("Entering setPlaybackStatus");
+    if (!isProtectedCallAllowed()) {
+        return DRM_ERROR_NO_PERMISSION;
+    }
     return mDrmManager->setPlaybackStatus(uniqueId, decryptHandle, playbackStatus, position);
 }
 
@@ -208,20 +214,20 @@
 }
 
 DecryptHandle* DrmManagerService::openDecryptSession(
-            int uniqueId, int fd, off64_t offset, off64_t length) {
+            int uniqueId, int fd, off64_t offset, off64_t length, const char* mime) {
     ALOGV("Entering DrmManagerService::openDecryptSession");
     if (isProtectedCallAllowed()) {
-        return mDrmManager->openDecryptSession(uniqueId, fd, offset, length);
+        return mDrmManager->openDecryptSession(uniqueId, fd, offset, length, mime);
     }
 
     return NULL;
 }
 
 DecryptHandle* DrmManagerService::openDecryptSession(
-            int uniqueId, const char* uri) {
+            int uniqueId, const char* uri, const char* mime) {
     ALOGV("Entering DrmManagerService::openDecryptSession with uri");
     if (isProtectedCallAllowed()) {
-        return mDrmManager->openDecryptSession(uniqueId, uri);
+        return mDrmManager->openDecryptSession(uniqueId, uri, mime);
     }
 
     return NULL;
@@ -229,12 +235,18 @@
 
 status_t DrmManagerService::closeDecryptSession(int uniqueId, DecryptHandle* decryptHandle) {
     ALOGV("Entering closeDecryptSession");
+    if (!isProtectedCallAllowed()) {
+        return DRM_ERROR_NO_PERMISSION;
+    }
     return mDrmManager->closeDecryptSession(uniqueId, decryptHandle);
 }
 
 status_t DrmManagerService::initializeDecryptUnit(int uniqueId, DecryptHandle* decryptHandle,
             int decryptUnitId, const DrmBuffer* headerInfo) {
     ALOGV("Entering initializeDecryptUnit");
+    if (!isProtectedCallAllowed()) {
+        return DRM_ERROR_NO_PERMISSION;
+    }
     return mDrmManager->initializeDecryptUnit(uniqueId,decryptHandle, decryptUnitId, headerInfo);
 }
 
@@ -242,18 +254,27 @@
             int uniqueId, DecryptHandle* decryptHandle, int decryptUnitId,
             const DrmBuffer* encBuffer, DrmBuffer** decBuffer, DrmBuffer* IV) {
     ALOGV("Entering decrypt");
+    if (!isProtectedCallAllowed()) {
+        return DRM_ERROR_NO_PERMISSION;
+    }
     return mDrmManager->decrypt(uniqueId, decryptHandle, decryptUnitId, encBuffer, decBuffer, IV);
 }
 
 status_t DrmManagerService::finalizeDecryptUnit(
             int uniqueId, DecryptHandle* decryptHandle, int decryptUnitId) {
     ALOGV("Entering finalizeDecryptUnit");
+    if (!isProtectedCallAllowed()) {
+        return DRM_ERROR_NO_PERMISSION;
+    }
     return mDrmManager->finalizeDecryptUnit(uniqueId, decryptHandle, decryptUnitId);
 }
 
 ssize_t DrmManagerService::pread(int uniqueId, DecryptHandle* decryptHandle,
             void* buffer, ssize_t numBytes, off64_t offset) {
     ALOGV("Entering pread");
+    if (!isProtectedCallAllowed()) {
+        return DRM_ERROR_NO_PERMISSION;
+    }
     return mDrmManager->pread(uniqueId, decryptHandle, buffer, numBytes, offset);
 }
 
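
Every hunk in DrmManagerService.cpp above applies the same early-return guard to a decrypt-path entry point. isProtectedCallAllowed() is implemented elsewhere in the service and its policy is not part of this excerpt; the stand-alone sketch below only illustrates the shape of the guard, with callerAllowed playing that role.

    #include <drm/drm_framework_common.h>

    using namespace android;

    // Illustrative stand-in for the guard added to each protected entry point.
    static status_t guardedEntryPointSketch(bool callerAllowed) {
        if (!callerAllowed) {
            return DRM_ERROR_NO_PERMISSION;  // reject before delegating to DrmManager
        }
        return DRM_NO_ERROR;                 // the real methods delegate to mDrmManager here
    }
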
diff --git a/drm/drmserver/main_drmserver.cpp b/drm/drmserver/main_drmserver.cpp
index ed42818..434d561 100644
--- a/drm/drmserver/main_drmserver.cpp
+++ b/drm/drmserver/main_drmserver.cpp
@@ -14,15 +14,13 @@
  * limitations under the License.
  */
 
-#include <sys/types.h>
-#include <unistd.h>
-#include <grp.h>
+#define LOG_TAG "drmserver"
+//#define LOG_NDEBUG 0
 
 #include <binder/IPCThreadState.h>
 #include <binder/ProcessState.h>
 #include <binder/IServiceManager.h>
 #include <utils/Log.h>
-#include <private/android_filesystem_config.h>
 
 #include <DrmManagerService.h>
 
@@ -32,7 +30,7 @@
 {
     sp<ProcessState> proc(ProcessState::self());
     sp<IServiceManager> sm = defaultServiceManager();
-    ALOGI("ServiceManager: %p", sm.get());
+    ALOGV("ServiceManager: %p", sm.get());
     DrmManagerService::instantiate();
     ProcessState::self()->startThreadPool();
     IPCThreadState::self()->joinThreadPool();
diff --git a/drm/libdrmframework/DrmManagerClient.cpp b/drm/libdrmframework/DrmManagerClient.cpp
index c9c0d57..8768c08 100644
--- a/drm/libdrmframework/DrmManagerClient.cpp
+++ b/drm/libdrmframework/DrmManagerClient.cpp
@@ -116,12 +116,18 @@
     return mDrmManagerClientImpl->getAllSupportInfo(mUniqueId, length, drmSupportInfoArray);
 }
 
-sp<DecryptHandle> DrmManagerClient::openDecryptSession(int fd, off64_t offset, off64_t length) {
-    return mDrmManagerClientImpl->openDecryptSession(mUniqueId, fd, offset, length);
+sp<DecryptHandle> DrmManagerClient::openDecryptSession(
+        int fd, off64_t offset, off64_t length, const char* mime) {
+
+    return mDrmManagerClientImpl->openDecryptSession(
+                    mUniqueId, fd, offset, length, mime);
 }
 
-sp<DecryptHandle> DrmManagerClient::openDecryptSession(const char* uri) {
-    return mDrmManagerClientImpl->openDecryptSession(mUniqueId, uri);
+sp<DecryptHandle> DrmManagerClient::openDecryptSession(
+        const char* uri, const char* mime) {
+
+    return mDrmManagerClientImpl->openDecryptSession(
+                    mUniqueId, uri, mime);
 }
 
 status_t DrmManagerClient::closeDecryptSession(sp<DecryptHandle> &decryptHandle) {
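
From the application side the new argument is only a hint; passing NULL preserves the old behaviour of letting each engine sniff the content. A hedged usage sketch (the helper below is illustrative and not part of this change; whether the session duplicates or takes ownership of the descriptor is not shown in this excerpt):

    #include <fcntl.h>
    #include <unistd.h>
    #include <drm/DrmManagerClient.h>

    using namespace android;

    // Illustrative helper: open a protected file and start a decrypt session,
    // forwarding the MIME type when the caller already knows it.
    static sp<DecryptHandle> openProtectedContent(DrmManagerClient& client,
                                                  const char* path,
                                                  const char* mime /* may be NULL */) {
        int fd = open(path, O_RDONLY);
        if (fd < 0) {
            return NULL;
        }
        off64_t length = lseek64(fd, 0, SEEK_END);  // session reads use explicit offsets
        sp<DecryptHandle> handle = client.openDecryptSession(fd, 0, length, mime);
        if (handle == NULL) {
            close(fd);  // assumption: on failure the descriptor is still ours to close
        }
        return handle;
    }
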
diff --git a/drm/libdrmframework/DrmManagerClientImpl.cpp b/drm/libdrmframework/DrmManagerClientImpl.cpp
index b222b8f..fb0439e 100644
--- a/drm/libdrmframework/DrmManagerClientImpl.cpp
+++ b/drm/libdrmframework/DrmManagerClientImpl.cpp
@@ -255,15 +255,19 @@
 }
 
 sp<DecryptHandle> DrmManagerClientImpl::openDecryptSession(
-            int uniqueId, int fd, off64_t offset, off64_t length) {
-    return getDrmManagerService()->openDecryptSession(uniqueId, fd, offset, length);
+            int uniqueId, int fd, off64_t offset,
+            off64_t length, const char* mime) {
+
+    return getDrmManagerService()->openDecryptSession(
+                uniqueId, fd, offset, length, mime);
 }
 
 sp<DecryptHandle> DrmManagerClientImpl::openDecryptSession(
-        int uniqueId, const char* uri) {
+        int uniqueId, const char* uri, const char* mime) {
+
     DecryptHandle* handle = NULL;
     if (NULL != uri) {
-        handle = getDrmManagerService()->openDecryptSession(uniqueId, uri);
+        handle = getDrmManagerService()->openDecryptSession(uniqueId, uri, mime);
     }
     return handle;
 }
diff --git a/drm/libdrmframework/include/DrmManager.h b/drm/libdrmframework/include/DrmManager.h
index ac2b946..c9167d4 100644
--- a/drm/libdrmframework/include/DrmManager.h
+++ b/drm/libdrmframework/include/DrmManager.h
@@ -111,9 +111,10 @@
 
     status_t getAllSupportInfo(int uniqueId, int* length, DrmSupportInfo** drmSupportInfoArray);
 
-    DecryptHandle* openDecryptSession(int uniqueId, int fd, off64_t offset, off64_t length);
+    DecryptHandle* openDecryptSession(
+            int uniqueId, int fd, off64_t offset, off64_t length, const char* mime);
 
-    DecryptHandle* openDecryptSession(int uniqueId, const char* uri);
+    DecryptHandle* openDecryptSession(int uniqueId, const char* uri, const char* mime);
 
     status_t closeDecryptSession(int uniqueId, DecryptHandle* decryptHandle);
 
diff --git a/drm/libdrmframework/include/DrmManagerClientImpl.h b/drm/libdrmframework/include/DrmManagerClientImpl.h
index e3338d9..2aa493f 100644
--- a/drm/libdrmframework/include/DrmManagerClientImpl.h
+++ b/drm/libdrmframework/include/DrmManagerClientImpl.h
@@ -300,20 +300,24 @@
      * @param[in] fd File descriptor of the protected content to be decrypted
      * @param[in] offset Start position of the content
      * @param[in] length The length of the protected content
+     * @param[in] mime The mime type of the protected content if it is not NULL or empty
      * @return
      *     Handle for the decryption session
      */
-    sp<DecryptHandle> openDecryptSession(int uniqueId, int fd, off64_t offset, off64_t length);
+    sp<DecryptHandle> openDecryptSession(
+            int uniqueId, int fd, off64_t offset, off64_t length, const char* mime);
 
     /**
      * Open the decrypt session to decrypt the given protected content
      *
      * @param[in] uniqueId Unique identifier for a session
      * @param[in] uri Path of the protected content to be decrypted
+     * @param[in] mime The mime type of the protected content if it is not NULL or empty
      * @return
      *     Handle for the decryption session
      */
-    sp<DecryptHandle> openDecryptSession(int uniqueId, const char* uri);
+    sp<DecryptHandle> openDecryptSession(
+            int uniqueId, const char* uri, const char* mime);
 
     /**
      * Close the decrypt session for the given handle
diff --git a/drm/libdrmframework/include/DrmManagerService.h b/drm/libdrmframework/include/DrmManagerService.h
index 9cb5804..1a8c2ae 100644
--- a/drm/libdrmframework/include/DrmManagerService.h
+++ b/drm/libdrmframework/include/DrmManagerService.h
@@ -98,9 +98,11 @@
 
     status_t getAllSupportInfo(int uniqueId, int* length, DrmSupportInfo** drmSupportInfoArray);
 
-    DecryptHandle* openDecryptSession(int uniqueId, int fd, off64_t offset, off64_t length);
+    DecryptHandle* openDecryptSession(
+        int uniqueId, int fd, off64_t offset, off64_t length, const char *mime);
 
-    DecryptHandle* openDecryptSession(int uniqueId, const char* uri);
+    DecryptHandle* openDecryptSession(
+        int uniqueId, const char* uri, const char* mime);
 
     status_t closeDecryptSession(int uniqueId, DecryptHandle* decryptHandle);
 
diff --git a/drm/libdrmframework/include/IDrmManagerService.h b/drm/libdrmframework/include/IDrmManagerService.h
index b9618bb..a7d21c5 100644
--- a/drm/libdrmframework/include/IDrmManagerService.h
+++ b/drm/libdrmframework/include/IDrmManagerService.h
@@ -139,9 +139,12 @@
     virtual status_t getAllSupportInfo(
             int uniqueId, int* length, DrmSupportInfo** drmSupportInfoArray) = 0;
 
-    virtual DecryptHandle* openDecryptSession(int uniqueId, int fd, off64_t offset, off64_t length) = 0;
+    virtual DecryptHandle* openDecryptSession(
+                int uniqueId, int fd, off64_t offset,
+                off64_t length, const char* mime) = 0;
 
-    virtual DecryptHandle* openDecryptSession(int uniqueId, const char* uri) = 0;
+    virtual DecryptHandle* openDecryptSession(
+                int uniqueId, const char* uri, const char* mime) = 0;
 
     virtual status_t closeDecryptSession(int uniqueId, DecryptHandle* decryptHandle) = 0;
 
@@ -222,9 +225,12 @@
     virtual status_t getAllSupportInfo(
             int uniqueId, int* length, DrmSupportInfo** drmSupportInfoArray);
 
-    virtual DecryptHandle* openDecryptSession(int uniqueId, int fd, off64_t offset, off64_t length);
+    virtual DecryptHandle* openDecryptSession(
+                int uniqueId, int fd, off64_t offset, off64_t length,
+                const char* mime);
 
-    virtual DecryptHandle* openDecryptSession(int uniqueId, const char* uri);
+    virtual DecryptHandle* openDecryptSession(
+                int uniqueId, const char* uri, const char* mime);
 
     virtual status_t closeDecryptSession(int uniqueId, DecryptHandle* decryptHandle);
 
diff --git a/drm/libdrmframework/plugins/common/include/DrmEngineBase.h b/drm/libdrmframework/plugins/common/include/DrmEngineBase.h
index 4a5afcf..08f6e6d 100644
--- a/drm/libdrmframework/plugins/common/include/DrmEngineBase.h
+++ b/drm/libdrmframework/plugins/common/include/DrmEngineBase.h
@@ -80,10 +80,12 @@
     DrmSupportInfo* getSupportInfo(int uniqueId);
 
     status_t openDecryptSession(
-            int uniqueId, DecryptHandle* decryptHandle, int fd, off64_t offset, off64_t length);
+            int uniqueId, DecryptHandle* decryptHandle,
+            int fd, off64_t offset, off64_t length, const char* mime);
 
     status_t openDecryptSession(
-            int uniqueId, DecryptHandle* decryptHandle, const char* uri);
+            int uniqueId, DecryptHandle* decryptHandle,
+            const char* uri, const char* mime);
 
     status_t closeDecryptSession(int uniqueId, DecryptHandle* decryptHandle);
 
@@ -375,7 +377,29 @@
      *     DRM_ERROR_CANNOT_HANDLE for failure and DRM_NO_ERROR for success
      */
     virtual status_t onOpenDecryptSession(
-            int uniqueId, DecryptHandle* decryptHandle, int fd, off64_t offset, off64_t length) = 0;
+            int uniqueId, DecryptHandle* decryptHandle,
+            int fd, off64_t offset, off64_t length) = 0;
+
+    /**
+     * Open the decrypt session to decrypt the given protected content
+     *
+     * @param[in] uniqueId Unique identifier for a session
+     * @param[in] decryptHandle Handle for the current decryption session
+     * @param[in] fd File descriptor of the protected content to be decrypted
+     * @param[in] offset Start position of the content
+     * @param[in] length The length of the protected content
+     * @param[in] mime Mime type of the protected content. The corresponding
+     *     drm plugin may do some optimization since the mime type is known.
+     * @return
+     *     DRM_ERROR_CANNOT_HANDLE for failure and DRM_NO_ERROR for success
+     */
+    virtual status_t onOpenDecryptSession(
+            int uniqueId, DecryptHandle* decryptHandle,
+            int fd, off64_t offset, off64_t length,
+            const char* mime) {
+
+        return DRM_ERROR_CANNOT_HANDLE;
+    }
 
     /**
      * Open the decrypt session to decrypt the given protected content
@@ -387,7 +411,26 @@
      *     DRM_ERROR_CANNOT_HANDLE for failure and DRM_NO_ERROR for success
      */
     virtual status_t onOpenDecryptSession(
-            int uniqueId, DecryptHandle* decryptHandle, const char* uri) = 0;
+            int uniqueId, DecryptHandle* decryptHandle,
+            const char* uri) = 0;
+
+    /**
+     * Open the decrypt session to decrypt the given protected content
+     *
+     * @param[in] uniqueId Unique identifier for a session
+     * @param[in] decryptHandle Handle for the current decryption session
+     * @param[in] uri Path of the protected content to be decrypted
+     * @param[in] mime Mime type of the protected content. The corresponding
+     *     drm plugin may do some optimization since the mime type is known.
+     * @return
+     *     DRM_ERROR_CANNOT_HANDLE for failure and DRM_NO_ERROR for success
+     */
+    virtual status_t onOpenDecryptSession(
+            int uniqueId, DecryptHandle* decryptHandle,
+            const char* uri, const char* mime) {
+
+        return DRM_ERROR_CANNOT_HANDLE;
+    }
 
     /**
      * Close the decrypt session for the given handle
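
DrmEngineBase keeps the original onOpenDecryptSession() overloads pure virtual and gives the new mime-aware overloads concrete defaults that return DRM_ERROR_CANNOT_HANDLE, so existing plugins keep compiling unchanged. The DrmEngineBase.cpp side of this change is not included in this excerpt; assuming it simply routes on whether a hint was supplied, the forwarding could look roughly like the sketch below (an assumption, not the verified implementation):

    // Routing sketch consistent with the header defaults above.
    status_t DrmEngineBase::openDecryptSession(
            int uniqueId, DecryptHandle* decryptHandle,
            int fd, off64_t offset, off64_t length, const char* mime) {

        if (mime == NULL || mime[0] == '\0') {
            // No hint: keep the pre-existing behaviour.
            return onOpenDecryptSession(uniqueId, decryptHandle, fd, offset, length);
        }
        // Hint supplied: plugins that override the new overload can use it to
        // skip content sniffing; the default above declines with
        // DRM_ERROR_CANNOT_HANDLE.
        return onOpenDecryptSession(uniqueId, decryptHandle, fd, offset, length, mime);
    }
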
diff --git a/drm/libdrmframework/plugins/common/include/IDrmEngine.h b/drm/libdrmframework/plugins/common/include/IDrmEngine.h
index 77460f6..dcf5977 100644
--- a/drm/libdrmframework/plugins/common/include/IDrmEngine.h
+++ b/drm/libdrmframework/plugins/common/include/IDrmEngine.h
@@ -320,11 +320,14 @@
      * @param[in] fd File descriptor of the protected content to be decrypted
      * @param[in] offset Start position of the content
      * @param[in] length The length of the protected content
+     * @param[in] mime Mime type of the protected content if it is
+     *     not NULL or empty
      * @return
      *     DRM_ERROR_CANNOT_HANDLE for failure and DRM_NO_ERROR for success
      */
     virtual status_t openDecryptSession(
-        int uniqueId, DecryptHandle* decryptHandle, int fd, off64_t offset, off64_t length) = 0;
+        int uniqueId, DecryptHandle* decryptHandle,
+        int fd, off64_t offset, off64_t length, const char* mime) = 0;
 
     /**
      * Open the decrypt session to decrypt the given protected content
@@ -332,11 +335,14 @@
      * @param[in] uniqueId Unique identifier for a session
      * @param[in] decryptHandle Handle for the current decryption session
      * @param[in] uri Path of the protected content to be decrypted
+     * @param[in] mime Mime type of the protected content if it is
+     *     not NULL or empty
      * @return
      *     DRM_ERROR_CANNOT_HANDLE for failure and DRM_NO_ERROR for success
      */
     virtual status_t openDecryptSession(
-        int uniqueId, DecryptHandle* decryptHandle, const char* uri) = 0;
+        int uniqueId, DecryptHandle* decryptHandle,
+        const char* uri, const char* mime) = 0;
 
     /**
      * Close the decrypt session for the given handle
diff --git a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/include/FwdLockEngine.h b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/include/FwdLockEngine.h
index 34804cf..c0e408e 100644
--- a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/include/FwdLockEngine.h
+++ b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/include/FwdLockEngine.h
@@ -499,6 +499,14 @@
 
 private:
 
+    static const String8 Description;
+    static const String8 FileSuffixes[];
+    static const String8 MimeTypes[];
+    static bool IsFileSuffixSupported(const String8& suffix);
+    static bool IsMimeTypeSupported(const String8& mime);
+    static void AddSupportedMimeTypes(DrmSupportInfo *info);
+    static void AddSupportedFileSuffixes(DrmSupportInfo *info);
+
 /**
  * Session Class for Forward Lock Conversion. An object of this class is created
  * for every conversion.
diff --git a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/include/FwdLockEngineConst.h b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/include/FwdLockEngineConst.h
deleted file mode 100644
index da95d60..0000000
--- a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/include/FwdLockEngineConst.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __FWDLOCKENGINECONST_H__
-#define __FWDLOCKENGINECONST_H__
-
-namespace android {
-
-/**
- * Constants for forward Lock Engine used for exposing engine's capabilities.
- */
-#define FWDLOCK_EXTENSION_FL           ("FL")
-#define FWDLOCK_DOTEXTENSION_FL        (".fl")
-#define FWDLOCK_MIMETYPE_FL            ("application/x-android-drm-fl")
-
-#define FWDLOCK_EXTENSION_DM           ("DM")
-#define FWDLOCK_DOTEXTENSION_DM        (".dm")
-#define FWDLOCK_MIMETYPE_DM            ("application/vnd.oma.drm.message")
-
-#define FWDLOCK_DESCRIPTION            ("OMA V1 Forward Lock")
-
-};
-
-#endif /* __FWDLOCKENGINECONST_H__ */
diff --git a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp
index 0273a4b..4b1b40e 100644
--- a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp
+++ b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/src/FwdLockEngine.cpp
@@ -35,7 +35,6 @@
 #include "FwdLockConv.h"
 #include "FwdLockFile.h"
 #include "FwdLockGlue.h"
-#include "FwdLockEngineConst.h"
 #include "MimeTypeUtil.h"
 
 #undef LOG_TAG
@@ -160,6 +159,54 @@
     return DRM_NO_ERROR;
 }
 
+// make sure that lower-case letters are used.
+const String8 FwdLockEngine::FileSuffixes[] = {
+    String8(".fl"),
+    String8(".dm"),
+};
+
+// make sure that lower-case letters are used.
+const String8 FwdLockEngine::MimeTypes[] = {
+    String8("application/x-android-drm-fl"),
+    String8("application/vnd.oma.drm.message"),
+};
+
+const String8 FwdLockEngine::Description("OMA V1 Forward Lock");
+
+void FwdLockEngine::AddSupportedMimeTypes(DrmSupportInfo *info) {
+    for (size_t i = 0, n = sizeof(MimeTypes)/sizeof(MimeTypes[0]); i < n; ++i) {
+        info->addMimeType(MimeTypes[i]);
+    }
+}
+
+void FwdLockEngine::AddSupportedFileSuffixes(DrmSupportInfo *info) {
+    for (size_t i = 0, n = sizeof(FileSuffixes)/sizeof(FileSuffixes[0]); i < n; ++i) {
+        info->addFileSuffix(FileSuffixes[i]);
+    }
+}
+
+bool FwdLockEngine::IsMimeTypeSupported(const String8& mime) {
+    String8 tmp(mime);
+    tmp.toLower();
+    for (size_t i = 0, n = sizeof(MimeTypes)/sizeof(MimeTypes[0]); i < n; ++i) {
+        if (tmp == MimeTypes[i]) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool FwdLockEngine::IsFileSuffixSupported(const String8& suffix) {
+    String8 tmp(suffix);
+    tmp.toLower();
+    for (size_t i = 0, n = sizeof(FileSuffixes)/sizeof(FileSuffixes[0]); i < n; ++i) {
+        if (tmp == FileSuffixes[i]) {
+            return true;
+        }
+    }
+    return false;
+}
+
 DrmSupportInfo* FwdLockEngine::onGetSupportInfo(int uniqueId) {
     DrmSupportInfo* pSupportInfo = new DrmSupportInfo();
 
@@ -167,12 +214,9 @@
 
     // fill all Forward Lock mimetypes and extensions
     if (NULL != pSupportInfo) {
-        pSupportInfo->addMimeType(String8(FWDLOCK_MIMETYPE_FL));
-        pSupportInfo->addFileSuffix(String8(FWDLOCK_DOTEXTENSION_FL));
-        pSupportInfo->addMimeType(String8(FWDLOCK_MIMETYPE_DM));
-        pSupportInfo->addFileSuffix(String8(FWDLOCK_DOTEXTENSION_DM));
-
-        pSupportInfo->setDescription(String8(FWDLOCK_DESCRIPTION));
+        AddSupportedMimeTypes(pSupportInfo);
+        AddSupportedFileSuffixes(pSupportInfo);
+        pSupportInfo->setDescription(Description);
     }
 
     return pSupportInfo;
@@ -182,14 +226,7 @@
     bool result = false;
 
     String8 extString = path.getPathExtension();
-
-    extString.toLower();
-
-    if ((extString == String8(FWDLOCK_DOTEXTENSION_FL)) ||
-        (extString == String8(FWDLOCK_DOTEXTENSION_DM))) {
-        result = true;
-    }
-    return result;
+    return IsFileSuffixSupported(extString);
 }
 
 DrmInfoStatus* FwdLockEngine::onProcessDrmInfo(int uniqueId, const DrmInfo* drmInfo) {
@@ -299,8 +336,6 @@
 
     LOG_VERBOSE("FwdLockEngine::onGetDrmObjectType");
 
-    mimeStr.toLower();
-
     /* Checks whether
     * 1. path and mime type both are not empty strings (meaning unavailable) else content is unknown
     * 2. if one of them is empty string and if other is known then its a DRM Content Object.
@@ -308,8 +343,7 @@
     *    (regardless of the relation between them to make it compatible with other DRM Engines)
     */
     if (((0 == path.length()) || onCanHandle(uniqueId, path)) &&
-        ((0 == mimeType.length()) || ((mimeStr == String8(FWDLOCK_MIMETYPE_FL)) ||
-        (mimeStr == String8(FWDLOCK_MIMETYPE_DM)))) && (mimeType != path) ) {
+        ((0 == mimeType.length()) || IsMimeTypeSupported(mimeType)) && (mimeType != path) ) {
             return DrmObjectType::CONTENT;
     }
 
diff --git a/drm/libdrmframework/plugins/forward-lock/internal-format/decoder/FwdLockFile.c b/drm/libdrmframework/plugins/forward-lock/internal-format/decoder/FwdLockFile.c
index dacf00e..365bdec 100644
--- a/drm/libdrmframework/plugins/forward-lock/internal-format/decoder/FwdLockFile.c
+++ b/drm/libdrmframework/plugins/forward-lock/internal-format/decoder/FwdLockFile.c
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include <utils/Log.h>
 #include <assert.h>
 #include <errno.h>
 #include <fcntl.h>
@@ -107,6 +108,7 @@
         }
         pthread_mutex_unlock(&sessionAcquisitionMutex);
         if (i == MAX_NUM_SESSIONS) {
+            ALOGE("Too many sessions opened at the same time");
             errno = ENFILE;
         }
     }
@@ -293,7 +295,12 @@
 
 int FwdLockFile_open(const char *pFilename) {
     int fileDesc = open(pFilename, O_RDONLY);
-    if (fileDesc >= 0 && FwdLockFile_attach(fileDesc) < 0) {
+    if (fileDesc < 0) {
+        ALOGE("failed to open file '%s': %s", pFilename, strerror(errno));
+        return fileDesc;
+    }
+
+    if (FwdLockFile_attach(fileDesc) < 0) {
         (void)close(fileDesc);
         fileDesc = -1;
     }
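
With the change above, a failed open(2) is logged with strerror and returned immediately, an attach failure still closes the descriptor and returns -1, and exhausting the fixed session table sets errno to ENFILE. A small caller-side sketch of that contract (the path is purely illustrative):

    #include <errno.h>
    #include "FwdLockFile.h"

    // Sketch: FwdLockFile_open() returns a usable descriptor or -1; on failure
    // the descriptor has already been closed and errno holds the reason.
    static int openForwardLockedSample() {
        int fd = FwdLockFile_open("/sdcard/sample.fl");  // illustrative path
        if (fd < 0) {
            // ENFILE means the session table is full; any other errno comes from
            // open(2) or from attaching the forward-lock session.
            return -1;
        }
        return fd;  // the caller is responsible for closing the session when done
    }
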
diff --git a/drm/libdrmframework/plugins/passthru/src/DrmPassthruPlugIn.cpp b/drm/libdrmframework/plugins/passthru/src/DrmPassthruPlugIn.cpp
index 0ffc0a7..a3eac3e 100644
--- a/drm/libdrmframework/plugins/passthru/src/DrmPassthruPlugIn.cpp
+++ b/drm/libdrmframework/plugins/passthru/src/DrmPassthruPlugIn.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_NDEBUG 0
+//#define LOG_NDEBUG 0
 #define LOG_TAG "DrmPassthruPlugIn"
 #include <utils/Log.h>
 
@@ -58,7 +58,7 @@
 
 DrmConstraints* DrmPassthruPlugIn::onGetConstraints(
         int uniqueId, const String8* path, int action) {
-    ALOGD("DrmPassthruPlugIn::onGetConstraints From Path: %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onGetConstraints From Path: %d", uniqueId);
     DrmConstraints* drmConstraints = new DrmConstraints();
 
     String8 value("dummy_available_time");
@@ -73,7 +73,7 @@
 }
 
 DrmInfoStatus* DrmPassthruPlugIn::onProcessDrmInfo(int uniqueId, const DrmInfo* drmInfo) {
-    ALOGD("DrmPassthruPlugIn::onProcessDrmInfo - Enter : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onProcessDrmInfo - Enter : %d", uniqueId);
     DrmInfoStatus* drmInfoStatus = NULL;
     if (NULL != drmInfo) {
         switch (drmInfo->getInfoType()) {
@@ -102,28 +102,28 @@
         }
         }
     }
-    ALOGD("DrmPassthruPlugIn::onProcessDrmInfo - Exit");
+    ALOGV("DrmPassthruPlugIn::onProcessDrmInfo - Exit");
     return drmInfoStatus;
 }
 
 status_t DrmPassthruPlugIn::onSetOnInfoListener(
             int uniqueId, const IDrmEngine::OnInfoListener* infoListener) {
-    ALOGD("DrmPassthruPlugIn::onSetOnInfoListener : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onSetOnInfoListener : %d", uniqueId);
     return DRM_NO_ERROR;
 }
 
 status_t DrmPassthruPlugIn::onInitialize(int uniqueId) {
-    ALOGD("DrmPassthruPlugIn::onInitialize : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onInitialize : %d", uniqueId);
     return DRM_NO_ERROR;
 }
 
 status_t DrmPassthruPlugIn::onTerminate(int uniqueId) {
-    ALOGD("DrmPassthruPlugIn::onTerminate : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onTerminate : %d", uniqueId);
     return DRM_NO_ERROR;
 }
 
 DrmSupportInfo* DrmPassthruPlugIn::onGetSupportInfo(int uniqueId) {
-    ALOGD("DrmPassthruPlugIn::onGetSupportInfo : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onGetSupportInfo : %d", uniqueId);
     DrmSupportInfo* drmSupportInfo = new DrmSupportInfo();
     // Add mimetype's
     drmSupportInfo->addMimeType(String8("application/vnd.passthru.drm"));
@@ -136,12 +136,12 @@
 
 status_t DrmPassthruPlugIn::onSaveRights(int uniqueId, const DrmRights& drmRights,
             const String8& rightsPath, const String8& contentPath) {
-    ALOGD("DrmPassthruPlugIn::onSaveRights : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onSaveRights : %d", uniqueId);
     return DRM_NO_ERROR;
 }
 
 DrmInfo* DrmPassthruPlugIn::onAcquireDrmInfo(int uniqueId, const DrmInfoRequest* drmInfoRequest) {
-    ALOGD("DrmPassthruPlugIn::onAcquireDrmInfo : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onAcquireDrmInfo : %d", uniqueId);
     DrmInfo* drmInfo = NULL;
 
     if (NULL != drmInfoRequest) {
@@ -157,65 +157,65 @@
 }
 
 bool DrmPassthruPlugIn::onCanHandle(int uniqueId, const String8& path) {
-    ALOGD("DrmPassthruPlugIn::canHandle: %s ", path.string());
+    ALOGV("DrmPassthruPlugIn::canHandle: %s ", path.string());
     String8 extension = path.getPathExtension();
     extension.toLower();
     return (String8(".passthru") == extension);
 }
 
 String8 DrmPassthruPlugIn::onGetOriginalMimeType(int uniqueId, const String8& path) {
-    ALOGD("DrmPassthruPlugIn::onGetOriginalMimeType() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onGetOriginalMimeType() : %d", uniqueId);
     return String8("video/passthru");
 }
 
 int DrmPassthruPlugIn::onGetDrmObjectType(
             int uniqueId, const String8& path, const String8& mimeType) {
-    ALOGD("DrmPassthruPlugIn::onGetDrmObjectType() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onGetDrmObjectType() : %d", uniqueId);
     return DrmObjectType::UNKNOWN;
 }
 
 int DrmPassthruPlugIn::onCheckRightsStatus(int uniqueId, const String8& path, int action) {
-    ALOGD("DrmPassthruPlugIn::onCheckRightsStatus() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onCheckRightsStatus() : %d", uniqueId);
     int rightsStatus = RightsStatus::RIGHTS_VALID;
     return rightsStatus;
 }
 
 status_t DrmPassthruPlugIn::onConsumeRights(int uniqueId, DecryptHandle* decryptHandle,
             int action, bool reserve) {
-    ALOGD("DrmPassthruPlugIn::onConsumeRights() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onConsumeRights() : %d", uniqueId);
     return DRM_NO_ERROR;
 }
 
 status_t DrmPassthruPlugIn::onSetPlaybackStatus(int uniqueId, DecryptHandle* decryptHandle,
             int playbackStatus, int64_t position) {
-    ALOGD("DrmPassthruPlugIn::onSetPlaybackStatus() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onSetPlaybackStatus() : %d", uniqueId);
     return DRM_NO_ERROR;
 }
 
 bool DrmPassthruPlugIn::onValidateAction(int uniqueId, const String8& path,
             int action, const ActionDescription& description) {
-    ALOGD("DrmPassthruPlugIn::onValidateAction() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onValidateAction() : %d", uniqueId);
     return true;
 }
 
 status_t DrmPassthruPlugIn::onRemoveRights(int uniqueId, const String8& path) {
-    ALOGD("DrmPassthruPlugIn::onRemoveRights() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onRemoveRights() : %d", uniqueId);
     return DRM_NO_ERROR;
 }
 
 status_t DrmPassthruPlugIn::onRemoveAllRights(int uniqueId) {
-    ALOGD("DrmPassthruPlugIn::onRemoveAllRights() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onRemoveAllRights() : %d", uniqueId);
     return DRM_NO_ERROR;
 }
 
 status_t DrmPassthruPlugIn::onOpenConvertSession(int uniqueId, int convertId) {
-    ALOGD("DrmPassthruPlugIn::onOpenConvertSession() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onOpenConvertSession() : %d", uniqueId);
     return DRM_NO_ERROR;
 }
 
 DrmConvertedStatus* DrmPassthruPlugIn::onConvertData(
             int uniqueId, int convertId, const DrmBuffer* inputData) {
-    ALOGD("DrmPassthruPlugIn::onConvertData() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onConvertData() : %d", uniqueId);
     DrmBuffer* convertedData = NULL;
 
     if (NULL != inputData && 0 < inputData->length) {
@@ -229,13 +229,13 @@
 }
 
 DrmConvertedStatus* DrmPassthruPlugIn::onCloseConvertSession(int uniqueId, int convertId) {
-    ALOGD("DrmPassthruPlugIn::onCloseConvertSession() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onCloseConvertSession() : %d", uniqueId);
     return new DrmConvertedStatus(DrmConvertedStatus::STATUS_OK, NULL, 0 /*offset*/);
 }
 
 status_t DrmPassthruPlugIn::onOpenDecryptSession(
             int uniqueId, DecryptHandle* decryptHandle, int fd, off64_t offset, off64_t length) {
-    ALOGD("DrmPassthruPlugIn::onOpenDecryptSession() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onOpenDecryptSession() : %d", uniqueId);
 
 #ifdef ENABLE_PASSTHRU_DECRYPTION
     decryptHandle->mimeType = String8("video/passthru");
@@ -254,7 +254,7 @@
 }
 
 status_t DrmPassthruPlugIn::onCloseDecryptSession(int uniqueId, DecryptHandle* decryptHandle) {
-    ALOGD("DrmPassthruPlugIn::onCloseDecryptSession() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onCloseDecryptSession() : %d", uniqueId);
     if (NULL != decryptHandle) {
         if (NULL != decryptHandle->decryptInfo) {
             delete decryptHandle->decryptInfo; decryptHandle->decryptInfo = NULL;
@@ -266,34 +266,40 @@
 
 status_t DrmPassthruPlugIn::onInitializeDecryptUnit(int uniqueId, DecryptHandle* decryptHandle,
             int decryptUnitId, const DrmBuffer* headerInfo) {
-    ALOGD("DrmPassthruPlugIn::onInitializeDecryptUnit() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onInitializeDecryptUnit() : %d", uniqueId);
     return DRM_NO_ERROR;
 }
 
 status_t DrmPassthruPlugIn::onDecrypt(int uniqueId, DecryptHandle* decryptHandle,
             int decryptUnitId, const DrmBuffer* encBuffer, DrmBuffer** decBuffer, DrmBuffer* IV) {
-    ALOGD("DrmPassthruPlugIn::onDecrypt() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onDecrypt() : %d", uniqueId);
     /**
      * As a workaround implementation passthru would copy the given
      * encrypted buffer as it is to decrypted buffer. Note, decBuffer
      * memory has to be allocated by the caller.
      */
     if (NULL != (*decBuffer) && 0 < (*decBuffer)->length) {
-        memcpy((*decBuffer)->data, encBuffer->data, encBuffer->length);
-        (*decBuffer)->length = encBuffer->length;
+        if ((*decBuffer)->length >= encBuffer->length) {
+            memcpy((*decBuffer)->data, encBuffer->data, encBuffer->length);
+            (*decBuffer)->length = encBuffer->length;
+        } else {
+            ALOGE("decBuffer size (%d) too small to hold %d bytes",
+                (*decBuffer)->length, encBuffer->length);
+            return DRM_ERROR_UNKNOWN;
+        }
     }
     return DRM_NO_ERROR;
 }
 
 status_t DrmPassthruPlugIn::onFinalizeDecryptUnit(
             int uniqueId, DecryptHandle* decryptHandle, int decryptUnitId) {
-    ALOGD("DrmPassthruPlugIn::onFinalizeDecryptUnit() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onFinalizeDecryptUnit() : %d", uniqueId);
     return DRM_NO_ERROR;
 }
 
 ssize_t DrmPassthruPlugIn::onPread(int uniqueId, DecryptHandle* decryptHandle,
             void* buffer, ssize_t numBytes, off64_t offset) {
-    ALOGD("DrmPassthruPlugIn::onPread() : %d", uniqueId);
+    ALOGV("DrmPassthruPlugIn::onPread() : %d", uniqueId);
     return 0;
 }
 
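
The onDecrypt() change makes the caller contract explicit: *decBuffer is caller-allocated and must be at least as large as the encrypted input, otherwise the copy is refused with DRM_ERROR_UNKNOWN instead of overflowing the buffer. A minimal allocation sketch (the helper name is illustrative):

    #include <drm/drm_framework_common.h>

    using namespace android;

    // Illustrative helper: pre-allocate an output buffer large enough for the
    // passthru copy path; the caller owns both the DrmBuffer and its storage.
    static DrmBuffer* makeDecryptOutputFor(const DrmBuffer& encBuffer) {
        char* storage = new char[encBuffer.length];
        return new DrmBuffer(storage, encBuffer.length);
    }
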
diff --git a/include/camera/Camera.h b/include/camera/Camera.h
index 234e165..3fedea0 100644
--- a/include/camera/Camera.h
+++ b/include/camera/Camera.h
@@ -72,7 +72,7 @@
     static  int32_t     getNumberOfCameras();
     static  status_t    getCameraInfo(int cameraId,
                                       struct CameraInfo* cameraInfo);
-    static  sp<Camera>  connect(int cameraId);
+    static  sp<Camera>  connect(int cameraId, bool force, bool keep);
             virtual     ~Camera();
             void        init();
 
diff --git a/include/camera/CameraParameters.h b/include/camera/CameraParameters.h
deleted file mode 100644
index 7edf6b4..0000000
--- a/include/camera/CameraParameters.h
+++ /dev/null
@@ -1,666 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_CAMERA_PARAMETERS_H
-#define ANDROID_HARDWARE_CAMERA_PARAMETERS_H
-
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-
-namespace android {
-
-struct Size {
-    int width;
-    int height;
-
-    Size() {
-        width = 0;
-        height = 0;
-    }
-
-    Size(int w, int h) {
-        width = w;
-        height = h;
-    }
-};
-
-class CameraParameters
-{
-public:
-    CameraParameters();
-    CameraParameters(const String8 &params) { unflatten(params); }
-    ~CameraParameters();
-
-    String8 flatten() const;
-    void unflatten(const String8 &params);
-
-    void set(const char *key, const char *value);
-    void set(const char *key, int value);
-    void setFloat(const char *key, float value);
-    const char *get(const char *key) const;
-    int getInt(const char *key) const;
-    float getFloat(const char *key) const;
-
-    void remove(const char *key);
-
-    void setPreviewSize(int width, int height);
-    void getPreviewSize(int *width, int *height) const;
-    void getSupportedPreviewSizes(Vector<Size> &sizes) const;
-
-    // Set the dimensions in pixels to the given width and height
-    // for video frames. The given width and height must be one
-    // of the supported dimensions returned from
-    // getSupportedVideoSizes(). Must not be called if
-    // getSupportedVideoSizes() returns an empty Vector of Size.
-    void setVideoSize(int width, int height);
-    // Retrieve the current dimensions (width and height)
-    // in pixels for video frames, which must be one of the
-    // supported dimensions returned from getSupportedVideoSizes().
-    // Must not be called if getSupportedVideoSizes() returns an
-    // empty Vector of Size.
-    void getVideoSize(int *width, int *height) const;
-    // Retrieve a Vector of supported dimensions (width and height)
-    // in pixels for video frames. If the Vector of sizes returned from the
-    // method is empty, the camera does not support calls to setVideoSize()
-    // or getVideoSize(). In addition, it also indicates that
-    // the camera only has a single output, and does not have a
-    // separate output for video frames and preview frames.
-    void getSupportedVideoSizes(Vector<Size> &sizes) const;
-    // Retrieve the preferred preview size (width and height) in pixels
-    // for video recording. The given width and height must be one of
-    // supported preview sizes returned from getSupportedPreviewSizes().
-    // Must not be called if getSupportedVideoSizes() returns an empty
-    // Vector of Size. If getSupportedVideoSizes() returns an empty
-    // Vector of Size, the width and height returned from this method
-    // is invalid, and is "-1x-1".
-    void getPreferredPreviewSizeForVideo(int *width, int *height) const;
-
-    void setPreviewFrameRate(int fps);
-    int getPreviewFrameRate() const;
-    void getPreviewFpsRange(int *min_fps, int *max_fps) const;
-    void setPreviewFormat(const char *format);
-    const char *getPreviewFormat() const;
-    void setPictureSize(int width, int height);
-    void getPictureSize(int *width, int *height) const;
-    void getSupportedPictureSizes(Vector<Size> &sizes) const;
-    void setPictureFormat(const char *format);
-    const char *getPictureFormat() const;
-
-    void dump() const;
-    status_t dump(int fd, const Vector<String16>& args) const;
-
-    // Parameter keys to communicate between camera application and driver.
-    // The access (read/write, read only, or write only) is viewed from the
-    // perspective of applications, not driver.
-
-    // Preview frame size in pixels (width x height).
-    // Example value: "480x320". Read/Write.
-    static const char KEY_PREVIEW_SIZE[];
-    // Supported preview frame sizes in pixels.
-    // Example value: "800x600,480x320". Read only.
-    static const char KEY_SUPPORTED_PREVIEW_SIZES[];
-    // The current minimum and maximum preview fps. This controls the rate of
-    // preview frames received (CAMERA_MSG_PREVIEW_FRAME). The minimum and
-    // maximum fps must be one of the elements from
-    // KEY_SUPPORTED_PREVIEW_FPS_RANGE parameter.
-    // Example value: "10500,26623"
-    static const char KEY_PREVIEW_FPS_RANGE[];
-    // The supported preview fps (frame-per-second) ranges. Each range contains
-    // a minimum fps and maximum fps. If the minimum fps equals the maximum fps,
-    // the camera outputs frames at a fixed frame rate. If not, the camera outputs
-    // frames at an auto frame rate. The actual frame rate fluctuates between the
-    // minimum and the maximum. The list has at least one element. The list is
-    // sorted from small to large (first by maximum fps and then minimum fps).
-    // Example value: "(10500,26623),(15000,26623),(30000,30000)"
-    static const char KEY_SUPPORTED_PREVIEW_FPS_RANGE[];
-    // The image format for preview frames. See CAMERA_MSG_PREVIEW_FRAME in
-    // frameworks/base/include/camera/Camera.h.
-    // Example value: "yuv420sp" or PIXEL_FORMAT_XXX constants. Read/write.
-    static const char KEY_PREVIEW_FORMAT[];
-    // Supported image formats for preview frames.
-    // Example value: "yuv420sp,yuv422i-yuyv". Read only.
-    static const char KEY_SUPPORTED_PREVIEW_FORMATS[];
-    // Number of preview frames per second. This is the target frame rate. The
-    // actual frame rate depends on the driver.
-    // Example value: "15". Read/write.
-    static const char KEY_PREVIEW_FRAME_RATE[];
-    // Supported number of preview frames per second.
-    // Example value: "24,15,10". Read.
-    static const char KEY_SUPPORTED_PREVIEW_FRAME_RATES[];
-    // The dimensions for captured pictures in pixels (width x height).
-    // Example value: "1024x768". Read/write.
-    static const char KEY_PICTURE_SIZE[];
-    // Supported dimensions for captured pictures in pixels.
-    // Example value: "2048x1536,1024x768". Read only.
-    static const char KEY_SUPPORTED_PICTURE_SIZES[];
-    // The image format for captured pictures. See CAMERA_MSG_COMPRESSED_IMAGE
-    // in frameworks/base/include/camera/Camera.h.
-    // Example value: "jpeg" or PIXEL_FORMAT_XXX constants. Read/write.
-    static const char KEY_PICTURE_FORMAT[];
-    // Supported image formats for captured pictures.
-    // Example value: "jpeg,rgb565". Read only.
-    static const char KEY_SUPPORTED_PICTURE_FORMATS[];
-    // The width (in pixels) of EXIF thumbnail in Jpeg picture.
-    // Example value: "512". Read/write.
-    static const char KEY_JPEG_THUMBNAIL_WIDTH[];
-    // The height (in pixels) of EXIF thumbnail in Jpeg picture.
-    // Example value: "384". Read/write.
-    static const char KEY_JPEG_THUMBNAIL_HEIGHT[];
-    // Supported EXIF thumbnail sizes (width x height). 0x0 means no thumbnail
-    // in EXIF.
-    // Example value: "512x384,320x240,0x0". Read only.
-    static const char KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES[];
-    // The quality of the EXIF thumbnail in Jpeg picture. The range is 1 to 100,
-    // with 100 being the best.
-    // Example value: "90". Read/write.
-    static const char KEY_JPEG_THUMBNAIL_QUALITY[];
-    // Jpeg quality of captured picture. The range is 1 to 100, with 100 being
-    // the best.
-    // Example value: "90". Read/write.
-    static const char KEY_JPEG_QUALITY[];
-    // The rotation angle in degrees relative to the orientation of the camera.
-    // This affects the pictures returned from CAMERA_MSG_COMPRESSED_IMAGE. The
-    // camera driver may set orientation in the EXIF header without rotating the
-    // picture. Or the driver may rotate the picture and the EXIF thumbnail. If
-    // the Jpeg picture is rotated, the orientation in the EXIF header will be
-    // missing or 1 (row #0 is top and column #0 is left side).
-    //
-    // Note that the JPEG pictures of front-facing cameras are not mirrored
-    // as in preview display.
-    //
-    // For example, suppose the natural orientation of the device is portrait.
-    // The device is rotated 270 degrees clockwise, so the device orientation is
-    // 270. Suppose a back-facing camera sensor is mounted in landscape and the
-    // top side of the camera sensor is aligned with the right edge of the
-    // display in natural orientation. So the camera orientation is 90. The
-    // rotation should be set to 0 (270 + 90).
-    //
-    // Example value: "0" or "90" or "180" or "270". Write only.
-    static const char KEY_ROTATION[];
-    // GPS latitude coordinate. GPSLatitude and GPSLatitudeRef will be stored in
-    // JPEG EXIF header.
-    // Example value: "25.032146" or "-33.462809". Write only.
-    static const char KEY_GPS_LATITUDE[];
-    // GPS longitude coordinate. GPSLongitude and GPSLongitudeRef will be stored
-    // in JPEG EXIF header.
-    // Example value: "121.564448" or "-70.660286". Write only.
-    static const char KEY_GPS_LONGITUDE[];
-    // GPS altitude. GPSAltitude and GPSAltitudeRef will be stored in JPEG EXIF
-    // header.
-    // Example value: "21.0" or "-5". Write only.
-    static const char KEY_GPS_ALTITUDE[];
-    // GPS timestamp (UTC in seconds since January 1, 1970). This should be
-    // stored in JPEG EXIF header.
-    // Example value: "1251192757". Write only.
-    static const char KEY_GPS_TIMESTAMP[];
-    // GPS Processing Method
-    // Example value: "GPS" or "NETWORK". Write only.
-    static const char KEY_GPS_PROCESSING_METHOD[];
-    // Current white balance setting.
-    // Example value: "auto" or WHITE_BALANCE_XXX constants. Read/write.
-    static const char KEY_WHITE_BALANCE[];
-    // Supported white balance settings.
-    // Example value: "auto,incandescent,daylight". Read only.
-    static const char KEY_SUPPORTED_WHITE_BALANCE[];
-    // Current color effect setting.
-    // Example value: "none" or EFFECT_XXX constants. Read/write.
-    static const char KEY_EFFECT[];
-    // Supported color effect settings.
-    // Example value: "none,mono,sepia". Read only.
-    static const char KEY_SUPPORTED_EFFECTS[];
-    // Current antibanding setting.
-    // Example value: "auto" or ANTIBANDING_XXX constants. Read/write.
-    static const char KEY_ANTIBANDING[];
-    // Supported antibanding settings.
-    // Example value: "auto,50hz,60hz,off". Read only.
-    static const char KEY_SUPPORTED_ANTIBANDING[];
-    // Current scene mode.
-    // Example value: "auto" or SCENE_MODE_XXX constants. Read/write.
-    static const char KEY_SCENE_MODE[];
-    // Supported scene mode settings.
-    // Example value: "auto,night,fireworks". Read only.
-    static const char KEY_SUPPORTED_SCENE_MODES[];
-    // Current flash mode.
-    // Example value: "auto" or FLASH_MODE_XXX constants. Read/write.
-    static const char KEY_FLASH_MODE[];
-    // Supported flash modes.
-    // Example value: "auto,on,off". Read only.
-    static const char KEY_SUPPORTED_FLASH_MODES[];
-    // Current focus mode. This will not be empty. Applications should call
-    // CameraHardwareInterface.autoFocus to start the focus if focus mode is
-    // FOCUS_MODE_AUTO or FOCUS_MODE_MACRO.
-    // Example value: "auto" or FOCUS_MODE_XXX constants. Read/write.
-    static const char KEY_FOCUS_MODE[];
-    // Supported focus modes.
-    // Example value: "auto,macro,fixed". Read only.
-    static const char KEY_SUPPORTED_FOCUS_MODES[];
-    // The maximum number of focus areas supported. This is the maximum length
-    // of KEY_FOCUS_AREAS.
-    // Example value: "0" or "2". Read only.
-    static const char KEY_MAX_NUM_FOCUS_AREAS[];
-    // Current focus areas.
-    //
-    // Before accessing this parameter, apps should check
-    // KEY_MAX_NUM_FOCUS_AREAS first to know the maximum number of focus
-    // areas. If the value is 0, focus areas are not supported.
-    //
-    // Each focus area is a five-element int array. The first four elements are
-    // the rectangle of the area (left, top, right, bottom). The direction is
-    // relative to the sensor orientation, that is, what the sensor sees. The
-    // direction is not affected by the rotation or mirroring of
-    // CAMERA_CMD_SET_DISPLAY_ORIENTATION. Coordinates range from -1000 to 1000.
-    // (-1000,-1000) is the upper left point. (1000, 1000) is the lower right
-    // point. The width and height of focus areas cannot be 0 or negative.
-    //
-    // The fifth element is the weight. Values for weight must range from 1 to
-    // 1000.  The weight should be interpreted as a per-pixel weight - all
-    // pixels in the area have the specified weight. This means a small area
-    // with the same weight as a larger area will have less influence on the
-    // focusing than the larger area. Focus areas can partially overlap and the
-    // driver will add the weights in the overlap region.
-    //
-    // A special case of a single focus area (0,0,0,0,0) means the driver decides
-    // the focus area. For example, the driver may use more signals to decide
-    // focus areas and change them dynamically. Apps can set (0,0,0,0,0) if they
-    // want the driver to decide focus areas.
-    //
-    // Focus areas are relative to the current field of view (KEY_ZOOM). No
-    // matter what the zoom level is, (-1000,-1000) represents the top of the
-    // currently visible camera frame. The focus area cannot be set to be
-    // outside the current field of view, even when using zoom.
-    //
-    // Focus area only has effect if the current focus mode is FOCUS_MODE_AUTO,
-    // FOCUS_MODE_MACRO, FOCUS_MODE_CONTINUOUS_VIDEO, or
-    // FOCUS_MODE_CONTINUOUS_PICTURE.
-    // Example value: "(-10,-10,0,0,300),(0,0,10,10,700)". Read/write.
-    static const char KEY_FOCUS_AREAS[];
-    // Focal length in millimeter.
-    // Example value: "4.31". Read only.
-    static const char KEY_FOCAL_LENGTH[];
-    // Horizontal angle of view in degrees.
-    // Example value: "54.8". Read only.
-    static const char KEY_HORIZONTAL_VIEW_ANGLE[];
-    // Vertical angle of view in degrees.
-    // Example value: "42.5". Read only.
-    static const char KEY_VERTICAL_VIEW_ANGLE[];
-    // Exposure compensation index. 0 means exposure is not adjusted.
-    // Example value: "0" or "5". Read/write.
-    static const char KEY_EXPOSURE_COMPENSATION[];
-    // The maximum exposure compensation index (>=0).
-    // Example value: "6". Read only.
-    static const char KEY_MAX_EXPOSURE_COMPENSATION[];
-    // The minimum exposure compensation index (<=0).
-    // Example value: "-6". Read only.
-    static const char KEY_MIN_EXPOSURE_COMPENSATION[];
-    // The exposure compensation step. Exposure compensation index multiplied by
-    // step equals EV. Ex: if exposure compensation index is -6 and step is
-    // 0.3333, EV is -2.
-    // Example value: "0.333333333" or "0.5". Read only.
-    static const char KEY_EXPOSURE_COMPENSATION_STEP[];
-    // The state of the auto-exposure lock. "true" means that
-    // auto-exposure is locked to its current value and will not
-    // change. "false" means the auto-exposure routine is free to
-    // change exposure values. If auto-exposure is already locked,
-    // setting this to true again has no effect (the driver will not
-    // recalculate exposure values). Changing exposure compensation
-    // settings will still affect the exposure settings while
-    // auto-exposure is locked. Stopping preview or taking a still
-    // image will not change the lock. In conjunction with
-    // exposure compensation, this allows for capturing multi-exposure
-    // brackets with known relative exposure values. Locking
-    // auto-exposure after open but before the first call to
-    // startPreview may result in severely over- or under-exposed
-    // images.  The driver will not change the AE lock after
-    // auto-focus completes.
-    static const char KEY_AUTO_EXPOSURE_LOCK[];
-    // Whether locking the auto-exposure is supported. "true" means it is, and
-    // "false" or this key not existing means it is not supported.
-    static const char KEY_AUTO_EXPOSURE_LOCK_SUPPORTED[];
-    // The state of the auto-white balance lock. "true" means that
-    // auto-white balance is locked to its current value and will not
-    // change. "false" means the auto-white balance routine is free to
-    // change white balance values. If auto-white balance is already
-    // locked, setting this to true again has no effect (the driver
-    // will not recalculate white balance values). Stopping preview or
-    // taking a still image will not change the lock. In conjunction
-    // with exposure compensation, this allows for capturing
-    // multi-exposure brackets with fixed white balance. Locking
-    // auto-white balance after open but before the first call to
-    // startPreview may result in severely incorrect color.  The
-    // driver will not change the AWB lock after auto-focus
-    // completes.
-    static const char KEY_AUTO_WHITEBALANCE_LOCK[];
-    // Whether locking the auto-white balance is supported. "true"
-    // means it is, and "false" or this key not existing means it is
-    // not supported.
-    static const char KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED[];
-
-    // The maximum number of metering areas supported. This is the maximum
-    // length of KEY_METERING_AREAS.
-    // Example value: "0" or "2". Read only.
-    static const char KEY_MAX_NUM_METERING_AREAS[];
-    // Current metering areas. Camera driver uses these areas to decide
-    // exposure.
-    //
-    // Before accessing this parameter, apps should check
-    // KEY_MAX_NUM_METERING_AREAS first to know the maximum number of metering
-    // areas. If the value is 0, metering areas are not supported.
-    //
-    // Each metering area is a rectangle with specified weight. The direction is
-    // relative to the sensor orientation, that is, what the sensor sees. The
-    // direction is not affected by the rotation or mirroring of
-    // CAMERA_CMD_SET_DISPLAY_ORIENTATION. Coordinates of the rectangle range
-    // from -1000 to 1000. (-1000, -1000) is the upper left point. (1000, 1000)
-    // is the lower right point. The width and height of metering areas cannot
-    // be 0 or negative.
-    //
-    // The fifth element is the weight. Values for weight must range from 1 to
-    // 1000.  The weight should be interpreted as a per-pixel weight - all
-    // pixels in the area have the specified weight. This means a small area
-    // with the same weight as a larger area will have less influence on the
-    // metering than the larger area. Metering areas can partially overlap and
-    // the driver will add the weights in the overlap region.
-    //
-    // A special case of an all-zero single metering area means the driver decides
-    // the metering area. For example, the driver may use more signals to decide
-    // metering areas and change them dynamically. Apps can set all-zero if they
-    // want the driver to decide metering areas.
-    //
-    // Metering areas are relative to the current field of view (KEY_ZOOM).
-    // No matter what the zoom level is, (-1000,-1000) represents the top of the
-    // currently visible camera frame. The metering area cannot be set to be
-    // outside the current field of view, even when using zoom.
-    //
-    // No matter what the metering areas are, the final exposure is compensated
-    // by KEY_EXPOSURE_COMPENSATION.
-    // Example value: "(-10,-10,0,0,300),(0,0,10,10,700)". Read/write.
-    static const char KEY_METERING_AREAS[];
-    // Current zoom value.
-    // Example value: "0" or "6". Read/write.
-    static const char KEY_ZOOM[];
-    // Maximum zoom value.
-    // Example value: "6". Read only.
-    static const char KEY_MAX_ZOOM[];
-    // The zoom ratios of all zoom values. The zoom ratio is in 1/100
-    // increments. Ex: a zoom of 3.2x is returned as 320. The number of list
-    // elements is KEY_MAX_ZOOM + 1. The first element is always 100. The last
-    // element is the zoom ratio of zoom value KEY_MAX_ZOOM.
-    // Example value: "100,150,200,250,300,350,400". Read only.
-    static const char KEY_ZOOM_RATIOS[];
-    // Whether zoom is supported. Zoom is supported if the value is "true". Zoom
-    // is not supported if the value is not "true" or the key does not exist.
-    // Example value: "true". Read only.
-    static const char KEY_ZOOM_SUPPORTED[];
-    // Whether smooth zoom is supported. Smooth zoom is supported if the
-    // value is "true". It is not supported if the value is not "true" or the
-    // key does not exist.
-    // See CAMERA_CMD_START_SMOOTH_ZOOM, CAMERA_CMD_STOP_SMOOTH_ZOOM, and
-    // CAMERA_MSG_ZOOM in frameworks/base/include/camera/Camera.h.
-    // Example value: "true". Read only.
-    static const char KEY_SMOOTH_ZOOM_SUPPORTED[];
-
-    // The distances (in meters) from the camera to where an object appears to
-    // be in focus. The object is sharpest at the optimal focus distance. The
-    // depth of field is the far focus distance minus near focus distance.
-    //
-    // Focus distances may change after starting auto focus, canceling auto
-    // focus, or starting the preview. Applications can read this anytime to get
-    // the latest focus distances. If the focus mode is FOCUS_MODE_CONTINUOUS,
-    // focus distances may change from time to time.
-    //
-    // This is intended to estimate the distance between the camera and the
-    // subject. After autofocus, the subject distance may be within near and far
-    // focus distance. However, the precision depends on the camera hardware,
-    // autofocus algorithm, the focus area, and the scene. The error can be
-    // large and it should be only used as a reference.
-    //
-    // Far focus distance > optimal focus distance > near focus distance. If
-    // the far focus distance is infinity, the value should be "Infinity" (case
-    // sensitive). The format is three float values separated by commas. The
-    // first is near focus distance. The second is optimal focus distance. The
-    // third is far focus distance.
-    // Example value: "0.95,1.9,Infinity" or "0.049,0.05,0.051". Read only.
-    static const char KEY_FOCUS_DISTANCES[];
-
-    // The current dimensions in pixels (width x height) for video frames.
-    // The width and height must be one of the supported sizes retrieved
-    // via KEY_SUPPORTED_VIDEO_SIZES.
-    // Example value: "1280x720". Read/write.
-    static const char KEY_VIDEO_SIZE[];
-    // A list of the supported dimensions in pixels (width x height)
-    // for video frames. See CAMERA_MSG_VIDEO_FRAME for details in
-    // frameworks/base/include/camera/Camera.h.
-    // Example: "176x144,1280x720". Read only.
-    static const char KEY_SUPPORTED_VIDEO_SIZES[];
-
-    // The maximum number of detected faces supported by hardware face
-    // detection. If the value is 0, hardware face detection is not supported.
-    // Example: "5". Read only
-    static const char KEY_MAX_NUM_DETECTED_FACES_HW[];
-
-    // The maximum number of detected faces supported by software face
-    // detection. If the value is 0, software face detection is not supported.
-    // Example: "5". Read only
-    static const char KEY_MAX_NUM_DETECTED_FACES_SW[];
-
-    // Preferred preview frame size in pixels for video recording.
-    // The width and height must be one of the supported sizes retrieved
-    // via KEY_SUPPORTED_PREVIEW_SIZES. This key can be used only when
-    // getSupportedVideoSizes() does not return an empty Vector of Size.
-    // Camcorder applications are recommended to set the preview size
-    // to a value that is not larger than the preferred preview size.
-    // In other words, the product of the width and height of the
-    // preview size should not be larger than that of the preferred
-    // preview size. In addition, we recommend choosing a preview size
-    // that has the same aspect ratio as the resolution of video to be
-    // recorded.
-    // Example value: "800x600". Read only.
-    static const char KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO[];
-
-    // The image format for video frames. See CAMERA_MSG_VIDEO_FRAME in
-    // frameworks/base/include/camera/Camera.h.
-    // Example value: "yuv420sp" or PIXEL_FORMAT_XXX constants. Read only.
-    static const char KEY_VIDEO_FRAME_FORMAT[];
-
-    // Sets the hint of the recording mode. If this is true, MediaRecorder.start
-    // may be faster or have fewer glitches. This should be called before starting
-    // the preview for the best result. But it is allowed to change the hint
-    // while the preview is active. The default value is false.
-    //
-    // The apps can still call Camera.takePicture when the hint is true. The
-    // apps can call MediaRecorder.start when the hint is false. But the
-    // performance may be worse.
-    // Example value: "true" or "false". Read/write.
-    static const char KEY_RECORDING_HINT[];
-
-    // Returns true if video snapshot is supported. That is, applications
-    // can call Camera.takePicture during recording. Applications do not need to
-    // call Camera.startPreview after taking a picture. The preview will still
-    // be active. Other than that, taking a picture during recording is
-    // identical to taking a picture normally. All settings and methods related
-    // to takePicture work identically. Ex: KEY_PICTURE_SIZE,
-    // KEY_SUPPORTED_PICTURE_SIZES, KEY_JPEG_QUALITY, KEY_ROTATION, etc.
-    // The picture will have an EXIF header. FLASH_MODE_AUTO and FLASH_MODE_ON
-    // also still work, but the video will record the flash.
-    //
-    // Applications can set shutter callback as null to avoid the shutter
-    // sound. It is also recommended to set raw picture and post view callbacks
-    // to null to avoid interrupting the preview display.
-    //
-    // Field-of-view of the recorded video may be different from that of the
-    // captured pictures.
-    // Example value: "true" or "false". Read only.
-    static const char KEY_VIDEO_SNAPSHOT_SUPPORTED[];
-
-    // The state of the video stabilization. If set to true, both the
-    // preview stream and the recorded video stream are stabilized by
-    // the camera. Only valid to set if KEY_VIDEO_STABILIZATION_SUPPORTED is
-    // set to true.
-    //
-    // The value of this key can be changed any time the camera is
-    // open. If preview or recording is active, it is acceptable for
-    // there to be a slight video glitch when video stabilization is
-    // toggled on and off.
-    //
-    // This only stabilizes video streams (between-frames stabilization), and
-    // has no effect on still image capture.
-    static const char KEY_VIDEO_STABILIZATION[];
-
-    // Returns true if video stabilization is supported. That is, applications
-    // can set KEY_VIDEO_STABILIZATION to true and have a stabilized preview
-    // stream and record stabilized videos.
-    static const char KEY_VIDEO_STABILIZATION_SUPPORTED[];
-
-    // Value for KEY_ZOOM_SUPPORTED or KEY_SMOOTH_ZOOM_SUPPORTED.
-    static const char TRUE[];
-    static const char FALSE[];
-
-    // Value for KEY_FOCUS_DISTANCES.
-    static const char FOCUS_DISTANCE_INFINITY[];
-
-    // Values for white balance settings.
-    static const char WHITE_BALANCE_AUTO[];
-    static const char WHITE_BALANCE_INCANDESCENT[];
-    static const char WHITE_BALANCE_FLUORESCENT[];
-    static const char WHITE_BALANCE_WARM_FLUORESCENT[];
-    static const char WHITE_BALANCE_DAYLIGHT[];
-    static const char WHITE_BALANCE_CLOUDY_DAYLIGHT[];
-    static const char WHITE_BALANCE_TWILIGHT[];
-    static const char WHITE_BALANCE_SHADE[];
-
-    // Values for effect settings.
-    static const char EFFECT_NONE[];
-    static const char EFFECT_MONO[];
-    static const char EFFECT_NEGATIVE[];
-    static const char EFFECT_SOLARIZE[];
-    static const char EFFECT_SEPIA[];
-    static const char EFFECT_POSTERIZE[];
-    static const char EFFECT_WHITEBOARD[];
-    static const char EFFECT_BLACKBOARD[];
-    static const char EFFECT_AQUA[];
-
-    // Values for antibanding settings.
-    static const char ANTIBANDING_AUTO[];
-    static const char ANTIBANDING_50HZ[];
-    static const char ANTIBANDING_60HZ[];
-    static const char ANTIBANDING_OFF[];
-
-    // Values for flash mode settings.
-    // Flash will not be fired.
-    static const char FLASH_MODE_OFF[];
-    // Flash will be fired automatically when required. The flash may be fired
-    // during preview, auto-focus, or snapshot depending on the driver.
-    static const char FLASH_MODE_AUTO[];
-    // Flash will always be fired during snapshot. The flash may also be
-    // fired during preview or auto-focus depending on the driver.
-    static const char FLASH_MODE_ON[];
-    // Flash will be fired in red-eye reduction mode.
-    static const char FLASH_MODE_RED_EYE[];
-    // Constant emission of light during preview, auto-focus and snapshot.
-    // This can also be used for video recording.
-    static const char FLASH_MODE_TORCH[];
-
-    // Values for scene mode settings.
-    static const char SCENE_MODE_AUTO[];
-    static const char SCENE_MODE_ACTION[];
-    static const char SCENE_MODE_PORTRAIT[];
-    static const char SCENE_MODE_LANDSCAPE[];
-    static const char SCENE_MODE_NIGHT[];
-    static const char SCENE_MODE_NIGHT_PORTRAIT[];
-    static const char SCENE_MODE_THEATRE[];
-    static const char SCENE_MODE_BEACH[];
-    static const char SCENE_MODE_SNOW[];
-    static const char SCENE_MODE_SUNSET[];
-    static const char SCENE_MODE_STEADYPHOTO[];
-    static const char SCENE_MODE_FIREWORKS[];
-    static const char SCENE_MODE_SPORTS[];
-    static const char SCENE_MODE_PARTY[];
-    static const char SCENE_MODE_CANDLELIGHT[];
-    // Applications are looking for a barcode. Camera driver will be optimized
-    // for barcode reading.
-    static const char SCENE_MODE_BARCODE[];
-
-    // Pixel color formats for KEY_PREVIEW_FORMAT, KEY_PICTURE_FORMAT,
-    // and KEY_VIDEO_FRAME_FORMAT
-    static const char PIXEL_FORMAT_YUV422SP[];
-    static const char PIXEL_FORMAT_YUV420SP[]; // NV21
-    static const char PIXEL_FORMAT_YUV422I[]; // YUY2
-    static const char PIXEL_FORMAT_YUV420P[]; // YV12
-    static const char PIXEL_FORMAT_RGB565[];
-    static const char PIXEL_FORMAT_RGBA8888[];
-    static const char PIXEL_FORMAT_JPEG[];
-    // Raw bayer format used for images, which is 10 bit precision samples
-    // stored in 16 bit words. The filter pattern is RGGB.
-    static const char PIXEL_FORMAT_BAYER_RGGB[];
-
-    // Values for focus mode settings.
-    // Auto-focus mode. Applications should call
-    // CameraHardwareInterface.autoFocus to start the focus in this mode.
-    static const char FOCUS_MODE_AUTO[];
-    // Focus is set at infinity. Applications should not call
-    // CameraHardwareInterface.autoFocus in this mode.
-    static const char FOCUS_MODE_INFINITY[];
-    // Macro (close-up) focus mode. Applications should call
-    // CameraHardwareInterface.autoFocus to start the focus in this mode.
-    static const char FOCUS_MODE_MACRO[];
-    // Focus is fixed. The camera is always in this mode if the focus is not
-    // adjustable. If the camera has auto-focus, this mode can fix the
-    // focus, which is usually at hyperfocal distance. Applications should
-    // not call CameraHardwareInterface.autoFocus in this mode.
-    static const char FOCUS_MODE_FIXED[];
-    // Extended depth of field (EDOF). Focusing is done digitally and
-    // continuously. Applications should not call
-    // CameraHardwareInterface.autoFocus in this mode.
-    static const char FOCUS_MODE_EDOF[];
-    // Continuous auto focus mode intended for video recording. The camera
-    // continuously tries to focus. This is the best choice for video
-    // recording because the focus changes smoothly. Applications can still
-    // call CameraHardwareInterface.takePicture in this mode but the subject may
-    // not be in focus. Auto focus starts when the parameter is set.
-    //
-    // Applications can call CameraHardwareInterface.autoFocus in this mode. The
-    // focus callback will immediately return with a boolean that indicates
-    // whether the focus is sharp or not. The focus position is locked after
-    // autoFocus call. If applications want to resume the continuous focus,
-    // cancelAutoFocus must be called. Restarting the preview will not resume
-    // the continuous autofocus. To stop continuous focus, applications should
-    // change the focus mode to other modes.
-    static const char FOCUS_MODE_CONTINUOUS_VIDEO[];
-    // Continuous auto focus mode intended for taking pictures. The camera
-    // continuously tries to focus. The speed of focus change is more aggressive
-    // than FOCUS_MODE_CONTINUOUS_VIDEO. Auto focus starts when the parameter is
-    // set.
-    //
-    // Applications can call CameraHardwareInterface.autoFocus in this mode. If
-    // the autofocus is in the middle of scanning, the focus callback will
-    // return when it completes. If the autofocus is not scanning, focus
-    // callback will immediately return with a boolean that indicates whether
-    // the focus is sharp or not. The apps can then decide if they want to take
-    // a picture immediately or to change the focus mode to auto, and run a full
-    // autofocus cycle. The focus position is locked after autoFocus call. If
-    // applications want to resume the continuous focus, cancelAutoFocus must be
-    // called. Restarting the preview will not resume the continuous autofocus.
-    // To stop continuous focus, applications should change the focus mode to
-    // other modes.
-    static const char FOCUS_MODE_CONTINUOUS_PICTURE[];
-
-private:
-    DefaultKeyedVector<String8,String8>    mMap;
-};
-
-}; // namespace android
-
-#endif
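For reference, the KEY_METERING_AREAS documentation above describes a plain string format. Below is a minimal sketch of setting one center-weighted area through that key, assuming the usual CameraParameters string accessors set() and getInt(), which are not shown in this hunk:

    #include <camera/CameraParameters.h>

    using namespace android;

    // Configure a single center-weighted metering area, using the coordinate
    // system and weight range documented for KEY_METERING_AREAS.
    static void setCenterMeteringArea(CameraParameters& params) {
        // Skip if the driver reports no metering-area support (max == 0).
        if (params.getInt(CameraParameters::KEY_MAX_NUM_METERING_AREAS) <= 0) {
            return;
        }
        // One rectangle in (-1000,1000) sensor coordinates, weight 800.
        params.set(CameraParameters::KEY_METERING_AREAS, "(-100,-100,100,100,800)");
    }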
diff --git a/include/camera/ICamera.h b/include/camera/ICamera.h
index 400d7f4..3d18837 100644
--- a/include/camera/ICamera.h
+++ b/include/camera/ICamera.h
@@ -20,15 +20,15 @@
 #include <utils/RefBase.h>
 #include <binder/IInterface.h>
 #include <binder/Parcel.h>
-#include <surfaceflinger/Surface.h>
 #include <binder/IMemory.h>
 #include <utils/String8.h>
 #include <camera/Camera.h>
-#include <gui/ISurfaceTexture.h>
 
 namespace android {
 
 class ICameraClient;
+class ISurfaceTexture;
+class Surface;
 
 class ICamera: public IInterface
 {
diff --git a/include/camera/ICameraService.h b/include/camera/ICameraService.h
index 7d70c1e..97e3169 100644
--- a/include/camera/ICameraService.h
+++ b/include/camera/ICameraService.h
@@ -42,7 +42,7 @@
     virtual status_t        getCameraInfo(int cameraId,
                                           struct CameraInfo* cameraInfo) = 0;
     virtual sp<ICamera>     connect(const sp<ICameraClient>& cameraClient,
-                                    int cameraId) = 0;
+                                    int cameraId, bool force, bool keep) = 0;
 };
 
 // ----------------------------------------------------------------------------
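A minimal sketch of calling the extended connect() through the Binder interface above. The semantics of the new force/keep flags are not documented in this hunk, so both are left false here; "media.camera" is assumed to be the name the camera service registers with the service manager.

    #include <binder/IServiceManager.h>
    #include <camera/ICameraService.h>

    using namespace android;

    // "client" is an existing ICameraClient implementation owned by the caller.
    static sp<ICamera> connectCamera0(const sp<ICameraClient>& client) {
        sp<IBinder> binder =
                defaultServiceManager()->getService(String16("media.camera"));
        sp<ICameraService> cs = interface_cast<ICameraService>(binder);
        if (cs == 0) return NULL;
        return cs->connect(client, 0 /* cameraId */,
                           false /* force */, false /* keep */);
    }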
diff --git a/include/common_time/ICommonClock.h b/include/common_time/ICommonClock.h
new file mode 100644
index 0000000..d7073f1
--- /dev/null
+++ b/include/common_time/ICommonClock.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ICOMMONCLOCK_H
+#define ANDROID_ICOMMONCLOCK_H
+
+#include <stdint.h>
+#include <linux/socket.h>
+
+#include <binder/IInterface.h>
+#include <binder/IServiceManager.h>
+
+namespace android {
+
+class ICommonClockListener : public IInterface {
+  public:
+    DECLARE_META_INTERFACE(CommonClockListener);
+
+    virtual void onTimelineChanged(uint64_t timelineID) = 0;
+};
+
+class BnCommonClockListener : public BnInterface<ICommonClockListener> {
+  public:
+    virtual status_t onTransact(uint32_t code, const Parcel& data,
+                                Parcel* reply, uint32_t flags = 0);
+};
+
+class ICommonClock : public IInterface {
+  public:
+    DECLARE_META_INTERFACE(CommonClock);
+
+    // Name of the ICommonClock service registered with the service manager.
+    static const String16 kServiceName;
+
+    // a reserved invalid timeline ID
+    static const uint64_t kInvalidTimelineID;
+
+    // a reserved invalid error estimate
+    static const int32_t kErrorEstimateUnknown;
+
+    enum State {
+        // the device just came up and is trying to discover the master
+        STATE_INITIAL,
+
+        // the device is a client of a master
+        STATE_CLIENT,
+
+        // the device is acting as master
+        STATE_MASTER,
+
+        // the device has lost contact with its master and needs to participate
+        // in the election of a new master
+        STATE_RONIN,
+
+        // the device is waiting for announcement of the newly elected master
+        STATE_WAIT_FOR_ELECTION,
+    };
+
+    virtual status_t isCommonTimeValid(bool* valid, uint32_t* timelineID) = 0;
+    virtual status_t commonTimeToLocalTime(int64_t commonTime,
+                                           int64_t* localTime) = 0;
+    virtual status_t localTimeToCommonTime(int64_t localTime,
+                                           int64_t* commonTime) = 0;
+    virtual status_t getCommonTime(int64_t* commonTime) = 0;
+    virtual status_t getCommonFreq(uint64_t* freq) = 0;
+    virtual status_t getLocalTime(int64_t* localTime) = 0;
+    virtual status_t getLocalFreq(uint64_t* freq) = 0;
+    virtual status_t getEstimatedError(int32_t* estimate) = 0;
+    virtual status_t getTimelineID(uint64_t* id) = 0;
+    virtual status_t getState(State* state) = 0;
+    virtual status_t getMasterAddr(struct sockaddr_storage* addr) = 0;
+
+    virtual status_t registerListener(
+            const sp<ICommonClockListener>& listener) = 0;
+    virtual status_t unregisterListener(
+            const sp<ICommonClockListener>& listener) = 0;
+
+    // Simple helper to make it easier to connect to the CommonClock service.
+    static inline sp<ICommonClock> getInstance() {
+        sp<IBinder> binder = defaultServiceManager()->checkService(
+                ICommonClock::kServiceName);
+        sp<ICommonClock> clk = interface_cast<ICommonClock>(binder);
+        return clk;
+    }
+};
+
+class BnCommonClock : public BnInterface<ICommonClock> {
+  public:
+    virtual status_t onTransact(uint32_t code, const Parcel& data,
+                                Parcel* reply, uint32_t flags = 0);
+};
+
+};  // namespace android
+
+#endif  // ANDROID_ICOMMONCLOCK_H
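A minimal sketch of a client of the interface above: obtain the clock service via getInstance() and read the current common time only if a common timeline is actually established.

    #include <common_time/ICommonClock.h>

    using namespace android;

    static bool readCommonTime(int64_t* commonTime) {
        sp<ICommonClock> cc = ICommonClock::getInstance();
        if (cc == NULL) return false;              // service not registered

        bool valid = false;
        uint32_t timelineID = 0;
        if (cc->isCommonTimeValid(&valid, &timelineID) != OK || !valid) {
            return false;                          // no master / not synchronized yet
        }
        return cc->getCommonTime(commonTime) == OK;
    }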
diff --git a/include/common_time/ICommonTimeConfig.h b/include/common_time/ICommonTimeConfig.h
new file mode 100644
index 0000000..497b666
--- /dev/null
+++ b/include/common_time/ICommonTimeConfig.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ICOMMONTIMECONFIG_H
+#define ANDROID_ICOMMONTIMECONFIG_H
+
+#include <stdint.h>
+#include <linux/socket.h>
+
+#include <binder/IInterface.h>
+#include <binder/IServiceManager.h>
+
+namespace android {
+
+class String16;
+
+class ICommonTimeConfig : public IInterface {
+  public:
+    DECLARE_META_INTERFACE(CommonTimeConfig);
+
+    // Name of the ICommonTimeConfig service registered with the service
+    // manager.
+    static const String16 kServiceName;
+
+    virtual status_t getMasterElectionPriority(uint8_t *priority) = 0;
+    virtual status_t setMasterElectionPriority(uint8_t priority) = 0;
+    virtual status_t getMasterElectionEndpoint(struct sockaddr_storage *addr) = 0;
+    virtual status_t setMasterElectionEndpoint(const struct sockaddr_storage *addr) = 0;
+    virtual status_t getMasterElectionGroupId(uint64_t *id) = 0;
+    virtual status_t setMasterElectionGroupId(uint64_t id) = 0;
+    virtual status_t getInterfaceBinding(String16& ifaceName) = 0;
+    virtual status_t setInterfaceBinding(const String16& ifaceName) = 0;
+    virtual status_t getMasterAnnounceInterval(int *interval) = 0;
+    virtual status_t setMasterAnnounceInterval(int interval) = 0;
+    virtual status_t getClientSyncInterval(int *interval) = 0;
+    virtual status_t setClientSyncInterval(int interval) = 0;
+    virtual status_t getPanicThreshold(int *threshold) = 0;
+    virtual status_t setPanicThreshold(int threshold) = 0;
+    virtual status_t getAutoDisable(bool *autoDisable) = 0;
+    virtual status_t setAutoDisable(bool autoDisable) = 0;
+    virtual status_t forceNetworklessMasterMode() = 0;
+
+    // Simple helper to make it easier to connect to the CommonTimeConfig service.
+    static inline sp<ICommonTimeConfig> getInstance() {
+        sp<IBinder> binder = defaultServiceManager()->checkService(
+                ICommonTimeConfig::kServiceName);
+        sp<ICommonTimeConfig> clk = interface_cast<ICommonTimeConfig>(binder);
+        return clk;
+    }
+};
+
+class BnCommonTimeConfig : public BnInterface<ICommonTimeConfig> {
+  public:
+    virtual status_t onTransact(uint32_t code, const Parcel& data,
+                                Parcel* reply, uint32_t flags = 0);
+};
+
+};  // namespace android
+
+#endif  // ANDROID_ICOMMONTIMECONFIG_H
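A minimal sketch against the configuration interface above, assuming the common_time config service is registered: read the current master-election priority and raise it only if needed.

    #include <common_time/ICommonTimeConfig.h>

    using namespace android;

    static status_t ensureElectionPriority(uint8_t atLeast) {
        sp<ICommonTimeConfig> cfg = ICommonTimeConfig::getInstance();
        if (cfg == NULL) return NO_INIT;           // service not registered

        uint8_t current = 0;
        status_t res = cfg->getMasterElectionPriority(&current);
        if (res != OK) return res;

        return (current < atLeast) ? cfg->setMasterElectionPriority(atLeast) : OK;
    }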
diff --git a/include/common_time/cc_helper.h b/include/common_time/cc_helper.h
new file mode 100644
index 0000000..8c4d5c0
--- /dev/null
+++ b/include/common_time/cc_helper.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CC_HELPER_H__
+#define __CC_HELPER_H__
+
+#include <stdint.h>
+#include <common_time/ICommonClock.h>
+#include <utils/threads.h>
+
+namespace android {
+
+// CCHelper is a simple wrapper class to help with centralizing access to the
+// Common Clock service and implementing lifetime management, as well as to
+// implement a simple policy of making a basic attempt to reconnect to the
+// common clock service when things go wrong.
+//
+// On platforms which run the native common_time service in auto-disable mode,
+// the service will go into networkless mode whenever it has no active clients.
+// It tracks active clients using registered CommonClockListeners (the callback
+// interface for onTimelineChanged) since this provides a convenient death
+// handler notification for when the service's clients die unexpectedly.  This
+// means that users of the common time service should really always have a
+// CommonClockListener, unless they know that the time service is not running in
+// auto-disable mode, or that there is at least one other registered listener
+// active in the system.  The CCHelper makes this a little easier by sharing a
+// ref counted ICommonClock interface across all clients and automatically
+// registering and unregistering a listener whenever there are CCHelper
+// instances active in the process.
+class CCHelper {
+  public:
+    CCHelper();
+    ~CCHelper();
+
+    status_t isCommonTimeValid(bool* valid, uint32_t* timelineID);
+    status_t commonTimeToLocalTime(int64_t commonTime, int64_t* localTime);
+    status_t localTimeToCommonTime(int64_t localTime, int64_t* commonTime);
+    status_t getCommonTime(int64_t* commonTime);
+    status_t getCommonFreq(uint64_t* freq);
+    status_t getLocalTime(int64_t* localTime);
+    status_t getLocalFreq(uint64_t* freq);
+
+  private:
+    class CommonClockListener : public BnCommonClockListener {
+      public:
+        void onTimelineChanged(uint64_t timelineID);
+    };
+
+    static bool verifyClock_l();
+
+    static Mutex lock_;
+    static sp<ICommonClock> common_clock_;
+    static sp<ICommonClockListener> common_clock_listener_;
+    static uint32_t ref_count_;
+};
+
+
+}  // namespace android
+#endif  // __CC_HELPER_H__
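A minimal sketch of the helper above; instantiating a CCHelper is what keeps the shared listener registered, so the conversion is made while the helper instance is alive.

    #include <common_time/cc_helper.h>

    using namespace android;

    static bool toCommonTime(int64_t localTime, int64_t* commonTime) {
        CCHelper helper;  // shares or refreshes the process-wide ICommonClock binding
        return helper.localTimeToCommonTime(localTime, commonTime) == OK;
    }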
diff --git a/include/common_time/local_clock.h b/include/common_time/local_clock.h
new file mode 100644
index 0000000..845d1c2
--- /dev/null
+++ b/include/common_time/local_clock.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __LOCAL_CLOCK_H__
+#define __LOCAL_CLOCK_H__
+
+#include <stdint.h>
+
+#include <hardware/local_time_hal.h>
+#include <utils/Errors.h>
+#include <utils/threads.h>
+
+namespace android {
+
+class LocalClock {
+  public:
+     LocalClock();
+
+    bool initCheck();
+
+    int64_t  getLocalTime();
+    uint64_t getLocalFreq();
+    status_t setLocalSlew(int16_t rate);
+    int32_t  getDebugLog(struct local_time_debug_event* records,
+                         int max_records);
+
+  private:
+    static Mutex dev_lock_;
+    static local_time_hw_device_t* dev_;
+};
+
+}  // namespace android
+#endif  // __LOCAL_CLOCK_H__
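A minimal sketch of the wrapper above: check that the local time HAL opened successfully, then read the raw counter and its nominal frequency.

    #include <common_time/local_clock.h>

    using namespace android;

    static bool readLocalClock(int64_t* ticks, uint64_t* freq) {
        LocalClock clock;
        if (!clock.initCheck()) return false;  // local time HAL unavailable

        *ticks = clock.getLocalTime();
        *freq  = clock.getLocalFreq();
        return true;
    }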
diff --git a/include/drm/DrmManagerClient.h b/include/drm/DrmManagerClient.h
index b8fe46d..c47bbfb 100644
--- a/include/drm/DrmManagerClient.h
+++ b/include/drm/DrmManagerClient.h
@@ -66,19 +66,21 @@
      * @param[in] fd File descriptor of the protected content to be decrypted
      * @param[in] offset Start position of the content
      * @param[in] length The length of the protected content
+     * @param[in] mime Mime type of the protected content if it is not NULL or empty
      * @return
      *     Handle for the decryption session
      */
-    sp<DecryptHandle> openDecryptSession(int fd, off64_t offset, off64_t length);
+    sp<DecryptHandle> openDecryptSession(int fd, off64_t offset, off64_t length, const char* mime);
 
     /**
      * Open the decrypt session to decrypt the given protected content
      *
      * @param[in] uri Path of the protected content to be decrypted
+     * @param[in] mime Mime type of the protected content if it is not NULL or empty
      * @return
      *     Handle for the decryption session
      */
-    sp<DecryptHandle> openDecryptSession(const char* uri);
+    sp<DecryptHandle> openDecryptSession(const char* uri, const char* mime);
 
     /**
      * Close the decrypt session for the given handle
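A minimal sketch of the fd-based overload with its new mime argument; per the updated doc comment, the MIME type is only consulted when it is non-NULL and non-empty.

    #include <drm/DrmManagerClient.h>

    using namespace android;

    // "fd" is an already-open descriptor for the protected content.
    static sp<DecryptHandle> openProtected(DrmManagerClient& client,
                                           int fd, off64_t length,
                                           const char* mime /* may be NULL */) {
        return client.openDecryptSession(fd, 0 /* offset */, length, mime);
    }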
diff --git a/include/drm/drm_framework_common.h b/include/drm/drm_framework_common.h
index 2632cbd..637409c 100644
--- a/include/drm/drm_framework_common.h
+++ b/include/drm/drm_framework_common.h
@@ -43,6 +43,7 @@
     DRM_ERROR_DECRYPT                       = ERROR_BASE - 5,
     DRM_ERROR_CANNOT_HANDLE                 = ERROR_BASE - 6,
     DRM_ERROR_TAMPER_DETECTED               = ERROR_BASE - 7,
+    DRM_ERROR_NO_PERMISSION                 = ERROR_BASE - 8,
 
     DRM_NO_ERROR                            = NO_ERROR
 };
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index 1417416..02dfc1b 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -108,7 +108,8 @@
      * Returned value
      *   *descriptor updated with effect descriptor
      */
-    static status_t getEffectDescriptor(effect_uuid_t *uuid, effect_descriptor_t *descriptor);
+    static status_t getEffectDescriptor(const effect_uuid_t *uuid,
+                                        effect_descriptor_t *descriptor) /*const*/;
 
 
     /*
@@ -226,8 +227,8 @@
     AudioEffect(const effect_uuid_t *type,
                 const effect_uuid_t *uuid = NULL,
                   int32_t priority = 0,
-                  effect_callback_t cbf = 0,
-                  void* user = 0,
+                  effect_callback_t cbf = NULL,
+                  void* user = NULL,
                   int sessionId = 0,
                   audio_io_handle_t io = 0
                   );
@@ -238,8 +239,8 @@
     AudioEffect(const char *typeStr,
                     const char *uuidStr = NULL,
                     int32_t priority = 0,
-                    effect_callback_t cbf = 0,
-                    void* user = 0,
+                    effect_callback_t cbf = NULL,
+                    void* user = NULL,
                     int sessionId = 0,
                     audio_io_handle_t io = 0
                     );
@@ -260,8 +261,8 @@
             status_t    set(const effect_uuid_t *type,
                             const effect_uuid_t *uuid = NULL,
                             int32_t priority = 0,
-                            effect_callback_t cbf = 0,
-                            void* user = 0,
+                            effect_callback_t cbf = NULL,
+                            void* user = NULL,
                             int sessionId = 0,
                             audio_io_handle_t io = 0
                             );
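A minimal sketch against the now-const descriptor query above; the uuid is assumed to have been obtained elsewhere, for example from an effect enumeration.

    #include <media/AudioEffect.h>

    using namespace android;

    static bool describeEffect(const effect_uuid_t* uuid, effect_descriptor_t* desc) {
        return AudioEffect::getEffectDescriptor(uuid, desc) == NO_ERROR;
    }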
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 605680a..7df6668 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -22,7 +22,6 @@
 
 #include <media/IAudioFlinger.h>
 #include <media/IAudioRecord.h>
-#include <media/AudioTrack.h>
 
 #include <utils/RefBase.h>
 #include <utils/Errors.h>
@@ -34,6 +33,8 @@
 
 namespace android {
 
+class audio_track_cblk_t;
+
 // ----------------------------------------------------------------------------
 
 class AudioRecord
@@ -67,7 +68,7 @@
         };
         uint32_t    flags;
         int         channelCount;
-        int         format;
+        audio_format_t format;
         size_t      frameCount;
         size_t      size;
         union {
@@ -111,7 +112,7 @@
 
      static status_t getMinFrameCount(int* frameCount,
                                       uint32_t sampleRate,
-                                      int format,
+                                      audio_format_t format,
                                       int channelCount);
 
     /* Constructs an uninitialized AudioRecord. No connection with
@@ -142,20 +143,22 @@
      * user                Context for use by the callback receiver.
      */
 
+     // FIXME consider removing this alias and replacing it by audio_in_acoustics_t
+     //       or removing the parameter entirely if it is unused
      enum record_flags {
          RECORD_AGC_ENABLE = AUDIO_IN_ACOUSTICS_AGC_ENABLE,
          RECORD_NS_ENABLE  = AUDIO_IN_ACOUSTICS_NS_ENABLE,
          RECORD_IIR_ENABLE = AUDIO_IN_ACOUSTICS_TX_IIR_ENABLE,
      };
 
-                        AudioRecord(int inputSource,
+                        AudioRecord(audio_source_t inputSource,
                                     uint32_t sampleRate = 0,
-                                    int format          = 0,
+                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                     uint32_t channelMask = AUDIO_CHANNEL_IN_MONO,
                                     int frameCount      = 0,
-                                    uint32_t flags      = 0,
-                                    callback_t cbf = 0,
-                                    void* user = 0,
+                                    record_flags flags  = (record_flags) 0,
+                                    callback_t cbf = NULL,
+                                    void* user = NULL,
                                     int notificationFrames = 0,
                                     int sessionId = 0);
 
@@ -174,14 +177,14 @@
      *  - NO_INIT: audio server or audio hardware not initialized
      *  - PERMISSION_DENIED: recording is not allowed for the requesting process
      * */
-            status_t    set(int inputSource     = 0,
+            status_t    set(audio_source_t inputSource = AUDIO_SOURCE_DEFAULT,
                             uint32_t sampleRate = 0,
-                            int format          = 0,
+                            audio_format_t format = AUDIO_FORMAT_DEFAULT,
                             uint32_t channelMask = AUDIO_CHANNEL_IN_MONO,
                             int frameCount      = 0,
-                            uint32_t flags      = 0,
-                            callback_t cbf = 0,
-                            void* user = 0,
+                            record_flags flags  = (record_flags) 0,
+                            callback_t cbf = NULL,
+                            void* user = NULL,
                             int notificationFrames = 0,
                             bool threadCanCallJava = false,
                             int sessionId = 0);
@@ -202,12 +205,12 @@
 
    /* getters, see constructor */
 
-            int         format() const;
+            audio_format_t format() const;
             int         channelCount() const;
             int         channels() const;
             uint32_t    frameCount() const;
-            int         frameSize() const;
-            int         inputSource() const;
+            size_t      frameSize() const;
+            audio_source_t inputSource() const;
 
 
     /* After it's created the track is not active. Call start() to
@@ -224,7 +227,7 @@
 
     /* get sample rate for this record track
      */
-            uint32_t    getSampleRate();
+            uint32_t    getSampleRate() const;
 
     /* Sets marker position. When record reaches the number of frames specified,
      * a callback with event type EVENT_MARKER is called. Calling setMarkerPosition
@@ -241,7 +244,7 @@
      *  - INVALID_OPERATION: the AudioRecord has no callback installed.
      */
             status_t    setMarkerPosition(uint32_t marker);
-            status_t    getMarkerPosition(uint32_t *marker);
+            status_t    getMarkerPosition(uint32_t *marker) const;
 
 
     /* Sets position update period. Every time the number of frames specified has been recorded,
@@ -260,7 +263,7 @@
      *  - INVALID_OPERATION: the AudioRecord has no callback installed.
      */
             status_t    setPositionUpdatePeriod(uint32_t updatePeriod);
-            status_t    getPositionUpdatePeriod(uint32_t *updatePeriod);
+            status_t    getPositionUpdatePeriod(uint32_t *updatePeriod) const;
 
 
     /* Gets record head position. The position is the  total number of frames
@@ -274,7 +277,7 @@
      *  - NO_ERROR: successful operation
      *  - BAD_VALUE:  position is NULL
      */
-            status_t    getPosition(uint32_t *position);
+            status_t    getPosition(uint32_t *position) const;
 
     /* returns a handle on the audio input used by this AudioRecord.
      *
@@ -284,7 +287,7 @@
      * Returned value:
      *  handle on audio hardware input
      */
-            audio_io_handle_t    getInput();
+            audio_io_handle_t    getInput() const;
 
     /* returns the audio session ID associated to this AudioRecord.
      *
@@ -294,11 +297,11 @@
      * Returned value:
      *  AudioRecord session ID.
      */
-            int    getSessionId();
+            int    getSessionId() const;
 
     /* obtains a buffer of "frameCount" frames. The buffer must be
      * filled entirely. If the track is stopped, obtainBuffer() returns
-     * STOPPED instead of NO_ERROR as long as there are buffers availlable,
+     * STOPPED instead of NO_ERROR as long as there are buffers available,
      * at which point NO_MORE_BUFFERS is returned.
      * Buffers will be returned until the pool (buffercount())
      * is exhausted, at which point obtainBuffer() will either block
@@ -316,16 +319,17 @@
 
 
     /* As a convenience we provide a read() interface to the audio buffer.
-     * This is implemented on top of lockBuffer/unlockBuffer.
+     * This is implemented on top of obtainBuffer/releaseBuffer.
      */
             ssize_t     read(void* buffer, size_t size);
 
-    /* Return the amount of input frames lost in the audio driver since the last call of this function.
-     * Audio driver is expected to reset the value to 0 and restart counting upon returning the current value by this function call.
-     * Such loss typically occurs when the user space process is blocked longer than the capacity of audio driver buffers.
+    /* Return the amount of input frames lost in the audio driver since the last call of this
+     * function.  Audio driver is expected to reset the value to 0 and restart counting upon
+     * returning the current value by this function call.  Such loss typically occurs when the
+     * user space process is blocked longer than the capacity of audio driver buffers.
      * Unit: the number of input audio frames
      */
-            unsigned int  getInputFramesLost();
+            unsigned int  getInputFramesLost() const;
 
 private:
     /* copying audio tracks is not allowed */
@@ -340,15 +344,14 @@
     private:
         friend class AudioRecord;
         virtual bool        threadLoop();
-        virtual status_t    readyToRun() { return NO_ERROR; }
+        virtual status_t    readyToRun();
         virtual void        onFirstRef() {}
         AudioRecord& mReceiver;
-        Mutex       mLock;
     };
 
             bool processAudioBuffer(const sp<ClientRecordThread>& thread);
             status_t openRecord_l(uint32_t sampleRate,
-                                uint32_t format,
+                                audio_format_t format,
                                 uint32_t channelMask,
                                 int frameCount,
                                 uint32_t flags,
@@ -359,15 +362,16 @@
     sp<IAudioRecord>        mAudioRecord;
     sp<IMemory>             mCblkMemory;
     sp<ClientRecordThread>  mClientRecordThread;
-    Mutex                   mLock;
+    status_t                mReadyToRun;
+    mutable Mutex           mLock;
+    Condition               mCondition;
 
     uint32_t                mFrameCount;
 
     audio_track_cblk_t*     mCblk;
-    uint32_t                mFormat;
+    audio_format_t          mFormat;
     uint8_t                 mChannelCount;
-    uint8_t                 mInputSource;
-    uint8_t                 mReserved[2];
+    audio_source_t          mInputSource;
     status_t                mStatus;
     uint32_t                mLatency;
 
@@ -381,10 +385,12 @@
     bool                    mMarkerReached;
     uint32_t                mNewPosition;
     uint32_t                mUpdatePeriod;
-    uint32_t                mFlags;
+    record_flags            mFlags;
     uint32_t                mChannelMask;
     audio_io_handle_t       mInput;
     int                     mSessionId;
+    int                     mPreviousPriority;          // before start()
+    int                     mPreviousSchedulingGroup;
 };
 
 }; // namespace android
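A minimal sketch of the strongly-typed set() path above, assuming a default-constructed AudioRecord: size the buffer with getMinFrameCount() and then configure 8 kHz, 16-bit mono capture from the microphone.

    #include <media/AudioRecord.h>

    using namespace android;

    static bool configureRecorder(AudioRecord& rec) {
        int frames = 0;
        if (AudioRecord::getMinFrameCount(&frames, 8000, AUDIO_FORMAT_PCM_16_BIT,
                                          1 /* channelCount */) != NO_ERROR) {
            return false;
        }
        return rec.set(AUDIO_SOURCE_MIC,            // audio_source_t, not int
                       8000,                        // sample rate
                       AUDIO_FORMAT_PCM_16_BIT,     // audio_format_t, not int
                       AUDIO_CHANNEL_IN_MONO,
                       frames) == NO_ERROR;
    }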
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 6a15f6e..cc0a594 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -55,19 +55,21 @@
     static status_t getMasterMute(bool* mute);
 
     // set/get stream volume on specified output
-    static status_t setStreamVolume(int stream, float value, int output);
-    static status_t getStreamVolume(int stream, float* volume, int output);
+    static status_t setStreamVolume(audio_stream_type_t stream, float value,
+                                    audio_io_handle_t output);
+    static status_t getStreamVolume(audio_stream_type_t stream, float* volume,
+                                    audio_io_handle_t output);
 
     // mute/unmute stream
-    static status_t setStreamMute(int stream, bool mute);
-    static status_t getStreamMute(int stream, bool* mute);
+    static status_t setStreamMute(audio_stream_type_t stream, bool mute);
+    static status_t getStreamMute(audio_stream_type_t stream, bool* mute);
 
-    // set audio mode in audio hardware (see audio_mode_t)
-    static status_t setMode(int mode);
+    // set audio mode in audio hardware
+    static status_t setMode(audio_mode_t mode);
 
     // returns true in *state if tracks are active on the specified stream or has been active
     // in the past inPastMs milliseconds
-    static status_t isStreamActive(int stream, bool *state, uint32_t inPastMs = 0);
+    static status_t isStreamActive(audio_stream_type_t stream, bool *state, uint32_t inPastMs = 0);
 
     // set/get audio hardware parameters. The function accepts a list of parameters
     // key value pairs in the form: key1=value1;key2=value2;...
@@ -83,13 +85,19 @@
     static float linearToLog(int volume);
     static int logToLinear(float volume);
 
+    static status_t getOutputSamplingRate(int* samplingRate, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+    static status_t getOutputFrameCount(int* frameCount, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+    static status_t getOutputLatency(uint32_t* latency, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+
+    // DEPRECATED
     static status_t getOutputSamplingRate(int* samplingRate, int stream = AUDIO_STREAM_DEFAULT);
+
+    // DEPRECATED
     static status_t getOutputFrameCount(int* frameCount, int stream = AUDIO_STREAM_DEFAULT);
-    static status_t getOutputLatency(uint32_t* latency, int stream = AUDIO_STREAM_DEFAULT);
 
-    static bool routedToA2dpOutput(int streamType);
+    static bool routedToA2dpOutput(audio_stream_type_t streamType);
 
-    static status_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount,
+    static status_t getInputBufferSize(uint32_t sampleRate, audio_format_t format, int channelCount,
         size_t* buffSize);
 
     static status_t setVoiceVolume(float volume);
@@ -103,7 +111,7 @@
     // - BAD_VALUE: invalid parameter
     // NOTE: this feature is not supported on all hardware platforms and it is
     // necessary to check returned status before using the returned values.
-    static status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, int stream = AUDIO_STREAM_DEFAULT);
+    static status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
 
     static unsigned int  getInputFramesLost(audio_io_handle_t ioHandle);
 
@@ -123,12 +131,12 @@
         NUM_CONFIG_EVENTS
     };
 
-    // audio output descritor used to cache output configurations in client process to avoid frequent calls
+    // audio output descriptor used to cache output configurations in client process to avoid frequent calls
     // through IAudioFlinger
     class OutputDescriptor {
     public:
         OutputDescriptor()
-        : samplingRate(0), format(0), channels(0), frameCount(0), latency(0)  {}
+        : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channels(0), frameCount(0), latency(0)  {}
 
         uint32_t samplingRate;
         int32_t format;
@@ -142,15 +150,14 @@
     //
     static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state, const char *device_address);
     static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device, const char *device_address);
-    static status_t setPhoneState(int state);
-    static status_t setRingerMode(uint32_t mode, uint32_t mask);
+    static status_t setPhoneState(audio_mode_t state);
     static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
     static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
     static audio_io_handle_t getOutput(audio_stream_type_t stream,
                                         uint32_t samplingRate = 0,
-                                        uint32_t format = AUDIO_FORMAT_DEFAULT,
+                                        audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                         uint32_t channels = AUDIO_CHANNEL_OUT_STEREO,
-                                        audio_policy_output_flags_t flags = AUDIO_POLICY_OUTPUT_FLAG_INDIRECT);
+                                        audio_policy_output_flags_t flags = AUDIO_POLICY_OUTPUT_FLAG_NONE);
     static status_t startOutput(audio_io_handle_t output,
                                 audio_stream_type_t stream,
                                 int session = 0);
@@ -158,9 +165,9 @@
                                audio_stream_type_t stream,
                                int session = 0);
     static void releaseOutput(audio_io_handle_t output);
-    static audio_io_handle_t getInput(int inputSource,
+    static audio_io_handle_t getInput(audio_source_t inputSource,
                                     uint32_t samplingRate = 0,
-                                    uint32_t format = AUDIO_FORMAT_DEFAULT,
+                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                     uint32_t channels = AUDIO_CHANNEL_IN_MONO,
                                     audio_in_acoustics_t acoustics = (audio_in_acoustics_t)0,
                                     int sessionId = 0);
@@ -170,11 +177,15 @@
     static status_t initStreamVolume(audio_stream_type_t stream,
                                       int indexMin,
                                       int indexMax);
-    static status_t setStreamVolumeIndex(audio_stream_type_t stream, int index);
-    static status_t getStreamVolumeIndex(audio_stream_type_t stream, int *index);
+    static status_t setStreamVolumeIndex(audio_stream_type_t stream,
+                                         int index,
+                                         audio_devices_t device);
+    static status_t getStreamVolumeIndex(audio_stream_type_t stream,
+                                         int *index,
+                                         audio_devices_t device);
 
     static uint32_t getStrategyForStream(audio_stream_type_t stream);
-    static uint32_t getDevicesForStream(audio_stream_type_t stream);
+    static audio_devices_t getDevicesForStream(audio_stream_type_t stream);
 
     static audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc);
     static status_t registerEffect(effect_descriptor_t *desc,
@@ -207,8 +218,8 @@
         // IAudioFlingerClient
 
         // indicate a change in the configuration of an output or input: keeps the cached
-        // values for output/input parameters upto date in client process
-        virtual void ioConfigChanged(int event, int ioHandle, void *param2);
+        // values for output/input parameters up-to-date in client process
+        virtual void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2);
     };
 
     class AudioPolicyServiceClient: public IBinder::DeathRecipient
@@ -233,13 +244,13 @@
     static size_t gInBuffSize;
     // previous parameters for recording buffer size queries
     static uint32_t gPrevInSamplingRate;
-    static int gPrevInFormat;
+    static audio_format_t gPrevInFormat;
     static int gPrevInChannelCount;
 
     static sp<IAudioPolicyService> gAudioPolicyService;
 
     // mapping between stream types and outputs
-    static DefaultKeyedVector<int, audio_io_handle_t> gStreamOutputMap;
+    static DefaultKeyedVector<audio_stream_type_t, audio_io_handle_t> gStreamOutputMap;
     // list of output descriptors containing cached parameters
     // (sampling rate, framecount, channel count...)
     static DefaultKeyedVector<audio_io_handle_t, OutputDescriptor *> gOutputs;
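A minimal sketch of the per-device volume-index call introduced above; the speaker device is chosen here purely for illustration.

    #include <media/AudioSystem.h>

    using namespace android;

    static status_t setMusicIndexOnSpeaker(int index) {
        return AudioSystem::setStreamVolumeIndex(AUDIO_STREAM_MUSIC,
                                                 index,
                                                 AUDIO_DEVICE_OUT_SPEAKER);
    }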
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 1c401e2..552e829 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -38,7 +38,7 @@
 
 // ----------------------------------------------------------------------------
 
-class AudioTrack
+class AudioTrack : virtual public RefBase
 {
 public:
     enum channel_index {
@@ -58,8 +58,8 @@
         EVENT_BUFFER_END = 5        // Playback head is at the end of the buffer.
     };
 
-    /* Create Buffer on the stack and pass it to obtainBuffer()
-     * and releaseBuffer().
+    /* Client should declare Buffer on the stack and pass address to obtainBuffer()
+     * and releaseBuffer().  See also callback_t for EVENT_MORE_DATA.
      */
 
     class Buffer
@@ -68,34 +68,39 @@
         enum {
             MUTE    = 0x00000001
         };
-        uint32_t    flags;
-        int         format;
+        uint32_t    flags;        // 0 or MUTE
+        audio_format_t format; // but AUDIO_FORMAT_PCM_8_BIT -> AUDIO_FORMAT_PCM_16_BIT
+        // accessed directly by WebKit ANP callback
         int         channelCount; // will be removed in the future, do not use
-        size_t      frameCount;
-        size_t      size;
+
+        size_t      frameCount;   // number of sample frames corresponding to size;
+                                  // on input it is the number of frames desired,
+                                  // on output is the number of frames actually filled
+
+        size_t      size;         // input/output in byte units
         union {
             void*       raw;
-            short*      i16;
-            int8_t*     i8;
+            short*      i16;    // signed 16-bit
+            int8_t*     i8;     // unsigned 8-bit, offset by 0x80
         };
     };
 
 
     /* As a convenience, if a callback is supplied, a handler thread
      * is automatically created with the appropriate priority. This thread
-     * invokes the callback when a new buffer becomes availlable or an underrun condition occurs.
+     * invokes the callback when a new buffer becomes available or various conditions occur.
      * Parameters:
      *
      * event:   type of event notified (see enum AudioTrack::event_type).
      * user:    Pointer to context for use by the callback receiver.
      * info:    Pointer to optional parameter according to event type:
      *          - EVENT_MORE_DATA: pointer to AudioTrack::Buffer struct. The callback must not write
-     *          more bytes than indicated by 'size' field and update 'size' if less bytes are
-     *          written.
+     *            more bytes than indicated by 'size' field and update 'size' if fewer bytes are
+     *            written.
      *          - EVENT_UNDERRUN: unused.
      *          - EVENT_LOOP_END: pointer to an int indicating the number of loops remaining.
-     *          - EVENT_MARKER: pointer to an uin32_t containing the marker position in frames.
-     *          - EVENT_NEW_POS: pointer to an uin32_t containing the new position in frames.
+     *          - EVENT_MARKER: pointer to a uint32_t containing the marker position in frames.
+     *          - EVENT_NEW_POS: pointer to a uint32_t containing the new position in frames.
      *          - EVENT_BUFFER_END: unused.
      */
 
@@ -109,7 +114,7 @@
      */
 
      static status_t getMinFrameCount(int* frameCount,
-                                      int streamType      =-1,
+                                      audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT,
                                       uint32_t sampleRate = 0);
 
     /* Constructs an uninitialized AudioTrack. No connection with
@@ -137,63 +142,76 @@
      * flags:              Reserved for future use.
      * cbf:                Callback function. If not null, this function is called periodically
      *                     to request new PCM data.
+     * user:               Context for use by the callback receiver.
      * notificationFrames: The callback function is called each time notificationFrames PCM
-     *                     frames have been comsumed from track input buffer.
-     * user                Context for use by the callback receiver.
+     *                     frames have been consumed from track input buffer.
+     * sessionId:          Specific session ID, or zero to use default.
      */
 
-                        AudioTrack( int streamType,
+                        AudioTrack( audio_stream_type_t streamType,
                                     uint32_t sampleRate  = 0,
-                                    int format           = 0,
+                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                     int channelMask      = 0,
                                     int frameCount       = 0,
-                                    uint32_t flags       = 0,
+                                    audio_policy_output_flags_t flags = AUDIO_POLICY_OUTPUT_FLAG_NONE,
+                                    callback_t cbf       = NULL,
+                                    void* user           = NULL,
+                                    int notificationFrames = 0,
+                                    int sessionId = 0);
+
+                        // DEPRECATED
+                        explicit AudioTrack( int streamType,
+                                    uint32_t sampleRate  = 0,
+                                    int format = AUDIO_FORMAT_DEFAULT,
+                                    int channelMask      = 0,
+                                    int frameCount       = 0,
+                                    uint32_t flags       = (uint32_t) AUDIO_POLICY_OUTPUT_FLAG_NONE,
                                     callback_t cbf       = 0,
                                     void* user           = 0,
                                     int notificationFrames = 0,
                                     int sessionId = 0);
 
     /* Creates an audio track and registers it with AudioFlinger. With this constructor,
-     * The PCM data to be rendered by AudioTrack is passed in a shared memory buffer
+     * the PCM data to be rendered by AudioTrack is passed in a shared memory buffer
      * identified by the argument sharedBuffer. This prototype is for static buffer playback.
-     * PCM data must be present into memory before the AudioTrack is started.
-     * The Write() and Flush() methods are not supported in this case.
-     * It is recommented to pass a callback function to be notified of playback end by an
+     * PCM data must be present in memory before the AudioTrack is started.
+     * The write() and flush() methods are not supported in this case.
+     * It is recommended to pass a callback function to be notified of playback end by an
      * EVENT_UNDERRUN event.
      */
 
-                        AudioTrack( int streamType,
+                        AudioTrack( audio_stream_type_t streamType,
                                     uint32_t sampleRate = 0,
-                                    int format          = 0,
+                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                     int channelMask     = 0,
                                     const sp<IMemory>& sharedBuffer = 0,
-                                    uint32_t flags      = 0,
-                                    callback_t cbf      = 0,
-                                    void* user          = 0,
+                                    audio_policy_output_flags_t flags = AUDIO_POLICY_OUTPUT_FLAG_NONE,
+                                    callback_t cbf      = NULL,
+                                    void* user          = NULL,
                                     int notificationFrames = 0,
                                     int sessionId = 0);
 
     /* Terminates the AudioTrack and unregisters it from AudioFlinger.
-     * Also destroys all resources assotiated with the AudioTrack.
+     * Also destroys all resources associated with the AudioTrack.
      */
                         ~AudioTrack();
 
 
     /* Initialize an uninitialized AudioTrack.
      * Returned status (from utils/Errors.h) can be:
-     *  - NO_ERROR: successful intialization
-     *  - INVALID_OPERATION: AudioTrack is already intitialized
+     *  - NO_ERROR: successful initialization
+     *  - INVALID_OPERATION: AudioTrack is already initialized
      *  - BAD_VALUE: invalid parameter (channels, format, sampleRate...)
      *  - NO_INIT: audio server or audio hardware not initialized
      * */
-            status_t    set(int streamType      =-1,
+            status_t    set(audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT,
                             uint32_t sampleRate = 0,
-                            int format          = 0,
+                            audio_format_t format = AUDIO_FORMAT_DEFAULT,
                             int channelMask     = 0,
                             int frameCount      = 0,
-                            uint32_t flags      = 0,
-                            callback_t cbf      = 0,
-                            void* user          = 0,
+                            audio_policy_output_flags_t flags = AUDIO_POLICY_OUTPUT_FLAG_NONE,
+                            callback_t cbf      = NULL,
+                            void* user          = NULL,
                             int notificationFrames = 0,
                             const sp<IMemory>& sharedBuffer = 0,
                             bool threadCanCallJava = false,
@@ -201,25 +219,30 @@
 
 
     /* Result of constructing the AudioTrack. This must be checked
-     * before using any AudioTrack API (except for set()), using
+     * before using any AudioTrack API (except for set()), because using
      * an uninitialized AudioTrack produces undefined results.
      * See set() method above for possible return codes.
      */
             status_t    initCheck() const;
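A minimal sketch of the construct-then-set() pattern these return codes describe, assuming the default (uninitialized) AudioTrack constructor declared earlier in this header; the stream parameters are illustrative:

    #include <media/AudioTrack.h>

    using namespace android;

    status_t openMusicTrack(AudioTrack& track)           // track: default-constructed
    {
        status_t err = track.set(AUDIO_STREAM_MUSIC,
                                 44100,
                                 AUDIO_FORMAT_PCM_16_BIT,
                                 AUDIO_CHANNEL_OUT_STEREO);
        if (err != NO_ERROR) {
            return err;                                  // BAD_VALUE, NO_INIT, ... as listed above
        }
        return track.initCheck();                        // must be NO_ERROR before any other call
    }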
 
-    /* Returns this track's latency in milliseconds.
+    /* Returns this track's estimated latency in milliseconds.
      * This includes the latency due to AudioTrack buffer size, AudioMixer (if any)
      * and audio hardware driver.
      */
             uint32_t     latency() const;
 
-    /* getters, see constructor */
+    /* getters, see constructors and set() */
 
-            int         streamType() const;
-            int         format() const;
+            audio_stream_type_t streamType() const;
+            audio_format_t format() const;
             int         channelCount() const;
             uint32_t    frameCount() const;
-            int         frameSize() const;
+
+    /* Return channelCount * (bit depth per channel / 8).
+     * channelCount is determined from channelMask, and bit depth comes from format.
+     */
+            size_t      frameSize() const;
+
             sp<IMemory>& sharedBuffer();
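For example (a sketch using only the getters above), a 16-bit stereo track has a frame size of 2 channels * 2 bytes = 4 bytes, so the client buffer size in bytes follows directly:

    #include <media/AudioTrack.h>

    // Sketch: client buffer size in bytes, per the frameSize() formula above.
    size_t trackBufferBytes(const android::AudioTrack& track)
    {
        return track.frameCount() * track.frameSize();   // frames * (channels * bytes per sample)
    }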
 
 
@@ -235,8 +258,8 @@
             void        stop();
             bool        stopped() const;
 
-    /* flush a stopped track. All pending buffers are discarded.
-     * This function has no effect if the track is not stoped.
+    /* Flush a stopped track. All pending buffers are discarded.
+     * This function has no effect if the track is not stopped.
      */
             void        flush();
 
@@ -246,29 +269,28 @@
      */
             void        pause();
 
-    /* mute or unmutes this track.
-     * While mutted, the callback, if set, is still called.
+    /* Mute or unmute this track.
+     * While muted, the callback, if set, is still called.
      */
             void        mute(bool);
             bool        muted() const;
 
-
-    /* set volume for this track, mostly used for games' sound effects
-     * left and right volumes. Levels must be <= 1.0.
+    /* Set volume for this track, mostly used for games' sound effects
+     * left and right volumes. Levels must be >= 0.0 and <= 1.0.
      */
             status_t    setVolume(float left, float right);
-            void        getVolume(float* left, float* right);
+            void        getVolume(float* left, float* right) const;
 
-    /* set the send level for this track. An auxiliary effect should be attached
-     * to the track with attachEffect(). Level must be <= 1.0.
+    /* Set the send level for this track. An auxiliary effect should be attached
+     * to the track with attachEffect(). Level must be >= 0.0 and <= 1.0.
      */
             status_t    setAuxEffectSendLevel(float level);
-            void        getAuxEffectSendLevel(float* level);
+            void        getAuxEffectSendLevel(float* level) const;
 
-    /* set sample rate for this track, mostly used for games' sound effects
+    /* Set sample rate for this track, mostly used for games' sound effects
      */
             status_t    setSampleRate(int sampleRate);
-            uint32_t    getSampleRate();
+            uint32_t    getSampleRate() const;
 
     /* Enables looping and sets the start and end points of looping.
      *
@@ -276,19 +298,17 @@
      *
      * loopStart:   loop start expressed as the number of PCM frames played since AudioTrack start.
      * loopEnd:     loop end expressed as the number of PCM frames played since AudioTrack start.
-     * loopCount:   number of loops to execute. Calling setLoop() with loopCount == 0 cancels any pending or
-     *          active loop. loopCount = -1 means infinite looping.
+     * loopCount:   number of loops to execute. Calling setLoop() with loopCount == 0 cancels any
+     *              pending or active loop. loopCount = -1 means infinite looping.
      *
      * For proper operation the following condition must be respected:
      *          (loopEnd-loopStart) <= framecount()
      */
             status_t    setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount);
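A hedged sketch of looping an entire static buffer, which trivially satisfies the (loopEnd - loopStart) <= framecount() condition above:

    #include <media/AudioTrack.h>

    void loopForever(android::AudioTrack& track)         // track: static-buffer mode, initialized
    {
        track.setLoop(0, track.frameCount(), -1);        // loopCount == -1: infinite looping
        track.start();
    }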
-            status_t    getLoop(uint32_t *loopStart, uint32_t *loopEnd, int *loopCount);
 
-
-    /* Sets marker position. When playback reaches the number of frames specified, a callback with event 
-     * type EVENT_MARKER is called. Calling setMarkerPosition with marker == 0 cancels marker notification 
-     * callback. 
+    /* Sets marker position. When playback reaches the number of frames specified, a callback with
+     * event type EVENT_MARKER is called. Calling setMarkerPosition with marker == 0 cancels marker
+     * notification callback.
      * If the AudioTrack has been opened with no callback function associated, the operation will fail.
      *
      * Parameters:
@@ -300,13 +320,13 @@
      *  - INVALID_OPERATION: the AudioTrack has no callback installed.
      */
             status_t    setMarkerPosition(uint32_t marker);
-            status_t    getMarkerPosition(uint32_t *marker);
+            status_t    getMarkerPosition(uint32_t *marker) const;
 
 
-    /* Sets position update period. Every time the number of frames specified has been played, 
-     * a callback with event type EVENT_NEW_POS is called. 
-     * Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification 
-     * callback. 
+    /* Sets position update period. Every time the number of frames specified has been played,
+     * a callback with event type EVENT_NEW_POS is called.
+     * Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification
+     * callback.
      * If the AudioTrack has been opened with no callback function associated, the operation will fail.
      *
      * Parameters:
@@ -318,14 +338,13 @@
      *  - INVALID_OPERATION: the AudioTrack has no callback installed.
      */
             status_t    setPositionUpdatePeriod(uint32_t updatePeriod);
-            status_t    getPositionUpdatePeriod(uint32_t *updatePeriod);
-
+            status_t    getPositionUpdatePeriod(uint32_t *updatePeriod) const;
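Taken together, the marker and position-update hooks above might be wired up as in this sketch; the callback shape is the callback_t installed at construction, and the frame counts are illustrative:

    #include <media/AudioTrack.h>

    using namespace android;

    // Hypothetical callback passed as "cbf" when the track was created.
    static void audioCallback(int event, void* /*user*/, void* /*info*/)
    {
        switch (event) {
        case AudioTrack::EVENT_MARKER:                   // playback reached the one-shot marker
            // ... e.g. trigger a UI cue ...
            break;
        case AudioTrack::EVENT_NEW_POS:                  // another updatePeriod frames played
            // ... e.g. refresh a progress bar ...
            break;
        default:
            break;
        }
    }

    static void armNotifications(AudioTrack& track)
    {
        track.setMarkerPosition(44100);                  // one EVENT_MARKER after 1 s at 44.1 kHz
        track.setPositionUpdatePeriod(22050);            // EVENT_NEW_POS every 0.5 s of playback
    }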
 
     /* Sets playback head position within AudioTrack buffer. The new position is specified
-     * in number of frames. 
+     * in number of frames.
      * This method must be called with the AudioTrack in paused or stopped state.
-     * Note that the actual position set is <position> modulo the AudioTrack buffer size in frames. 
-     * Therefore using this method makes sense only when playing a "static" audio buffer 
+     * Note that the actual position set is <position> modulo the AudioTrack buffer size in frames.
+     * Therefore using this method makes sense only when playing a "static" audio buffer
      * as opposed to streaming.
      * The getPosition() method on the other hand returns the total number of frames played since
      * playback start.
@@ -337,12 +356,12 @@
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful operation
      *  - INVALID_OPERATION: the AudioTrack is not stopped.
-     *  - BAD_VALUE: The specified position is beyond the number of frames present in AudioTrack buffer 
+     *  - BAD_VALUE: The specified position is beyond the number of frames present in AudioTrack buffer
      */
             status_t    setPosition(uint32_t position);
             status_t    getPosition(uint32_t *position);
 
-    /* Forces AudioTrack buffer full condition. When playing a static buffer, this method avoids 
+    /* Forces AudioTrack buffer full condition. When playing a static buffer, this method avoids
      * rewriting the buffer before restarting playback after a stop.
      * This method must be called with the AudioTrack in paused or stopped state.
      *
@@ -352,7 +371,7 @@
      */
             status_t    reload();
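A sketch of restarting a static buffer without rewriting it, as the reload() comment above suggests (setPosition() could be used instead to resume from a specific frame):

    #include <media/AudioTrack.h>

    void restartFromTop(android::AudioTrack& track)      // track: static-buffer mode
    {
        track.stop();                                    // reload() requires paused or stopped
        track.reload();                                  // buffer-full again, no rewrite needed
        track.start();
    }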
 
-    /* returns a handle on the audio output used by this AudioTrack.
+    /* Returns a handle on the audio output used by this AudioTrack.
      *
      * Parameters:
      *  none.
@@ -362,18 +381,17 @@
      */
             audio_io_handle_t    getOutput();
 
-    /* returns the unique ID associated to this track.
+    /* Returns the unique session ID associated with this track.
      *
      * Parameters:
      *  none.
      *
      * Returned value:
-     *  AudioTrack ID.
+     *  AudioTrack session ID.
      */
-            int    getSessionId();
+            int    getSessionId() const;
 
-
-    /* Attach track auxiliary output to specified effect. Used effectId = 0
+    /* Attach track auxiliary output to specified effect. Use effectId = 0
      * to detach track from effect.
      *
      * Parameters:
@@ -387,29 +405,41 @@
      */
             status_t    attachAuxEffect(int effectId);
 
-    /* obtains a buffer of "frameCount" frames. The buffer must be
-     * filled entirely. If the track is stopped, obtainBuffer() returns
-     * STOPPED instead of NO_ERROR as long as there are buffers availlable,
+    /* Obtains a buffer of "frameCount" frames. The buffer must be
+     * filled entirely, and then released with releaseBuffer().
+     * If the track is stopped, obtainBuffer() returns
+     * STOPPED instead of NO_ERROR as long as there are buffers available,
      * at which point NO_MORE_BUFFERS is returned.
      * Buffers will be returned until the pool (buffercount())
      * is exhausted, at which point obtainBuffer() will either block
      * or return WOULD_BLOCK depending on the value of the "blocking"
      * parameter.
+     *
+     * Interpretation of waitCount:
+     *  +n  limits wait time to n * WAIT_PERIOD_MS,
+     *  -1  causes an (almost) infinite wait time,
+     *   0  non-blocking.
      */
 
         enum {
-            NO_MORE_BUFFERS = 0x80000001,
+            NO_MORE_BUFFERS = 0x80000001,   // same name in AudioFlinger.h, ok to be different value
             STOPPED = 1
         };
 
             status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount);
+
+    /* Release a filled buffer of "frameCount" frames for AudioFlinger to process. */
             void        releaseBuffer(Buffer* audioBuffer);
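A hedged sketch of the obtainBuffer()/releaseBuffer() cycle; it assumes, per the Buffer struct in this header, that the buffer exposes frameCount, size and a raw data pointer, and that obtainBuffer() trims frameCount to what was actually granted:

    #include <string.h>
    #include <media/AudioTrack.h>

    using namespace android;

    // Sketch: push "frames" frames of silence through a streaming track.
    void pumpSilence(AudioTrack& track, size_t frames)
    {
        while (frames > 0) {
            AudioTrack::Buffer buffer;
            buffer.frameCount = frames;                  // request whatever is left
            if (track.obtainBuffer(&buffer, -1) != NO_ERROR) {
                break;                                   // STOPPED, NO_MORE_BUFFERS, ...
            }
            memset(buffer.raw, 0, buffer.size);          // the buffer must be filled entirely
            frames -= buffer.frameCount;
            track.releaseBuffer(&buffer);                // hand the frames to AudioFlinger
        }
    }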
 
-
     /* As a convenience we provide a write() interface to the audio buffer.
-     * This is implemented on top of lockBuffer/unlockBuffer. For best
-     * performance
-     *
+     * This is implemented on top of obtainBuffer/releaseBuffer. For best
+     * performance use callbacks. Returns actual number of bytes written >= 0,
+     * or one of the following negative status codes:
+     *      INVALID_OPERATION   AudioTrack is configured for shared buffer mode
+     *      BAD_VALUE           size is invalid
+     *      STOPPED             AudioTrack was stopped during the write
+     *      NO_MORE_BUFFERS     when obtainBuffer() returns same
+     *      or any other error code returned by IAudioTrack::start() or restoreTrack_l().
      */
             ssize_t     write(const void* buffer, size_t size);
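A sketch of a blocking helper built on write(), handling the partial-write and negative status cases listed above:

    #include <stdint.h>
    #include <media/AudioTrack.h>

    using namespace android;

    status_t writeAll(AudioTrack& track, const uint8_t* data, size_t size)
    {
        while (size > 0) {
            ssize_t written = track.write(data, size);
            if (written < 0) {
                return (status_t) written;               // INVALID_OPERATION, BAD_VALUE, STOPPED, ...
            }
            data += written;
            size -= (size_t) written;
        }
        return NO_ERROR;
    }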
 
@@ -418,7 +448,7 @@
      */
             status_t dump(int fd, const Vector<String16>& args) const;
 
-private:
+protected:
     /* copying audio tracks is not allowed */
                         AudioTrack(const AudioTrack& other);
             AudioTrack& operator = (const AudioTrack& other);
@@ -434,16 +464,16 @@
         virtual status_t    readyToRun();
         virtual void        onFirstRef();
         AudioTrack& mReceiver;
-        Mutex       mLock;
     };
 
+            // body of AudioTrackThread::threadLoop()
             bool processAudioBuffer(const sp<AudioTrackThread>& thread);
-            status_t createTrack_l(int streamType,
+            status_t createTrack_l(audio_stream_type_t streamType,
                                  uint32_t sampleRate,
-                                 uint32_t format,
+                                 audio_format_t format,
                                  uint32_t channelMask,
                                  int frameCount,
-                                 uint32_t flags,
+                                 audio_policy_output_flags_t flags,
                                  const sp<IMemory>& sharedBuffer,
                                  audio_io_handle_t output,
                                  bool enforceFrameCount);
@@ -451,6 +481,7 @@
             status_t setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount);
             audio_io_handle_t getOutput_l();
             status_t restoreTrack_l(audio_track_cblk_t*& cblk, bool fromStart);
+            bool stopped_l() const { return !mActive; }
 
     sp<IAudioTrack>         mAudioTrack;
     sp<IMemory>             mCblkMemory;
@@ -461,8 +492,8 @@
     uint32_t                mFrameCount;
 
     audio_track_cblk_t*     mCblk;
-    uint32_t                mFormat;
-    uint8_t                 mStreamType;
+    audio_format_t          mFormat;
+    audio_stream_type_t     mStreamType;
     uint8_t                 mChannelCount;
     uint8_t                 mMuted;
     uint8_t                 mReserved;
@@ -470,9 +501,9 @@
     status_t                mStatus;
     uint32_t                mLatency;
 
-    volatile int32_t        mActive;
+    bool                    mActive;                // protected by mLock
 
-    callback_t              mCbf;
+    callback_t              mCbf;                   // callback handler for events, or NULL
     void*                   mUserData;
     uint32_t                mNotificationFramesReq; // requested number of frames between each notification callback
     uint32_t                mNotificationFramesAct; // actual number of frames between each notification callback
@@ -484,13 +515,38 @@
     uint32_t                mNewPosition;
     uint32_t                mUpdatePeriod;
     bool                    mFlushed; // FIXME will be made obsolete by making flush() synchronous
-    uint32_t                mFlags;
+    audio_policy_output_flags_t mFlags;
     int                     mSessionId;
     int                     mAuxEffectId;
-    Mutex                   mLock;
+    mutable Mutex           mLock;
     status_t                mRestoreStatus;
+    bool                    mIsTimed;
+    int                     mPreviousPriority;          // before start()
+    int                     mPreviousSchedulingGroup;
 };
 
+class TimedAudioTrack : public AudioTrack
+{
+public:
+    TimedAudioTrack();
+
+    /* allocate a shared memory buffer that can be passed to queueTimedBuffer */
+    status_t allocateTimedBuffer(size_t size, sp<IMemory>* buffer);
+
+    /* queue a buffer obtained via allocateTimedBuffer for playback at the
+       given timestamp.  PTS units are microseconds on the media time timeline.
+       The media time transform (set by the audio producer with
+       setMediaTimeTransform) will handle converting from media time to local
+       time (perhaps going through the common time timeline in the case of
+       synchronized multiroom audio). */
+    status_t queueTimedBuffer(const sp<IMemory>& buffer, int64_t pts);
+
+    /* define a transform between media time and either common time or
+       local time */
+    enum TargetTimeline {LOCAL_TIME, COMMON_TIME};
+    status_t setMediaTimeTransform(const LinearTransform& xform,
+                                   TargetTimeline target);
+};
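A heavily hedged sketch of the timed path: allocate a shared-memory buffer, copy PCM into it, and queue it for presentation at a media-time timestamp in microseconds (error handling trimmed to the essentials):

    #include <string.h>
    #include <binder/IMemory.h>
    #include <media/AudioTrack.h>

    using namespace android;

    status_t queuePcmAt(TimedAudioTrack& track,
                        const void* pcm, size_t size, int64_t mediaTimeUs)
    {
        sp<IMemory> buffer;
        status_t err = track.allocateTimedBuffer(size, &buffer);
        if (err != NO_ERROR) {
            return err;
        }
        memcpy(buffer->pointer(), pcm, size);            // fill the shared-memory buffer
        return track.queueTimedBuffer(buffer, mediaTimeUs);
    }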
 
 }; // namespace android
 
diff --git a/include/media/EffectsFactoryApi.h b/include/media/EffectsFactoryApi.h
index 8ae13cc..65c26f4 100644
--- a/include/media/EffectsFactoryApi.h
+++ b/include/media/EffectsFactoryApi.h
@@ -87,7 +87,7 @@
 //    Description:    Creates an effect engine of the specified type and returns an
 //          effect control interface on this engine. The function will allocate the
 //          resources for an instance of the requested effect engine and return
-//          a handler on the effect control interface.
+//          a handle on the effect control interface.
 //
 //    Input:
 //          pEffectUuid:    pointer to the effect uuid.
@@ -109,23 +109,23 @@
 //        *pHandle:         updated with the effect handle.
 //
 ////////////////////////////////////////////////////////////////////////////////
-int EffectCreate(effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle);
+int EffectCreate(const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle);
 
 ////////////////////////////////////////////////////////////////////////////////
 //
 //    Function:       EffectRelease
 //
-//    Description:    Releases the effect engine whose handler is given as argument.
+//    Description:    Releases the effect engine whose handle is given as argument.
 //          All resources allocated to this particular instance of the effect are
 //          released.
 //
 //    Input:
-//          handle:    handler on the effect interface to be released.
+//          handle:    handle on the effect interface to be released.
 //
 //    Output:
 //        returned value:    0          successful operation.
 //                          -ENODEV     factory failed to initialize
-//                          -EINVAL     invalid interface handler
+//                          -EINVAL     invalid interface handle
 //
 ////////////////////////////////////////////////////////////////////////////////
 int EffectRelease(effect_handle_t handle);
@@ -151,7 +151,7 @@
 //        *pDescriptor:     updated with the effect descriptor.
 //
 ////////////////////////////////////////////////////////////////////////////////
-int EffectGetDescriptor(effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor);
+int EffectGetDescriptor(const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor);
 
 ////////////////////////////////////////////////////////////////////////////////
 //
@@ -167,7 +167,7 @@
 //                           1 if uuid is equal to EFFECT_UUID_NULL.
 //
 ////////////////////////////////////////////////////////////////////////////////
-int EffectIsNullUuid(effect_uuid_t *pEffectUuid);
+int EffectIsNullUuid(const effect_uuid_t *pEffectUuid);
 
 #if __cplusplus
 }  // extern "C"
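A sketch of the typical create/release cycle through this factory API, using the const-correct signatures above; the UUID parameter is a placeholder for a published effect UUID, not a real value:

    #include <errno.h>
    #include <media/EffectsFactoryApi.h>

    int createAndRelease(const effect_uuid_t* uuid, int32_t sessionId, int32_t ioId)
    {
        if (EffectIsNullUuid(uuid)) {
            return -EINVAL;
        }
        effect_descriptor_t desc;
        int err = EffectGetDescriptor(uuid, &desc);      // optionally inspect the effect first
        if (err != 0) {
            return err;
        }
        effect_handle_t handle;
        err = EffectCreate(uuid, sessionId, ioId, &handle);
        if (err != 0) {
            return err;
        }
        // ... drive the effect through its control interface via "handle" ...
        return EffectRelease(handle);
    }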
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 9e3cb7f..0f39cf3 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -27,6 +27,8 @@
 #include <media/IAudioTrack.h>
 #include <media/IAudioRecord.h>
 #include <media/IAudioFlingerClient.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
 #include <hardware/audio_effect.h>
 #include <media/IEffect.h>
 #include <media/IEffectClient.h>
@@ -46,22 +48,23 @@
      */
     virtual sp<IAudioTrack> createTrack(
                                 pid_t pid,
-                                int streamType,
+                                audio_stream_type_t streamType,
                                 uint32_t sampleRate,
-                                uint32_t format,
+                                audio_format_t format,
                                 uint32_t channelMask,
                                 int frameCount,
                                 uint32_t flags,
                                 const sp<IMemory>& sharedBuffer,
-                                int output,
+                                audio_io_handle_t output,
+                                bool isTimed,
                                 int *sessionId,
                                 status_t *status) = 0;
 
     virtual sp<IAudioRecord> openRecord(
                                 pid_t pid,
-                                int input,
+                                audio_io_handle_t input,
                                 uint32_t sampleRate,
-                                uint32_t format,
+                                audio_format_t format,
                                 uint32_t channelMask,
                                 int frameCount,
                                 uint32_t flags,
@@ -71,11 +74,13 @@
     /* query the audio hardware state. This state never changes,
      * and therefore can be cached.
      */
-    virtual     uint32_t    sampleRate(int output) const = 0;
-    virtual     int         channelCount(int output) const = 0;
-    virtual     uint32_t    format(int output) const = 0;
-    virtual     size_t      frameCount(int output) const = 0;
-    virtual     uint32_t    latency(int output) const = 0;
+    virtual     uint32_t    sampleRate(audio_io_handle_t output) const = 0;
+    virtual     int         channelCount(audio_io_handle_t output) const = 0;
+    virtual     audio_format_t format(audio_io_handle_t output) const = 0;
+    virtual     size_t      frameCount(audio_io_handle_t output) const = 0;
+
+    // return estimated latency in milliseconds
+    virtual     uint32_t    latency(audio_io_handle_t output) const = 0;
 
     /* set/get the audio hardware state. This will probably be used by
      * the preference panel, mostly.
@@ -89,76 +94,83 @@
     /* set/get stream type state. This will probably be used by
      * the preference panel, mostly.
      */
-    virtual     status_t    setStreamVolume(int stream, float value, int output) = 0;
-    virtual     status_t    setStreamMute(int stream, bool muted) = 0;
+    virtual     status_t    setStreamVolume(audio_stream_type_t stream, float value,
+                                    audio_io_handle_t output) = 0;
+    virtual     status_t    setStreamMute(audio_stream_type_t stream, bool muted) = 0;
 
-    virtual     float       streamVolume(int stream, int output) const = 0;
-    virtual     bool        streamMute(int stream) const = 0;
+    virtual     float       streamVolume(audio_stream_type_t stream,
+                                    audio_io_handle_t output) const = 0;
+    virtual     bool        streamMute(audio_stream_type_t stream) const = 0;
 
     // set audio mode
-    virtual     status_t    setMode(int mode) = 0;
+    virtual     status_t    setMode(audio_mode_t mode) = 0;
 
     // mic mute/state
     virtual     status_t    setMicMute(bool state) = 0;
     virtual     bool        getMicMute() const = 0;
 
-    virtual     status_t    setParameters(int ioHandle, const String8& keyValuePairs) = 0;
-    virtual     String8     getParameters(int ioHandle, const String8& keys) = 0;
+    virtual     status_t    setParameters(audio_io_handle_t ioHandle,
+                                    const String8& keyValuePairs) = 0;
+    virtual     String8     getParameters(audio_io_handle_t ioHandle, const String8& keys) const = 0;
 
     // register a current process for audio output change notifications
     virtual void registerClient(const sp<IAudioFlingerClient>& client) = 0;
 
     // retrieve the audio recording buffer size
-    virtual size_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount) = 0;
+    virtual size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format, int channelCount) const = 0;
 
-    virtual int openOutput(uint32_t *pDevices,
+    virtual audio_io_handle_t openOutput(uint32_t *pDevices,
                                     uint32_t *pSamplingRate,
-                                    uint32_t *pFormat,
+                                    audio_format_t *pFormat,
                                     uint32_t *pChannels,
                                     uint32_t *pLatencyMs,
-                                    uint32_t flags) = 0;
-    virtual int openDuplicateOutput(int output1, int output2) = 0;
-    virtual status_t closeOutput(int output) = 0;
-    virtual status_t suspendOutput(int output) = 0;
-    virtual status_t restoreOutput(int output) = 0;
+                                    audio_policy_output_flags_t flags) = 0;
+    virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
+                                    audio_io_handle_t output2) = 0;
+    virtual status_t closeOutput(audio_io_handle_t output) = 0;
+    virtual status_t suspendOutput(audio_io_handle_t output) = 0;
+    virtual status_t restoreOutput(audio_io_handle_t output) = 0;
 
-    virtual int openInput(uint32_t *pDevices,
+    virtual audio_io_handle_t openInput(uint32_t *pDevices,
                                     uint32_t *pSamplingRate,
-                                    uint32_t *pFormat,
+                                    audio_format_t *pFormat,
                                     uint32_t *pChannels,
-                                    uint32_t acoustics) = 0;
-    virtual status_t closeInput(int input) = 0;
+                                    audio_in_acoustics_t acoustics) = 0;
+    virtual status_t closeInput(audio_io_handle_t input) = 0;
 
-    virtual status_t setStreamOutput(uint32_t stream, int output) = 0;
+    virtual status_t setStreamOutput(audio_stream_type_t stream, audio_io_handle_t output) = 0;
 
     virtual status_t setVoiceVolume(float volume) = 0;
 
-    virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, int output) = 0;
+    virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
+                                    audio_io_handle_t output) const = 0;
 
-    virtual unsigned int  getInputFramesLost(int ioHandle) = 0;
+    virtual unsigned int getInputFramesLost(audio_io_handle_t ioHandle) const = 0;
 
     virtual int newAudioSessionId() = 0;
 
     virtual void acquireAudioSessionId(int audioSession) = 0;
     virtual void releaseAudioSessionId(int audioSession) = 0;
 
-    virtual status_t queryNumberEffects(uint32_t *numEffects) = 0;
+    virtual status_t queryNumberEffects(uint32_t *numEffects) const = 0;
 
-    virtual status_t queryEffect(uint32_t index, effect_descriptor_t *pDescriptor) = 0;
+    virtual status_t queryEffect(uint32_t index, effect_descriptor_t *pDescriptor) const = 0;
 
-    virtual status_t getEffectDescriptor(effect_uuid_t *pEffectUUID, effect_descriptor_t *pDescriptor) = 0;
+    virtual status_t getEffectDescriptor(const effect_uuid_t *pEffectUUID,
+                                        effect_descriptor_t *pDescriptor) const = 0;
 
     virtual sp<IEffect> createEffect(pid_t pid,
                                     effect_descriptor_t *pDesc,
                                     const sp<IEffectClient>& client,
                                     int32_t priority,
-                                    int output,
+                                    audio_io_handle_t output,
                                     int sessionId,
                                     status_t *status,
                                     int *id,
                                     int *enabled) = 0;
 
-    virtual status_t moveEffects(int session, int srcOutput, int dstOutput) = 0;
+    virtual status_t moveEffects(int session, audio_io_handle_t srcOutput,
+                                    audio_io_handle_t dstOutput) = 0;
 };
 
 
diff --git a/include/media/IAudioFlingerClient.h b/include/media/IAudioFlingerClient.h
index aa0cdcf..75a9971 100644
--- a/include/media/IAudioFlingerClient.h
+++ b/include/media/IAudioFlingerClient.h
@@ -21,6 +21,7 @@
 #include <utils/RefBase.h>
 #include <binder/IInterface.h>
 #include <utils/KeyedVector.h>
+#include <system/audio.h>
 
 namespace android {
 
@@ -32,7 +33,7 @@
     DECLARE_META_INTERFACE(AudioFlingerClient);
 
     // Notifies a change of audio input/output configuration.
-    virtual void ioConfigChanged(int event, int ioHandle, void *param2) = 0;
+    virtual void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2) = 0;
 
 };
 
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index 9807cbe..04c927a 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -45,15 +45,14 @@
                                               const char *device_address) = 0;
     virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
                                                                           const char *device_address) = 0;
-    virtual status_t setPhoneState(int state) = 0;
-    virtual status_t setRingerMode(uint32_t mode, uint32_t mask) = 0;
+    virtual status_t setPhoneState(audio_mode_t state) = 0;
     virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) = 0;
     virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) = 0;
     virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
                                         uint32_t samplingRate = 0,
-                                        uint32_t format = AUDIO_FORMAT_DEFAULT,
+                                        audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                         uint32_t channels = 0,
-                                        audio_policy_output_flags_t flags = AUDIO_POLICY_OUTPUT_FLAG_INDIRECT) = 0;
+                                        audio_policy_output_flags_t flags = AUDIO_POLICY_OUTPUT_FLAG_NONE) = 0;
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
                                  int session = 0) = 0;
@@ -61,9 +60,9 @@
                                 audio_stream_type_t stream,
                                 int session = 0) = 0;
     virtual void releaseOutput(audio_io_handle_t output) = 0;
-    virtual audio_io_handle_t getInput(int inputSource,
+    virtual audio_io_handle_t getInput(audio_source_t inputSource,
                                     uint32_t samplingRate = 0,
-                                    uint32_t format = AUDIO_FORMAT_DEFAULT,
+                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                     uint32_t channels = 0,
                                     audio_in_acoustics_t acoustics = (audio_in_acoustics_t)0,
                                     int audioSession = 0) = 0;
@@ -73,10 +72,14 @@
     virtual status_t initStreamVolume(audio_stream_type_t stream,
                                       int indexMin,
                                       int indexMax) = 0;
-    virtual status_t setStreamVolumeIndex(audio_stream_type_t stream, int index) = 0;
-    virtual status_t getStreamVolumeIndex(audio_stream_type_t stream, int *index) = 0;
+    virtual status_t setStreamVolumeIndex(audio_stream_type_t stream,
+                                          int index,
+                                          audio_devices_t device) = 0;
+    virtual status_t getStreamVolumeIndex(audio_stream_type_t stream,
+                                          int *index,
+                                          audio_devices_t device) = 0;
     virtual uint32_t getStrategyForStream(audio_stream_type_t stream) = 0;
-    virtual uint32_t getDevicesForStream(audio_stream_type_t stream) = 0;
+    virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream) = 0;
     virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc) = 0;
     virtual status_t registerEffect(effect_descriptor_t *desc,
                                     audio_io_handle_t io,
@@ -85,7 +88,7 @@
                                     int id) = 0;
     virtual status_t unregisterEffect(int id) = 0;
     virtual status_t setEffectEnabled(int id, bool enabled) = 0;
-    virtual bool     isStreamActive(int stream, uint32_t inPastMs = 0) const = 0;
+    virtual bool     isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const = 0;
     virtual status_t queryDefaultPreProcessing(int audioSession,
                                               effect_descriptor_t *descriptors,
                                               uint32_t *count) = 0;
diff --git a/include/media/IAudioRecord.h b/include/media/IAudioRecord.h
index 46735de..089be3b 100644
--- a/include/media/IAudioRecord.h
+++ b/include/media/IAudioRecord.h
@@ -32,22 +32,23 @@
 
 class IAudioRecord : public IInterface
 {
-public: 
+public:
     DECLARE_META_INTERFACE(AudioRecord);
 
     /* After it's created the track is not active. Call start() to
      * make it active. If set, the callback will start being called.
+     * tid identifies the client callback thread, or 0 if not needed.
      */
-    virtual status_t    start() = 0;
+    virtual status_t    start(pid_t tid) = 0;
 
     /* Stop a track. If set, the callback will cease being called and
-     * obtainBuffer will return an error. Buffers that are already released 
+     * obtainBuffer will return an error. Buffers that are already released
      * will be processed, unless flush() is called.
      */
     virtual void        stop() = 0;
 
     /* get this tracks control block */
-    virtual sp<IMemory> getCblk() const = 0;    
+    virtual sp<IMemory> getCblk() const = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
index 47d530b..577b095 100644
--- a/include/media/IAudioTrack.h
+++ b/include/media/IAudioTrack.h
@@ -24,7 +24,7 @@
 #include <utils/Errors.h>
 #include <binder/IInterface.h>
 #include <binder/IMemory.h>
-
+#include <utils/LinearTransform.h>
 
 namespace android {
 
@@ -32,33 +32,37 @@
 
 class IAudioTrack : public IInterface
 {
-public: 
+public:
     DECLARE_META_INTERFACE(AudioTrack);
 
+    /* Get this track's control block */
+    virtual sp<IMemory> getCblk() const = 0;
+
     /* After it's created the track is not active. Call start() to
      * make it active. If set, the callback will start being called.
+     * tid identifies the client callback thread, or 0 if not needed.
      */
-    virtual status_t    start() = 0;
+    virtual status_t    start(pid_t tid) = 0;
 
     /* Stop a track. If set, the callback will cease being called and
-     * obtainBuffer will return an error. Buffers that are already released 
-     * will be processed, unless flush() is called.
+     * obtainBuffer will return an error. Buffers that are already released
+     * will continue to be processed, unless/until flush() is called.
      */
     virtual void        stop() = 0;
 
-    /* flush a stopped track. All pending buffers are discarded.
-     * This function has no effect if the track is not stoped.
+    /* Flush a stopped or paused track. All pending/released buffers are discarded.
+     * This function has no effect if the track is not stopped or paused.
      */
     virtual void        flush() = 0;
 
-    /* mute or unmutes this track.
-     * While mutted, the callback, if set, is still called.
+    /* Mute or unmute this track.
+     * While muted, the callback, if set, is still called.
      */
     virtual void        mute(bool) = 0;
-    
+
     /* Pause a track. If set, the callback will cease being called and
-     * obtainBuffer will return an error. Buffers that are already released 
-     * will be processed, unless flush() is called.
+     * obtainBuffer will return an error. Buffers that are already released
+     * will continue to be processed, unless/until flush() is called.
      */
     virtual void        pause() = 0;
 
@@ -67,8 +71,23 @@
      */
     virtual status_t    attachAuxEffect(int effectId) = 0;
 
-    /* get this tracks control block */
-    virtual sp<IMemory> getCblk() const = 0;    
+
+    /* Allocate a shared memory buffer suitable for holding timed audio
+       samples */
+    virtual status_t    allocateTimedBuffer(size_t size,
+                                            sp<IMemory>* buffer) = 0;
+
+    /* Queue a buffer obtained via allocateTimedBuffer for playback at the given
+       timestamp */
+    virtual status_t    queueTimedBuffer(const sp<IMemory>& buffer,
+                                         int64_t pts) = 0;
+
+    /* Define the linear transform that will be applied to the timestamps
+       given to queueTimedBuffer (which are expressed in media time).
+       Target specifies whether this transform converts media time to local time
+       or Tungsten time. The values for target are defined in AudioTrack.h */
+    virtual status_t    setMediaTimeTransform(const LinearTransform& xform,
+                                              int target) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/media/IMediaMetadataRetriever.h b/include/media/IMediaMetadataRetriever.h
index 1c1c268..6dbb2d7 100644
--- a/include/media/IMediaMetadataRetriever.h
+++ b/include/media/IMediaMetadataRetriever.h
@@ -56,4 +56,3 @@
 }; // namespace android
 
 #endif // ANDROID_IMEDIAMETADATARETRIEVER_H
-
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
index e905903..00facc5 100644
--- a/include/media/IMediaPlayer.h
+++ b/include/media/IMediaPlayer.h
@@ -21,6 +21,11 @@
 #include <binder/IInterface.h>
 #include <binder/Parcel.h>
 #include <utils/KeyedVector.h>
+#include <system/audio.h>
+
+// Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
+// global, and not in android::
+struct sockaddr_in;
 
 namespace android {
 
@@ -51,13 +56,15 @@
     virtual status_t        getCurrentPosition(int* msec) = 0;
     virtual status_t        getDuration(int* msec) = 0;
     virtual status_t        reset() = 0;
-    virtual status_t        setAudioStreamType(int type) = 0;
+    virtual status_t        setAudioStreamType(audio_stream_type_t type) = 0;
     virtual status_t        setLooping(int loop) = 0;
     virtual status_t        setVolume(float leftVolume, float rightVolume) = 0;
     virtual status_t        setAuxEffectSendLevel(float level) = 0;
     virtual status_t        attachAuxEffect(int effectId) = 0;
     virtual status_t        setParameter(int key, const Parcel& request) = 0;
     virtual status_t        getParameter(int key, Parcel* reply) = 0;
+    virtual status_t        setRetransmitEndpoint(const struct sockaddr_in* endpoint) = 0;
+    virtual status_t        setNextPlayer(const sp<IMediaPlayer>& next) = 0;
 
     // Invoke a generic method on the player by using opaque parcels
     // for the request and reply.
diff --git a/include/media/IMediaPlayerClient.h b/include/media/IMediaPlayerClient.h
index daec1c7..8f1843e 100644
--- a/include/media/IMediaPlayerClient.h
+++ b/include/media/IMediaPlayerClient.h
@@ -45,4 +45,3 @@
 }; // namespace android
 
 #endif // ANDROID_IMEDIAPLAYERCLIENT_H
-
diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h
index 93bbe13..4f46fcd 100644
--- a/include/media/IMediaPlayerService.h
+++ b/include/media/IMediaPlayerService.h
@@ -23,6 +23,7 @@
 #include <utils/String8.h>
 #include <binder/IInterface.h>
 #include <binder/Parcel.h>
+#include <system/audio.h>
 
 #include <media/IMediaPlayerClient.h>
 #include <media/IMediaPlayer.h>
@@ -43,8 +44,8 @@
     virtual sp<IMediaMetadataRetriever> createMetadataRetriever(pid_t pid) = 0;
     virtual sp<IMediaPlayer> create(pid_t pid, const sp<IMediaPlayerClient>& client, int audioSessionId = 0) = 0;
 
-    virtual sp<IMemory>         decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, int* pFormat) = 0;
-    virtual sp<IMemory>         decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, int* pFormat) = 0;
+    virtual sp<IMemory>         decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) = 0;
+    virtual sp<IMemory>         decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) = 0;
     virtual sp<IOMX>            getOMX() = 0;
 
     // codecs and audio devices usage tracking for the battery app
diff --git a/include/media/IMediaRecorderClient.h b/include/media/IMediaRecorderClient.h
index 0058ef2..e7d0229 100644
--- a/include/media/IMediaRecorderClient.h
+++ b/include/media/IMediaRecorderClient.h
@@ -45,4 +45,3 @@
 }; // namespace android
 
 #endif // ANDROID_IMEDIARECORDERCLIENT_H
-
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index c4cc947..a295e9a 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -42,10 +42,10 @@
     typedef void *buffer_id;
     typedef void *node_id;
 
-    // Given the calling process' pid, returns true iff
+    // Given a node_id and the calling process' pid, returns true iff
     // the implementation of the OMX interface lives in the same
     // process.
-    virtual bool livesLocally(pid_t pid) = 0;
+    virtual bool livesLocally(node_id node, pid_t pid) = 0;
 
     struct ComponentInfo {
         String8 mName;
diff --git a/include/media/JetPlayer.h b/include/media/JetPlayer.h
index 16764a9..38a3e44 100644
--- a/include/media/JetPlayer.h
+++ b/include/media/JetPlayer.h
@@ -22,7 +22,7 @@
 
 #include <libsonivox/jet.h>
 #include <libsonivox/eas_types.h>
-#include "AudioTrack.h"
+#include <media/AudioTrack.h>
 
 
 namespace android {
@@ -40,13 +40,13 @@
     static const int JET_NUMQUEUEDSEGMENT_UPDATE = 3;
     static const int JET_PAUSE_UPDATE            = 4;
 
-    JetPlayer(jobject javaJetPlayer, 
-            int maxTracks = 32, 
+    JetPlayer(jobject javaJetPlayer,
+            int maxTracks = 32,
             int trackBufferSize = 1200);
     ~JetPlayer();
     int init();
     int release();
-    
+
     int loadFromFile(const char* url);
     int loadFromFD(const int fd, const long long offset, const long long length);
     int closeFile();
@@ -60,12 +60,11 @@
     int clearQueue();
 
     void setEventCallback(jetevent_callback callback);
-    
+
     int getMaxTracks() { return mMaxTracks; };
 
 
 private:
-    static  int         renderThread(void*);
     int                 render();
     void                fireUpdateOnStatusChange();
     void                fireEventsFromJetQueue();
@@ -89,14 +88,36 @@
     int                 mMaxTracks; // max number of MIDI tracks, usually 32
     EAS_DATA_HANDLE     mEasData;
     EAS_FILE_LOCATOR    mEasJetFileLoc;
-    EAS_PCM*            mAudioBuffer;// EAS renders the MIDI data into this buffer, 
+    EAS_PCM*            mAudioBuffer;// EAS renders the MIDI data into this buffer,
     AudioTrack*         mAudioTrack; // and we play it in this audio track
     int                 mTrackBufferSize;
     S_JET_STATUS        mJetStatus;
     S_JET_STATUS        mPreviousJetStatus;
 
-    char                mJetFilePath[256];
+    char                mJetFilePath[PATH_MAX];
 
+    class JetPlayerThread : public Thread {
+    public:
+        JetPlayerThread(JetPlayer *player) : mPlayer(player) {
+        }
+
+    protected:
+        virtual ~JetPlayerThread() {}
+
+    private:
+        JetPlayer *mPlayer;
+
+        bool threadLoop() {
+            int result;
+            result = mPlayer->render();
+            return false;
+        }
+
+        JetPlayerThread(const JetPlayerThread &);
+        JetPlayerThread &operator=(const JetPlayerThread &);
+    };
+
+    sp<JetPlayerThread> mThread;
 
 }; // end class JetPlayer
 
diff --git a/include/media/MediaMetadataRetrieverInterface.h b/include/media/MediaMetadataRetrieverInterface.h
index 27b7e4d..ecc3b65 100644
--- a/include/media/MediaMetadataRetrieverInterface.h
+++ b/include/media/MediaMetadataRetrieverInterface.h
@@ -1,6 +1,6 @@
 /*
 **
-** Copyright (C) 2008 The Android Open Source Project 
+** Copyright (C) 2008 The Android Open Source Project
 **
 ** Licensed under the Apache License, Version 2.0 (the "License");
 ** you may not use this file except in compliance with the License.
@@ -56,4 +56,3 @@
 }; // namespace android
 
 #endif // ANDROID_MEDIAMETADATARETRIEVERINTERFACE_H
-
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 80f43a3..d4aa233 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -29,6 +29,10 @@
 #include <media/AudioSystem.h>
 #include <media/Metadata.h>
 
+// Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
+// global, and not in android::
+struct sockaddr_in;
+
 namespace android {
 
 class Parcel;
@@ -46,6 +50,9 @@
     // The shared library with the test player is passed as an
     // argument to the 'test:' url in the setDataSource call.
     TEST_PLAYER = 5,
+
+    AAH_RX_PLAYER = 100,
+    AAH_TX_PLAYER = 101,
 };
 
 
@@ -53,6 +60,8 @@
 #define DEFAULT_AUDIOSINK_BUFFERSIZE 1200
 #define DEFAULT_AUDIOSINK_SAMPLERATE 44100
 
+// when the channel mask isn't known, use the channel count to derive a mask in AudioSink::open()
+#define CHANNEL_MASK_USE_CHANNEL_ORDER 0
 
 // callback mechanism for passing messages to MediaPlayer object
 typedef void (*notify_callback_f)(void* cookie,
@@ -84,8 +93,8 @@
         // If no callback is specified, use the "write" API below to submit
         // audio data.
         virtual status_t    open(
-                uint32_t sampleRate, int channelCount,
-                int format=AUDIO_FORMAT_PCM_16_BIT,
+                uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
+                audio_format_t format=AUDIO_FORMAT_PCM_16_BIT,
                 int bufferCount=DEFAULT_AUDIOSINK_BUFFERCOUNT,
                 AudioCallback cb = NULL,
                 void *cookie = NULL) = 0;
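A sketch of opening a sink when only the channel count is known, letting the sink derive a mask via CHANNEL_MASK_USE_CHANNEL_ORDER as described above (the cast is defensive; the macro expands to 0, and the rate/format are illustrative):

    #include <media/MediaPlayerInterface.h>

    using namespace android;

    status_t openStereoSink(const sp<MediaPlayerBase::AudioSink>& sink)
    {
        return sink->open(44100 /* sampleRate */,
                          2     /* channelCount */,
                          (audio_channel_mask_t) CHANNEL_MASK_USE_CHANNEL_ORDER,
                          AUDIO_FORMAT_PCM_16_BIT);
    }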
@@ -96,6 +105,9 @@
         virtual void        flush() = 0;
         virtual void        pause() = 0;
         virtual void        close() = 0;
+
+        virtual status_t    setPlaybackRatePermille(int32_t rate) { return INVALID_OPERATION; }
+        virtual bool        needsTrailingPadding() { return true; }
     };
 
                         MediaPlayerBase() : mCookie(0), mNotify(0) {}
@@ -136,6 +148,14 @@
     virtual status_t    setParameter(int key, const Parcel &request) = 0;
     virtual status_t    getParameter(int key, Parcel *reply) = 0;
 
+    // Right now, only the AAH TX player supports this functionality.  For now,
+    // provide a default implementation which indicates a lack of support for
+    // this functionality to make life easier for all of the other media player
+    // maintainers out there.
+    virtual status_t setRetransmitEndpoint(const struct sockaddr_in* endpoint) {
+        return INVALID_OPERATION;
+    }
+
     // Invoke a generic method on the player by using opaque parcels
     // for the request and reply.
     //
@@ -199,7 +219,7 @@
     virtual             ~MediaPlayerHWInterface() {}
     virtual bool        hardwareOutput() { return true; }
     virtual status_t    setVolume(float leftVolume, float rightVolume) = 0;
-    virtual status_t    setAudioStreamType(int streamType) = 0;
+    virtual status_t    setAudioStreamType(audio_stream_type_t streamType) = 0;
 };
 
 }; // namespace android
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index 250f267..9fc962c 100644
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -516,4 +516,3 @@
 }; // namespace android
 
 #endif // ANDROID_MEDIAPROFILES_H
-
diff --git a/include/media/MemoryLeakTrackUtil.h b/include/media/MemoryLeakTrackUtil.h
index 290b748..d2618aa 100644
--- a/include/media/MemoryLeakTrackUtil.h
+++ b/include/media/MemoryLeakTrackUtil.h
@@ -1,4 +1,3 @@
-
 /*
  * Copyright 2011, The Android Open Source Project
  *
@@ -19,7 +18,7 @@
 
 namespace android {
 /*
- * Dump the memory adddress of the calling process to the given fd.
+ * Dump the memory address of the calling process to the given fd.
  */
 extern void dumpMemoryAddresses(int fd);
 
diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h
index 1ad1f26..df0c97e 100644
--- a/include/media/ToneGenerator.h
+++ b/include/media/ToneGenerator.h
@@ -151,10 +151,10 @@
         NUM_SUP_TONES = LAST_SUP_TONE-FIRST_SUP_TONE+1
     };
 
-    ToneGenerator(int streamType, float volume, bool threadCanCallJava = false);
+    ToneGenerator(audio_stream_type_t streamType, float volume, bool threadCanCallJava = false);
     ~ToneGenerator();
 
-    bool startTone(int toneType, int durationMs = -1);
+    bool startTone(tone_type toneType, int durationMs = -1);
     void stopTone();
 
     bool isInited() { return (mState == TONE_IDLE)?false:true;}
@@ -266,7 +266,7 @@
     Mutex mCbkCondLock; // Mutex associated to mWaitCbkCond
     Condition mWaitCbkCond; // condition enabling interface to wait for audio callback completion after a change is requested
     float mVolume;  // Volume applied to audio track
-    int mStreamType; // Audio stream used for output
+    audio_stream_type_t mStreamType; // Audio stream used for output
     unsigned int mProcessSize;  // Size of audio blocks generated at a time by audioCallback() (in PCM frames).
 
     bool initAudioTrack();
@@ -274,7 +274,7 @@
     bool prepareWave();
     unsigned int numWaves(unsigned int segmentIdx);
     void clearWaveGens();
-    int getToneForRegion(int toneType);
+    tone_type getToneForRegion(tone_type toneType);
 
     // WaveGenerator generates a single sine wave
     class WaveGenerator {
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
index 5d2c874..60fa15b 100644
--- a/include/media/Visualizer.h
+++ b/include/media/Visualizer.h
@@ -66,8 +66,8 @@
      * See AudioEffect constructor for details on parameters.
      */
                         Visualizer(int32_t priority = 0,
-                                   effect_callback_t cbf = 0,
-                                   void* user = 0,
+                                   effect_callback_t cbf = NULL,
+                                   void* user = NULL,
                                    int sessionId = 0);
 
                         ~Visualizer();
@@ -143,7 +143,7 @@
     void periodicCapture();
     uint32_t initCaptureSize();
 
-    Mutex mLock;
+    Mutex mCaptureLock;
     uint32_t mCaptureRate;
     uint32_t mCaptureSize;
     uint32_t mSampleRate;
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index e6a0cc5..a68ab4e 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_MEDIAPLAYER_H
 #define ANDROID_MEDIAPLAYER_H
 
+#include <arpa/inet.h>
+
 #include <binder/IMemory.h>
 #include <media/IMediaPlayerClient.h>
 #include <media/IMediaPlayer.h>
@@ -94,6 +96,9 @@
 enum media_info_type {
     // 0xx
     MEDIA_INFO_UNKNOWN = 1,
+    // The player was started because it was used as the next player for another
+    // player, which just completed playback
+    MEDIA_INFO_STARTED_AS_NEXT = 2,
     // 7xx
     // The video is too complex for the decoder: it can't decode frames fast
     // enough. Possibly only the audio plays fine at this stage.
@@ -115,6 +120,9 @@
     MEDIA_INFO_NOT_SEEKABLE = 801,
     // New media metadata is available.
     MEDIA_INFO_METADATA_UPDATE = 802,
+
+    // 9xx
+    MEDIA_INFO_TIMED_TEXT_ERROR = 900,
 };
 
 
@@ -135,9 +143,6 @@
 // The same enum space is used for both set and get, in case there are future keys that
 // can be both set and get.  But as of now, all parameters are either set only or get only.
 enum media_parameter_keys {
-    KEY_PARAMETER_TIMED_TEXT_TRACK_INDEX = 1000,                // set only
-    KEY_PARAMETER_TIMED_TEXT_ADD_OUT_OF_BAND_SOURCE = 1001,     // set only
-
     // Streaming/buffering parameters
     KEY_PARAMETER_CACHE_STAT_COLLECT_FREQ_MS = 1100,            // set only
 
@@ -145,6 +150,26 @@
     // audio track, or zero for error (e.g. no audio track) or unknown.
     KEY_PARAMETER_AUDIO_CHANNEL_COUNT = 1200,                   // get only
 
+    // Playback rate expressed in permille (1000 is normal speed), saved as int32_t, with negative
+    // values used for rewinding or reverse playback.
+    KEY_PARAMETER_PLAYBACK_RATE_PERMILLE = 1300,                // set only
+};
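A sketch of requesting half-speed playback through the generic setParameter() path declared further down in this header; the single-int32 payload layout is an assumption here:

    #include <binder/Parcel.h>
    #include <media/mediaplayer.h>

    using namespace android;

    status_t setHalfSpeed(const sp<MediaPlayer>& player)
    {
        Parcel request;
        request.writeInt32(500);                         // 1000 == normal speed
        return player->setParameter(KEY_PARAMETER_PLAYBACK_RATE_PERMILLE, request);
    }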
+
+// Keep INVOKE_ID_* in sync with MediaPlayer.java.
+enum media_player_invoke_ids {
+    INVOKE_ID_GET_TRACK_INFO = 1,
+    INVOKE_ID_ADD_EXTERNAL_SOURCE = 2,
+    INVOKE_ID_ADD_EXTERNAL_SOURCE_FD = 3,
+    INVOKE_ID_SELECT_TRACK = 4,
+    INVOKE_ID_UNSELECT_TRACK = 5,
+};
+
+// Keep MEDIA_TRACK_TYPE_* in sync with MediaPlayer.java.
+enum media_track_type {
+    MEDIA_TRACK_TYPE_UNKNOWN = 0,
+    MEDIA_TRACK_TYPE_VIDEO = 1,
+    MEDIA_TRACK_TYPE_AUDIO = 2,
+    MEDIA_TRACK_TYPE_TIMEDTEXT = 3,
 };
 
 // ----------------------------------------------------------------------------
@@ -185,13 +210,13 @@
             status_t        getCurrentPosition(int *msec);
             status_t        getDuration(int *msec);
             status_t        reset();
-            status_t        setAudioStreamType(int type);
+            status_t        setAudioStreamType(audio_stream_type_t type);
             status_t        setLooping(int loop);
             bool            isLooping();
             status_t        setVolume(float leftVolume, float rightVolume);
             void            notify(int msg, int ext1, int ext2, const Parcel *obj = NULL);
-    static  sp<IMemory>     decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, int* pFormat);
-    static  sp<IMemory>     decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, int* pFormat);
+    static  sp<IMemory>     decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat);
+    static  sp<IMemory>     decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat);
             status_t        invoke(const Parcel& request, Parcel *reply);
             status_t        setMetadataFilter(const Parcel& filter);
             status_t        getMetadata(bool update_only, bool apply_filter, Parcel *metadata);
@@ -201,6 +226,8 @@
             status_t        attachAuxEffect(int effectId);
             status_t        setParameter(int key, const Parcel& request);
             status_t        getParameter(int key, Parcel* reply);
+            status_t        setRetransmitEndpoint(const char* addrString, uint16_t port);
+            status_t        setNextMediaPlayer(const sp<MediaPlayer>& player);
 
 private:
             void            clear_l();
@@ -209,6 +236,7 @@
             status_t        getDuration_l(int *msec);
             status_t        attachNewPlayer(const sp<IMediaPlayer>& player);
             status_t        reset_l();
+            status_t        doSetRetransmitEndpoint(const sp<IMediaPlayer>& player);
 
     sp<IMediaPlayer>            mPlayer;
     thread_id_t                 mLockThreadId;
@@ -223,7 +251,7 @@
     int                         mSeekPosition;
     bool                        mPrepareSync;
     status_t                    mPrepareStatus;
-    int                         mStreamType;
+    audio_stream_type_t         mStreamType;
     bool                        mLoop;
     float                       mLeftVolume;
     float                       mRightVolume;
@@ -231,6 +259,8 @@
     int                         mVideoHeight;
     int                         mAudioSessionId;
     float                       mSendLevel;
+    struct sockaddr_in          mRetransmitEndpoint;
+    bool                        mRetransmitEndpointValid;
 };
 
 }; // namespace android
diff --git a/include/media/stagefright/AACWriter.h b/include/media/stagefright/AACWriter.h
index fa3ab8a..49397ee 100644
--- a/include/media/stagefright/AACWriter.h
+++ b/include/media/stagefright/AACWriter.h
@@ -34,7 +34,7 @@
     virtual status_t addSource(const sp<MediaSource> &source);
     virtual bool reachedEOS();
     virtual status_t start(MetaData *params = NULL);
-    virtual status_t stop();
+    virtual status_t stop() { return reset(); }
     virtual status_t pause();
 
 protected:
@@ -66,6 +66,7 @@
     bool exceedsFileSizeLimit();
     bool exceedsFileDurationLimit();
     status_t writeAdtsHeader(uint32_t frameLength);
+    status_t reset();
 
     DISALLOW_EVIL_CONSTRUCTORS(AACWriter);
 };
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 3963d9c..fa1a416 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -22,6 +22,7 @@
 #include <android/native_window.h>
 #include <media/IOMX.h>
 #include <media/stagefright/foundation/AHierarchicalStateMachine.h>
+#include <OMX_Audio.h>
 
 namespace android {
 
@@ -37,6 +38,9 @@
         kWhatFlushCompleted      = 'fcom',
         kWhatOutputFormatChanged = 'outC',
         kWhatError               = 'erro',
+        kWhatComponentAllocated  = 'cAll',
+        kWhatComponentConfigured = 'cCon',
+        kWhatBuffersAllocated    = 'allc',
     };
 
     ACodec();
@@ -45,7 +49,11 @@
     void initiateSetup(const sp<AMessage> &msg);
     void signalFlush();
     void signalResume();
-    void initiateShutdown();
+    void initiateShutdown(bool keepComponentAllocated = false);
+
+    void initiateAllocateComponent(const sp<AMessage> &msg);
+    void initiateConfigureComponent(const sp<AMessage> &msg);
+    void initiateStart();
 
 protected:
     virtual ~ACodec();
@@ -53,6 +61,7 @@
 private:
     struct BaseState;
     struct UninitializedState;
+    struct LoadedState;
     struct LoadedToIdleState;
     struct IdleToExecutingState;
     struct ExecutingState;
@@ -70,6 +79,9 @@
         kWhatFlush                   = 'flus',
         kWhatResume                  = 'resm',
         kWhatDrainDeferredMessages   = 'drai',
+        kWhatAllocateComponent       = 'allo',
+        kWhatConfigureComponent      = 'conf',
+        kWhatStart                   = 'star',
     };
 
     enum {
@@ -96,6 +108,7 @@
     sp<AMessage> mNotify;
 
     sp<UninitializedState> mUninitializedState;
+    sp<LoadedState> mLoadedState;
     sp<LoadedToIdleState> mLoadedToIdleState;
     sp<IdleToExecutingState> mIdleToExecutingState;
     sp<ExecutingState> mExecutingState;
@@ -105,6 +118,7 @@
     sp<FlushingState> mFlushingState;
 
     AString mComponentName;
+    uint32_t mQuirks;
     sp<IOMX> mOMX;
     IOMX::node_id mNode;
     sp<MemoryDealer> mDealer[2];
@@ -118,6 +132,13 @@
     List<sp<AMessage> > mDeferredQueue;
 
     bool mSentFormat;
+    bool mIsEncoder;
+
+    bool mShutdownInProgress;
+
+    // If "mKeepComponentAllocated" is true, we only transition back to the
+    // Loaded state and do not release the component instance.
+    bool mKeepComponentAllocated;
 
     status_t allocateBuffersOnPort(OMX_U32 portIndex);
     status_t freeBuffersOnPort(OMX_U32 portIndex);
@@ -132,8 +153,8 @@
             uint32_t portIndex, IOMX::buffer_id bufferID,
             ssize_t *index = NULL);
 
-    void setComponentRole(bool isEncoder, const char *mime);
-    void configureCodec(const char *mime, const sp<AMessage> &msg);
+    status_t setComponentRole(bool isEncoder, const char *mime);
+    status_t configureCodec(const char *mime, const sp<AMessage> &msg);
 
     status_t setVideoPortFormatType(
             OMX_U32 portIndex,
@@ -145,20 +166,37 @@
     status_t setupVideoDecoder(
             const char *mime, int32_t width, int32_t height);
 
+    status_t setupVideoEncoder(
+            const char *mime, const sp<AMessage> &msg);
+
     status_t setVideoFormatOnPort(
             OMX_U32 portIndex,
             int32_t width, int32_t height,
             OMX_VIDEO_CODINGTYPE compressionFormat);
 
-    status_t setupAACDecoder(int32_t numChannels, int32_t sampleRate);
-    status_t setupAMRDecoder(bool isWAMR);
-    status_t setupG711Decoder(int32_t numChannels);
+    status_t setupAACCodec(
+            bool encoder,
+            int32_t numChannels, int32_t sampleRate, int32_t bitRate);
+
+    status_t selectAudioPortFormat(
+            OMX_U32 portIndex, OMX_AUDIO_CODINGTYPE desiredFormat);
+
+    status_t setupAMRCodec(bool encoder, bool isWAMR, int32_t bitRate);
+    status_t setupG711Codec(bool encoder, int32_t numChannels);
 
     status_t setupRawAudioFormat(
             OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels);
 
     status_t setMinBufferSize(OMX_U32 portIndex, size_t size);
 
+    status_t setupMPEG4EncoderParameters(const sp<AMessage> &msg);
+    status_t setupH263EncoderParameters(const sp<AMessage> &msg);
+    status_t setupAVCEncoderParameters(const sp<AMessage> &msg);
+
+    status_t verifySupportForProfileAndLevel(int32_t profile, int32_t level);
+    status_t configureBitrate(int32_t bitrate);
+    status_t setupErrorCorrectionParameters();
+
     status_t initNativeWindow();
 
     // Returns true iff all buffers on the given port have status OWNED_BY_US.
@@ -173,7 +211,9 @@
 
     void sendFormatChange();
 
-    void signalError(OMX_ERRORTYPE error = OMX_ErrorUndefined);
+    void signalError(
+            OMX_ERRORTYPE error = OMX_ErrorUndefined,
+            status_t internalError = UNKNOWN_ERROR);
 
     DISALLOW_EVIL_CONSTRUCTORS(ACodec);
 };
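
initiateShutdown() gains a keepComponentAllocated flag; per the mKeepComponentAllocated comment, passing true only drops ACodec back to the Loaded state instead of freeing the OMX component, so a later configure can reuse it. A hypothetical caller (ACodec is normally driven internally, so this is a sketch only):

    #include <media/stagefright/ACodec.h>

    using namespace android;

    // Keep the component instance around for a quick reconfigure later.
    static void suspendCodec(const sp<ACodec> &codec) {
        codec->initiateShutdown(true /* keepComponentAllocated */);
    }

    // Default behaviour: release the component entirely.
    static void shutdownCodec(const sp<ACodec> &codec) {
        codec->initiateShutdown();
    }
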
diff --git a/include/media/stagefright/AMRWriter.h b/include/media/stagefright/AMRWriter.h
index 62d57b4..392f968 100644
--- a/include/media/stagefright/AMRWriter.h
+++ b/include/media/stagefright/AMRWriter.h
@@ -37,7 +37,7 @@
     virtual status_t addSource(const sp<MediaSource> &source);
     virtual bool reachedEOS();
     virtual status_t start(MetaData *params = NULL);
-    virtual status_t stop();
+    virtual status_t stop() { return reset(); }
     virtual status_t pause();
 
 protected:
@@ -60,6 +60,7 @@
     status_t threadFunc();
     bool exceedsFileSizeLimit();
     bool exceedsFileDurationLimit();
+    status_t reset();
 
     AMRWriter(const AMRWriter &);
     AMRWriter &operator=(const AMRWriter &);
diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h
index 0b79324..70c47ae 100644
--- a/include/media/stagefright/AudioPlayer.h
+++ b/include/media/stagefright/AudioPlayer.h
@@ -64,6 +64,8 @@
     bool isSeeking();
     bool reachedEOS(status_t *finalStatus);
 
+    status_t setPlaybackRatePermille(int32_t ratePermille);
+
 private:
     friend class VideoEditorAudioPlayer;
     sp<MediaSource> mSource;
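
setPlaybackRatePermille() expresses the playback rate in parts per thousand; the assumption below that 1000 means normal speed follows from the name only, nothing in this header spells it out.

    #include <media/stagefright/AudioPlayer.h>

    using namespace android;

    static void setTimeStretch(AudioPlayer *player, float speed) {
        // e.g. speed = 0.5f -> 500, 1.0f -> 1000, 2.0f -> 2000 (assumed scale).
        player->setPlaybackRatePermille((int32_t)(speed * 1000));
    }
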
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 19bd31b..f5466e8 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -34,13 +34,13 @@
     // Note that the "channels" parameter is _not_ the number of channels,
     // but a bitmask of audio_channels_t constants.
     AudioSource(
-            int inputSource, uint32_t sampleRate,
+            audio_source_t inputSource, uint32_t sampleRate,
             uint32_t channels = AUDIO_CHANNEL_IN_MONO);
 
     status_t initCheck() const;
 
     virtual status_t start(MetaData *params = NULL);
-    virtual status_t stop();
+    virtual status_t stop() { return reset(); }
     virtual sp<MetaData> getFormat();
 
     // Returns the maximum amplitude since last call.
@@ -95,8 +95,10 @@
         int32_t startFrame, int32_t rampDurationFrames,
         uint8_t *data,   size_t bytes);
 
+    void queueInputBuffer_l(MediaBuffer *buffer, int64_t timeUs);
     void releaseQueuedFrames_l();
     void waitOutstandingEncodingFrames_l();
+    status_t reset();
 
     AudioSource(const AudioSource &);
     AudioSource &operator=(const AudioSource &);
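
The constructor now takes a typed audio_source_t, and as the comment above stresses, the channels argument is a mask, not a count. A minimal sketch; AUDIO_SOURCE_MIC and AUDIO_CHANNEL_IN_STEREO come from <system/audio.h> and are only illustrative choices.

    #include <media/stagefright/AudioSource.h>
    #include <system/audio.h>
    #include <utils/Errors.h>

    using namespace android;

    static sp<AudioSource> makeMicSource() {
        sp<AudioSource> source = new AudioSource(
                AUDIO_SOURCE_MIC, 44100 /* Hz */, AUDIO_CHANNEL_IN_STEREO);
        if (source->initCheck() != OK) {
            return NULL;    // the underlying audio input could not be opened
        }
        return source;
    }
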
diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h
index 446720b..5a35358 100644
--- a/include/media/stagefright/CameraSource.h
+++ b/include/media/stagefright/CameraSource.h
@@ -79,7 +79,7 @@
     virtual ~CameraSource();
 
     virtual status_t start(MetaData *params = NULL);
-    virtual status_t stop();
+    virtual status_t stop() { return reset(); }
     virtual status_t read(
             MediaBuffer **buffer, const ReadOptions *options = NULL);
 
@@ -163,7 +163,6 @@
                  bool storeMetaDataInVideoBuffers);
 
     virtual void startCameraRecording();
-    virtual void stopCameraRecording();
     virtual void releaseRecordingFrame(const sp<IMemory>& frame);
 
     // Returns true if need to skip the current frame.
@@ -220,7 +219,9 @@
     status_t checkFrameRate(const CameraParameters& params,
                     int32_t frameRate);
 
+    void stopCameraRecording();
     void releaseCamera();
+    status_t reset();
 
     CameraSource(const CameraSource &);
     CameraSource &operator=(const CameraSource &);
diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h
index b060691..0936da2 100644
--- a/include/media/stagefright/CameraSourceTimeLapse.h
+++ b/include/media/stagefright/CameraSourceTimeLapse.h
@@ -121,9 +121,6 @@
     // Wrapper over CameraSource::read() to implement quick stop.
     virtual status_t read(MediaBuffer **buffer, const ReadOptions *options = NULL);
 
-    // For video camera case, just stops the camera's video recording.
-    virtual void stopCameraRecording();
-
     // mSkipCurrentFrame is set to true in dataCallbackTimestamp() if the current
     // frame needs to be skipped and this function just returns the value of mSkipCurrentFrame.
     virtual bool skipCurrentFrame(int64_t timestampUs);
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index 713af92..00d583e 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -81,7 +81,7 @@
     static void RegisterDefaultSniffers();
 
     // for DRM
-    virtual sp<DecryptHandle> DrmInitialization() {
+    virtual sp<DecryptHandle> DrmInitialization(const char *mime = NULL) {
         return NULL;
     }
     virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client) {};
diff --git a/include/media/stagefright/FileSource.h b/include/media/stagefright/FileSource.h
index 6cf86dc..d994cb3 100644
--- a/include/media/stagefright/FileSource.h
+++ b/include/media/stagefright/FileSource.h
@@ -38,7 +38,7 @@
 
     virtual status_t getSize(off64_t *size);
 
-    virtual sp<DecryptHandle> DrmInitialization();
+    virtual sp<DecryptHandle> DrmInitialization(const char *mime);
 
     virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client);
 
diff --git a/include/media/stagefright/HardwareAPI.h b/include/media/stagefright/HardwareAPI.h
deleted file mode 100644
index 32eed3f..0000000
--- a/include/media/stagefright/HardwareAPI.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef HARDWARE_API_H_
-
-#define HARDWARE_API_H_
-
-#include <media/stagefright/OMXPluginBase.h>
-#include <ui/android_native_buffer.h>
-#include <utils/RefBase.h>
-
-#include <OMX_Component.h>
-
-namespace android {
-
-// A pointer to this struct is passed to the OMX_SetParameter when the extension
-// index for the 'OMX.google.android.index.enableAndroidNativeBuffers' extension
-// is given.
-//
-// When Android native buffer use is disabled for a port (the default state),
-// the OMX node should operate as normal, and expect UseBuffer calls to set its
-// buffers.  This is the mode that will be used when CPU access to the buffer is
-// required.
-//
-// When Android native buffer use has been enabled for a given port, the video
-// color format for the port is to be interpreted as an Android pixel format
-// rather than an OMX color format.  The node should then expect to receive
-// UseAndroidNativeBuffer calls (via OMX_SetParameter) rather than UseBuffer
-// calls for that port.
-struct EnableAndroidNativeBuffersParams {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_BOOL enable;
-};
-
-// A pointer to this struct is passed to OMX_SetParameter() when the extension
-// index "OMX.google.android.index.storeMetaDataInBuffers"
-// is given.
-//
-// When meta data is stored in the video buffers passed between OMX clients
-// and OMX components, interpretation of the buffer data is up to the
-// buffer receiver, and the data may or may not be the actual video data, but
-// some information helpful for the receiver to locate the actual data.
-// The buffer receiver thus needs to know how to interpret what is stored
-// in these buffers, with mechanisms pre-determined externally. How to
-// interpret the meta data is outside of the scope of this method.
-//
-// Currently, this is specifically used to pass meta data from video source
-// (camera component, for instance) to video encoder to avoid memcpying of
-// input video frame data. To do this, bStoreMetaDta is set to OMX_TRUE.
-// If bStoreMetaData is set to false, real YUV frame data will be stored
-// in the buffers. In addition, if no OMX_SetParameter() call is made
-// with the corresponding extension index, real YUV data is stored
-// in the buffers.
-struct StoreMetaDataInBuffersParams {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_BOOL bStoreMetaData;
-};
-
-// A pointer to this struct is passed to OMX_SetParameter when the extension
-// index for the 'OMX.google.android.index.useAndroidNativeBuffer' extension is
-// given.  This call will only be performed if a prior call was made with the
-// 'OMX.google.android.index.enableAndroidNativeBuffers' extension index,
-// enabling use of Android native buffers.
-struct UseAndroidNativeBufferParams {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_PTR pAppPrivate;
-    OMX_BUFFERHEADERTYPE **bufferHeader;
-    const sp<ANativeWindowBuffer>& nativeBuffer;
-};
-
-// A pointer to this struct is passed to OMX_GetParameter when the extension
-// index for the 'OMX.google.android.index.getAndroidNativeBufferUsage'
-// extension is given.  The usage bits returned from this query will be used to
-// allocate the Gralloc buffers that get passed to the useAndroidNativeBuffer
-// command.
-struct GetAndroidNativeBufferUsageParams {
-    OMX_U32 nSize;              // IN
-    OMX_VERSIONTYPE nVersion;   // IN
-    OMX_U32 nPortIndex;         // IN
-    OMX_U32 nUsage;             // OUT
-};
-
-// An enum OMX_COLOR_FormatAndroidOpaque to indicate an opaque colorformat
-// is declared in media/stagefright/openmax/OMX_IVCommon.h
-// This will inform the encoder that the actual
-// colorformat will be relayed by the GRalloc Buffers.
-// OMX_COLOR_FormatAndroidOpaque  = 0x7F000001,
-
-
-}  // namespace android
-
-extern android::OMXPluginBase *createOMXPlugin();
-
-#endif  // HARDWARE_API_H_
diff --git a/include/media/stagefright/MPEG2TSWriter.h b/include/media/stagefright/MPEG2TSWriter.h
index e4c1c49..a7c9ecf 100644
--- a/include/media/stagefright/MPEG2TSWriter.h
+++ b/include/media/stagefright/MPEG2TSWriter.h
@@ -37,7 +37,7 @@
 
     virtual status_t addSource(const sp<MediaSource> &source);
     virtual status_t start(MetaData *param = NULL);
-    virtual status_t stop();
+    virtual status_t stop() { return reset(); }
     virtual status_t pause();
     virtual bool reachedEOS();
     virtual status_t dump(int fd, const Vector<String16>& args);
@@ -78,6 +78,7 @@
     void writeAccessUnit(int32_t sourceIndex, const sp<ABuffer> &buffer);
 
     ssize_t internalWrite(const void *data, size_t size);
+    status_t reset();
 
     DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSWriter);
 };
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index 77166ed..0409b30 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -37,7 +37,7 @@
 
     virtual status_t addSource(const sp<MediaSource> &source);
     virtual status_t start(MetaData *param = NULL);
-    virtual status_t stop();
+    virtual status_t stop() { return reset(); }
     virtual status_t pause();
     virtual bool reachedEOS();
     virtual status_t dump(int fd, const Vector<String16>& args);
@@ -184,6 +184,7 @@
     void writeLongitude(int degreex10000);
     void sendSessionSummary();
     void release();
+    status_t reset();
 
     MPEG4Writer(const MPEG4Writer &);
     MPEG4Writer &operator=(const MPEG4Writer &);
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
new file mode 100644
index 0000000..72ac56a
--- /dev/null
+++ b/include/media/stagefright/MediaCodec.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_CODEC_H_
+
+#define MEDIA_CODEC_H_
+
+#include <gui/ISurfaceTexture.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+struct ABuffer;
+struct ACodec;
+struct AMessage;
+struct SoftwareRenderer;
+struct SurfaceTextureClient;
+
+struct MediaCodec : public AHandler {
+    enum ConfigureFlags {
+        CONFIGURE_FLAG_ENCODE   = 1,
+    };
+
+    enum BufferFlags {
+        BUFFER_FLAG_SYNCFRAME   = 1,
+        BUFFER_FLAG_CODECCONFIG = 2,
+        BUFFER_FLAG_EOS         = 4,
+    };
+
+    static sp<MediaCodec> CreateByType(
+            const sp<ALooper> &looper, const char *mime, bool encoder);
+
+    static sp<MediaCodec> CreateByComponentName(
+            const sp<ALooper> &looper, const char *name);
+
+    status_t configure(
+            const sp<AMessage> &format,
+            const sp<SurfaceTextureClient> &nativeWindow,
+            uint32_t flags);
+
+    status_t start();
+
+    // Returns to a state in which the component remains allocated but
+    // unconfigured.
+    status_t stop();
+
+    // Client MUST call release before releasing final reference to this
+    // object.
+    status_t release();
+
+    status_t flush();
+
+    status_t queueInputBuffer(
+            size_t index,
+            size_t offset,
+            size_t size,
+            int64_t presentationTimeUs,
+            uint32_t flags);
+
+    status_t dequeueInputBuffer(size_t *index, int64_t timeoutUs = 0ll);
+
+    status_t dequeueOutputBuffer(
+            size_t *index,
+            size_t *offset,
+            size_t *size,
+            int64_t *presentationTimeUs,
+            uint32_t *flags,
+            int64_t timeoutUs = 0ll);
+
+    status_t renderOutputBufferAndRelease(size_t index);
+    status_t releaseOutputBuffer(size_t index);
+
+    status_t getOutputFormat(sp<AMessage> *format) const;
+
+    status_t getInputBuffers(Vector<sp<ABuffer> > *buffers) const;
+    status_t getOutputBuffers(Vector<sp<ABuffer> > *buffers) const;
+
+protected:
+    virtual ~MediaCodec();
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum State {
+        UNINITIALIZED,
+        INITIALIZING,
+        INITIALIZED,
+        CONFIGURING,
+        CONFIGURED,
+        STARTING,
+        STARTED,
+        FLUSHING,
+        STOPPING,
+        RELEASING,
+    };
+
+    enum {
+        kPortIndexInput         = 0,
+        kPortIndexOutput        = 1,
+    };
+
+    enum {
+        kWhatInit                       = 'init',
+        kWhatConfigure                  = 'conf',
+        kWhatStart                      = 'strt',
+        kWhatStop                       = 'stop',
+        kWhatRelease                    = 'rele',
+        kWhatDequeueInputBuffer         = 'deqI',
+        kWhatQueueInputBuffer           = 'queI',
+        kWhatDequeueOutputBuffer        = 'deqO',
+        kWhatReleaseOutputBuffer        = 'relO',
+        kWhatGetBuffers                 = 'getB',
+        kWhatFlush                      = 'flus',
+        kWhatGetOutputFormat            = 'getO',
+        kWhatDequeueInputTimedOut       = 'dITO',
+        kWhatDequeueOutputTimedOut      = 'dOTO',
+        kWhatCodecNotify                = 'codc',
+    };
+
+    enum {
+        kFlagIsSoftwareCodec            = 1,
+        kFlagOutputFormatChanged        = 2,
+        kFlagOutputBuffersChanged       = 4,
+        kFlagStickyError                = 8,
+        kFlagDequeueInputPending        = 16,
+        kFlagDequeueOutputPending       = 32,
+    };
+
+    struct BufferInfo {
+        void *mBufferID;
+        sp<ABuffer> mData;
+        sp<AMessage> mNotify;
+        bool mOwnedByClient;
+    };
+
+    State mState;
+    sp<ALooper> mLooper;
+    sp<ALooper> mCodecLooper;
+    sp<ACodec> mCodec;
+    uint32_t mReplyID;
+    uint32_t mFlags;
+    sp<SurfaceTextureClient> mNativeWindow;
+    SoftwareRenderer *mSoftRenderer;
+    sp<AMessage> mOutputFormat;
+
+    List<size_t> mAvailPortBuffers[2];
+    Vector<BufferInfo> mPortBuffers[2];
+
+    int32_t mDequeueInputTimeoutGeneration;
+    uint32_t mDequeueInputReplyID;
+
+    int32_t mDequeueOutputTimeoutGeneration;
+    uint32_t mDequeueOutputReplyID;
+
+    MediaCodec(const sp<ALooper> &looper);
+
+    static status_t PostAndAwaitResponse(
+            const sp<AMessage> &msg, sp<AMessage> *response);
+
+    status_t init(const char *name, bool nameIsType, bool encoder);
+
+    void setState(State newState);
+    void returnBuffersToCodec();
+    void returnBuffersToCodecOnPort(int32_t portIndex);
+    size_t updateBuffers(int32_t portIndex, const sp<AMessage> &msg);
+    status_t onQueueInputBuffer(const sp<AMessage> &msg);
+    status_t onReleaseOutputBuffer(const sp<AMessage> &msg);
+    ssize_t dequeuePortBuffer(int32_t portIndex);
+
+    bool handleDequeueInputBuffer(uint32_t replyID, bool newRequest = false);
+    bool handleDequeueOutputBuffer(uint32_t replyID, bool newRequest = false);
+    void cancelPendingDequeueOperations();
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaCodec);
+};
+
+}  // namespace android
+
+#endif  // MEDIA_CODEC_H_
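
MediaCodec.h introduces a native buffer-queue style codec API (configure, start, dequeue/queue input, dequeue/release output). A rough decode skeleton follows; the format keys ("mime", "sample-rate", "channel-count"), the AAC mime string, and the assumption that dequeueOutputBuffer() reports INFO_FORMAT_CHANGED / INFO_OUTPUT_BUFFERS_CHANGED through its return value are not spelled out in this header and should be treated as illustrative.

    #include <media/stagefright/MediaCodec.h>
    #include <media/stagefright/MediaErrors.h>
    #include <media/stagefright/foundation/ABuffer.h>
    #include <media/stagefright/foundation/ALooper.h>
    #include <media/stagefright/foundation/AMessage.h>
    #include <utils/Errors.h>

    using namespace android;

    static void decodeSketch() {
        sp<ALooper> looper = new ALooper;
        looper->start();

        sp<MediaCodec> codec = MediaCodec::CreateByType(
                looper, "audio/mp4a-latm", false /* encoder */);

        sp<AMessage> format = new AMessage;
        format->setString("mime", "audio/mp4a-latm");   // key names assumed
        format->setInt32("sample-rate", 44100);
        format->setInt32("channel-count", 2);
        codec->configure(format, NULL /* nativeWindow */, 0 /* flags */);
        codec->start();

        Vector<sp<ABuffer> > inBuffers, outBuffers;
        codec->getInputBuffers(&inBuffers);
        codec->getOutputBuffers(&outBuffers);

        bool sawOutputEOS = false;
        while (!sawOutputEOS) {
            size_t index;
            if (codec->dequeueInputBuffer(&index, 10000ll) == OK) {
                // Fill inBuffers[index] with one access unit here; for brevity
                // this sketch immediately signals end of stream.
                codec->queueInputBuffer(
                        index, 0 /* offset */, 0 /* size */,
                        0ll /* presentationTimeUs */,
                        MediaCodec::BUFFER_FLAG_EOS);
            }

            size_t offset, size;
            int64_t timeUs;
            uint32_t flags;
            status_t err = codec->dequeueOutputBuffer(
                    &index, &offset, &size, &timeUs, &flags, 10000ll);
            if (err == OK) {
                // Consume outBuffers[index] (offset/size bytes), then return it.
                sawOutputEOS = (flags & MediaCodec::BUFFER_FLAG_EOS) != 0;
                codec->releaseOutputBuffer(index);
            } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
                codec->getOutputBuffers(&outBuffers);   // assumed signalling
            } else if (err == INFO_FORMAT_CHANGED) {
                sp<AMessage> newFormat;
                codec->getOutputFormat(&newFormat);
            }
        }

        codec->stop();
        codec->release();   // header requires release() before the last ref drops
    }
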
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
new file mode 100644
index 0000000..14dc1b8
--- /dev/null
+++ b/include/media/stagefright/MediaCodecList.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_CODEC_LIST_H_
+
+#define MEDIA_CODEC_LIST_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/AString.h>
+
+#include <sys/types.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+struct MediaCodecList {
+    static const MediaCodecList *getInstance();
+
+    ssize_t findCodecByType(
+            const char *type, bool encoder, size_t startIndex = 0) const;
+
+    ssize_t findCodecByName(const char *name) const;
+
+    const char *getCodecName(size_t index) const;
+    bool codecHasQuirk(size_t index, const char *quirkName) const;
+
+private:
+    enum Section {
+        SECTION_TOPLEVEL,
+        SECTION_DECODERS,
+        SECTION_DECODER,
+        SECTION_ENCODERS,
+        SECTION_ENCODER,
+    };
+
+    struct CodecInfo {
+        AString mName;
+        bool mIsEncoder;
+        uint32_t mTypes;
+        uint32_t mQuirks;
+    };
+
+    static MediaCodecList *sCodecList;
+
+    status_t mInitCheck;
+    Section mCurrentSection;
+    int32_t mDepth;
+
+    Vector<CodecInfo> mCodecInfos;
+    KeyedVector<AString, size_t> mCodecQuirks;
+    KeyedVector<AString, size_t> mTypes;
+
+    MediaCodecList();
+    ~MediaCodecList();
+
+    status_t initCheck() const;
+    void parseXMLFile(FILE *file);
+
+    static void StartElementHandlerWrapper(
+            void *me, const char *name, const char **attrs);
+
+    static void EndElementHandlerWrapper(void *me, const char *name);
+
+    void startElementHandler(const char *name, const char **attrs);
+    void endElementHandler(const char *name);
+
+    status_t addMediaCodecFromAttributes(bool encoder, const char **attrs);
+    void addMediaCodec(bool encoder, const char *name, const char *type = NULL);
+
+    status_t addQuirk(const char **attrs);
+    status_t addTypeFromAttributes(const char **attrs);
+    void addType(const char *name);
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaCodecList);
+};
+
+}  // namespace android
+
+#endif  // MEDIA_CODEC_LIST_H_
+
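
MediaCodecList exposes the component registry (parsed from an XML file) by name, type and quirk. A small lookup sketch; the mime string and quirk name are illustrative guesses rather than values defined in this header.

    #include <media/stagefright/MediaCodecList.h>

    using namespace android;

    static const char *findAacDecoderName() {
        const MediaCodecList *list = MediaCodecList::getInstance();
        ssize_t index = list->findCodecByType("audio/mp4a-latm", false /* encoder */);
        if (index < 0) {
            return NULL;
        }
        if (list->codecHasQuirk(index, "requires-allocate-on-input-ports")) {
            // Caller may need a different input-buffer allocation path
            // (assumed meaning of this quirk name).
        }
        return list->getCodecName(index);
    }
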
diff --git a/include/media/stagefright/MediaDebug.h b/include/media/stagefright/MediaDebug.h
deleted file mode 100644
index 2ca9667..0000000
--- a/include/media/stagefright/MediaDebug.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_DEBUG_H_
-
-#define MEDIA_DEBUG_H_
-
-#include <cutils/log.h>
-
-#define LITERAL_TO_STRING_INTERNAL(x)    #x
-#define LITERAL_TO_STRING(x) LITERAL_TO_STRING_INTERNAL(x)
-
-#define CHECK_EQ(x,y)                                                   \
-    LOG_ALWAYS_FATAL_IF(                                                \
-            (x) != (y),                                                 \
-            __FILE__ ":" LITERAL_TO_STRING(__LINE__) " " #x " != " #y)
-
-#define CHECK(x)                                                        \
-    LOG_ALWAYS_FATAL_IF(                                                \
-            !(x),                                                       \
-            __FILE__ ":" LITERAL_TO_STRING(__LINE__) " " #x)
-
-#endif  // MEDIA_DEBUG_H_
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 2eb259e..457d5d7 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -54,6 +54,7 @@
 extern const char *MEDIA_MIMETYPE_CONTAINER_WVM;
 
 extern const char *MEDIA_MIMETYPE_TEXT_3GPP;
+extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
 
 }  // namespace android
 
diff --git a/include/media/stagefright/MediaErrors.h b/include/media/stagefright/MediaErrors.h
index 21d00b8..dd3bf28 100644
--- a/include/media/stagefright/MediaErrors.h
+++ b/include/media/stagefright/MediaErrors.h
@@ -40,6 +40,7 @@
     // Not technically an error.
     INFO_FORMAT_CHANGED    = MEDIA_ERROR_BASE - 12,
     INFO_DISCONTINUITY     = MEDIA_ERROR_BASE - 13,
+    INFO_OUTPUT_BUFFERS_CHANGED = MEDIA_ERROR_BASE - 14,
 
     // The following constant values should be in sync with
     // drm/drm_framework_common.h
diff --git a/include/media/stagefright/MediaExtractor.h b/include/media/stagefright/MediaExtractor.h
index eb45237..94090ee 100644
--- a/include/media/stagefright/MediaExtractor.h
+++ b/include/media/stagefright/MediaExtractor.h
@@ -56,10 +56,10 @@
     virtual uint32_t flags() const;
 
     // for DRM
-    virtual void setDrmFlag(bool flag) {
+    void setDrmFlag(bool flag) {
         mIsDrm = flag;
     };
-    virtual bool getDrmFlag() {
+    bool getDrmFlag() {
         return mIsDrm;
     }
     virtual char* getDrmTrackInfo(size_t trackID, int *len) {
diff --git a/include/media/stagefright/MediaSourceSplitter.h b/include/media/stagefright/MediaSourceSplitter.h
deleted file mode 100644
index 568f4c2..0000000
--- a/include/media/stagefright/MediaSourceSplitter.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// This class provides a way to split a single media source into multiple sources.
-// The constructor takes in the real mediaSource and createClient() can then be
-// used to create multiple sources served from this real mediaSource.
-//
-// Usage:
-// - Create MediaSourceSplitter by passing in a real mediaSource from which
-// multiple duplicate channels are needed.
-// - Create a client using createClient() and use it as any other mediaSource.
-//
-// Note that multiple clients can be created using createClient() and
-// started/stopped in any order. MediaSourceSplitter stops the real source only
-// when all clients have been stopped.
-//
-// If a new client is created/started after some existing clients have already
-// started, the new client will start getting its read frames from the current
-// time.
-
-#ifndef MEDIA_SOURCE_SPLITTER_H_
-
-#define MEDIA_SOURCE_SPLITTER_H_
-
-#include <media/stagefright/MediaSource.h>
-#include <utils/threads.h>
-#include <utils/Vector.h>
-#include <utils/RefBase.h>
-
-namespace android {
-
-class MediaBuffer;
-class MetaData;
-
-class MediaSourceSplitter : public RefBase {
-public:
-    // Constructor
-    // mediaSource: The real mediaSource. The class keeps a reference to it to
-    // implement the various clients.
-    MediaSourceSplitter(sp<MediaSource> mediaSource);
-
-    ~MediaSourceSplitter();
-
-    // Creates a new client of base type MediaSource. Multiple clients can be
-    // created which get their data through the same real mediaSource. These
-    // clients can then be used like any other MediaSource, all of which provide
-    // data from the same real source.
-    sp<MediaSource> createClient();
-
-private:
-    // Total number of clients created through createClient().
-    int32_t mNumberOfClients;
-
-    // reference to the real MediaSource passed to the constructor.
-    sp<MediaSource> mSource;
-
-    // Stores pointer to the MediaBuffer read from the real MediaSource.
-    // All clients use this to implement the read() call.
-    MediaBuffer *mLastReadMediaBuffer;
-
-    // Status code for read from the real MediaSource. All clients return
-    // this for their read().
-    status_t mLastReadStatus;
-
-    // Boolean telling whether the real MediaSource has started.
-    bool mSourceStarted;
-
-    // List of booleans, one for each client, storing whether the corresponding
-    // client's start() has been called.
-    Vector<bool> mClientsStarted;
-
-    // Stores the number of clients which are currently started.
-    int32_t mNumberOfClientsStarted;
-
-    // Since different clients call read() asynchronously, we need to keep track
-    // of what data is currently read into the mLastReadMediaBuffer.
-    // mCurrentReadBit stores the bit for the current read buffer. This bit
-    // flips each time a new buffer is read from the source.
-    // mClientsDesiredReadBit stores the bit for the next desired read buffer
-    // for each client. This bit flips each time read() is completed for this
-    // client.
-    bool mCurrentReadBit;
-    Vector<bool> mClientsDesiredReadBit;
-
-    // Number of clients whose current read has been completed.
-    int32_t mNumberOfCurrentReads;
-
-    // Boolean telling whether the last read has been completed for all clients.
-    // The variable is reset to false each time buffer is read from the real
-    // source.
-    bool mLastReadCompleted;
-
-    // A global mutex for access to critical sections.
-    Mutex mLock;
-
-    // Condition variable for waiting on read from source to complete.
-    Condition mReadFromSourceCondition;
-
-    // Condition variable for waiting on all client's last read to complete.
-    Condition mAllReadsCompleteCondition;
-
-    // Functions used by Client to implement the MediaSource interface.
-
-    // If the real source has not been started yet by any client, starts it.
-    status_t start(int clientId, MetaData *params);
-
-    // Stops the real source after all clients have called stop().
-    status_t stop(int clientId);
-
-    // returns the real source's getFormat().
-    sp<MetaData> getFormat(int clientId);
-
-    // If the client's desired buffer has already been read into
-    // mLastReadMediaBuffer, points the buffer to that. Otherwise if it is the
-    // master client, reads the buffer from source or else waits for the master
-    // client to read the buffer and uses that.
-    status_t read(int clientId,
-            MediaBuffer **buffer, const MediaSource::ReadOptions *options = NULL);
-
-    // Not implemented right now.
-    status_t pause(int clientId);
-
-    // Function which reads a buffer from the real source into
-    // mLastReadMediaBuffer
-    void readFromSource_lock(const MediaSource::ReadOptions *options);
-
-    // Waits until read from the real source has been completed.
-    // _lock means that the function should be called when the thread has already
-    // obtained the lock for the mutex mLock.
-    void waitForReadFromSource_lock(int32_t clientId);
-
-    // Waits until all clients have read the current buffer in
-    // mLastReadCompleted.
-    void waitForAllClientsLastRead_lock(int32_t clientId);
-
-    // Each client calls this after it completes its read(). Once all clients
-    // have called this for the current buffer, the function calls
-    // mAllReadsCompleteCondition.broadcast() to signal the waiting clients.
-    void signalReadComplete_lock(bool readAborted);
-
-    // Make these constructors private.
-    MediaSourceSplitter();
-    MediaSourceSplitter(const MediaSourceSplitter &);
-    MediaSourceSplitter &operator=(const MediaSourceSplitter &);
-
-    // This class implements the MediaSource interface. Each client stores a
-    // reference to the parent MediaSourceSplitter and uses it to complete the
-    // various calls.
-    class Client : public MediaSource {
-    public:
-        // Constructor stores reference to the parent MediaSourceSplitter and it
-        // client id.
-        Client(sp<MediaSourceSplitter> splitter, int32_t clientId);
-
-        // MediaSource interface
-        virtual status_t start(MetaData *params = NULL);
-
-        virtual status_t stop();
-
-        virtual sp<MetaData> getFormat();
-
-        virtual status_t read(
-                MediaBuffer **buffer, const ReadOptions *options = NULL);
-
-        virtual status_t pause();
-
-    private:
-        // Refernce to the parent MediaSourceSplitter
-        sp<MediaSourceSplitter> mSplitter;
-
-        // Id of this client.
-        int32_t mClientId;
-    };
-
-    friend class Client;
-};
-
-}  // namespace android
-
-#endif  // MEDIA_SOURCE_SPLITTER_H_
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index 4cdee17..00b8679 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -43,6 +43,7 @@
     kKeyStride            = 'strd',  // int32_t
     kKeySliceHeight       = 'slht',  // int32_t
     kKeyChannelCount      = '#chn',  // int32_t
+    kKeyChannelMask       = 'chnm',  // int32_t
     kKeySampleRate        = 'srte',  // int32_t (audio sampling rate Hz)
     kKeyFrameRate         = 'frmR',  // int32_t (video frame rate fps)
     kKeyBitRate           = 'brte',  // int32_t (bps)
@@ -69,6 +70,8 @@
     kKeyThumbnailTime     = 'thbT',  // int64_t (usecs)
     kKeyTrackID           = 'trID',
     kKeyIsDRM             = 'idrm',  // int32_t (bool)
+    kKeyEncoderDelay      = 'encd',  // int32_t (frames)
+    kKeyEncoderPadding    = 'encp',  // int32_t (frames)
 
     kKeyAlbum             = 'albu',  // cstring
     kKeyArtist            = 'arti',  // cstring
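
The new MetaData keys are plain int32_t values; kKeyEncoderDelay/kKeyEncoderPadding presumably describe how many frames to trim at the start and end of a decoded stream (that reading is inferred from the names, not stated here). A tiny sketch with illustrative values:

    #include <media/stagefright/MetaData.h>

    using namespace android;

    static sp<MetaData> makeTrimMeta() {
        sp<MetaData> meta = new MetaData;
        meta->setInt32(kKeyEncoderDelay, 576 /* frames, illustrative */);
        meta->setInt32(kKeyEncoderPadding, 1024 /* frames, illustrative */);
        return meta;
    }
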
diff --git a/include/media/stagefright/MetadataBufferType.h b/include/media/stagefright/MetadataBufferType.h
deleted file mode 100644
index 4eaf8ac..0000000
--- a/include/media/stagefright/MetadataBufferType.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef METADATA_BUFFER_TYPE_H
-#define METADATA_BUFFER_TYPE_H
-
-#ifdef __cplusplus
-extern "C" {
-namespace android {
-#endif
-
-/*
- * MetadataBufferType defines the type of the metadata buffers that
- * can be passed to video encoder component for encoding, via Stagefright
- * media recording framework. To see how to work with the metadata buffers
- * in media recording framework, please consult HardwareAPI.h
- *
- * The creator of metadata buffers and video encoder share common knowledge
- * on what is actually being stored in these metadata buffers, and
- * how the information can be used by the video encoder component
- * to locate the actual pixel data as the source input for video
- * encoder, plus whatever other information that is necessary. Stagefright
- * media recording framework does not need to know anything specific about the
- * metadata buffers, except for receving each individual metadata buffer
- * as the source input, making a copy of the metadata buffer, and passing the
- * copy via OpenMAX API to the video encoder component.
- *
- * The creator of the metadata buffers must ensure that the first
- * 4 bytes in every metadata buffer indicates its buffer type,
- * and the rest of the metadata buffer contains the
- * actual metadata information. When a video encoder component receives
- * a metadata buffer, it uses the first 4 bytes in that buffer to find
- * out the type of the metadata buffer, and takes action appropriate
- * to that type of metadata buffers (for instance, locate the actual
- * pixel data input and then encoding the input data to produce a
- * compressed output buffer).
- *
- * The following shows the layout of a metadata buffer,
- * where buffer type is a 4-byte field of MetadataBufferType,
- * and the payload is the metadata information.
- *
- * --------------------------------------------------------------
- * |  buffer type  |          payload                           |
- * --------------------------------------------------------------
- *
- */
-typedef enum {
-
-    /*
-     * kMetadataBufferTypeCameraSource is used to indicate that
-     * the source of the metadata buffer is the camera component.
-     */
-    kMetadataBufferTypeCameraSource  = 0,
-
-    /*
-     * kMetadataBufferTypeGrallocSource is used to indicate that
-     * the payload of the metadata buffers can be interpreted as
-     * a buffer_handle_t.
-     * So in this case,the metadata that the encoder receives
-     * will have a byte stream that consists of two parts:
-     * 1. First, there is an integer indicating that it is a GRAlloc
-     * source (kMetadataBufferTypeGrallocSource)
-     * 2. This is followed by the buffer_handle_t that is a handle to the
-     * GRalloc buffer. The encoder needs to interpret this GRalloc handle
-     * and encode the frames.
-     * --------------------------------------------------------------
-     * |  kMetadataBufferTypeGrallocSource | sizeof(buffer_handle_t) |
-     * --------------------------------------------------------------
-     */
-    kMetadataBufferTypeGrallocSource = 1,
-
-    // Add more here...
-
-} MetadataBufferType;
-
-#ifdef __cplusplus
-}  // namespace android
-}
-#endif
-
-#endif  // METADATA_BUFFER_TYPE_H
diff --git a/include/media/stagefright/NativeWindowWrapper.h b/include/media/stagefright/NativeWindowWrapper.h
index f323cbc..97cc0ce 100644
--- a/include/media/stagefright/NativeWindowWrapper.h
+++ b/include/media/stagefright/NativeWindowWrapper.h
@@ -18,40 +18,28 @@
 
 #define NATIVE_WINDOW_WRAPPER_H_
 
-#include <surfaceflinger/Surface.h>
 #include <gui/SurfaceTextureClient.h>
 
 namespace android {
 
-// Both Surface and SurfaceTextureClient are RefBase that implement the
-// ANativeWindow interface, but at different addresses. ANativeWindow is not
-// a RefBase but acts like one for use with sp<>.  This wrapper converts a
-// Surface or SurfaceTextureClient into a single reference-counted object
-// that holds an sp reference to the underlying Surface or SurfaceTextureClient,
-// It provides a method to get the ANativeWindow.
+// SurfaceTextureClient derives from ANativeWindow, which in turn derives from
+// multiple base classes. In order to carry it in AMessages, we'll temporarily
+// wrap it in a NativeWindowWrapper.
 
 struct NativeWindowWrapper : RefBase {
     NativeWindowWrapper(
-            const sp<Surface> &surface) :
-        mSurface(surface) { }
-
-    NativeWindowWrapper(
             const sp<SurfaceTextureClient> &surfaceTextureClient) :
         mSurfaceTextureClient(surfaceTextureClient) { }
 
     sp<ANativeWindow> getNativeWindow() const {
-        if (mSurface != NULL) {
-            return mSurface;
-        } else {
-            return mSurfaceTextureClient;
-        }
+        return mSurfaceTextureClient;
     }
 
-    // If needed later we can provide a method to ask what kind of native window
+    sp<SurfaceTextureClient> getSurfaceTextureClient() const {
+        return mSurfaceTextureClient;
+    }
 
 private:
-    // At most one of mSurface and mSurfaceTextureClient will be non-NULL
-    const sp<Surface> mSurface;
     const sp<SurfaceTextureClient> mSurfaceTextureClient;
 
     DISALLOW_EVIL_CONSTRUCTORS(NativeWindowWrapper);
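
Per the new comment, the wrapper now exists purely so a SurfaceTextureClient can ride inside an AMessage. A sketch of both directions; the "native-window" key is an arbitrary illustrative name.

    #include <media/stagefright/NativeWindowWrapper.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    static void stashSurface(
            const sp<AMessage> &msg, const sp<SurfaceTextureClient> &stc) {
        msg->setObject("native-window", new NativeWindowWrapper(stc));
    }

    static sp<ANativeWindow> unstashSurface(const sp<AMessage> &msg) {
        sp<RefBase> obj;
        if (!msg->findObject("native-window", &obj)) {
            return NULL;
        }
        return static_cast<NativeWindowWrapper *>(obj.get())->getNativeWindow();
    }
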
diff --git a/include/media/stagefright/NuMediaExtractor.h b/include/media/stagefright/NuMediaExtractor.h
new file mode 100644
index 0000000..96efdff
--- /dev/null
+++ b/include/media/stagefright/NuMediaExtractor.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NU_MEDIA_EXTRACTOR_H_
+#define NU_MEDIA_EXTRACTOR_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+struct ABuffer;
+struct AMessage;
+struct MediaBuffer;
+struct MediaExtractor;
+struct MediaSource;
+
+struct NuMediaExtractor : public RefBase {
+    NuMediaExtractor();
+
+    status_t setDataSource(const char *path);
+
+    size_t countTracks() const;
+    status_t getTrackFormat(size_t index, sp<AMessage> *format) const;
+
+    status_t selectTrack(size_t index);
+
+    status_t seekTo(int64_t timeUs);
+
+    status_t advance();
+    status_t readSampleData(const sp<ABuffer> &buffer);
+    status_t getSampleTrackIndex(size_t *trackIndex);
+    status_t getSampleTime(int64_t *sampleTimeUs);
+
+protected:
+    virtual ~NuMediaExtractor();
+
+private:
+    enum TrackFlags {
+        kIsVorbis       = 1,
+    };
+
+    struct TrackInfo {
+        sp<MediaSource> mSource;
+        size_t mTrackIndex;
+        status_t mFinalResult;
+        MediaBuffer *mSample;
+        int64_t mSampleTimeUs;
+        uint32_t mFlags;  // bitmask of "TrackFlags"
+    };
+
+    sp<MediaExtractor> mImpl;
+
+    Vector<TrackInfo> mSelectedTracks;
+
+    ssize_t fetchTrackSamples(int64_t seekTimeUs = -1ll);
+    void releaseTrackSamples();
+
+    DISALLOW_EVIL_CONSTRUCTORS(NuMediaExtractor);
+};
+
+}  // namespace android
+
+#endif  // NU_MEDIA_EXTRACTOR_H_
+
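
NuMediaExtractor wraps a MediaExtractor behind a sample-pull interface: select tracks, then repeatedly readSampleData() and advance(). A walk-through sketch; the 1 MB buffer size is arbitrary, and the loop assumes readSampleData() stops returning OK at end of stream.

    #include <media/stagefright/NuMediaExtractor.h>
    #include <media/stagefright/foundation/ABuffer.h>
    #include <media/stagefright/foundation/AMessage.h>
    #include <utils/Errors.h>

    using namespace android;

    static void dumpSamples(const char *path) {
        sp<NuMediaExtractor> extractor = new NuMediaExtractor;
        if (extractor->setDataSource(path) != OK) {
            return;
        }

        for (size_t i = 0; i < extractor->countTracks(); ++i) {
            sp<AMessage> format;
            extractor->getTrackFormat(i, &format);
            extractor->selectTrack(i);
        }

        sp<ABuffer> buffer = new ABuffer(1024 * 1024);
        while (extractor->readSampleData(buffer) == OK) {
            size_t trackIndex;
            int64_t timeUs;
            extractor->getSampleTrackIndex(&trackIndex);
            extractor->getSampleTime(&timeUs);
            // ... consume buffer->data() / buffer->size() for trackIndex ...
            extractor->advance();
        }
    }
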
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 84f8282..392ea87 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -26,6 +26,7 @@
 
 namespace android {
 
+struct MediaCodecList;
 class MemoryDealer;
 struct OMXCodecObserver;
 struct CodecProfileLevel;
@@ -82,12 +83,35 @@
     // from MediaBufferObserver
     virtual void signalBufferReturned(MediaBuffer *buffer);
 
+    enum Quirks {
+        kNeedsFlushBeforeDisable              = 1,
+        kWantsNALFragments                    = 2,
+        kRequiresLoadedToIdleAfterAllocation  = 4,
+        kRequiresAllocateBufferOnInputPorts   = 8,
+        kRequiresFlushCompleteEmulation       = 16,
+        kRequiresAllocateBufferOnOutputPorts  = 32,
+        kRequiresFlushBeforeShutdown          = 64,
+        kDefersOutputBufferAllocation         = 128,
+        kDecoderLiesAboutNumberOfChannels     = 256,
+        kInputBufferSizesAreBogus             = 512,
+        kSupportsMultipleFramesPerInputBuffer = 1024,
+        kAvoidMemcopyInputRecordingFrames     = 2048,
+        kRequiresLargerEncoderOutputBuffer    = 4096,
+        kOutputBuffersAreUnreadable           = 8192,
+    };
+
     // for use by ACodec
     static void findMatchingCodecs(
             const char *mime,
             bool createEncoder, const char *matchComponentName,
             uint32_t flags,
-            Vector<String8> *matchingCodecs);
+            Vector<String8> *matchingCodecs,
+            Vector<uint32_t> *matchingCodecQuirks = NULL);
+
+    static uint32_t getComponentQuirks(
+            const MediaCodecList *list, size_t index);
+
+    static bool findCodecQuirks(const char *componentName, uint32_t *quirks);
 
 protected:
     virtual ~OMXCodec();
@@ -125,23 +149,6 @@
         SHUTTING_DOWN,
     };
 
-    enum Quirks {
-        kNeedsFlushBeforeDisable              = 1,
-        kWantsNALFragments                    = 2,
-        kRequiresLoadedToIdleAfterAllocation  = 4,
-        kRequiresAllocateBufferOnInputPorts   = 8,
-        kRequiresFlushCompleteEmulation       = 16,
-        kRequiresAllocateBufferOnOutputPorts  = 32,
-        kRequiresFlushBeforeShutdown          = 64,
-        kDefersOutputBufferAllocation         = 128,
-        kDecoderLiesAboutNumberOfChannels     = 256,
-        kInputBufferSizesAreBogus             = 512,
-        kSupportsMultipleFramesPerInputBuffer = 1024,
-        kAvoidMemcopyInputRecordingFrames     = 2048,
-        kRequiresLargerEncoderOutputBuffer    = 4096,
-        kOutputBuffersAreUnreadable           = 8192,
-    };
-
     enum BufferStatus {
         OWNED_BY_US,
         OWNED_BY_COMPONENT,
@@ -172,6 +179,7 @@
     uint32_t mFlags;
 
     bool mIsEncoder;
+    bool mIsVideo;
     char *mMIME;
     char *mComponentName;
     sp<MetaData> mOutputFormat;
@@ -326,15 +334,12 @@
 
     status_t configureCodec(const sp<MetaData> &meta);
 
-    static uint32_t getComponentQuirks(
-            const char *componentName, bool isEncoder);
-
     void restorePatchedDataPointer(BufferInfo *info);
 
     status_t applyRotation();
     status_t waitForBufferFilled_l();
 
-    int64_t retrieveDecodingTimeUs(bool isCodecSpecific);
+    int64_t getDecodingTimeUs();
 
     status_t parseAVCCodecSpecificData(
             const void *data, size_t size,
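
With the Quirks enum now public, findMatchingCodecs() can hand back each match's quirk mask alongside its name via the new optional argument. A short sketch:

    #include <media/stagefright/OMXCodec.h>
    #include <utils/String8.h>
    #include <utils/Vector.h>

    using namespace android;

    static void listDecoders(const char *mime) {
        Vector<String8> names;
        Vector<uint32_t> quirks;
        OMXCodec::findMatchingCodecs(
                mime, false /* createEncoder */, NULL /* matchComponentName */,
                0 /* flags */, &names, &quirks);

        for (size_t i = 0; i < names.size(); ++i) {
            bool wantsNALFragments =
                    (quirks[i] & OMXCodec::kWantsNALFragments) != 0;
            (void)wantsNALFragments;   // e.g. feed NAL-sized input buffers
        }
    }
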
diff --git a/include/media/stagefright/OMXPluginBase.h b/include/media/stagefright/OMXPluginBase.h
deleted file mode 100644
index 2fd8e12..0000000
--- a/include/media/stagefright/OMXPluginBase.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OMX_PLUGIN_BASE_H_
-
-#define OMX_PLUGIN_BASE_H_
-
-#include <sys/types.h>
-
-#include <OMX_Component.h>
-
-#include <utils/String8.h>
-#include <utils/Vector.h>
-
-namespace android {
-
-struct OMXComponentBase;
-
-struct OMXPluginBase {
-    OMXPluginBase() {}
-    virtual ~OMXPluginBase() {}
-
-    virtual OMX_ERRORTYPE makeComponentInstance(
-            const char *name,
-            const OMX_CALLBACKTYPE *callbacks,
-            OMX_PTR appData,
-            OMX_COMPONENTTYPE **component) = 0;
-
-    virtual OMX_ERRORTYPE destroyComponentInstance(
-            OMX_COMPONENTTYPE *component) = 0;
-
-    virtual OMX_ERRORTYPE enumerateComponents(
-            OMX_STRING name,
-            size_t size,
-            OMX_U32 index) = 0;
-
-    virtual OMX_ERRORTYPE getRolesOfComponent(
-            const char *name,
-            Vector<String8> *roles) = 0;
-
-private:
-    OMXPluginBase(const OMXPluginBase &);
-    OMXPluginBase &operator=(const OMXPluginBase &);
-};
-
-}  // namespace android
-
-#endif  // OMX_PLUGIN_BASE_H_
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index d0940bb..54baab6 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -58,7 +58,7 @@
 
     // For the MediaSource interface for use by StageFrightRecorder:
     virtual status_t start(MetaData *params = NULL);
-    virtual status_t stop();
+    virtual status_t stop() { return reset(); }
     virtual status_t read(
             MediaBuffer **buffer, const ReadOptions *options = NULL);
     virtual sp<MetaData> getFormat();
@@ -359,6 +359,8 @@
     Condition mFrameAvailableCondition;
     Condition mFrameCompleteCondition;
 
+    status_t reset();
+
     // Avoid copying and equating and default constructor
     DISALLOW_IMPLICIT_CONSTRUCTORS(SurfaceMediaSource);
 };
diff --git a/include/media/stagefright/foundation/AMessage.h b/include/media/stagefright/foundation/AMessage.h
index 7ec54aa..e5416e4 100644
--- a/include/media/stagefright/foundation/AMessage.h
+++ b/include/media/stagefright/foundation/AMessage.h
@@ -25,6 +25,7 @@
 
 namespace android {
 
+struct ABuffer;
 struct AString;
 struct Parcel;
 
@@ -50,6 +51,7 @@
     void setPointer(const char *name, void *value);
     void setString(const char *name, const char *s, ssize_t len = -1);
     void setObject(const char *name, const sp<RefBase> &obj);
+    void setBuffer(const char *name, const sp<ABuffer> &buffer);
     void setMessage(const char *name, const sp<AMessage> &obj);
 
     void setRect(
@@ -64,6 +66,7 @@
     bool findPointer(const char *name, void **value) const;
     bool findString(const char *name, AString *value) const;
     bool findObject(const char *name, sp<RefBase> *obj) const;
+    bool findBuffer(const char *name, sp<ABuffer> *buffer) const;
     bool findMessage(const char *name, sp<AMessage> *obj) const;
 
     bool findRect(
@@ -90,10 +93,6 @@
 
     AString debugString(int32_t indent = 0) const;
 
-protected:
-    virtual ~AMessage();
-
-private:
     enum Type {
         kTypeInt32,
         kTypeInt64,
@@ -105,8 +104,16 @@
         kTypeObject,
         kTypeMessage,
         kTypeRect,
+        kTypeBuffer,
     };
 
+    size_t countEntries() const;
+    const char *getEntryNameAt(size_t index, Type *type) const;
+
+protected:
+    virtual ~AMessage();
+
+private:
     uint32_t mWhat;
     ALooper::handler_id mTarget;
 
@@ -131,7 +138,7 @@
     };
 
     enum {
-        kMaxNumItems = 16
+        kMaxNumItems = 32
     };
     Item mItems[kMaxNumItems];
     size_t mNumItems;
@@ -140,6 +147,9 @@
     void freeItem(Item *item);
     const Item *findItem(const char *name, Type type) const;
 
+    void setObjectInternal(
+            const char *name, const sp<RefBase> &obj, Type type);
+
     DISALLOW_EVIL_CONSTRUCTORS(AMessage);
 };
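
AMessage gains first-class ABuffer entries (setBuffer/findBuffer) and makes countEntries()/getEntryNameAt() public so a message's payload can be enumerated. A brief sketch; the "csd-0" key is illustrative.

    #include <media/stagefright/foundation/ABuffer.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    static void attachCodecConfig(
            const sp<AMessage> &msg, const sp<ABuffer> &csd) {
        msg->setBuffer("csd-0", csd);
    }

    static sp<ABuffer> fetchCodecConfig(const sp<AMessage> &msg) {
        sp<ABuffer> csd;
        if (!msg->findBuffer("csd-0", &csd)) {
            return NULL;
        }
        return csd;
    }

    // Enumerate whatever a message carries (Type is now publicly visible).
    static void dumpEntryNames(const sp<AMessage> &msg) {
        for (size_t i = 0; i < msg->countEntries(); ++i) {
            AMessage::Type type;
            const char *name = msg->getEntryNameAt(i, &type);
            (void)name;
        }
    }
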
 
diff --git a/include/media/stagefright/openmax/OMX_Audio.h b/include/media/stagefright/openmax/OMX_Audio.h
deleted file mode 100644
index 3482841..0000000
--- a/include/media/stagefright/openmax/OMX_Audio.h
+++ /dev/null
@@ -1,1328 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/*
- * Copyright (c) 2008 The Khronos Group Inc. 
- * 
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject
- * to the following conditions: 
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software. 
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- *
- */
-
-/** @file OMX_Audio.h - OpenMax IL version 1.1.2
- *  The structures needed by Audio components to exchange
- *  parameters and configuration data with the componenmilts.
- */
-
-#ifndef OMX_Audio_h
-#define OMX_Audio_h
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-
-/* Each OMX header must include all required header files to allow the
- *  header to compile without errors.  The includes below are required
- *  for this header file to compile successfully 
- */
-
-#include <OMX_Core.h>
-
-/** @defgroup midi MIDI
- * @ingroup audio
- */
- 
-/** @defgroup effects Audio effects
- * @ingroup audio
- */
-
-/** @defgroup audio OpenMAX IL Audio Domain
- * Structures for OpenMAX IL Audio domain
- * @{
- */
-
-/** Enumeration used to define the possible audio codings.  
- *  If "OMX_AUDIO_CodingUnused" is selected, the coding selection must 
- *  be done in a vendor specific way.  Since this is for an audio 
- *  processing element this enum is relevant.  However, for another 
- *  type of component other enums would be in this area.
- */
-typedef enum OMX_AUDIO_CODINGTYPE {
-    OMX_AUDIO_CodingUnused = 0,  /**< Placeholder value when coding is N/A  */
-    OMX_AUDIO_CodingAutoDetect,  /**< auto detection of audio format */
-    OMX_AUDIO_CodingPCM,         /**< Any variant of PCM coding */
-    OMX_AUDIO_CodingADPCM,       /**< Any variant of ADPCM encoded data */
-    OMX_AUDIO_CodingAMR,         /**< Any variant of AMR encoded data */
-    OMX_AUDIO_CodingGSMFR,       /**< Any variant of GSM fullrate (i.e. GSM610) */
-    OMX_AUDIO_CodingGSMEFR,      /**< Any variant of GSM Enhanced Fullrate encoded data*/
-    OMX_AUDIO_CodingGSMHR,       /**< Any variant of GSM Halfrate encoded data */
-    OMX_AUDIO_CodingPDCFR,       /**< Any variant of PDC Fullrate encoded data */
-    OMX_AUDIO_CodingPDCEFR,      /**< Any variant of PDC Enhanced Fullrate encoded data */
-    OMX_AUDIO_CodingPDCHR,       /**< Any variant of PDC Halfrate encoded data */
-    OMX_AUDIO_CodingTDMAFR,      /**< Any variant of TDMA Fullrate encoded data (TIA/EIA-136-420) */
-    OMX_AUDIO_CodingTDMAEFR,     /**< Any variant of TDMA Enhanced Fullrate encoded data (TIA/EIA-136-410) */
-    OMX_AUDIO_CodingQCELP8,      /**< Any variant of QCELP 8kbps encoded data */
-    OMX_AUDIO_CodingQCELP13,     /**< Any variant of QCELP 13kbps encoded data */
-    OMX_AUDIO_CodingEVRC,        /**< Any variant of EVRC encoded data */
-    OMX_AUDIO_CodingSMV,         /**< Any variant of SMV encoded data */
-    OMX_AUDIO_CodingG711,        /**< Any variant of G.711 encoded data */
-    OMX_AUDIO_CodingG723,        /**< Any variant of G.723 dot 1 encoded data */
-    OMX_AUDIO_CodingG726,        /**< Any variant of G.726 encoded data */
-    OMX_AUDIO_CodingG729,        /**< Any variant of G.729 encoded data */
-    OMX_AUDIO_CodingAAC,         /**< Any variant of AAC encoded data */
-    OMX_AUDIO_CodingMP3,         /**< Any variant of MP3 encoded data */
-    OMX_AUDIO_CodingSBC,         /**< Any variant of SBC encoded data */
-    OMX_AUDIO_CodingVORBIS,      /**< Any variant of VORBIS encoded data */
-    OMX_AUDIO_CodingWMA,         /**< Any variant of WMA encoded data */
-    OMX_AUDIO_CodingRA,          /**< Any variant of RA encoded data */
-    OMX_AUDIO_CodingMIDI,        /**< Any variant of MIDI encoded data */
-    OMX_AUDIO_CodingKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_CodingVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_CodingMax = 0x7FFFFFFF
-} OMX_AUDIO_CODINGTYPE;
-
-
-/** The PortDefinition structure is used to define all of the parameters 
- *  necessary for the compliant component to setup an input or an output audio 
- *  path.  If additional information is needed to define the parameters of the
- *  port (such as frequency), additional structures must be sent such as the
- *  OMX_AUDIO_PARAM_PCMMODETYPE structure to supply the extra parameters for the port.
- */
-typedef struct OMX_AUDIO_PORTDEFINITIONTYPE {
-    OMX_STRING cMIMEType;            /**< MIME type of data for the port */
-    OMX_NATIVE_DEVICETYPE pNativeRender; /**< platform specific reference
-                                               for an output device, 
-                                               otherwise this field is 0 */
-    OMX_BOOL bFlagErrorConcealment;  /**< Turns on error concealment if it is 
-                                          supported by the OMX component */
-    OMX_AUDIO_CODINGTYPE eEncoding;  /**< Type of data expected for this 
-                                          port (e.g. PCM, AMR, MP3, etc) */
-} OMX_AUDIO_PORTDEFINITIONTYPE;
-
-
-/**  Port format parameter.  This structure is used to enumerate
-  *  the various data input/output formats supported by the port.
-  */
-typedef struct OMX_AUDIO_PARAM_PORTFORMATTYPE {
-    OMX_U32 nSize;                  /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;       /**< OMX specification version information */
-    OMX_U32 nPortIndex;             /**< Indicates which port to set */
-    OMX_U32 nIndex;                 /**< Indicates the enumeration index for the format from 0x0 to N-1 */
-    OMX_AUDIO_CODINGTYPE eEncoding; /**< Type of data expected for this port (e.g. PCM, AMR, MP3, etc) */
-} OMX_AUDIO_PARAM_PORTFORMATTYPE;
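Aside (an illustrative sketch, not part of the deleted header): a client typically walks a port's supported codings by stepping nIndex until the component stops returning OMX_ErrorNone. The hComp handle and the choice of port 0 below are assumptions; the index comes from OMX_Index.h.

#include <OMX_Core.h>
#include <OMX_Audio.h>
#include <OMX_Index.h>

// Sketch: enumerate every OMX_AUDIO_CODINGTYPE advertised by a port.
static void listSupportedCodings(OMX_HANDLETYPE hComp) {
    OMX_AUDIO_PARAM_PORTFORMATTYPE fmt;
    fmt.nSize = sizeof(fmt);
    fmt.nVersion.nVersion = OMX_VERSION;   // struct header required by the spec
    fmt.nPortIndex = 0;                    // assumed audio port
    for (fmt.nIndex = 0; ; ++fmt.nIndex) {
        if (OMX_GetParameter(hComp, OMX_IndexParamAudioPortFormat, &fmt) != OMX_ErrorNone) {
            break;                         // typically OMX_ErrorNoMore ends the walk
        }
        // fmt.eEncoding now holds one supported coding (PCM, AAC, MP3, ...).
    }
}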
-
-
-/** PCM mode type  */ 
-typedef enum OMX_AUDIO_PCMMODETYPE { 
-    OMX_AUDIO_PCMModeLinear = 0,  /**< Linear PCM encoded data */ 
-    OMX_AUDIO_PCMModeALaw,        /**< A law PCM encoded data (G.711) */ 
-    OMX_AUDIO_PCMModeMULaw,       /**< Mu law PCM encoded data (G.711)  */ 
-    OMX_AUDIO_PCMModeKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_PCMModeVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_PCMModeMax = 0x7FFFFFFF 
-} OMX_AUDIO_PCMMODETYPE; 
-
-
-typedef enum OMX_AUDIO_CHANNELTYPE {
-    OMX_AUDIO_ChannelNone = 0x0,    /**< Unused or empty */
-    OMX_AUDIO_ChannelLF   = 0x1,    /**< Left front */
-    OMX_AUDIO_ChannelRF   = 0x2,    /**< Right front */
-    OMX_AUDIO_ChannelCF   = 0x3,    /**< Center front */
-    OMX_AUDIO_ChannelLS   = 0x4,    /**< Left surround */
-    OMX_AUDIO_ChannelRS   = 0x5,    /**< Right surround */
-    OMX_AUDIO_ChannelLFE  = 0x6,    /**< Low frequency effects */
-    OMX_AUDIO_ChannelCS   = 0x7,    /**< Back surround */
-    OMX_AUDIO_ChannelLR   = 0x8,    /**< Left rear. */
-    OMX_AUDIO_ChannelRR   = 0x9,    /**< Right rear. */
-    OMX_AUDIO_ChannelKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_ChannelVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_ChannelMax  = 0x7FFFFFFF 
-} OMX_AUDIO_CHANNELTYPE;
-
-#define OMX_AUDIO_MAXCHANNELS 16  /**< maximum number of distinct audio channels that a buffer may contain */
-#define OMX_MIN_PCMPAYLOAD_MSEC 5 /**< Minimum audio buffer payload size for uncompressed (PCM) audio */
-
-/** PCM format description */ 
-typedef struct OMX_AUDIO_PARAM_PCMMODETYPE { 
-    OMX_U32 nSize;                    /**< Size of this structure, in Bytes */ 
-    OMX_VERSIONTYPE nVersion;         /**< OMX specification version information */ 
-    OMX_U32 nPortIndex;               /**< port that this structure applies to */ 
-    OMX_U32 nChannels;                /**< Number of channels (e.g. 2 for stereo) */ 
-    OMX_NUMERICALDATATYPE eNumData;   /**< indicates PCM data as signed or unsigned */ 
-    OMX_ENDIANTYPE eEndian;           /**< indicates PCM data as little or big endian */ 
-    OMX_BOOL bInterleaved;            /**< True for normal interleaved data; false for 
-                                           non-interleaved data (e.g. block data) */ 
-    OMX_U32 nBitPerSample;            /**< Bit per sample */ 
-    OMX_U32 nSamplingRate;            /**< Sampling rate of the source data.  Use 0 for 
-                                           variable or unknown sampling rate. */ 
-    OMX_AUDIO_PCMMODETYPE ePCMMode;   /**< PCM mode enumeration */ 
-    OMX_AUDIO_CHANNELTYPE eChannelMapping[OMX_AUDIO_MAXCHANNELS]; /**< Slot i contains channel defined by eChannelMapping[i] */
-
-} OMX_AUDIO_PARAM_PCMMODETYPE; 
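An illustrative sketch (hComp and port are placeholders): read-modify-write of OMX_AUDIO_PARAM_PCMMODETYPE for 16-bit, 44.1 kHz interleaved stereo, applied through the standard OMX_IndexParamAudioPcm index from OMX_Index.h.

#include <OMX_Core.h>
#include <OMX_Audio.h>
#include <OMX_Index.h>

// Sketch: configure a port for 16-bit / 44.1 kHz interleaved stereo linear PCM.
static OMX_ERRORTYPE setStereoPcm(OMX_HANDLETYPE hComp, OMX_U32 port) {
    OMX_AUDIO_PARAM_PCMMODETYPE pcm;
    pcm.nSize = sizeof(pcm);
    pcm.nVersion.nVersion = OMX_VERSION;
    pcm.nPortIndex = port;
    OMX_ERRORTYPE err = OMX_GetParameter(hComp, OMX_IndexParamAudioPcm, &pcm);
    if (err != OMX_ErrorNone) {
        return err;                               // start from the component's defaults
    }
    pcm.nChannels = 2;
    pcm.nBitPerSample = 16;
    pcm.nSamplingRate = 44100;
    pcm.eNumData = OMX_NumericalDataSigned;
    pcm.eEndian = OMX_EndianLittle;
    pcm.bInterleaved = OMX_TRUE;
    pcm.ePCMMode = OMX_AUDIO_PCMModeLinear;
    pcm.eChannelMapping[0] = OMX_AUDIO_ChannelLF;
    pcm.eChannelMapping[1] = OMX_AUDIO_ChannelRF;
    return OMX_SetParameter(hComp, OMX_IndexParamAudioPcm, &pcm);
}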
-
-
-/** Audio channel mode.  This is used by both AAC and MP3, although the names are more appropriate
- * for the MP3.  For example, JointStereo for MP3 is CouplingChannels for AAC. 
- */
-typedef enum OMX_AUDIO_CHANNELMODETYPE {
-    OMX_AUDIO_ChannelModeStereo = 0,  /**< 2 channels, the bitrate allocation between those 
-                                          two channels changes according to each channel's information */
-    OMX_AUDIO_ChannelModeJointStereo, /**< mode that takes advantage of what is common between 
-                                           2 channels for higher compression gain */
-    OMX_AUDIO_ChannelModeDual,        /**< 2 mono-channels, each channel is encoded with half 
-                                           the bitrate of the overall bitrate */
-    OMX_AUDIO_ChannelModeMono,        /**< Mono channel mode */
-    OMX_AUDIO_ChannelModeKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_ChannelModeVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_ChannelModeMax = 0x7FFFFFFF
-} OMX_AUDIO_CHANNELMODETYPE;
-
-
-typedef enum OMX_AUDIO_MP3STREAMFORMATTYPE {
-    OMX_AUDIO_MP3StreamFormatMP1Layer3 = 0, /**< MP3 Audio MPEG 1 Layer 3 Stream format */
-    OMX_AUDIO_MP3StreamFormatMP2Layer3,     /**< MP3 Audio MPEG 2 Layer 3 Stream format */
-    OMX_AUDIO_MP3StreamFormatMP2_5Layer3,   /**< MP3 Audio MPEG2.5 Layer 3 Stream format */
-    OMX_AUDIO_MP3StreamFormatKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_MP3StreamFormatVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_MP3StreamFormatMax = 0x7FFFFFFF
-} OMX_AUDIO_MP3STREAMFORMATTYPE;
-
-/** MP3 params */
-typedef struct OMX_AUDIO_PARAM_MP3TYPE {
-    OMX_U32 nSize;                 /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;      /**< OMX specification version information */
-    OMX_U32 nPortIndex;            /**< port that this structure applies to */
-    OMX_U32 nChannels;             /**< Number of channels */
-    OMX_U32 nBitRate;              /**< Bit rate of the input data.  Use 0 for variable
-                                        rate or unknown bit rates */
-    OMX_U32 nSampleRate;           /**< Sampling rate of the source data.  Use 0 for
-                                        variable or unknown sampling rate. */
-    OMX_U32 nAudioBandWidth;       /**< Audio band width (in Hz) to which an encoder should
-                                        limit the audio signal. Use 0 to let encoder decide */
-    OMX_AUDIO_CHANNELMODETYPE eChannelMode;   /**< Channel mode enumeration */
-    OMX_AUDIO_MP3STREAMFORMATTYPE eFormat;  /**< MP3 stream format */
-} OMX_AUDIO_PARAM_MP3TYPE;
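Another hedged sketch (hComp and port are placeholders): declaring a stereo 44.1 kHz MPEG-1 Layer 3 stream with OMX_AUDIO_PARAM_MP3TYPE via the standard OMX_IndexParamAudioMp3 index.

#include <OMX_Core.h>
#include <OMX_Audio.h>
#include <OMX_Index.h>

// Sketch: describe an MP3 input stream on a decoder port.
static OMX_ERRORTYPE describeMp3Input(OMX_HANDLETYPE hComp, OMX_U32 port) {
    OMX_AUDIO_PARAM_MP3TYPE mp3;
    mp3.nSize = sizeof(mp3);
    mp3.nVersion.nVersion = OMX_VERSION;
    mp3.nPortIndex = port;
    OMX_ERRORTYPE err = OMX_GetParameter(hComp, OMX_IndexParamAudioMp3, &mp3);
    if (err != OMX_ErrorNone) {
        return err;
    }
    mp3.nChannels = 2;
    mp3.nSampleRate = 44100;
    mp3.nBitRate = 0;                                 // 0 = variable/unknown, per the field comment
    mp3.eChannelMode = OMX_AUDIO_ChannelModeStereo;
    mp3.eFormat = OMX_AUDIO_MP3StreamFormatMP1Layer3;
    return OMX_SetParameter(hComp, OMX_IndexParamAudioMp3, &mp3);
}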
-
-
-typedef enum OMX_AUDIO_AACSTREAMFORMATTYPE {
-    OMX_AUDIO_AACStreamFormatMP2ADTS = 0, /**< AAC Audio Data Transport Stream 2 format */
-    OMX_AUDIO_AACStreamFormatMP4ADTS,     /**< AAC Audio Data Transport Stream 4 format */
-    OMX_AUDIO_AACStreamFormatMP4LOAS,     /**< AAC Low Overhead Audio Stream format */
-    OMX_AUDIO_AACStreamFormatMP4LATM,     /**< AAC Low overhead Audio Transport Multiplex */
-    OMX_AUDIO_AACStreamFormatADIF,        /**< AAC Audio Data Interchange Format */
-    OMX_AUDIO_AACStreamFormatMP4FF,       /**< AAC inside MPEG-4/ISO File Format */
-    OMX_AUDIO_AACStreamFormatRAW,         /**< AAC Raw Format */
-    OMX_AUDIO_AACStreamFormatKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_AACStreamFormatVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_AACStreamFormatMax = 0x7FFFFFFF
-} OMX_AUDIO_AACSTREAMFORMATTYPE;
-
-
-/** AAC mode type.  Note that the term profile is used with the MPEG-2
- * standard and the term object type and profile is used with MPEG-4 */
-typedef enum OMX_AUDIO_AACPROFILETYPE{
-  OMX_AUDIO_AACObjectNull = 0,      /**< Null, not used */
-  OMX_AUDIO_AACObjectMain = 1,      /**< AAC Main object */
-  OMX_AUDIO_AACObjectLC,            /**< AAC Low Complexity object (AAC profile) */
-  OMX_AUDIO_AACObjectSSR,           /**< AAC Scalable Sample Rate object */
-  OMX_AUDIO_AACObjectLTP,           /**< AAC Long Term Prediction object */
-  OMX_AUDIO_AACObjectHE,            /**< AAC High Efficiency (object type SBR, HE-AAC profile) */
-  OMX_AUDIO_AACObjectScalable,      /**< AAC Scalable object */
-  OMX_AUDIO_AACObjectERLC = 17,     /**< ER AAC Low Complexity object (Error Resilient AAC-LC) */
-  OMX_AUDIO_AACObjectLD = 23,       /**< AAC Low Delay object (Error Resilient) */
-  OMX_AUDIO_AACObjectHE_PS = 29,    /**< AAC High Efficiency with Parametric Stereo coding (HE-AAC v2, object type PS) */
-  OMX_AUDIO_AACObjectKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-  OMX_AUDIO_AACObjectVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-  OMX_AUDIO_AACObjectMax = 0x7FFFFFFF
-} OMX_AUDIO_AACPROFILETYPE;
-
-
-/** AAC tool usage (for nAACtools in OMX_AUDIO_PARAM_AACPROFILETYPE).
- * Required for encoder configuration and optional as decoder info output.
- * For MP3, OMX_AUDIO_CHANNELMODETYPE is sufficient. */
-#define OMX_AUDIO_AACToolNone 0x00000000 /**< no AAC tools allowed (encoder config) or active (decoder info output) */
-#define OMX_AUDIO_AACToolMS   0x00000001 /**< MS: Mid/side joint coding tool allowed or active */
-#define OMX_AUDIO_AACToolIS   0x00000002 /**< IS: Intensity stereo tool allowed or active */
-#define OMX_AUDIO_AACToolTNS  0x00000004 /**< TNS: Temporal Noise Shaping tool allowed or active */
-#define OMX_AUDIO_AACToolPNS  0x00000008 /**< PNS: MPEG-4 Perceptual Noise substitution tool allowed or active */
-#define OMX_AUDIO_AACToolLTP  0x00000010 /**< LTP: MPEG-4 Long Term Prediction tool allowed or active */
-#define OMX_AUDIO_AACToolAll  0x7FFFFFFF /**< all AAC tools allowed or active */
-
-/** MPEG-4 AAC error resilience (ER) tool usage (for nAACERtools in OMX_AUDIO_PARAM_AACPROFILETYPE).
- * Required for ER encoder configuration and optional as decoder info output */
-#define OMX_AUDIO_AACERNone  0x00000000  /**< no AAC ER tools allowed/used */
-#define OMX_AUDIO_AACERVCB11 0x00000001  /**< VCB11: Virtual Code Books for AAC section data */
-#define OMX_AUDIO_AACERRVLC  0x00000002  /**< RVLC: Reversible Variable Length Coding */
-#define OMX_AUDIO_AACERHCR   0x00000004  /**< HCR: Huffman Codeword Reordering */
-#define OMX_AUDIO_AACERAll   0x7FFFFFFF  /**< all AAC ER tools allowed/used */
-
-
-/** AAC params */
-typedef struct OMX_AUDIO_PARAM_AACPROFILETYPE {
-    OMX_U32 nSize;                 /**< Size of this structure, in Bytes */
-    OMX_VERSIONTYPE nVersion;      /**< OMX specification version information */
-    OMX_U32 nPortIndex;            /**< Port that this structure applies to */
-    OMX_U32 nChannels;             /**< Number of channels */
-    OMX_U32 nSampleRate;           /**< Sampling rate of the source data.  Use 0 for
-                                        variable or unknown sampling rate. */
-    OMX_U32 nBitRate;              /**< Bit rate of the input data.  Use 0 for variable
-                                        rate or unknown bit rates */
-    OMX_U32 nAudioBandWidth;       /**< Audio band width (in Hz) to which an encoder should
-                                        limit the audio signal. Use 0 to let encoder decide */
-    OMX_U32 nFrameLength;          /**< Frame length (in audio samples per channel) of the codec.
-                                        Can be 1024 or 960 (AAC-LC), 2048 (HE-AAC), 480 or 512 (AAC-LD).
-                                        Use 0 to let encoder decide */
-    OMX_U32 nAACtools;             /**< AAC tool usage */
-    OMX_U32 nAACERtools;           /**< MPEG-4 AAC error resilience tool usage */
-    OMX_AUDIO_AACPROFILETYPE eAACProfile;   /**< AAC profile enumeration */
-    OMX_AUDIO_AACSTREAMFORMATTYPE eAACStreamFormat; /**< AAC stream format enumeration */
-    OMX_AUDIO_CHANNELMODETYPE eChannelMode;   /**< Channel mode enumeration */
-} OMX_AUDIO_PARAM_AACPROFILETYPE;
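A hedged sketch (hComp, port, and the 128 kbps target are assumptions): configuring an AAC-LC encode with OMX_AUDIO_PARAM_AACPROFILETYPE through the standard OMX_IndexParamAudioAac index.

#include <OMX_Core.h>
#include <OMX_Audio.h>
#include <OMX_Index.h>

// Sketch: 2-channel, 44.1 kHz, 128 kbps AAC-LC in ADTS framing.
static OMX_ERRORTYPE setupAacLcEncode(OMX_HANDLETYPE hComp, OMX_U32 port) {
    OMX_AUDIO_PARAM_AACPROFILETYPE aac;
    aac.nSize = sizeof(aac);
    aac.nVersion.nVersion = OMX_VERSION;
    aac.nPortIndex = port;
    OMX_ERRORTYPE err = OMX_GetParameter(hComp, OMX_IndexParamAudioAac, &aac);
    if (err != OMX_ErrorNone) {
        return err;
    }
    aac.nChannels = 2;
    aac.nSampleRate = 44100;
    aac.nBitRate = 128000;
    aac.eAACProfile = OMX_AUDIO_AACObjectLC;
    aac.eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4ADTS;
    aac.eChannelMode = OMX_AUDIO_ChannelModeStereo;
    return OMX_SetParameter(hComp, OMX_IndexParamAudioAac, &aac);
}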
-
-
-/** VORBIS params */
-typedef struct OMX_AUDIO_PARAM_VORBISTYPE {
-    OMX_U32 nSize;            /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
-    OMX_U32 nPortIndex;       /**< port that this structure applies to */
-    OMX_U32 nChannels;        /**< Number of channels */
-    OMX_U32 nBitRate;         /**< Bit rate of the encoded data data.  Use 0 for variable
-                                   rate or unknown bit rates. Encoding is set to the
-                                   bitrate closest to specified  value (in bps) */
-    OMX_U32 nMinBitRate;      /**< Sets minimum bitrate (in bps). */
-    OMX_U32 nMaxBitRate;      /**< Sets maximum bitrate (in bps). */
-
-    OMX_U32 nSampleRate;      /**< Sampling rate of the source data.  Use 0 for
-                                   variable or unknown sampling rate. */
-    OMX_U32 nAudioBandWidth;  /**< Audio band width (in Hz) to which an encoder should
-                                   limit the audio signal. Use 0 to let encoder decide */
-    OMX_S32 nQuality;		  /**< Sets encoding quality to n, between -1 (low) and 10 (high).
-                                   In the default mode of operation, the quality level is 3.
-                                   Normal quality range is 0 - 10. */
-    OMX_BOOL bManaged;		  /**< Set  bitrate  management  mode. This turns off the
-                                   normal VBR encoding, but allows hard or soft bitrate
-                                   constraints to be enforced by the encoder. This mode can
-                                   be slower, and may also be lower quality. It is
-                                   primarily useful for streaming. */
-    OMX_BOOL bDownmix;		  /**< Downmix input from stereo to mono (has no effect on 
-                                   non-stereo streams). Useful for lower-bitrate encoding. */     
-} OMX_AUDIO_PARAM_VORBISTYPE;
-
-
-/** WMA Version */
-typedef enum OMX_AUDIO_WMAFORMATTYPE {
-  OMX_AUDIO_WMAFormatUnused = 0, /**< format unused or unknown */
-  OMX_AUDIO_WMAFormat7,          /**< Windows Media Audio format 7 */
-  OMX_AUDIO_WMAFormat8,          /**< Windows Media Audio format 8 */
-  OMX_AUDIO_WMAFormat9,          /**< Windows Media Audio format 9 */
-  OMX_AUDIO_WMAFormatKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-  OMX_AUDIO_WMAFormatVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-  OMX_AUDIO_WMAFormatMax = 0x7FFFFFFF
-} OMX_AUDIO_WMAFORMATTYPE;
-
-
-/** WMA Profile */
-typedef enum OMX_AUDIO_WMAPROFILETYPE {
-  OMX_AUDIO_WMAProfileUnused = 0,  /**< profile unused or unknown */
-  OMX_AUDIO_WMAProfileL1,          /**< Windows Media audio version 9 profile L1 */
-  OMX_AUDIO_WMAProfileL2,          /**< Windows Media audio version 9 profile L2 */
-  OMX_AUDIO_WMAProfileL3,          /**< Windows Media audio version 9 profile L3 */
-  OMX_AUDIO_WMAProfileKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-  OMX_AUDIO_WMAProfileVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-  OMX_AUDIO_WMAProfileMax = 0x7FFFFFFF
-} OMX_AUDIO_WMAPROFILETYPE;
-
-
-/** WMA params */
-typedef struct OMX_AUDIO_PARAM_WMATYPE {
-    OMX_U32 nSize;            /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
-    OMX_U32 nPortIndex;       /**< port that this structure applies to */
-    OMX_U16 nChannels;        /**< Number of channels */
-    OMX_U32 nBitRate;         /**< Bit rate of the input data.  Use 0 for variable
-                                   rate or unknown bit rates */
-    OMX_AUDIO_WMAFORMATTYPE eFormat; /**< Version of WMA stream / data */
-    OMX_AUDIO_WMAPROFILETYPE eProfile;  /**< Profile of WMA stream / data */
-    OMX_U32 nSamplingRate;    /**< Sampling rate of the source data */
-    OMX_U16 nBlockAlign;      /**< is the block alignment, or block size, in bytes of the audio codec */
-    OMX_U16 nEncodeOptions;   /**< WMA Type-specific data */
-    OMX_U32 nSuperBlockAlign; /**< WMA Type-specific data */
-} OMX_AUDIO_PARAM_WMATYPE;
-
-/** 
- * RealAudio format
- */
-typedef enum OMX_AUDIO_RAFORMATTYPE {
-    OMX_AUDIO_RAFormatUnused = 0, /**< Format unused or unknown */
-    OMX_AUDIO_RA8,                /**< RealAudio 8 codec */
-    OMX_AUDIO_RA9,                /**< RealAudio 9 codec */
-    OMX_AUDIO_RA10_AAC,           /**< MPEG-4 AAC codec for bitrates of more than 128kbps */
-    OMX_AUDIO_RA10_CODEC,         /**< RealAudio codec for bitrates less than 128 kbps */
-    OMX_AUDIO_RA10_LOSSLESS,      /**< RealAudio Lossless */
-    OMX_AUDIO_RA10_MULTICHANNEL,  /**< RealAudio Multichannel */
-    OMX_AUDIO_RA10_VOICE,         /**< RealAudio Voice for bitrates below 15 kbps */
-    OMX_AUDIO_RAFormatKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_RAFormatVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_RAFormatMax = 0x7FFFFFFF
-} OMX_AUDIO_RAFORMATTYPE;
-
-/** RA (Real Audio) params */ 
-typedef struct OMX_AUDIO_PARAM_RATYPE { 
-    OMX_U32 nSize;              /**< Size of this structure, in Bytes */ 
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version information */ 
-    OMX_U32 nPortIndex;         /**< Port that this structure applies to */ 
-    OMX_U32 nChannels;          /**< Number of channels */ 
-    OMX_U32 nSamplingRate;      /**< is the sampling rate of the source data */ 
-    OMX_U32 nBitsPerFrame;      /**< is the value for bits per frame  */ 
-    OMX_U32 nSamplePerFrame;    /**< is the value for samples per frame */ 
-    OMX_U32 nCouplingQuantBits; /**< is the number of coupling quantization bits in the stream */ 
-    OMX_U32 nCouplingStartRegion;   /**< is the coupling start region in the stream  */ 
-    OMX_U32 nNumRegions;        /**< is the number of regions value */ 
-    OMX_AUDIO_RAFORMATTYPE eFormat; /**< is the RealAudio audio format */
-} OMX_AUDIO_PARAM_RATYPE; 
-
-
-/** SBC Allocation Method Type */
-typedef enum OMX_AUDIO_SBCALLOCMETHODTYPE {
-  OMX_AUDIO_SBCAllocMethodLoudness, /**< Loudness allocation method */
-  OMX_AUDIO_SBCAllocMethodSNR,      /**< SNR allocation method */
-  OMX_AUDIO_SBCAllocMethodKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-  OMX_AUDIO_SBCAllocMethodVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-  OMX_AUDIO_SBCAllocMethodMax = 0x7FFFFFFF
-} OMX_AUDIO_SBCALLOCMETHODTYPE;
-
-
-/** SBC params */
-typedef struct OMX_AUDIO_PARAM_SBCTYPE {
-    OMX_U32 nSize;             /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;  /**< OMX specification version information */
-    OMX_U32 nPortIndex;        /**< port that this structure applies to */
-    OMX_U32 nChannels;         /**< Number of channels */
-    OMX_U32 nBitRate;          /**< Bit rate of the input data.  Use 0 for variable
-                                    rate or unknown bit rates */
-    OMX_U32 nSampleRate;       /**< Sampling rate of the source data.  Use 0 for
-                                    variable or unknown sampling rate. */
-    OMX_U32 nBlocks;           /**< Number of blocks */
-    OMX_U32 nSubbands;         /**< Number of subbands */
-    OMX_U32 nBitPool;          /**< Bitpool value */
-    OMX_BOOL bEnableBitrate;   /**< Use bitrate value instead of bitpool */
-    OMX_AUDIO_CHANNELMODETYPE eChannelMode; /**< Channel mode enumeration */
-    OMX_AUDIO_SBCALLOCMETHODTYPE eSBCAllocType;   /**< SBC Allocation method type */
-} OMX_AUDIO_PARAM_SBCTYPE;
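A hedged sketch (hComp, port, and the specific numbers are assumptions; the values shown are commonly used for Bluetooth A2DP): filling OMX_AUDIO_PARAM_SBCTYPE through the standard OMX_IndexParamAudioSbc index.

#include <OMX_Core.h>
#include <OMX_Audio.h>
#include <OMX_Index.h>

// Sketch: joint-stereo SBC, 16 blocks, 8 subbands, bitpool 53.
static OMX_ERRORTYPE setupSbc(OMX_HANDLETYPE hComp, OMX_U32 port) {
    OMX_AUDIO_PARAM_SBCTYPE sbc;
    sbc.nSize = sizeof(sbc);
    sbc.nVersion.nVersion = OMX_VERSION;
    sbc.nPortIndex = port;
    OMX_ERRORTYPE err = OMX_GetParameter(hComp, OMX_IndexParamAudioSbc, &sbc);
    if (err != OMX_ErrorNone) {
        return err;
    }
    sbc.nChannels = 2;
    sbc.nSampleRate = 44100;
    sbc.nBlocks = 16;
    sbc.nSubbands = 8;
    sbc.nBitPool = 53;                                // assumed high-quality bitpool
    sbc.bEnableBitrate = OMX_FALSE;                   // drive the encoder by bitpool, not bitrate
    sbc.eChannelMode = OMX_AUDIO_ChannelModeJointStereo;
    sbc.eSBCAllocType = OMX_AUDIO_SBCAllocMethodLoudness;
    return OMX_SetParameter(hComp, OMX_IndexParamAudioSbc, &sbc);
}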
-
-
-/** ADPCM stream format parameters */ 
-typedef struct OMX_AUDIO_PARAM_ADPCMTYPE { 
-    OMX_U32 nSize;              /**< size of the structure in bytes */ 
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version information */ 
-    OMX_U32 nPortIndex;         /**< port that this structure applies to */ 
-    OMX_U32 nChannels;          /**< Number of channels in the data stream (not 
-                                     necessarily the same as the number of channels 
-                                     to be rendered). */ 
-    OMX_U32 nBitsPerSample;     /**< Number of bits in each sample */ 
-    OMX_U32 nSampleRate;        /**< Sampling rate of the source data.  Use 0 for 
-                                    variable or unknown sampling rate. */ 
-} OMX_AUDIO_PARAM_ADPCMTYPE; 
-
-
-/** G723 rate */
-typedef enum OMX_AUDIO_G723RATE {
-    OMX_AUDIO_G723ModeUnused = 0,  /**< G723 Mode unused / unknown */
-    OMX_AUDIO_G723ModeLow,         /**< 5300 bps */
-    OMX_AUDIO_G723ModeHigh,        /**< 6300 bps */
-    OMX_AUDIO_G723ModeKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_G723ModeVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_G723ModeMax = 0x7FFFFFFF
-} OMX_AUDIO_G723RATE;
-
-
-/** G723 - Sample rate must be 8 KHz */
-typedef struct OMX_AUDIO_PARAM_G723TYPE { 
-    OMX_U32 nSize;                /**< size of the structure in bytes */ 
-    OMX_VERSIONTYPE nVersion;     /**< OMX specification version information */ 
-    OMX_U32 nPortIndex;           /**< port that this structure applies to */ 
-    OMX_U32 nChannels;            /**< Number of channels in the data stream (not 
-                                       necessarily the same as the number of channels 
-                                       to be rendered). */ 
-    OMX_BOOL bDTX;                /**< Enable Discontinuous Transmission */ 
-    OMX_AUDIO_G723RATE eBitRate;  /**< todo: Should this be moved to a config? */
-    OMX_BOOL bHiPassFilter;       /**< Enable High Pass Filter */ 
-    OMX_BOOL bPostFilter;         /**< Enable Post Filter */ 
-} OMX_AUDIO_PARAM_G723TYPE; 
-
-
-/** ITU G726 (ADPCM) rate */
-typedef enum OMX_AUDIO_G726MODE {
-    OMX_AUDIO_G726ModeUnused = 0,  /**< G726 Mode unused / unknown */
-    OMX_AUDIO_G726Mode16,          /**< 16 kbps */
-    OMX_AUDIO_G726Mode24,          /**< 24 kbps */
-    OMX_AUDIO_G726Mode32,          /**< 32 kbps, most common rate, also G721 */
-    OMX_AUDIO_G726Mode40,          /**< 40 kbps */
-    OMX_AUDIO_G726ModeKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_G726ModeVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_G726ModeMax = 0x7FFFFFFF
-} OMX_AUDIO_G726MODE;
-
-
-/** G.726 stream format parameters - must be at 8KHz */ 
-typedef struct OMX_AUDIO_PARAM_G726TYPE { 
-    OMX_U32 nSize;              /**< size of the structure in bytes */ 
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version information */ 
-    OMX_U32 nPortIndex;         /**< port that this structure applies to */ 
-    OMX_U32 nChannels;          /**< Number of channels in the data stream (not 
-                                     necessarily the same as the number of channels 
-                                     to be rendered). */ 
-     OMX_AUDIO_G726MODE eG726Mode;
-} OMX_AUDIO_PARAM_G726TYPE; 
-
-
-/** G729 coder type */
-typedef enum OMX_AUDIO_G729TYPE {
-    OMX_AUDIO_G729 = 0,           /**< ITU G.729  encoded data */
-    OMX_AUDIO_G729A,              /**< ITU G.729 annex A  encoded data */
-    OMX_AUDIO_G729B,              /**< ITU G.729 with annex B encoded data */
-    OMX_AUDIO_G729AB,             /**< ITU G.729 annexes A and B encoded data */
-    OMX_AUDIO_G729KhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_G729VendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_G729Max = 0x7FFFFFFF
-} OMX_AUDIO_G729TYPE;
-
-
-/** G729 stream format parameters - fixed 6KHz sample rate */
-typedef struct OMX_AUDIO_PARAM_G729TYPE {
-    OMX_U32 nSize;            /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
-    OMX_U32 nPortIndex;       /**< port that this structure applies to */
-    OMX_U32 nChannels;        /**< Number of channels in the data stream (not
-                                   necessarily the same as the number of channels
-                                   to be rendered). */
-    OMX_BOOL bDTX;            /**< Enable Discontinuous Transmission */
-    OMX_AUDIO_G729TYPE eBitType;
-} OMX_AUDIO_PARAM_G729TYPE;
-
-
-/** AMR Frame format */ 
-typedef enum OMX_AUDIO_AMRFRAMEFORMATTYPE { 
-    OMX_AUDIO_AMRFrameFormatConformance = 0,  /**< Frame Format is AMR Conformance 
-                                                   (Standard) Format */ 
-    OMX_AUDIO_AMRFrameFormatIF1,              /**< Frame Format is AMR Interface 
-                                                   Format 1 */ 
-    OMX_AUDIO_AMRFrameFormatIF2,              /**< Frame Format is AMR Interface 
-                                                   Format 2*/ 
-    OMX_AUDIO_AMRFrameFormatFSF,              /**< Frame Format is AMR File Storage 
-                                                   Format */ 
-    OMX_AUDIO_AMRFrameFormatRTPPayload,       /**< Frame Format is AMR Real-Time 
-                                                   Transport Protocol Payload Format */ 
-    OMX_AUDIO_AMRFrameFormatITU,              /**< Frame Format is ITU Format (added at Motorola request) */ 
-    OMX_AUDIO_AMRFrameFormatKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_AMRFrameFormatVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_AMRFrameFormatMax = 0x7FFFFFFF 
-} OMX_AUDIO_AMRFRAMEFORMATTYPE; 
-
-
-/** AMR band mode */
-typedef enum OMX_AUDIO_AMRBANDMODETYPE {
-    OMX_AUDIO_AMRBandModeUnused = 0,          /**< AMRNB Mode unused / unknown */
-    OMX_AUDIO_AMRBandModeNB0,                 /**< AMRNB Mode 0 =  4750 bps */
-    OMX_AUDIO_AMRBandModeNB1,                 /**< AMRNB Mode 1 =  5150 bps */
-    OMX_AUDIO_AMRBandModeNB2,                 /**< AMRNB Mode 2 =  5900 bps */ 
-    OMX_AUDIO_AMRBandModeNB3,                 /**< AMRNB Mode 3 =  6700 bps */
-    OMX_AUDIO_AMRBandModeNB4,                 /**< AMRNB Mode 4 =  7400 bps */
-    OMX_AUDIO_AMRBandModeNB5,                 /**< AMRNB Mode 5 =  7950 bps */
-    OMX_AUDIO_AMRBandModeNB6,                 /**< AMRNB Mode 6 = 10200 bps */
-    OMX_AUDIO_AMRBandModeNB7,                 /**< AMRNB Mode 7 = 12200 bps */
-    OMX_AUDIO_AMRBandModeWB0,                 /**< AMRWB Mode 0 =  6600 bps */
-    OMX_AUDIO_AMRBandModeWB1,                 /**< AMRWB Mode 1 =  8850 bps */
-    OMX_AUDIO_AMRBandModeWB2,                 /**< AMRWB Mode 2 = 12650 bps */ 
-    OMX_AUDIO_AMRBandModeWB3,                 /**< AMRWB Mode 3 = 14250 bps */ 
-    OMX_AUDIO_AMRBandModeWB4,                 /**< AMRWB Mode 4 = 15850 bps */
-    OMX_AUDIO_AMRBandModeWB5,                 /**< AMRWB Mode 5 = 18250 bps */
-    OMX_AUDIO_AMRBandModeWB6,                 /**< AMRWB Mode 6 = 19850 bps */
-    OMX_AUDIO_AMRBandModeWB7,                 /**< AMRWB Mode 7 = 23050 bps */
-    OMX_AUDIO_AMRBandModeWB8,                 /**< AMRWB Mode 8 = 23850 bps */      
-    OMX_AUDIO_AMRBandModeKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_AMRBandModeVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_AMRBandModeMax = 0x7FFFFFFF
-} OMX_AUDIO_AMRBANDMODETYPE;
-     
-
-/** AMR Discontinuous Transmission mode */ 
-typedef enum OMX_AUDIO_AMRDTXMODETYPE { 
-    OMX_AUDIO_AMRDTXModeOff = 0,        /**< AMR Discontinuous Transmission Mode is disabled */ 
-    OMX_AUDIO_AMRDTXModeOnVAD1,         /**< AMR Discontinuous Transmission Mode using 
-                                             Voice Activity Detector 1 (VAD1) is enabled */ 
-    OMX_AUDIO_AMRDTXModeOnVAD2,         /**< AMR Discontinuous Transmission Mode using 
-                                             Voice Activity Detector 2 (VAD2) is enabled */       
-    OMX_AUDIO_AMRDTXModeOnAuto,         /**< The codec will automatically select between 
-                                             Off, VAD1 or VAD2 modes */ 
-
-    OMX_AUDIO_AMRDTXasEFR,             /**< DTX as EFR instead of AMR standard (3GPP 26.101, frame type =8,9,10) */
-
-    OMX_AUDIO_AMRDTXModeKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_AMRDTXModeVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_AMRDTXModeMax = 0x7FFFFFFF 
-} OMX_AUDIO_AMRDTXMODETYPE; 
- 
-
-/** AMR params */
-typedef struct OMX_AUDIO_PARAM_AMRTYPE {
-    OMX_U32 nSize;                          /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;               /**< OMX specification version information */
-    OMX_U32 nPortIndex;                     /**< port that this structure applies to */
-    OMX_U32 nChannels;                      /**< Number of channels */
-    OMX_U32 nBitRate;                       /**< Bit rate read only field */
-    OMX_AUDIO_AMRBANDMODETYPE eAMRBandMode; /**< AMR Band Mode enumeration */ 
-    OMX_AUDIO_AMRDTXMODETYPE  eAMRDTXMode;  /**< AMR DTX Mode enumeration */
-    OMX_AUDIO_AMRFRAMEFORMATTYPE eAMRFrameFormat; /**< AMR frame format enumeration */
-} OMX_AUDIO_PARAM_AMRTYPE;
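A hedged sketch (hComp and port are placeholders): selecting the 12.2 kbps AMR-NB mode in file-storage framing with OMX_AUDIO_PARAM_AMRTYPE through the standard OMX_IndexParamAudioAmr index.

#include <OMX_Core.h>
#include <OMX_Audio.h>
#include <OMX_Index.h>

// Sketch: mono AMR-NB at 12200 bps (band mode NB7), DTX off, FSF framing.
static OMX_ERRORTYPE setupAmrNb(OMX_HANDLETYPE hComp, OMX_U32 port) {
    OMX_AUDIO_PARAM_AMRTYPE amr;
    amr.nSize = sizeof(amr);
    amr.nVersion.nVersion = OMX_VERSION;
    amr.nPortIndex = port;
    OMX_ERRORTYPE err = OMX_GetParameter(hComp, OMX_IndexParamAudioAmr, &amr);
    if (err != OMX_ErrorNone) {
        return err;
    }
    amr.nChannels = 1;
    amr.eAMRBandMode = OMX_AUDIO_AMRBandModeNB7;      // 12200 bps, per the table above
    amr.eAMRDTXMode = OMX_AUDIO_AMRDTXModeOff;
    amr.eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
    return OMX_SetParameter(hComp, OMX_IndexParamAudioAmr, &amr);
}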
-
-
-/** GSM_FR (ETSI 06.10, 3GPP 46.010) stream format parameters */
-typedef struct OMX_AUDIO_PARAM_GSMFRTYPE {
-    OMX_U32 nSize;            /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
-    OMX_U32 nPortIndex;       /**< port that this structure applies to */
-    OMX_BOOL bDTX;            /**< Enable Discontinuous Transmission */
-    OMX_BOOL bHiPassFilter;   /**< Enable High Pass Filter */
-} OMX_AUDIO_PARAM_GSMFRTYPE;
-
-
-/** GSM-HR (ETSI 06.20, 3GPP 46.020) stream format parameters */
-typedef struct OMX_AUDIO_PARAM_GSMHRTYPE {
-    OMX_U32 nSize;            /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
-    OMX_U32 nPortIndex;       /**< port that this structure applies to */
-    OMX_BOOL bDTX;            /**< Enable Discontinuous Transmission */
-    OMX_BOOL bHiPassFilter;   /**< Enable High Pass Filter */
-} OMX_AUDIO_PARAM_GSMHRTYPE;
-
-
-/** GSM-EFR (ETSI 06.60, 3GPP 46.060) stream format parameters */
-typedef struct OMX_AUDIO_PARAM_GSMEFRTYPE {
-    OMX_U32 nSize;            /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
-    OMX_U32 nPortIndex;       /**< port that this structure applies to */
-    OMX_BOOL bDTX;            /**< Enable Discontinuous Transmission */
-    OMX_BOOL bHiPassFilter;   /**< Enable High Pass Filter */
-} OMX_AUDIO_PARAM_GSMEFRTYPE;
-
-
-/** TDMA FR (TIA/EIA-136-420, VSELP 7.95kbps coder) stream format parameters */
-typedef struct OMX_AUDIO_PARAM_TDMAFRTYPE {
-    OMX_U32 nSize;                /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;     /**< OMX specification version information */
-    OMX_U32 nPortIndex;           /**< port that this structure applies to */
-    OMX_U32 nChannels;            /**< Number of channels in the data stream (not
-                                       necessarily the same as the number of channels
-                                       to be rendered). */
-    OMX_BOOL bDTX;                /**< Enable Discontinuous Transmission */
-    OMX_BOOL bHiPassFilter;       /**< Enable High Pass Filter */
-} OMX_AUDIO_PARAM_TDMAFRTYPE;
-
-
-/** TDMA EFR (TIA/EIA-136-410, ACELP 7.4kbps coder) stream format parameters */
-typedef struct OMX_AUDIO_PARAM_TDMAEFRTYPE {
-    OMX_U32 nSize;                /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;     /**< OMX specification version information */
-    OMX_U32 nPortIndex;           /**< port that this structure applies to */
-    OMX_U32 nChannels;            /**< Number of channels in the data stream (not
-                                       necessarily the same as the number of channels
-                                       to be rendered). */
-    OMX_BOOL bDTX;                /**< Enable Discontinuous Transmission */
-    OMX_BOOL bHiPassFilter;       /**< Enable High Pass Filter */
-} OMX_AUDIO_PARAM_TDMAEFRTYPE;
-
-
-/** PDC FR ( RCR-27, VSELP 6.7kbps coder) stream format parameters */
-typedef struct OMX_AUDIO_PARAM_PDCFRTYPE {
-    OMX_U32 nSize;                /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;     /**< OMX specification version information */
-    OMX_U32 nPortIndex;           /**< port that this structure applies to */
-    OMX_U32 nChannels;            /**< Number of channels in the data stream (not
-                                       necessarily the same as the number of channels
-                                       to be rendered). */
-    OMX_BOOL bDTX;                /**< Enable Discontinuous Transmission */
-    OMX_BOOL bHiPassFilter;       /**< Enable High Pass Filter */
-} OMX_AUDIO_PARAM_PDCFRTYPE;
-
-
-/** PDC EFR ( RCR-27, ACELP 6.7kbps coder) stream format parameters */
-typedef struct OMX_AUDIO_PARAM_PDCEFRTYPE {
-    OMX_U32 nSize;                /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;     /**< OMX specification version information */
-    OMX_U32 nPortIndex;           /**< port that this structure applies to */
-    OMX_U32 nChannels;            /**< Number of channels in the data stream (not
-                                       necessarily the same as the number of channels
-                                       to be rendered). */
-    OMX_BOOL bDTX;                /**< Enable Discontinuous Transmission */
-    OMX_BOOL bHiPassFilter;       /**< Enable High Pass Filter */
-} OMX_AUDIO_PARAM_PDCEFRTYPE;
-
-/** PDC HR ( RCR-27, PSI-CELP 3.45kbps coder) stream format parameters */
-typedef struct OMX_AUDIO_PARAM_PDCHRTYPE {
-    OMX_U32 nSize;                /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;     /**< OMX specification version information */
-    OMX_U32 nPortIndex;           /**< port that this structure applies to */
-    OMX_U32 nChannels;            /**< Number of channels in the data stream (not
-                                       necessarily the same as the number of channels
-                                       to be rendered). */
-    OMX_BOOL bDTX;                /**< Enable Discontinuous Transmission */
-    OMX_BOOL bHiPassFilter;       /**< Enable High Pass Filter */
-} OMX_AUDIO_PARAM_PDCHRTYPE;
-
-
-/** CDMA Rate types */
-typedef enum OMX_AUDIO_CDMARATETYPE {
-    OMX_AUDIO_CDMARateBlank = 0,          /**< CDMA encoded frame is blank */
-    OMX_AUDIO_CDMARateFull,               /**< CDMA encoded frame in full rate */
-    OMX_AUDIO_CDMARateHalf,               /**< CDMA encoded frame in half rate */
-    OMX_AUDIO_CDMARateQuarter,            /**< CDMA encoded frame in quarter rate */
-    OMX_AUDIO_CDMARateEighth,             /**< CDMA encoded frame in eighth rate (DTX)*/
-    OMX_AUDIO_CDMARateErasure,            /**< CDMA erasure frame */
-    OMX_AUDIO_CDMARateKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_CDMARateVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_CDMARateMax = 0x7FFFFFFF
-} OMX_AUDIO_CDMARATETYPE;
-
-
-/** QCELP8 (TIA/EIA-96, up to 8kbps coder) stream format parameters */
-typedef struct OMX_AUDIO_PARAM_QCELP8TYPE {
-    OMX_U32 nSize;                /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;     /**< OMX specification version information */
-    OMX_U32 nPortIndex;           /**< port that this structure applies to */
-    OMX_U32 nChannels;            /**< Number of channels in the data stream (not
-                                       necessarily the same as the number of channels
-                                       to be rendered). */
-    OMX_U32 nBitRate;             /**< Bit rate of the input data.  Use 0 for variable
-                                       rate or unknown bit rates */
-    OMX_AUDIO_CDMARATETYPE eCDMARate; /**< Frame rate */
-    OMX_U32 nMinBitRate;          /**< minimal rate for the encoder = 1,2,3,4, default = 1 */
-    OMX_U32 nMaxBitRate;          /**< maximal rate for the encoder = 1,2,3,4, default = 4 */
-} OMX_AUDIO_PARAM_QCELP8TYPE;
-
-
-/** QCELP13 ( CDMA, EIA/TIA-733, 13.3kbps coder) stream format parameters */
-typedef struct OMX_AUDIO_PARAM_QCELP13TYPE {
-    OMX_U32 nSize;                /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;     /**< OMX specification version information */
-    OMX_U32 nPortIndex;           /**< port that this structure applies to */
-    OMX_U32 nChannels;            /**< Number of channels in the data stream (not
-                                       necessarily the same as the number of channels
-                                       to be rendered). */
-    OMX_AUDIO_CDMARATETYPE eCDMARate; /**< Frame rate */
-    OMX_U32 nMinBitRate;          /**< minimal rate for the encoder = 1,2,3,4, default = 1 */
-    OMX_U32 nMaxBitRate;          /**< maximal rate for the encoder = 1,2,3,4, default = 4 */
-} OMX_AUDIO_PARAM_QCELP13TYPE;
-
-
-/** EVRC ( CDMA, EIA/TIA-127, RCELP up to 8.55kbps coder) stream format parameters */
-typedef struct OMX_AUDIO_PARAM_EVRCTYPE {
-    OMX_U32 nSize;                /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;     /**< OMX specification version information */
-    OMX_U32 nPortIndex;           /**< port that this structure applies to */
-    OMX_U32 nChannels;            /**< Number of channels in the data stream (not
-                                       necessarily the same as the number of channels
-                                       to be rendered). */
-    OMX_AUDIO_CDMARATETYPE eCDMARate; /**< actual Frame rate */
-    OMX_BOOL bRATE_REDUCon;       /**< RATE_REDUCtion is requested for this frame */
-    OMX_U32 nMinBitRate;          /**< minimal rate for the encoder = 1,2,3,4, default = 1 */
-    OMX_U32 nMaxBitRate;          /**< maximal rate for the encoder = 1,2,3,4, default = 4 */
-    OMX_BOOL bHiPassFilter;       /**< Enable encoder's High Pass Filter */
-    OMX_BOOL bNoiseSuppressor;    /**< Enable encoder's noise suppressor pre-processing */
-    OMX_BOOL bPostFilter;         /**< Enable decoder's post Filter */
-} OMX_AUDIO_PARAM_EVRCTYPE;
-
-
-/** SMV ( up to 8.55kbps coder) stream format parameters */
-typedef struct OMX_AUDIO_PARAM_SMVTYPE {
-    OMX_U32 nSize;                /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;     /**< OMX specification version information */
-    OMX_U32 nPortIndex;           /**< port that this structure applies to */
-    OMX_U32 nChannels;            /**< Number of channels in the data stream (not
-                                       necessarily the same as the number of channels
-                                       to be rendered). */
-    OMX_AUDIO_CDMARATETYPE eCDMARate; /**< Frame rate */
-    OMX_BOOL bRATE_REDUCon;           /**< RATE_REDUCtion is requested for this frame */
-    OMX_U32 nMinBitRate;          /**< minimal rate for the encoder = 1,2,3,4, default = 1 ??*/
-    OMX_U32 nMaxBitRate;          /**< maximal rate for the encoder = 1,2,3,4, default = 4 ??*/
-    OMX_BOOL bHiPassFilter;       /**< Enable encoder's High Pass Filter ??*/
-    OMX_BOOL bNoiseSuppressor;    /**< Enable encoder's noise suppressor pre-processing */
-    OMX_BOOL bPostFilter;         /**< Enable decoder's post Filter ??*/
-} OMX_AUDIO_PARAM_SMVTYPE;
-
-
-/** MIDI Format 
- * @ingroup midi
- */
-typedef enum OMX_AUDIO_MIDIFORMATTYPE
-{
-    OMX_AUDIO_MIDIFormatUnknown = 0, /**< MIDI Format unknown or don't care */
-    OMX_AUDIO_MIDIFormatSMF0,        /**< Standard MIDI File Type 0 */
-    OMX_AUDIO_MIDIFormatSMF1,        /**< Standard MIDI File Type 1 */
-    OMX_AUDIO_MIDIFormatSMF2,        /**< Standard MIDI File Type 2 */
-    OMX_AUDIO_MIDIFormatSPMIDI,      /**< SP-MIDI */
-    OMX_AUDIO_MIDIFormatXMF0,        /**< eXtensible Music Format type 0 */
-    OMX_AUDIO_MIDIFormatXMF1,        /**< eXtensible Music Format type 1 */
-    OMX_AUDIO_MIDIFormatMobileXMF,   /**< Mobile XMF (eXtensible Music Format type 2) */
-    OMX_AUDIO_MIDIFormatKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_MIDIFormatVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_MIDIFormatMax = 0x7FFFFFFF
-} OMX_AUDIO_MIDIFORMATTYPE;
-
-
-/** MIDI params 
- * @ingroup midi
- */
-typedef struct OMX_AUDIO_PARAM_MIDITYPE {
-    OMX_U32 nSize;                 /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;      /**< OMX specification version information */
-    OMX_U32 nPortIndex;            /**< port that this structure applies to */
-    OMX_U32 nFileSize;             /**< size of the MIDI file in bytes, when the entire 
-                                        MIDI file is passed in, otherwise if 0x0, the MIDI data 
-                                        is merged and streamed (instead of passed as an 
-                                        entire MIDI file) */
-    OMX_BU32 sMaxPolyphony;        /**< Specifies the maximum simultaneous polyphonic 
-                                        voices. A value of zero indicates that the default 
-                                        polyphony of the device is used  */                                    
-    OMX_BOOL bLoadDefaultSound;    /**< Whether to load default sound 
-                                        bank at initialization */
-    OMX_AUDIO_MIDIFORMATTYPE eMidiFormat; /**< Version of the MIDI file */                                                                           
-} OMX_AUDIO_PARAM_MIDITYPE;
-
-
-/** Type of the MIDI sound bank 
- * @ingroup midi
- */
-typedef enum OMX_AUDIO_MIDISOUNDBANKTYPE {
-    OMX_AUDIO_MIDISoundBankUnused = 0,           /**< unused/unknown soundbank type */
-    OMX_AUDIO_MIDISoundBankDLS1,                 /**< DLS version 1 */
-    OMX_AUDIO_MIDISoundBankDLS2,                 /**< DLS version 2 */
-    OMX_AUDIO_MIDISoundBankMobileDLSBase,        /**< Mobile DLS, using the base functionality */
-    OMX_AUDIO_MIDISoundBankMobileDLSPlusOptions, /**< Mobile DLS, using the specification-defined optional feature set */
-    OMX_AUDIO_MIDISoundBankKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_MIDISoundBankVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_MIDISoundBankMax = 0x7FFFFFFF
-} OMX_AUDIO_MIDISOUNDBANKTYPE;
-
-
-/** Bank Layout describes how bank MSB & LSB are used in the DLS instrument definitions sound bank 
- * @ingroup midi
- */
-typedef enum OMX_AUDIO_MIDISOUNDBANKLAYOUTTYPE {
-   OMX_AUDIO_MIDISoundBankLayoutUnused = 0,   /**< unused/unknown soundbank type */
-   OMX_AUDIO_MIDISoundBankLayoutGM,           /**< GS layout (based on bank MSB 0x00) */
-   OMX_AUDIO_MIDISoundBankLayoutGM2,          /**< General MIDI 2 layout (using MSB 0x78/0x79, LSB 0x00) */
-   OMX_AUDIO_MIDISoundBankLayoutUser,         /**< Does not conform to any bank numbering standards */
-   OMX_AUDIO_MIDISoundBankLayoutKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-   OMX_AUDIO_MIDISoundBankLayoutVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-   OMX_AUDIO_MIDISoundBankLayoutMax = 0x7FFFFFFF
-} OMX_AUDIO_MIDISOUNDBANKLAYOUTTYPE;
-
-
-/** MIDI params to load/unload user soundbank 
- * @ingroup midi
- */
-typedef struct OMX_AUDIO_PARAM_MIDILOADUSERSOUNDTYPE {
-    OMX_U32 nSize;            /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
-    OMX_U32 nPortIndex;       /**< port that this structure applies to */
-    OMX_U32 nDLSIndex;        /**< DLS file index to be loaded */
-    OMX_U32 nDLSSize;         /**< Size in bytes */
-    OMX_PTR pDLSData;         /**< Pointer to DLS file data */
-    OMX_AUDIO_MIDISOUNDBANKTYPE eMidiSoundBank;   /**< Midi sound bank type enumeration */
-    OMX_AUDIO_MIDISOUNDBANKLAYOUTTYPE eMidiSoundBankLayout; /**< Midi sound bank layout enumeration */
-} OMX_AUDIO_PARAM_MIDILOADUSERSOUNDTYPE;
-
-
-/** Structure for Live MIDI events and MIP messages. 
- * (MIP = Maximum Instantaneous Polyphony; part of the SP-MIDI standard.) 
- * @ingroup midi
- */
-typedef struct OMX_AUDIO_CONFIG_MIDIIMMEDIATEEVENTTYPE {
-    OMX_U32 nSize;            /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
-    OMX_U32 nPortIndex;       /**< Port that this structure applies to */
-    OMX_U32 nMidiEventSize;   /**< Size of immediate MIDI events or MIP message in bytes  */
-    OMX_U8 nMidiEvents[1];    /**< MIDI event array to be rendered immediately, or an
-                                   array for the MIP message buffer, where the size is 
-                                   indicated by nMidiEventSize */
-} OMX_AUDIO_CONFIG_MIDIIMMEDIATEEVENTTYPE;
-
-
-/** MIDI sound bank/ program pair in a given channel 
- * @ingroup midi
- */
-typedef struct OMX_AUDIO_CONFIG_MIDISOUNDBANKPROGRAMTYPE {
-    OMX_U32 nSize;              /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version information */
-    OMX_U32 nPortIndex;         /**< Port that this structure applies to */
-    OMX_U32 nChannel;           /**< Valid channel values range from 1 to 16 */
-    OMX_U16 nIDProgram;         /**< Valid program ID range is 1 to 128 */
-    OMX_U16 nIDSoundBank;       /**< Sound bank ID */
-    OMX_U32 nUserSoundBankIndex;/**< User soundbank index, easier to access soundbanks 
-                                     by index if multiple banks are present */
-} OMX_AUDIO_CONFIG_MIDISOUNDBANKPROGRAMTYPE;
-
-
-/** MIDI control 
- * @ingroup midi
- */
-typedef struct OMX_AUDIO_CONFIG_MIDICONTROLTYPE {
-    OMX_U32 nSize;                /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;     /**< OMX specification version information */
-    OMX_U32 nPortIndex;           /**< port that this structure applies to */
-    OMX_BS32 sPitchTransposition; /**< Pitch transposition in semitones, stored as Q22.10 
-                                       format based on JAVA MMAPI (JSR-135) requirement */
-    OMX_BU32 sPlayBackRate;       /**< Relative playback rate, stored as Q14.17 fixed-point
-                                       number based on JSR-135 requirement */
-    OMX_BU32 sTempo ;             /**< Tempo in beats per minute (BPM), stored as Q22.10 
-                                       fixed-point number based on JSR-135 requirement */
-    OMX_U32 nMaxPolyphony;        /**< Specifies the maximum simultaneous polyphonic 
-                                       voices. A value of zero indicates that the default 
-                                       polyphony of the device is used  */
-    OMX_U32 nNumRepeat;           /**< Number of times to repeat playback */
-    OMX_U32 nStopTime;            /**< Time in milliseconds to indicate when playback 
-                                       will stop automatically.  Set to zero if not used */
-    OMX_U16 nChannelMuteMask;     /**< 16 bit mask for channel mute status */
-    OMX_U16 nChannelSoloMask;     /**< 16 bit mask for channel solo status */
-    OMX_U32 nTrack0031MuteMask;   /**< 32 bit mask for track mute status. Note: This is for tracks 0-31 */
-    OMX_U32 nTrack3263MuteMask;   /**< 32 bit mask for track mute status. Note: This is for tracks 32-63 */
-    OMX_U32 nTrack0031SoloMask;   /**< 32 bit mask for track solo status. Note: This is for tracks 0-31 */
-    OMX_U32 nTrack3263SoloMask;   /**< 32 bit mask for track solo status. Note: This is for tracks 32-63 */
-
-} OMX_AUDIO_CONFIG_MIDICONTROLTYPE;
-
-
-/** MIDI Playback States 
- * @ingroup midi
- */
-typedef enum OMX_AUDIO_MIDIPLAYBACKSTATETYPE {
-  OMX_AUDIO_MIDIPlayBackStateUnknown = 0,      /**< Unknown state or state does not map to 
-                                                   other defined states */
-  OMX_AUDIO_MIDIPlayBackStateClosedEngaged,    /**< No MIDI resource is currently open. 
-                                                    The MIDI engine is currently processing 
-                                                    MIDI events. */
-  OMX_AUDIO_MIDIPlayBackStateParsing,          /**< A MIDI resource is open and is being 
-                                                    primed. The MIDI engine is currently 
-                                                    processing MIDI events. */
-  OMX_AUDIO_MIDIPlayBackStateOpenEngaged,      /**< A MIDI resource is open and primed but 
-                                                    not playing. The MIDI engine is currently
-                                                    processing MIDI events. The transition to
-                                                    this state is only possible from the 
-                                                    OMX_AUDIO_MIDIPlayBackStatePlaying state,
-                                                    when the 'playback head' reaches the end
-                                                    of media data or the playback stops due
-                                                    to stop time set.*/
-  OMX_AUDIO_MIDIPlayBackStatePlaying,          /**< A MIDI resource is open and currently
-                                                    playing. The MIDI engine is currently
-                                                    processing MIDI events.*/
-  OMX_AUDIO_MIDIPlayBackStatePlayingPartially, /**< Best-effort playback due to SP-MIDI/DLS
-                                                    resource constraints */
-  OMX_AUDIO_MIDIPlayBackStatePlayingSilently,  /**< Due to system resource constraints and
-                                                    SP-MIDI content constraints, there is
-                                                    no audible MIDI content during playback
-                                                    currently. The situation may change if
-                                                    resources are freed later.*/
-  OMX_AUDIO_MIDIPlayBackStateKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-  OMX_AUDIO_MIDIPlayBackStateVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-  OMX_AUDIO_MIDIPlayBackStateMax = 0x7FFFFFFF
-} OMX_AUDIO_MIDIPLAYBACKSTATETYPE;
-
-
-/** MIDI status 
- * @ingroup midi
- */
-typedef struct OMX_AUDIO_CONFIG_MIDISTATUSTYPE {
-    OMX_U32 nSize;              /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version information */
-    OMX_U32 nPortIndex;         /**< port that this structure applies to */
-    OMX_U16 nNumTracks;         /**< Number of MIDI tracks in the file, read only field. 
-                                     NOTE: May not return a meaningful value until the entire 
-                                     file is parsed and buffered.  */
-    OMX_U32 nDuration;          /**< The length of the currently open MIDI resource 
-                                     in milliseconds. NOTE: May not return a meaningful value 
-                                     until the entire file is parsed and buffered.  */  
-    OMX_U32 nPosition;          /**< Current Position of the MIDI resource being played 
-                                     in milliseconds */
-    OMX_BOOL bVibra;            /**< Does Vibra track exist? NOTE: May not return a meaningful 
-                                     value until the entire file is parsed and buffered. */
-    OMX_U32 nNumMetaEvents;     /**< Total number of MIDI Meta Events in the currently 
-                                     open MIDI resource. NOTE: May not return a meaningful value 
-                                     until the entire file is parsed and buffered.  */
-    OMX_U32 nNumActiveVoices;   /**< Number of active voices in the currently playing 
-                                     MIDI resource. NOTE: May not return a meaningful value until 
-                                     the entire file is parsed and buffered. */
-    OMX_AUDIO_MIDIPLAYBACKSTATETYPE eMIDIPlayBackState;  /**< MIDI playback state enumeration, read only field */
-} OMX_AUDIO_CONFIG_MIDISTATUSTYPE;
-
-
-/** MIDI Meta Event structure one per Meta Event.
- *  MIDI Meta Events are like audio metadata, except that they are interspersed 
- *  with the MIDI content throughout the file and are not localized in the header. 
- *  As such, it is necessary to retrieve information about these Meta Events from 
- *  the engine, as it encounters these Meta Events within the MIDI content. 
- *  For example, SMF files can have up to 14 types of MIDI Meta Events (copyright, 
- *  author, default tempo, etc.) scattered throughout the file. 
- *  @ingroup midi
- */
-typedef struct OMX_AUDIO_CONFIG_MIDIMETAEVENTTYPE{ 
-    OMX_U32 nSize;            /**< size of the structure in bytes */ 
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */ 
-    OMX_U32 nPortIndex;       /**< port that this structure applies to */ 
-    OMX_U32 nIndex;           /**< Index of Meta Event */ 
-    OMX_U8 nMetaEventType;    /**< Meta Event Type, 7bits (i.e. 0 - 127) */ 
-    OMX_U32 nMetaEventSize;   /**< size of the Meta Event in bytes */ 
-    OMX_U32 nTrack;           /**< track number for the meta event */
-    OMX_U32 nPosition;        /**< Position of the meta-event in milliseconds */
-} OMX_AUDIO_CONFIG_MIDIMETAEVENTTYPE; 
-
-
-/** MIDI Meta Event Data structure - one per Meta Event. 
- * @ingroup midi
- */ 
-typedef struct OMX_AUDIO_CONFIG_MIDIMETAEVENTDATATYPE{ 
-    OMX_U32 nSize;            /**< size of the structure in bytes */ 
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */ 
-    OMX_U32 nPortIndex;       /**< port that this structure applies to */ 
-    OMX_U32 nIndex;           /**< Index of Meta Event */ 
-    OMX_U32 nMetaEventSize;   /**< size of the Meta Event in bytes */ 
-    OMX_U8 nData[1];          /**< array of one or more bytes of meta data 
-                                   as indicated by the nMetaEventSize field */ 
-} OMX_AUDIO_CONFIG__MIDIMETAEVENTDATATYPE; 
-
-
-/** Audio Volume adjustment for a port */
-typedef struct OMX_AUDIO_CONFIG_VOLUMETYPE {
-    OMX_U32 nSize;              /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version information */
-    OMX_U32 nPortIndex;         /**< Port index indicating which port to 
-                                     set.  Select the input port to set 
-                                     just that port's volume.  Select the 
-                                     output port to adjust the master 
-                                     volume. */
-    OMX_BOOL bLinear;           /**< Is the volume to be set in linear (0..100)
-                                     or logarithmic scale (mB)? */
-    OMX_BS32 sVolume;           /**< Volume linear setting in the 0..100 range, OR
-                                     Volume logarithmic setting for this port.  The values
-                                     for volume are in mB (millibels = 1/100 dB) relative
-                                     to a gain of 1 (e.g. the output is the same as the 
-                                     input level).  Values are in mB from nMax 
-                                     (maximum volume) to nMin mB (typically negative).
-                                     Since the volume is "voltage"
-                                     and not a "power", it takes a setting of
-                                     -600 mB to decrease the volume by 1/2.  If
-                                     a component cannot accurately set the 
-                                     volume to the requested value, it must
-                                     set the volume to the closest value BELOW
-                                     the requested value.  When getting the
-                                     volume setting, the current actual volume
-                                     must be returned. */
-} OMX_AUDIO_CONFIG_VOLUMETYPE;
-
-
-/** Audio Volume adjustment for a channel */
-typedef struct OMX_AUDIO_CONFIG_CHANNELVOLUMETYPE {
-    OMX_U32 nSize;              /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version information */
-    OMX_U32 nPortIndex;         /**< Port index indicating which port to 
-                                     set.  Select the input port to set 
-                                     just that port's volume.  Select the 
-                                     output port to adjust the master 
-                                     volume. */
-    OMX_U32 nChannel;           /**< channel to select from 0 to N-1, 
-                                     using OMX_ALL to apply volume settings
-                                     to all channels */
-    OMX_BOOL bLinear;           /**< Is the volume to be set in linear (0..100) or
-                                     logarithmic scale (mB)? */
-    OMX_BS32 sVolume;           /**< Volume linear setting in the 0..100 range, OR
-                                     Volume logarithmic setting for this port.  
-                                     The values for volume are in mB 
-                                     (millibels = 1/100 dB) relative to a gain
-                                     of 1 (e.g. the output is the same as the 
-                                     input level).  Values are in mB from nMax 
-                                     (maximum volume) to nMin mB (typically negative).  
-                                     Since the volume is "voltage"
-                                     and not a "power", it takes a setting of
-                                     -600 mB to decrease the volume by 1/2.  If
-                                     a component cannot accurately set the 
-                                     volume to the requested value, it must
-                                     set the volume to the closest value BELOW
-                                     the requested value.  When getting the
-                                     volume setting, the current actual volume
-                                     must be returned. */
-    OMX_BOOL bIsMIDI;           /**< TRUE if nChannel refers to a MIDI channel,
-                                     FALSE otherwise */
-} OMX_AUDIO_CONFIG_CHANNELVOLUMETYPE;
-
-
-/** Audio balance setting */
-typedef struct OMX_AUDIO_CONFIG_BALANCETYPE {
-    OMX_U32 nSize;              /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version information */
-    OMX_U32 nPortIndex;         /**< Port index indicating which port to 
-                                     set.  Select the input port to set 
-                                     just that port's balance.  Select the 
-                                     output port to adjust the master 
-                                     balance. */
-    OMX_S32 nBalance;           /**< balance setting for this port 
-                                     (-100 to 100, where -100 indicates
-                                     all left and 100 indicates all right) */
-} OMX_AUDIO_CONFIG_BALANCETYPE;
-
-
-/** Audio Port mute */
-typedef struct OMX_AUDIO_CONFIG_MUTETYPE {
-    OMX_U32 nSize;              /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version information */
-    OMX_U32 nPortIndex;         /**< Port index indicating which port to 
-                                     set.  Select the input port to set 
-                                     just that port's mute.  Select the 
-                                     output port to adjust the master 
-                                     mute. */
-    OMX_BOOL bMute;             /**< Mute setting for this port */
-} OMX_AUDIO_CONFIG_MUTETYPE;
-
-
-/** Audio Channel mute */
-typedef struct OMX_AUDIO_CONFIG_CHANNELMUTETYPE {
-    OMX_U32 nSize;              /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version information */
-    OMX_U32 nPortIndex;         /**< port that this structure applies to */
-    OMX_U32 nChannel;           /**< channel to select from 0 to N-1, 
-                                     using OMX_ALL to apply mute settings
-                                     to all channels */
-    OMX_BOOL bMute;             /**< Mute setting for this channel */
-    OMX_BOOL bIsMIDI;           /**< TRUE if nChannel refers to a MIDI channel,
-                                     FALSE otherwise */ 
-} OMX_AUDIO_CONFIG_CHANNELMUTETYPE;
-
-
-
-/** Enable / Disable for loudness control, which boosts bass and to a 
- *  smaller extent high end frequencies to compensate for hearing
- *  ability at the extreme ends of the audio spectrum
- */ 
-typedef struct OMX_AUDIO_CONFIG_LOUDNESSTYPE {
-    OMX_U32 nSize;             /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;  /**< OMX specification version information */
-    OMX_U32 nPortIndex;        /**< port that this structure applies to */
-    OMX_BOOL bLoudness;        /**< Enable/disable for loudness */
-} OMX_AUDIO_CONFIG_LOUDNESSTYPE;
-
-
-/** Enable / Disable for bass, which controls low frequencies
- */ 
-typedef struct OMX_AUDIO_CONFIG_BASSTYPE {
-    OMX_U32 nSize;             /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;  /**< OMX specification version information */
-    OMX_U32 nPortIndex;        /**< port that this structure applies to */
-    OMX_BOOL bEnable;          /**< Enable/disable for bass control */
-    OMX_S32 nBass;             /**< bass setting for the port, as a 
-                                    continuous value from -100 to 100  
-                                    (0 means no change in bass level)*/
-} OMX_AUDIO_CONFIG_BASSTYPE;
-
-
-/** Enable / Disable for treble, which controls high-frequency tones
- */ 
-typedef struct OMX_AUDIO_CONFIG_TREBLETYPE {
-    OMX_U32 nSize;             /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;  /**< OMX specification version information */
-    OMX_U32 nPortIndex;        /**< port that this structure applies to */
-    OMX_BOOL bEnable;          /**< Enable/disable for treble control */
-    OMX_S32  nTreble;          /**< treble setting for the port, as a
-                                    continuous value from -100 to 100  
-                                    (0 means no change in treble level) */
-} OMX_AUDIO_CONFIG_TREBLETYPE;
-
-
-/** An equalizer is typically used for two reasons: to compensate for a
- *  sub-optimal frequency response of a system to make it sound more natural,
- *  or to intentionally add some unnatural coloring to the sound as an effect.
- *  @ingroup effects
- */
-typedef struct OMX_AUDIO_CONFIG_EQUALIZERTYPE {
-    OMX_U32 nSize;             /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;  /**< OMX specification version information */
-    OMX_U32 nPortIndex;        /**< port that this structure applies to */
-    OMX_BOOL bEnable;          /**< Enable/disable for equalizer */
-    OMX_BU32 sBandIndex;       /**< Band number to be set.  Upper Limit is 
-                                    N-1, where N is the number of bands, lower limit is 0 */
-    OMX_BU32 sCenterFreq;      /**< Center frequencies in Hz.  This is a
-                                    read only element and is used to determine 
-                                    the lower, center and upper frequency of 
-                                    this band.  */
-    OMX_BS32 sBandLevel;       /**< band level in millibels */
-} OMX_AUDIO_CONFIG_EQUALIZERTYPE;
-
-
-/** Stereo widening mode type 
- * @ingroup effects
- */ 
-typedef enum OMX_AUDIO_STEREOWIDENINGTYPE {
-    OMX_AUDIO_StereoWideningHeadphones,    /**< Stereo widening for headphones */
-    OMX_AUDIO_StereoWideningLoudspeakers,  /**< Stereo widening for closely spaced loudspeakers */
-    OMX_AUDIO_StereoWideningKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_AUDIO_StereoWideningVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_AUDIO_StereoWideningMax = 0x7FFFFFFF
-} OMX_AUDIO_STEREOWIDENINGTYPE;
-
-
-/** Control for stereo widening, which is a special 2-channel
- *  case of the audio virtualizer effect. For example, for 5.1-channel 
- *  output, it translates to virtual surround sound. 
- * @ingroup effects
- */ 
-typedef struct OMX_AUDIO_CONFIG_STEREOWIDENINGTYPE {
-    OMX_U32 nSize;             /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;  /**< OMX specification version information */
-    OMX_U32 nPortIndex;        /**< port that this structure applies to */
-    OMX_BOOL bEnable;          /**< Enable/disable for stereo widening control */
-    OMX_AUDIO_STEREOWIDENINGTYPE eWideningType; /**< Stereo widening algorithm type */
-    OMX_U32  nStereoWidening;  /**< stereo widening setting for the port,
-                                    as a continuous value from 0 to 100  */
-} OMX_AUDIO_CONFIG_STEREOWIDENINGTYPE;
-
-
-/** The chorus effect (or ``choralizer'') is any signal processor which makes
- *  one sound source (such as a voice) sound like many such sources singing 
- *  (or playing) in unison. Since performance in unison is never exact, chorus 
- *  effects simulate this by making independently modified copies of the input 
- *  signal. Modifications may include (1) delay, (2) frequency shift, and 
- *  (3) amplitude modulation.
- * @ingroup effects
- */
-typedef struct OMX_AUDIO_CONFIG_CHORUSTYPE {
-    OMX_U32 nSize;             /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;  /**< OMX specification version information */
-    OMX_U32 nPortIndex;        /**< port that this structure applies to */
-    OMX_BOOL bEnable;          /**< Enable/disable for chorus */
-    OMX_BU32 sDelay;           /**< average delay in milliseconds */
-    OMX_BU32 sModulationRate;  /**< rate of modulation in millihertz */
-    OMX_U32 nModulationDepth;  /**< depth of modulation as a percentage of 
-                                    delay (i.e. 0 to 100) */
-    OMX_BU32 nFeedback;        /**< Feedback from chorus output to input in percentage */
-} OMX_AUDIO_CONFIG_CHORUSTYPE;
-
-
-/** Reverberation is part of the reflected sound that follows the early 
- *  reflections. In a typical room, this consists of a dense succession of 
- *  echoes whose energy decays exponentially. The reverberation effect structure 
- *  as defined here includes both (early) reflections as well as (late) reverberations. 
- * @ingroup effects
- */
-typedef struct OMX_AUDIO_CONFIG_REVERBERATIONTYPE {
-    OMX_U32 nSize;                /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;     /**< OMX specification version information */
-    OMX_U32 nPortIndex;           /**< port that this structure applies to */
-    OMX_BOOL bEnable;             /**< Enable/disable for reverberation control */
-    OMX_BS32 sRoomLevel;          /**< Intensity level for the whole room effect 
-                                       (i.e. both early reflections and late 
-                                       reverberation) in millibels */
-    OMX_BS32 sRoomHighFreqLevel;  /**< Attenuation at high frequencies
-                                       relative to the intensity at low
-                                       frequencies in millibels */
-    OMX_BS32 sReflectionsLevel;   /**< Intensity level of early reflections
-                                       (relative to room value), in millibels */
-    OMX_BU32 sReflectionsDelay;   /**< Delay time of the first reflection relative 
-                                       to the direct path, in milliseconds */
-    OMX_BS32 sReverbLevel;        /**< Intensity level of late reverberation
-                                       relative to room level, in millibels */
-    OMX_BU32 sReverbDelay;        /**< Time delay from the first early reflection 
-                                       to the beginning of the late reverberation 
-                                       section, in milliseconds */
-    OMX_BU32 sDecayTime;          /**< Late reverberation decay time at low
-                                       frequencies, in milliseconds */
-    OMX_BU32 nDecayHighFreqRatio; /**< Ratio of high frequency decay time relative 
-                                       to low frequency decay time in percent  */
-    OMX_U32 nDensity;             /**< Modal density in the late reverberation decay,
-                                       in percent (i.e. 0 - 100) */
-    OMX_U32 nDiffusion;           /**< Echo density in the late reverberation decay,
-                                       in percent (i.e. 0 - 100) */
-    OMX_BU32 sReferenceHighFreq;  /**< Reference high frequency in Hertz. This is 
-                                       the frequency used as the reference for all 
-                                       the high-frequency settings above */
-
-} OMX_AUDIO_CONFIG_REVERBERATIONTYPE;
-
-
-/** Possible settings for the Echo Cancelation structure to use 
- * @ingroup effects
- */
-typedef enum OMX_AUDIO_ECHOCANTYPE {
-   OMX_AUDIO_EchoCanOff = 0,    /**< Echo Cancellation is disabled */
-   OMX_AUDIO_EchoCanNormal,     /**< Echo Cancellation normal operation - 
-                                     echo from plastics and face */
-   OMX_AUDIO_EchoCanHFree,      /**< Echo Cancellation optimized for 
-                                     Hands Free operation */
-   OMX_AUDIO_EchoCanCarKit,    /**< Echo Cancellation optimized for 
-                                     Car Kit (longer echo) */
-   OMX_AUDIO_EchoCanKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-   OMX_AUDIO_EchoCanVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-   OMX_AUDIO_EchoCanMax = 0x7FFFFFFF
-} OMX_AUDIO_ECHOCANTYPE;
-
-
-/** Enable / Disable for echo cancelation, which removes undesired echoes
- *  from the audio
- * @ingroup effects
- */ 
-typedef struct OMX_AUDIO_CONFIG_ECHOCANCELATIONTYPE {
-    OMX_U32 nSize;             /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;  /**< OMX specification version information */
-    OMX_U32 nPortIndex;        /**< port that this structure applies to */
-    OMX_AUDIO_ECHOCANTYPE eEchoCancelation; /**< Echo cancelation settings */
-} OMX_AUDIO_CONFIG_ECHOCANCELATIONTYPE;
-
-
-/** Enable / Disable for noise reduction, which removes undesired noise from
- * the audio
- * @ingroup effects
- */ 
-typedef struct OMX_AUDIO_CONFIG_NOISEREDUCTIONTYPE {
-    OMX_U32 nSize;             /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;  /**< OMX specification version information */
-    OMX_U32 nPortIndex;        /**< port that this structure applies to */
-    OMX_BOOL bNoiseReduction;  /**< Enable/disable for noise reduction */
-} OMX_AUDIO_CONFIG_NOISEREDUCTIONTYPE;
-
-/** @} */
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif
-/* File EOF */
-
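For reference, the mB volume scale documented for OMX_AUDIO_CONFIG_VOLUMETYPE above can be exercised with a few lines of C against these headers. The snippet below is only an illustrative sketch: the helper name is made up, and the use of OMX_SetConfig with OMX_IndexConfigAudioVolume is assumed from the standard IL core API rather than taken from this change.

#include <string.h>
#include <OMX_Core.h>
#include <OMX_Audio.h>

/* Illustrative sketch: request -600 mB (-6 dB, i.e. half the "voltage")
 * as the master volume on an audio component's output port. */
static OMX_ERRORTYPE set_master_volume_6dB_down(OMX_HANDLETYPE hComp, OMX_U32 nOutputPort)
{
    OMX_AUDIO_CONFIG_VOLUMETYPE vol;
    memset(&vol, 0, sizeof(vol));
    vol.nSize = sizeof(vol);              /* every OMX struct carries its own size */
    vol.nVersion.s.nVersionMajor = 1;     /* OpenMAX IL 1.1.x */
    vol.nVersion.s.nVersionMinor = 1;
    vol.nPortIndex = nOutputPort;         /* output port == master volume */
    vol.bLinear = OMX_FALSE;              /* use the logarithmic (mB) scale */
    vol.sVolume.nValue = -600;            /* -600 mB = -6 dB */
    return OMX_SetConfig(hComp, OMX_IndexConfigAudioVolume, &vol);
}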
diff --git a/include/media/stagefright/openmax/OMX_Component.h b/include/media/stagefright/openmax/OMX_Component.h
deleted file mode 100644
index b5b784e..0000000
--- a/include/media/stagefright/openmax/OMX_Component.h
+++ /dev/null
@@ -1,596 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/*
- * Copyright (c) 2008 The Khronos Group Inc. 
- * 
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject
- * to the following conditions: 
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software. 
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- *
- */
-
-/** OMX_Component.h - OpenMax IL version 1.1.2
- *  The OMX_Component header file contains the definitions used to define
- *  the public interface of a component.  This header file is intended to
- *  be used by both the application and the component.
- */
-
-#ifndef OMX_Component_h
-#define OMX_Component_h
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-
-
-/* Each OMX header must include all required header files to allow the
- *  header to compile without errors.  The includes below are required
- *  for this header file to compile successfully 
- */
-
-#include <OMX_Audio.h>
-#include <OMX_Video.h>
-#include <OMX_Image.h>
-#include <OMX_Other.h>
-
-/** @ingroup comp */
-typedef enum OMX_PORTDOMAINTYPE { 
-    OMX_PortDomainAudio, 
-    OMX_PortDomainVideo, 
-    OMX_PortDomainImage, 
-    OMX_PortDomainOther,
-    OMX_PortDomainKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_PortDomainVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_PortDomainMax = 0x7ffffff
-} OMX_PORTDOMAINTYPE;
-
-/** @ingroup comp */
-typedef struct OMX_PARAM_PORTDEFINITIONTYPE {
-    OMX_U32 nSize;                 /**< Size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;      /**< OMX specification version information */
-    OMX_U32 nPortIndex;            /**< Port number the structure applies to */
-    OMX_DIRTYPE eDir;              /**< Direction (input or output) of this port */
-    OMX_U32 nBufferCountActual;    /**< The actual number of buffers allocated on this port */
-    OMX_U32 nBufferCountMin;       /**< The minimum number of buffers this port requires */
-    OMX_U32 nBufferSize;           /**< Size, in bytes, for buffers to be used for this channel */
-    OMX_BOOL bEnabled;             /**< Ports default to enabled and are enabled/disabled by
-                                        OMX_CommandPortEnable/OMX_CommandPortDisable.
-                                        When disabled a port is unpopulated. A disabled port
-                                        is not populated with buffers on a transition to IDLE. */
-    OMX_BOOL bPopulated;           /**< Port is populated with all of its buffers as indicated by
-                                        nBufferCountActual. A disabled port is always unpopulated. 
-                                        An enabled port is populated on a transition to OMX_StateIdle
-                                        and unpopulated on a transition to loaded. */
-    OMX_PORTDOMAINTYPE eDomain;    /**< Domain of the port. Determines the contents of metadata below. */
-    union {
-        OMX_AUDIO_PORTDEFINITIONTYPE audio;
-        OMX_VIDEO_PORTDEFINITIONTYPE video;
-        OMX_IMAGE_PORTDEFINITIONTYPE image;
-        OMX_OTHER_PORTDEFINITIONTYPE other;
-    } format;
-    OMX_BOOL bBuffersContiguous;
-    OMX_U32 nBufferAlignment;
-} OMX_PARAM_PORTDEFINITIONTYPE;
-
-/** @ingroup comp */
-typedef struct OMX_PARAM_U32TYPE { 
-    OMX_U32 nSize;                    /**< Size of this structure, in Bytes */ 
-    OMX_VERSIONTYPE nVersion;         /**< OMX specification version information */ 
-    OMX_U32 nPortIndex;               /**< port that this structure applies to */ 
-    OMX_U32 nU32;                     /**< U32 value */
-} OMX_PARAM_U32TYPE;
-
-/** @ingroup rpm */
-typedef enum OMX_SUSPENSIONPOLICYTYPE {
-    OMX_SuspensionDisabled, /**< No suspension; v1.0 behavior */
-    OMX_SuspensionEnabled,  /**< Suspension allowed */   
-    OMX_SuspensionPolicyKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_SuspensionPolicyStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_SuspensionPolicyMax = 0x7fffffff
-} OMX_SUSPENSIONPOLICYTYPE;
-
-/** @ingroup rpm */
-typedef struct OMX_PARAM_SUSPENSIONPOLICYTYPE {
-    OMX_U32 nSize;                  
-    OMX_VERSIONTYPE nVersion;        
-    OMX_SUSPENSIONPOLICYTYPE ePolicy;
-} OMX_PARAM_SUSPENSIONPOLICYTYPE;
-
-/** @ingroup rpm */
-typedef enum OMX_SUSPENSIONTYPE {
-    OMX_NotSuspended, /**< component is not suspended */
-    OMX_Suspended,    /**< component is suspended */
-    OMX_SuspensionKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_SuspensionVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_SuspendMax = 0x7FFFFFFF
-} OMX_SUSPENSIONTYPE;
-
-/** @ingroup rpm */
-typedef struct OMX_PARAM_SUSPENSIONTYPE {
-    OMX_U32 nSize;                  
-    OMX_VERSIONTYPE nVersion;       
-    OMX_SUSPENSIONTYPE eType;             
-} OMX_PARAM_SUSPENSIONTYPE ;
-
-typedef struct OMX_CONFIG_BOOLEANTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_BOOL bEnabled;    
-} OMX_CONFIG_BOOLEANTYPE;
-
-/* Parameter specifying the content uri to use. */
-/** @ingroup cp */
-typedef struct OMX_PARAM_CONTENTURITYPE
-{
-    OMX_U32 nSize;                      /**< size of the structure in bytes, including
-                                             actual URI name */
-    OMX_VERSIONTYPE nVersion;           /**< OMX specification version information */
-    OMX_U8 contentURI[1];               /**< The URI name */
-} OMX_PARAM_CONTENTURITYPE;
-
-/* Parameter specifying the pipe to use. */
-/** @ingroup cp */
-typedef struct OMX_PARAM_CONTENTPIPETYPE
-{
-    OMX_U32 nSize;              /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version information */
-    OMX_HANDLETYPE hPipe;       /**< The pipe handle*/
-} OMX_PARAM_CONTENTPIPETYPE;
-
-/** @ingroup rpm */
-typedef struct OMX_RESOURCECONCEALMENTTYPE {
-    OMX_U32 nSize;             /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;  /**< OMX specification version information */
-    OMX_BOOL bResourceConcealmentForbidden; /**< disallow the use of resource concealment 
-                                            methods (like degrading algorithm quality to 
-                                            lower resource consumption or functional bypass) 
-                                            on a component as a resolution to resource conflicts. */
-} OMX_RESOURCECONCEALMENTTYPE;
-
-
-/** @ingroup metadata */
-typedef enum OMX_METADATACHARSETTYPE {
-    OMX_MetadataCharsetUnknown = 0,
-    OMX_MetadataCharsetASCII,
-    OMX_MetadataCharsetBinary,
-    OMX_MetadataCharsetCodePage1252,
-    OMX_MetadataCharsetUTF8,
-    OMX_MetadataCharsetJavaConformantUTF8,
-    OMX_MetadataCharsetUTF7,
-    OMX_MetadataCharsetImapUTF7,
-    OMX_MetadataCharsetUTF16LE, 
-    OMX_MetadataCharsetUTF16BE,
-    OMX_MetadataCharsetGB12345,
-    OMX_MetadataCharsetHZGB2312,
-    OMX_MetadataCharsetGB2312,
-    OMX_MetadataCharsetGB18030,
-    OMX_MetadataCharsetGBK,
-    OMX_MetadataCharsetBig5,
-    OMX_MetadataCharsetISO88591,
-    OMX_MetadataCharsetISO88592,
-    OMX_MetadataCharsetISO88593,
-    OMX_MetadataCharsetISO88594,
-    OMX_MetadataCharsetISO88595,
-    OMX_MetadataCharsetISO88596,
-    OMX_MetadataCharsetISO88597,
-    OMX_MetadataCharsetISO88598,
-    OMX_MetadataCharsetISO88599,
-    OMX_MetadataCharsetISO885910,
-    OMX_MetadataCharsetISO885913,
-    OMX_MetadataCharsetISO885914,
-    OMX_MetadataCharsetISO885915,
-    OMX_MetadataCharsetShiftJIS,
-    OMX_MetadataCharsetISO2022JP,
-    OMX_MetadataCharsetISO2022JP1,
-    OMX_MetadataCharsetISOEUCJP,
-    OMX_MetadataCharsetSMS7Bit,
-    OMX_MetadataCharsetKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_MetadataCharsetVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_MetadataCharsetTypeMax= 0x7FFFFFFF
-} OMX_METADATACHARSETTYPE;
-
-/** @ingroup metadata */
-typedef enum OMX_METADATASCOPETYPE
-{
-    OMX_MetadataScopeAllLevels,
-    OMX_MetadataScopeTopLevel,
-    OMX_MetadataScopePortLevel,
-    OMX_MetadataScopeNodeLevel,
-    OMX_MetadataScopeKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_MetadataScopeVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_MetadataScopeTypeMax = 0x7fffffff
-} OMX_METADATASCOPETYPE;
-
-/** @ingroup metadata */
-typedef enum OMX_METADATASEARCHMODETYPE
-{
-    OMX_MetadataSearchValueSizeByIndex,
-    OMX_MetadataSearchItemByIndex,
-    OMX_MetadataSearchNextItemByKey,
-    OMX_MetadataSearchKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_MetadataSearchVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_MetadataSearchTypeMax = 0x7fffffff
-} OMX_METADATASEARCHMODETYPE;
-/** @ingroup metadata */
-typedef struct OMX_CONFIG_METADATAITEMCOUNTTYPE
-{
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_METADATASCOPETYPE eScopeMode;
-    OMX_U32 nScopeSpecifier;
-    OMX_U32 nMetadataItemCount;
-} OMX_CONFIG_METADATAITEMCOUNTTYPE;
-
-/** @ingroup metadata */
-typedef struct OMX_CONFIG_METADATAITEMTYPE
-{
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_METADATASCOPETYPE eScopeMode;
-    OMX_U32 nScopeSpecifier;
-    OMX_U32 nMetadataItemIndex;  
-    OMX_METADATASEARCHMODETYPE eSearchMode;
-    OMX_METADATACHARSETTYPE eKeyCharset;
-    OMX_U8 nKeySizeUsed;
-    OMX_U8 nKey[128];
-    OMX_METADATACHARSETTYPE eValueCharset;
-    OMX_STRING sLanguageCountry;
-    OMX_U32 nValueMaxSize;
-    OMX_U32 nValueSizeUsed;
-    OMX_U8 nValue[1];
-} OMX_CONFIG_METADATAITEMTYPE;
-
-/* @ingroup metadata */
-typedef struct OMX_CONFIG_CONTAINERNODECOUNTTYPE
-{
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_BOOL bAllKeys;
-    OMX_U32 nParentNodeID;
-    OMX_U32 nNumNodes;
-} OMX_CONFIG_CONTAINERNODECOUNTTYPE;
-
-/** @ingroup metadata */
-typedef struct OMX_CONFIG_CONTAINERNODEIDTYPE
-{
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_BOOL bAllKeys;
-    OMX_U32 nParentNodeID;
-    OMX_U32 nNodeIndex; 
-    OMX_U32 nNodeID; 
-    OMX_STRING cNodeName;
-    OMX_BOOL bIsLeafType;
-} OMX_CONFIG_CONTAINERNODEIDTYPE;
-
-/** @ingroup metadata */
-typedef struct OMX_PARAM_METADATAFILTERTYPE 
-{ 
-    OMX_U32 nSize; 
-    OMX_VERSIONTYPE nVersion; 
-    OMX_BOOL bAllKeys;	/* if true then this structure refers to all keys and 
-                         * the three key fields below are ignored */
-    OMX_METADATACHARSETTYPE eKeyCharset;
-    OMX_U32 nKeySizeUsed; 
-    OMX_U8   nKey [128]; 
-    OMX_U32 nLanguageCountrySizeUsed;
-    OMX_U8 nLanguageCountry[128];
-    OMX_BOOL bEnabled;	/* if true then key is part of filter (e.g. 
-                         * retained for query later). If false then
-                         * key is not part of filter */
-} OMX_PARAM_METADATAFILTERTYPE; 
-
-/** The OMX_HANDLETYPE structure defines the component handle.  The component 
- *  handle is used to access all of the component's public methods and also
- *  contains pointers to the component's private data area.  The component
- *  handle is initialized by the OMX core (with help from the component)
- *  during the process of loading the component.  After the component is
- *  successfully loaded, the application can safely access any of the
- *  component's public functions (although some may return an error because
- *  the state is inappropriate for the access).
- * 
- *  @ingroup comp
- */
-typedef struct OMX_COMPONENTTYPE
-{
-    /** The size of this structure, in bytes.  It is the responsibility
-        of the allocator of this structure to fill in this value.  Since
-        this structure is allocated by the GetHandle function, this
-        function will fill in this value. */
-    OMX_U32 nSize;
-
-    /** nVersion is the version of the OMX specification that the structure 
-        is built against.  It is the responsibility of the creator of this 
-        structure to initialize this value and every user of this structure 
-        should verify that it knows how to use the exact version of 
-        this structure found herein. */
-    OMX_VERSIONTYPE nVersion;
-
-    /** pComponentPrivate is a pointer to the component private data area.  
-        This member is allocated and initialized by the component when the 
-        component is first loaded.  The application should not access this 
-        data area. */
-    OMX_PTR pComponentPrivate;
-
-    /** pApplicationPrivate is a pointer that is a parameter to the 
-        OMX_GetHandle method, and contains an application private value 
-        provided by the IL client.  This application private data is 
-        returned to the IL Client by OMX in all callbacks */
-    OMX_PTR pApplicationPrivate;
-
-    /** refer to OMX_GetComponentVersion in OMX_core.h or the OMX IL 
-        specification for details on the GetComponentVersion method.
-     */
-    OMX_ERRORTYPE (*GetComponentVersion)(
-            OMX_IN  OMX_HANDLETYPE hComponent,
-            OMX_OUT OMX_STRING pComponentName,
-            OMX_OUT OMX_VERSIONTYPE* pComponentVersion,
-            OMX_OUT OMX_VERSIONTYPE* pSpecVersion,
-            OMX_OUT OMX_UUIDTYPE* pComponentUUID);
-
-    /** refer to OMX_SendCommand in OMX_core.h or the OMX IL 
-        specification for details on the SendCommand method.
-     */
-    OMX_ERRORTYPE (*SendCommand)(
-            OMX_IN  OMX_HANDLETYPE hComponent,
-            OMX_IN  OMX_COMMANDTYPE Cmd,
-            OMX_IN  OMX_U32 nParam1,
-            OMX_IN  OMX_PTR pCmdData);
-
-    /** refer to OMX_GetParameter in OMX_core.h or the OMX IL 
-        specification for details on the GetParameter method.
-     */
-    OMX_ERRORTYPE (*GetParameter)(
-            OMX_IN  OMX_HANDLETYPE hComponent, 
-            OMX_IN  OMX_INDEXTYPE nParamIndex,  
-            OMX_INOUT OMX_PTR pComponentParameterStructure);
-
-
-    /** refer to OMX_SetParameter in OMX_core.h or the OMX IL 
-        specification for details on the SetParameter method.
-     */
-    OMX_ERRORTYPE (*SetParameter)(
-            OMX_IN  OMX_HANDLETYPE hComponent, 
-            OMX_IN  OMX_INDEXTYPE nIndex,
-            OMX_IN  OMX_PTR pComponentParameterStructure);
-
-
-    /** refer to OMX_GetConfig in OMX_core.h or the OMX IL 
-        specification for details on the GetConfig method.
-     */
-    OMX_ERRORTYPE (*GetConfig)(
-            OMX_IN  OMX_HANDLETYPE hComponent,
-            OMX_IN  OMX_INDEXTYPE nIndex, 
-            OMX_INOUT OMX_PTR pComponentConfigStructure);
-
-
-    /** refer to OMX_SetConfig in OMX_core.h or the OMX IL 
-        specification for details on the SetConfig method.
-     */
-    OMX_ERRORTYPE (*SetConfig)(
-            OMX_IN  OMX_HANDLETYPE hComponent,
-            OMX_IN  OMX_INDEXTYPE nIndex, 
-            OMX_IN  OMX_PTR pComponentConfigStructure);
-
-
-    /** refer to OMX_GetExtensionIndex in OMX_core.h or the OMX IL 
-        specification for details on the GetExtensionIndex method.
-     */
-    OMX_ERRORTYPE (*GetExtensionIndex)(
-            OMX_IN  OMX_HANDLETYPE hComponent,
-            OMX_IN  OMX_STRING cParameterName,
-            OMX_OUT OMX_INDEXTYPE* pIndexType);
-
-
-    /** refer to OMX_GetState in OMX_core.h or the OMX IL 
-        specification for details on the GetState method.
-     */
-    OMX_ERRORTYPE (*GetState)(
-            OMX_IN  OMX_HANDLETYPE hComponent,
-            OMX_OUT OMX_STATETYPE* pState);
-
-    
-    /** The ComponentTunnelRequest method will interact with another OMX
-        component to determine if tunneling is possible and to setup the
-        tunneling.  The return codes for this method can be used to 
-        determine if tunneling is not possible, or if tunneling is not
-        supported.  
-        
-        Base profile components (i.e. non-interop) do not support this
-        method and should return OMX_ErrorNotImplemented 
-
-        The interop profile component MUST support tunneling to another 
-        interop profile component with compatible port parameters.
-        A component may also support proprietary communication.
-        
-        If proprietary communication is supported the negotiation of 
-        proprietary communication is done outside of OMX in a vendor 
-        specific way. It is only required that the proper result be 
-        returned and the details of how the setup is done are left 
-        to the component implementation.  
-    
-        When this method is invoked and nPort is an output port, the
-        component will:
-        1.  Populate the pTunnelSetup structure with the output port's 
-            requirements and constraints for the tunnel.
-
-        When this method is invoked and nPort is an input port, the
-        component will:
-        1.  Query the necessary parameters from the output port to 
-            determine if the ports are compatible for tunneling
-        2.  If the ports are compatible, the component should store
-            the tunnel setup provided by the output port
-        3.  Determine which port (either input or output) is the buffer
-            supplier, and call OMX_SetParameter on the output port to
-            indicate this selection.
-        
-        The component will return from this call within 5 msec.
-    
-        @param [in] hComp
-            Handle of the component to be accessed.  This is the component
-            handle returned by the call to the OMX_GetHandle method.
-        @param [in] nPort
-            nPort is used to select the port on the component to be used
-            for tunneling.
-        @param [in] hTunneledComp
-            Handle of the component to tunnel with.  This is the component 
-            handle returned by the call to the OMX_GetHandle method.  When
-            this parameter is 0x0 the component should setup the port for
-            communication with the application / IL Client.
-        @param [in] nPortOutput
-            nPortOutput is used to indicate the port the component should
-            tunnel with.
-        @param [in] pTunnelSetup
-            Pointer to the tunnel setup structure.  When nPort is an output port
-            the component should populate the fields of this structure.
-            When nPort is an input port the component should review the setup
-            provided by the component with the output port.
-        @return OMX_ERRORTYPE
-            If the command successfully executes, the return code will be
-            OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-        @ingroup tun
-    */
-
-    OMX_ERRORTYPE (*ComponentTunnelRequest)(
-        OMX_IN  OMX_HANDLETYPE hComp,
-        OMX_IN  OMX_U32 nPort,
-        OMX_IN  OMX_HANDLETYPE hTunneledComp,
-        OMX_IN  OMX_U32 nTunneledPort,
-        OMX_INOUT  OMX_TUNNELSETUPTYPE* pTunnelSetup); 
-
-    /** refer to OMX_UseBuffer in OMX_core.h or the OMX IL 
-        specification for details on the UseBuffer method.
-        @ingroup buf
-     */
-    OMX_ERRORTYPE (*UseBuffer)(
-            OMX_IN OMX_HANDLETYPE hComponent,
-            OMX_INOUT OMX_BUFFERHEADERTYPE** ppBufferHdr,
-            OMX_IN OMX_U32 nPortIndex,
-            OMX_IN OMX_PTR pAppPrivate,
-            OMX_IN OMX_U32 nSizeBytes,
-            OMX_IN OMX_U8* pBuffer);
-
-    /** refer to OMX_AllocateBuffer in OMX_core.h or the OMX IL 
-        specification for details on the AllocateBuffer method.
-        @ingroup buf
-     */
-    OMX_ERRORTYPE (*AllocateBuffer)(
-            OMX_IN OMX_HANDLETYPE hComponent,
-            OMX_INOUT OMX_BUFFERHEADERTYPE** ppBuffer,
-            OMX_IN OMX_U32 nPortIndex,
-            OMX_IN OMX_PTR pAppPrivate,
-            OMX_IN OMX_U32 nSizeBytes);
-
-    /** refer to OMX_FreeBuffer in OMX_core.h or the OMX IL 
-        specification for details on the FreeBuffer method.
-        @ingroup buf
-     */
-    OMX_ERRORTYPE (*FreeBuffer)(
-            OMX_IN  OMX_HANDLETYPE hComponent,
-            OMX_IN  OMX_U32 nPortIndex,
-            OMX_IN  OMX_BUFFERHEADERTYPE* pBuffer);
-
-    /** refer to OMX_EmptyThisBuffer in OMX_core.h or the OMX IL 
-        specification for details on the EmptyThisBuffer method.
-        @ingroup buf
-     */
-    OMX_ERRORTYPE (*EmptyThisBuffer)(
-            OMX_IN  OMX_HANDLETYPE hComponent,
-            OMX_IN  OMX_BUFFERHEADERTYPE* pBuffer);
-
-    /** refer to OMX_FillThisBuffer in OMX_core.h or the OMX IL 
-        specification for details on the FillThisBuffer method.
-        @ingroup buf
-     */
-    OMX_ERRORTYPE (*FillThisBuffer)(
-            OMX_IN  OMX_HANDLETYPE hComponent,
-            OMX_IN  OMX_BUFFERHEADERTYPE* pBuffer);
-
-    /** The SetCallbacks method is used by the core to specify the callback
-        structure from the application to the component.  This is a blocking
-        call.  The component will return from this call within 5 msec.
-        @param [in] hComponent
-            Handle of the component to be accessed.  This is the component
-            handle returned by the call to the GetHandle function.
-        @param [in] pCallbacks
-            pointer to an OMX_CALLBACKTYPE structure used to provide the 
-            callback information to the component
-        @param [in] pAppData
-            pointer to an application defined value.  It is anticipated that 
-            the application will pass a pointer to a data structure or a "this
-            pointer" in this area to allow the callback (in the application)
-            to determine the context of the call
-        @return OMX_ERRORTYPE
-            If the command successfully executes, the return code will be
-            OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-     */
-    OMX_ERRORTYPE (*SetCallbacks)(
-            OMX_IN  OMX_HANDLETYPE hComponent,
-            OMX_IN  OMX_CALLBACKTYPE* pCallbacks, 
-            OMX_IN  OMX_PTR pAppData);
-
-    /** ComponentDeInit method is used to deinitialize the component
-        providing a means to free any resources allocated at component
-        initialization.  NOTE:  After this call the component handle is
-        not valid for further use.
-        @param [in] hComponent
-            Handle of the component to be accessed.  This is the component
-            handle returned by the call to the GetHandle function.
-        @return OMX_ERRORTYPE
-            If the command successfully executes, the return code will be
-            OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-     */
-    OMX_ERRORTYPE (*ComponentDeInit)(
-            OMX_IN  OMX_HANDLETYPE hComponent);
-
-    /** @ingroup buf */
-    OMX_ERRORTYPE (*UseEGLImage)(
-            OMX_IN OMX_HANDLETYPE hComponent,
-            OMX_INOUT OMX_BUFFERHEADERTYPE** ppBufferHdr,
-            OMX_IN OMX_U32 nPortIndex,
-            OMX_IN OMX_PTR pAppPrivate,
-            OMX_IN void* eglImage);
-
-    OMX_ERRORTYPE (*ComponentRoleEnum)(
-        OMX_IN OMX_HANDLETYPE hComponent,
-		OMX_OUT OMX_U8 *cRole,
-		OMX_IN OMX_U32 nIndex);
-
-} OMX_COMPONENTTYPE;
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif
-/* File EOF */
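For reference, the OMX_COMPONENTTYPE function-pointer table above is what the core-level macros dispatch through. A minimal illustrative sketch follows; the helper name is made up, and the cast-and-call pattern mirrors what the OMX_GetState macro in OMX_Core.h expands to.

#include <OMX_Core.h>
#include <OMX_Component.h>

/* Illustrative sketch: an OMX_HANDLETYPE is a pointer to the component's
 * OMX_COMPONENTTYPE table, so a call is simply a dispatch through it. */
static OMX_ERRORTYPE query_component_state(OMX_HANDLETYPE hComponent, OMX_STATETYPE *pState)
{
    OMX_COMPONENTTYPE *pComp = (OMX_COMPONENTTYPE *)hComponent;
    return pComp->GetState(hComponent, pState);
}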
diff --git a/include/media/stagefright/openmax/OMX_ContentPipe.h b/include/media/stagefright/openmax/OMX_ContentPipe.h
deleted file mode 100644
index ee9e4db..0000000
--- a/include/media/stagefright/openmax/OMX_ContentPipe.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/*
- * Copyright (c) 2008 The Khronos Group Inc. 
- * 
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject
- * to the following conditions: 
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software. 
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- *
- */
-
-/** OMX_ContentPipe.h - OpenMax IL version 1.1.2
- *  The OMX_ContentPipe header file contains the definitions used to define
- *  the public interface for content pipes.  This header file is intended to
- *  be used by the component.
- */
-
-#ifndef OMX_CONTENTPIPE_H
-#define OMX_CONTENTPIPE_H
-
-#ifndef KD_EACCES
-/* OpenKODE error codes. CPResult values may be zero (indicating success
-   or one of the following values) */
-#define KD_EACCES (1)
-#define KD_EADDRINUSE (2)
-#define KD_EAGAIN (5)
-#define KD_EBADF (7)
-#define KD_EBUSY (8)
-#define KD_ECONNREFUSED (9)
-#define KD_ECONNRESET (10)
-#define KD_EDEADLK (11)
-#define KD_EDESTADDRREQ (12)
-#define KD_ERANGE (35)
-#define KD_EEXIST (13)
-#define KD_EFBIG (14)
-#define KD_EHOSTUNREACH (15)
-#define KD_EINVAL (17)
-#define KD_EIO (18)
-#define KD_EISCONN (20)
-#define KD_EISDIR (21)
-#define KD_EMFILE (22)
-#define KD_ENAMETOOLONG (23)
-#define KD_ENOENT (24)
-#define KD_ENOMEM (25)
-#define KD_ENOSPC (26)
-#define KD_ENOSYS (27)
-#define KD_ENOTCONN (28)
-#define KD_EPERM (33)
-#define KD_ETIMEDOUT (36)
-#define KD_EILSEQ (19)
-#endif
-
-/** Map types from OMX standard types only here so interface is as generic as possible. */
-typedef OMX_U32    CPresult;
-typedef char *     CPstring;  
-typedef void *     CPhandle;
-typedef OMX_U32    CPuint;
-typedef OMX_S32    CPint;  
-typedef char       CPbyte;  
-typedef OMX_BOOL   CPbool;
-
-/** enumeration of origin types used in the CP_PIPETYPE's SetPosition function
- * @ingroup cp
- */
-typedef enum CP_ORIGINTYPE {
-    CP_OriginBegin,      
-    CP_OriginCur,      
-    CP_OriginEnd,      
-    CP_OriginKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    CP_OriginVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    CP_OriginMax = 0X7FFFFFFF
-} CP_ORIGINTYPE;
-
-/** enumeration of content access types used in the CP_PIPETYPE's Open function
- * @ingroup cp
- */
-typedef enum CP_ACCESSTYPE {
-    CP_AccessRead,      
-    CP_AccessWrite,  
-    CP_AccessReadWrite,
-    CP_AccessKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    CP_AccessVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    CP_AccessMax = 0X7FFFFFFF
-} CP_ACCESSTYPE;
-
-/** enumeration of results returned by the CP_PIPETYPE's CheckAvailableBytes function 
- * @ingroup cp
- */
-typedef enum CP_CHECKBYTESRESULTTYPE
-{
-    CP_CheckBytesOk,                    /**< There are at least the requested number
-                                              of bytes available */
-    CP_CheckBytesNotReady,              /**< The pipe is still retrieving bytes
-                                              and presently lacks sufficient bytes.
-                                              The client will be called back when
-                                              sufficient bytes are available. */
-    CP_CheckBytesInsufficientBytes,     /**< The pipe has retrieved all bytes
-                                              but those available are less than those
-                                              requested */
-    CP_CheckBytesAtEndOfStream,         /**< The pipe has reached the end of stream
-                                              and no more bytes are available. */
-    CP_CheckBytesOutOfBuffers,          /**< All read/write buffers are currently in use. */
-    CP_CheckBytesKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    CP_CheckBytesVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    CP_CheckBytesMax = 0X7FFFFFFF
-} CP_CHECKBYTESRESULTTYPE;
-
-/** enumeration of content pipe events sent to the client callback. 
- * @ingroup cp
- */
-typedef enum CP_EVENTTYPE{
-    CP_BytesAvailable,                  /**< bytes requested in a CheckAvailableBytes call are now available */
-    CP_Overflow,                        /**< the pipe has overflowed */
-    CP_PipeDisconnected,                /**< the pipe has been disconnected */
-    CP_EventKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    CP_EventVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    CP_EventMax = 0X7FFFFFFF
-} CP_EVENTTYPE;
-
-/** content pipe definition 
- * @ingroup cp
- */
-typedef struct CP_PIPETYPE
-{
-    /** Open a content stream for reading or writing. */ 
-    CPresult (*Open)( CPhandle* hContent, CPstring szURI, CP_ACCESSTYPE eAccess );
-
-    /** Close a content stream. */ 
-    CPresult (*Close)( CPhandle hContent );
-
-    /** Create a content source and open it for writing. */ 
-    CPresult (*Create)( CPhandle *hContent, CPstring szURI );
-
-    /** Check that the specified number of bytes is available for reading or writing (depending on access type). */
-    CPresult (*CheckAvailableBytes)( CPhandle hContent, CPuint nBytesRequested, CP_CHECKBYTESRESULTTYPE *eResult );
-
-    /** Seek to certain position in the content relative to the specified origin. */
-    CPresult (*SetPosition)( CPhandle  hContent, CPint nOffset, CP_ORIGINTYPE eOrigin);
-
-    /** Retrieve the current position relative to the start of the content. */
-    CPresult (*GetPosition)( CPhandle hContent, CPuint *pPosition);
-
-    /** Retrieve data of the specified size from the content stream (advance content pointer by size of data).
-       Note: pipe client provides pointer. This function is appropriate for small high frequency reads. */
-    CPresult (*Read)( CPhandle hContent, CPbyte *pData, CPuint nSize); 
-
-    /** Retrieve a buffer allocated by the pipe that contains the requested number of bytes. 
-       Buffer contains the next block of bytes, as specified by nSize, of the content. nSize also
-       returns the size of the block actually read. The content pointer advances by the returned size.
-       Note: pipe provides pointer. This function is appropriate for large reads. The client must call 
-       ReleaseReadBuffer when done with buffer. 
-
-       In some cases the requested block may not reside in contiguous memory within the
-       pipe implementation. For instance if the pipe leverages a circular buffer then the requested 
-       block may straddle the boundary of the circular buffer. By default a pipe implementation 
-       performs a copy in this case to provide the block to the pipe client in one contiguous buffer.
-       If, however, the client sets bForbidCopy, then the pipe returns only those bytes preceding the memory 
-       boundary. Here the client may retrieve the data in segments over successive calls. */
-    CPresult (*ReadBuffer)( CPhandle hContent, CPbyte **ppBuffer, CPuint *nSize, CPbool bForbidCopy);
-
-    /** Release a buffer obtained by ReadBuffer back to the pipe. */
-    CPresult (*ReleaseReadBuffer)(CPhandle hContent, CPbyte *pBuffer);
-
-    /** Write data of the specified size to the content (advance content pointer by size of data).
-       Note: pipe client provides pointer. This function is appropriate for small high frequency writes. */
-    CPresult (*Write)( CPhandle hContent, CPbyte *data, CPuint nSize); 
-
-    /** Retrieve a buffer allocated by the pipe used to write data to the content. 
-       Client will fill buffer with output data. Note: pipe provides pointer. This function is appropriate
-       for large writes. The client must call WriteBuffer when it has filled the buffer with data. */
-    CPresult (*GetWriteBuffer)( CPhandle hContent, CPbyte **ppBuffer, CPuint nSize);
-
-    /** Deliver a buffer obtained via GetWriteBuffer to the pipe. The pipe will write
-       the contents of the buffer to the content and advance the content pointer by the size of the buffer */
-    CPresult (*WriteBuffer)( CPhandle hContent, CPbyte *pBuffer, CPuint nFilledSize);
-
-    /** Register a per-handle client callback with the content pipe. */
-    CPresult (*RegisterCallback)( CPhandle hContent, CPresult (*ClientCallback)(CP_EVENTTYPE eEvent, CPuint iParam));
-
-} CP_PIPETYPE;
-
-#endif
-
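For reference, a component would drive the CP_PIPETYPE vtable above roughly as sketched below. The pipe pointer would normally be obtained via OMX_GetContentPipe from OMX_Core.h; the URI and helper name here are purely illustrative and not part of this change.

#include <OMX_Core.h>
#include <OMX_ContentPipe.h>

/* Illustrative sketch: open a URI through a content pipe, read a small
 * client-supplied buffer, then close.  A CPresult of zero means success. */
static CPresult read_first_bytes(CP_PIPETYPE *pPipe, CPbyte *pDst, CPuint nBytes)
{
    CPhandle hContent;
    CPresult err = pPipe->Open(&hContent, (CPstring)"file:///sdcard/clip.mp4", CP_AccessRead);
    if (err != 0)
        return err;
    err = pPipe->Read(hContent, pDst, nBytes);   /* small, high-frequency read path */
    pPipe->Close(hContent);
    return err;
}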
diff --git a/include/media/stagefright/openmax/OMX_Core.h b/include/media/stagefright/openmax/OMX_Core.h
deleted file mode 100644
index 9fb0f6f..0000000
--- a/include/media/stagefright/openmax/OMX_Core.h
+++ /dev/null
@@ -1,1448 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/*
- * Copyright (c) 2008 The Khronos Group Inc. 
- * 
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject
- * to the following conditions: 
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software. 
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- *
- */
-
-/** OMX_Core.h - OpenMax IL version 1.1.2
- *  The OMX_Core header file contains the definitions used by both the
- *  application and the component to access common items.
- */
-
-#ifndef OMX_Core_h
-#define OMX_Core_h
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-
-/* Each OMX header shall include all required header files to allow the
- *  header to compile without errors.  The includes below are required
- *  for this header file to compile successfully 
- */
-
-#include <OMX_Index.h>
-
-
-/** The OMX_COMMANDTYPE enumeration is used to specify the action in the
- *  OMX_SendCommand macro.  
- *  @ingroup core
- */
-typedef enum OMX_COMMANDTYPE
-{
-    OMX_CommandStateSet,    /**< Change the component state */
-    OMX_CommandFlush,       /**< Flush the data queue(s) of a component */
-    OMX_CommandPortDisable, /**< Disable a port on a component. */
-    OMX_CommandPortEnable,  /**< Enable a port on a component. */
-    OMX_CommandMarkBuffer,  /**< Mark a component/buffer for observation */
-    OMX_CommandKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_CommandVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_CommandMax = 0X7FFFFFFF
-} OMX_COMMANDTYPE;
-
-
-
-/** The OMX_STATETYPE enumeration is used to indicate or change the component
- *  state.  This enumeration reflects the current state of the component when
- *  used with the OMX_GetState macro or becomes the parameter in a state change
- *  command when used with the OMX_SendCommand macro.
- *
- *  The component will be in the Loaded state after the component is initially
- *  loaded into memory.  In the Loaded state, the component is not allowed to
- *  allocate or hold resources other than to build its internal parameter
- *  and configuration tables.  The application will send one or more
- *  SetParameters/GetParameters and SetConfig/GetConfig commands to the
- *  component and the component will record each of these parameter and
- *  configuration changes for use later.  When the application sends the
- *  Idle command, the component will acquire the resources needed for the
- *  specified configuration and will transition to the idle state if the
- *  allocation is successful.  If the component cannot successfully
- *  transition to the idle state for any reason, the state of the component
- *  shall be fully rolled back to the Loaded state (e.g. all allocated 
- *  resources shall be released).  When the component receives the command
- *  to go to the Executing state, it shall begin processing buffers by
- *  sending all input buffers it holds to the application.  While
- *  the component is in the Idle state, the application may also send the
- *  Pause command.  If the component receives the pause command while in the
- *  Idle state, the component shall send all input buffers it holds to the 
- *  application, but shall not begin processing buffers.  This will allow the
- *  application to prefill buffers.
- * 
- *  @ingroup comp
- */
-
-typedef enum OMX_STATETYPE
-{
-    OMX_StateInvalid,      /**< component has detected that its internal data 
-                                structures are corrupted to the point that
-                                it cannot determine its state properly */
-    OMX_StateLoaded,      /**< component has been loaded but has not completed
-                                initialization.  The OMX_SetParameter macro
-                                and the OMX_GetParameter macro are the only 
-                                valid macros allowed to be sent to the 
-                                component in this state. */
-    OMX_StateIdle,        /**< component initialization has been completed
-                                successfully and the component is ready
-                                to start. */
-    OMX_StateExecuting,   /**< component has accepted the start command and
-                                is processing data (if data is available) */
-    OMX_StatePause,       /**< component has received pause command */
-    OMX_StateWaitForResources, /**< component is waiting for resources, either after 
-                                preemption or before it gets the resources requested.
-                                See specification for complete details. */
-    OMX_StateKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_StateVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_StateMax = 0X7FFFFFFF
-} OMX_STATETYPE;
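/* A minimal sketch (not part of the original header) of the lifecycle described
 * above: Loaded -> Idle -> Executing.  It assumes hComp was obtained elsewhere via
 * OMX_GetHandle and, purely for brevity, polls OMX_GetState with no timeout instead
 * of waiting for the OMX_EventCmdComplete callback as a real IL client should. */
#include <OMX_Core.h>
#include <OMX_Component.h>

static OMX_ERRORTYPE move_to_executing(OMX_HANDLETYPE hComp)
{
    OMX_STATETYPE state = OMX_StateInvalid;
    OMX_ERRORTYPE err;

    /* Loaded -> Idle: the component acquires the resources it needs here. */
    err = OMX_SendCommand(hComp, OMX_CommandStateSet, OMX_StateIdle, NULL);
    if (err != OMX_ErrorNone) return err;
    do { OMX_GetState(hComp, &state); } while (state != OMX_StateIdle);

    /* Idle -> Executing: buffer processing starts once this completes. */
    err = OMX_SendCommand(hComp, OMX_CommandStateSet, OMX_StateExecuting, NULL);
    if (err != OMX_ErrorNone) return err;
    do { OMX_GetState(hComp, &state); } while (state != OMX_StateExecuting);

    return OMX_ErrorNone;
}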
-
-/** The OMX_ERRORTYPE enumeration defines the standard OMX Errors.  These 
- *  errors should cover most of the common failure cases.  However, 
- *  vendors are free to add additional error messages of their own as 
- *  long as they follow these rules:
- *  1.  Vendor error messages shall be in the range of 0x90000000 to
- *      0x9000FFFF.
- *  2.  Vendor error messages shall be defined in a header file provided
- *      with the component.  No error messages are allowed that are
- *      not defined.
- */
-typedef enum OMX_ERRORTYPE
-{
-  OMX_ErrorNone = 0,
-
-  /** There were insufficient resources to perform the requested operation */
-  OMX_ErrorInsufficientResources = (OMX_S32) 0x80001000,
-
-  /** There was an error, but the cause of the error could not be determined */
-  OMX_ErrorUndefined = (OMX_S32) 0x80001001,
-
-  /** The component name string was not valid */
-  OMX_ErrorInvalidComponentName = (OMX_S32) 0x80001002,
-
-  /** No component with the specified name string was found */
-  OMX_ErrorComponentNotFound = (OMX_S32) 0x80001003,
-
-  /** The component specified did not have an "OMX_ComponentInit" or
-      "OMX_ComponentDeInit" entry point */
-  OMX_ErrorInvalidComponent = (OMX_S32) 0x80001004,
-
-  /** One or more parameters were not valid */
-  OMX_ErrorBadParameter = (OMX_S32) 0x80001005,
-
-  /** The requested function is not implemented */
-  OMX_ErrorNotImplemented = (OMX_S32) 0x80001006,
-
-  /** The buffer was emptied before the next buffer was ready */
-  OMX_ErrorUnderflow = (OMX_S32) 0x80001007,
-
-  /** The buffer was not available when it was needed */
-  OMX_ErrorOverflow = (OMX_S32) 0x80001008,
-
-  /** The hardware failed to respond as expected */
-  OMX_ErrorHardware = (OMX_S32) 0x80001009,
-
-  /** The component is in the state OMX_StateInvalid */
-  OMX_ErrorInvalidState = (OMX_S32) 0x8000100A,
-
-  /** Stream is found to be corrupt */
-  OMX_ErrorStreamCorrupt = (OMX_S32) 0x8000100B,
-
-  /** Ports being connected are not compatible */
-  OMX_ErrorPortsNotCompatible = (OMX_S32) 0x8000100C,
-
-  /** Resources allocated to an idle component have been
-      lost resulting in the component returning to the loaded state */
-  OMX_ErrorResourcesLost = (OMX_S32) 0x8000100D,
-
-  /** No more indices can be enumerated */
-  OMX_ErrorNoMore = (OMX_S32) 0x8000100E,
-
-  /** The component detected a version mismatch */
-  OMX_ErrorVersionMismatch = (OMX_S32) 0x8000100F,
-
-  /** The component is not ready to return data at this time */
-  OMX_ErrorNotReady = (OMX_S32) 0x80001010,
-
-  /** There was a timeout that occurred */
-  OMX_ErrorTimeout = (OMX_S32) 0x80001011,
-
-  /** This error occurs when trying to transition into the state you are already in */
-  OMX_ErrorSameState = (OMX_S32) 0x80001012,
-
-  /** Resources allocated to an executing or paused component have been 
-      preempted, causing the component to return to the idle state */
-  OMX_ErrorResourcesPreempted = (OMX_S32) 0x80001013, 
-
-  /** A non-supplier port sends this error to the IL client (via the EventHandler callback) 
-      during the allocation of buffers (on a transition from the LOADED to the IDLE state or
-      on a port restart) when it deems that it has waited an unusually long time for the supplier 
-      to send it an allocated buffer via a UseBuffer call. */
-  OMX_ErrorPortUnresponsiveDuringAllocation = (OMX_S32) 0x80001014,
-
-  /** A non-supplier port sends this error to the IL client (via the EventHandler callback) 
-      during the deallocation of buffers (on a transition from the IDLE to LOADED state or 
-      on a port stop) when it deems that it has waited an unusually long time for the supplier 
-      to request the deallocation of a buffer header via a FreeBuffer call. */
-  OMX_ErrorPortUnresponsiveDuringDeallocation = (OMX_S32) 0x80001015,
-
-  /** A supplier port sends this error to the IL client (via the EventHandler callback) 
-      during the stopping of a port (either on a transition from the IDLE to LOADED 
-      state or a port stop) when it deems that it has waited an unusually long time for 
-      the non-supplier to return a buffer via an EmptyThisBuffer or FillThisBuffer call. */
-  OMX_ErrorPortUnresponsiveDuringStop = (OMX_S32) 0x80001016,
-
-  /** Attempting a state transition that is not allowed */
-  OMX_ErrorIncorrectStateTransition = (OMX_S32) 0x80001017,
-
-  /** Attempting a command that is not allowed during the present state. */
-  OMX_ErrorIncorrectStateOperation = (OMX_S32) 0x80001018, 
-
-  /** The values encapsulated in the parameter or config structure are not supported. */
-  OMX_ErrorUnsupportedSetting = (OMX_S32) 0x80001019,
-
-  /** The parameter or config indicated by the given index is not supported. */
-  OMX_ErrorUnsupportedIndex = (OMX_S32) 0x8000101A,
-
-  /** The port index supplied is incorrect. */
-  OMX_ErrorBadPortIndex = (OMX_S32) 0x8000101B,
-
-  /** The port has lost one or more of its buffers and is thus unpopulated. */
-  OMX_ErrorPortUnpopulated = (OMX_S32) 0x8000101C,
-
-  /** Component suspended due to temporary loss of resources */
-  OMX_ErrorComponentSuspended = (OMX_S32) 0x8000101D,
-
-  /** Component suspended due to an inability to acquire dynamic resources */
-  OMX_ErrorDynamicResourcesUnavailable = (OMX_S32) 0x8000101E,
-
-  /** When macroblock error reporting is enabled, the component returns a new error
-  for every frame that has errors */
-  OMX_ErrorMbErrorsInFrame = (OMX_S32) 0x8000101F,
-
-  /** A component reports this error when it cannot parse or determine the format of an input stream. */
-  OMX_ErrorFormatNotDetected = (OMX_S32) 0x80001020, 
-
-  /** The content open operation failed. */
-  OMX_ErrorContentPipeOpenFailed = (OMX_S32) 0x80001021,
-
-  /** The content creation operation failed. */
-  OMX_ErrorContentPipeCreationFailed = (OMX_S32) 0x80001022,
-
-  /** Separate table information is being used */
-  OMX_ErrorSeperateTablesUsed = (OMX_S32) 0x80001023,
-
-  /** Tunneling is unsupported by the component*/
-  OMX_ErrorTunnelingUnsupported = (OMX_S32) 0x80001024,
-
-  OMX_ErrorKhronosExtensions = (OMX_S32)0x8F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-  OMX_ErrorVendorStartUnused = (OMX_S32)0x90000000, /**< Reserved region for introducing Vendor Extensions */
-  OMX_ErrorMax = 0x7FFFFFFF
-} OMX_ERRORTYPE;
-
-/** @ingroup core */
-typedef OMX_ERRORTYPE (* OMX_COMPONENTINITTYPE)(OMX_IN  OMX_HANDLETYPE hComponent);
-
-/** @ingroup core */
-typedef struct OMX_COMPONENTREGISTERTYPE
-{
-  const char          * pName;       /* Component name, 128 byte limit (including '\0') applies */
-  OMX_COMPONENTINITTYPE pInitialize; /* Component instance initialization function */
-} OMX_COMPONENTREGISTERTYPE;
-
-/** @ingroup core */
-extern OMX_COMPONENTREGISTERTYPE OMX_ComponentRegistered[];
-
-/** @ingroup rpm */
-typedef struct OMX_PRIORITYMGMTTYPE {
- OMX_U32 nSize;             /**< size of the structure in bytes */
- OMX_VERSIONTYPE nVersion;  /**< OMX specification version information */
- OMX_U32 nGroupPriority;            /**< Priority of the component group */
- OMX_U32 nGroupID;                  /**< ID of the component group */
-} OMX_PRIORITYMGMTTYPE;
-
-/* Component name and Role names are limited to 128 characters including the terminating '\0'. */
-#define OMX_MAX_STRINGNAME_SIZE 128
-
-/** @ingroup comp */
-typedef struct OMX_PARAM_COMPONENTROLETYPE {
-    OMX_U32 nSize;              /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version information */
-    OMX_U8 cRole[OMX_MAX_STRINGNAME_SIZE];  /**< name of standard component which defines component role */
-} OMX_PARAM_COMPONENTROLETYPE;
-
-/** End of Stream Buffer Flag: 
-  *
-  * A component sets EOS when it has no more data to emit on a particular 
-  * output port. Thus an output port shall set EOS on the last buffer it 
-  * emits. A component's determination of when an output port should 
-  * cease sending data is implementation specific.
-  * @ingroup buf
-  */
-
-#define OMX_BUFFERFLAG_EOS 0x00000001 
-
-/** Start Time Buffer Flag: 
- *
- * The source of a stream (e.g. a demux component) sets the STARTTIME
- * flag on the buffer that contains the starting timestamp for the
- * stream. The starting timestamp corresponds to the first data that
- * should be displayed at startup or after a seek.
- * The first timestamp of the stream is not necessarily the start time.
- * For instance, in the case of a seek to a particular video frame, 
- * the target frame may be an interframe. Thus the first buffer of 
- * the stream will be the intra-frame preceding the target frame and
- * the start time will occur with the target frame (with any other
- * frames required to reconstruct the target intervening).
- *
- * The STARTTIME flag is directly associated with the buffer's
- * timestamp; thus its association to buffer data and its
- * propagation is identical to the timestamp's.
- *
- * When a Sync Component client receives a buffer with the 
- * STARTTIME flag it shall perform a SetConfig on its sync port 
- * using OMX_ConfigTimeClientStartTime and passing the buffer's
- * timestamp.
- * 
- * @ingroup buf
- */
-
-#define OMX_BUFFERFLAG_STARTTIME 0x00000002
-
- 
-
-/** Decode Only Buffer Flag: 
- *
- * The source of a stream (e.g. a demux component) sets the DECODEONLY
- * flag on any buffer that shall be decoded but should not be
- * displayed. This flag is used, for instance, when a source seeks to 
- * a target interframe that requires the decode of frames preceding the 
- * target to facilitate the target's reconstruction. In this case the 
- * source would emit the frames preceding the target downstream 
- * but mark them as decode only.
- *
- * The DECODEONLY flag is associated with buffer data and propagated in a
- * manner identical to the buffer timestamp.
- *
- * A component that renders data should ignore all buffers with 
- * the DECODEONLY flag set.
- * 
- * @ingroup buf
- */
-
-#define OMX_BUFFERFLAG_DECODEONLY 0x00000004
-
-
-/* Data Corrupt Flag: This flag is set when the IL client believes the data in the associated buffer is corrupt 
- * @ingroup buf
- */
-
-#define OMX_BUFFERFLAG_DATACORRUPT 0x00000008
-
-/* End of Frame: The buffer contains exactly one end of frame and no data
- *  occurs after the end of frame. This flag is an optional hint. The absence
- *  of this flag does not imply the absence of an end of frame within the buffer. 
- * @ingroup buf
-*/
-#define OMX_BUFFERFLAG_ENDOFFRAME 0x00000010
-
-/* Sync Frame Flag: This flag is set when the buffer content contains a coded sync frame:
- *  a frame that has no dependency on any other frame information 
- *  @ingroup buf
- */
-#define OMX_BUFFERFLAG_SYNCFRAME 0x00000020
-
-/* Extra data present flag: there is extra data appended to the data stream
- * residing in the buffer 
- * @ingroup buf  
- */
-#define OMX_BUFFERFLAG_EXTRADATA 0x00000040
-
-/** Codec Config Buffer Flag: 
-* OMX_BUFFERFLAG_CODECCONFIG is an optional flag that is set by an
-* output port when all bytes in the buffer form part or all of a set of
-* codec specific configuration data.  Examples include SPS/PPS nal units
-* for OMX_VIDEO_CodingAVC or AudioSpecificConfig data for
-* OMX_AUDIO_CodingAAC.  Any component that for a given stream sets 
-* OMX_BUFFERFLAG_CODECCONFIG shall not mix codec configuration bytes
-* with frame data in the same buffer, and shall send all buffers
-* containing codec configuration bytes before any buffers containing
-* frame data that those configuration bytes describe.
-* If the stream format for a particular codec has a frame specific
-* header at the start of each frame, for example OMX_AUDIO_CodingMP3 or
-* OMX_AUDIO_CodingAAC in ADTS mode, then these shall be presented as
-* normal without setting OMX_BUFFERFLAG_CODECCONFIG.
- * @ingroup buf
- */
-#define OMX_BUFFERFLAG_CODECCONFIG 0x00000080
-
-
-
-/** @ingroup buf */
-typedef struct OMX_BUFFERHEADERTYPE
-{
-    OMX_U32 nSize;              /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version information */
-    OMX_U8* pBuffer;            /**< Pointer to actual block of memory 
-                                     that is acting as the buffer */
-    OMX_U32 nAllocLen;          /**< size of the buffer allocated, in bytes */
-    OMX_U32 nFilledLen;         /**< number of bytes currently in the 
-                                     buffer */
-    OMX_U32 nOffset;            /**< start offset of valid data in bytes from
-                                     the start of the buffer */
-    OMX_PTR pAppPrivate;        /**< pointer to any data the application
-                                     wants to associate with this buffer */
-    OMX_PTR pPlatformPrivate;   /**< pointer to any data the platform
-                                     wants to associate with this buffer */ 
-    OMX_PTR pInputPortPrivate;  /**< pointer to any data the input port
-                                     wants to associate with this buffer */
-    OMX_PTR pOutputPortPrivate; /**< pointer to any data the output port
-                                     wants to associate with this buffer */
-    OMX_HANDLETYPE hMarkTargetComponent; /**< The component that will generate a 
-                                              mark event upon processing this buffer. */
-    OMX_PTR pMarkData;          /**< Application specific data associated with 
-                                     the mark sent on a mark event to disambiguate 
-                                     this mark from others. */
-    OMX_U32 nTickCount;         /**< Optional entry that the component and
-                                     application can update with a tick count
-                                     when they access the component.  This
-                                     value should be in microseconds.  Since
-                                     this is a value relative to an arbitrary
-                                     starting point, this value cannot be used 
-                                     to determine absolute time.  This is an
-                                     optional entry and not all components
-                                     will update it.*/
- OMX_TICKS nTimeStamp;          /**< Timestamp corresponding to the sample 
-                                     starting at the first logical sample 
-                                     boundary in the buffer. Timestamps of 
-                                     successive samples within the buffer may
-                                     be inferred by adding the duration of
-                                     the preceding buffer to the timestamp
-                                     of the preceding buffer.*/
-  OMX_U32     nFlags;           /**< buffer specific flags */
-  OMX_U32 nOutputPortIndex;     /**< The index of the output port (if any) using 
-                                     this buffer */
-  OMX_U32 nInputPortIndex;      /**< The index of the input port (if any) using
-                                     this buffer */
-} OMX_BUFFERHEADERTYPE;
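/* A minimal sketch (not part of the original header) of an IL client populating a
 * buffer header before handing it to an input port, and checking the EOS flag on a
 * header returned from an output port.  The caller is assumed to guarantee that
 * nBytes <= pHdr->nAllocLen; the field values are illustrative. */
#include <string.h>
#include <OMX_Core.h>

static void prepare_input_header(OMX_BUFFERHEADERTYPE *pHdr, const OMX_U8 *pData,
                                 OMX_U32 nBytes, OMX_TICKS ts, int isLastBuffer)
{
    memcpy(pHdr->pBuffer, pData, nBytes);
    pHdr->nOffset    = 0;           /* valid data starts at the beginning of pBuffer */
    pHdr->nFilledLen = nBytes;      /* number of valid bytes in pBuffer              */
    pHdr->nTimeStamp = ts;          /* timestamp of the first logical sample         */
    pHdr->nFlags     = isLastBuffer ? OMX_BUFFERFLAG_EOS : 0;
}

static int is_end_of_stream(const OMX_BUFFERHEADERTYPE *pHdr)
{
    /* An output port sets EOS on the last buffer it emits for a stream. */
    return (pHdr->nFlags & OMX_BUFFERFLAG_EOS) != 0;
}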
-
-/** The OMX_EXTRADATATYPE enumeration is used to define the 
- * possible extra data payload types.
- * NB: this enum is binary backwards compatible with the previous
- * OMX_EXTRADATA_QUANT define.  This should be replaced with
- * OMX_ExtraDataQuantization.
- */
-typedef enum OMX_EXTRADATATYPE
-{
-   OMX_ExtraDataNone = 0,                       /**< Indicates that no more extra data sections follow */        
-   OMX_ExtraDataQuantization,                   /**< The data payload contains quantization data */
-   OMX_ExtraDataKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-   OMX_ExtraDataVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-   OMX_ExtraDataMax = 0x7FFFFFFF
-} OMX_EXTRADATATYPE;
-
-
-typedef struct OMX_OTHER_EXTRADATATYPE  {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;               
-    OMX_U32 nPortIndex;
-    OMX_EXTRADATATYPE eType;       /* Extra Data type */
-    OMX_U32 nDataSize;   /* Size of the supporting data to follow */
-    OMX_U8  data[1];     /* Supporting data hint  */
-} OMX_OTHER_EXTRADATATYPE;
-
-/** @ingroup comp */
-typedef struct OMX_PORT_PARAM_TYPE {
-    OMX_U32 nSize;              /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version information */
-    OMX_U32 nPorts;             /**< The number of ports for this component */
-    OMX_U32 nStartPortNumber;   /**< first port number for this type of port */
-} OMX_PORT_PARAM_TYPE; 
-
-/** @ingroup comp */
-typedef enum OMX_EVENTTYPE
-{
-    OMX_EventCmdComplete,         /**< component has successfully completed a command */
-    OMX_EventError,               /**< component has detected an error condition */
-    OMX_EventMark,                /**< component has detected a buffer mark */
-    OMX_EventPortSettingsChanged, /**< component reports a port settings change */
-    OMX_EventBufferFlag,          /**< component has detected an EOS */ 
-    OMX_EventResourcesAcquired,   /**< component has been granted resources and is
-                                       automatically starting the state change from
-                                       OMX_StateWaitForResources to OMX_StateIdle. */
-   OMX_EventComponentResumed,     /**< Component resumed due to reacquisition of resources */
-   OMX_EventDynamicResourcesAvailable, /**< Component has acquired previously unavailable dynamic resources */
-   OMX_EventPortFormatDetected,      /**< Component has detected a supported format. */
-   OMX_EventKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-   OMX_EventVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-   OMX_EventMax = 0x7FFFFFFF
-} OMX_EVENTTYPE;
-
-typedef struct OMX_CALLBACKTYPE
-{
-    /** The EventHandler method is used to notify the application when an
-        event of interest occurs.  Events are defined in the OMX_EVENTTYPE
-        enumeration.  Please see that enumeration for details of what will
-        be returned for each type of event. Callbacks should not return
-        an error to the component, so if an error occurs, the application 
-        shall handle it internally.  This is a blocking call.
-
-        The application should return from this call within 5 msec to avoid
-        blocking the component for an excessively long period of time.
-
-        @param hComponent
-            handle of the component to access.  This is the component
-            handle returned by the call to the GetHandle function.
-        @param pAppData
-            pointer to an application defined value that was provided in the 
-            pAppData parameter to the OMX_GetHandle method for the component.
-            This application defined value is provided so that the application 
-            can have a component specific context when receiving the callback.
-        @param eEvent
-            Event that the component wants to notify the application about.
-        @param nData1
-            nData1 will be the OMX_ERRORTYPE for an error event, an OMX_COMMANDTYPE for a
-            command complete event, and an OMX_INDEXTYPE for an OMX_EventPortSettingsChanged event.
-        @param nData2
-            nData2 will hold further information related to the event. It can be an OMX_STATETYPE for
-            an OMX_CommandStateSet command or a port index for an OMX_EventPortSettingsChanged event.
-            The default value is 0 if not used.
-        @param pEventData
-            Pointer to additional event-specific data (see spec for meaning).
-      */
-
-   OMX_ERRORTYPE (*EventHandler)(
-        OMX_IN OMX_HANDLETYPE hComponent,
-        OMX_IN OMX_PTR pAppData,
-        OMX_IN OMX_EVENTTYPE eEvent,
-        OMX_IN OMX_U32 nData1,
-        OMX_IN OMX_U32 nData2,
-        OMX_IN OMX_PTR pEventData);
-
-    /** The EmptyBufferDone method is used to return emptied buffers from an
-        input port back to the application for reuse.  This is a blocking call 
-        so the application should not attempt to refill the buffers during this
-        call, but should queue them and refill them in another thread.  There
-        is no error return, so the application shall handle any errors generated
-        internally.  
-        
-        The application should return from this call within 5 msec.
-        
-        @param hComponent
-            handle of the component to access.  This is the component
-            handle returned by the call to the GetHandle function.
-        @param pAppData
-            pointer to an application defined value that was provided in the 
-            pAppData parameter to the OMX_GetHandle method for the component.
-            This application defined value is provided so that the application 
-            can have a component specific context when receiving the callback.
-        @param pBuffer
-            pointer to an OMX_BUFFERHEADERTYPE structure allocated with UseBuffer
-            or AllocateBuffer indicating the buffer that was emptied.
-        @ingroup buf
-     */
-    OMX_ERRORTYPE (*EmptyBufferDone)(
-        OMX_IN OMX_HANDLETYPE hComponent,
-        OMX_IN OMX_PTR pAppData,
-        OMX_IN OMX_BUFFERHEADERTYPE* pBuffer);
-
-    /** The FillBufferDone method is used to return filled buffers from an
-        output port back to the application for emptying and then reuse.  
-        This is a blocking call so the application should not attempt to 
-        empty the buffers during this call, but should queue the buffers 
-        and empty them in another thread.  There is no error return, so 
-        the application shall handle any errors generated internally.  The 
-        application shall also update the buffer header to indicate the
-        number of bytes placed into the buffer.  
-
-        The application should return from this call within 5 msec.
-        
-        @param hComponent
-            handle of the component to access.  This is the component
-            handle returned by the call to the GetHandle function.
-        @param pAppData
-            pointer to an application defined value that was provided in the 
-            pAppData parameter to the OMX_GetHandle method for the component.
-            This application defined value is provided so that the application 
-            can have a component specific context when receiving the callback.
-        @param pBuffer
-            pointer to an OMX_BUFFERHEADERTYPE structure allocated with UseBuffer
-            or AllocateBuffer indicating the buffer that was filled.
-        @ingroup buf
-     */
-    OMX_ERRORTYPE (*FillBufferDone)(
-        OMX_OUT OMX_HANDLETYPE hComponent,
-        OMX_OUT OMX_PTR pAppData,
-        OMX_OUT OMX_BUFFERHEADERTYPE* pBuffer);
-
-} OMX_CALLBACKTYPE;
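/* A minimal callback-table sketch (not part of the original header).  As required
 * by the comments above, the handlers return quickly and only queue work; the
 * queue_* helpers named in the comments are hypothetical application code.  The
 * table is typically passed to OMX_GetHandle when the component is created. */
#include <OMX_Core.h>

static OMX_ERRORTYPE on_event(OMX_HANDLETYPE hComponent, OMX_PTR pAppData,
                              OMX_EVENTTYPE eEvent, OMX_U32 nData1,
                              OMX_U32 nData2, OMX_PTR pEventData)
{
    (void)hComponent; (void)pAppData; (void)nData1; (void)nData2; (void)pEventData;
    if (eEvent == OMX_EventError) {
        /* nData1 carries the OMX_ERRORTYPE; record it, never block here. */
    }
    return OMX_ErrorNone;
}

static OMX_ERRORTYPE on_empty_buffer_done(OMX_HANDLETYPE hComponent, OMX_PTR pAppData,
                                          OMX_BUFFERHEADERTYPE *pBuffer)
{
    (void)hComponent; (void)pAppData; (void)pBuffer;
    /* e.g. queue_empty_input(pBuffer): refill on another thread, not here. */
    return OMX_ErrorNone;
}

static OMX_ERRORTYPE on_fill_buffer_done(OMX_HANDLETYPE hComponent, OMX_PTR pAppData,
                                         OMX_BUFFERHEADERTYPE *pBuffer)
{
    (void)hComponent; (void)pAppData; (void)pBuffer;
    /* e.g. queue_filled_output(pBuffer): consume on another thread, not here. */
    return OMX_ErrorNone;
}

static OMX_CALLBACKTYPE g_callbacks = {
    on_event, on_empty_buffer_done, on_fill_buffer_done
};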
-
-/** The OMX_BUFFERSUPPLIERTYPE enumeration is used to dictate port supplier
-    preference when tunneling between two ports.
-    @ingroup tun buf
-*/
-typedef enum OMX_BUFFERSUPPLIERTYPE
-{
-    OMX_BufferSupplyUnspecified = 0x0, /**< port supplying the buffers is unspecified,
-                                              or don't care */
-    OMX_BufferSupplyInput,             /**< input port supplies the buffers */
-    OMX_BufferSupplyOutput,            /**< output port supplies the buffers */
-    OMX_BufferSupplyKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_BufferSupplyVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_BufferSupplyMax = 0x7FFFFFFF
-} OMX_BUFFERSUPPLIERTYPE;
-
-
-/** buffer supplier parameter 
- * @ingroup tun
- */
-typedef struct OMX_PARAM_BUFFERSUPPLIERTYPE {
-    OMX_U32 nSize; /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
-    OMX_U32 nPortIndex; /**< port that this structure applies to */
-    OMX_BUFFERSUPPLIERTYPE eBufferSupplier; /**< buffer supplier */
-} OMX_PARAM_BUFFERSUPPLIERTYPE;
-
-
-/**< indicates that an input port receiving buffers over a tunnel 
-     may not modify the data in those buffers 
-     @ingroup tun
- */
-#define OMX_PORTTUNNELFLAG_READONLY 0x00000001 
-
-
-/** The OMX_TUNNELSETUPTYPE structure is used to pass data from an output
-    port to an input port as part of the two ComponentTunnelRequest calls
-    resulting from an OMX_SetupTunnel call from the IL Client. 
-    @ingroup tun
- */   
-typedef struct OMX_TUNNELSETUPTYPE
-{
-    OMX_U32 nTunnelFlags;             /**< bit flags for tunneling */
-    OMX_BUFFERSUPPLIERTYPE eSupplier; /**< supplier preference */
-} OMX_TUNNELSETUPTYPE; 
-
-/* The OMX component header is included to enable the core to use
-   macros for calling functions in the component for OMX release 1.0.
-   Developers should not access any structures or data from within
-   the component header directly. */
-/* TO BE REMOVED - #include <OMX_Component.h> */
-
-/** GetComponentVersion will return information about the component.  
-    This is a blocking call.  This macro will go directly from the
-    application to the component (via a core macro).  The
-    component will return from this call within 5 msec.
-    @param [in] hComponent
-        handle of component to execute the command
-    @param [out] pComponentName
-        pointer to an empty string of length 128 bytes.  The component 
-        will write its name into this string.  The name will be 
-        terminated by a single zero byte.  The name of a component will 
-        be 127 bytes or less to leave room for the trailing zero byte.  
-        An example of a valid component name is "OMX.ABC.ChannelMixer\0".
-    @param [out] pComponentVersion
-        pointer to an OMX Version structure that the component will fill 
-        in.  The component will fill in a value that indicates the 
-        component version.  NOTE: the component version is NOT the same 
-        as the OMX Specification version (found in all structures).  The 
-        component version is defined by the vendor of the component and 
-        its value is entirely up to the component vendor.
-    @param [out] pSpecVersion
-        pointer to an OMX Version structure that the component will fill 
-        in.  The SpecVersion is the version of the specification that the 
-        component was built against.  Please note that this value may or 
-        may not match the structure's version.  For example, if the 
-        component was built against the 2.0 specification, but the 
-        application (which creates the structure) is built against the 
-        1.0 specification, the versions would be different.
-    @param [out] pComponentUUID
-        pointer to the UUID of the component which will be filled in by 
-        the component.  The UUID is a unique identifier that is set at 
-        RUN time for the component and is unique to each instantiation of 
-        the component.
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup comp
- */
-#define OMX_GetComponentVersion(                            \
-        hComponent,                                         \
-        pComponentName,                                     \
-        pComponentVersion,                                  \
-        pSpecVersion,                                       \
-        pComponentUUID)                                     \
-    ((OMX_COMPONENTTYPE*)hComponent)->GetComponentVersion(  \
-        hComponent,                                         \
-        pComponentName,                                     \
-        pComponentVersion,                                  \
-        pSpecVersion,                                       \
-        pComponentUUID)                 /* Macro End */
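/* Usage sketch (not part of the original header); hComp is assumed to have been
 * obtained elsewhere via OMX_GetHandle. */
#include <OMX_Core.h>
#include <OMX_Component.h>

static void query_component_version(OMX_HANDLETYPE hComp)
{
    char            name[OMX_MAX_STRINGNAME_SIZE];  /* 128 bytes incl. trailing '\0' */
    OMX_VERSIONTYPE compVer, specVer;
    OMX_UUIDTYPE    uuid;

    if (OMX_GetComponentVersion(hComp, name, &compVer, &specVer, &uuid)
            == OMX_ErrorNone) {
        /* name now holds the component name; compVer is vendor-defined, while
         * specVer is the IL specification version the component was built against. */
    }
}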
-
-
-/** Send a command to the component.  This call is a non-blocking call.
-    The component should check the parameters and then queue the command
-    to the component thread to be executed.  The component thread shall 
-    send the EventHandler() callback at the conclusion of the command. 
-    This macro will go directly from the application to the component (via
-    a core macro).  The component will return from this call within 5 msec.
-    
-    When the command is "OMX_CommandStateSet" the component will queue a
-    state transition to the new state identified in nParam.
-    
-    When the command is "OMX_CommandFlush", to flush a port's buffer queues,
-    the command will force the component to return all buffers NOT CURRENTLY 
-    BEING PROCESSED to the application, in the order in which the buffers 
-    were received.
-    
-    When the command is "OMX_CommandPortDisable" or 
-    "OMX_CommandPortEnable", the component's port (given by the value of
-    nParam) will be stopped or restarted. 
-    
-    When the command "OMX_CommandMarkBuffer" is used to mark a buffer, the
-    pCmdData will point to an OMX_MARKTYPE structure containing the component
-    handle of the component to examine the buffer chain for the mark.  nParam
-    contains the index of the port on which the buffer mark is applied.
-
-    See the specification text for more details. 
-    
-    @param [in] hComponent
-        handle of component to execute the command
-    @param [in] Cmd
-        Command for the component to execute
-    @param [in] nParam
-        Parameter for the command to be executed.  When Cmd has the value 
-        OMX_CommandStateSet, value is a member of OMX_STATETYPE.  When Cmd has 
-        the value OMX_CommandFlush, value of nParam indicates which port(s) 
-        to flush: -1 is used to flush all ports; a single port index will 
-        only flush that port.  When Cmd has the value "OMX_CommandPortDisable"
-        or "OMX_CommandPortEnable", the component's port is given by 
-        the value of nParam.  When Cmd has the value "OMX_CommandMarkBuffer",
-        the component's port is given by the value of nParam.
-    @param [in] pCmdData
-        Parameter pointing to the OMX_MARKTYPE structure when Cmd has the value
-        "OMX_CommandMarkBuffer".     
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup comp
- */
-#define OMX_SendCommand(                                    \
-         hComponent,                                        \
-         Cmd,                                               \
-         nParam,                                            \
-         pCmdData)                                          \
-     ((OMX_COMPONENTTYPE*)hComponent)->SendCommand(         \
-         hComponent,                                        \
-         Cmd,                                               \
-         nParam,                                            \
-         pCmdData)                          /* Macro End */
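/* A short sketch (not part of the original header) of the non-state-set commands
 * described above.  Port index 0 is illustrative; -1 means "all ports" for a flush.
 * Completion of each command is reported later through OMX_EventCmdComplete. */
#include <OMX_Core.h>
#include <OMX_Component.h>

static void flush_then_disable(OMX_HANDLETYPE hComp)
{
    /* Return every buffer not currently being processed, on all ports. */
    OMX_SendCommand(hComp, OMX_CommandFlush, (OMX_U32)-1, NULL);

    /* Stop port 0; its buffers are returned/freed before the command completes. */
    OMX_SendCommand(hComp, OMX_CommandPortDisable, 0, NULL);
}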
-
-
-/** The OMX_GetParameter macro will get one of the current parameter 
-    settings from the component.  This macro cannot be invoked when 
-    the component is in the OMX_StateInvalid state.  The nParamIndex
-    parameter is used to indicate which structure is being requested from
-    the component.  The application shall allocate the correct structure 
-    and shall fill in the structure size and version information before 
-    invoking this macro.  When the parameter applies to a port, the
-    caller shall fill in the appropriate nPortIndex value indicating the
-    port on which the parameter applies. If the component has not had 
-    any settings changed, then the component should return a set of 
-    valid DEFAULT  parameters for the component.  This is a blocking 
-    call.  
-    
-    The component should return from this call within 20 msec.
-    
-    @param [in] hComponent
-        Handle of the component to be accessed.  This is the component
-        handle returned by the call to the OMX_GetHandle function.
-    @param [in] nParamIndex
-        Index of the structure to be filled.  This value is from the
-        OMX_INDEXTYPE enumeration.
-    @param [in,out] pComponentParameterStructure
-        Pointer to application allocated structure to be filled by the 
-        component.
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup comp
- */
-#define OMX_GetParameter(                                   \
-        hComponent,                                         \
-        nParamIndex,                                        \
-        pComponentParameterStructure)                        \
-    ((OMX_COMPONENTTYPE*)hComponent)->GetParameter(         \
-        hComponent,                                         \
-        nParamIndex,                                        \
-        pComponentParameterStructure)    /* Macro End */
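/* Usage sketch (not part of the original header): query the component's standard
 * role using the OMX_PARAM_COMPONENTROLETYPE structure defined earlier in this
 * header.  The index OMX_IndexParamStandardComponentRole comes from OMX_Index.h;
 * hComp is assumed to come from OMX_GetHandle. */
#include <string.h>
#include <OMX_Core.h>
#include <OMX_Component.h>

static void query_component_role(OMX_HANDLETYPE hComp)
{
    OMX_PARAM_COMPONENTROLETYPE role;

    /* The caller always fills in nSize and nVersion before Get/SetParameter. */
    memset(&role, 0, sizeof(role));
    role.nSize = sizeof(role);
    role.nVersion.s.nVersionMajor = 1;
    role.nVersion.s.nVersionMinor = 1;

    if (OMX_GetParameter(hComp, OMX_IndexParamStandardComponentRole, &role)
            == OMX_ErrorNone) {
        /* role.cRole now holds the null-terminated standard role name. */
    }
}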
-
-
-/** The OMX_SetParameter macro will send an initialization parameter
-    structure to a component.  Each structure shall be sent one at a time,
-    in a separate invocation of the macro.  This macro can only be
-    invoked when the component is in the OMX_StateLoaded state, or the
-    port is disabled (when the parameter applies to a port). The 
-    nParamIndex parameter is used to indicate which structure is being
-    passed to the component.  The application shall allocate the 
-    correct structure and shall fill in the structure size and version 
-    information (as well as the actual data) before invoking this macro.
-    The application is free to dispose of this structure after the call
-    as the component is required to copy any data it shall retain.  This 
-    is a blocking call.  
-    
-    The component should return from this call within 20 msec.
-    
-    @param [in] hComponent
-        Handle of the component to be accessed.  This is the component
-        handle returned by the call to the OMX_GetHandle function.
-    @param [in] nIndex
-        Index of the structure to be sent.  This value is from the
-        OMX_INDEXTYPE enumeration.
-    @param [in] pComponentParameterStructure
-        pointer to application allocated structure to be used for
-        initialization by the component.
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup comp
- */
-#define OMX_SetParameter(                                   \
-        hComponent,                                         \
-        nParamIndex,                                        \
-        pComponentParameterStructure)                        \
-    ((OMX_COMPONENTTYPE*)hComponent)->SetParameter(         \
-        hComponent,                                         \
-        nParamIndex,                                        \
-        pComponentParameterStructure)    /* Macro End */
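/* Usage sketch (not part of the original header): set the OMX_PRIORITYMGMTTYPE
 * parameter defined earlier in this header while the component is still in
 * OMX_StateLoaded.  OMX_IndexParamPriorityMgmt comes from OMX_Index.h; the group
 * values are illustrative. */
#include <string.h>
#include <OMX_Core.h>
#include <OMX_Component.h>

static OMX_ERRORTYPE set_group_priority(OMX_HANDLETYPE hComp)
{
    OMX_PRIORITYMGMTTYPE pri;

    memset(&pri, 0, sizeof(pri));
    pri.nSize = sizeof(pri);
    pri.nVersion.s.nVersionMajor = 1;
    pri.nVersion.s.nVersionMinor = 1;
    pri.nGroupPriority = 1;    /* illustrative priority value */
    pri.nGroupID       = 42;   /* illustrative group identifier */

    /* The component copies what it retains, so pri may safely live on the stack. */
    return OMX_SetParameter(hComp, OMX_IndexParamPriorityMgmt, &pri);
}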
-
-
-/** The OMX_GetConfig macro will get one of the configuration structures 
-    from a component.  This macro can be invoked anytime after the 
-    component has been loaded.  The nParamIndex call parameter is used to 
-    indicate which structure is being requested from the component.  The 
-    application shall allocate the correct structure and shall fill in the 
-    structure size and version information before invoking this macro.  
-    If the component has not had this configuration parameter sent before, 
-    then the component should return a set of valid DEFAULT values for the 
-    component.  This is a blocking call.  
-    
-    The component should return from this call within 5 msec.
-    
-    @param [in] hComponent
-        Handle of the component to be accessed.  This is the component
-        handle returned by the call to the OMX_GetHandle function.
-    @param [in] nIndex
-        Index of the structure to be filled.  This value is from the
-        OMX_INDEXTYPE enumeration.
-    @param [in,out] pComponentConfigStructure
-        pointer to application allocated structure to be filled by the 
-        component.
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup comp
-*/        
-#define OMX_GetConfig(                                      \
-        hComponent,                                         \
-        nConfigIndex,                                       \
-        pComponentConfigStructure)                           \
-    ((OMX_COMPONENTTYPE*)hComponent)->GetConfig(            \
-        hComponent,                                         \
-        nConfigIndex,                                       \
-        pComponentConfigStructure)       /* Macro End */
-
-
-/** The OMX_SetConfig macro will send one of the configuration 
-    structures to a component.  Each structure shall be sent one at a time,
-    each in a separate invocation of the macro.  This macro can be invoked 
-    anytime after the component has been loaded.  The application shall 
-    allocate the correct structure and shall fill in the structure size 
-    and version information (as well as the actual data) before invoking 
-    this macro.  The application is free to dispose of this structure after 
-    the call as the component is required to copy any data it shall retain.  
-    This is a blocking call.  
-    
-    The component should return from this call within 5 msec.
-    
-    @param [in] hComponent
-        Handle of the component to be accessed.  This is the component
-        handle returned by the call to the OMX_GetHandle function.
-    @param [in] nConfigIndex
-        Index of the structure to be sent.  This value is from the
-        OMX_INDEXTYPE enumeration above.
-    @param [in] pComponentConfigStructure
-        pointer to application allocated structure to be used for
-        initialization by the component.
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup comp
- */
-#define OMX_SetConfig(                                      \
-        hComponent,                                         \
-        nConfigIndex,                                       \
-        pComponentConfigStructure)                           \
-    ((OMX_COMPONENTTYPE*)hComponent)->SetConfig(            \
-        hComponent,                                         \
-        nConfigIndex,                                       \
-        pComponentConfigStructure)       /* Macro End */
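/* Usage sketch (not part of the original header): read-modify-write of a config
 * structure at run time.  OMX_CONFIG_ROTATIONTYPE and OMX_IndexConfigCommonRotate
 * come from OMX_IVCommon.h / OMX_Index.h; the rotation value is illustrative. */
#include <string.h>
#include <OMX_Core.h>
#include <OMX_Component.h>
#include <OMX_IVCommon.h>

static OMX_ERRORTYPE rotate_port(OMX_HANDLETYPE hComp, OMX_U32 nPortIndex)
{
    OMX_CONFIG_ROTATIONTYPE rot;
    OMX_ERRORTYPE err;

    memset(&rot, 0, sizeof(rot));
    rot.nSize = sizeof(rot);
    rot.nVersion.s.nVersionMajor = 1;
    rot.nVersion.s.nVersionMinor = 1;
    rot.nPortIndex = nPortIndex;

    /* Unlike parameters, configs may be read and written in any state after load. */
    err = OMX_GetConfig(hComp, OMX_IndexConfigCommonRotate, &rot);
    if (err != OMX_ErrorNone) return err;

    rot.nRotation = 90;   /* degrees, illustrative */
    return OMX_SetConfig(hComp, OMX_IndexConfigCommonRotate, &rot);
}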
-
-
-/** The OMX_GetExtensionIndex macro will invoke a component to translate 
-    a vendor specific configuration or parameter string into an OMX 
-    structure index.  There is no requirement for the vendor to support 
-    this command for the indexes already found in the OMX_INDEXTYPE 
-    enumeration (this is done to save space in small components).  The 
-    component shall support all vendor supplied extension indexes not found
-    in the master OMX_INDEXTYPE enumeration.  This is a blocking call.  
-    
-    The component should return from this call within 5 msec.
-    
-    @param [in] hComponent
-        Handle of the component to be accessed.  This is the component
-        handle returned by the call to the GetHandle function.
-    @param [in] cParameterName
-        OMX_STRING that shall be less than 128 characters long including
-        the trailing null byte.  This is the string that will get 
-        translated by the component into a configuration index.
-    @param [out] pIndexType
-        a pointer to a OMX_INDEXTYPE to receive the index value.
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup comp
- */
-#define OMX_GetExtensionIndex(                              \
-        hComponent,                                         \
-        cParameterName,                                     \
-        pIndexType)                                         \
-    ((OMX_COMPONENTTYPE*)hComponent)->GetExtensionIndex(    \
-        hComponent,                                         \
-        cParameterName,                                     \
-        pIndexType)                     /* Macro End */
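/* Usage sketch (not part of the original header); the extension string is a
 * hypothetical vendor name used purely for illustration. */
#include <OMX_Core.h>
#include <OMX_Component.h>

static void lookup_vendor_index(OMX_HANDLETYPE hComp)
{
    OMX_INDEXTYPE idx;

    if (OMX_GetExtensionIndex(hComp, (OMX_STRING)"OMX.vendor.ext.example", &idx)
            == OMX_ErrorNone) {
        /* idx can now be passed to OMX_Get/SetParameter or OMX_Get/SetConfig. */
    }
}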
-
-
-/** The OMX_GetState macro will invoke the component to get the current 
-    state of the component and place the state value into the location
-    pointed to by pState.  
-    
-    The component should return from this call within 5 msec.
-    
-    @param [in] hComponent
-        Handle of the component to be accessed.  This is the component
-        handle returned by the call to the OMX_GetHandle function.
-    @param [out] pState
-        pointer to the location to receive the state.  The value returned
-        is one of the OMX_STATETYPE members 
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup comp
- */
-#define OMX_GetState(                                       \
-        hComponent,                                         \
-        pState)                                             \
-    ((OMX_COMPONENTTYPE*)hComponent)->GetState(             \
-        hComponent,                                         \
-        pState)                         /* Macro End */
-
-
-/** The OMX_UseBuffer macro will request that the component use
-    a buffer (and allocate its own buffer header) already allocated 
-    by another component, or by the IL Client. This is a blocking 
-    call.
-    
-    The component should return from this call within 20 msec.
-    
-    @param [in] hComponent
-        Handle of the component to be accessed.  This is the component
-        handle returned by the call to the OMX_GetHandle function.
-    @param [out] ppBuffer
-        pointer to an OMX_BUFFERHEADERTYPE structure used to receive the 
-        pointer to the buffer header
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup comp buf
- */
-
-#define OMX_UseBuffer(                                      \
-           hComponent,                                      \
-           ppBufferHdr,                                     \
-           nPortIndex,                                      \
-           pAppPrivate,                                     \
-           nSizeBytes,                                      \
-           pBuffer)                                         \
-    ((OMX_COMPONENTTYPE*)hComponent)->UseBuffer(            \
-           hComponent,                                      \
-           ppBufferHdr,                                     \
-           nPortIndex,                                      \
-           pAppPrivate,                                     \
-           nSizeBytes,                                      \
-           pBuffer)
-
-
-/** The OMX_AllocateBuffer macro will request that the component allocate 
-    a new buffer and buffer header.  The component will allocate the 
-    buffer and the buffer header and return a pointer to the buffer 
-    header.  This is a blocking call.
-    
-    The component should return from this call within 5 msec.
-    
-    @param [in] hComponent
-        Handle of the component to be accessed.  This is the component
-        handle returned by the call to the OMX_GetHandle function.
-    @param [out] ppBuffer
-        pointer to an OMX_BUFFERHEADERTYPE structure used to receive 
-        the pointer to the buffer header
-    @param [in] nPortIndex
-        nPortIndex is used to select the port on the component the buffer will
-        be used with.  The port can be found by using the nPortIndex
-        value as an index into the Port Definition array of the component.
-    @param [in] pAppPrivate
-        pAppPrivate is used to initialize the pAppPrivate member of the 
-        buffer header structure.
-    @param [in] nSizeBytes
-        size of the buffer to allocate.
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup comp buf
- */    
-#define OMX_AllocateBuffer(                                 \
-        hComponent,                                         \
-        ppBuffer,                                           \
-        nPortIndex,                                         \
-        pAppPrivate,                                        \
-        nSizeBytes)                                         \
-    ((OMX_COMPONENTTYPE*)hComponent)->AllocateBuffer(       \
-        hComponent,                                         \
-        ppBuffer,                                           \
-        nPortIndex,                                         \
-        pAppPrivate,                                        \
-        nSizeBytes)                     /* Macro End */
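/* Usage sketch (not part of the original header): have the component allocate a
 * set of buffers for one port, typically during the Loaded -> Idle transition.
 * The count and size are illustrative; real values come from the port definition. */
#include <OMX_Core.h>
#include <OMX_Component.h>

static OMX_ERRORTYPE allocate_port_buffers(OMX_HANDLETYPE hComp, OMX_U32 nPortIndex,
                                           OMX_BUFFERHEADERTYPE **headers,
                                           OMX_U32 nBufferCount)
{
    OMX_U32 i;
    for (i = 0; i < nBufferCount; ++i) {
        /* The component allocates both the data buffer and its header. */
        OMX_ERRORTYPE err = OMX_AllocateBuffer(hComp, &headers[i], nPortIndex,
                                               NULL,         /* pAppPrivate */
                                               64 * 1024);   /* nSizeBytes, illustrative */
        if (err != OMX_ErrorNone) return err;
    }
    return OMX_ErrorNone;
}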
-
-
-/** The OMX_FreeBuffer macro will release a buffer header from the component
-    which was allocated using either OMX_AllocateBuffer or OMX_UseBuffer. If  
-    the component allocated the buffer (see the OMX_AllocateBuffer macro) then 
-    the component shall free the buffer and buffer header. This is a 
-    blocking call. 
-    
-    The component should return from this call within 20 msec.
-    
-    @param [in] hComponent
-        Handle of the component to be accessed.  This is the component
-        handle returned by the call to the OMX_GetHandle function.
-    @param [in] nPortIndex
-        nPortIndex is used to select the port on the component the buffer will
-        be used with.
-    @param [in] pBuffer
-        pointer to an OMX_BUFFERHEADERTYPE structure allocated with UseBuffer
-        or AllocateBuffer.
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup comp buf
- */
-#define OMX_FreeBuffer(                                     \
-        hComponent,                                         \
-        nPortIndex,                                         \
-        pBuffer)                                            \
-    ((OMX_COMPONENTTYPE*)hComponent)->FreeBuffer(           \
-        hComponent,                                         \
-        nPortIndex,                                         \
-        pBuffer)                        /* Macro End */
-
-
-/** The OMX_EmptyThisBuffer macro will send a buffer full of data to an 
-    input port of a component.  The buffer will be emptied by the component
-    and returned to the application via the EmptyBufferDone call back.
-    This is a non-blocking call in that the component will record the buffer
-    and return immediately and then empty the buffer, later, at the proper 
-    time.  As expected, this macro may be invoked only while the component 
-    is in the OMX_StateExecuting state.  If nPortIndex does not specify an input
-    port, the component shall return an error.  
-    
-    The component should return from this call within 5 msec.
-    
-    @param [in] hComponent
-        Handle of the component to be accessed.  This is the component
-        handle returned by the call to the OMX_GetHandle function.
-    @param [in] pBuffer
-        pointer to an OMX_BUFFERHEADERTYPE structure allocated with UseBuffer
-        or AllocateBuffer.
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup comp buf
- */
-#define OMX_EmptyThisBuffer(                                \
-        hComponent,                                         \
-        pBuffer)                                            \
-    ((OMX_COMPONENTTYPE*)hComponent)->EmptyThisBuffer(      \
-        hComponent,                                         \
-        pBuffer)                        /* Macro End */
-
-
-/** The OMX_FillThisBuffer macro will send an empty buffer to an 
-    output port of a component.  The buffer will be filled by the component
-    and returned to the application via the FillBufferDone call back.
-    This is a non-blocking call in that the component will record the buffer
-    and return immediately and then fill the buffer, later, at the proper 
-    time.  As expected, this macro may be invoked only while the component 
-    is in the OMX_StateExecuting state.  If nPortIndex does not specify an output
-    port, the component shall return an error.  
-    
-    The component should return from this call within 5 msec.
-    
-    @param [in] hComponent
-        Handle of the component to be accessed.  This is the component
-        handle returned by the call to the OMX_GetHandle function.
-    @param [in] pBuffer
-        pointer to an OMX_BUFFERHEADERTYPE structure allocated with UseBuffer
-        or AllocateBuffer.
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup comp buf
- */
-#define OMX_FillThisBuffer(                                 \
-        hComponent,                                         \
-        pBuffer)                                            \
-    ((OMX_COMPONENTTYPE*)hComponent)->FillThisBuffer(       \
-        hComponent,                                         \
-        pBuffer)                        /* Macro End */
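/* A minimal submission sketch (not part of the original header): hand one filled
 * buffer to an input port and one empty buffer to an output port while the
 * component is executing.  The headers are assumed to have been obtained earlier
 * with OMX_AllocateBuffer or OMX_UseBuffer; each buffer belongs to the component
 * until the matching EmptyBufferDone / FillBufferDone callback returns it. */
#include <OMX_Core.h>
#include <OMX_Component.h>

static void submit_buffers(OMX_HANDLETYPE hComp,
                           OMX_BUFFERHEADERTYPE *pInput,   /* nFilledLen etc. already set */
                           OMX_BUFFERHEADERTYPE *pOutput)  /* empty, to be filled */
{
    OMX_EmptyThisBuffer(hComp, pInput);   /* non-blocking; emptied later */
    OMX_FillThisBuffer(hComp, pOutput);   /* non-blocking; filled later  */
}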
-
-
-
-/** The OMX_UseEGLImage macro will request that the component use
-    an EGLImage provided by EGL (and allocate its own buffer header).
-    This is a blocking call.
-    
-    The component should return from this call within 20 msec.
-    
-    @param [in] hComponent
-        Handle of the component to be accessed.  This is the component
-        handle returned by the call to the OMX_GetHandle function.
-    @param [out] ppBuffer
-        pointer to an OMX_BUFFERHEADERTYPE structure used to receive the 
-        pointer to the buffer header.  Note that the memory location used
-        for this buffer is NOT visible to the IL Client.
-    @param [in] nPortIndex
-        nPortIndex is used to select the port on the component the buffer will
-        be used with.  The port can be found by using the nPortIndex
-        value as an index into the Port Definition array of the component.
-    @param [in] pAppPrivate
-        pAppPrivate is used to initialize the pAppPrivate member of the 
-        buffer header structure.
-    @param [in] eglImage
-        eglImage contains the handle of the EGLImage to use as a buffer on the
-        specified port.  The component is expected to validate properties of 
-        the EGLImage against the configuration of the port to ensure the component
-        can use the EGLImage as a buffer.          
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup comp buf
- */
-#define OMX_UseEGLImage(                                    \
-           hComponent,                                      \
-           ppBufferHdr,                                     \
-           nPortIndex,                                      \
-           pAppPrivate,                                     \
-           eglImage)                                        \
-    ((OMX_COMPONENTTYPE*)hComponent)->UseEGLImage(          \
-           hComponent,                                      \
-           ppBufferHdr,                                     \
-           nPortIndex,                                      \
-           pAppPrivate,                                     \
-           eglImage)
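
As a sketch of the call itself, assuming an EGLImageKHR created earlier with eglCreateImageKHR and a hypothetical output port index of 1 (only the buffer header is allocated by the component; the pixel storage stays inside the EGLImage):

    #include <OMX_Core.h>
    #include <OMX_Component.h>
    #include <EGL/egl.h>
    #include <EGL/eglext.h>

    OMX_ERRORTYPE wrap_egl_image(OMX_HANDLETYPE hComponent,
                                 EGLImageKHR eglImage,
                                 OMX_BUFFERHEADERTYPE **ppHdr)
    {
        /* Port 1 is a hypothetical output port; pAppPrivate is left NULL. */
        return OMX_UseEGLImage(hComponent, ppHdr, 1, NULL, eglImage);
    }
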
-
-/** The OMX_Init method is used to initialize the OMX core.  It shall be the
-    first call made into OMX and it should only be executed one time without
-    an intervening OMX_Deinit call.  
-    
-    The core should return from this call within 20 msec.
-
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup core
- */
-OMX_API OMX_ERRORTYPE OMX_APIENTRY OMX_Init(void);
-
-
-/** The OMX_Deinit method is used to deinitialize the OMX core.  It shall be 
-    the last call made into OMX. In the event that the core determines that 
-    there are components loaded when this call is made, the core may return 
-    with an error rather than try to unload the components.
-        
-    The core should return from this call within 20 msec.
-    
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup core
- */
-OMX_API OMX_ERRORTYPE OMX_APIENTRY OMX_Deinit(void);
-
-
-/** The OMX_ComponentNameEnum method will enumerate through all the names of
-    recognised valid components in the system. This function is provided
-    as a means to detect all the components in the system at run-time. There is
-    no strict ordering to the enumeration order of component names, although
-    each name will only be enumerated once.  If the OMX core supports run-time
-    installed components when the first call to enumerate component names
-    installed components when the first call to enumerate component names
-    is made (i.e. when nIndex is 0x0).
-    
-    The core should return from this call in 20 msec.
-    
-    @param [out] cComponentName
-        pointer to a null terminated string with the component name.  The
-        names of the components are strings less than 127 bytes in length
-        plus the trailing null for a maximum size of 128 bytes.  An example 
-        of a valid component name is "OMX.TI.AUDIO.DSP.MIXER\0".  Names are 
-        assigned by the vendor, but shall start with "OMX." and then have 
-        the Vendor designation next.
-    @param [in] nNameLength
-        number of characters in the cComponentName string.  With all 
-        component name strings restricted to less than 128 characters 
-        (including the trailing null) it is recommended that the caller
-        provide an input string for the cComponentName of 128 characters.
-    @param [in] nIndex
-        number containing the enumeration index for the component. 
-        Multiple calls to OMX_ComponentNameEnum with increasing values
-        of nIndex will enumerate through the component names in the
-        system until OMX_ErrorNoMore is returned.  The value of nIndex
-        is 0 to (N-1), where N is the number of valid installed components
-        in the system.
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  When the value of nIndex exceeds the number of 
-        components in the system minus 1, OMX_ErrorNoMore will be
-        returned. Otherwise the appropriate OMX error will be returned.
-    @ingroup core
- */
-OMX_API OMX_ERRORTYPE OMX_APIENTRY OMX_ComponentNameEnum(
-    OMX_OUT OMX_STRING cComponentName,
-    OMX_IN  OMX_U32 nNameLength,
-    OMX_IN  OMX_U32 nIndex);
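
A minimal enumeration loop following the 128-byte sizing recommendation above (printing the names is just for illustration):

    #include <stdio.h>
    #include <OMX_Core.h>

    void list_components(void)
    {
        char name[128];   /* 127 characters plus the trailing null, as noted above */
        OMX_U32 i = 0;

        /* Increasing nIndex until OMX_ErrorNoMore walks every installed component. */
        while (OMX_ComponentNameEnum(name, sizeof(name), i) == OMX_ErrorNone) {
            printf("component %u: %s\n", (unsigned)i, name);
            ++i;
        }
    }
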
-
-
-/** The OMX_GetHandle method will locate the component specified by the
-    component name given, load that component into memory and then invoke
-    the component's methods to create an instance of the component.  
-    
-    The core should return from this call within 20 msec.
-    
-    @param [out] pHandle
-        pointer to an OMX_HANDLETYPE pointer to be filled in by this method.
-    @param [in] cComponentName
-        pointer to a null terminated string with the component name.  The
-        names of the components are strings less than 127 bytes in length
-        plus the trailing null for a maximum size of 128 bytes.  An example 
-        of a valid component name is "OMX.TI.AUDIO.DSP.MIXER\0".  Names are 
-        assigned by the vendor, but shall start with "OMX." and then have 
-        the Vendor designation next.
-    @param [in] pAppData
-        pointer to an application defined value that will be returned
-        during callbacks so that the application can identify the source
-        of the callback.
-    @param [in] pCallBacks
-        pointer to a OMX_CALLBACKTYPE structure that will be passed to the
-        component to initialize it with.  
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup core
- */
-OMX_API OMX_ERRORTYPE OMX_APIENTRY OMX_GetHandle(
-    OMX_OUT OMX_HANDLETYPE* pHandle, 
-    OMX_IN  OMX_STRING cComponentName,
-    OMX_IN  OMX_PTR pAppData,
-    OMX_IN  OMX_CALLBACKTYPE* pCallBacks);
-
-
-/** The OMX_FreeHandle method will free a handle allocated by the OMX_GetHandle 
-    method.  If the component reference count goes to zero, the component will
-    be unloaded from memory.  
-    
-    The core should return from this call within 20 msec when the component is 
-    in the OMX_StateLoaded state.
-
-    @param [in] hComponent
-        Handle of the component to be accessed.  This is the component
-        handle returned by the call to the GetHandle function.
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-    @ingroup core
- */
-OMX_API OMX_ERRORTYPE OMX_APIENTRY OMX_FreeHandle(
-    OMX_IN  OMX_HANDLETYPE hComponent);
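
A sketch of the matching OMX_GetHandle/OMX_FreeHandle lifecycle, using the example component name given above; the three callbacks and pClientCtx are hypothetical and state handling is elided:

    #include <OMX_Core.h>
    #include <OMX_Component.h>

    /* Hypothetical IL-client callbacks (signatures from OMX_CALLBACKTYPE). */
    extern OMX_ERRORTYPE on_omx_event(OMX_HANDLETYPE, OMX_PTR, OMX_EVENTTYPE,
                                      OMX_U32, OMX_U32, OMX_PTR);
    extern OMX_ERRORTYPE on_empty_done(OMX_HANDLETYPE, OMX_PTR, OMX_BUFFERHEADERTYPE *);
    extern OMX_ERRORTYPE on_fill_done(OMX_HANDLETYPE, OMX_PTR, OMX_BUFFERHEADERTYPE *);

    void mixer_example(OMX_PTR pClientCtx)
    {
        OMX_CALLBACKTYPE cb = { on_omx_event, on_empty_done, on_fill_done };
        OMX_HANDLETYPE hMixer = NULL;

        if (OMX_GetHandle(&hMixer, (OMX_STRING)"OMX.TI.AUDIO.DSP.MIXER",
                          pClientCtx, &cb) != OMX_ErrorNone)
            return;

        /* ... configure ports, transition states, move buffers ... */

        /* Release the instance once it is back in OMX_StateLoaded. */
        OMX_FreeHandle(hMixer);
    }
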
-
-
-
-/** The OMX_SetupTunnel method will handle the necessary calls to the components
-    to set up the specified tunnel between the two components.  NOTE: This is
-    an actual method (not a #define macro).  This method will make calls into
-    the component ComponentTunnelRequest method to do the actual tunnel 
-    connection.  
-
-    The ComponentTunnelRequest method on both components will be called. 
-    This method shall not be called unless the component is in the 
-    OMX_StateLoaded state except when the ports used for the tunnel are
-    disabled. In this case, the component may be in the OMX_StateExecuting,
-    OMX_StatePause, or OMX_StateIdle states. 
-
-    The core should return from this call within 20 msec.
-    
-    @param [in] hOutput
-        Handle of the component to be accessed.  Also this is the handle
-        of the component whose port, specified in the nPortOutput parameter
-        will be used as the source for the tunnel. This is the component handle
-        returned by the call to the OMX_GetHandle function.  There is a 
-        requirement that hOutput be the source for the data when
-        tunneling (i.e. nPortOutput is an output port).  If 0x0, the component
-        specified in hInput will have its port specified in nPortInput
-        set up for communication with the application / IL client.
-    @param [in] nPortOutput
-        nPortOutput is used to select the source port on component to be
-        used in the tunnel. 
-    @param [in] hInput
-        This is the component to set up the tunnel with. This is the handle
-        of the component whose port, specified in the nPortInput parameter
-        will be used as the destination for the tunnel. This is the component handle
-        returned by the call to the OMX_GetHandle function.  There is a 
-        requirement that hInput be the destination for the data when
-        tunneling (i.e. nPortInput is an input port).   If 0x0, the component
-        specified in hOutput will have its port specified in nPortOutput
-        set up for communication with the application / IL client.
-    @param [in] nPortInput
-        nPortInput is used to select the destination port on component to be
-        used in the tunnel.
-    @return OMX_ERRORTYPE
-        If the command successfully executes, the return code will be
-        OMX_ErrorNone.  Otherwise the appropriate OMX error will be returned.
-        When OMX_ErrorNotImplemented is returned, one or both components is 
-        a non-interop component and does not support tunneling.
-        
-        On failure, the ports of both components are set up for communication
-        with the application / IL Client.
-    @ingroup core tun
- */
-OMX_API OMX_ERRORTYPE OMX_APIENTRY OMX_SetupTunnel(
-    OMX_IN  OMX_HANDLETYPE hOutput,
-    OMX_IN  OMX_U32 nPortOutput,
-    OMX_IN  OMX_HANDLETYPE hInput,
-    OMX_IN  OMX_U32 nPortInput);
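
A sketch tunneling a hypothetical source output port 1 into a sink input port 0, with the fallback described above when one side is a non-interop component:

    #include <OMX_Core.h>

    /* Both components are assumed to be in OMX_StateLoaded (or to have the
     * affected ports disabled), as required above. */
    OMX_ERRORTYPE tunnel_source_to_sink(OMX_HANDLETYPE hSource, OMX_HANDLETYPE hSink)
    {
        OMX_ERRORTYPE err = OMX_SetupTunnel(hSource, 1 /* output port */,
                                            hSink,   0 /* input port  */);
        if (err == OMX_ErrorNotImplemented) {
            /* At least one component is non-interop: both ports remain set up
             * for communication with the IL client, which must then move the
             * buffers itself via OMX_EmptyThisBuffer/OMX_FillThisBuffer. */
        }
        return err;
    }
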
-    
-/** @ingroup cp */
-OMX_API OMX_ERRORTYPE   OMX_GetContentPipe(
-    OMX_OUT OMX_HANDLETYPE *hPipe,
-    OMX_IN OMX_STRING szURI);
-
-/** The OMX_GetComponentsOfRole method will return the number of components that support the given
-    role and (if the compNames field is non-NULL) the names of those components. The call will fail if 
-    an insufficiently sized array of names is supplied. To ensure the array is sufficiently sized the
-    client should:
-        * first call this function with the compNames field NULL to determine the number of component names
-        * second call this function with the compNames field pointing to an array of names allocated 
-          according to the number returned by the first call.
-
-    The core should return from this call within 5 msec.
-    
-    @param [in] role
-        This is a generic standard component name consisting only of the component class 
-        name and the type within that class (e.g. 'audio_decoder.aac').
-    @param [inout] pNumComps
-        This is used both as input and output. 
- 
-        If compNames is NULL, the input is ignored and the output specifies how many components support
-        the given role.
-     
-        If compNames is not NULL, on input it bounds the size of the input structure and 
-        on output, it specifies the number of components string names listed within the compNames parameter.
-    @param [inout] compNames
-        If NULL this field is ignored. If non-NULL this points to an array of 128-byte strings which accepts 
-        a list of the names of all physical components that implement the specified standard component name. 
-        Each name is NULL terminated. pNumComps indicates the number of names.
-    @ingroup core
- */
-OMX_API OMX_ERRORTYPE OMX_GetComponentsOfRole ( 
-	OMX_IN      OMX_STRING role,
-    OMX_INOUT   OMX_U32 *pNumComps,
-    OMX_INOUT   OMX_U8  **compNames);
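
A sketch of the two-call sizing pattern described above, using the example role 'audio_decoder.aac'; allocation failures are not handled. OMX_GetRolesOfComponent below is used the same way, with the component name as input and the roles as output.

    #include <stdlib.h>
    #include <OMX_Core.h>

    void print_aac_decoders(void)
    {
        OMX_U32 n = 0;
        OMX_U32 i;
        OMX_U8 **names;

        /* First call: compNames == NULL, so only the count is returned. */
        if (OMX_GetComponentsOfRole((OMX_STRING)"audio_decoder.aac", &n, NULL) != OMX_ErrorNone
                || n == 0)
            return;

        /* Second call: an array of n 128-byte strings receives the names. */
        names = (OMX_U8 **)calloc(n, sizeof(*names));
        for (i = 0; i < n; ++i)
            names[i] = (OMX_U8 *)malloc(128);

        if (OMX_GetComponentsOfRole((OMX_STRING)"audio_decoder.aac", &n, names) == OMX_ErrorNone) {
            /* names[0..n-1] now hold null-terminated component names. */
        }

        for (i = 0; i < n; ++i)
            free(names[i]);
        free(names);
    }
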
-
-/** The OMX_GetRolesOfComponent method will return the number of roles supported by the given
-    component and (if the roles field is non-NULL) the names of those roles. The call will fail if 
-    an insufficiently sized array of names is supplied. To ensure the array is sufficiently sized the
-    client should:
-        * first call this function with the roles field NULL to determine the number of role names
-        * second call this function with the roles field pointing to an array of names allocated 
-          according to the number returned by the first call.
-
-    The core should return from this call within 5 msec.
-
-    @param [in] compName
-        This is the name of the component being queried about.
-    @param [inout] pNumRoles
-        This is used both as input and output. 
- 
-        If roles is NULL, the input is ignored and the output specifies how many roles the component supports.
-     
-        If roles is not NULL, on input it bounds the size of the input structure and 
-        on output, it specifies the number of roles string names listed within the roles parameter.
-    @param [out] roles
-        If NULL this field is ignored. If non-NULL this points to an array of 128-byte strings 
-        which accepts a list of the names of all standard components roles implemented on the 
-        specified component name. pNumRoles indicates the number of names.
-    @ingroup core
- */
-OMX_API OMX_ERRORTYPE OMX_GetRolesOfComponent ( 
-	OMX_IN      OMX_STRING compName, 
-    OMX_INOUT   OMX_U32 *pNumRoles,
-    OMX_OUT     OMX_U8 **roles);
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif
-/* File EOF */
-
diff --git a/include/media/stagefright/openmax/OMX_IVCommon.h b/include/media/stagefright/openmax/OMX_IVCommon.h
deleted file mode 100644
index 8bb4ded..0000000
--- a/include/media/stagefright/openmax/OMX_IVCommon.h
+++ /dev/null
@@ -1,947 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/**
- * Copyright (c) 2008 The Khronos Group Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject
- * to the following conditions:
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-/**
- * @file OMX_IVCommon.h - OpenMax IL version 1.1.2
- *  The structures needed by Video and Image components to exchange
- *  parameters and configuration data with the components.
- */
-#ifndef OMX_IVCommon_h
-#define OMX_IVCommon_h
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/**
- * Each OMX header must include all required header files to allow the header
- * to compile without errors.  The includes below are required for this header
- * file to compile successfully
- */
-
-#include <OMX_Core.h>
-
-/** @defgroup iv OpenMAX IL Imaging and Video Domain
- * Common structures for OpenMAX IL Imaging and Video domains
- * @{
- */
-
-
-/**
- * Enumeration defining possible uncompressed image/video formats.
- *
- * ENUMS:
- *  Unused                 : Placeholder value when format is N/A
- *  Monochrome             : black and white
- *  8bitRGB332             : Red 7:5, Green 4:2, Blue 1:0
- *  12bitRGB444            : Red 11:8, Green 7:4, Blue 3:0
- *  16bitARGB4444          : Alpha 15:12, Red 11:8, Green 7:4, Blue 3:0
- *  16bitARGB1555          : Alpha 15, Red 14:10, Green 9:5, Blue 4:0
- *  16bitRGB565            : Red 15:11, Green 10:5, Blue 4:0
- *  16bitBGR565            : Blue 15:11, Green 10:5, Red 4:0
- *  18bitRGB666            : Red 17:12, Green 11:6, Blue 5:0
- *  18bitARGB1665          : Alpha 17, Red 16:11, Green 10:5, Blue 4:0
- *  19bitARGB1666          : Alpha 18, Red 17:12, Green 11:6, Blue 5:0
- *  24bitRGB888            : Red 23:16, Green 15:8, Blue 7:0
- *  24bitBGR888            : Blue 23:16, Green 15:8, Red 7:0
- *  24bitARGB1887          : Alpha 23, Red 22:15, Green 14:7, Blue 6:0
- *  25bitARGB1888          : Alpha 24, Red 23:16, Green 15:8, Blue 7:0
- *  32bitBGRA8888          : Blue 31:24, Green 23:16, Red 15:8, Alpha 7:0
- *  32bitARGB8888          : Alpha 31:24, Red 23:16, Green 15:8, Blue 7:0
- *  YUV411Planar           : U,Y are subsampled by a factor of 4 horizontally
- *  YUV411PackedPlanar     : packed per payload in planar slices
- *  YUV420Planar           : Three arrays Y,U,V.
- *  YUV420PackedPlanar     : packed per payload in planar slices
- *  YUV420SemiPlanar       : Two arrays, one is all Y, the other is U and V
- *  YUV422Planar           : Three arrays Y,U,V.
- *  YUV422PackedPlanar     : packed per payload in planar slices
- *  YUV422SemiPlanar       : Two arrays, one is all Y, the other is U and V
- *  YCbYCr                 : Organized as 16bit YUYV (i.e. YCbYCr)
- *  YCrYCb                 : Organized as 16bit YVYU (i.e. YCrYCb)
- *  CbYCrY                 : Organized as 16bit UYVY (i.e. CbYCrY)
- *  CrYCbY                 : Organized as 16bit VYUY (i.e. CrYCbY)
- *  YUV444Interleaved      : Each pixel contains equal parts YUV
- *  RawBayer8bit           : SMIA camera output format
- *  RawBayer10bit          : SMIA camera output format
- *  RawBayer8bitcompressed : SMIA camera output format
- */
-typedef enum OMX_COLOR_FORMATTYPE {
-    OMX_COLOR_FormatUnused,
-    OMX_COLOR_FormatMonochrome,
-    OMX_COLOR_Format8bitRGB332,
-    OMX_COLOR_Format12bitRGB444,
-    OMX_COLOR_Format16bitARGB4444,
-    OMX_COLOR_Format16bitARGB1555,
-    OMX_COLOR_Format16bitRGB565,
-    OMX_COLOR_Format16bitBGR565,
-    OMX_COLOR_Format18bitRGB666,
-    OMX_COLOR_Format18bitARGB1665,
-    OMX_COLOR_Format19bitARGB1666,
-    OMX_COLOR_Format24bitRGB888,
-    OMX_COLOR_Format24bitBGR888,
-    OMX_COLOR_Format24bitARGB1887,
-    OMX_COLOR_Format25bitARGB1888,
-    OMX_COLOR_Format32bitBGRA8888,
-    OMX_COLOR_Format32bitARGB8888,
-    OMX_COLOR_FormatYUV411Planar,
-    OMX_COLOR_FormatYUV411PackedPlanar,
-    OMX_COLOR_FormatYUV420Planar,
-    OMX_COLOR_FormatYUV420PackedPlanar,
-    OMX_COLOR_FormatYUV420SemiPlanar,
-    OMX_COLOR_FormatYUV422Planar,
-    OMX_COLOR_FormatYUV422PackedPlanar,
-    OMX_COLOR_FormatYUV422SemiPlanar,
-    OMX_COLOR_FormatYCbYCr,
-    OMX_COLOR_FormatYCrYCb,
-    OMX_COLOR_FormatCbYCrY,
-    OMX_COLOR_FormatCrYCbY,
-    OMX_COLOR_FormatYUV444Interleaved,
-    OMX_COLOR_FormatRawBayer8bit,
-    OMX_COLOR_FormatRawBayer10bit,
-    OMX_COLOR_FormatRawBayer8bitcompressed,
-    OMX_COLOR_FormatL2,
-    OMX_COLOR_FormatL4,
-    OMX_COLOR_FormatL8,
-    OMX_COLOR_FormatL16,
-    OMX_COLOR_FormatL24,
-    OMX_COLOR_FormatL32,
-    OMX_COLOR_FormatYUV420PackedSemiPlanar,
-    OMX_COLOR_FormatYUV422PackedSemiPlanar,
-    OMX_COLOR_Format18BitBGR666,
-    OMX_COLOR_Format24BitARGB6666,
-    OMX_COLOR_Format24BitABGR6666,
-    OMX_COLOR_FormatKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
-    OMX_COLOR_FormatVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    /**<Reserved android opaque colorformat. Tells the encoder that
-     * the actual colorformat will be  relayed by the
-     * Gralloc Buffers.
-     * FIXME: In the process of reserving some enum values for
-     * Android-specific OMX IL colorformats. Change this enum to
-     * an acceptable range once that is done.
-     * */
-    OMX_COLOR_FormatAndroidOpaque = 0x7F000789,
-    OMX_TI_COLOR_FormatYUV420PackedSemiPlanar = 0x7F000100,
-    OMX_QCOM_COLOR_FormatYVU420SemiPlanar = 0x7FA30C00,
-    OMX_COLOR_FormatMax = 0x7FFFFFFF
-} OMX_COLOR_FORMATTYPE;
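
For example, OMX_COLOR_FormatYUV420SemiPlanar places a full-resolution Y plane ahead of an interleaved U/V plane subsampled 2x2, so a tightly packed frame needs width * height * 3 / 2 bytes; a small sketch (real ports may add nStride/nSliceHeight padding on top of this):

    #include <stddef.h>

    /* Byte count of a tightly packed YUV420SemiPlanar frame. */
    size_t yuv420sp_frame_bytes(size_t width, size_t height)
    {
        size_t luma   = width * height;          /* full-resolution Y plane */
        size_t chroma = (width * height) / 2;    /* interleaved U/V, 2x2 subsampled */
        return luma + chroma;
    }
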
-
-
-/**
- * Defines the matrix for conversion from RGB to YUV or vice versa.
- * iColorMatrix should be initialized with the fixed point values
- * used in converting between formats.
- */
-typedef struct OMX_CONFIG_COLORCONVERSIONTYPE {
-    OMX_U32 nSize;              /**< Size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version info */
-    OMX_U32 nPortIndex;         /**< Port that this struct applies to */
-    OMX_S32 xColorMatrix[3][3]; /**< Stored in signed Q16 format */
-    OMX_S32 xColorOffset[4];    /**< Stored in signed Q16 format */
-}OMX_CONFIG_COLORCONVERSIONTYPE;
-
-
-/**
- * Structure defining the factor by which to scale each frame dimension.  For example:
- * to make the width 50% larger, use xWidth = 1.5 and to make the width
- * 1/2 the original size, use xWidth = 0.5 (both stored in Q16, see below).
- */
-typedef struct OMX_CONFIG_SCALEFACTORTYPE {
-    OMX_U32 nSize;            /**< Size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version info */
-    OMX_U32 nPortIndex;       /**< Port that this struct applies to */
-    OMX_S32 xWidth;           /**< Fixed point value stored as Q16 */
-    OMX_S32 xHeight;          /**< Fixed point value stored as Q16 */
-}OMX_CONFIG_SCALEFACTORTYPE;
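
Since xWidth and xHeight are Q16 fixed point, the 1.5x / 0.5x example above translates into the following sketch; hComponent and the port index are hypothetical, and OMX_IndexConfigCommonScale is the index from OMX_Index.h that references this structure:

    #include <string.h>
    #include <OMX_Core.h>
    #include <OMX_Component.h>
    #include <OMX_IVCommon.h>

    OMX_ERRORTYPE scale_frame(OMX_HANDLETYPE hComponent)
    {
        OMX_CONFIG_SCALEFACTORTYPE scale;

        memset(&scale, 0, sizeof(scale));
        scale.nSize = sizeof(scale);
        scale.nVersion.s.nVersionMajor = 1;          /* OpenMAX IL 1.1.x */
        scale.nVersion.s.nVersionMinor = 1;
        scale.nPortIndex = 0;                        /* hypothetical port */
        scale.xWidth  = (OMX_S32)(1.5 * 65536);      /* 1.5 in Q16 = 0x18000 */
        scale.xHeight = (OMX_S32)(0.5 * 65536);      /* 0.5 in Q16 = 0x08000 */

        return OMX_SetConfig(hComponent, OMX_IndexConfigCommonScale, &scale);
    }
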
-
-
-/**
- * Enumeration of possible image filter types
- */
-typedef enum OMX_IMAGEFILTERTYPE {
-    OMX_ImageFilterNone,
-    OMX_ImageFilterNoise,
-    OMX_ImageFilterEmboss,
-    OMX_ImageFilterNegative,
-    OMX_ImageFilterSketch,
-    OMX_ImageFilterOilPaint,
-    OMX_ImageFilterHatch,
-    OMX_ImageFilterGpen,
-    OMX_ImageFilterAntialias,
-    OMX_ImageFilterDeRing,
-    OMX_ImageFilterSolarize,
-    OMX_ImageFilterKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
-    OMX_ImageFilterVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_ImageFilterMax = 0x7FFFFFFF
-} OMX_IMAGEFILTERTYPE;
-
-
-/**
- * Image filter configuration
- *
- * STRUCT MEMBERS:
- *  nSize        : Size of the structure in bytes
- *  nVersion     : OMX specification version information
- *  nPortIndex   : Port that this structure applies to
- *  eImageFilter : Image filter type enumeration
- */
-typedef struct OMX_CONFIG_IMAGEFILTERTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_IMAGEFILTERTYPE eImageFilter;
-} OMX_CONFIG_IMAGEFILTERTYPE;
-
-
-/**
- * Customized U and V for color enhancement
- *
- * STRUCT MEMBERS:
- *  nSize             : Size of the structure in bytes
- *  nVersion          : OMX specification version information
- *  nPortIndex        : Port that this structure applies to
- *  bColorEnhancement : Enable/disable color enhancement
- *  nCustomizedU      : Practical values: 16-240, range: 0-255, value set for
- *                      U component
- *  nCustomizedV      : Practical values: 16-240, range: 0-255, value set for
- *                      V component
- */
-typedef struct OMX_CONFIG_COLORENHANCEMENTTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_BOOL bColorEnhancement;
-    OMX_U8 nCustomizedU;
-    OMX_U8 nCustomizedV;
-} OMX_CONFIG_COLORENHANCEMENTTYPE;
-
-
-/**
- * Define color key and color key mask
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  nARGBColor : 32bit Alpha, Red, Green, Blue Color
- *  nARGBMask  : 32bit Mask for Alpha, Red, Green, Blue channels
- */
-typedef struct OMX_CONFIG_COLORKEYTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nARGBColor;
-    OMX_U32 nARGBMask;
-} OMX_CONFIG_COLORKEYTYPE;
-
-
-/**
- * List of color blend types for pre/post processing
- *
- * ENUMS:
- *  None          : No color blending present
- *  AlphaConstant : Function is (alpha_constant * src) +
- *                  ((1 - alpha_constant) * dst)
- *  AlphaPerPixel : Function is (alpha * src) + ((1 - alpha) * dst)
- *  Alternate     : Function is alternating pixels from src and dst
- *  And           : Function is (src & dst)
- *  Or            : Function is (src | dst)
- *  Invert        : Function is ~src
- */
-typedef enum OMX_COLORBLENDTYPE {
-    OMX_ColorBlendNone,
-    OMX_ColorBlendAlphaConstant,
-    OMX_ColorBlendAlphaPerPixel,
-    OMX_ColorBlendAlternate,
-    OMX_ColorBlendAnd,
-    OMX_ColorBlendOr,
-    OMX_ColorBlendInvert,
-    OMX_ColorBlendKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
-    OMX_ColorBlendVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_ColorBlendMax = 0x7FFFFFFF
-} OMX_COLORBLENDTYPE;
-
-
-/**
- * Color blend configuration
- *
- * STRUCT MEMBERS:
- *  nSize             : Size of the structure in bytes
- *  nVersion          : OMX specification version information
- *  nPortIndex        : Port that this structure applies to
- *  nRGBAlphaConstant : Constant global alpha values when global alpha is used
- *  eColorBlend       : Color blend type enumeration
- */
-typedef struct OMX_CONFIG_COLORBLENDTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nRGBAlphaConstant;
-    OMX_COLORBLENDTYPE  eColorBlend;
-} OMX_CONFIG_COLORBLENDTYPE;
-
-
-/**
- * Hold frame dimension
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  nWidth     : Frame width in pixels
- *  nHeight    : Frame height in pixels
- */
-typedef struct OMX_FRAMESIZETYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nWidth;
-    OMX_U32 nHeight;
-} OMX_FRAMESIZETYPE;
-
-
-/**
- * Rotation configuration
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  nRotation  : +/- integer rotation value
- */
-typedef struct OMX_CONFIG_ROTATIONTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_S32 nRotation;
-} OMX_CONFIG_ROTATIONTYPE;
-
-
-/**
- * Possible mirroring directions for pre/post processing
- *
- * ENUMS:
- *  None       : No mirroring
- *  Vertical   : Vertical mirroring, flip on X axis
- *  Horizontal : Horizontal mirroring, flip on Y axis
- *  Both       : Both vertical and horizontal mirroring
- */
-typedef enum OMX_MIRRORTYPE {
-    OMX_MirrorNone = 0,
-    OMX_MirrorVertical,
-    OMX_MirrorHorizontal,
-    OMX_MirrorBoth,
-    OMX_MirrorKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
-    OMX_MirrorVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_MirrorMax = 0x7FFFFFFF
-} OMX_MIRRORTYPE;
-
-
-/**
- * Mirroring configuration
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  eMirror    : Mirror type enumeration
- */
-typedef struct OMX_CONFIG_MIRRORTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_MIRRORTYPE  eMirror;
-} OMX_CONFIG_MIRRORTYPE;
-
-
-/**
- * Position information only
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  nX         : X coordinate for the point
- *  nY         : Y coordinate for the point
- */
-typedef struct OMX_CONFIG_POINTTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_S32 nX;
-    OMX_S32 nY;
-} OMX_CONFIG_POINTTYPE;
-
-
-/**
- * Frame size plus position
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  nLeft      : X Coordinate of the top left corner of the rectangle
- *  nTop       : Y Coordinate of the top left corner of the rectangle
- *  nWidth     : Width of the rectangle
- *  nHeight    : Height of the rectangle
- */
-typedef struct OMX_CONFIG_RECTTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_S32 nLeft;
-    OMX_S32 nTop;
-    OMX_U32 nWidth;
-    OMX_U32 nHeight;
-} OMX_CONFIG_RECTTYPE;
-
-
-/**
- * Deblocking state; it is required to be set up before starting the codec
- *
- * STRUCT MEMBERS:
- *  nSize       : Size of the structure in bytes
- *  nVersion    : OMX specification version information
- *  nPortIndex  : Port that this structure applies to
- *  bDeblocking : Enable/disable deblocking mode
- */
-typedef struct OMX_PARAM_DEBLOCKINGTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_BOOL bDeblocking;
-} OMX_PARAM_DEBLOCKINGTYPE;
-
-
-/**
- * Stabilization state
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  bStab      : Enable/disable frame stabilization state
- */
-typedef struct OMX_CONFIG_FRAMESTABTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_BOOL bStab;
-} OMX_CONFIG_FRAMESTABTYPE;
-
-
-/**
- * White Balance control type
- *
- * STRUCT MEMBERS:
- *  SunLight : Referenced in JSR-234
- *  Flash    : Optimal for device's integrated flash
- */
-typedef enum OMX_WHITEBALCONTROLTYPE {
-    OMX_WhiteBalControlOff = 0,
-    OMX_WhiteBalControlAuto,
-    OMX_WhiteBalControlSunLight,
-    OMX_WhiteBalControlCloudy,
-    OMX_WhiteBalControlShade,
-    OMX_WhiteBalControlTungsten,
-    OMX_WhiteBalControlFluorescent,
-    OMX_WhiteBalControlIncandescent,
-    OMX_WhiteBalControlFlash,
-    OMX_WhiteBalControlHorizon,
-    OMX_WhiteBalControlKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
-    OMX_WhiteBalControlVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_WhiteBalControlMax = 0x7FFFFFFF
-} OMX_WHITEBALCONTROLTYPE;
-
-
-/**
- * White Balance control configuration
- *
- * STRUCT MEMBERS:
- *  nSize            : Size of the structure in bytes
- *  nVersion         : OMX specification version information
- *  nPortIndex       : Port that this structure applies to
- *  eWhiteBalControl : White balance enumeration
- */
-typedef struct OMX_CONFIG_WHITEBALCONTROLTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_WHITEBALCONTROLTYPE eWhiteBalControl;
-} OMX_CONFIG_WHITEBALCONTROLTYPE;
-
-
-/**
- * Exposure control type
- */
-typedef enum OMX_EXPOSURECONTROLTYPE {
-    OMX_ExposureControlOff = 0,
-    OMX_ExposureControlAuto,
-    OMX_ExposureControlNight,
-    OMX_ExposureControlBackLight,
-    OMX_ExposureControlSpotLight,
-    OMX_ExposureControlSports,
-    OMX_ExposureControlSnow,
-    OMX_ExposureControlBeach,
-    OMX_ExposureControlLargeAperture,
-    OMX_ExposureControlSmallApperture,
-    OMX_ExposureControlKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
-    OMX_ExposureControlVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_ExposureControlMax = 0x7FFFFFFF
-} OMX_EXPOSURECONTROLTYPE;
-
-
-/**
- * Exposure control configuration
- *
- * STRUCT MEMBERS:
- *  nSize            : Size of the structure in bytes
- *  nVersion         : OMX specification version information
- *  nPortIndex       : Port that this structure applies to
- *  eExposureControl : Exposure control enumeration
- */
-typedef struct OMX_CONFIG_EXPOSURECONTROLTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_EXPOSURECONTROLTYPE eExposureControl;
-} OMX_CONFIG_EXPOSURECONTROLTYPE;
-
-
-/**
- * Defines sensor supported mode.
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  nFrameRate : Single shot mode is indicated by a 0
- *  bOneShot   : Enable for single shot, disable for streaming
- *  sFrameSize : Framesize
- */
-typedef struct OMX_PARAM_SENSORMODETYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nFrameRate;
-    OMX_BOOL bOneShot;
-    OMX_FRAMESIZETYPE sFrameSize;
-} OMX_PARAM_SENSORMODETYPE;
-
-
-/**
- * Defines contrast level
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  nContrast  : Values allowed for contrast -100 to 100, zero means no change
- */
-typedef struct OMX_CONFIG_CONTRASTTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_S32 nContrast;
-} OMX_CONFIG_CONTRASTTYPE;
-
-
-/**
- * Defines brightness level
- *
- * STRUCT MEMBERS:
- *  nSize       : Size of the structure in bytes
- *  nVersion    : OMX specification version information
- *  nPortIndex  : Port that this structure applies to
- *  nBrightness : 0-100%
- */
-typedef struct OMX_CONFIG_BRIGHTNESSTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nBrightness;
-} OMX_CONFIG_BRIGHTNESSTYPE;
-
-
-/**
- * Defines backlight level configuration for a video sink, e.g. LCD panel
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  nBacklight : Values allowed for backlight 0-100%
- *  nTimeout   : Number of milliseconds before backlight automatically turns
- *               off.  A value of 0x0 disables backlight timeout
- */
-typedef struct OMX_CONFIG_BACKLIGHTTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nBacklight;
-    OMX_U32 nTimeout;
-} OMX_CONFIG_BACKLIGHTTYPE;
-
-
-/**
- * Defines setting for Gamma
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  nGamma     : Values allowed for gamma -100 to 100, zero means no change
- */
-typedef struct OMX_CONFIG_GAMMATYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_S32 nGamma;
-} OMX_CONFIG_GAMMATYPE;
-
-
-/**
- * Define for setting saturation
- *
- * STRUCT MEMBERS:
- *  nSize       : Size of the structure in bytes
- *  nVersion    : OMX specification version information
- *  nPortIndex  : Port that this structure applies to
- *  nSaturation : Values allowed for saturation -100 to 100, zero means
- *                no change
- */
-typedef struct OMX_CONFIG_SATURATIONTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_S32 nSaturation;
-} OMX_CONFIG_SATURATIONTYPE;
-
-
-/**
- * Define for setting Lightness
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  nLightness : Values allowed for lightness -100 to 100, zero means no
- *               change
- */
-typedef struct OMX_CONFIG_LIGHTNESSTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_S32 nLightness;
-} OMX_CONFIG_LIGHTNESSTYPE;
-
-
-/**
- * Plane blend configuration
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Index of input port associated with the plane.
- *  nDepth     : Depth of the plane in relation to the screen. Higher
- *               numbered depths are "behind" lower numbered depths.
- *               This number defaults to the Port Index number.
- *  nAlpha     : Transparency blending component for the entire plane.
- *               See blending modes for more detail.
- */
-typedef struct OMX_CONFIG_PLANEBLENDTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nDepth;
-    OMX_U32 nAlpha;
-} OMX_CONFIG_PLANEBLENDTYPE;
-
-
-/**
- * Define interlace type
- *
- * STRUCT MEMBERS:
- *  nSize                 : Size of the structure in bytes
- *  nVersion              : OMX specification version information
- *  nPortIndex            : Port that this structure applies to
- *  bEnable               : Enable control variable for this functionality
- *                          (see below)
- *  nInterleavePortIndex  : Index of input or output port associated with
- *                          the interleaved plane.
- *  pPlanarPortIndexes[4] : Index of input or output planar ports.
- */
-typedef struct OMX_PARAM_INTERLEAVETYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_BOOL bEnable;
-    OMX_U32 nInterleavePortIndex;
-} OMX_PARAM_INTERLEAVETYPE;
-
-
-/**
- * Defines the picture effect used for an input picture
- */
-typedef enum OMX_TRANSITIONEFFECTTYPE {
-    OMX_EffectNone,
-    OMX_EffectFadeFromBlack,
-    OMX_EffectFadeToBlack,
-    OMX_EffectUnspecifiedThroughConstantColor,
-    OMX_EffectDissolve,
-    OMX_EffectWipe,
-    OMX_EffectUnspecifiedMixOfTwoScenes,
-    OMX_EffectKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
-    OMX_EffectVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_EffectMax = 0x7FFFFFFF
-} OMX_TRANSITIONEFFECTTYPE;
-
-
-/**
- * Structure used to configure current transition effect
- *
- * STRUCT MEMBERS:
- * nSize      : Size of the structure in bytes
- * nVersion   : OMX specification version information
- * nPortIndex : Port that this structure applies to
- * eEffect    : Effect to enable
- */
-typedef struct OMX_CONFIG_TRANSITIONEFFECTTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_TRANSITIONEFFECTTYPE eEffect;
-} OMX_CONFIG_TRANSITIONEFFECTTYPE;
-
-
-/**
- * Defines possible data unit types for encoded video data. The data unit
- * types are used both for encoded video input for playback as well as
- * encoded video output from recording.
- */
-typedef enum OMX_DATAUNITTYPE {
-    OMX_DataUnitCodedPicture,
-    OMX_DataUnitVideoSegment,
-    OMX_DataUnitSeveralSegments,
-    OMX_DataUnitArbitraryStreamSection,
-    OMX_DataUnitKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
-    OMX_DataUnitVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_DataUnitMax = 0x7FFFFFFF
-} OMX_DATAUNITTYPE;
-
-
-/**
- * Defines possible encapsulation types for coded video data unit. The
- * encapsulation information is used both for encoded video input for
- * playback as well as encoded video output from recording.
- */
-typedef enum OMX_DATAUNITENCAPSULATIONTYPE {
-    OMX_DataEncapsulationElementaryStream,
-    OMX_DataEncapsulationGenericPayload,
-    OMX_DataEncapsulationRtpPayload,
-    OMX_DataEncapsulationKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
-    OMX_DataEncapsulationVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_DataEncapsulationMax = 0x7FFFFFFF
-} OMX_DATAUNITENCAPSULATIONTYPE;
-
-
-/**
- * Structure used to configure the type of data unit being decoded/encoded
- */
-typedef struct OMX_PARAM_DATAUNITTYPE {
-    OMX_U32 nSize;            /**< Size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
-    OMX_U32 nPortIndex;       /**< Port that this structure applies to */
-    OMX_DATAUNITTYPE eUnitType;
-    OMX_DATAUNITENCAPSULATIONTYPE eEncapsulationType;
-} OMX_PARAM_DATAUNITTYPE;
-
-
-/**
- * Defines dither types
- */
-typedef enum OMX_DITHERTYPE {
-    OMX_DitherNone,
-    OMX_DitherOrdered,
-    OMX_DitherErrorDiffusion,
-    OMX_DitherOther,
-    OMX_DitherKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
-    OMX_DitherVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_DitherMax = 0x7FFFFFFF
-} OMX_DITHERTYPE;
-
-
-/**
- * Structure used to configure current type of dithering
- */
-typedef struct OMX_CONFIG_DITHERTYPE {
-    OMX_U32 nSize;            /**< Size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
-    OMX_U32 nPortIndex;       /**< Port that this structure applies to */
-    OMX_DITHERTYPE eDither;   /**< Type of dithering to use */
-} OMX_CONFIG_DITHERTYPE;
-
-typedef struct OMX_CONFIG_CAPTUREMODETYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;     /**< Port that this structure applies to */
-    OMX_BOOL bContinuous;   /**< If true then ignore frame rate and emit capture
-                             *   data as fast as possible (otherwise obey port's frame rate). */
-    OMX_BOOL bFrameLimited; /**< If true then terminate capture after the port emits the
-                             *   specified number of frames (otherwise the port does not
-                             *   terminate the capture until instructed to do so by the client).
-                             *   Even if set, the client may manually terminate the capture prior
-                             *   to reaching the limit. */
-    OMX_U32 nFrameLimit;      /**< Limit on number of frames emitted during a capture (only
-                               *   valid if bFrameLimited is set). */
-} OMX_CONFIG_CAPTUREMODETYPE;
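
A sketch limiting a capture to 10 frames on a hypothetical capture port 1; OMX_IndexConfigCaptureMode is the index from OMX_Index.h that references this structure:

    #include <string.h>
    #include <OMX_Core.h>
    #include <OMX_Component.h>
    #include <OMX_IVCommon.h>

    OMX_ERRORTYPE capture_ten_frames(OMX_HANDLETYPE hCamera)
    {
        OMX_CONFIG_CAPTUREMODETYPE mode;

        memset(&mode, 0, sizeof(mode));
        mode.nSize = sizeof(mode);
        mode.nVersion.s.nVersionMajor = 1;
        mode.nVersion.s.nVersionMinor = 1;
        mode.nPortIndex = 1;             /* hypothetical capture port */
        mode.bContinuous = OMX_FALSE;    /* obey the port's frame rate */
        mode.bFrameLimited = OMX_TRUE;   /* stop automatically ...     */
        mode.nFrameLimit = 10;           /* ... after 10 frames        */

        return OMX_SetConfig(hCamera, OMX_IndexConfigCaptureMode, &mode);
    }
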
-
-typedef enum OMX_METERINGTYPE {
-
-    OMX_MeteringModeAverage,     /**< Center-weighted average metering. */
-    OMX_MeteringModeSpot,  	      /**< Spot (partial) metering. */
-    OMX_MeteringModeMatrix,      /**< Matrix or evaluative metering. */
-
-    OMX_MeteringKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
-    OMX_MeteringVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_EVModeMax = 0x7fffffff
-} OMX_METERINGTYPE;
-
-typedef struct OMX_CONFIG_EXPOSUREVALUETYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_METERINGTYPE eMetering;
-    OMX_S32 xEVCompensation;      /**< Fixed point value stored as Q16 */
-    OMX_U32 nApertureFNumber;     /**< e.g. nApertureFNumber = 2 implies "f/2" - Q16 format */
-    OMX_BOOL bAutoAperture;		/**< Whether aperture number is defined automatically */
-    OMX_U32 nShutterSpeedMsec;    /**< Shutterspeed in milliseconds */
-    OMX_BOOL bAutoShutterSpeed;	/**< Whether shutter speed is defined automatically */
-    OMX_U32 nSensitivity;         /**< e.g. nSensitivity = 100 implies "ISO 100" */
-    OMX_BOOL bAutoSensitivity;	/**< Whether sensitivity is defined automatically */
-} OMX_CONFIG_EXPOSUREVALUETYPE;
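
A sketch applying +0.5 EV compensation at ISO 100 with matrix metering; xEVCompensation is Q16 as noted above, the port index is hypothetical, and OMX_IndexConfigCommonExposureValue is the index from OMX_Index.h that references this structure:

    #include <string.h>
    #include <OMX_Core.h>
    #include <OMX_Component.h>
    #include <OMX_IVCommon.h>

    OMX_ERRORTYPE set_exposure(OMX_HANDLETYPE hCamera)
    {
        OMX_CONFIG_EXPOSUREVALUETYPE exposure;

        memset(&exposure, 0, sizeof(exposure));
        exposure.nSize = sizeof(exposure);
        exposure.nVersion.s.nVersionMajor = 1;
        exposure.nVersion.s.nVersionMinor = 1;
        exposure.nPortIndex = 0;                            /* hypothetical port */
        exposure.eMetering = OMX_MeteringModeMatrix;
        exposure.xEVCompensation = (OMX_S32)(0.5 * 65536);  /* +0.5 EV in Q16 */
        exposure.bAutoAperture = OMX_TRUE;
        exposure.bAutoShutterSpeed = OMX_TRUE;
        exposure.nSensitivity = 100;                        /* "ISO 100" */
        exposure.bAutoSensitivity = OMX_FALSE;

        return OMX_SetConfig(hCamera, OMX_IndexConfigCommonExposureValue, &exposure);
    }
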
-
-/**
- * Focus region configuration
- *
- * STRUCT MEMBERS:
- *  nSize           : Size of the structure in bytes
- *  nVersion        : OMX specification version information
- *  nPortIndex      : Port that this structure applies to
- *  bCenter         : Use center region as focus region of interest
- *  bLeft           : Use left region as focus region of interest
- *  bRight          : Use right region as focus region of interest
- *  bTop            : Use top region as focus region of interest
- *  bBottom         : Use bottom region as focus region of interest
- *  bTopLeft        : Use top left region as focus region of interest
- *  bTopRight       : Use top right region as focus region of interest
- *  bBottomLeft     : Use bottom left region as focus region of interest
- *  bBottomRight    : Use bottom right region as focus region of interest
- */
-typedef struct OMX_CONFIG_FOCUSREGIONTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_BOOL bCenter;
-    OMX_BOOL bLeft;
-    OMX_BOOL bRight;
-    OMX_BOOL bTop;
-    OMX_BOOL bBottom;
-    OMX_BOOL bTopLeft;
-    OMX_BOOL bTopRight;
-    OMX_BOOL bBottomLeft;
-    OMX_BOOL bBottomRight;
-} OMX_CONFIG_FOCUSREGIONTYPE;
-
-/**
- * Focus Status type
- */
-typedef enum OMX_FOCUSSTATUSTYPE {
-    OMX_FocusStatusOff = 0,
-    OMX_FocusStatusRequest,
-    OMX_FocusStatusReached,
-    OMX_FocusStatusUnableToReach,
-    OMX_FocusStatusLost,
-    OMX_FocusStatusKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
-    OMX_FocusStatusVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_FocusStatusMax = 0x7FFFFFFF
-} OMX_FOCUSSTATUSTYPE;
-
-/**
- * Focus status configuration
- *
- * STRUCT MEMBERS:
- *  nSize               : Size of the structure in bytes
- *  nVersion            : OMX specification version information
- *  nPortIndex          : Port that this structure applies to
- *  eFocusStatus        : Specifies the focus status
- *  bCenterStatus       : Use center region as focus region of interest
- *  bLeftStatus         : Use left region as focus region of interest
- *  bRightStatus        : Use right region as focus region of interest
- *  bTopStatus          : Use top region as focus region of interest
- *  bBottomStatus       : Use bottom region as focus region of interest
- *  bTopLeftStatus      : Use top left region as focus region of interest
- *  bTopRightStatus     : Use top right region as focus region of interest
- *  bBottomLeftStatus   : Use bottom left region as focus region of interest
- *  bBottomRightStatus  : Use bottom right region as focus region of interest
- */
-typedef struct OMX_PARAM_FOCUSSTATUSTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_FOCUSSTATUSTYPE eFocusStatus;
-    OMX_BOOL bCenterStatus;
-    OMX_BOOL bLeftStatus;
-    OMX_BOOL bRightStatus;
-    OMX_BOOL bTopStatus;
-    OMX_BOOL bBottomStatus;
-    OMX_BOOL bTopLeftStatus;
-    OMX_BOOL bTopRightStatus;
-    OMX_BOOL bBottomLeftStatus;
-    OMX_BOOL bBottomRightStatus;
-} OMX_PARAM_FOCUSSTATUSTYPE;
-
-/** @} */
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif
-/* File EOF */
diff --git a/include/media/stagefright/openmax/OMX_Image.h b/include/media/stagefright/openmax/OMX_Image.h
deleted file mode 100644
index 42e39ec..0000000
--- a/include/media/stagefright/openmax/OMX_Image.h
+++ /dev/null
@@ -1,345 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/**
- * Copyright (c) 2008 The Khronos Group Inc. 
- * 
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject
- * to the following conditions: 
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software. 
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */
-
-/** 
- * @file OMX_Image.h - OpenMax IL version 1.1.2
- * The structures needed by Image components to exchange parameters and 
- * configuration data with the components.
- */
-#ifndef OMX_Image_h
-#define OMX_Image_h
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-
-/**
- * Each OMX header must include all required header files to allow the 
- * header to compile without errors.  The includes below are required  
- * for this header file to compile successfully 
- */
-
-#include <OMX_IVCommon.h>
-
-/** @defgroup imaging OpenMAX IL Imaging Domain
- * @ingroup iv
- * Structures for OpenMAX IL Imaging domain
- * @{
- */
-
-/** 
- * Enumeration used to define the possible image compression coding. 
- */
-typedef enum OMX_IMAGE_CODINGTYPE {
-    OMX_IMAGE_CodingUnused,      /**< Value when format is N/A */
-    OMX_IMAGE_CodingAutoDetect,  /**< Auto detection of image format */
-    OMX_IMAGE_CodingJPEG,        /**< JPEG/JFIF image format */
-    OMX_IMAGE_CodingJPEG2K,      /**< JPEG 2000 image format */
-    OMX_IMAGE_CodingEXIF,        /**< EXIF image format */
-    OMX_IMAGE_CodingTIFF,        /**< TIFF image format */
-    OMX_IMAGE_CodingGIF,         /**< Graphics image format */
-    OMX_IMAGE_CodingPNG,         /**< PNG image format */
-    OMX_IMAGE_CodingLZW,         /**< LZW image format */
-    OMX_IMAGE_CodingBMP,         /**< Windows Bitmap format */
-    OMX_IMAGE_CodingKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_IMAGE_CodingVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_IMAGE_CodingMax = 0x7FFFFFFF
-} OMX_IMAGE_CODINGTYPE;
-
-
-/**
- * Data structure used to define an image path. The number of image paths 
- * for input and output will vary by type of the image component.  
- * 
- *  Input (aka Source) : Zero Inputs, one Output,
- *  Splitter           : One Input, 2 or more Outputs,
- *  Processing Element : One Input, one output,
- *  Mixer              : 2 or more inputs, one output,
- *  Output (aka Sink)  : One Input, zero outputs.
- * 
- * The PortDefinition structure is used to define all of the parameters 
- * necessary for the compliant component to setup an input or an output  
- * image path.  If additional vendor specific data is required, it should  
- * be transmitted to the component using the CustomCommand function.   
- * Compliant components will prepopulate this structure with optimal  
- * values during the OMX_GetParameter() command.
- *
- * STRUCT MEMBERS:
- *  cMIMEType             : MIME type of data for the port
- *  pNativeRender         : Platform specific reference for a display if a 
- *                          sink, otherwise this field is 0
- *  nFrameWidth           : Width of frame to be used on port if 
- *                          uncompressed format is used.  Use 0 for 
- *                          unknown, don't care or variable
- *  nFrameHeight          : Height of frame to be used on port if 
- *                          uncompressed format is used. Use 0 for 
- *                          unknown, don't care or variable
- *  nStride               : Number of bytes per span of an image (i.e. 
- *                          indicates the number of bytes to get from
- *                          span N to span N+1, where negative stride 
- *                          indicates the image is bottom up)
- *  nSliceHeight          : Height used when encoding in slices
- *  bFlagErrorConcealment : Turns on error concealment if it is supported by 
- *                          the OMX component
- *  eCompressionFormat    : Compression format used in this instance of  
- *                          the component. When OMX_IMAGE_CodingUnused is 
- *                          specified, eColorFormat is valid
- *  eColorFormat          : Decompressed format used by this component
- *  pNativeWindow         : Platform specific reference for a window object if a 
- *                          display sink, otherwise this field is 0x0. 
- */
-typedef struct OMX_IMAGE_PORTDEFINITIONTYPE {
-    OMX_STRING cMIMEType;
-    OMX_NATIVE_DEVICETYPE pNativeRender;
-    OMX_U32 nFrameWidth; 
-    OMX_U32 nFrameHeight;
-    OMX_S32 nStride;     
-    OMX_U32 nSliceHeight;
-    OMX_BOOL bFlagErrorConcealment;
-    OMX_IMAGE_CODINGTYPE eCompressionFormat;
-    OMX_COLOR_FORMATTYPE eColorFormat;
-    OMX_NATIVE_WINDOWTYPE pNativeWindow;
-} OMX_IMAGE_PORTDEFINITIONTYPE;
-
-
-/**  
- * Port format parameter.  This structure is used to enumerate the various 
- * data input/output format supported by the port.
- * 
- * STRUCT MEMBERS:
- *  nSize              : Size of the structure in bytes
- *  nVersion           : OMX specification version information
- *  nPortIndex         : Indicates which port to set
- *  nIndex             : Indicates the enumeration index for the format from 
- *                       0x0 to N-1
- *  eCompressionFormat : Compression format used in this instance of the 
- *                       component. When OMX_IMAGE_CodingUnused is specified, 
- *                       eColorFormat is valid
- *  eColorFormat       : Decompressed format used by this component
- */
-typedef struct OMX_IMAGE_PARAM_PORTFORMATTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nIndex;
-    OMX_IMAGE_CODINGTYPE eCompressionFormat;
-    OMX_COLOR_FORMATTYPE eColorFormat;
-} OMX_IMAGE_PARAM_PORTFORMATTYPE;
-
-
-/** 
- * Flash control type 
- *
- * ENUMS
- *  Torch : Flash forced constantly on
- */
-typedef enum OMX_IMAGE_FLASHCONTROLTYPE {
-    OMX_IMAGE_FlashControlOn = 0,
-    OMX_IMAGE_FlashControlOff,
-    OMX_IMAGE_FlashControlAuto,
-    OMX_IMAGE_FlashControlRedEyeReduction,
-    OMX_IMAGE_FlashControlFillin,
-    OMX_IMAGE_FlashControlTorch,
-    OMX_IMAGE_FlashControlKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_IMAGE_FlashControlVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_IMAGE_FlashControlMax = 0x7FFFFFFF
-} OMX_IMAGE_FLASHCONTROLTYPE;
-
-
-/** 
- * Flash control configuration 
- *
- * STRUCT MEMBERS:
- *  nSize         : Size of the structure in bytes
- *  nVersion      : OMX specification version information
- *  nPortIndex    : Port that this structure applies to
- *  eFlashControl : Flash control type
- */
-typedef struct OMX_IMAGE_PARAM_FLASHCONTROLTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_IMAGE_FLASHCONTROLTYPE eFlashControl;
-} OMX_IMAGE_PARAM_FLASHCONTROLTYPE;
-
-
-/** 
- * Focus control type 
- */
-typedef enum OMX_IMAGE_FOCUSCONTROLTYPE {
-    OMX_IMAGE_FocusControlOn = 0,
-    OMX_IMAGE_FocusControlOff,
-    OMX_IMAGE_FocusControlAuto,
-    OMX_IMAGE_FocusControlAutoLock,
-    OMX_IMAGE_FocusControlKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_IMAGE_FocusControlVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_IMAGE_FocusControlMax = 0x7FFFFFFF
-} OMX_IMAGE_FOCUSCONTROLTYPE;
-
- 
-/** 
- * Focus control configuration 
- *
- * STRUCT MEMBERS:
- *  nSize           : Size of the structure in bytes
- *  nVersion        : OMX specification version information
- *  nPortIndex      : Port that this structure applies to
- *  eFocusControl   : Focus control
- *  nFocusSteps     : Focus can take on values from 0 mm to infinity. 
- *                    Interest is only in the number of steps over this range.
- *  nFocusStepIndex : Current focus step index
- */
-typedef struct OMX_IMAGE_CONFIG_FOCUSCONTROLTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_IMAGE_FOCUSCONTROLTYPE eFocusControl;
-    OMX_U32 nFocusSteps;
-    OMX_U32 nFocusStepIndex;
-} OMX_IMAGE_CONFIG_FOCUSCONTROLTYPE;
-
-
-/** 
- * Q Factor for JPEG compression, which controls the tradeoff between image
- * quality and size.  Q Factor provides a simpler means of controlling
- * JPEG compression quality, without directly programming Quantization
- * tables for chroma and luma 
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes         
- *  nVersion   : OMX specification version information 
- *  nPortIndex : Port that this structure applies to 
- *  nQFactor   : JPEG Q factor value in the range of 1-100. A factor of 1 
- *               produces the smallest, worst quality images, and a factor 
- *               of 100 produces the largest, best quality images.  A 
- *               typical default is 75 for small, good-quality images               
- */
-typedef struct OMX_IMAGE_PARAM_QFACTORTYPE {
-    OMX_U32 nSize;            
-    OMX_VERSIONTYPE nVersion; 
-    OMX_U32 nPortIndex;       
-    OMX_U32 nQFactor;                                        
-} OMX_IMAGE_PARAM_QFACTORTYPE;
-
-/** 
- * Quantization table type 
- */
-
-typedef enum OMX_IMAGE_QUANTIZATIONTABLETYPE {
-    OMX_IMAGE_QuantizationTableLuma = 0,
-    OMX_IMAGE_QuantizationTableChroma,
-    OMX_IMAGE_QuantizationTableChromaCb,
-    OMX_IMAGE_QuantizationTableChromaCr,
-    OMX_IMAGE_QuantizationTableKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_IMAGE_QuantizationTableVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_IMAGE_QuantizationTableMax = 0x7FFFFFFF
-} OMX_IMAGE_QUANTIZATIONTABLETYPE;
-
-/** 
- * JPEG quantization tables are used to determine DCT compression for
- * YUV data, as an alternative to specifying Q factor, providing exact 
- * control of compression 
- *
- * STRUCT MEMBERS:
- *  nSize                   : Size of the structure in bytes
- *  nVersion                : OMX specification version information 
- *  nPortIndex              : Port that this structure applies to
- *  eQuantizationTable      : Quantization table type
- *  nQuantizationMatrix[64] : JPEG quantization table of coefficients stored 
- *                            in increasing columns then by rows of data (i.e. 
- *                            row 1, ... row 8). Quantization values are in 
- *                            the range 0-255 and stored in linear order
- *                            (i.e. the component will zig-zag the 
- *                            quantization table data if required internally) 
- */
-typedef struct OMX_IMAGE_PARAM_QUANTIZATIONTABLETYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_IMAGE_QUANTIZATIONTABLETYPE eQuantizationTable;
-    OMX_U8 nQuantizationMatrix[64];
-} OMX_IMAGE_PARAM_QUANTIZATIONTABLETYPE;
-
-
-/** 
- * Huffman table type; the same Huffman table is applied to the chroma and 
- * luma components 
- */
-typedef enum OMX_IMAGE_HUFFMANTABLETYPE {
-    OMX_IMAGE_HuffmanTableAC = 0,
-    OMX_IMAGE_HuffmanTableDC,
-    OMX_IMAGE_HuffmanTableACLuma,
-    OMX_IMAGE_HuffmanTableACChroma,
-    OMX_IMAGE_HuffmanTableDCLuma,
-    OMX_IMAGE_HuffmanTableDCChroma,
-    OMX_IMAGE_HuffmanTableKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_IMAGE_HuffmanTableVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_IMAGE_HuffmanTableMax = 0x7FFFFFFF
-} OMX_IMAGE_HUFFMANTABLETYPE;
-
-/** 
- * JPEG Huffman table 
- *
- * STRUCT MEMBERS:
- *  nSize                            : Size of the structure in bytes
- *  nVersion                         : OMX specification version information
- *  nPortIndex                       : Port that this structure applies to
- *  eHuffmanTable                    : Huffman table type
- *  nNumberOfHuffmanCodeOfLength[16] : 0-16, number of Huffman codes of each 
- *                                     possible length
- *  nHuffmanTable[256]               : 0-255, the sizes used for the AC and DC 
- *                                     Huffman tables are 16 and 162 
- */
-typedef struct OMX_IMAGE_PARAM_HUFFMANTTABLETYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_IMAGE_HUFFMANTABLETYPE eHuffmanTable;
-    OMX_U8 nNumberOfHuffmanCodeOfLength[16];
-    OMX_U8 nHuffmanTable[256];
-}OMX_IMAGE_PARAM_HUFFMANTTABLETYPE;
-
-/** @} */
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif
-/* File EOF */
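The image-domain header removed above documents OMX_IMAGE_PARAM_QFACTORTYPE, the JPEG Q-factor structure. As a hedged sketch only (the component handle hJpegEncoder and port index are illustrative assumptions, not part of this change), an IL client would typically set it like this:

    #include <string.h>
    #include <OMX_Core.h>
    #include <OMX_Image.h>
    #include <OMX_Index.h>

    /* Illustrative helper: request roughly 75% JPEG quality on the given port. */
    static OMX_ERRORTYPE set_jpeg_quality(OMX_HANDLETYPE hJpegEncoder, OMX_U32 nPortIndex)
    {
        OMX_IMAGE_PARAM_QFACTORTYPE qfactor;
        memset(&qfactor, 0, sizeof(qfactor));
        qfactor.nSize = sizeof(OMX_IMAGE_PARAM_QFACTORTYPE);
        qfactor.nVersion.s.nVersionMajor = 1;   /* IL 1.1 structures carry version 1.1.0.0 */
        qfactor.nVersion.s.nVersionMinor = 1;
        qfactor.nPortIndex = nPortIndex;
        qfactor.nQFactor = 75;                  /* 1 = smallest/worst, 100 = largest/best */
        return OMX_SetParameter(hJpegEncoder, OMX_IndexParamQFactor, &qfactor);
    }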
diff --git a/include/media/stagefright/openmax/OMX_Index.h b/include/media/stagefright/openmax/OMX_Index.h
deleted file mode 100644
index c0b8d92..0000000
--- a/include/media/stagefright/openmax/OMX_Index.h
+++ /dev/null
@@ -1,275 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/*
- * Copyright (c) 2008 The Khronos Group Inc. 
- * 
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject
- * to the following conditions: 
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software. 
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- *
- */
-
-/** @file OMX_Index.h - OpenMax IL version 1.1.2
- *  The OMX_Index header file contains the definitions for both applications
- *  and components.
- */
-
-
-#ifndef OMX_Index_h
-#define OMX_Index_h
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-
-/* Each OMX header must include all required header files to allow the
- *  header to compile without errors.  The includes below are required
- *  for this header file to compile successfully 
- */
-#include <OMX_Types.h>
-
-
-/** The OMX_INDEXTYPE enumeration is used to select a structure when either
- *  getting or setting parameters and/or configuration data.  Each entry in 
- *  this enumeration maps to an OMX specified structure.  When the 
- *  OMX_GetParameter, OMX_SetParameter, OMX_GetConfig or OMX_SetConfig methods
- *  are used, the second parameter will always be an entry from this enumeration
- *  and the third parameter will be the structure shown in the comments for the entry.
- *  For example, if the application is initializing a cropping function, the 
- *  OMX_SetConfig command would have OMX_IndexConfigCommonInputCrop as the second parameter 
- *  and would send a pointer to an initialized OMX_RECTTYPE structure as the 
- *  third parameter.
- *  
- *  The enumeration entries named with the OMX_IndexConfig prefix are sent using
- *  the OMX_SetConfig command and the enumeration entries named with the
- *  OMX_IndexParam prefix are sent using the OMX_SetParameter command.
- */
-typedef enum OMX_INDEXTYPE {
-
-    OMX_IndexComponentStartUnused = 0x01000000,
-    OMX_IndexParamPriorityMgmt,             /**< reference: OMX_PRIORITYMGMTTYPE */
-    OMX_IndexParamAudioInit,                /**< reference: OMX_PORT_PARAM_TYPE */
-    OMX_IndexParamImageInit,                /**< reference: OMX_PORT_PARAM_TYPE */
-    OMX_IndexParamVideoInit,                /**< reference: OMX_PORT_PARAM_TYPE */
-    OMX_IndexParamOtherInit,                /**< reference: OMX_PORT_PARAM_TYPE */
-    OMX_IndexParamNumAvailableStreams,      /**< reference: OMX_PARAM_U32TYPE */
-    OMX_IndexParamActiveStream,             /**< reference: OMX_PARAM_U32TYPE */
-    OMX_IndexParamSuspensionPolicy,         /**< reference: OMX_PARAM_SUSPENSIONPOLICYTYPE */
-    OMX_IndexParamComponentSuspended,       /**< reference: OMX_PARAM_SUSPENSIONTYPE */
-    OMX_IndexConfigCapturing,               /**< reference: OMX_CONFIG_BOOLEANTYPE */ 
-    OMX_IndexConfigCaptureMode,             /**< reference: OMX_CONFIG_CAPTUREMODETYPE */ 
-    OMX_IndexAutoPauseAfterCapture,         /**< reference: OMX_CONFIG_BOOLEANTYPE */ 
-    OMX_IndexParamContentURI,               /**< reference: OMX_PARAM_CONTENTURITYPE */
-    OMX_IndexParamCustomContentPipe,        /**< reference: OMX_PARAM_CONTENTPIPETYPE */ 
-    OMX_IndexParamDisableResourceConcealment, /**< reference: OMX_RESOURCECONCEALMENTTYPE */
-    OMX_IndexConfigMetadataItemCount,       /**< reference: OMX_CONFIG_METADATAITEMCOUNTTYPE */
-    OMX_IndexConfigContainerNodeCount,      /**< reference: OMX_CONFIG_CONTAINERNODECOUNTTYPE */
-    OMX_IndexConfigMetadataItem,            /**< reference: OMX_CONFIG_METADATAITEMTYPE */
-    OMX_IndexConfigCounterNodeID,           /**< reference: OMX_CONFIG_CONTAINERNODEIDTYPE */
-    OMX_IndexParamMetadataFilterType,       /**< reference: OMX_PARAM_METADATAFILTERTYPE */
-    OMX_IndexParamMetadataKeyFilter,        /**< reference: OMX_PARAM_METADATAFILTERTYPE */
-    OMX_IndexConfigPriorityMgmt,            /**< reference: OMX_PRIORITYMGMTTYPE */
-    OMX_IndexParamStandardComponentRole,    /**< reference: OMX_PARAM_COMPONENTROLETYPE */
-
-    OMX_IndexPortStartUnused = 0x02000000,
-    OMX_IndexParamPortDefinition,           /**< reference: OMX_PARAM_PORTDEFINITIONTYPE */
-    OMX_IndexParamCompBufferSupplier,       /**< reference: OMX_PARAM_BUFFERSUPPLIERTYPE */ 
-    OMX_IndexReservedStartUnused = 0x03000000,
-
-    /* Audio parameters and configurations */
-    OMX_IndexAudioStartUnused = 0x04000000,
-    OMX_IndexParamAudioPortFormat,          /**< reference: OMX_AUDIO_PARAM_PORTFORMATTYPE */
-    OMX_IndexParamAudioPcm,                 /**< reference: OMX_AUDIO_PARAM_PCMMODETYPE */
-    OMX_IndexParamAudioAac,                 /**< reference: OMX_AUDIO_PARAM_AACPROFILETYPE */
-    OMX_IndexParamAudioRa,                  /**< reference: OMX_AUDIO_PARAM_RATYPE */
-    OMX_IndexParamAudioMp3,                 /**< reference: OMX_AUDIO_PARAM_MP3TYPE */
-    OMX_IndexParamAudioAdpcm,               /**< reference: OMX_AUDIO_PARAM_ADPCMTYPE */
-    OMX_IndexParamAudioG723,                /**< reference: OMX_AUDIO_PARAM_G723TYPE */
-    OMX_IndexParamAudioG729,                /**< reference: OMX_AUDIO_PARAM_G729TYPE */
-    OMX_IndexParamAudioAmr,                 /**< reference: OMX_AUDIO_PARAM_AMRTYPE */
-    OMX_IndexParamAudioWma,                 /**< reference: OMX_AUDIO_PARAM_WMATYPE */
-    OMX_IndexParamAudioSbc,                 /**< reference: OMX_AUDIO_PARAM_SBCTYPE */
-    OMX_IndexParamAudioMidi,                /**< reference: OMX_AUDIO_PARAM_MIDITYPE */
-    OMX_IndexParamAudioGsm_FR,              /**< reference: OMX_AUDIO_PARAM_GSMFRTYPE */
-    OMX_IndexParamAudioMidiLoadUserSound,   /**< reference: OMX_AUDIO_PARAM_MIDILOADUSERSOUNDTYPE */
-    OMX_IndexParamAudioG726,                /**< reference: OMX_AUDIO_PARAM_G726TYPE */
-    OMX_IndexParamAudioGsm_EFR,             /**< reference: OMX_AUDIO_PARAM_GSMEFRTYPE */
-    OMX_IndexParamAudioGsm_HR,              /**< reference: OMX_AUDIO_PARAM_GSMHRTYPE */
-    OMX_IndexParamAudioPdc_FR,              /**< reference: OMX_AUDIO_PARAM_PDCFRTYPE */
-    OMX_IndexParamAudioPdc_EFR,             /**< reference: OMX_AUDIO_PARAM_PDCEFRTYPE */
-    OMX_IndexParamAudioPdc_HR,              /**< reference: OMX_AUDIO_PARAM_PDCHRTYPE */
-    OMX_IndexParamAudioTdma_FR,             /**< reference: OMX_AUDIO_PARAM_TDMAFRTYPE */
-    OMX_IndexParamAudioTdma_EFR,            /**< reference: OMX_AUDIO_PARAM_TDMAEFRTYPE */
-    OMX_IndexParamAudioQcelp8,              /**< reference: OMX_AUDIO_PARAM_QCELP8TYPE */
-    OMX_IndexParamAudioQcelp13,             /**< reference: OMX_AUDIO_PARAM_QCELP13TYPE */
-    OMX_IndexParamAudioEvrc,                /**< reference: OMX_AUDIO_PARAM_EVRCTYPE */
-    OMX_IndexParamAudioSmv,                 /**< reference: OMX_AUDIO_PARAM_SMVTYPE */
-    OMX_IndexParamAudioVorbis,              /**< reference: OMX_AUDIO_PARAM_VORBISTYPE */
-
-    OMX_IndexConfigAudioMidiImmediateEvent, /**< reference: OMX_AUDIO_CONFIG_MIDIIMMEDIATEEVENTTYPE */
-    OMX_IndexConfigAudioMidiControl,        /**< reference: OMX_AUDIO_CONFIG_MIDICONTROLTYPE */
-    OMX_IndexConfigAudioMidiSoundBankProgram, /**< reference: OMX_AUDIO_CONFIG_MIDISOUNDBANKPROGRAMTYPE */
-    OMX_IndexConfigAudioMidiStatus,         /**< reference: OMX_AUDIO_CONFIG_MIDISTATUSTYPE */
-    OMX_IndexConfigAudioMidiMetaEvent,      /**< reference: OMX_AUDIO_CONFIG_MIDIMETAEVENTTYPE */
-    OMX_IndexConfigAudioMidiMetaEventData,  /**< reference: OMX_AUDIO_CONFIG_MIDIMETAEVENTDATATYPE */
-    OMX_IndexConfigAudioVolume,             /**< reference: OMX_AUDIO_CONFIG_VOLUMETYPE */
-    OMX_IndexConfigAudioBalance,            /**< reference: OMX_AUDIO_CONFIG_BALANCETYPE */
-    OMX_IndexConfigAudioChannelMute,        /**< reference: OMX_AUDIO_CONFIG_CHANNELMUTETYPE */
-    OMX_IndexConfigAudioMute,               /**< reference: OMX_AUDIO_CONFIG_MUTETYPE */
-    OMX_IndexConfigAudioLoudness,           /**< reference: OMX_AUDIO_CONFIG_LOUDNESSTYPE */
-    OMX_IndexConfigAudioEchoCancelation,    /**< reference: OMX_AUDIO_CONFIG_ECHOCANCELATIONTYPE */
-    OMX_IndexConfigAudioNoiseReduction,     /**< reference: OMX_AUDIO_CONFIG_NOISEREDUCTIONTYPE */
-    OMX_IndexConfigAudioBass,               /**< reference: OMX_AUDIO_CONFIG_BASSTYPE */
-    OMX_IndexConfigAudioTreble,             /**< reference: OMX_AUDIO_CONFIG_TREBLETYPE */
-    OMX_IndexConfigAudioStereoWidening,     /**< reference: OMX_AUDIO_CONFIG_STEREOWIDENINGTYPE */
-    OMX_IndexConfigAudioChorus,             /**< reference: OMX_AUDIO_CONFIG_CHORUSTYPE */
-    OMX_IndexConfigAudioEqualizer,          /**< reference: OMX_AUDIO_CONFIG_EQUALIZERTYPE */
-    OMX_IndexConfigAudioReverberation,      /**< reference: OMX_AUDIO_CONFIG_REVERBERATIONTYPE */
-    OMX_IndexConfigAudioChannelVolume,      /**< reference: OMX_AUDIO_CONFIG_CHANNELVOLUMETYPE */
-
-    /* Image specific parameters and configurations */
-    OMX_IndexImageStartUnused = 0x05000000,
-    OMX_IndexParamImagePortFormat,          /**< reference: OMX_IMAGE_PARAM_PORTFORMATTYPE */
-    OMX_IndexParamFlashControl,             /**< reference: OMX_IMAGE_PARAM_FLASHCONTROLTYPE */
-    OMX_IndexConfigFocusControl,            /**< reference: OMX_IMAGE_CONFIG_FOCUSCONTROLTYPE */
-    OMX_IndexParamQFactor,                  /**< reference: OMX_IMAGE_PARAM_QFACTORTYPE */
-    OMX_IndexParamQuantizationTable,        /**< reference: OMX_IMAGE_PARAM_QUANTIZATIONTABLETYPE */
-    OMX_IndexParamHuffmanTable,             /**< reference: OMX_IMAGE_PARAM_HUFFMANTTABLETYPE */
-    OMX_IndexConfigFlashControl,            /**< reference: OMX_IMAGE_PARAM_FLASHCONTROLTYPE */
-
-    /* Video specific parameters and configurations */
-    OMX_IndexVideoStartUnused = 0x06000000,
-    OMX_IndexParamVideoPortFormat,          /**< reference: OMX_VIDEO_PARAM_PORTFORMATTYPE */
-    OMX_IndexParamVideoQuantization,        /**< reference: OMX_VIDEO_PARAM_QUANTIZATIONTYPE */
-    OMX_IndexParamVideoFastUpdate,          /**< reference: OMX_VIDEO_PARAM_VIDEOFASTUPDATETYPE */
-    OMX_IndexParamVideoBitrate,             /**< reference: OMX_VIDEO_PARAM_BITRATETYPE */
-    OMX_IndexParamVideoMotionVector,        /**< reference: OMX_VIDEO_PARAM_MOTIONVECTORTYPE */
-    OMX_IndexParamVideoIntraRefresh,        /**< reference: OMX_VIDEO_PARAM_INTRAREFRESHTYPE */
-    OMX_IndexParamVideoErrorCorrection,     /**< reference: OMX_VIDEO_PARAM_ERRORCORRECTIONTYPE */
-    OMX_IndexParamVideoVBSMC,               /**< reference: OMX_VIDEO_PARAM_VBSMCTYPE */
-    OMX_IndexParamVideoMpeg2,               /**< reference: OMX_VIDEO_PARAM_MPEG2TYPE */
-    OMX_IndexParamVideoMpeg4,               /**< reference: OMX_VIDEO_PARAM_MPEG4TYPE */
-    OMX_IndexParamVideoWmv,                 /**< reference: OMX_VIDEO_PARAM_WMVTYPE */
-    OMX_IndexParamVideoRv,                  /**< reference: OMX_VIDEO_PARAM_RVTYPE */
-    OMX_IndexParamVideoAvc,                 /**< reference: OMX_VIDEO_PARAM_AVCTYPE */
-    OMX_IndexParamVideoH263,                /**< reference: OMX_VIDEO_PARAM_H263TYPE */
-    OMX_IndexParamVideoProfileLevelQuerySupported, /**< reference: OMX_VIDEO_PARAM_PROFILELEVELTYPE */
-    OMX_IndexParamVideoProfileLevelCurrent, /**< reference: OMX_VIDEO_PARAM_PROFILELEVELTYPE */
-    OMX_IndexConfigVideoBitrate,            /**< reference: OMX_VIDEO_CONFIG_BITRATETYPE */
-    OMX_IndexConfigVideoFramerate,          /**< reference: OMX_CONFIG_FRAMERATETYPE */
-    OMX_IndexConfigVideoIntraVOPRefresh,    /**< reference: OMX_CONFIG_INTRAREFRESHVOPTYPE */
-    OMX_IndexConfigVideoIntraMBRefresh,     /**< reference: OMX_CONFIG_MACROBLOCKERRORMAPTYPE */
-    OMX_IndexConfigVideoMBErrorReporting,   /**< reference: OMX_CONFIG_MBERRORREPORTINGTYPE */
-    OMX_IndexParamVideoMacroblocksPerFrame, /**< reference: OMX_PARAM_MACROBLOCKSTYPE */
-    OMX_IndexConfigVideoMacroBlockErrorMap, /**< reference: OMX_CONFIG_MACROBLOCKERRORMAPTYPE */
-    OMX_IndexParamVideoSliceFMO,            /**< reference: OMX_VIDEO_PARAM_AVCSLICEFMO */
-    OMX_IndexConfigVideoAVCIntraPeriod,     /**< reference: OMX_VIDEO_CONFIG_AVCINTRAPERIOD */
-    OMX_IndexConfigVideoNalSize,            /**< reference: OMX_VIDEO_CONFIG_NALSIZE */
-
-    /* Image & Video common Configurations */
-    OMX_IndexCommonStartUnused = 0x07000000,
-    OMX_IndexParamCommonDeblocking,         /**< reference: OMX_PARAM_DEBLOCKINGTYPE */
-    OMX_IndexParamCommonSensorMode,         /**< reference: OMX_PARAM_SENSORMODETYPE */
-    OMX_IndexParamCommonInterleave,         /**< reference: OMX_PARAM_INTERLEAVETYPE */
-    OMX_IndexConfigCommonColorFormatConversion, /**< reference: OMX_CONFIG_COLORCONVERSIONTYPE */
-    OMX_IndexConfigCommonScale,             /**< reference: OMX_CONFIG_SCALEFACTORTYPE */
-    OMX_IndexConfigCommonImageFilter,       /**< reference: OMX_CONFIG_IMAGEFILTERTYPE */
-    OMX_IndexConfigCommonColorEnhancement,  /**< reference: OMX_CONFIG_COLORENHANCEMENTTYPE */
-    OMX_IndexConfigCommonColorKey,          /**< reference: OMX_CONFIG_COLORKEYTYPE */
-    OMX_IndexConfigCommonColorBlend,        /**< reference: OMX_CONFIG_COLORBLENDTYPE */
-    OMX_IndexConfigCommonFrameStabilisation,/**< reference: OMX_CONFIG_FRAMESTABTYPE */
-    OMX_IndexConfigCommonRotate,            /**< reference: OMX_CONFIG_ROTATIONTYPE */
-    OMX_IndexConfigCommonMirror,            /**< reference: OMX_CONFIG_MIRRORTYPE */
-    OMX_IndexConfigCommonOutputPosition,    /**< reference: OMX_CONFIG_POINTTYPE */
-    OMX_IndexConfigCommonInputCrop,         /**< reference: OMX_CONFIG_RECTTYPE */
-    OMX_IndexConfigCommonOutputCrop,        /**< reference: OMX_CONFIG_RECTTYPE */
-    OMX_IndexConfigCommonDigitalZoom,       /**< reference: OMX_CONFIG_SCALEFACTORTYPE */
-    OMX_IndexConfigCommonOpticalZoom,       /**< reference: OMX_CONFIG_SCALEFACTORTYPE*/
-    OMX_IndexConfigCommonWhiteBalance,      /**< reference: OMX_CONFIG_WHITEBALCONTROLTYPE */
-    OMX_IndexConfigCommonExposure,          /**< reference: OMX_CONFIG_EXPOSURECONTROLTYPE */
-    OMX_IndexConfigCommonContrast,          /**< reference: OMX_CONFIG_CONTRASTTYPE */
-    OMX_IndexConfigCommonBrightness,        /**< reference: OMX_CONFIG_BRIGHTNESSTYPE */
-    OMX_IndexConfigCommonBacklight,         /**< reference: OMX_CONFIG_BACKLIGHTTYPE */
-    OMX_IndexConfigCommonGamma,             /**< reference: OMX_CONFIG_GAMMATYPE */
-    OMX_IndexConfigCommonSaturation,        /**< reference: OMX_CONFIG_SATURATIONTYPE */
-    OMX_IndexConfigCommonLightness,         /**< reference: OMX_CONFIG_LIGHTNESSTYPE */
-    OMX_IndexConfigCommonExclusionRect,     /**< reference: OMX_CONFIG_RECTTYPE */
-    OMX_IndexConfigCommonDithering,         /**< reference: OMX_CONFIG_DITHERTYPE */
-    OMX_IndexConfigCommonPlaneBlend,        /**< reference: OMX_CONFIG_PLANEBLENDTYPE */
-    OMX_IndexConfigCommonExposureValue,     /**< reference: OMX_CONFIG_EXPOSUREVALUETYPE */
-    OMX_IndexConfigCommonOutputSize,        /**< reference: OMX_FRAMESIZETYPE */
-    OMX_IndexParamCommonExtraQuantData,     /**< reference: OMX_OTHER_EXTRADATATYPE */
-    OMX_IndexConfigCommonFocusRegion,       /**< reference: OMX_CONFIG_FOCUSREGIONTYPE */
-    OMX_IndexConfigCommonFocusStatus,       /**< reference: OMX_PARAM_FOCUSSTATUSTYPE */
-    OMX_IndexConfigCommonTransitionEffect,  /**< reference: OMX_CONFIG_TRANSITIONEFFECTTYPE */
-
-    /* Reserved Configuration range */
-    OMX_IndexOtherStartUnused = 0x08000000,
-    OMX_IndexParamOtherPortFormat,          /**< reference: OMX_OTHER_PARAM_PORTFORMATTYPE */
-    OMX_IndexConfigOtherPower,              /**< reference: OMX_OTHER_CONFIG_POWERTYPE */
-    OMX_IndexConfigOtherStats,              /**< reference: OMX_OTHER_CONFIG_STATSTYPE */
-
-
-    /* Reserved Time range */
-    OMX_IndexTimeStartUnused = 0x09000000,
-    OMX_IndexConfigTimeScale,               /**< reference: OMX_TIME_CONFIG_SCALETYPE */
-    OMX_IndexConfigTimeClockState,          /**< reference: OMX_TIME_CONFIG_CLOCKSTATETYPE */
-    OMX_IndexConfigTimeActiveRefClock,      /**< reference: OMX_TIME_CONFIG_ACTIVEREFCLOCKTYPE */
-    OMX_IndexConfigTimeCurrentMediaTime,    /**< reference: OMX_TIME_CONFIG_TIMESTAMPTYPE (read only) */
-    OMX_IndexConfigTimeCurrentWallTime,     /**< reference: OMX_TIME_CONFIG_TIMESTAMPTYPE (read only) */
-    OMX_IndexConfigTimeCurrentAudioReference, /**< reference: OMX_TIME_CONFIG_TIMESTAMPTYPE (write only) */
-    OMX_IndexConfigTimeCurrentVideoReference, /**< reference: OMX_TIME_CONFIG_TIMESTAMPTYPE (write only) */
-    OMX_IndexConfigTimeMediaTimeRequest,    /**< reference: OMX_TIME_CONFIG_MEDIATIMEREQUESTTYPE (write only) */
-    OMX_IndexConfigTimeClientStartTime,     /**< reference: OMX_TIME_CONFIG_TIMESTAMPTYPE (write only) */
-    OMX_IndexConfigTimePosition,            /**< reference: OMX_TIME_CONFIG_TIMESTAMPTYPE */
-    OMX_IndexConfigTimeSeekMode,            /**< reference: OMX_TIME_CONFIG_SEEKMODETYPE */
-
-
-    OMX_IndexKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    /* Vendor specific area */
-    OMX_IndexVendorStartUnused = 0x7F000000,
-    /* Vendor specific structures should be in the range of 0x7F000000 
-       to 0x7FFFFFFE.  This range is not broken out by vendor, so
-       private indexes are not guaranteed unique and therefore should
-       only be sent to the appropriate component. */
-
-    OMX_IndexMax = 0x7FFFFFFF
-
-} OMX_INDEXTYPE;
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif
-/* File EOF */
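The intro comment of the removed OMX_Index.h uses input cropping as its worked example. A minimal sketch of that call sequence, assuming a component handle hComp and port 0 obtained elsewhere, and using OMX_CONFIG_RECTTYPE from OMX_IVCommon.h (the reference type listed for OMX_IndexConfigCommonInputCrop):

    #include <string.h>
    #include <OMX_Core.h>
    #include <OMX_IVCommon.h>
    #include <OMX_Index.h>

    /* Illustrative example: crop the input of port 0 to a 640x480 window at (16, 8). */
    static OMX_ERRORTYPE set_input_crop(OMX_HANDLETYPE hComp)
    {
        OMX_CONFIG_RECTTYPE crop;
        memset(&crop, 0, sizeof(crop));
        crop.nSize = sizeof(OMX_CONFIG_RECTTYPE);
        crop.nVersion.s.nVersionMajor = 1;
        crop.nVersion.s.nVersionMinor = 1;
        crop.nPortIndex = 0;
        crop.nLeft = 16;
        crop.nTop = 8;
        crop.nWidth = 640;
        crop.nHeight = 480;
        /* OMX_IndexConfig* indices go through OMX_SetConfig; OMX_IndexParam* through OMX_SetParameter. */
        return OMX_SetConfig(hComp, OMX_IndexConfigCommonInputCrop, &crop);
    }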
diff --git a/include/media/stagefright/openmax/OMX_Other.h b/include/media/stagefright/openmax/OMX_Other.h
deleted file mode 100644
index efbce83..0000000
--- a/include/media/stagefright/openmax/OMX_Other.h
+++ /dev/null
@@ -1,354 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/*
- * Copyright (c) 2008 The Khronos Group Inc. 
- * 
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject
- * to the following conditions: 
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software. 
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- *
- */
-
-/** @file OMX_Other.h - OpenMax IL version 1.1.2
- *  The structures needed by Other components to exchange
- *  parameters and configuration data with the components.
- */
-
-#ifndef OMX_Other_h
-#define OMX_Other_h
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-
-/* Each OMX header must include all required header files to allow the
- *  header to compile without errors.  The includes below are required
- *  for this header file to compile successfully 
- */
-
-#include <OMX_Core.h>
-
-
-/** 
- * Enumeration of possible data types which match to multiple domains or no
- * domain at all.  For types which are vendor specific, a value above
- * OMX_OTHER_VENDORTSTART should be used.
- */
-typedef enum OMX_OTHER_FORMATTYPE {
-    OMX_OTHER_FormatTime = 0, /**< Transmission of various timestamps, elapsed time, 
-                                   time deltas, etc */
-    OMX_OTHER_FormatPower,    /**< Perhaps used for enabling/disabling power 
-                                   management, setting clocks? */
-    OMX_OTHER_FormatStats,    /**< Could be things such as frame rate, frames 
-                                   dropped, etc */
-    OMX_OTHER_FormatBinary,   /**< Arbitrary binary data */
-    OMX_OTHER_FormatVendorReserved = 1000, /**< Starting value for vendor specific 
-                                                formats */
-
-    OMX_OTHER_FormatKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_OTHER_FormatVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_OTHER_FormatMax = 0x7FFFFFFF
-} OMX_OTHER_FORMATTYPE;
-
-/** 
- * Enumeration of seek modes.
- */
-typedef enum OMX_TIME_SEEKMODETYPE {
-    OMX_TIME_SeekModeFast = 0, /**< Prefer seeking to an approximation
-                                * of the requested seek position over   
-                                * the actual seek position if it
-                                * results in a faster seek. */
-    OMX_TIME_SeekModeAccurate, /**< Prefer seeking to the actual seek 
-                                * position over an approximation
-                                * of the requested seek position even
-                                * if it results in a slower seek. */
-    OMX_TIME_SeekModeKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_TIME_SeekModeVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_TIME_SeekModeMax = 0x7FFFFFFF
-} OMX_TIME_SEEKMODETYPE;
-
-/* Structure representing the seekmode of the component */
-typedef struct OMX_TIME_CONFIG_SEEKMODETYPE {
-    OMX_U32 nSize;                  /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;       /**< OMX specification version information */
-    OMX_TIME_SEEKMODETYPE eType;    /**< The seek mode */
-} OMX_TIME_CONFIG_SEEKMODETYPE;
-
-/** Structure representing a time stamp used with the following configs 
- * on the Clock Component (CC):
- * 
- * OMX_IndexConfigTimeCurrentWallTime: query of the CC's current wall  
- *     time
- * OMX_IndexConfigTimeCurrentMediaTime: query of the CC's current media
- *     time
- * OMX_IndexConfigTimeCurrentAudioReference and  
- * OMX_IndexConfigTimeCurrentVideoReference: audio/video reference 
- *     clock sending SC its reference time
- * OMX_IndexConfigTimeClientStartTime: a Clock Component client sends 
- *     this structure to the Clock Component via a SetConfig on its 
- *     client port when it receives a buffer with
- *     OMX_BUFFERFLAG_STARTTIME set. It must use the timestamp
- *     specified by that buffer for nStartTimestamp. 
- *
- * It's also used with the following config on components in general:
- *
- * OMX_IndexConfigTimePosition: IL client querying component position 
- * (GetConfig) or commanding a component to seek to the given location
- * (SetConfig)
- */	
-typedef struct OMX_TIME_CONFIG_TIMESTAMPTYPE {
-    OMX_U32 nSize;               /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;    /**< OMX specification version
-                                  *   information */
-    OMX_U32 nPortIndex;     /**< port that this structure applies to */
-    OMX_TICKS nTimestamp;        /**< timestamp. */
-} OMX_TIME_CONFIG_TIMESTAMPTYPE;  
-
-/** Enumeration of possible media time update types. */
-typedef enum OMX_TIME_UPDATETYPE {
-      OMX_TIME_UpdateRequestFulfillment,    /**< Update is the fulfillment of a media time request. */
-      OMX_TIME_UpdateScaleChanged,          /**< Update was generated because the scale changed. */
-      OMX_TIME_UpdateClockStateChanged,     /**< Update was generated because the clock state changed. */
-      OMX_TIME_UpdateKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-      OMX_TIME_UpdateVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-      OMX_TIME_UpdateMax = 0x7FFFFFFF
-} OMX_TIME_UPDATETYPE;
-
-/** Enumeration of possible reference clocks to the media time. */
-typedef enum OMX_TIME_REFCLOCKTYPE {
-      OMX_TIME_RefClockNone,    /**< Use no references. */
-      OMX_TIME_RefClockAudio,	/**< Use references sent through OMX_IndexConfigTimeCurrentAudioReference */
-      OMX_TIME_RefClockVideo,   /**< Use references sent through OMX_IndexConfigTimeCurrentVideoReference */
-      OMX_TIME_RefClockKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-      OMX_TIME_RefClockVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-      OMX_TIME_RefClockMax = 0x7FFFFFFF
-} OMX_TIME_REFCLOCKTYPE;
-
-/** Enumeration of clock states. */
-typedef enum OMX_TIME_CLOCKSTATE {
-      OMX_TIME_ClockStateRunning,             /**< Clock running. */
-      OMX_TIME_ClockStateWaitingForStartTime, /**< Clock waiting until the 
-                                               *   prescribed clients emit their
-                                               *   start time. */
-      OMX_TIME_ClockStateStopped,             /**< Clock stopped. */
-      OMX_TIME_ClockStateKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-      OMX_TIME_ClockStateVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-      OMX_TIME_ClockStateMax = 0x7FFFFFFF
-} OMX_TIME_CLOCKSTATE;
-
-/** Structure representing a media time request to the clock component.
- *
- *  A client component sends this structure to the Clock Component via a SetConfig
- *  on its client port to specify a media timestamp the Clock Component
- *  should emit.  The Clock Component should fulfill the request by sending an
- *  OMX_TIME_MEDIATIMETYPE when its media clock matches the requested 
- *  timestamp.
- *
- *  The client may require a media time request be fulfilled slightly
- *  earlier than the media time specified. In this case the client specifies 
- *  an offset which is equal to the difference between wall time corresponding 
- *  to the requested media time and the wall time when it will be 
- *  fulfilled. 
- *
- *  A client component may use these requests and the OMX_TIME_MEDIATIMETYPE to
- *  time events according to timestamps. If a client must perform an operation O at
- *  a time T (e.g. deliver a video frame at its corresponding timestamp), it makes a 
- *  media time request at T (perhaps specifying an offset to ensure the request fulfillment
- *  is a little early). When the clock component passes the resulting OMX_TIME_MEDIATIMETYPE
- *  structure back to the client component, the client may perform operation O (perhaps having
- *  to wait slightly longer itself, as specified by the return values).
- */
-
-typedef struct OMX_TIME_CONFIG_MEDIATIMEREQUESTTYPE {
-    OMX_U32 nSize;              /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version information */
-    OMX_U32 nPortIndex;         /**< port that this structure applies to */
-    OMX_PTR pClientPrivate;     /**< Client private data to disambiguate this media time 
-                                 *   from others (e.g. the number of the frame to deliver). 
-                                 *   Duplicated in the media time structure that fulfills 
-                                 *   this request. A value of zero is reserved for time scale 
-                                 *   updates. */
-    OMX_TICKS nMediaTimestamp;  /**< Media timestamp requested.*/ 
-    OMX_TICKS nOffset;          /**< Amount of wall clock time by which this
-                                 *   request should be fulfilled early */
-} OMX_TIME_CONFIG_MEDIATIMEREQUESTTYPE;
-
-/**< Structure sent from the clock component to a client either when fulfilling 
- *   a media time request or when the time scale has changed. 
- *
- *   In the former case the Clock Component fills this structure and times its emission 
- *   to a client component (via the client port) according to the corresponding media 
- *   time request sent by the client. The Clock Component should time the emission to occur
- *   when the requested timestamp matches the Clock Component's media time but also the 
- *   prescribed offset early. 
- *
- *   Upon scale changes the clock component clears the nClientPrivate data, sends the current
- *   media time and sets the nScale to the new scale via the client port. It emits a 
- *   OMX_TIME_MEDIATIMETYPE to all clients independent of any requests. This allows clients to 
- *   alter processing to accommodate scaling. For instance, a video component might skip inter-frames 
- *   in the case of extreme fast forward. Likewise, an audio component might add or remove samples 
- *   from an audio frame to scale audio data. 
- *
- *   It is expected that some clock components may not be able to fulfill requests
- *   at exactly the prescribed time. This is acceptable so long as the request is 
- *   fulfilled at least as early as described and not later. This structure provides 
- *   fields the client may use to wait for the remaining time.
- *
- *   The client may use either the nOffset or nWallTimeAtMediaTime fields to determine the 
- *   wall time until the nMediaTimestamp actually occurs. In the latter case the
- *   client can get a more accurate value for the offset by getting the current wall
- *   time from the clock component and subtracting it from nWallTimeAtMediaTime. 
- */
-
-typedef struct OMX_TIME_MEDIATIMETYPE {
-    OMX_U32 nSize;                  /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;       /**< OMX specification version information */
-    OMX_U32 nClientPrivate;         /**< Client private data to disambiguate this media time 
-                                     *   from others. Copied from the media time request. 
-                                     *   A value of zero is reserved for time scale updates. */
-    OMX_TIME_UPDATETYPE eUpdateType; /**< Reason for the update */
-    OMX_TICKS nMediaTimestamp;      /**< Media time requested. If no media time was 
-                                     *   requested then this is the current media time. */ 
-    OMX_TICKS nOffset;              /**< Amount of wall clock time by which this
-                                     *   request was actually fulfilled early */
-
-    OMX_TICKS nWallTimeAtMediaTime; /**< Wall time corresponding to nMediaTimeStamp.
-                                     *   A client may compare this value to current
-                                     *   media time obtained from the Clock Component to determine
-                                     *   the wall time until the media timestamp is really
-                                     *   current. */
-    OMX_S32 xScale;                 /**< Current media time scale in Q16 format. */
-    OMX_TIME_CLOCKSTATE eState;     /* Seeking Change. Added 7/12.*/
-                                    /**< State of the media time. */
-} OMX_TIME_MEDIATIMETYPE;  
-
-/** Structure representing the current media time scale factor. Applicable only to the clock 
- *  component; other components see scale changes via OMX_TIME_MEDIATIMETYPE buffers sent via
- *  the clock component client ports. Upon receiving this config the clock component changes 
- *  the rate by which the media time increases or decreases, effectively implementing trick modes. 
- */ 
-typedef struct OMX_TIME_CONFIG_SCALETYPE {
-    OMX_U32 nSize;                  /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;       /**< OMX specification version information */
-    OMX_S32 xScale;                 /**< This is a value in Q16 format which is used for
-                                     * scaling the media time */
-} OMX_TIME_CONFIG_SCALETYPE;
- 
-/** Bits used to identify a clock port. Used in OMX_TIME_CONFIG_CLOCKSTATETYPE's nWaitMask field */
-#define OMX_CLOCKPORT0 0x00000001
-#define OMX_CLOCKPORT1 0x00000002
-#define OMX_CLOCKPORT2 0x00000004
-#define OMX_CLOCKPORT3 0x00000008
-#define OMX_CLOCKPORT4 0x00000010
-#define OMX_CLOCKPORT5 0x00000020
-#define OMX_CLOCKPORT6 0x00000040
-#define OMX_CLOCKPORT7 0x00000080
-
-/** Structure representing the current mode of the media clock. 
- *  IL Client uses this config to change or query the mode of the 
- *  media clock of the clock component. Applicable only to clock
- *  component. 
- *  
- *  On a SetConfig if eState is OMX_TIME_ClockStateRunning media time
- *  starts immediately at the prescribed start time. If
- *  OMX_TIME_ClockStateWaitingForStartTime the Clock Component ignores
- *  the given nStartTime and waits for all clients specified in the 
- *  nWaitMask to send start times (via 
- *  OMX_IndexConfigTimeClientStartTime). The Clock Component then starts 
- *  the media clock using the earliest start time supplied. */    
-typedef struct OMX_TIME_CONFIG_CLOCKSTATETYPE {
-    OMX_U32 nSize;              /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;   /**< OMX specification version 
-                                 *   information */
-    OMX_TIME_CLOCKSTATE eState; /**< State of the media time. */
-    OMX_TICKS nStartTime;       /**< Start time of the media time. */
-    OMX_TICKS nOffset;          /**< Time to offset the media time by 
-                                 * (e.g. preroll). Media time will be
-                                 * reported to be nOffset ticks earlier.     
-                                 */
-    OMX_U32 nWaitMask;          /**< Mask of OMX_CLOCKPORT values. */
-} OMX_TIME_CONFIG_CLOCKSTATETYPE;
-
-/** Structure representing the reference clock currently being used to
- *  compute media time. IL client uses this config to change or query the 
- *  clock component's active reference clock */
-typedef struct OMX_TIME_CONFIG_ACTIVEREFCLOCKTYPE {
-    OMX_U32 nSize;                  /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion;       /**< OMX specification version information */
-    OMX_TIME_REFCLOCKTYPE eClock;   /**< Reference clock used to compute media time */                        
-} OMX_TIME_CONFIG_ACTIVEREFCLOCKTYPE;
-
-/** Descriptor for setting specifics of power type.
- *  Note: this structure is listed for backwards compatibility. */
-typedef struct OMX_OTHER_CONFIG_POWERTYPE {
-    OMX_U32 nSize;            /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
-    OMX_BOOL bEnablePM;       /**< Flag to enable Power Management */
-} OMX_OTHER_CONFIG_POWERTYPE;
-
-
-/** Descriptor for setting specifics of stats type.
- *  Note: this structure is listed for backwards compatibility. */
-typedef struct OMX_OTHER_CONFIG_STATSTYPE {
-    OMX_U32 nSize;            /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
-    /* what goes here */
-} OMX_OTHER_CONFIG_STATSTYPE;
-
-
-/**
- * The PortDefinition structure is used to define all of the parameters 
- * necessary for the compliant component to setup an input or an output other 
- * path.
- */
-typedef struct OMX_OTHER_PORTDEFINITIONTYPE {
-    OMX_OTHER_FORMATTYPE eFormat;  /**< Type of data expected for this channel */
-} OMX_OTHER_PORTDEFINITIONTYPE;
-
-/**  Port format parameter.  This structure is used to enumerate
-  *  the various data input/output formats supported by the port.
-  */
-typedef struct OMX_OTHER_PARAM_PORTFORMATTYPE {
-    OMX_U32 nSize; /**< size of the structure in bytes */
-    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
-    OMX_U32 nPortIndex; /**< Indicates which port to set */
-    OMX_U32 nIndex; /**< Indicates the enumeration index for the format from 0x0 to N-1 */
-    OMX_OTHER_FORMATTYPE eFormat; /**< Type of data expected for this channel */
-} OMX_OTHER_PARAM_PORTFORMATTYPE; 
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif
-/* File EOF */
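The removed OMX_Other.h spells out how an IL client starts the clock component's media clock via OMX_IndexConfigTimeClockState. A hedged sketch under assumed conditions (a clock handle hClock, with the audio and video clients attached to clock ports 0 and 1):

    #include <string.h>
    #include <OMX_Core.h>
    #include <OMX_Other.h>
    #include <OMX_Index.h>

    /* Illustrative example: wait for start times from clock ports 0 and 1, then run the clock. */
    static OMX_ERRORTYPE start_media_clock(OMX_HANDLETYPE hClock)
    {
        OMX_TIME_CONFIG_CLOCKSTATETYPE state;
        memset(&state, 0, sizeof(state));
        state.nSize = sizeof(OMX_TIME_CONFIG_CLOCKSTATETYPE);
        state.nVersion.s.nVersionMajor = 1;
        state.nVersion.s.nVersionMinor = 1;
        state.eState = OMX_TIME_ClockStateWaitingForStartTime; /* nStartTime is ignored in this state */
        state.nOffset = 0;                                     /* no preroll offset */
        state.nWaitMask = OMX_CLOCKPORT0 | OMX_CLOCKPORT1;     /* clients that must supply start times */
        return OMX_SetConfig(hClock, OMX_IndexConfigTimeClockState, &state);
    }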
diff --git a/include/media/stagefright/openmax/OMX_Types.h b/include/media/stagefright/openmax/OMX_Types.h
deleted file mode 100644
index 03fd4bc..0000000
--- a/include/media/stagefright/openmax/OMX_Types.h
+++ /dev/null
@@ -1,365 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/*
- * Copyright (c) 2008 The Khronos Group Inc. 
- * 
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject
- * to the following conditions: 
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software. 
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- *
- */
-
-/** OMX_Types.h - OpenMax IL version 1.1.2
- *  The OMX_Types header file contains the primitive type definitions used by 
- *  the core, the application and the component.  This file may need to be
- *  modified to be used on systems that do not have "char" set to 8 bits, 
- *  "short" set to 16 bits and "long" set to 32 bits.
- */
-
-#ifndef OMX_Types_h
-#define OMX_Types_h
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/** The OMX_API and OMX_APIENTRY are platform specific definitions used
- *  to declare OMX function prototypes.  They are modified to meet the
- *  requirements for a particular platform */
-#ifdef __SYMBIAN32__   
-#   ifdef __OMX_EXPORTS
-#       define OMX_API __declspec(dllexport)
-#   else
-#       ifdef _WIN32
-#           define OMX_API __declspec(dllexport) 
-#       else
-#           define OMX_API __declspec(dllimport)
-#       endif
-#   endif
-#else
-#   ifdef _WIN32
-#      ifdef __OMX_EXPORTS
-#          define OMX_API __declspec(dllexport)
-#      else
-//#          define OMX_API __declspec(dllimport)
-#define OMX_API
-#      endif
-#   else
-#      ifdef __OMX_EXPORTS
-#          define OMX_API
-#      else
-#          define OMX_API extern
-#      endif
-#   endif
-#endif
-
-#ifndef OMX_APIENTRY
-#define OMX_APIENTRY 
-#endif 
-
-/** OMX_IN is used to identify inputs to an OMX function.  This designation 
-    will also be used in the case of a pointer that points to a parameter 
-    that is used as an output. */
-#ifndef OMX_IN
-#define OMX_IN
-#endif
-
-/** OMX_OUT is used to identify outputs from an OMX function.  This 
-    designation will also be used in the case of a pointer that points 
-    to a parameter that is used as an input. */
-#ifndef OMX_OUT
-#define OMX_OUT
-#endif
-
-
-/** OMX_INOUT is used to identify parameters that may be either inputs or
-    outputs from an OMX function at the same time.  This designation will 
-    also be used in the case of a pointer that  points to a parameter that 
-    is used both as an input and an output. */
-#ifndef OMX_INOUT
-#define OMX_INOUT
-#endif
-
-/** OMX_ALL is used as a wildcard to select all entities of the same type
- *  when specifying the index, or when referring to an object by an index (i.e.
- *  use OMX_ALL to indicate all N channels). When used as a port index
- *  for a config or parameter, OMX_ALL denotes that the config or
- *  parameter applies to the entire component, not just one port. */
-#define OMX_ALL 0xFFFFFFFF
-
-/** In the following we define groups that help build the doxygen documentation */
-
-/** @defgroup core OpenMAX IL core
- * Functions and structure related to the OMX IL core
- */
- 
- /** @defgroup comp OpenMAX IL component
- * Functions and structure related to the OMX IL component
- */
- 
-/** @defgroup rpm Resource and Policy Management 
- * Structures for resource and policy management of components
- */
-
-/** @defgroup buf Buffer Management
- * Buffer handling functions and structures
- */
-  
-/** @defgroup tun Tunneling
- * @ingroup core comp
- * Structures and functions to manage tunnels among component ports
- */
- 
-/** @defgroup cp Content Pipes
- *  @ingroup core
- */
- 
- /** @defgroup metadata Metadata handling
-  * 
-  */ 
-
-/** OMX_U8 is an 8 bit unsigned quantity that is byte aligned */
-typedef unsigned char OMX_U8;
-
-/** OMX_S8 is an 8 bit signed quantity that is byte aligned */
-typedef signed char OMX_S8;
-
-/** OMX_U16 is a 16 bit unsigned quantity that is 16 bit word aligned */
-typedef unsigned short OMX_U16;
-
-/** OMX_S16 is a 16 bit signed quantity that is 16 bit word aligned */
-typedef signed short OMX_S16;
-
-/** OMX_U32 is a 32 bit unsigned quantity that is 32 bit word aligned */
-typedef unsigned long OMX_U32;
-
-/** OMX_S32 is a 32 bit signed quantity that is 32 bit word aligned */
-typedef signed long OMX_S32;
-
-
-/* Users with compilers that cannot accept the "long long" designation should
-   define the OMX_SKIP64BIT macro.  It should be noted that this may cause 
-   some components to fail to compile if the component was written to require
-   64 bit integral types.  However, these components would NOT compile anyway
-   since the compiler does not support the way the component was written.
-*/
-#ifndef OMX_SKIP64BIT
-#ifdef __SYMBIAN32__
-/** OMX_U64 is a 64 bit unsigned quantity that is 64 bit word aligned */
-typedef unsigned long long OMX_U64;
-
-/** OMX_S64 is a 64 bit signed quantity that is 64 bit word aligned */
-typedef signed long long OMX_S64;
-
-#elif defined(WIN32)
-
-/** OMX_U64 is a 64 bit unsigned quantity that is 64 bit word aligned */   
-typedef unsigned __int64  OMX_U64;
-
-/** OMX_S64 is a 64 bit signed quantity that is 64 bit word aligned */
-typedef signed   __int64  OMX_S64;
-
-#else /* WIN32 */
-
-/** OMX_U64 is a 64 bit unsigned quantity that is 64 bit word aligned */
-typedef unsigned long long OMX_U64;
-
-/** OMX_S64 is a 64 bit signed quantity that is 64 bit word aligned */
-typedef signed long long OMX_S64;
-
-#endif /* WIN32 */
-#endif
-
-
-/** The OMX_BOOL type is intended to be used to represent a true or a false 
-    value when passing parameters to and from the OMX core and components.  The
-    OMX_BOOL is a 32 bit quantity and is aligned on a 32 bit word boundary.
- */
-typedef enum OMX_BOOL {
-    OMX_FALSE = 0,
-    OMX_TRUE = !OMX_FALSE,
-    OMX_BOOL_MAX = 0x7FFFFFFF
-} OMX_BOOL; 
- 
-/** The OMX_PTR type is intended to be used to pass pointers between the OMX
-    applications and the OMX Core and components.  This is a 32 bit pointer and
-    is aligned on a 32 bit boundary.
- */
-typedef void* OMX_PTR;
-
-/** The OMX_STRING type is intended to be used to pass "C" type strings between
-    the application and the core and component.  The OMX_STRING type is a 32 
-    bit pointer to a zero terminated string.  The  pointer is word aligned and 
-    the string is byte aligned.  
- */
-typedef char* OMX_STRING;
-
-/** The OMX_BYTE type is intended to be used to pass arrays of bytes such as
-    buffers between the application and the component and core.  The OMX_BYTE 
-    type is a 32 bit pointer to a zero terminated string.  The  pointer is word
-    aligned and the string is byte aligned.
- */
-typedef unsigned char* OMX_BYTE;
-
-/** OMX_UUIDTYPE is a very long unique identifier used to uniquely identify a component
-    instance at runtime.  This identifier should be generated by a component in a way
-    that guarantees that every instance of the identifier running on the system
-    is unique. */
-typedef unsigned char OMX_UUIDTYPE[128];
-
-/** The OMX_DIRTYPE enumeration is used to indicate if a port is an input or
-    an output port.  This enumeration is common across all component types.    
- */
-typedef enum OMX_DIRTYPE
-{
-    OMX_DirInput,              /**< Port is an input port */
-    OMX_DirOutput,             /**< Port is an output port */
-    OMX_DirMax = 0x7FFFFFFF
-} OMX_DIRTYPE;
-
-/** The OMX_ENDIANTYPE enumeration is used to indicate the bit ordering 
-    for numerical data (i.e. big endian, or little endian).    
- */
-typedef enum OMX_ENDIANTYPE
-{
-    OMX_EndianBig, /**< big endian */
-    OMX_EndianLittle, /**< little endian */
-    OMX_EndianMax = 0x7FFFFFFF
-} OMX_ENDIANTYPE;
-
-
-/** The OMX_NUMERICALDATATYPE enumeration is used to indicate if data 
-    is signed or unsigned
- */
-typedef enum OMX_NUMERICALDATATYPE
-{
-    OMX_NumericalDataSigned, /**< signed data */
-    OMX_NumericalDataUnsigned, /**< unsigned data */
-    OMX_NumercialDataMax = 0x7FFFFFFF
-} OMX_NUMERICALDATATYPE;
-
-
-/** Unsigned bounded value type */
-typedef struct OMX_BU32 {
-    OMX_U32 nValue; /**< actual value */
-    OMX_U32 nMin;   /**< minimum for value (i.e. nValue >= nMin) */
-    OMX_U32 nMax;   /**< maximum for value (i.e. nValue <= nMax) */
-} OMX_BU32;
-
-
-/** Signed bounded value type */
-typedef struct OMX_BS32 {
-    OMX_S32 nValue; /**< actual value */
-    OMX_S32 nMin;   /**< minimum for value (i.e. nValue >= nMin) */
-    OMX_S32 nMax;   /**< maximum for value (i.e. nValue <= nMax) */
-} OMX_BS32;
-
-
-/** Structure representing some time or duration in microseconds. This structure
-  *  must be interpreted as a signed 64 bit value. The quantity is signed to accommodate 
-  *  negative deltas and preroll scenarios. The quantity is represented in microseconds 
-  *  to accommodate high resolution timestamps (e.g. DVD presentation timestamps based
-  *  on a 90kHz clock) and to allow more accurate and synchronized delivery (e.g. 
-  *  individual audio samples delivered at 192 kHz). The quantity is 64 bit to 
-  *  accommodate a large dynamic range (signed 32 bit values would allow only for plus
-  *  or minus 35 minutes).
-  *
-  *  Implementations with limited precision may convert the signed 64 bit value to 
-  *  a signed 32 bit value internally but risk loss of precision.  
-  */
-#ifndef OMX_SKIP64BIT
-typedef OMX_S64 OMX_TICKS;
-#else
-typedef struct OMX_TICKS
-{
-    OMX_U32 nLowPart;    /** low bits of the signed 64 bit tick value */
-    OMX_U32 nHighPart;   /** high bits of the signed 64 bit tick value */
-} OMX_TICKS;
-#endif
-#define OMX_TICKS_PER_SECOND 1000000
-
-/** Define the public interface for the OMX Handle.  The core will not use
-    this value internally, but the application should only use this value.
- */
-typedef void* OMX_HANDLETYPE;
-
-typedef struct OMX_MARKTYPE
-{
-    OMX_HANDLETYPE hMarkTargetComponent;   /**< The component that will 
-                                                generate a mark event upon 
-                                                processing the mark. */
-    OMX_PTR pMarkData;   /**< Application specific data associated with 
-                              the mark sent on a mark event to disambiguate 
-                              this mark from others. */
-} OMX_MARKTYPE;
-
-
-/** OMX_NATIVE_DEVICETYPE is used to map an OMX video port to the
- *  platform- and operating-system-specific object used to reference the display, 
- *  or can be used by an audio port for native audio rendering */
-typedef void* OMX_NATIVE_DEVICETYPE;
-
-/** OMX_NATIVE_WINDOWTYPE is used to map an OMX video port to the
- *  platform- and operating-system-specific object used to reference the window */
-typedef void* OMX_NATIVE_WINDOWTYPE;
-
-/** The OMX_VERSIONTYPE union is used to specify the version for
-    a structure or component.  For a component, the version is entirely
-    specified by the component vendor.  Components doing the same function
-    from different vendors may or may not have the same version.  For 
-    structures, the version shall be set by the entity that allocates the
-    structure.  For structures specified in the OMX 1.1 specification, the
-    value of the version shall be set to 1.1.0.0 in all cases.  Access to the
-    OMX_VERSIONTYPE can be by a single 32 bit access (e.g. by nVersion) or
-    by accessing one of the structure elements to, for example, check only
-    the Major revision.
- */
-typedef union OMX_VERSIONTYPE
-{
-    struct
-    {
-        OMX_U8 nVersionMajor;   /**< Major version accessor element */
-        OMX_U8 nVersionMinor;   /**< Minor version accessor element */
-        OMX_U8 nRevision;       /**< Revision version accessor element */
-        OMX_U8 nStep;           /**< Step version accessor element */
-    } s;
-    OMX_U32 nVersion;           /**< 32 bit value to make accessing the
-                                    version easily done in a single word
-                                    size copy/compare operation */
-} OMX_VERSIONTYPE;
-
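Every parameter and configuration structure in the OMX headers begins with nSize and nVersion, so callers conventionally zero the structure and stamp it with 1.1.0.0 before any get/set call; a hedged C++ sketch of that idiom (the helper name is illustrative, not part of the header):

#include <string.h>  /* memset */

template<typename T>
static void InitOmxHeader(T *params) {
    memset(params, 0, sizeof(T));
    params->nSize = sizeof(T);
    params->nVersion.s.nVersionMajor = 1;  /* 1.1.0.0, as required for OMX 1.1 structures */
    params->nVersion.s.nVersionMinor = 1;
    params->nVersion.s.nRevision = 0;
    params->nVersion.s.nStep = 0;
}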
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif
-/* File EOF */
diff --git a/include/media/stagefright/openmax/OMX_Video.h b/include/media/stagefright/openmax/OMX_Video.h
deleted file mode 100644
index 4f8485d..0000000
--- a/include/media/stagefright/openmax/OMX_Video.h
+++ /dev/null
@@ -1,1078 +0,0 @@
-/* ------------------------------------------------------------------
- * Copyright (C) 1998-2009 PacketVideo
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied.
- * See the License for the specific language governing permissions
- * and limitations under the License.
- * -------------------------------------------------------------------
- */
-/**
- * Copyright (c) 2008 The Khronos Group Inc. 
- * 
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject
- * to the following conditions: 
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software. 
- * 
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- *
- */
-
-/** 
- *  @file OMX_Video.h - OpenMax IL version 1.1.2
- *  The structures needed by Video components to exchange parameters 
- *  and configuration data with OMX components.
- */
-#ifndef OMX_Video_h
-#define OMX_Video_h
-
-/** @defgroup video OpenMAX IL Video Domain
- * @ingroup iv
- * Structures for OpenMAX IL Video domain
- * @{
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-
-/**
- * Each OMX header must include all required header files to allow the
- * header to compile without errors.  The includes below are required
- * for this header file to compile successfully 
- */
-
-#include <OMX_IVCommon.h>
-
-
-/**
- * Enumeration used to define the possible video compression codings.  
- * NOTE:  This essentially refers to file extensions. If the coding is 
- *        being used to specify the ENCODE type, then additional work 
- *        must be done to configure the exact flavor of the compression 
- *        to be used.  For decode cases where the user application can 
- *        not differentiate between MPEG-4 and H.264 bit streams, it is 
- *        up to the codec to handle this.
- */
-typedef enum OMX_VIDEO_CODINGTYPE {
-    OMX_VIDEO_CodingUnused,     /**< Value when coding is N/A */
-    OMX_VIDEO_CodingAutoDetect, /**< Autodetection of coding type */
-    OMX_VIDEO_CodingMPEG2,      /**< AKA: H.262 */
-    OMX_VIDEO_CodingH263,       /**< H.263 */
-    OMX_VIDEO_CodingMPEG4,      /**< MPEG-4 */
-    OMX_VIDEO_CodingWMV,        /**< all versions of Windows Media Video */
-    OMX_VIDEO_CodingRV,         /**< all versions of Real Video */
-    OMX_VIDEO_CodingAVC,        /**< H.264/AVC */
-    OMX_VIDEO_CodingMJPEG,      /**< Motion JPEG */
-    OMX_VIDEO_CodingVPX,        /**< Google VPX, formerly known as On2 VP8 */
-    OMX_VIDEO_CodingKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_VIDEO_CodingVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_CodingMax = 0x7FFFFFFF
-} OMX_VIDEO_CODINGTYPE;
-
-
-/**
- * Data structure used to define a video path.  The number of Video paths for 
- * input and output will vary by type of the Video component.  
- * 
- *    Input (aka Source) : zero Inputs, one Output,
- *    Splitter           : one Input, 2 or more Outputs,
- *    Processing Element : one Input, one output,
- *    Mixer              : 2 or more inputs, one output,
- *    Output (aka Sink)  : one Input, zero outputs.
- * 
- * The PortDefinition structure is used to define all of the parameters 
- * necessary for the compliant component to setup an input or an output video 
- * path.  If additional vendor specific data is required, it should be 
- * transmitted to the component using the CustomCommand function.  Compliant 
- * components will prepopulate this structure with optimal values during the 
- * GetDefaultInitParams command.
- *
- * STRUCT MEMBERS:
- *  cMIMEType             : MIME type of data for the port
- *  pNativeRender         : Platform specific reference for a display if a 
- *                          sink, otherwise this field is 0
- *  nFrameWidth           : Width of frame to be used on channel if 
- *                          uncompressed format is used.  Use 0 for unknown,
- *                          don't care or variable
- *  nFrameHeight          : Height of frame to be used on channel if 
- *                          uncompressed format is used. Use 0 for unknown,
- *                          don't care or variable
- *  nStride               : Number of bytes per span of an image 
- *                          (i.e. indicates the number of bytes to get
- *                          from span N to span N+1, where negative stride
- *                          indicates the image is bottom up)
- *  nSliceHeight          : Height used when encoding in slices
- *  nBitrate              : Bit rate of frame to be used on channel if 
- *                          compressed format is used. Use 0 for unknown, 
- *                          don't care or variable
- *  xFramerate            : Frame rate to be used on channel if uncompressed 
- *                          format is used. Use 0 for unknown, don't care or 
- *                          variable.  Units are Q16 frames per second.
- *  bFlagErrorConcealment : Turns on error concealment if it is supported by 
- *                          the OMX component
- *  eCompressionFormat    : Compression format used in this instance of the 
- *                          component. When OMX_VIDEO_CodingUnused is 
- *                          specified, eColorFormat is used
- *  eColorFormat : Decompressed format used by this component
- *  pNativeWindow : Platform specific reference for a window object if a 
- *                          display sink, otherwise this field is 0x0. 
- */
-typedef struct OMX_VIDEO_PORTDEFINITIONTYPE {
-    OMX_STRING cMIMEType;
-    OMX_NATIVE_DEVICETYPE pNativeRender;
-    OMX_U32 nFrameWidth;
-    OMX_U32 nFrameHeight;
-    OMX_S32 nStride;
-    OMX_U32 nSliceHeight;
-    OMX_U32 nBitrate;
-    OMX_U32 xFramerate;
-    OMX_BOOL bFlagErrorConcealment;
-    OMX_VIDEO_CODINGTYPE eCompressionFormat;
-    OMX_COLOR_FORMATTYPE eColorFormat;
-    OMX_NATIVE_WINDOWTYPE pNativeWindow;
-} OMX_VIDEO_PORTDEFINITIONTYPE;
-
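nStride, nSliceHeight and xFramerate are the members most often misread on uncompressed ports; a hedged sketch of interpreting them for a planar YUV420 buffer (the exact layout is component-specific, so treat the size as an estimate only):

static OMX_U32 EstimateYuv420BufferSize(const OMX_VIDEO_PORTDEFINITIONTYPE *def) {
    /* a negative stride only flags a bottom-up image; the magnitude sizes each row */
    OMX_U32 stride = (def->nStride < 0) ? (OMX_U32)(-def->nStride) : (OMX_U32)def->nStride;
    OMX_U32 rows   = def->nSliceHeight ? def->nSliceHeight : def->nFrameHeight;
    return stride * rows * 3 / 2;          /* Y plane plus quarter-size U and V planes */
}

static double FramerateFromQ16(OMX_U32 xFramerate) {
    return xFramerate / 65536.0;           /* xFramerate is Q16 frames per second */
}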
-/**  
- * Port format parameter.  This structure is used to enumerate the various 
- * data input/output format supported by the port.
- * 
- * STRUCT MEMBERS:
- *  nSize              : Size of the structure in bytes
- *  nVersion           : OMX specification version information
- *  nPortIndex         : Indicates which port to set
- *  nIndex             : Indicates the enumeration index for the format from 
- *                       0x0 to N-1
- *  eCompressionFormat : Compression format used in this instance of the 
- *                       component. When OMX_VIDEO_CodingUnused is specified, 
- *                       eColorFormat is used 
- *  eColorFormat       : Decompressed format used by this component
- *  xFrameRate         : Indicates the video frame rate in Q16 format
- */
-typedef struct OMX_VIDEO_PARAM_PORTFORMATTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nIndex;
-    OMX_VIDEO_CODINGTYPE eCompressionFormat; 
-    OMX_COLOR_FORMATTYPE eColorFormat;
-    OMX_U32 xFramerate;
-} OMX_VIDEO_PARAM_PORTFORMATTYPE;
-
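The nIndex member implies the usual enumeration loop: increment nIndex from 0 until the component reports no further formats. A hedged sketch, assuming the standard OMX_GetParameter entry point, the OMX_IndexParamVideoPortFormat index, and that hComponent, portIndex and the InitOmxHeader helper sketched earlier exist in the caller's context:

OMX_VIDEO_PARAM_PORTFORMATTYPE fmt;
InitOmxHeader(&fmt);                       /* illustrative helper, see earlier sketch */
fmt.nPortIndex = portIndex;                /* assumed: the port being queried */
for (fmt.nIndex = 0; ; ++fmt.nIndex) {
    if (OMX_GetParameter(hComponent, OMX_IndexParamVideoPortFormat, &fmt) != OMX_ErrorNone) {
        break;                             /* OMX_ErrorNoMore ends the enumeration */
    }
    /* fmt.eCompressionFormat / fmt.eColorFormat now describe one supported combination */
}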
-
-/**
- * This is a structure for configuring video compression quantization 
- * parameter values.  Codecs may support different QP values for different
- * frame types.
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version info
- *  nPortIndex : Port that this structure applies to
- *  nQpI       : QP value to use for index frames
- *  nQpP       : QP value to use for P frames
- *  nQpB       : QP values to use for bidirectional frames 
- */
-typedef struct OMX_VIDEO_PARAM_QUANTIZATIONTYPE {
-    OMX_U32 nSize;            
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nQpI;
-    OMX_U32 nQpP;
-    OMX_U32 nQpB;
-} OMX_VIDEO_PARAM_QUANTIZATIONTYPE;
-
-
-/** 
- * Structure for configuration of video fast update parameters. 
- *  
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version info 
- *  nPortIndex : Port that this structure applies to
- *  bEnableVFU : Enable/Disable video fast update
- *  nFirstGOB  : Specifies the number of the first macroblock row
- *  nFirstMB   : specifies the first MB relative to the specified first GOB
- *  nNumMBs    : Specifies the number of MBs to be refreshed from nFirstGOB 
- *               and nFirstMB
- */
-typedef struct OMX_VIDEO_PARAM_VIDEOFASTUPDATETYPE {
-    OMX_U32 nSize;            
-    OMX_VERSIONTYPE nVersion; 
-    OMX_U32 nPortIndex;       
-    OMX_BOOL bEnableVFU;      
-    OMX_U32 nFirstGOB;                            
-    OMX_U32 nFirstMB;                            
-    OMX_U32 nNumMBs;                                  
-} OMX_VIDEO_PARAM_VIDEOFASTUPDATETYPE;
-
-
-/** 
- * Enumeration of possible bitrate control types 
- */
-typedef enum OMX_VIDEO_CONTROLRATETYPE {
-    OMX_Video_ControlRateDisable,
-    OMX_Video_ControlRateVariable,
-    OMX_Video_ControlRateConstant,
-    OMX_Video_ControlRateVariableSkipFrames,
-    OMX_Video_ControlRateConstantSkipFrames,
-    OMX_Video_ControlRateKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_Video_ControlRateVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_Video_ControlRateMax = 0x7FFFFFFF
-} OMX_VIDEO_CONTROLRATETYPE;
-
-
-/** 
- * Structure for configuring bitrate mode of a codec. 
- *
- * STRUCT MEMBERS:
- *  nSize          : Size of the struct in bytes
- *  nVersion       : OMX spec version info
- *  nPortIndex     : Port that this struct applies to
- *  eControlRate   : Control rate type enum
- *  nTargetBitrate : Target bitrate to encode with
- */
-typedef struct OMX_VIDEO_PARAM_BITRATETYPE {
-    OMX_U32 nSize;                          
-    OMX_VERSIONTYPE nVersion;               
-    OMX_U32 nPortIndex;                     
-    OMX_VIDEO_CONTROLRATETYPE eControlRate; 
-    OMX_U32 nTargetBitrate;                 
-} OMX_VIDEO_PARAM_BITRATETYPE;
-
-
-/** 
- * Enumeration of possible motion vector (MV) types 
- */
-typedef enum OMX_VIDEO_MOTIONVECTORTYPE {
-    OMX_Video_MotionVectorPixel,
-    OMX_Video_MotionVectorHalfPel,
-    OMX_Video_MotionVectorQuarterPel,
-    OMX_Video_MotionVectorEighthPel,
-    OMX_Video_MotionVectorKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_Video_MotionVectorVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_Video_MotionVectorMax = 0x7FFFFFFF
-} OMX_VIDEO_MOTIONVECTORTYPE;
-
-
-/**
- * Structure for configuring the number of motion vectors used as well
- * as their accuracy.
- * 
- * STRUCT MEMBERS:
- *  nSize            : Size of the struct in bytes
- *  nVersion         : OMX spec version info
- *  nPortIndex       : port that this structure applies to
- *  eAccuracy        : Enumerated MV accuracy
- *  bUnrestrictedMVs : Allow unrestricted MVs
- *  bFourMV          : Allow use of 4 MVs
- *  sXSearchRange    : Search range in horizontal direction for MVs
- *  sYSearchRange    : Search range in vertical direction for MVs
- */
-typedef struct OMX_VIDEO_PARAM_MOTIONVECTORTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_VIDEO_MOTIONVECTORTYPE eAccuracy;
-    OMX_BOOL bUnrestrictedMVs;
-    OMX_BOOL bFourMV;
-    OMX_S32 sXSearchRange;
-    OMX_S32 sYSearchRange;
-} OMX_VIDEO_PARAM_MOTIONVECTORTYPE;
-
-
-/** 
- * Enumeration of possible methods to use for Intra Refresh 
- */
-typedef enum OMX_VIDEO_INTRAREFRESHTYPE {
-    OMX_VIDEO_IntraRefreshCyclic,
-    OMX_VIDEO_IntraRefreshAdaptive,
-    OMX_VIDEO_IntraRefreshBoth,
-    OMX_VIDEO_IntraRefreshKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_VIDEO_IntraRefreshVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_IntraRefreshMax = 0x7FFFFFFF
-} OMX_VIDEO_INTRAREFRESHTYPE;
-
-
-/**
- * Structure for configuring intra refresh mode 
- * 
- * STRUCT MEMBERS:
- *  nSize        : Size of the structure in bytes
- *  nVersion     : OMX specification version information
- *  nPortIndex   : Port that this structure applies to
- *  eRefreshMode : Cyclic, Adaptive, or Both
- *  nAirMBs      : Number of intra macroblocks to refresh in a frame when 
- *                 AIR is enabled
- *  nAirRef      : Number of times a motion marked macroblock has to be  
- *                 intra coded
- *  nCirMBs      : Number of consecutive macroblocks to be coded as "intra"  
- *                 when CIR is enabled
- */
-typedef struct OMX_VIDEO_PARAM_INTRAREFRESHTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_VIDEO_INTRAREFRESHTYPE eRefreshMode;
-    OMX_U32 nAirMBs;
-    OMX_U32 nAirRef;
-    OMX_U32 nCirMBs;
-} OMX_VIDEO_PARAM_INTRAREFRESHTYPE;
-
-
-/**
- * Structure for enabling various error correction methods for video 
- * compression.
- *
- * STRUCT MEMBERS:
- *  nSize                   : Size of the structure in bytes
- *  nVersion                : OMX specification version information 
- *  nPortIndex              : Port that this structure applies to 
- *  bEnableHEC              : Enable/disable header extension codes (HEC)
- *  bEnableResync           : Enable/disable resynchronization markers
- *  nResynchMarkerSpacing   : Resynch markers interval (in bits) to be 
- *                            applied in the stream 
- *  bEnableDataPartitioning : Enable/disable data partitioning 
- *  bEnableRVLC             : Enable/disable reversible variable length 
- *                            coding
- */
-typedef struct OMX_VIDEO_PARAM_ERRORCORRECTIONTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_BOOL bEnableHEC;
-    OMX_BOOL bEnableResync;
-    OMX_U32  nResynchMarkerSpacing;
-    OMX_BOOL bEnableDataPartitioning;
-    OMX_BOOL bEnableRVLC;
-} OMX_VIDEO_PARAM_ERRORCORRECTIONTYPE;
-
-
-/** 
- * Configuration of variable block-size motion compensation (VBSMC) 
- * 
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information 
- *  nPortIndex : Port that this structure applies to
- *  b16x16     : Enable inter block search 16x16
- *  b16x8      : Enable inter block search 16x8
- *  b8x16      : Enable inter block search 8x16
- *  b8x8       : Enable inter block search 8x8
- *  b8x4       : Enable inter block search 8x4
- *  b4x8       : Enable inter block search 4x8
- *  b4x4       : Enable inter block search 4x4
- */
-typedef struct OMX_VIDEO_PARAM_VBSMCTYPE {
-    OMX_U32 nSize; 
-    OMX_VERSIONTYPE nVersion; 
-    OMX_U32 nPortIndex;       
-    OMX_BOOL b16x16; 
-    OMX_BOOL b16x8; 
-    OMX_BOOL b8x16;
-    OMX_BOOL b8x8;
-    OMX_BOOL b8x4;
-    OMX_BOOL b4x8;
-    OMX_BOOL b4x4;
-} OMX_VIDEO_PARAM_VBSMCTYPE;
-
-
-/** 
- * H.263 profile types, each profile indicates support for various 
- * performance bounds and different annexes.
- *
- * ENUMS:
- *  Baseline           : Baseline Profile: H.263 (V1), no optional modes                                                    
- *  H320 Coding        : H.320 Coding Efficiency Backward Compatibility 
- *                       Profile: H.263+ (V2), includes annexes I, J, L.4
- *                       and T
- *  BackwardCompatible : Backward Compatibility Profile: H.263 (V1), 
- *                       includes annex F                                    
- *  ISWV2              : Interactive Streaming Wireless Profile: H.263+ 
- *                       (V2), includes annexes I, J, K and T                 
- *  ISWV3              : Interactive Streaming Wireless Profile: H.263++  
- *                       (V3), includes profile 3 and annexes V and W.6.3.8   
- *  HighCompression    : Conversational High Compression Profile: H.263++  
- *                       (V3), includes profiles 1 & 2 and annexes D and U   
- *  Internet           : Conversational Internet Profile: H.263++ (V3),  
- *                       includes profile 5 and annex K                       
- *  Interlace          : Conversational Interlace Profile: H.263++ (V3),  
- *                       includes profile 5 and annex W.6.3.11               
- *  HighLatency        : High Latency Profile: H.263++ (V3), includes  
- *                       profile 6 and annexes O.1 and P.5                       
- */
-typedef enum OMX_VIDEO_H263PROFILETYPE {
-    OMX_VIDEO_H263ProfileBaseline            = 0x01,        
-    OMX_VIDEO_H263ProfileH320Coding          = 0x02,          
-    OMX_VIDEO_H263ProfileBackwardCompatible  = 0x04,  
-    OMX_VIDEO_H263ProfileISWV2               = 0x08,               
-    OMX_VIDEO_H263ProfileISWV3               = 0x10,               
-    OMX_VIDEO_H263ProfileHighCompression     = 0x20,     
-    OMX_VIDEO_H263ProfileInternet            = 0x40,            
-    OMX_VIDEO_H263ProfileInterlace           = 0x80,           
-    OMX_VIDEO_H263ProfileHighLatency         = 0x100,         
-    OMX_VIDEO_H263ProfileKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_VIDEO_H263ProfileVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_H263ProfileMax                 = 0x7FFFFFFF  
-} OMX_VIDEO_H263PROFILETYPE;
-
-
-/** 
- * H.263 level types, each level indicates support for various frame sizes, 
- * bit rates, decoder frame rates.
- */
-typedef enum OMX_VIDEO_H263LEVELTYPE {
-    OMX_VIDEO_H263Level10  = 0x01,  
-    OMX_VIDEO_H263Level20  = 0x02,      
-    OMX_VIDEO_H263Level30  = 0x04,      
-    OMX_VIDEO_H263Level40  = 0x08,      
-    OMX_VIDEO_H263Level45  = 0x10,      
-    OMX_VIDEO_H263Level50  = 0x20,      
-    OMX_VIDEO_H263Level60  = 0x40,      
-    OMX_VIDEO_H263Level70  = 0x80, 
-    OMX_VIDEO_H263LevelKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_VIDEO_H263LevelVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_H263LevelMax = 0x7FFFFFFF  
-} OMX_VIDEO_H263LEVELTYPE;
-
-
-/** 
- * Specifies the picture type. These values should be OR'd to signal all 
- * pictures types which are allowed.
- *
- * ENUMS:
- *  Generic Picture Types:          I, P and B
- *  H.263 Specific Picture Types:   SI and SP
- *  H.264 Specific Picture Types:   EI and EP
- *  MPEG-4 Specific Picture Types:  S
- */
-typedef enum OMX_VIDEO_PICTURETYPE {
-    OMX_VIDEO_PictureTypeI   = 0x01,
-    OMX_VIDEO_PictureTypeP   = 0x02,
-    OMX_VIDEO_PictureTypeB   = 0x04,
-    OMX_VIDEO_PictureTypeSI  = 0x08,
-    OMX_VIDEO_PictureTypeSP  = 0x10,
-    OMX_VIDEO_PictureTypeEI  = 0x11,
-    OMX_VIDEO_PictureTypeEP  = 0x12,
-    OMX_VIDEO_PictureTypeS   = 0x14,
-    OMX_VIDEO_PictureTypeKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_VIDEO_PictureTypeVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_PictureTypeMax = 0x7FFFFFFF
-} OMX_VIDEO_PICTURETYPE;
-
-
-/** 
- * H.263 Params 
- *
- * STRUCT MEMBERS:
- *  nSize                    : Size of the structure in bytes
- *  nVersion                 : OMX specification version information 
- *  nPortIndex               : Port that this structure applies to
- *  nPFrames                 : Number of P frames between each I frame
- *  nBFrames                 : Number of B frames between each I frame
- *  eProfile                 : H.263 profile(s) to use
- *  eLevel                   : H.263 level(s) to use
- *  bPLUSPTYPEAllowed        : Indicating that it is allowed to use PLUSPTYPE 
- *                             (specified in the 1998 version of H.263) to 
- *                             indicate custom picture sizes or clock 
- *                             frequencies 
- *  nAllowedPictureTypes     : Specifies the picture types allowed in the 
- *                             bitstream
- *  bForceRoundingTypeToZero : value of the RTYPE bit (bit 6 of MPPTYPE) is 
- *                             not constrained. It is recommended to change 
- *                             the value of the RTYPE bit for each reference 
- *                             picture in error-free communication
- *  nPictureHeaderRepetition : Specifies the frequency of picture header 
- *                             repetition
- *  nGOBHeaderInterval       : Specifies the interval of non-empty GOB  
- *                             headers in units of GOBs
- */
-typedef struct OMX_VIDEO_PARAM_H263TYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nPFrames;
-    OMX_U32 nBFrames;
-    OMX_VIDEO_H263PROFILETYPE eProfile;
-	OMX_VIDEO_H263LEVELTYPE eLevel;
-    OMX_BOOL bPLUSPTYPEAllowed;
-    OMX_U32 nAllowedPictureTypes;
-    OMX_BOOL bForceRoundingTypeToZero;
-    OMX_U32 nPictureHeaderRepetition;
-    OMX_U32 nGOBHeaderInterval;
-} OMX_VIDEO_PARAM_H263TYPE;
-
-
-/** 
- * MPEG-2 profile types, each profile indicates support for various 
- * performance bounds and different annexes.
- */
-typedef enum OMX_VIDEO_MPEG2PROFILETYPE {
-    OMX_VIDEO_MPEG2ProfileSimple = 0,  /**< Simple Profile */
-    OMX_VIDEO_MPEG2ProfileMain,        /**< Main Profile */
-    OMX_VIDEO_MPEG2Profile422,         /**< 4:2:2 Profile */
-    OMX_VIDEO_MPEG2ProfileSNR,         /**< SNR Profile */
-    OMX_VIDEO_MPEG2ProfileSpatial,     /**< Spatial Profile */
-    OMX_VIDEO_MPEG2ProfileHigh,        /**< High Profile */
-    OMX_VIDEO_MPEG2ProfileKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_VIDEO_MPEG2ProfileVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_MPEG2ProfileMax = 0x7FFFFFFF  
-} OMX_VIDEO_MPEG2PROFILETYPE;
-
-
-/** 
- * MPEG-2 level types, each level indicates support for various frame 
- * sizes, bit rates, decoder frame rates.
- */
-typedef enum OMX_VIDEO_MPEG2LEVELTYPE {
-    OMX_VIDEO_MPEG2LevelLL = 0,  /**< Low Level */ 
-    OMX_VIDEO_MPEG2LevelML,      /**< Main Level */ 
-    OMX_VIDEO_MPEG2LevelH14,     /**< High 1440 */ 
-    OMX_VIDEO_MPEG2LevelHL,      /**< High Level */   
-    OMX_VIDEO_MPEG2LevelKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_VIDEO_MPEG2LevelVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_MPEG2LevelMax = 0x7FFFFFFF  
-} OMX_VIDEO_MPEG2LEVELTYPE;
-
-
-/** 
- * MPEG-2 params 
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  nPFrames   : Number of P frames between each I frame
- *  nBFrames   : Number of B frames between each I frame
- *  eProfile   : MPEG-2 profile(s) to use
- *  eLevel     : MPEG-2 levels(s) to use
- */
-typedef struct OMX_VIDEO_PARAM_MPEG2TYPE {
-    OMX_U32 nSize;           
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;      
-    OMX_U32 nPFrames;        
-    OMX_U32 nBFrames;        
-    OMX_VIDEO_MPEG2PROFILETYPE eProfile;
-	OMX_VIDEO_MPEG2LEVELTYPE eLevel;   
-} OMX_VIDEO_PARAM_MPEG2TYPE;
-
-
-/** 
- * MPEG-4 profile types, each profile indicates support for various 
- * performance bounds and different annexes.
- * 
- * ENUMS:
- *  - Simple Profile, Levels 1-3
- *  - Simple Scalable Profile, Levels 1-2
- *  - Core Profile, Levels 1-2
- *  - Main Profile, Levels 2-4
- *  - N-bit Profile, Level 2
- *  - Scalable Texture Profile, Level 1
- *  - Simple Face Animation Profile, Levels 1-2
- *  - Simple Face and Body Animation (FBA) Profile, Levels 1-2
- *  - Basic Animated Texture Profile, Levels 1-2
- *  - Hybrid Profile, Levels 1-2
- *  - Advanced Real Time Simple Profiles, Levels 1-4
- *  - Core Scalable Profile, Levels 1-3
- *  - Advanced Coding Efficiency Profile, Levels 1-4
- *  - Advanced Core Profile, Levels 1-2
- *  - Advanced Scalable Texture, Levels 2-3
- */
-typedef enum OMX_VIDEO_MPEG4PROFILETYPE {
-    OMX_VIDEO_MPEG4ProfileSimple           = 0x01,        
-    OMX_VIDEO_MPEG4ProfileSimpleScalable   = 0x02,    
-    OMX_VIDEO_MPEG4ProfileCore             = 0x04,              
-    OMX_VIDEO_MPEG4ProfileMain             = 0x08,             
-    OMX_VIDEO_MPEG4ProfileNbit             = 0x10,              
-    OMX_VIDEO_MPEG4ProfileScalableTexture  = 0x20,   
-    OMX_VIDEO_MPEG4ProfileSimpleFace       = 0x40,        
-    OMX_VIDEO_MPEG4ProfileSimpleFBA        = 0x80,         
-    OMX_VIDEO_MPEG4ProfileBasicAnimated    = 0x100,     
-    OMX_VIDEO_MPEG4ProfileHybrid           = 0x200,            
-    OMX_VIDEO_MPEG4ProfileAdvancedRealTime = 0x400,  
-    OMX_VIDEO_MPEG4ProfileCoreScalable     = 0x800,      
-    OMX_VIDEO_MPEG4ProfileAdvancedCoding   = 0x1000,    
-    OMX_VIDEO_MPEG4ProfileAdvancedCore     = 0x2000,      
-    OMX_VIDEO_MPEG4ProfileAdvancedScalable = 0x4000,
-    OMX_VIDEO_MPEG4ProfileAdvancedSimple   = 0x8000,
-    OMX_VIDEO_MPEG4ProfileKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_VIDEO_MPEG4ProfileVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_MPEG4ProfileMax              = 0x7FFFFFFF  
-} OMX_VIDEO_MPEG4PROFILETYPE;
-
-
-/** 
- * MPEG-4 level types, each level indicates support for various frame 
- * sizes, bit rates, decoder frame rates.
- */
-typedef enum OMX_VIDEO_MPEG4LEVELTYPE {
-    OMX_VIDEO_MPEG4Level0  = 0x01,   /**< Level 0 */   
-    OMX_VIDEO_MPEG4Level0b = 0x02,   /**< Level 0b */   
-    OMX_VIDEO_MPEG4Level1  = 0x04,   /**< Level 1 */ 
-    OMX_VIDEO_MPEG4Level2  = 0x08,   /**< Level 2 */ 
-    OMX_VIDEO_MPEG4Level3  = 0x10,   /**< Level 3 */ 
-    OMX_VIDEO_MPEG4Level4  = 0x20,   /**< Level 4 */  
-    OMX_VIDEO_MPEG4Level4a = 0x40,   /**< Level 4a */  
-    OMX_VIDEO_MPEG4Level5  = 0x80,   /**< Level 5 */  
-    OMX_VIDEO_MPEG4LevelKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_VIDEO_MPEG4LevelVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_MPEG4LevelMax = 0x7FFFFFFF  
-} OMX_VIDEO_MPEG4LEVELTYPE;
-
-
-/** 
- * MPEG-4 configuration.  This structure handles configuration options
- * which are specific to MPEG4 algorithms
- *
- * STRUCT MEMBERS:
- *  nSize                : Size of the structure in bytes
- *  nVersion             : OMX specification version information
- *  nPortIndex           : Port that this structure applies to
- *  nSliceHeaderSpacing  : Number of macroblocks between slice header (H263+ 
- *                         Annex K). Put zero if not used
- *  bSVH                 : Enable Short Video Header mode
- *  bGov                 : Flag to enable GOV
- *  nPFrames             : Number of P frames between each I frame (also called 
- *                         GOV period)
- *  nBFrames             : Number of B frames between each I frame
- *  nIDCVLCThreshold     : Value of intra DC VLC threshold
- *  bACPred              : Flag to use ac prediction
- *  nMaxPacketSize       : Maximum size of packet in bytes.
- *  nTimeIncRes          : Used to pass VOP time increment resolution for MPEG4. 
- *                         Interpreted as described in MPEG4 standard.
- *  eProfile             : MPEG-4 profile(s) to use.
- *  eLevel               : MPEG-4 level(s) to use.
- *  nAllowedPictureTypes : Specifies the picture types allowed in the bitstream
- *  nHeaderExtension     : Specifies the number of consecutive video packet
- *                         headers within a VOP
- *  bReversibleVLC       : Specifies whether reversible variable length coding 
- *                         is in use
- */
-typedef struct OMX_VIDEO_PARAM_MPEG4TYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nSliceHeaderSpacing;
-    OMX_BOOL bSVH;
-    OMX_BOOL bGov;
-    OMX_U32 nPFrames;
-    OMX_U32 nBFrames;
-    OMX_U32 nIDCVLCThreshold;
-    OMX_BOOL bACPred;
-    OMX_U32 nMaxPacketSize;
-    OMX_U32 nTimeIncRes;
-    OMX_VIDEO_MPEG4PROFILETYPE eProfile;
-    OMX_VIDEO_MPEG4LEVELTYPE eLevel;
-    OMX_U32 nAllowedPictureTypes;
-    OMX_U32 nHeaderExtension;
-    OMX_BOOL bReversibleVLC;
-} OMX_VIDEO_PARAM_MPEG4TYPE;
-
-
-/** 
- * WMV Versions 
- */
-typedef enum OMX_VIDEO_WMVFORMATTYPE {
-    OMX_VIDEO_WMVFormatUnused = 0x01,   /**< Format unused or unknown */
-    OMX_VIDEO_WMVFormat7      = 0x02,   /**< Windows Media Video format 7 */
-    OMX_VIDEO_WMVFormat8      = 0x04,   /**< Windows Media Video format 8 */
-    OMX_VIDEO_WMVFormat9      = 0x08,   /**< Windows Media Video format 9 */
-    OMX_VIDEO_WMFFormatKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_VIDEO_WMFFormatVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_WMVFormatMax    = 0x7FFFFFFF
-} OMX_VIDEO_WMVFORMATTYPE;
-
-
-/** 
- * WMV Params 
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  eFormat    : Version of WMV stream / data
- */
-typedef struct OMX_VIDEO_PARAM_WMVTYPE {
-    OMX_U32 nSize; 
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_VIDEO_WMVFORMATTYPE eFormat;
-} OMX_VIDEO_PARAM_WMVTYPE;
-
-
-/** 
- * Real Video Version 
- */
-typedef enum OMX_VIDEO_RVFORMATTYPE {
-    OMX_VIDEO_RVFormatUnused = 0, /**< Format unused or unknown */
-    OMX_VIDEO_RVFormat8,          /**< Real Video format 8 */
-    OMX_VIDEO_RVFormat9,          /**< Real Video format 9 */
-    OMX_VIDEO_RVFormatG2,         /**< Real Video Format G2 */
-    OMX_VIDEO_RVFormatKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_VIDEO_RVFormatVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_RVFormatMax = 0x7FFFFFFF
-} OMX_VIDEO_RVFORMATTYPE;
-
-
-/** 
- * Real Video Params 
- *
- * STRUCT MEMBERS:
- *  nSize              : Size of the structure in bytes
- *  nVersion           : OMX specification version information 
- *  nPortIndex         : Port that this structure applies to
- *  eFormat            : Version of RV stream / data
- *  nBitsPerPixel      : Bits per pixel coded in the frame
- *  nPaddedWidth       : Padded width in pixel of a video frame
- *  nPaddedHeight      : Padded Height in pixels of a video frame
- *  nFrameRate         : Rate of video in frames per second
- *  nBitstreamFlags    : Flags which carry internal information about the bitstream
- *  nBitstreamVersion  : Bitstream version
- *  nMaxEncodeFrameSize: Max encoded frame size
- *  bEnablePostFilter  : Turn on/off post filter
- *  bEnableTemporalInterpolation : Turn on/off temporal interpolation
- *  bEnableLatencyMode : When enabled, the decoder does not display a decoded 
- *                       frame until it has detected that no enhancement layer 
- *                       frames or dependent B frames will be coming. This 
- *                       detection usually occurs when a subsequent non-B 
- *                       frame is encountered 
- */
-typedef struct OMX_VIDEO_PARAM_RVTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_VIDEO_RVFORMATTYPE eFormat;
-    OMX_U16 nBitsPerPixel;
-    OMX_U16 nPaddedWidth;
-    OMX_U16 nPaddedHeight;
-    OMX_U32 nFrameRate;
-    OMX_U32 nBitstreamFlags;
-    OMX_U32 nBitstreamVersion;
-    OMX_U32 nMaxEncodeFrameSize;
-    OMX_BOOL bEnablePostFilter;
-    OMX_BOOL bEnableTemporalInterpolation;
-    OMX_BOOL bEnableLatencyMode;
-} OMX_VIDEO_PARAM_RVTYPE;
-
-
-/** 
- * AVC profile types, each profile indicates support for various 
- * performance bounds and different annexes.
- */
-typedef enum OMX_VIDEO_AVCPROFILETYPE {
-    OMX_VIDEO_AVCProfileBaseline = 0x01,   /**< Baseline profile */
-    OMX_VIDEO_AVCProfileMain     = 0x02,   /**< Main profile */
-    OMX_VIDEO_AVCProfileExtended = 0x04,   /**< Extended profile */
-    OMX_VIDEO_AVCProfileHigh     = 0x08,   /**< High profile */
-    OMX_VIDEO_AVCProfileHigh10   = 0x10,   /**< High 10 profile */
-    OMX_VIDEO_AVCProfileHigh422  = 0x20,   /**< High 4:2:2 profile */
-    OMX_VIDEO_AVCProfileHigh444  = 0x40,   /**< High 4:4:4 profile */
-    OMX_VIDEO_AVCProfileKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_VIDEO_AVCProfileVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_AVCProfileMax      = 0x7FFFFFFF  
-} OMX_VIDEO_AVCPROFILETYPE;
-
-
-/** 
- * AVC level types, each level indicates support for various frame sizes, 
- * bit rates, decoder frame rates.
- */
-typedef enum OMX_VIDEO_AVCLEVELTYPE {
-    OMX_VIDEO_AVCLevel1   = 0x01,     /**< Level 1 */
-    OMX_VIDEO_AVCLevel1b  = 0x02,     /**< Level 1b */
-    OMX_VIDEO_AVCLevel11  = 0x04,     /**< Level 1.1 */
-    OMX_VIDEO_AVCLevel12  = 0x08,     /**< Level 1.2 */
-    OMX_VIDEO_AVCLevel13  = 0x10,     /**< Level 1.3 */
-    OMX_VIDEO_AVCLevel2   = 0x20,     /**< Level 2 */
-    OMX_VIDEO_AVCLevel21  = 0x40,     /**< Level 2.1 */
-    OMX_VIDEO_AVCLevel22  = 0x80,     /**< Level 2.2 */
-    OMX_VIDEO_AVCLevel3   = 0x100,    /**< Level 3 */
-    OMX_VIDEO_AVCLevel31  = 0x200,    /**< Level 3.1 */
-    OMX_VIDEO_AVCLevel32  = 0x400,    /**< Level 3.2 */
-    OMX_VIDEO_AVCLevel4   = 0x800,    /**< Level 4 */
-    OMX_VIDEO_AVCLevel41  = 0x1000,   /**< Level 4.1 */
-    OMX_VIDEO_AVCLevel42  = 0x2000,   /**< Level 4.2 */
-    OMX_VIDEO_AVCLevel5   = 0x4000,   /**< Level 5 */
-    OMX_VIDEO_AVCLevel51  = 0x8000,   /**< Level 5.1 */
-    OMX_VIDEO_AVCLevelKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_VIDEO_AVCLevelVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_AVCLevelMax = 0x7FFFFFFF  
-} OMX_VIDEO_AVCLEVELTYPE;
-
-
-/** 
- * AVC loop filter modes 
- *
- * OMX_VIDEO_AVCLoopFilterEnable               : Enable
- * OMX_VIDEO_AVCLoopFilterDisable              : Disable
- * OMX_VIDEO_AVCLoopFilterDisableSliceBoundary : Disabled on slice boundaries
- */
-typedef enum OMX_VIDEO_AVCLOOPFILTERTYPE {
-    OMX_VIDEO_AVCLoopFilterEnable = 0,
-    OMX_VIDEO_AVCLoopFilterDisable,
-    OMX_VIDEO_AVCLoopFilterDisableSliceBoundary,
-    OMX_VIDEO_AVCLoopFilterKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_VIDEO_AVCLoopFilterVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_AVCLoopFilterMax = 0x7FFFFFFF
-} OMX_VIDEO_AVCLOOPFILTERTYPE;
-
-
-/** 
- * AVC params 
- *
- * STRUCT MEMBERS:
- *  nSize                     : Size of the structure in bytes
- *  nVersion                  : OMX specification version information
- *  nPortIndex                : Port that this structure applies to
- *  nSliceHeaderSpacing       : Number of macroblocks between slice header, put  
- *                              zero if not used
- *  nPFrames                  : Number of P frames between each I frame
- *  nBFrames                  : Number of B frames between each I frame
- *  bUseHadamard              : Enable/disable Hadamard transform
- *  nRefFrames                : Max number of reference frames to use for inter
- *                              motion search (1-16)
- *  nRefIdxTrailing           : Pic param set ref frame index (index into ref
- *                              frame buffer of trailing frames list), B frame
- *                              support
- *  nRefIdxForward            : Pic param set ref frame index (index into ref
- *                              frame buffer of forward frames list), B frame
- *                              support
- *  bEnableUEP                : Enable/disable unequal error protection. This 
- *                              is only valid if data partitioning is enabled.
- *  bEnableFMO                : Enable/disable flexible macroblock ordering
- *  bEnableASO                : Enable/disable arbitrary slice ordering
- *  bEnableRS                 : Enable/disable sending of redundant slices
- *  eProfile                  : AVC profile(s) to use
- *  eLevel                    : AVC level(s) to use
- *  nAllowedPictureTypes      : Specifies the picture types allowed in the 
- *                              bitstream
- *  bFrameMBsOnly             : specifies that every coded picture of the 
- *                              coded video sequence is a coded frame 
- *                              containing only frame macroblocks
- *  bMBAFF                    : Enable/disable switching between frame and 
- *                              field macroblocks within a picture
- *  bEntropyCodingCABAC       : Entropy decoding method to be applied for the 
- *                              syntax elements for which two descriptors appear 
- *                              in the syntax tables
- *  bWeightedPPrediction      : Enable/disable weighted prediction for P and
- *                              SP slices
- *  nWeightedBipredicitonMode : Default weighted prediction is applied to B 
- *                              slices 
- *  bconstIpred               : Enable/disable constrained intra prediction
- *  bDirect8x8Inference       : Specifies the method used in the derivation 
- *                              process for luma motion vectors for B_Skip, 
- *                              B_Direct_16x16 and B_Direct_8x8 as specified 
- *                              in subclause 8.4.1.2 of the AVC spec 
- *  bDirectSpatialTemporal    : Flag indicating spatial or temporal direct
- *                              mode used in B slice coding (related to 
- *                              bDirect8x8Inference) . Spatial direct mode is 
- *                              more common and should be the default.
- *  nCabacInitIdc             : Index used to init CABAC contexts
- *  eLoopFilterMode           : Enable/disable loop filter
- */
-typedef struct OMX_VIDEO_PARAM_AVCTYPE {
-    OMX_U32 nSize;                 
-    OMX_VERSIONTYPE nVersion;      
-    OMX_U32 nPortIndex;            
-    OMX_U32 nSliceHeaderSpacing;  
-    OMX_U32 nPFrames;     
-    OMX_U32 nBFrames;     
-    OMX_BOOL bUseHadamard;
-    OMX_U32 nRefFrames;  
-	OMX_U32 nRefIdx10ActiveMinus1;
-	OMX_U32 nRefIdx11ActiveMinus1;
-    OMX_BOOL bEnableUEP;  
-    OMX_BOOL bEnableFMO;  
-    OMX_BOOL bEnableASO;  
-    OMX_BOOL bEnableRS;   
-    OMX_VIDEO_AVCPROFILETYPE eProfile;
-	OMX_VIDEO_AVCLEVELTYPE eLevel; 
-    OMX_U32 nAllowedPictureTypes;  
-	OMX_BOOL bFrameMBsOnly;        									
-    OMX_BOOL bMBAFF;               
-    OMX_BOOL bEntropyCodingCABAC;  
-    OMX_BOOL bWeightedPPrediction; 
-    OMX_U32 nWeightedBipredicitonMode; 
-    OMX_BOOL bconstIpred ;
-    OMX_BOOL bDirect8x8Inference;  
-	OMX_BOOL bDirectSpatialTemporal;
-	OMX_U32 nCabacInitIdc;
-	OMX_VIDEO_AVCLOOPFILTERTYPE eLoopFilterMode;
-} OMX_VIDEO_PARAM_AVCTYPE;
-
-typedef struct OMX_VIDEO_PARAM_PROFILELEVELTYPE {
-   OMX_U32 nSize;                 
-   OMX_VERSIONTYPE nVersion;      
-   OMX_U32 nPortIndex;            
-   OMX_U32 eProfile;      /**< type is OMX_VIDEO_AVCPROFILETYPE, OMX_VIDEO_H263PROFILETYPE, 
-                                 or OMX_VIDEO_MPEG4PROFILETYPE depending on context */
-   OMX_U32 eLevel;        /**< type is OMX_VIDEO_AVCLEVELTYPE, OMX_VIDEO_H263LEVELTYPE, 
-                                 or OMX_VIDEO_MPEG4PROFILETYPE depending on context */
-   OMX_U32 nProfileIndex; /**< Used to query for individual profile support information,
-                               This parameter is valid only for 
-                               OMX_IndexParamVideoProfileLevelQuerySupported index,
-                               For all other indices this parameter is to be ignored. */
-} OMX_VIDEO_PARAM_PROFILELEVELTYPE;
-
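nProfileIndex follows the same enumeration pattern: a hedged sketch of walking every supported profile/level pair, assuming OMX_IndexParamVideoProfileLevelQuerySupported from OMX_Index.h and the same hComponent, portIndex and InitOmxHeader assumptions as before:

OMX_VIDEO_PARAM_PROFILELEVELTYPE pl;
InitOmxHeader(&pl);                        /* illustrative helper, see earlier sketch */
pl.nPortIndex = portIndex;
for (pl.nProfileIndex = 0; ; ++pl.nProfileIndex) {
    if (OMX_GetParameter(hComponent,
            OMX_IndexParamVideoProfileLevelQuerySupported, &pl) != OMX_ErrorNone) {
        break;                             /* no more supported combinations */
    }
    /* on an AVC port, pl.eProfile holds an OMX_VIDEO_AVCPROFILETYPE value and
       pl.eLevel an OMX_VIDEO_AVCLEVELTYPE value, per the comments above */
}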
-/** 
- * Structure for dynamically configuring bitrate mode of a codec. 
- *
- * STRUCT MEMBERS:
- *  nSize          : Size of the struct in bytes
- *  nVersion       : OMX spec version info
- *  nPortIndex     : Port that this struct applies to
- *  nEncodeBitrate : Target average bitrate to be generated in bps
- */
-typedef struct OMX_VIDEO_CONFIG_BITRATETYPE {
-    OMX_U32 nSize;                          
-    OMX_VERSIONTYPE nVersion;               
-    OMX_U32 nPortIndex;                     
-    OMX_U32 nEncodeBitrate;                 
-} OMX_VIDEO_CONFIG_BITRATETYPE;
-
-/** 
- * Defines Encoder Frame Rate setting
- *
- * STRUCT MEMBERS:
- *  nSize            : Size of the structure in bytes
- *  nVersion         : OMX specification version information 
- *  nPortIndex       : Port that this structure applies to
- *  xEncodeFramerate : Encoding framerate represented in Q16 format
- */
-typedef struct OMX_CONFIG_FRAMERATETYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 xEncodeFramerate; /* Q16 format */
-} OMX_CONFIG_FRAMERATETYPE;
-
-typedef struct OMX_CONFIG_INTRAREFRESHVOPTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_BOOL IntraRefreshVOP;
-} OMX_CONFIG_INTRAREFRESHVOPTYPE;
-
-typedef struct OMX_CONFIG_MACROBLOCKERRORMAPTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nErrMapSize;           /* Size of the Error Map in bytes */
-    OMX_U8  ErrMap[1];             /* Error map hint */
-} OMX_CONFIG_MACROBLOCKERRORMAPTYPE;
-
-typedef struct OMX_CONFIG_MBERRORREPORTINGTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_BOOL bEnabled;
-} OMX_CONFIG_MBERRORREPORTINGTYPE;
-
-typedef struct OMX_PARAM_MACROBLOCKSTYPE {
-    OMX_U32 nSize;
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nMacroblocks;
-} OMX_PARAM_MACROBLOCKSTYPE;
-
-/** 
- * AVC Slice Mode modes 
- *
- * OMX_VIDEO_SLICEMODE_AVCDefault   : Normal frame encoding, one slice per frame
- * OMX_VIDEO_SLICEMODE_AVCMBSlice   : NAL mode, number of MBs per frame
- * OMX_VIDEO_SLICEMODE_AVCByteSlice : NAL mode, number of bytes per frame
- */
-typedef enum OMX_VIDEO_AVCSLICEMODETYPE {
-    OMX_VIDEO_SLICEMODE_AVCDefault = 0,
-    OMX_VIDEO_SLICEMODE_AVCMBSlice,
-    OMX_VIDEO_SLICEMODE_AVCByteSlice,
-    OMX_VIDEO_SLICEMODE_AVCKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */ 
-    OMX_VIDEO_SLICEMODE_AVCVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
-    OMX_VIDEO_SLICEMODE_AVCLevelMax = 0x7FFFFFFF
-} OMX_VIDEO_AVCSLICEMODETYPE;
-
-/** 
- * AVC FMO Slice Mode Params 
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  nNumSliceGroups : Specifies the number of slice groups
- *  nSliceGroupMapType : Specifies the type of slice groups
- *  eSliceMode : Specifies the type of slice
- */
-typedef struct OMX_VIDEO_PARAM_AVCSLICEFMO {
-    OMX_U32 nSize; 
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U8 nNumSliceGroups;
-    OMX_U8 nSliceGroupMapType;
-    OMX_VIDEO_AVCSLICEMODETYPE eSliceMode;
-} OMX_VIDEO_PARAM_AVCSLICEFMO;
-
-/** 
- * AVC IDR Period Configs
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  nIDRPeriod : Specifies periodicity of IDR frames
- *  nPFrames : Specifies interval of coding Intra frames
- */
-typedef struct OMX_VIDEO_CONFIG_AVCINTRAPERIOD {
-    OMX_U32 nSize; 
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nIDRPeriod;
-    OMX_U32 nPFrames;
-} OMX_VIDEO_CONFIG_AVCINTRAPERIOD;
-
-/** 
- * AVC NAL Size Configs
- *
- * STRUCT MEMBERS:
- *  nSize      : Size of the structure in bytes
- *  nVersion   : OMX specification version information
- *  nPortIndex : Port that this structure applies to
- *  nNaluBytes : Specifies the NAL unit size
- */
-typedef struct OMX_VIDEO_CONFIG_NALSIZE {
-    OMX_U32 nSize; 
-    OMX_VERSIONTYPE nVersion;
-    OMX_U32 nPortIndex;
-    OMX_U32 nNaluBytes;
-} OMX_VIDEO_CONFIG_NALSIZE;
-
-/** @} */
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif
-/* File EOF */
-
diff --git a/include/media/stagefright/timedtext/TimedTextDriver.h b/include/media/stagefright/timedtext/TimedTextDriver.h
new file mode 100644
index 0000000..b9752df
--- /dev/null
+++ b/include/media/stagefright/timedtext/TimedTextDriver.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TIMED_TEXT_DRIVER_H_
+#define TIMED_TEXT_DRIVER_H_
+
+#include <media/stagefright/foundation/ABase.h> // for DISALLOW_* macro
+#include <utils/Errors.h> // for status_t
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+
+namespace android {
+
+class ALooper;
+class MediaPlayerBase;
+class MediaSource;
+class Parcel;
+class TimedTextPlayer;
+class TimedTextSource;
+
+class TimedTextDriver {
+public:
+    TimedTextDriver(const wp<MediaPlayerBase> &listener);
+
+    ~TimedTextDriver();
+
+    status_t start();
+    status_t pause();
+    status_t selectTrack(int32_t index);
+    status_t unselectTrack(int32_t index);
+
+    status_t seekToAsync(int64_t timeUs);
+
+    status_t addInBandTextSource(const sp<MediaSource>& source);
+    status_t addOutOfBandTextSource(const char *uri, const char *mimeType);
+    // Caller owns the file descriptor and is responsible for closing it.
+    status_t addOutOfBandTextSource(
+            int fd, off64_t offset, size_t length, const char *mimeType);
+
+    void getTrackInfo(Parcel *parcel);
+
+private:
+    Mutex mLock;
+
+    enum State {
+        UNINITIALIZED,
+        PLAYING,
+        PAUSED,
+    };
+
+    sp<ALooper> mLooper;
+    sp<TimedTextPlayer> mPlayer;
+    wp<MediaPlayerBase> mListener;
+
+    // Variables to be guarded by mLock.
+    State mState;
+    int32_t mCurrentTrackIndex;
+    Vector<sp<TimedTextSource> > mTextSourceVector;
+    // -- End of variables to be guarded by mLock
+
+    status_t selectTrack_l(int32_t index);
+
+    DISALLOW_EVIL_CONSTRUCTORS(TimedTextDriver);
+};
+
+}  // namespace android
+
+#endif  // TIMED_TEXT_DRIVER_H_
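A hedged usage sketch of the new driver; the listener and the in-band MediaSource are assumed to come from the owning player, and the call ordering (add source, select track, start) is inferred from the method names rather than stated by the header:

TimedTextDriver *driver = new TimedTextDriver(listener);  // listener: wp<MediaPlayerBase>, assumed
status_t err = driver->addInBandTextSource(textSource);   // textSource: sp<MediaSource>, assumed
if (err == OK) {
    driver->selectTrack(0);       // pick the first timed-text track
    driver->start();              // text is delivered to the listener from here on
}
// ... later, during playback ...
driver->seekToAsync(seekTimeUs);  // keep text in sync after a seek (time in microseconds)
driver->pause();
delete driver;                    // the driver is not ref-counted; the caller owns it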
diff --git a/include/private/binder/Static.h b/include/private/binder/Static.h
deleted file mode 100644
index 5b0f9fc..0000000
--- a/include/private/binder/Static.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// All static variables go here, to control initialization and
-// destruction order in the library.
-
-#include <utils/threads.h>
-
-#include <binder/IBinder.h>
-#include <binder/IMemory.h>
-#include <binder/ProcessState.h>
-#include <binder/IPermissionController.h>
-#include <binder/IServiceManager.h>
-
-namespace android {
-
-// For ProcessState.cpp
-extern Mutex gProcessMutex;
-extern sp<ProcessState> gProcess;
-
-// For ServiceManager.cpp
-extern Mutex gDefaultServiceManagerLock;
-extern sp<IServiceManager> gDefaultServiceManager;
-extern sp<IPermissionController> gPermissionController;
-
-}   // namespace android
diff --git a/include/private/binder/binder_module.h b/include/private/binder/binder_module.h
deleted file mode 100644
index a8dd64f..0000000
--- a/include/private/binder/binder_module.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _BINDER_MODULE_H_
-#define _BINDER_MODULE_H_
-
-#ifdef __cplusplus
-namespace android {
-#endif
-
-/* obtain structures and constants from the kernel header */
-
-#include <sys/ioctl.h>
-#include <linux/binder.h>
-
-#ifdef __cplusplus
-}   // namespace android
-#endif
-
-#endif // _BINDER_MODULE_H_
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 20abd51..af2db93 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -54,50 +54,99 @@
 #define CBLK_RESTORED_ON        0x0040  // track has been restored after invalidation
 #define CBLK_RESTORED_OFF       0x0040  // by AudioFlinger
 
+// Important: do not add any virtual methods, including ~
 struct audio_track_cblk_t
 {
 
     // The data members are grouped so that members accessed frequently and in the same context
     // are in the same line of data cache.
-                Mutex       lock;
-                Condition   cv;
+                Mutex       lock;           // sizeof(int)
+                Condition   cv;             // sizeof(int)
+
+                // next 4 are offsets within "buffers"
     volatile    uint32_t    user;
     volatile    uint32_t    server;
                 uint32_t    userBase;
                 uint32_t    serverBase;
+
+                // if there is a shared buffer, "buffers" is the value of pointer() for the shared
+                // buffer, otherwise "buffers" points immediately after the control block
                 void*       buffers;
                 uint32_t    frameCount;
+
                 // Cache line boundary
+
                 uint32_t    loopStart;
-                uint32_t    loopEnd;
-                int         loopCount;
-    volatile    union {
-                    uint16_t    volume[2];
-                    uint32_t    volumeLR;
-                };
+                uint32_t    loopEnd;        // read-only for server, read/write for client
+                int         loopCount;      // read/write for client
+
+                // Channel volumes are fixed point U4.12, so 0x1000 means 1.0.
+                // Left channel is in [0:15], right channel is in [16:31].
+                // Always read and write the combined pair atomically.
+                // For AudioTrack only, not used by AudioRecord.
+private:
+                uint32_t    mVolumeLR;
+public:
+
                 uint32_t    sampleRate;
+
                 // NOTE: audio_track_cblk_t::frameSize is not equal to AudioTrack::frameSize() for
                 // 8 bit PCM data: in this case,  mCblk->frameSize is based on a sample size of
                 // 16 bit because data is converted to 16 bit before being stored in buffer
 
-                uint8_t     frameSize;
+                // read-only for client, server writes once at initialization and is then read-only
+                uint8_t     frameSize;       // would normally be size_t, but 8 bits is plenty
+
+                // never used
                 uint8_t     pad1;
+
+                // used by client only
                 uint16_t    bufferTimeoutMs; // Maximum cumulated timeout before restarting audioflinger
 
-                uint16_t    waitTimeMs;      // Cumulated wait time
-                uint16_t    sendLevel;
+                uint16_t    waitTimeMs;      // Cumulated wait time, used by client only
+private:
+                // client write-only, server read-only
+                uint16_t    mSendLevel;      // Fixed point U4.12 so 0x1000 means 1.0
+public:
     volatile    int32_t     flags;
 
                 // Cache line boundary (32 bytes)
 
+                // Since the control block is always located in shared memory, this constructor
+                // is only used for placement new().  It is never used for regular new() or stack.
                             audio_track_cblk_t();
-                uint32_t    stepUser(uint32_t frameCount);
-                bool        stepServer(uint32_t frameCount);
+                uint32_t    stepUser(uint32_t frameCount);      // called by client only, where
+                // client includes regular AudioTrack and AudioFlinger::PlaybackThread::OutputTrack
+                bool        stepServer(uint32_t frameCount);    // called by server only
                 void*       buffer(uint32_t offset) const;
                 uint32_t    framesAvailable();
                 uint32_t    framesAvailable_l();
-                uint32_t    framesReady();
+                uint32_t    framesReady();                      // called by server only
                 bool        tryLock();
+
+                // No barriers on the following operations, so the ordering of loads/stores
+                // with respect to other parameters is UNPREDICTABLE. That's considered safe.
+
+                // for AudioTrack client only, caller must limit to 0.0 <= sendLevel <= 1.0
+                void        setSendLevel(float sendLevel) {
+                    mSendLevel = uint16_t(sendLevel * 0x1000);
+                }
+
+                // for AudioFlinger only; the return value must be validated by the caller
+                uint16_t    getSendLevel_U4_12() const {
+                    return mSendLevel;
+                }
+
+                // for AudioTrack client only, caller must limit to 0 <= volumeLR <= 0x10001000
+                void        setVolumeLR(uint32_t volumeLR) {
+                    mVolumeLR = volumeLR;
+                }
+
+                // for AudioFlinger only; the return value must be validated by the caller
+                uint32_t    getVolumeLR() const {
+                    return mVolumeLR;
+                }
+
 };
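A minimal standalone sketch of the U4.12 convention documented above (not part of this patch): gainToU4_12 mirrors the conversion setSendLevel() performs internally, and packVolumeLR builds the combined pair a caller would hand to setVolumeLR(); both helper names are hypothetical.

// Hypothetical illustration of the U4.12 fixed-point packing described above.
#include <stdint.h>
#include <stdio.h>

// Clamp a gain to [0.0, 1.0] and convert to U4.12, where 0x1000 represents 1.0.
static inline uint16_t gainToU4_12(float gain) {
    if (gain < 0.0f) gain = 0.0f;
    if (gain > 1.0f) gain = 1.0f;
    return (uint16_t)(gain * 0x1000);
}

// Pack left/right gains into one word: left channel in bits [0:15], right in [16:31].
static inline uint32_t packVolumeLR(float left, float right) {
    return (uint32_t)gainToU4_12(left) | ((uint32_t)gainToU4_12(right) << 16);
}

int main() {
    uint32_t volumeLR = packVolumeLR(1.0f, 0.5f);        // value a caller would pass to setVolumeLR()
    printf("volumeLR  = 0x%08x\n", volumeLR);            // 0x08001000: left = 0x1000 (1.0), right = 0x0800 (0.5)
    printf("sendLevel = 0x%04x\n", gainToU4_12(0.25f));  // 0x0400, i.e. what setSendLevel(0.25f) stores
    return 0;
}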
 
 
diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h
index 3aff0c6..0ecc348 100644
--- a/include/private/media/VideoFrame.h
+++ b/include/private/media/VideoFrame.h
@@ -26,7 +26,7 @@
 namespace android {
 
 // A simple buffer to hold binary data
-class MediaAlbumArt 
+class MediaAlbumArt
 {
 public:
     MediaAlbumArt(): mSize(0), mData(0) {}
@@ -57,9 +57,9 @@
         fclose(in);
     }
 
-    MediaAlbumArt(const MediaAlbumArt& copy) { 
-        mSize = copy.mSize; 
-        mData = NULL;  // initialize it first 
+    MediaAlbumArt(const MediaAlbumArt& copy) {
+        mSize = copy.mSize;
+        mData = NULL;  // initialize it first
         if (mSize > 0 && copy.mData != NULL) {
            mData = new uint8_t[copy.mSize];
            if (mData != NULL) {
@@ -89,7 +89,7 @@
 {
 public:
     VideoFrame(): mWidth(0), mHeight(0), mDisplayWidth(0), mDisplayHeight(0), mSize(0), mData(0) {}
- 
+
     VideoFrame(const VideoFrame& copy) {
         mWidth = copy.mWidth;
         mHeight = copy.mHeight;
diff --git a/include/private/opengles/gl_context.h b/include/private/opengles/gl_context.h
deleted file mode 100644
index 6b1fa77..0000000
--- a/include/private/opengles/gl_context.h
+++ /dev/null
@@ -1,640 +0,0 @@
-/*
- * Copyright (C) 2006 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_OPENGLES_CONTEXT_H
-#define ANDROID_OPENGLES_CONTEXT_H
-
-#include <stdint.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <pthread.h>
-#ifdef HAVE_ANDROID_OS
-#include <bionic_tls.h>
-#endif
-
-#include <private/pixelflinger/ggl_context.h>
-#include <hardware/gralloc.h>
-
-#include <GLES/gl.h>
-#include <GLES/glext.h>
-
-namespace android {
-
-
-const unsigned int OGLES_NUM_COMPRESSED_TEXTURE_FORMATS = 10
-#ifdef GL_OES_compressed_ETC1_RGB8_texture
-        + 1
-#endif
-        ;
-
-class EGLTextureObject;
-class EGLSurfaceManager;
-class EGLBufferObjectManager;
-
-namespace gl {
-
-struct ogles_context_t;
-struct matrixx_t;
-struct transform_t;
-struct buffer_t;
-
-ogles_context_t* getGlContext();
-
-template<typename T>
-static inline void swap(T& a, T& b) {
-    T t(a); a = b; b = t;
-}
-template<typename T>
-inline T max(T a, T b) {
-    return a<b ? b : a;
-}
-template<typename T>
-inline T max(T a, T b, T c) {
-    return max(a, max(b, c));
-}
-template<typename T>
-inline T min(T a, T b) {
-    return a<b ? a : b;
-}
-template<typename T>
-inline T min(T a, T b, T c) {
-    return min(a, min(b, c));
-}
-template<typename T>
-inline T min(T a, T b, T c, T d) {
-    return min(min(a,b), min(c,d));
-}
-
-// ----------------------------------------------------------------------------
-// vertices
-// ----------------------------------------------------------------------------
-
-struct vec3_t {
-    union {
-        struct { GLfixed x, y, z; };
-        struct { GLfixed r, g, b; };
-        struct { GLfixed S, T, R; };
-        GLfixed v[3];
-    };
-};
-
-struct vec4_t {
-    union {
-        struct { GLfixed x, y, z, w; };
-        struct { GLfixed r, g, b, a; };
-        struct { GLfixed S, T, R, Q; };
-        GLfixed v[4];
-    };
-};
-
-struct vertex_t {
-    enum {
-        // these constant matter for our clipping
-        CLIP_L          = 0x0001,   // clipping flags
-        CLIP_R          = 0x0002,
-        CLIP_B          = 0x0004,
-        CLIP_T          = 0x0008,
-        CLIP_N          = 0x0010,
-        CLIP_F          = 0x0020,
-
-        EYE             = 0x0040,
-        RESERVED        = 0x0080,
-
-        USER_CLIP_0     = 0x0100,   // user clipping flags
-        USER_CLIP_1     = 0x0200,
-        USER_CLIP_2     = 0x0400,
-        USER_CLIP_3     = 0x0800,
-        USER_CLIP_4     = 0x1000,
-        USER_CLIP_5     = 0x2000,
-
-        LIT             = 0x4000,   // lighting has been applied
-        TT              = 0x8000,   // texture coords transformed
-
-        FRUSTUM_CLIP_ALL= 0x003F,
-        USER_CLIP_ALL   = 0x3F00,
-        CLIP_ALL        = 0x3F3F,
-    };
-
-    // the fields below are arranged to minimize d-cache usage
-    // we group together, by cache-line, the fields most likely to be used
-
-    union {
-    vec4_t          obj;
-    vec4_t          eye;
-    };
-    vec4_t          clip;
-
-    uint32_t        flags;
-    size_t          index;  // cache tag, and vertex index
-    GLfixed         fog;
-    uint8_t         locked;
-    uint8_t         mru;
-    uint8_t         reserved[2];
-    vec4_t          window;
-
-    vec4_t          color;
-    vec4_t          texture[GGL_TEXTURE_UNIT_COUNT];
-    uint32_t        reserved1[4];
-
-    inline void clear() {
-        flags = index = locked = mru = 0;
-    }
-};
-
-struct point_size_t {
-    GGLcoord    size;
-    GLboolean   smooth;
-};
-
-struct line_width_t {
-    GGLcoord    width;
-    GLboolean   smooth;
-};
-
-struct polygon_offset_t {
-    GLfixed     factor;
-    GLfixed     units;
-    GLboolean   enable;
-};
-
-// ----------------------------------------------------------------------------
-// arrays
-// ----------------------------------------------------------------------------
-
-struct array_t {
-    typedef void (*fetcher_t)(ogles_context_t*, GLfixed*, const GLvoid*);
-    fetcher_t       fetch;
-    GLvoid const*   physical_pointer;
-    GLint           size;
-    GLsizei         stride;
-    GLvoid const*   pointer;
-    buffer_t const* bo;
-    uint16_t        type;
-    GLboolean       enable;
-    GLboolean       pad;
-    GLsizei         bounds;
-    void init(GLint, GLenum, GLsizei, const GLvoid *, const buffer_t*, GLsizei);
-    inline void resolve();
-    inline const GLubyte* element(GLint i) const {
-        return (const GLubyte*)physical_pointer + i * stride;
-    }
-};
-
-struct array_machine_t {
-    array_t         vertex;
-    array_t         normal;
-    array_t         color;
-    array_t         texture[GGL_TEXTURE_UNIT_COUNT];
-    uint8_t         activeTexture;
-    uint8_t         tmu;
-    uint16_t        cull;
-    uint32_t        flags;
-    GLenum          indicesType;
-    buffer_t const* array_buffer;
-    buffer_t const* element_array_buffer;
-
-    void (*compileElements)(ogles_context_t*, vertex_t*, GLint, GLsizei);
-    void (*compileElement)(ogles_context_t*, vertex_t*, GLint);
-
-    void (*mvp_transform)(transform_t const*, vec4_t*, vec4_t const*);
-    void (*mv_transform)(transform_t const*, vec4_t*, vec4_t const*);
-    void (*tex_transform[2])(transform_t const*, vec4_t*, vec4_t const*);
-    void (*perspective)(ogles_context_t*c, vertex_t* v);
-    void (*clipVertex)(ogles_context_t* c, vertex_t* nv,
-            GGLfixed t, const vertex_t* s, const vertex_t* p);
-    void (*clipEye)(ogles_context_t* c, vertex_t* nv,
-            GGLfixed t, const vertex_t* s, const vertex_t* p);
-};
-
-struct vertex_cache_t {
-    enum {
-        // must be at least 4
-        // 3 vertice for triangles
-        // or 2 + 2 for indexed triangles w/ cache contention
-        VERTEX_BUFFER_SIZE  = 8,
-        // must be a power of two and at least 3
-        VERTEX_CACHE_SIZE   = 64,   // 8 KB
-
-        INDEX_BITS      = 16,
-        INDEX_MASK      = ((1LU<<INDEX_BITS)-1),
-        INDEX_SEQ       = 1LU<<INDEX_BITS,
-    };
-    vertex_t*       vBuffer;
-    vertex_t*       vCache;
-    uint32_t        sequence;
-    void*           base;
-    uint32_t        total;
-    uint32_t        misses;
-    int64_t         startTime;
-    void init();
-    void uninit();
-    void clear();
-    void dump_stats(GLenum mode);
-};
-
-// ----------------------------------------------------------------------------
-// fog
-// ----------------------------------------------------------------------------
-
-struct fog_t {
-    GLfixed     density;
-    GLfixed     start;
-    GLfixed     end;
-    GLfixed     invEndMinusStart;
-    GLenum      mode;
-    GLfixed     (*fog)(ogles_context_t* c, GLfixed z);
-};
-
-// ----------------------------------------------------------------------------
-// user clip planes
-// ----------------------------------------------------------------------------
-
-const unsigned int OGLES_MAX_CLIP_PLANES = 6;
-
-struct clip_plane_t {
-    vec4_t      equation;
-};
-
-struct user_clip_planes_t {
-    clip_plane_t    plane[OGLES_MAX_CLIP_PLANES];
-    uint32_t        enable;
-};
-
-// ----------------------------------------------------------------------------
-// lighting
-// ----------------------------------------------------------------------------
-
-const unsigned int OGLES_MAX_LIGHTS = 8;
-
-struct light_t {
-    vec4_t      ambient;
-    vec4_t      diffuse;
-    vec4_t      specular;
-    vec4_t      implicitAmbient;
-    vec4_t      implicitDiffuse;
-    vec4_t      implicitSpecular;
-    vec4_t      position;       // position in eye space
-    vec4_t      objPosition;
-    vec4_t      normalizedObjPosition;
-    vec4_t      spotDir;
-    vec4_t      normalizedSpotDir;
-    GLfixed     spotExp;
-    GLfixed     spotCutoff;
-    GLfixed     spotCutoffCosine;
-    GLfixed     attenuation[3];
-    GLfixed     rConstAttenuation;
-    GLboolean   enable;
-};
-
-struct material_t {
-    vec4_t      ambient;
-    vec4_t      diffuse;
-    vec4_t      specular;
-    vec4_t      emission;
-    GLfixed     shininess;
-};
-
-struct light_model_t {
-    vec4_t      ambient;
-    GLboolean   twoSide;
-};
-
-struct color_material_t {
-    GLenum      face;
-    GLenum      mode;
-    GLboolean   enable;
-};
-
-struct lighting_t {
-    light_t             lights[OGLES_MAX_LIGHTS];
-    material_t          front;
-    light_model_t       lightModel;
-    color_material_t    colorMaterial;
-    vec4_t              implicitSceneEmissionAndAmbient;
-    vec4_t              objViewer;
-    uint32_t            enabledLights;
-    GLboolean           enable;
-    GLenum              shadeModel;
-    typedef void (*light_fct_t)(ogles_context_t*, vertex_t*);
-    void (*lightVertex)(ogles_context_t* c, vertex_t* v);
-    void (*lightTriangle)(ogles_context_t* c,
-            vertex_t* v0, vertex_t* v1, vertex_t* v2);
-};
-
-struct culling_t {
-    GLenum      cullFace;
-    GLenum      frontFace;
-    GLboolean   enable;
-};
-
-// ----------------------------------------------------------------------------
-// textures
-// ----------------------------------------------------------------------------
-
-struct texture_unit_t {
-    GLuint              name;
-    EGLTextureObject*   texture;
-    uint8_t             dirty;
-};
-
-struct texture_state_t
-{
-    texture_unit_t      tmu[GGL_TEXTURE_UNIT_COUNT];
-    int                 active;     // active tmu
-    EGLTextureObject*   defaultTexture;
-    GGLContext*         ggl;
-    uint8_t             packAlignment;
-    uint8_t             unpackAlignment;
-};
-
-// ----------------------------------------------------------------------------
-// transformation and matrices
-// ----------------------------------------------------------------------------
-
-struct matrixf_t;
-
-struct matrixx_t {
-    GLfixed m[16];
-    void load(const matrixf_t& rhs);
-};
-
-struct matrix_stack_t;
-
-
-struct matrixf_t {
-    void loadIdentity();
-    void load(const matrixf_t& rhs);
-
-    inline GLfloat* editElements() { return m; }
-    inline GLfloat const* elements() const { return m; }
-
-    void set(const GLfixed* rhs);
-    void set(const GLfloat* rhs);
-
-    static void multiply(matrixf_t& r,
-            const matrixf_t& lhs, const matrixf_t& rhs);
-
-    void dump(const char* what);
-
-private:
-    friend struct matrix_stack_t;
-    GLfloat     m[16];
-    void load(const GLfixed* rhs);
-    void load(const GLfloat* rhs);
-    void multiply(const matrixf_t& rhs);
-    void translate(GLfloat x, GLfloat y, GLfloat z);
-    void scale(GLfloat x, GLfloat y, GLfloat z);
-    void rotate(GLfloat a, GLfloat x, GLfloat y, GLfloat z);
-};
-
-enum {
-    OP_IDENTITY         = 0x00,
-    OP_TRANSLATE        = 0x01,
-    OP_UNIFORM_SCALE    = 0x02,
-    OP_SCALE            = 0x05,
-    OP_ROTATE           = 0x08,
-    OP_SKEW             = 0x10,
-    OP_ALL              = 0x1F
-};
-
-struct transform_t {
-    enum {
-        FLAGS_2D_PROJECTION = 0x1
-    };
-    matrixx_t       matrix;
-    uint32_t        flags;
-    uint32_t        ops;
-
-    union {
-        struct {
-            void (*point2)(transform_t const* t, vec4_t*, vec4_t const*);
-            void (*point3)(transform_t const* t, vec4_t*, vec4_t const*);
-            void (*point4)(transform_t const* t, vec4_t*, vec4_t const*);
-        };
-        void (*pointv[3])(transform_t const* t, vec4_t*, vec4_t const*);
-    };
-
-    void loadIdentity();
-    void picker();
-    void dump(const char* what);
-};
-
-struct mvui_transform_t : public transform_t
-{
-    void picker();
-};
-
-struct matrix_stack_t {
-    enum {
-        DO_PICKER           = 0x1,
-        DO_FLOAT_TO_FIXED   = 0x2
-    };
-    transform_t     transform;
-    uint8_t         maxDepth;
-    uint8_t         depth;
-    uint8_t         dirty;
-    uint8_t         reserved;
-    matrixf_t       *stack;
-    uint8_t         *ops;
-    void init(int depth);
-    void uninit();
-    void loadIdentity();
-    void load(const GLfixed* rhs);
-    void load(const GLfloat* rhs);
-    void multiply(const matrixf_t& rhs);
-    void translate(GLfloat x, GLfloat y, GLfloat z);
-    void scale(GLfloat x, GLfloat y, GLfloat z);
-    void rotate(GLfloat a, GLfloat x, GLfloat y, GLfloat z);
-    GLint push();
-    GLint pop();
-    void validate();
-    matrixf_t& top() { return stack[depth]; }
-    const matrixf_t& top() const { return stack[depth]; }
-    uint32_t top_ops() const { return ops[depth]; }
-    inline bool isRigidBody() const {
-        return !(ops[depth] & ~(OP_TRANSLATE|OP_UNIFORM_SCALE|OP_ROTATE));
-    }
-};
-
-struct vp_transform_t {
-    transform_t     transform;
-    matrixf_t       matrix;
-    GLfloat         zNear;
-    GLfloat         zFar;
-    void loadIdentity();
-};
-
-struct transform_state_t {
-    enum {
-        MODELVIEW           = 0x01,
-        PROJECTION          = 0x02,
-        VIEWPORT            = 0x04,
-        TEXTURE             = 0x08,
-        MVUI                = 0x10,
-        MVIT                = 0x20,
-        MVP                 = 0x40,
-    };
-    matrix_stack_t      *current;
-    matrix_stack_t      modelview;
-    matrix_stack_t      projection;
-    matrix_stack_t      texture[GGL_TEXTURE_UNIT_COUNT];
-
-    // modelview * projection
-    transform_t         mvp     __attribute__((aligned(32)));
-    // viewport transformation
-    vp_transform_t      vpt     __attribute__((aligned(32)));
-    // same for 4-D vertices
-    transform_t         mvp4;
-    // full modelview inverse transpose
-    transform_t         mvit4;
-    // upper 3x3 of mv-inverse-transpose (for normals)
-    mvui_transform_t    mvui;
-
-    GLenum              matrixMode;
-    GLenum              rescaleNormals;
-    uint32_t            dirty;
-    void invalidate();
-    void update_mvp();
-    void update_mvit();
-    void update_mvui();
-};
-
-struct viewport_t {
-    GLint       x;
-    GLint       y;
-    GLsizei     w;
-    GLsizei     h;
-    struct {
-        GLint       x;
-        GLint       y;
-    } surfaceport;
-    struct {
-        GLint       x;
-        GLint       y;
-        GLsizei     w;
-        GLsizei     h;
-    } scissor;
-};
-
-// ----------------------------------------------------------------------------
-// Lerping
-// ----------------------------------------------------------------------------
-
-struct compute_iterators_t
-{
-    void initTriangle(
-            vertex_t const* v0,
-            vertex_t const* v1,
-            vertex_t const* v2);
-
-    void initLine(
-            vertex_t const* v0,
-            vertex_t const* v1);
-
-    inline void initLerp(vertex_t const* v0, uint32_t enables);
-
-    int iteratorsScale(int32_t it[3],
-            int32_t c0, int32_t c1, int32_t c2) const;
-
-    void iterators1616(GGLfixed it[3],
-            GGLfixed c0, GGLfixed c1, GGLfixed c2) const;
-
-    void iterators0032(int32_t it[3],
-            int32_t c0, int32_t c1, int32_t c2) const;
-
-    void iterators0032(int64_t it[3],
-            int32_t c0, int32_t c1, int32_t c2) const;
-
-    GGLcoord area() const { return m_area; }
-
-private:
-    // don't change order of members here -- used by iterators.S
-    GGLcoord m_dx01, m_dy10, m_dx20, m_dy02;
-    GGLcoord m_x0, m_y0;
-    GGLcoord m_area;
-    uint8_t m_scale;
-    uint8_t m_area_scale;
-    uint8_t m_reserved[2];
-
-};
-
-// ----------------------------------------------------------------------------
-// state
-// ----------------------------------------------------------------------------
-
-#ifdef HAVE_ANDROID_OS
-    // We have a dedicated TLS slot in bionic
-    inline void setGlThreadSpecific(ogles_context_t *value) {
-        ((uint32_t *)__get_tls())[TLS_SLOT_OPENGL] = (uint32_t)value;
-    }
-    inline ogles_context_t* getGlThreadSpecific() {
-        return (ogles_context_t *)(((unsigned *)__get_tls())[TLS_SLOT_OPENGL]);
-    }
-#else
-    extern pthread_key_t gGLKey;
-    inline void setGlThreadSpecific(ogles_context_t *value) {
-        pthread_setspecific(gGLKey, value);
-    }
-    inline ogles_context_t* getGlThreadSpecific() {
-        return static_cast<ogles_context_t*>(pthread_getspecific(gGLKey));
-    }
-#endif
-
-
-struct prims_t {
-    typedef ogles_context_t* GL;
-    void (*renderPoint)(GL, vertex_t*);
-    void (*renderLine)(GL, vertex_t*, vertex_t*);
-    void (*renderTriangle)(GL, vertex_t*, vertex_t*, vertex_t*);
-};
-
-struct ogles_context_t {
-    context_t               rasterizer;
-    array_machine_t         arrays         __attribute__((aligned(32)));
-    texture_state_t         textures;
-    transform_state_t       transforms;
-    vertex_cache_t          vc;
-    prims_t                 prims;
-    culling_t               cull;
-    lighting_t              lighting;
-    user_clip_planes_t      clipPlanes;
-    compute_iterators_t     lerp;           __attribute__((aligned(32)));
-    vertex_t                current;
-    vec4_t                  currentColorClamped;
-    vec3_t                  currentNormal;
-    viewport_t              viewport;
-    point_size_t            point;
-    line_width_t            line;
-    polygon_offset_t        polygonOffset;
-    fog_t                   fog;
-    uint32_t                perspective : 1;
-    uint32_t                transformTextures : 1;
-    EGLSurfaceManager*      surfaceManager;
-    EGLBufferObjectManager* bufferObjectManager;
-
-    GLenum                  error;
-
-    static inline ogles_context_t* get() {
-        return getGlThreadSpecific();
-    }
-
-};
-
-}; // namespace gl
-}; // namespace android
-
-#endif // ANDROID_OPENGLES_CONTEXT_H
-
diff --git a/include/private/surfaceflinger/LayerState.h b/include/private/surfaceflinger/LayerState.h
deleted file mode 100644
index 3eb5c99..0000000
--- a/include/private/surfaceflinger/LayerState.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SF_LAYER_STATE_H
-#define ANDROID_SF_LAYER_STATE_H
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <utils/Errors.h>
-
-#include <ui/Region.h>
-
-#include <surfaceflinger/ISurface.h>
-
-namespace android {
-
-class Parcel;
-class ISurfaceComposerClient;
-
-struct layer_state_t {
-
-    layer_state_t()
-        :   surface(0), what(0),
-            x(0), y(0), z(0), w(0), h(0),
-            alpha(0), tint(0), flags(0), mask(0),
-            reserved(0)
-    {
-        matrix.dsdx = matrix.dtdy = 1.0f;
-        matrix.dsdy = matrix.dtdx = 0.0f;
-    }
-
-    status_t    write(Parcel& output) const;
-    status_t    read(const Parcel& input);
-
-            struct matrix22_t {
-                float   dsdx;
-                float   dtdx;
-                float   dsdy;
-                float   dtdy;
-            };
-            SurfaceID       surface;
-            uint32_t        what;
-            float           x;
-            float           y;
-            uint32_t        z;
-            uint32_t        w;
-            uint32_t        h;
-            float           alpha;
-            uint32_t        tint;
-            uint8_t         flags;
-            uint8_t         mask;
-            uint8_t         reserved;
-            matrix22_t      matrix;
-            // non POD must be last. see write/read
-            Region          transparentRegion;
-};
-
-struct ComposerState {
-    sp<ISurfaceComposerClient> client;
-    layer_state_t state;
-    status_t    write(Parcel& output) const;
-    status_t    read(const Parcel& input);
-};
-
-}; // namespace android
-
-#endif // ANDROID_SF_LAYER_STATE_H
-
diff --git a/include/private/surfaceflinger/SharedBufferStack.h b/include/private/surfaceflinger/SharedBufferStack.h
deleted file mode 100644
index 0da03d1..0000000
--- a/include/private/surfaceflinger/SharedBufferStack.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2007 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SF_SHARED_BUFFER_STACK_H
-#define ANDROID_SF_SHARED_BUFFER_STACK_H
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <utils/Debug.h>
-
-namespace android {
-// ---------------------------------------------------------------------------
-
-#define NUM_DISPLAY_MAX 4
-
-struct display_cblk_t
-{
-    uint16_t    w;
-    uint16_t    h;
-    uint8_t     format;
-    uint8_t     orientation;
-    uint8_t     reserved[2];
-    float       fps;
-    float       density;
-    float       xdpi;
-    float       ydpi;
-    uint32_t    pad[2];
-};
-
-struct surface_flinger_cblk_t   // 4KB max
-{
-    uint8_t         connected;
-    uint8_t         reserved[3];
-    uint32_t        pad[7];
-    display_cblk_t  displays[NUM_DISPLAY_MAX];
-};
-
-// ---------------------------------------------------------------------------
-
-COMPILE_TIME_ASSERT(sizeof(surface_flinger_cblk_t) <= 4096)
-
-// ---------------------------------------------------------------------------
-}; // namespace android
-
-#endif /* ANDROID_SF_SHARED_BUFFER_STACK_H */
diff --git a/include/private/ui/RegionHelper.h b/include/private/ui/RegionHelper.h
deleted file mode 100644
index 8d76533..0000000
--- a/include/private/ui/RegionHelper.h
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_UI_PRIVATE_REGION_HELPER_H
-#define ANDROID_UI_PRIVATE_REGION_HELPER_H
-
-#include <stdint.h>
-#include <sys/types.h>
-
-namespace android {
-// ----------------------------------------------------------------------------
-
-template<typename RECT>
-class region_operator
-{
-    typedef typename RECT::value_type TYPE;    
-    static const TYPE max_value = 0x7FFFFFF;
-
-public:
-    /* 
-     * Common boolean operations:
-     * value is computed as 0b101 op 0b110
-     *    other boolean operation are possible, simply compute
-     *    their corresponding value with the above formulae and use
-     *    it when instantiating a region_operator.
-     */
-    static const uint32_t LHS = 0x5;  // 0b101
-    static const uint32_t RHS = 0x6;  // 0b110
-    enum {
-        op_nand = LHS & ~RHS,
-        op_and  = LHS &  RHS,
-        op_or   = LHS |  RHS,
-        op_xor  = LHS ^  RHS
-    };
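A tiny worked example of the mask encoding documented in the comment above (not part of this file): with LHS = 0b101 and RHS = 0b110, each boolean operation collapses to a 3-bit mask, and operator() below keeps an interval when (op_mask >> inside) & 1 is set, where inside is 0 for an interval covered by the left region only, 1 for the right region only, and 2 where both coincide.

// Hypothetical illustration of the region_operator op-mask encoding.
#include <stdint.h>
#include <stdio.h>

int main() {
    const uint32_t LHS = 0x5;            // 0b101: covers "LHS only" (bit 0) and "both" (bit 2)
    const uint32_t RHS = 0x6;            // 0b110: covers "RHS only" (bit 1) and "both" (bit 2)
    const uint32_t op_and = LHS & RHS;   // 0b100: keep only intervals covered by both regions
    const uint32_t op_or  = LHS | RHS;   // 0b111: keep intervals covered by either region
    for (int inside = 0; inside < 3; ++inside) {
        printf("inside=%d: AND keeps %u, OR keeps %u\n",
               inside, (op_and >> inside) & 1u, (op_or >> inside) & 1u);
    }
    return 0;
}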
-
-    struct region {
-        RECT const* rects;
-        size_t count;
-        TYPE dx;
-        TYPE dy;
-        inline region(const region& rhs) 
-            : rects(rhs.rects), count(rhs.count), dx(rhs.dx), dy(rhs.dy) { }
-        inline region(RECT const* r, size_t c) 
-            : rects(r), count(c), dx(), dy() { }
-        inline region(RECT const* r, size_t c, TYPE dx, TYPE dy) 
-            : rects(r), count(c), dx(dx), dy(dy) { }
-    };
-
-    class region_rasterizer {
-        friend class region_operator;
-        virtual void operator()(const RECT& rect) = 0;
-    public:
-        virtual ~region_rasterizer() { };
-    };
-    
-    inline region_operator(int op, const region& lhs, const region& rhs) 
-        : op_mask(op), spanner(lhs, rhs) 
-    {
-    }
-
-    void operator()(region_rasterizer& rasterizer) {
-        RECT current;
-        do {
-            SpannerInner spannerInner(spanner.lhs, spanner.rhs);
-            int inside = spanner.next(current.top, current.bottom);
-            spannerInner.prepare(inside);
-            do {
-                TYPE left, right;
-                int inside = spannerInner.next(current.left, current.right);
-                if ((op_mask >> inside) & 1) {
-                    if (current.left < current.right && 
-                            current.top < current.bottom) {
-                        rasterizer(current);
-                    }
-                }
-            } while(!spannerInner.isDone());
-        } while(!spanner.isDone());
-    }
-
-private:    
-    uint32_t op_mask;
-
-    class SpannerBase
-    {
-    public:
-        enum {
-            lhs_before_rhs   = 0,
-            lhs_after_rhs    = 1,
-            lhs_coincide_rhs = 2
-        };
-
-    protected:
-        TYPE lhs_head;
-        TYPE lhs_tail;
-        TYPE rhs_head;
-        TYPE rhs_tail;
-
-        inline int next(TYPE& head, TYPE& tail,
-                bool& more_lhs, bool& more_rhs) 
-        {
-            int inside;
-            more_lhs = false;
-            more_rhs = false;
-            if (lhs_head < rhs_head) {
-                inside = lhs_before_rhs;
-                head = lhs_head;
-                if (lhs_tail <= rhs_head) {
-                    tail = lhs_tail;
-                    more_lhs = true;
-                } else {
-                    lhs_head = rhs_head;
-                    tail = rhs_head;
-                }
-            } else if (rhs_head < lhs_head) {
-                inside = lhs_after_rhs;
-                head = rhs_head;
-                if (rhs_tail <= lhs_head) {
-                    tail = rhs_tail;
-                    more_rhs = true;
-                } else {
-                    rhs_head = lhs_head;
-                    tail = lhs_head;
-                }
-            } else {
-                inside = lhs_coincide_rhs;
-                head = lhs_head;
-                if (lhs_tail <= rhs_tail) {
-                    tail = rhs_head = lhs_tail;
-                    more_lhs = true;
-                }
-                if (rhs_tail <= lhs_tail) {
-                    tail = lhs_head = rhs_tail;
-                    more_rhs = true;
-                }
-            }
-            return inside;
-        }
-    };
-
-    class Spanner : protected SpannerBase 
-    {
-        friend class region_operator;
-        region lhs;
-        region rhs;
-
-    public:
-        inline Spanner(const region& lhs, const region& rhs)
-            : lhs(lhs), rhs(rhs) 
-        {
-            SpannerBase::lhs_head = lhs.rects->top      + lhs.dy;
-            SpannerBase::lhs_tail = lhs.rects->bottom   + lhs.dy;
-            SpannerBase::rhs_head = rhs.rects->top      + rhs.dy;
-            SpannerBase::rhs_tail = rhs.rects->bottom   + rhs.dy;
-        }
-
-        inline bool isDone() const {
-            return !rhs.count && !lhs.count;
-        }
-
-        inline int next(TYPE& top, TYPE& bottom) 
-        {
-            bool more_lhs = false;
-            bool more_rhs = false;
-            int inside = SpannerBase::next(top, bottom, more_lhs, more_rhs);
-            if (more_lhs) {
-                advance(lhs, SpannerBase::lhs_head, SpannerBase::lhs_tail);
-            }
-            if (more_rhs) {
-                advance(rhs, SpannerBase::rhs_head, SpannerBase::rhs_tail);
-            }
-            return inside;
-        }
-
-    private:
-        static inline 
-        void advance(region& reg, TYPE& aTop, TYPE& aBottom) {
-            // got to next span
-            size_t count = reg.count;
-            RECT const * rects = reg.rects;
-            RECT const * const end = rects + count;
-            const int top = rects->top;
-            while (rects != end && rects->top == top) {
-                rects++;
-                count--;
-            }
-            if (rects != end) {
-                aTop    = rects->top    + reg.dy;
-                aBottom = rects->bottom + reg.dy;
-            } else {
-                aTop    = max_value;
-                aBottom = max_value;
-            }
-            reg.rects = rects;
-            reg.count = count;
-        }
-    };
-
-    class SpannerInner : protected SpannerBase 
-    {
-        region lhs;
-        region rhs;
-        
-    public:
-        inline SpannerInner(const region& lhs, const region& rhs)
-            : lhs(lhs), rhs(rhs) 
-        {
-        }
-
-        inline void prepare(int inside) {
-            if (inside == SpannerBase::lhs_before_rhs) {
-                SpannerBase::lhs_head = lhs.rects->left  + lhs.dx;
-                SpannerBase::lhs_tail = lhs.rects->right + lhs.dx;
-                SpannerBase::rhs_head = max_value;
-                SpannerBase::rhs_tail = max_value;
-            } else if (inside == SpannerBase::lhs_after_rhs) {
-                SpannerBase::lhs_head = max_value;
-                SpannerBase::lhs_tail = max_value;
-                SpannerBase::rhs_head = rhs.rects->left  + rhs.dx;
-                SpannerBase::rhs_tail = rhs.rects->right + rhs.dx;
-            } else {
-                SpannerBase::lhs_head = lhs.rects->left  + lhs.dx;
-                SpannerBase::lhs_tail = lhs.rects->right + lhs.dx;
-                SpannerBase::rhs_head = rhs.rects->left  + rhs.dx;
-                SpannerBase::rhs_tail = rhs.rects->right + rhs.dx;
-            }
-        }
-
-        inline bool isDone() const {
-            return SpannerBase::lhs_head == max_value && 
-                   SpannerBase::rhs_head == max_value;
-        }
-
-        inline int next(TYPE& left, TYPE& right) 
-        {
-            bool more_lhs = false;
-            bool more_rhs = false;
-            int inside = SpannerBase::next(left, right, more_lhs, more_rhs);
-            if (more_lhs) {
-                advance(lhs, SpannerBase::lhs_head, SpannerBase::lhs_tail);
-            }
-            if (more_rhs) {
-                advance(rhs, SpannerBase::rhs_head, SpannerBase::rhs_tail);
-            }
-            return inside;
-        }
-
-    private:
-        static inline 
-        void advance(region& reg, TYPE& left, TYPE& right) {
-            if (reg.rects && reg.count) {
-                const int cur_span_top = reg.rects->top;
-                reg.rects++;
-                reg.count--;
-                if (!reg.count || reg.rects->top != cur_span_top) {
-                    left  = max_value;
-                    right = max_value;
-                } else {
-                    left  = reg.rects->left  + reg.dx;
-                    right = reg.rects->right + reg.dx;
-                }
-            }
-        }
-    };
-
-    Spanner spanner;
-};
-
-// ----------------------------------------------------------------------------
-};
-
-#endif /* ANDROID_UI_PRIVATE_REGION_HELPER_H */
diff --git a/include/private/ui/android_natives_priv.h b/include/private/ui/android_natives_priv.h
deleted file mode 100644
index 6b9f524..0000000
--- a/include/private/ui/android_natives_priv.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <ui/android_native_buffer.h>
diff --git a/include/private/utils/Static.h b/include/private/utils/Static.h
deleted file mode 100644
index d95ae0d..0000000
--- a/include/private/utils/Static.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// All static variables go here, to control initialization and
-// destruction order in the library.
-
-#include <utils/threads.h>
-#include <utils/KeyedVector.h>
-
-namespace android {
-// For TextStream.cpp
-extern Vector<int32_t> gTextBuffers;
-
-// For String8.cpp
-extern void initialize_string8();
-extern void terminate_string8();
-
-// For String16.cpp
-extern void initialize_string16();
-extern void terminate_string16();
-
-}   // namespace android
diff --git a/media/common_time/Android.mk b/media/common_time/Android.mk
new file mode 100644
index 0000000..526f17b
--- /dev/null
+++ b/media/common_time/Android.mk
@@ -0,0 +1,21 @@
+LOCAL_PATH:= $(call my-dir)
+#
+# libcommon_time_client
+# (binder marshalers for ICommonClock as well as common clock and local clock
+# helper code)
+#
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := libcommon_time_client
+LOCAL_MODULE_TAGS := optional
+LOCAL_SRC_FILES := cc_helper.cpp \
+                   local_clock.cpp \
+                   ICommonClock.cpp \
+                   ICommonTimeConfig.cpp \
+                   utils.cpp
+LOCAL_SHARED_LIBRARIES := libbinder \
+                          libhardware \
+                          libutils
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/common_time/ICommonClock.cpp b/media/common_time/ICommonClock.cpp
new file mode 100644
index 0000000..28b43ac
--- /dev/null
+++ b/media/common_time/ICommonClock.cpp
@@ -0,0 +1,432 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <linux/socket.h>
+
+#include <common_time/ICommonClock.h>
+#include <binder/Parcel.h>
+
+#include "utils.h"
+
+namespace android {
+
+/***** ICommonClock *****/
+
+enum {
+    IS_COMMON_TIME_VALID = IBinder::FIRST_CALL_TRANSACTION,
+    COMMON_TIME_TO_LOCAL_TIME,
+    LOCAL_TIME_TO_COMMON_TIME,
+    GET_COMMON_TIME,
+    GET_COMMON_FREQ,
+    GET_LOCAL_TIME,
+    GET_LOCAL_FREQ,
+    GET_ESTIMATED_ERROR,
+    GET_TIMELINE_ID,
+    GET_STATE,
+    GET_MASTER_ADDRESS,
+    REGISTER_LISTENER,
+    UNREGISTER_LISTENER,
+};
+
+const String16 ICommonClock::kServiceName("common_time.clock");
+const uint64_t ICommonClock::kInvalidTimelineID = 0;
+const int32_t ICommonClock::kErrorEstimateUnknown = 0x7FFFFFFF;
+
+class BpCommonClock : public BpInterface<ICommonClock>
+{
+  public:
+    BpCommonClock(const sp<IBinder>& impl)
+        : BpInterface<ICommonClock>(impl) {}
+
+    virtual status_t isCommonTimeValid(bool* valid, uint32_t* timelineID) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(IS_COMMON_TIME_VALID,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *valid = reply.readInt32();
+                *timelineID = reply.readInt32();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t commonTimeToLocalTime(int64_t commonTime,
+            int64_t* localTime) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        data.writeInt64(commonTime);
+        status_t status = remote()->transact(COMMON_TIME_TO_LOCAL_TIME,
+                data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *localTime = reply.readInt64();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t localTimeToCommonTime(int64_t localTime,
+            int64_t* commonTime) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        data.writeInt64(localTime);
+        status_t status = remote()->transact(LOCAL_TIME_TO_COMMON_TIME,
+                data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *commonTime = reply.readInt64();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getCommonTime(int64_t* commonTime) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_COMMON_TIME, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *commonTime = reply.readInt64();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getCommonFreq(uint64_t* freq) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_COMMON_FREQ, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *freq = reply.readInt64();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getLocalTime(int64_t* localTime) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_LOCAL_TIME, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *localTime = reply.readInt64();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getLocalFreq(uint64_t* freq) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_LOCAL_FREQ, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *freq = reply.readInt64();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getEstimatedError(int32_t* estimate) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_ESTIMATED_ERROR, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *estimate = reply.readInt32();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getTimelineID(uint64_t* id) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_TIMELINE_ID, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *id = static_cast<uint64_t>(reply.readInt64());
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getState(State* state) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_STATE, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *state = static_cast<State>(reply.readInt32());
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getMasterAddr(struct sockaddr_storage* addr) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_MASTER_ADDRESS, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK)
+                deserializeSockaddr(&reply, addr);
+        }
+        return status;
+    }
+
+    virtual status_t registerListener(
+            const sp<ICommonClockListener>& listener) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        data.writeStrongBinder(listener->asBinder());
+
+        status_t status = remote()->transact(REGISTER_LISTENER, data, &reply);
+
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t unregisterListener(
+            const sp<ICommonClockListener>& listener) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        data.writeStrongBinder(listener->asBinder());
+        status_t status = remote()->transact(UNREGISTER_LISTENER, data, &reply);
+
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+};
+
+IMPLEMENT_META_INTERFACE(CommonClock, "android.os.ICommonClock");
+
+status_t BnCommonClock::onTransact(uint32_t code,
+                                   const Parcel& data,
+                                   Parcel* reply,
+                                   uint32_t flags) {
+    switch(code) {
+        case IS_COMMON_TIME_VALID: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            bool valid;
+            uint32_t timelineID;
+            status_t status = isCommonTimeValid(&valid, &timelineID);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(valid);
+                reply->writeInt32(timelineID);
+            }
+            return OK;
+        } break;
+
+        case COMMON_TIME_TO_LOCAL_TIME: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            int64_t commonTime = data.readInt64();
+            int64_t localTime;
+            status_t status = commonTimeToLocalTime(commonTime, &localTime);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(localTime);
+            }
+            return OK;
+        } break;
+
+        case LOCAL_TIME_TO_COMMON_TIME: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            int64_t localTime = data.readInt64();
+            int64_t commonTime;
+            status_t status = localTimeToCommonTime(localTime, &commonTime);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(commonTime);
+            }
+            return OK;
+        } break;
+
+        case GET_COMMON_TIME: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            int64_t commonTime;
+            status_t status = getCommonTime(&commonTime);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(commonTime);
+            }
+            return OK;
+        } break;
+
+        case GET_COMMON_FREQ: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            uint64_t freq;
+            status_t status = getCommonFreq(&freq);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(freq);
+            }
+            return OK;
+        } break;
+
+        case GET_LOCAL_TIME: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            int64_t localTime;
+            status_t status = getLocalTime(&localTime);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(localTime);
+            }
+            return OK;
+        } break;
+
+        case GET_LOCAL_FREQ: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            uint64_t freq;
+            status_t status = getLocalFreq(&freq);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(freq);
+            }
+            return OK;
+        } break;
+
+        case GET_ESTIMATED_ERROR: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            int32_t error;
+            status_t status = getEstimatedError(&error);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(error);
+            }
+            return OK;
+        } break;
+
+        case GET_TIMELINE_ID: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            uint64_t id;
+            status_t status = getTimelineID(&id);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(static_cast<int64_t>(id));
+            }
+            return OK;
+        } break;
+
+        case GET_STATE: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            State state;
+            status_t status = getState(&state);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(static_cast<int32_t>(state));
+            }
+            return OK;
+        } break;
+
+        case GET_MASTER_ADDRESS: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            struct sockaddr_storage addr;
+            status_t status = getMasterAddr(&addr);
+
+            if ((status == OK) && !canSerializeSockaddr(&addr)) {
+                status = UNKNOWN_ERROR;
+            }
+
+            reply->writeInt32(status);
+
+            if (status == OK) {
+                serializeSockaddr(reply, &addr);
+            }
+
+            return OK;
+        } break;
+
+        case REGISTER_LISTENER: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            sp<ICommonClockListener> listener =
+                interface_cast<ICommonClockListener>(data.readStrongBinder());
+            status_t status = registerListener(listener);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case UNREGISTER_LISTENER: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            sp<ICommonClockListener> listener =
+                interface_cast<ICommonClockListener>(data.readStrongBinder());
+            status_t status = unregisterListener(listener);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+    }
+    return BBinder::onTransact(code, data, reply, flags);
+}
+
+/***** ICommonClockListener *****/
+
+enum {
+    ON_TIMELINE_CHANGED = IBinder::FIRST_CALL_TRANSACTION,
+};
+
+class BpCommonClockListener : public BpInterface<ICommonClockListener>
+{
+  public:
+    BpCommonClockListener(const sp<IBinder>& impl)
+        : BpInterface<ICommonClockListener>(impl) {}
+
+    virtual void onTimelineChanged(uint64_t timelineID) {
+        Parcel data, reply;
+        data.writeInterfaceToken(
+                ICommonClockListener::getInterfaceDescriptor());
+        data.writeInt64(timelineID);
+        remote()->transact(ON_TIMELINE_CHANGED, data, &reply);
+    }
+};
+
+IMPLEMENT_META_INTERFACE(CommonClockListener,
+                         "android.os.ICommonClockListener");
+
+status_t BnCommonClockListener::onTransact(
+        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
+    switch(code) {
+        case ON_TIMELINE_CHANGED: {
+            CHECK_INTERFACE(ICommonClockListener, data, reply);
+            uint64_t timelineID = data.readInt64();
+            onTimelineChanged(timelineID);
+            return NO_ERROR;
+        } break;
+    }
+
+    return BBinder::onTransact(code, data, reply, flags);
+}
+
+}; // namespace android
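A minimal client-side sketch (not part of this change) showing how native code might reach this service through the standard libbinder pattern, using only the interfaces marshaled above; the class and function names here are hypothetical and error handling is trimmed.

// Hypothetical usage of ICommonClock / ICommonClockListener from a native client.
#include <stdio.h>

#include <binder/IServiceManager.h>
#include <common_time/ICommonClock.h>
#include <utils/Errors.h>
#include <utils/String16.h>

using namespace android;

// Listener built on the BnCommonClockListener stub whose onTransact is implemented above.
class TimelineLogger : public BnCommonClockListener {
public:
    virtual void onTimelineChanged(uint64_t timelineID) {
        printf("common clock timeline changed: %llu\n", (unsigned long long)timelineID);
    }
};

static bool printCommonTime() {
    // ICommonClock::kServiceName is "common_time.clock", as defined above.
    sp<IBinder> binder =
            defaultServiceManager()->getService(ICommonClock::kServiceName);
    if (binder == NULL)
        return false;

    sp<ICommonClock> clock = interface_cast<ICommonClock>(binder);
    clock->registerListener(new TimelineLogger());

    int64_t commonTime = 0;
    if (clock->getCommonTime(&commonTime) != OK)
        return false;

    printf("common time is %lld\n", (long long)commonTime);
    return true;
}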
diff --git a/media/common_time/ICommonTimeConfig.cpp b/media/common_time/ICommonTimeConfig.cpp
new file mode 100644
index 0000000..8eb37cb
--- /dev/null
+++ b/media/common_time/ICommonTimeConfig.cpp
@@ -0,0 +1,508 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <linux/socket.h>
+
+#include <common_time/ICommonTimeConfig.h>
+#include <binder/Parcel.h>
+
+#include "utils.h"
+
+namespace android {
+
+/***** ICommonTimeConfig *****/
+
+enum {
+    GET_MASTER_ELECTION_PRIORITY = IBinder::FIRST_CALL_TRANSACTION,
+    SET_MASTER_ELECTION_PRIORITY,
+    GET_MASTER_ELECTION_ENDPOINT,
+    SET_MASTER_ELECTION_ENDPOINT,
+    GET_MASTER_ELECTION_GROUP_ID,
+    SET_MASTER_ELECTION_GROUP_ID,
+    GET_INTERFACE_BINDING,
+    SET_INTERFACE_BINDING,
+    GET_MASTER_ANNOUNCE_INTERVAL,
+    SET_MASTER_ANNOUNCE_INTERVAL,
+    GET_CLIENT_SYNC_INTERVAL,
+    SET_CLIENT_SYNC_INTERVAL,
+    GET_PANIC_THRESHOLD,
+    SET_PANIC_THRESHOLD,
+    GET_AUTO_DISABLE,
+    SET_AUTO_DISABLE,
+    FORCE_NETWORKLESS_MASTER_MODE,
+};
+
+const String16 ICommonTimeConfig::kServiceName("common_time.config");
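As with the clock interface above, a hypothetical sketch (not part of this change) of how a client might reach the config service and adjust one setting through the proxy defined below; the helper name is made up.

// Hypothetical usage of ICommonTimeConfig from a native client.
#include <binder/IServiceManager.h>
#include <common_time/ICommonTimeConfig.h>
#include <utils/Errors.h>
#include <utils/String16.h>

using namespace android;

static status_t bumpMasterElectionPriority(uint8_t priority) {
    // "common_time.config" is the name published under kServiceName above.
    sp<IBinder> binder =
            defaultServiceManager()->getService(ICommonTimeConfig::kServiceName);
    if (binder == NULL)
        return NAME_NOT_FOUND;

    sp<ICommonTimeConfig> cfg = interface_cast<ICommonTimeConfig>(binder);
    return cfg->setMasterElectionPriority(priority);
}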
+
+class BpCommonTimeConfig : public BpInterface<ICommonTimeConfig>
+{
+  public:
+    BpCommonTimeConfig(const sp<IBinder>& impl)
+        : BpInterface<ICommonTimeConfig>(impl) {}
+
+    virtual status_t getMasterElectionPriority(uint8_t *priority) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_MASTER_ELECTION_PRIORITY,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *priority = static_cast<uint8_t>(reply.readInt32());
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setMasterElectionPriority(uint8_t priority) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        data.writeInt32(static_cast<int32_t>(priority));
+        status_t status = remote()->transact(SET_MASTER_ELECTION_PRIORITY,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t getMasterElectionEndpoint(struct sockaddr_storage *addr) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_MASTER_ELECTION_ENDPOINT,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                deserializeSockaddr(&reply, addr);
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setMasterElectionEndpoint(
+            const struct sockaddr_storage *addr) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        if (NULL == addr) {
+            data.writeInt32(0);
+        } else {
+            if (!canSerializeSockaddr(addr))
+                return BAD_VALUE;
+            data.writeInt32(1);
+            serializeSockaddr(&data, addr);
+        }
+        status_t status = remote()->transact(SET_MASTER_ELECTION_ENDPOINT,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t getMasterElectionGroupId(uint64_t *id) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_MASTER_ELECTION_GROUP_ID,
+                                             data,
+                                             &reply);
+
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *id = static_cast<uint64_t>(reply.readInt64());
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setMasterElectionGroupId(uint64_t id) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        data.writeInt64(id);
+        status_t status = remote()->transact(SET_MASTER_ELECTION_GROUP_ID,
+                                             data,
+                                             &reply);
+
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t getInterfaceBinding(String16& ifaceName) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_INTERFACE_BINDING,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                ifaceName = reply.readString16();
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setInterfaceBinding(const String16& ifaceName) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        data.writeString16(ifaceName);
+        status_t status = remote()->transact(SET_INTERFACE_BINDING,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t getMasterAnnounceInterval(int *interval) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_MASTER_ANNOUNCE_INTERVAL,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *interval = reply.readInt32();
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setMasterAnnounceInterval(int interval) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        data.writeInt32(interval);
+        status_t status = remote()->transact(SET_MASTER_ANNOUNCE_INTERVAL,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t getClientSyncInterval(int *interval) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_CLIENT_SYNC_INTERVAL,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *interval = reply.readInt32();
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setClientSyncInterval(int interval) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        data.writeInt32(interval);
+        status_t status = remote()->transact(SET_CLIENT_SYNC_INTERVAL,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t getPanicThreshold(int *threshold) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_PANIC_THRESHOLD,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *threshold = reply.readInt32();
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setPanicThreshold(int threshold) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        data.writeInt32(threshold);
+        status_t status = remote()->transact(SET_PANIC_THRESHOLD,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t getAutoDisable(bool *autoDisable) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_AUTO_DISABLE,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *autoDisable = (0 != reply.readInt32());
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setAutoDisable(bool autoDisable) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        data.writeInt32(autoDisable ? 1 : 0);
+        status_t status = remote()->transact(SET_AUTO_DISABLE,
+                                             data,
+                                             &reply);
+
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t forceNetworklessMasterMode() {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(FORCE_NETWORKLESS_MASTER_MODE,
+                                             data,
+                                             &reply);
+
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+};
+
+IMPLEMENT_META_INTERFACE(CommonTimeConfig, "android.os.ICommonTimeConfig");
+
+status_t BnCommonTimeConfig::onTransact(uint32_t code,
+                                   const Parcel& data,
+                                   Parcel* reply,
+                                   uint32_t flags) {
+    switch(code) {
+        case GET_MASTER_ELECTION_PRIORITY: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            uint8_t priority;
+            status_t status = getMasterElectionPriority(&priority);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(static_cast<int32_t>(priority));
+            }
+            return OK;
+        } break;
+
+        case SET_MASTER_ELECTION_PRIORITY: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            uint8_t priority = static_cast<uint8_t>(data.readInt32());
+            status_t status = setMasterElectionPriority(priority);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case GET_MASTER_ELECTION_ENDPOINT: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            struct sockaddr_storage addr;
+            status_t status = getMasterElectionEndpoint(&addr);
+
+            if ((status == OK) && !canSerializeSockaddr(&addr)) {
+                status = UNKNOWN_ERROR;
+            }
+
+            reply->writeInt32(status);
+
+            if (status == OK) {
+                serializeSockaddr(reply, &addr);
+            }
+
+            return OK;
+        } break;
+
+        case SET_MASTER_ELECTION_ENDPOINT: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            struct sockaddr_storage addr;
+            int hasAddr = data.readInt32();
+
+            status_t status;
+            if (hasAddr) {
+                deserializeSockaddr(&data, &addr);
+                status = setMasterElectionEndpoint(&addr);
+            } else {
+                status = setMasterElectionEndpoint(NULL);
+            }
+
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case GET_MASTER_ELECTION_GROUP_ID: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            uint64_t id;
+            status_t status = getMasterElectionGroupId(&id);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(id);
+            }
+            return OK;
+        } break;
+
+        case SET_MASTER_ELECTION_GROUP_ID: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            uint64_t id = static_cast<uint64_t>(data.readInt64());
+            status_t status = setMasterElectionGroupId(id);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case GET_INTERFACE_BINDING: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            String16 ret;
+            status_t status = getInterfaceBinding(ret);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeString16(ret);
+            }
+            return OK;
+        } break;
+
+        case SET_INTERFACE_BINDING: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            String16 ifaceName;
+            ifaceName = data.readString16();
+            status_t status = setInterfaceBinding(ifaceName);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case GET_MASTER_ANNOUNCE_INTERVAL: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            int interval;
+            status_t status = getMasterAnnounceInterval(&interval);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(interval);
+            }
+            return OK;
+        } break;
+
+        case SET_MASTER_ANNOUNCE_INTERVAL: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            int interval = data.readInt32();
+            status_t status = setMasterAnnounceInterval(interval);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case GET_CLIENT_SYNC_INTERVAL: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            int interval;
+            status_t status = getClientSyncInterval(&interval);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(interval);
+            }
+            return OK;
+        } break;
+
+        case SET_CLIENT_SYNC_INTERVAL: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            int interval = data.readInt32();
+            status_t status = setClientSyncInterval(interval);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case GET_PANIC_THRESHOLD: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            int threshold;
+            status_t status = getPanicThreshold(&threshold);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(threshold);
+            }
+            return OK;
+        } break;
+
+        case SET_PANIC_THRESHOLD: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            int threshold = data.readInt32();
+            status_t status = setPanicThreshold(threshold);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case GET_AUTO_DISABLE: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            bool autoDisable;
+            status_t status = getAutoDisable(&autoDisable);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(autoDisable ? 1 : 0);
+            }
+            return OK;
+        } break;
+
+        case SET_AUTO_DISABLE: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            bool autoDisable = (0 != data.readInt32());
+            status_t status = setAutoDisable(autoDisable);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case FORCE_NETWORKLESS_MASTER_MODE: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            status_t status = forceNetworklessMasterMode();
+            reply->writeInt32(status);
+            return OK;
+        } break;
+    }
+    return BBinder::onTransact(code, data, reply, flags);
+}
+
+}; // namespace android
+
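+// A minimal client-side sketch of how a process might obtain this interface,
+// assuming the standard service manager lookup against
+// ICommonTimeConfig::kServiceName (illustrative only; real clients may go
+// through a higher level helper):
+//
+//   sp<IBinder> binder = defaultServiceManager()->checkService(
+//           ICommonTimeConfig::kServiceName);
+//   sp<ICommonTimeConfig> cfg = interface_cast<ICommonTimeConfig>(binder);
+//   if (cfg != NULL) {
+//       cfg->setMasterElectionPriority(64);  // priority value is arbitrary
+//   }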
diff --git a/media/common_time/cc_helper.cpp b/media/common_time/cc_helper.cpp
new file mode 100644
index 0000000..8d8556c
--- /dev/null
+++ b/media/common_time/cc_helper.cpp
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <common_time/cc_helper.h>
+#include <common_time/ICommonClock.h>
+#include <utils/threads.h>
+
+namespace android {
+
+Mutex CCHelper::lock_;
+sp<ICommonClock> CCHelper::common_clock_;
+sp<ICommonClockListener> CCHelper::common_clock_listener_;
+uint32_t CCHelper::ref_count_ = 0;
+
+bool CCHelper::verifyClock_l() {
+    bool ret = false;
+
+    if (common_clock_ == NULL) {
+        common_clock_ = ICommonClock::getInstance();
+        if (common_clock_ == NULL)
+            goto bailout;
+    }
+
+    if (ref_count_ > 0) {
+        if (common_clock_listener_ == NULL) {
+            common_clock_listener_ = new CommonClockListener();
+            if (common_clock_listener_ == NULL)
+                goto bailout;
+
+            if (OK != common_clock_->registerListener(common_clock_listener_))
+                goto bailout;
+        }
+    }
+
+    ret = true;
+
+bailout:
+    if (!ret) {
+        common_clock_listener_ = NULL;
+        common_clock_ = NULL;
+    }
+    return ret;
+}
+
+CCHelper::CCHelper() {
+    Mutex::Autolock lock(&lock_);
+    ref_count_++;
+    verifyClock_l();
+}
+
+CCHelper::~CCHelper() {
+    Mutex::Autolock lock(&lock_);
+
+    assert(ref_count_ > 0);
+    ref_count_--;
+
+    // If we were the last CCHelper instance in the system, and we had
+    // previously registered a listener, unregister it now so that the common
+    // time service has the chance to go into auto-disabled mode.
+    if (!ref_count_ &&
+       (common_clock_ != NULL) &&
+       (common_clock_listener_ != NULL)) {
+        common_clock_->unregisterListener(common_clock_listener_);
+        common_clock_listener_ = NULL;
+    }
+}
+
+void CCHelper::CommonClockListener::onTimelineChanged(uint64_t timelineID) {
+    // do nothing; listener is only really used as a token so the server can
+    // find out when clients die.
+}
+
+// Helper methods which attempt to make calls to the common time binder
+// service.  If the first attempt fails with DEAD_OBJECT, the helpers will
+// attempt to make a connection to the service again (assuming that the process
+// hosting the service crashed and the client proxy we are holding is dead).
+// If the second attempt also fails, or no connection can be made, we let the
+// error propagate up the stack and let the caller deal with the situation as
+// best they can.
+#define CCHELPER_METHOD(decl, call)                 \
+    status_t CCHelper::decl {                       \
+        Mutex::Autolock lock(&lock_);               \
+                                                    \
+        if (!verifyClock_l())                       \
+            return DEAD_OBJECT;                     \
+                                                    \
+        status_t status = common_clock_->call;      \
+        if (DEAD_OBJECT == status) {                \
+            if (!verifyClock_l())                   \
+                return DEAD_OBJECT;                 \
+            status = common_clock_->call;           \
+        }                                           \
+                                                    \
+        return status;                              \
+    }
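+
+// For reference, a single expansion of CCHELPER_METHOD looks roughly like the
+// following (shown for getCommonTime; the other wrappers only differ in the
+// method name and argument list):
+//
+//   status_t CCHelper::getCommonTime(int64_t* commonTime) {
+//       Mutex::Autolock lock(&lock_);
+//
+//       if (!verifyClock_l())
+//           return DEAD_OBJECT;
+//
+//       status_t status = common_clock_->getCommonTime(commonTime);
+//       if (DEAD_OBJECT == status) {
+//           if (!verifyClock_l())
+//               return DEAD_OBJECT;
+//           status = common_clock_->getCommonTime(commonTime);
+//       }
+//
+//       return status;
+//   }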
+
+#define VERIFY_CLOCK()
+
+CCHELPER_METHOD(isCommonTimeValid(bool* valid, uint32_t* timelineID),
+                isCommonTimeValid(valid, timelineID))
+CCHELPER_METHOD(commonTimeToLocalTime(int64_t commonTime, int64_t* localTime),
+                commonTimeToLocalTime(commonTime, localTime))
+CCHELPER_METHOD(localTimeToCommonTime(int64_t localTime, int64_t* commonTime),
+                localTimeToCommonTime(localTime, commonTime))
+CCHELPER_METHOD(getCommonTime(int64_t* commonTime),
+                getCommonTime(commonTime))
+CCHELPER_METHOD(getCommonFreq(uint64_t* freq),
+                getCommonFreq(freq))
+CCHELPER_METHOD(getLocalTime(int64_t* localTime),
+                getLocalTime(localTime))
+CCHELPER_METHOD(getLocalFreq(uint64_t* freq),
+                getLocalFreq(freq))
+
+}  // namespace android
diff --git a/media/common_time/local_clock.cpp b/media/common_time/local_clock.cpp
new file mode 100644
index 0000000..a7c61fc
--- /dev/null
+++ b/media/common_time/local_clock.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "common_time"
+#include <utils/Log.h>
+
+#include <assert.h>
+#include <stdint.h>
+
+#include <common_time/local_clock.h>
+#include <hardware/hardware.h>
+#include <hardware/local_time_hal.h>
+#include <utils/Errors.h>
+#include <utils/threads.h>
+
+namespace android {
+
+Mutex LocalClock::dev_lock_;
+local_time_hw_device_t* LocalClock::dev_ = NULL;
+
+LocalClock::LocalClock() {
+    int res;
+    const hw_module_t* mod;
+
+    AutoMutex lock(&dev_lock_);
+
+    if (dev_ != NULL)
+        return;
+
+    res = hw_get_module_by_class(LOCAL_TIME_HARDWARE_MODULE_ID, NULL, &mod);
+    if (res) {
+        ALOGE("Failed to open local time HAL module (res = %d)", res);
+    } else {
+        res = local_time_hw_device_open(mod, &dev_);
+        if (res) {
+            ALOGE("Failed to open local time HAL device (res = %d)", res);
+            dev_ = NULL;
+        }
+    }
+}
+
+bool LocalClock::initCheck() {
+    return (NULL != dev_);
+}
+
+int64_t LocalClock::getLocalTime() {
+    assert(NULL != dev_);
+    assert(NULL != dev_->get_local_time);
+
+    return dev_->get_local_time(dev_);
+}
+
+uint64_t LocalClock::getLocalFreq() {
+    assert(NULL != dev_);
+    assert(NULL != dev_->get_local_freq);
+
+    return dev_->get_local_freq(dev_);
+}
+
+status_t LocalClock::setLocalSlew(int16_t rate) {
+    assert(NULL != dev_);
+
+    if (!dev_->set_local_slew)
+        return INVALID_OPERATION;
+
+    return static_cast<status_t>(dev_->set_local_slew(dev_, rate));
+}
+
+int32_t LocalClock::getDebugLog(struct local_time_debug_event* records,
+                                int max_records) {
+    assert(NULL != dev_);
+
+    if (!dev_->get_debug_log)
+        return INVALID_OPERATION;
+
+    return dev_->get_debug_log(dev_, records, max_records);
+}
+
+}  // namespace android
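+
+// Illustrative usage sketch, assuming (as the local time HAL suggests) that
+// getLocalTime() returns ticks of a free-running counter whose rate is
+// getLocalFreq() in Hz:
+//
+//   LocalClock clock;
+//   if (clock.initCheck()) {
+//       int64_t  start = clock.getLocalTime();
+//       // ... do some work ...
+//       int64_t  end   = clock.getLocalTime();
+//       uint64_t freq  = clock.getLocalFreq();
+//       double   elapsed_sec = static_cast<double>(end - start) / freq;
+//   }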
diff --git a/media/common_time/utils.cpp b/media/common_time/utils.cpp
new file mode 100644
index 0000000..6539171
--- /dev/null
+++ b/media/common_time/utils.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <arpa/inet.h>
+#include <linux/socket.h>
+#include <string.h>
+
+#include <binder/Parcel.h>
+
+#include "utils.h"
+
+namespace android {
+
+bool canSerializeSockaddr(const struct sockaddr_storage* addr) {
+    switch (addr->ss_family) {
+        case AF_INET:
+        case AF_INET6:
+            return true;
+        default:
+            return false;
+    }
+}
+
+void serializeSockaddr(Parcel* p, const struct sockaddr_storage* addr) {
+    switch (addr->ss_family) {
+        case AF_INET: {
+            const struct sockaddr_in* s =
+                reinterpret_cast<const struct sockaddr_in*>(addr);
+            p->writeInt32(AF_INET);
+            p->writeInt32(ntohl(s->sin_addr.s_addr));
+            p->writeInt32(static_cast<int32_t>(ntohs(s->sin_port)));
+        } break;
+
+        case AF_INET6: {
+            const struct sockaddr_in6* s =
+                reinterpret_cast<const struct sockaddr_in6*>(addr);
+            const int32_t* a =
+                reinterpret_cast<const int32_t*>(s->sin6_addr.s6_addr);
+            p->writeInt32(AF_INET6);
+            p->writeInt32(ntohl(a[0]));
+            p->writeInt32(ntohl(a[1]));
+            p->writeInt32(ntohl(a[2]));
+            p->writeInt32(ntohl(a[3]));
+            p->writeInt32(static_cast<int32_t>(ntohs(s->sin6_port)));
+            p->writeInt32(ntohl(s->sin6_flowinfo));
+            p->writeInt32(ntohl(s->sin6_scope_id));
+        } break;
+    }
+}
+
+status_t deserializeSockaddr(const Parcel* p, struct sockaddr_storage* addr) {
+    memset(addr, 0, sizeof(*addr));
+
+    addr->ss_family = p->readInt32();
+    switch(addr->ss_family) {
+        case AF_INET: {
+            struct sockaddr_in* s =
+                reinterpret_cast<struct sockaddr_in*>(addr);
+            s->sin_addr.s_addr = htonl(p->readInt32());
+            s->sin_port = htons(static_cast<uint16_t>(p->readInt32()));
+        } break;
+
+        case AF_INET6: {
+            struct sockaddr_in6* s =
+                reinterpret_cast<struct sockaddr_in6*>(addr);
+            int32_t* a = reinterpret_cast<int32_t*>(s->sin6_addr.s6_addr);
+
+            a[0] = htonl(p->readInt32());
+            a[1] = htonl(p->readInt32());
+            a[2] = htonl(p->readInt32());
+            a[3] = htonl(p->readInt32());
+            s->sin6_port = htons(static_cast<uint16_t>(p->readInt32()));
+            s->sin6_flowinfo = htonl(p->readInt32());
+            s->sin6_scope_id = htonl(p->readInt32());
+        } break;
+
+        default:
+            // Unknown address family; report failure so the return type
+            // matches the status_t declaration in utils.h.
+            return BAD_VALUE;
+    }
+
+    return OK;
+}
+
+}  // namespace android
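+
+// Illustrative round trip through the helpers above (the address and port are
+// arbitrary):
+//
+//   struct sockaddr_in v4;
+//   memset(&v4, 0, sizeof(v4));
+//   v4.sin_family      = AF_INET;
+//   v4.sin_port        = htons(9999);
+//   v4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+//
+//   const struct sockaddr_storage* src =
+//           reinterpret_cast<const struct sockaddr_storage*>(&v4);
+//   Parcel p;
+//   if (canSerializeSockaddr(src))
+//       serializeSockaddr(&p, src);
+//
+//   struct sockaddr_storage dst;
+//   p.setDataPosition(0);
+//   deserializeSockaddr(&p, &dst);  // dst now describes the same endpoint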
diff --git a/media/common_time/utils.h b/media/common_time/utils.h
new file mode 100644
index 0000000..ce79d0d
--- /dev/null
+++ b/media/common_time/utils.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_LIBCOMMONCLOCK_UTILS_H
+#define ANDROID_LIBCOMMONCLOCK_UTILS_H
+
+#include <linux/socket.h>
+
+#include <binder/Parcel.h>
+#include <utils/Errors.h>
+
+namespace android {
+
+extern bool canSerializeSockaddr(const struct sockaddr_storage* addr);
+extern void serializeSockaddr(Parcel* p, const struct sockaddr_storage* addr);
+extern status_t deserializeSockaddr(const Parcel* p,
+                                    struct sockaddr_storage* addr);
+
+};  // namespace android
+
+#endif  // ANDROID_LIBCOMMONCLOCK_UTILS_H
diff --git a/media/libaah_rtp/Android.mk b/media/libaah_rtp/Android.mk
new file mode 100644
index 0000000..6c927ba
--- /dev/null
+++ b/media/libaah_rtp/Android.mk
@@ -0,0 +1,40 @@
+LOCAL_PATH:= $(call my-dir)
+#
+# libaah_rtp
+#
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := libaah_rtp
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SRC_FILES := \
+    aah_decoder_pump.cpp \
+    aah_rx_player.cpp \
+    aah_rx_player_core.cpp \
+    aah_rx_player_ring_buffer.cpp \
+    aah_rx_player_substream.cpp \
+    aah_tx_packet.cpp \
+    aah_tx_player.cpp \
+    aah_tx_sender.cpp \
+    pipe_event.cpp
+
+LOCAL_C_INCLUDES := \
+    frameworks/base/include \
+    frameworks/base/media \
+    frameworks/base/media/libstagefright \
+    frameworks/native/include/media/openmax
+
+LOCAL_SHARED_LIBRARIES := \
+    libcommon_time_client \
+    libbinder \
+    libmedia \
+    libmedia_native \
+    libstagefright \
+    libstagefright_foundation \
+    libutils
+
+LOCAL_LDLIBS := \
+    -lpthread
+
+include $(BUILD_SHARED_LIBRARY)
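+
+# From a configured AOSP build environment this module can typically be built
+# on its own with the standard helper, e.g.:
+#
+#   mmm frameworks/base/media/libaah_rtp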
diff --git a/media/libaah_rtp/aah_decoder_pump.cpp b/media/libaah_rtp/aah_decoder_pump.cpp
new file mode 100644
index 0000000..bebba54
--- /dev/null
+++ b/media/libaah_rtp/aah_decoder_pump.cpp
@@ -0,0 +1,519 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <poll.h>
+#include <pthread.h>
+
+#include <common_time/cc_helper.h>
+#include <media/AudioSystem.h>
+#include <media/AudioTrack.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/Utils.h>
+#include <utils/Timers.h>
+#include <utils/threads.h>
+
+#include "aah_decoder_pump.h"
+
+namespace android {
+
+static const long long kLongDecodeErrorThreshold = 1000000ll;
+static const uint32_t kMaxLongErrorsBeforeFatal = 3;
+static const uint32_t kMaxErrorsBeforeFatal = 60;
+
+AAH_DecoderPump::AAH_DecoderPump(OMXClient& omx)
+    : omx_(omx)
+    , thread_status_(OK)
+    , renderer_(NULL)
+    , last_queued_pts_valid_(false)
+    , last_queued_pts_(0)
+    , last_ts_transform_valid_(false)
+    , last_volume_(0xFF) {
+    thread_ = new ThreadWrapper(this);
+}
+
+AAH_DecoderPump::~AAH_DecoderPump() {
+    shutdown();
+}
+
+status_t AAH_DecoderPump::initCheck() {
+    if (thread_ == NULL) {
+        ALOGE("Failed to allocate thread");
+        return NO_MEMORY;
+    }
+
+    return OK;
+}
+
+status_t AAH_DecoderPump::queueForDecode(MediaBuffer* buf) {
+    if (NULL == buf) {
+        return BAD_VALUE;
+    }
+
+    if (OK != thread_status_) {
+        return thread_status_;
+    }
+
+    {   // Explicit scope for AutoMutex pattern.
+        AutoMutex lock(&thread_lock_);
+        in_queue_.push_back(buf);
+    }
+
+    thread_cond_.signal();
+
+    return OK;
+}
+
+void AAH_DecoderPump::queueToRenderer(MediaBuffer* decoded_sample) {
+    Mutex::Autolock lock(&render_lock_);
+    sp<MetaData> meta;
+    int64_t ts;
+    status_t res;
+
+    // Fetch the metadata and make sure the sample has a timestamp.  We
+    // cannot render samples which are missing PTSs.
+    meta = decoded_sample->meta_data();
+    if ((meta == NULL) || (!meta->findInt64(kKeyTime, &ts))) {
+        ALOGV("Decoded sample missing timestamp, cannot render.");
+        CHECK(false);
+    } else {
+        // If we currently are not holding on to a renderer, go ahead and
+        // make one now.
+        if (NULL == renderer_) {
+            renderer_ = new TimedAudioTrack();
+            if (NULL != renderer_) {
+                int frameCount;
+                AudioTrack::getMinFrameCount(&frameCount,
+                        AUDIO_STREAM_DEFAULT,
+                        static_cast<int>(format_sample_rate_));
+                audio_channel_mask_t ch_format =
+                        audio_channel_out_mask_from_count(format_channels_);
+
+                res = renderer_->set(AUDIO_STREAM_DEFAULT,
+                        format_sample_rate_,
+                        AUDIO_FORMAT_PCM_16_BIT,
+                        ch_format,
+                        frameCount);
+                if (res != OK) {
+                    ALOGE("Failed to setup audio renderer. (res = %d)", res);
+                    delete renderer_;
+                    renderer_ = NULL;
+                } else {
+                    CHECK(last_ts_transform_valid_);
+
+                    res = renderer_->setMediaTimeTransform(
+                            last_ts_transform_, TimedAudioTrack::COMMON_TIME);
+                    if (res != NO_ERROR) {
+                        ALOGE("Failed to set media time transform on AudioTrack"
+                              " (res = %d)", res);
+                        delete renderer_;
+                        renderer_ = NULL;
+                    } else {
+                        float volume = static_cast<float>(last_volume_)
+                                     / 255.0f;
+                        if (renderer_->setVolume(volume, volume) != OK) {
+                            ALOGW("%s: setVolume failed", __FUNCTION__);
+                        }
+
+                        renderer_->start();
+                    }
+                }
+            } else {
+                ALOGE("Failed to allocate AudioTrack to use as a renderer.");
+            }
+        }
+
+        if (NULL != renderer_) {
+            uint8_t* decoded_data =
+                reinterpret_cast<uint8_t*>(decoded_sample->data());
+            uint32_t decoded_amt  = decoded_sample->range_length();
+            decoded_data += decoded_sample->range_offset();
+
+            sp<IMemory> pcm_payload;
+            res = renderer_->allocateTimedBuffer(decoded_amt, &pcm_payload);
+            if (res != OK) {
+                ALOGE("Failed to allocate %d byte audio track buffer."
+                      " (res = %d)", decoded_amt, res);
+            } else {
+                memcpy(pcm_payload->pointer(), decoded_data, decoded_amt);
+
+                res = renderer_->queueTimedBuffer(pcm_payload, ts);
+                if (res != OK) {
+                    ALOGE("Failed to queue %d byte audio track buffer with"
+                          " media PTS %lld. (res = %d)", decoded_amt, ts, res);
+                } else {
+                    last_queued_pts_valid_ = true;
+                    last_queued_pts_ = ts;
+                }
+            }
+
+        } else {
+            ALOGE("No renderer, dropping audio payload.");
+        }
+    }
+}
+
+void AAH_DecoderPump::stopAndCleanupRenderer() {
+    if (NULL == renderer_) {
+        return;
+    }
+
+    renderer_->stop();
+    delete renderer_;
+    renderer_ = NULL;
+}
+
+void AAH_DecoderPump::setRenderTSTransform(const LinearTransform& trans) {
+    Mutex::Autolock lock(&render_lock_);
+
+    if (last_ts_transform_valid_ && !memcmp(&trans,
+                                            &last_ts_transform_,
+                                            sizeof(trans))) {
+        return;
+    }
+
+    last_ts_transform_       = trans;
+    last_ts_transform_valid_ = true;
+
+    if (NULL != renderer_) {
+        status_t res = renderer_->setMediaTimeTransform(
+                last_ts_transform_, TimedAudioTrack::COMMON_TIME);
+        if (res != NO_ERROR) {
+            ALOGE("Failed to set media time transform on AudioTrack"
+                  " (res = %d)", res);
+        }
+    }
+}
+
+void AAH_DecoderPump::setRenderVolume(uint8_t volume) {
+    Mutex::Autolock lock(&render_lock_);
+
+    if (volume == last_volume_) {
+        return;
+    }
+
+    last_volume_ = volume;
+    if (renderer_ != NULL) {
+        float vol = static_cast<float>(last_volume_) / 255.0f;
+        if (renderer_->setVolume(vol, vol) != OK) {
+            ALOGW("%s: setVolume failed", __FUNCTION__);
+        }
+    }
+}
+
+// isAboutToUnderflow is something of a hack used to figure out when it might be
+// time to give up on trying to fill in a gap in the RTP sequence and simply
+// move on with a discontinuity.  If we had perfect knowledge of when we were
+// going to underflow, it would not be a hack, but unfortunately we do not.
+// Right now, we just take the PTS of the last sample queued, and check to see
+// if its presentation time is within kAboutToUnderflowThreshold from now.  If
+// it is, then we say that we are about to underflow.  This decision is based on
+// two (possibly invalid) assumptions.
+//
+// 1) The transmitter is leading the clock by more than
+//    kAboutToUnderflowThreshold.
+// 2) The delta between the PTS of the last sample queued and the next sample
+//    is less than the transmitter's clock lead amount.
+//
+// Right now, the default transmitter lead time is 1 second, which is a pretty
+// large number and greater than the 50mSec that kAboutToUnderflowThreshold is
+// currently set to.  This should satisfy assumption #1 for now, but changes to
+// the transmitter clock lead time could affect this.
+//
+// For non-sparse streams with a homogeneous sample rate (the vast majority of
+// streams in the world), the delta between any two adjacent PTSs will always be
+// the homogeneous sample period.  It is very uncommon to see a sample period
+// greater than the 1 second clock lead we are currently using, and you
+// certainly will not see it in an MP3 file which should satisfy assumption #2.
+// Sparse audio streams (where no audio is transmitted for long periods of
+// silence) and extremely low framerate video streams (like an MPEG-2 slideshow
+// or the video stream for a pay TV audio channel) are examples of streams which
+// might violate assumption #2.
+bool AAH_DecoderPump::isAboutToUnderflow(int64_t threshold) {
+    Mutex::Autolock lock(&render_lock_);
+
+    // If we have never queued anything to the decoder, we really don't know if
+    // we are going to underflow or not.
+    if (!last_queued_pts_valid_ || !last_ts_transform_valid_) {
+        return false;
+    }
+
+    // If we don't have access to Common Time, then things are Very Bad
+    // elsewhere in the system; it pretty much does not matter what we do here.
+    // Since we cannot really tell if we are about to underflow or not, it's
+    // probably best to assume that we are not and proceed accordingly.
+    int64_t tt_now;
+    if (OK != cc_helper_.getCommonTime(&tt_now)) {
+        return false;
+    }
+
+    // Transform from media time to common time.
+    int64_t last_queued_pts_tt;
+    if (!last_ts_transform_.doForwardTransform(last_queued_pts_,
+                &last_queued_pts_tt)) {
+        return false;
+    }
+
+    // Check to see if we are within threshold of underflowing.
+    return ((tt_now + threshold - last_queued_pts_tt) > 0);
+}
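+
+// Worked example of the heuristic above (numbers are illustrative): with the
+// default 1 second transmitter lead and a threshold of 50,000 uSec, if the
+// last queued sample's PTS maps to common time T, this method starts returning
+// true once the common time clock passes (T - 50,000 uSec), i.e. once less
+// than 50 mSec of queued audio remains to be presented.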
+
+void* AAH_DecoderPump::workThread() {
+    // No need to lock when accessing decoder_ from the thread.  The
+    // implementation of init and shutdown ensure that other threads never touch
+    // decoder_ while the work thread is running.
+    CHECK(decoder_ != NULL);
+    CHECK(format_  != NULL);
+
+    // Start the decoder and note its result code.  If something goes horribly
+    // wrong, callers of queueForDecode and getOutput will be able to detect
+    // that the thread encountered a fatal error and shut down by examining
+    // thread_status_.
+    thread_status_ = decoder_->start(format_.get());
+    if (OK != thread_status_) {
+        ALOGE("AAH_DecoderPump's work thread failed to start decoder"
+              " (res = %d)", thread_status_);
+        return NULL;
+    }
+
+    DurationTimer decode_timer;
+    uint32_t consecutive_long_errors = 0;
+    uint32_t consecutive_errors = 0;
+
+    while (!thread_->exitPending()) {
+        status_t res;
+        MediaBuffer* bufOut = NULL;
+
+        decode_timer.start();
+        res = decoder_->read(&bufOut);
+        decode_timer.stop();
+
+        if (res == INFO_FORMAT_CHANGED) {
+            // Format has changed.  Destroy our current renderer so that a new
+            // one can be created during queueToRenderer with the proper format.
+            //
+            // TODO : In order to transition seamlessly, we should change this
+            // to put the old renderer in a queue to play out completely before
+            // we destroy it.  We can still create a new renderer; the timed
+            // nature of the renderer should ensure a seamless splice.
+            stopAndCleanupRenderer();
+            res = OK;
+        }
+
+        // Try to be a little nuanced in our handling of actual decode errors.
+        // Errors could happen because of minor stream corruption or because of
+        // transient resource limitations.  In these cases, we would rather drop
+        // a little bit of output and ride out the unpleasantness than throw up
+        // our hands and abort everything.
+        //
+        // OTOH - When things are really bad (like we have a non-transient
+        // resource or bookkeeping issue, or the stream being fed to us is just
+        // complete and total garbage) we really want to terminate playback and
+        // raise an error condition all the way up to the application level so
+        // they can deal with it.
+        //
+        // Unfortunately, the error codes returned by the decoder can be a
+        // little non-specific.  For example, if an OMXCodec times out
+        // attempting to obtain an output buffer, the error we get back is a
+        // generic -1.  Try to distinguish between this resource timeout error
+        // and ES corruption error by timing how long the decode operation
+        // takes.  Maintain accounting for both errors and "long errors".  If we
+        // get more than a certain number of consecutive errors of either type,
+        // consider it fatal and shutdown (which will cause the error to
+        // propagate all of the way up to the application level).  The threshold
+        // for "long errors" is deliberately much lower than that of normal
+        // decode errors, both because of how long they take to happen and
+        // because they generally indicate resource limitation errors which are
+        // unlikely to go away in pathologically bad cases (in contrast to
+        // stream corruption errors which might happen 20 times in a row and
+        // then be suddenly OK again)
+        if (res != OK) {
+            consecutive_errors++;
+            if (decode_timer.durationUsecs() >= kLongDecodeErrorThreshold)
+                consecutive_long_errors++;
+
+            CHECK(NULL == bufOut);
+
+            ALOGW("%s: Failed to decode data (res = %d)",
+                    __PRETTY_FUNCTION__, res);
+
+            if ((consecutive_errors      >= kMaxErrorsBeforeFatal) ||
+                (consecutive_long_errors >= kMaxLongErrorsBeforeFatal)) {
+                ALOGE("%s: Maximum decode error threshold has been reached."
+                      " There have been %d consecutive decode errors, and %d"
+                      " consecutive decode operations which resulted in errors"
+                      " and took more than %lld uSec to process.  The last"
+                      " decode operation took %lld uSec.",
+                      __PRETTY_FUNCTION__,
+                      consecutive_errors, consecutive_long_errors,
+                      kLongDecodeErrorThreshold, decode_timer.durationUsecs());
+                thread_status_ = res;
+                break;
+            }
+
+            continue;
+        }
+
+        if (NULL == bufOut) {
+            ALOGW("%s: Successful decode, but no buffer produced",
+                    __PRETTY_FUNCTION__);
+            continue;
+        }
+
+        // Successful decode (with actual output produced).  Clear the error
+        // counters.
+        consecutive_errors = 0;
+        consecutive_long_errors = 0;
+
+        queueToRenderer(bufOut);
+        bufOut->release();
+    }
+
+    decoder_->stop();
+    stopAndCleanupRenderer();
+
+    return NULL;
+}
+
+status_t AAH_DecoderPump::init(const sp<MetaData>& params) {
+    Mutex::Autolock lock(&init_lock_);
+
+    if (decoder_ != NULL) {
+        // already inited
+        return OK;
+    }
+
+    if (params == NULL) {
+        return BAD_VALUE;
+    }
+
+    if (!params->findInt32(kKeyChannelCount, &format_channels_)) {
+        return BAD_VALUE;
+    }
+
+    if (!params->findInt32(kKeySampleRate, &format_sample_rate_)) {
+        return BAD_VALUE;
+    }
+
+    CHECK(OK == thread_status_);
+    CHECK(decoder_ == NULL);
+
+    status_t ret_val = UNKNOWN_ERROR;
+
+    // Cache the format and attempt to create the decoder.
+    format_  = params;
+    decoder_ = OMXCodec::Create(
+            omx_.interface(),       // IOMX Handle
+            format_,                // Metadata for substream (indicates codec)
+            false,                  // Make a decoder, not an encoder
+            sp<MediaSource>(this)); // We will be the source for this codec.
+
+    if (decoder_ == NULL) {
+      ALOGE("Failed to allocate decoder in %s", __PRETTY_FUNCTION__);
+      goto bailout;
+    }
+
+    // Fire up the pump thread.  It will take care of starting and stopping the
+    // decoder.
+    ret_val = thread_->run("aah_decode_pump", ANDROID_PRIORITY_AUDIO);
+    if (OK != ret_val) {
+        ALOGE("Failed to start work thread in %s (res = %d)",
+                __PRETTY_FUNCTION__, ret_val);
+        goto bailout;
+    }
+
+bailout:
+    if (OK != ret_val) {
+        decoder_ = NULL;
+        format_  = NULL;
+    }
+
+    return ret_val;
+}
+
+status_t AAH_DecoderPump::shutdown() {
+    Mutex::Autolock lock(&init_lock_);
+    return shutdown_l();
+}
+
+status_t AAH_DecoderPump::shutdown_l() {
+    thread_->requestExit();
+    thread_cond_.signal();
+    thread_->requestExitAndWait();
+
+    for (MBQueue::iterator iter = in_queue_.begin();
+         iter != in_queue_.end();
+         ++iter) {
+        (*iter)->release();
+    }
+    in_queue_.clear();
+
+    last_queued_pts_valid_   = false;
+    last_ts_transform_valid_ = false;
+    last_volume_             = 0xFF;
+    thread_status_           = OK;
+
+    decoder_ = NULL;
+    format_  = NULL;
+
+    return OK;
+}
+
+status_t AAH_DecoderPump::read(MediaBuffer **buffer,
+                               const ReadOptions *options) {
+    if (!buffer) {
+        return BAD_VALUE;
+    }
+
+    *buffer = NULL;
+
+    // While it's not time to shut down, and we have no data to process, wait.
+    AutoMutex lock(&thread_lock_);
+    while (!thread_->exitPending() && in_queue_.empty())
+        thread_cond_.wait(thread_lock_);
+
+    // At this point, if it's not time to shut down, then we must have something
+    // to process.  Go ahead and pop the front of the queue for processing.
+    if (!thread_->exitPending()) {
+        CHECK(!in_queue_.empty());
+
+        *buffer = *(in_queue_.begin());
+        in_queue_.erase(in_queue_.begin());
+    }
+
+    // If we managed to get a buffer, then everything must be OK.  If not, then
+    // we must be shutting down.
+    return (NULL == *buffer) ? INVALID_OPERATION : OK;
+}
+
+AAH_DecoderPump::ThreadWrapper::ThreadWrapper(AAH_DecoderPump* owner)
+    : Thread(false /* canCallJava */)
+    , owner_(owner) {
+}
+
+bool AAH_DecoderPump::ThreadWrapper::threadLoop() {
+    CHECK(NULL != owner_);
+    owner_->workThread();
+    return false;
+}
+
+}  // namespace android
diff --git a/media/libaah_rtp/aah_decoder_pump.h b/media/libaah_rtp/aah_decoder_pump.h
new file mode 100644
index 0000000..4d57e49
--- /dev/null
+++ b/media/libaah_rtp/aah_decoder_pump.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __DECODER_PUMP_H__
+#define __DECODER_PUMP_H__
+
+#include <pthread.h>
+
+#include <common_time/cc_helper.h>
+#include <media/stagefright/MediaSource.h>
+#include <utils/LinearTransform.h>
+#include <utils/List.h>
+#include <utils/threads.h>
+
+namespace android {
+
+class MetaData;
+class OMXClient;
+class TimedAudioTrack;
+
+class AAH_DecoderPump : public MediaSource {
+  public:
+    explicit AAH_DecoderPump(OMXClient& omx);
+    status_t initCheck();
+
+    status_t queueForDecode(MediaBuffer* buf);
+
+    status_t init(const sp<MetaData>& params);
+    status_t shutdown();
+
+    void setRenderTSTransform(const LinearTransform& trans);
+    void setRenderVolume(uint8_t volume);
+    bool isAboutToUnderflow(int64_t threshold);
+    status_t getStatus() const { return thread_status_; }
+
+    // MediaSource methods
+    virtual status_t     start(MetaData *params) { return OK; }
+    virtual sp<MetaData> getFormat() { return format_; }
+    virtual status_t     stop() { return OK; }
+    virtual status_t     read(MediaBuffer **buffer,
+                              const ReadOptions *options);
+
+  protected:
+    virtual ~AAH_DecoderPump();
+
+  private:
+    class ThreadWrapper : public Thread {
+      public:
+        friend class AAH_DecoderPump;
+        explicit ThreadWrapper(AAH_DecoderPump* owner);
+
+      private:
+        virtual bool threadLoop();
+        AAH_DecoderPump* owner_;
+
+        DISALLOW_EVIL_CONSTRUCTORS(ThreadWrapper);
+    };
+
+    void* workThread();
+    virtual status_t shutdown_l();
+    void queueToRenderer(MediaBuffer* decoded_sample);
+    void stopAndCleanupRenderer();
+
+    sp<MetaData>        format_;
+    int32_t             format_channels_;   // channel count, not channel mask
+    int32_t             format_sample_rate_;
+
+    sp<MediaSource>     decoder_;
+    OMXClient&          omx_;
+    Mutex               init_lock_;
+
+    sp<ThreadWrapper>   thread_;
+    Condition           thread_cond_;
+    Mutex               thread_lock_;
+    status_t            thread_status_;
+
+    Mutex               render_lock_;
+    TimedAudioTrack*    renderer_;
+    bool                last_queued_pts_valid_;
+    int64_t             last_queued_pts_;
+    bool                last_ts_transform_valid_;
+    LinearTransform     last_ts_transform_;
+    uint8_t             last_volume_;
+    CCHelper            cc_helper_;
+
+    // protected by the thread_lock_
+    typedef List<MediaBuffer*> MBQueue;
+    MBQueue in_queue_;
+
+    DISALLOW_EVIL_CONSTRUCTORS(AAH_DecoderPump);
+};
+
+}  // namespace android
+#endif  // __DECODER_PUMP_H__
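+
+// Typical lifecycle implied by the interface above (a sketch only; the real
+// callers live in the tx/rx player code and may differ in detail):
+//
+//   OMXClient omx;
+//   CHECK(omx.connect() == OK);
+//
+//   sp<AAH_DecoderPump> pump = new AAH_DecoderPump(omx);
+//   CHECK(pump->initCheck() == OK);
+//
+//   sp<MetaData> fmt = ...;  // must carry kKeyChannelCount and kKeySampleRate
+//   pump->init(fmt);                    // spins up the decode thread
+//   pump->setRenderTSTransform(xform);  // media time -> common time mapping
+//   pump->queueForDecode(buf);          // buf is released by the pump
+//   ...
+//   pump->shutdown();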
diff --git a/media/libaah_rtp/aah_rx_player.cpp b/media/libaah_rtp/aah_rx_player.cpp
new file mode 100644
index 0000000..9dd79fd
--- /dev/null
+++ b/media/libaah_rtp/aah_rx_player.cpp
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+//#define LOG_NDEBUG 0
+
+#include <binder/IServiceManager.h>
+#include <media/MediaPlayerInterface.h>
+#include <utils/Log.h>
+
+#include "aah_rx_player.h"
+
+namespace android {
+
+const uint32_t AAH_RXPlayer::kRTPRingBufferSize = 1 << 10;
+
+sp<MediaPlayerBase> createAAH_RXPlayer() {
+    sp<MediaPlayerBase> ret = new AAH_RXPlayer();
+    return ret;
+}
+
+AAH_RXPlayer::AAH_RXPlayer()
+        : ring_buffer_(kRTPRingBufferSize)
+        , substreams_(NULL) {
+    thread_wrapper_ = new ThreadWrapper(*this);
+
+    is_playing_          = false;
+    multicast_joined_    = false;
+    transmitter_known_   = false;
+    current_epoch_known_ = false;
+    data_source_set_     = false;
+    sock_fd_             = -1;
+
+    substreams_.setCapacity(4);
+
+    memset(&listen_addr_,      0, sizeof(listen_addr_));
+    memset(&transmitter_addr_, 0, sizeof(transmitter_addr_));
+
+    fetchAudioFlinger();
+}
+
+AAH_RXPlayer::~AAH_RXPlayer() {
+    reset_l();
+    CHECK(substreams_.size() == 0);
+    omx_.disconnect();
+}
+
+status_t AAH_RXPlayer::initCheck() {
+    if (thread_wrapper_ == NULL) {
+        ALOGE("Failed to allocate thread wrapper!");
+        return NO_MEMORY;
+    }
+
+    if (!ring_buffer_.initCheck()) {
+        ALOGE("Failed to allocate reassembly ring buffer!");
+        return NO_MEMORY;
+    }
+
+    // Check for the presence of the common time service by attempting to query
+    // for CommonTime's frequency.  If we get an error back, we cannot talk to
+    // the service at all and should abort now.
+    status_t res;
+    uint64_t freq;
+    res = cc_helper_.getCommonFreq(&freq);
+    if (OK != res) {
+        ALOGE("Failed to connect to common time service!");
+        return res;
+    }
+
+    return omx_.connect();
+}
+
+status_t AAH_RXPlayer::setDataSource(
+        const char *url,
+        const KeyedVector<String8, String8> *headers) {
+    AutoMutex api_lock(&api_lock_);
+    uint32_t a, b, c, d;
+    uint16_t port;
+
+    if (data_source_set_) {
+        return INVALID_OPERATION;
+    }
+
+    if (NULL == url) {
+        return BAD_VALUE;
+    }
+
+    if (5 != sscanf(url, "%*[^:/]://%u.%u.%u.%u:%hu", &a, &b, &c, &d, &port)) {
+        ALOGE("Failed to parse URL \"%s\"", url);
+        return BAD_VALUE;
+    }
+
+    if ((a > 255) || (b > 255) || (c > 255) || (d > 255) || (port == 0)) {
+        ALOGE("Bad multicast address \"%s\"", url);
+        return BAD_VALUE;
+    }
+
+    ALOGI("setDataSource :: %u.%u.%u.%u:%hu", a, b, c, d, port);
+
+    a = (a << 24) | (b << 16) | (c <<  8) | d;
+
+    memset(&listen_addr_, 0, sizeof(listen_addr_));
+    listen_addr_.sin_family      = AF_INET;
+    listen_addr_.sin_port        = htons(port);
+    listen_addr_.sin_addr.s_addr = htonl(a);
+    data_source_set_ = true;
+
+    return OK;
+}
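+
+// For reference, the parser above accepts URLs of the form
+// "<scheme>://<a>.<b>.<c>.<d>:<port>"; the scheme itself is ignored by the
+// sscanf format string.  For example (the address and port are illustrative):
+//
+//   player->setDataSource("aahRX://239.1.1.1:16384", NULL);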
+
+status_t AAH_RXPlayer::setDataSource(int fd, int64_t offset, int64_t length) {
+    return INVALID_OPERATION;
+}
+
+status_t AAH_RXPlayer::setVideoSurface(const sp<Surface>& surface) {
+    return OK;
+}
+
+status_t AAH_RXPlayer::setVideoSurfaceTexture(
+        const sp<ISurfaceTexture>& surfaceTexture) {
+    return OK;
+}
+
+status_t AAH_RXPlayer::prepare() {
+    return OK;
+}
+
+status_t AAH_RXPlayer::prepareAsync() {
+    sendEvent(MEDIA_PREPARED);
+    return OK;
+}
+
+status_t AAH_RXPlayer::start() {
+    AutoMutex api_lock(&api_lock_);
+
+    if (is_playing_) {
+        return OK;
+    }
+
+    status_t res = startWorkThread();
+    is_playing_ = (res == OK);
+    return res;
+}
+
+status_t AAH_RXPlayer::stop() {
+    return pause();
+}
+
+status_t AAH_RXPlayer::pause() {
+    AutoMutex api_lock(&api_lock_);
+    stopWorkThread();
+    CHECK(sock_fd_ < 0);
+    is_playing_ = false;
+    return OK;
+}
+
+bool AAH_RXPlayer::isPlaying() {
+    AutoMutex api_lock(&api_lock_);
+    return is_playing_;
+}
+
+status_t AAH_RXPlayer::seekTo(int msec) {
+    sendEvent(MEDIA_SEEK_COMPLETE);
+    return OK;
+}
+
+status_t AAH_RXPlayer::getCurrentPosition(int *msec) {
+    if (NULL != msec) {
+        *msec = 0;
+    }
+    return OK;
+}
+
+status_t AAH_RXPlayer::getDuration(int *msec) {
+    if (NULL != msec) {
+        *msec = 1;
+    }
+    return OK;
+}
+
+status_t AAH_RXPlayer::reset() {
+    AutoMutex api_lock(&api_lock_);
+    reset_l();
+    return OK;
+}
+
+void AAH_RXPlayer::reset_l() {
+    stopWorkThread();
+    CHECK(sock_fd_ < 0);
+    CHECK(!multicast_joined_);
+    is_playing_ = false;
+    data_source_set_ = false;
+    transmitter_known_ = false;
+    memset(&listen_addr_, 0, sizeof(listen_addr_));
+}
+
+status_t AAH_RXPlayer::setLooping(int loop) {
+    return OK;
+}
+
+player_type AAH_RXPlayer::playerType() {
+    return AAH_RX_PLAYER;
+}
+
+status_t AAH_RXPlayer::setParameter(int key, const Parcel &request) {
+    return ERROR_UNSUPPORTED;
+}
+
+status_t AAH_RXPlayer::getParameter(int key, Parcel *reply) {
+    return ERROR_UNSUPPORTED;
+}
+
+status_t AAH_RXPlayer::invoke(const Parcel& request, Parcel *reply) {
+    if (!reply) {
+        return BAD_VALUE;
+    }
+
+    int32_t magic;
+    status_t err = request.readInt32(&magic);
+    if (err != OK) {
+        reply->writeInt32(err);
+        return OK;
+    }
+
+    if (magic != 0x12345) {
+        reply->writeInt32(BAD_VALUE);
+        return OK;
+    }
+
+    int32_t methodID;
+    err = request.readInt32(&methodID);
+    if (err != OK) {
+        reply->writeInt32(err);
+        return OK;
+    }
+
+    switch (methodID) {
+        // Get Volume
+        case INVOKE_GET_MASTER_VOLUME: {
+            if (audio_flinger_ != NULL) {
+                reply->writeInt32(OK);
+                reply->writeFloat(audio_flinger_->masterVolume());
+            } else {
+                reply->writeInt32(UNKNOWN_ERROR);
+            }
+        } break;
+
+        // Set Volume
+        case INVOKE_SET_MASTER_VOLUME: {
+            float targetVol = request.readFloat();
+            if (audio_flinger_ != NULL) {
+                reply->writeInt32(audio_flinger_->setMasterVolume(targetVol));
+            } else {
+                reply->writeInt32(UNKNOWN_ERROR);
+            }
+        } break;
+
+        default: return BAD_VALUE;
+    }
+
+    return OK;
+}
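+
+// Caller-side sketch of the invoke() protocol handled above (the INVOKE_*
+// method IDs are defined elsewhere in this library; the volume value is
+// illustrative):
+//
+//   Parcel request, reply;
+//   request.writeInt32(0x12345);                   // magic
+//   request.writeInt32(INVOKE_SET_MASTER_VOLUME);  // method ID
+//   request.writeFloat(0.5f);                      // target master volume
+//   player->invoke(request, &reply);
+//   status_t res = reply.readInt32();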
+
+void AAH_RXPlayer::fetchAudioFlinger() {
+    if (audio_flinger_ == NULL) {
+        sp<IServiceManager> sm = defaultServiceManager();
+        sp<IBinder> binder;
+        binder = sm->getService(String16("media.audio_flinger"));
+
+        if (binder == NULL) {
+            ALOGW("AAH_RXPlayer failed to fetch handle to audio flinger."
+                  " Master volume control will not be possible.");
+        }
+
+        audio_flinger_ = interface_cast<IAudioFlinger>(binder);
+    }
+}
+
+}  // namespace android
diff --git a/media/libaah_rtp/aah_rx_player.h b/media/libaah_rtp/aah_rx_player.h
new file mode 100644
index 0000000..ba5617e
--- /dev/null
+++ b/media/libaah_rtp/aah_rx_player.h
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __AAH_RX_PLAYER_H__
+#define __AAH_RX_PLAYER_H__
+
+#include <common_time/cc_helper.h>
+#include <media/MediaPlayerInterface.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXClient.h>
+#include <netinet/in.h>
+#include <utils/KeyedVector.h>
+#include <utils/LinearTransform.h>
+#include <utils/threads.h>
+
+#include "aah_decoder_pump.h"
+#include "pipe_event.h"
+
+namespace android {
+
+class AAH_RXPlayer : public MediaPlayerInterface {
+  public:
+    AAH_RXPlayer();
+
+    virtual status_t    initCheck();
+    virtual status_t    setDataSource(const char *url,
+                                      const KeyedVector<String8, String8>*
+                                      headers);
+    virtual status_t    setDataSource(int fd, int64_t offset, int64_t length);
+    virtual status_t    setVideoSurface(const sp<Surface>& surface);
+    virtual status_t    setVideoSurfaceTexture(const sp<ISurfaceTexture>&
+                                               surfaceTexture);
+    virtual status_t    prepare();
+    virtual status_t    prepareAsync();
+    virtual status_t    start();
+    virtual status_t    stop();
+    virtual status_t    pause();
+    virtual bool        isPlaying();
+    virtual status_t    seekTo(int msec);
+    virtual status_t    getCurrentPosition(int *msec);
+    virtual status_t    getDuration(int *msec);
+    virtual status_t    reset();
+    virtual status_t    setLooping(int loop);
+    virtual player_type playerType();
+    virtual status_t    setParameter(int key, const Parcel &request);
+    virtual status_t    getParameter(int key, Parcel *reply);
+    virtual status_t    invoke(const Parcel& request, Parcel *reply);
+
+  protected:
+    virtual ~AAH_RXPlayer();
+
+  private:
+    class ThreadWrapper : public Thread {
+      public:
+        friend class AAH_RXPlayer;
+        explicit ThreadWrapper(AAH_RXPlayer& player)
+            : Thread(false /* canCallJava */ )
+            , player_(player) { }
+
+        virtual bool threadLoop() { return player_.threadLoop(); }
+
+      private:
+        AAH_RXPlayer& player_;
+
+        DISALLOW_EVIL_CONSTRUCTORS(ThreadWrapper);
+    };
+
+#pragma pack(push, 1)
+    // PacketBuffers are structures used by the RX ring buffer.  The ring buffer
+    // is a ring of pointers to PacketBuffer structures which act as variable
+    // length byte arrays and hold the contents of received UDP packets.  Rather
+    // than make this a structure which holds a length and a pointer to another
+    // allocated structure (which would require two allocations), this struct
+    // uses a structure overlay pattern where allocation for the byte array
+    // consists of allocating (arrayLen + sizeof(ssize_t)) bytes of data from
+    // whatever pool/heap the packet buffer pulls from, and then overlaying the
+    // packed PacketBuffer structure on top of the allocation.  The one-byte
+    // array at the end of the structure serves as an offset to the data
+    // portion of the allocation; packet buffers are never allocated on the
+    // stack or using the new operator.  Instead, the static allocate-byte-array
+    // and destroy methods handle the allocate and overlay pattern.  They also
+    // allow for a potential future optimization where instead of just
+    // allocating blocks from the process global heap and overlaying, the
+    // allocator is replaced with a different implementation (private heap,
+    // free-list, circular buffer, etc) which reduces potential heap
+    // fragmentation issues which might arise from the frequent allocation and
+    // destruction of the received UDP traffic.
+    struct PacketBuffer {
+        ssize_t length_;
+        uint8_t data_[1];
+
+        // TODO : consider changing this to be some form of ring buffer or free
+        // pool system instead of just using the heap in order to avoid heap
+        // fragmentation.
+        static PacketBuffer* allocate(ssize_t length);
+        static void destroy(PacketBuffer* pb);
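+
+        // Illustrative lifecycle sketch (for exposition only; no API beyond
+        // the allocate/destroy methods declared above is implied):
+        //   PacketBuffer* pb = PacketBuffer::allocate(payload_len);
+        //   ... fill or consume pb->data_, up to pb->length_ bytes ...
+        //   PacketBuffer::destroy(pb);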
+
+      private:
+        // Force people to use allocate/destroy instead of new/delete.
+        PacketBuffer() { }
+        ~PacketBuffer() { }
+    };
+
+    struct RetransRequest {
+        uint32_t magic_;
+        uint32_t mcast_ip_;
+        uint16_t mcast_port_;
+        uint16_t start_seq_;
+        uint16_t end_seq_;
+    };
+#pragma pack(pop)
+
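+    // Status of the gap (if any) currently at the ring buffer's read pointer:
+    // either no gap, a normal gap caused by dropped packets, or the synthetic
+    // "fast start" gap covering packets from before the first sequence number
+    // received after a reset (see RXRingBuffer::fetchCurrentGap).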
+    enum GapStatus {
+        kGS_NoGap = 0,
+        kGS_NormalGap,
+        kGS_FastStartGap,
+    };
+
+    struct SeqNoGap {
+        uint16_t start_seq_;
+        uint16_t end_seq_;
+    };
+
+    class RXRingBuffer {
+      public:
+        explicit RXRingBuffer(uint32_t capacity);
+        ~RXRingBuffer();
+
+        bool initCheck() const { return (ring_ != NULL); }
+        void reset();
+
+        // Push a packet buffer with a given sequence number into the ring
+        // buffer.  pushBuffer will always consume the buffer pushed to it,
+        // either destroying it because it was a duplicate or overflow, or
+        // holding on to it in the ring.  Callers should not hold any references
+        // to PacketBuffers after they have been pushed to the ring.  Returns
+        // false in the case of a serious error (such as ring overflow).
+        // Callers should consider resetting the pipeline entirely in the event
+        // of a serious error.
+        bool pushBuffer(PacketBuffer* buf, uint16_t seq);
+
+        // Fetch the next buffer in the RTP sequence.  Returns NULL if there is
+        // no buffer to fetch.  If a non-NULL PacketBuffer is returned,
+        // is_discon will be set to indicate whether or not this PacketBuffer is
+        // discontinuous with any previously returned packet buffers.  Packet
+        // buffers returned by fetchBuffer are the caller's responsibility; they
+        // must be certain to destroy the buffers when they are done.
+        PacketBuffer* fetchBuffer(bool* is_discon);
+
+        // Returns the type of gap (normal or fast-start) and fills out the gap
+        // structure if the read pointer of the ring buffer is currently
+        // pointing to a gap which would stall a fetchBuffer operation.
+        // Returns kGS_NoGap if the read pointer is not currently pointing to a
+        // gap in the sequence.
+        GapStatus fetchCurrentGap(SeqNoGap* gap);
+
+        // Causes the read pointer to skip over any portion of a gap indicated
+        // by nak.  If nak is NULL, any gap currently blocking the read pointer
+        // will be completely skipped.  If any portion of a gap is skipped, the
+        // next successful read from fetchBuffer will indicate a discontinuity.
+        void processNAK(const SeqNoGap* nak = NULL);
+
+        // Compute the number of milliseconds until the inactivity timer for
+        // this RTP stream expires.  Returns -1 if there is no active timeout,
+        // or 0 if
+        // the system has already timed out.
+        int computeInactivityTimeout();
+
+      private:
+        Mutex          lock_;
+        PacketBuffer** ring_;
+        uint32_t       capacity_;
+        uint32_t       rd_;
+        uint32_t       wr_;
+
+        uint16_t       rd_seq_;
+        bool           rd_seq_known_;
+        bool           waiting_for_fast_start_;
+        bool           fetched_first_packet_;
+
+        uint64_t       rtp_activity_timeout_;
+        bool           rtp_activity_timeout_valid_;
+
+        DISALLOW_EVIL_CONSTRUCTORS(RXRingBuffer);
+    };
+
+    class Substream : public virtual RefBase {
+      public:
+        Substream(uint32_t ssrc, OMXClient& omx);
+
+        void cleanupBufferInProgress();
+        void shutdown();
+        void processPayloadStart(uint8_t* buf,
+                                 uint32_t amt,
+                                 int32_t ts_lower);
+        void processPayloadCont (uint8_t* buf,
+                                 uint32_t amt);
+        void processTSTransform(const LinearTransform& trans);
+
+        bool     isAboutToUnderflow();
+        uint32_t getSSRC()      const { return ssrc_; }
+        uint16_t getProgramID() const { return (ssrc_ >> 5) & 0x1F; }
+        status_t getStatus() const { return status_; }
+
+      protected:
+        virtual ~Substream();
+
+      private:
+        void                cleanupDecoder();
+        bool                shouldAbort(const char* log_tag);
+        void                processCompletedBuffer();
+        bool                setupSubstreamMeta();
+        bool                setupMP3SubstreamMeta();
+        bool                setupAACSubstreamMeta();
+        bool                setupSubstreamType(uint8_t substream_type,
+                                               uint8_t codec_type);
+
+        uint32_t            ssrc_;
+        bool                waiting_for_rap_;
+        status_t            status_;
+
+        bool                substream_details_known_;
+        uint8_t             substream_type_;
+        uint8_t             codec_type_;
+        const char*         codec_mime_type_;
+        sp<MetaData>        substream_meta_;
+
+        MediaBuffer*        buffer_in_progress_;
+        uint32_t            expected_buffer_size_;
+        uint32_t            buffer_filled_;
+
+        Vector<uint8_t>     aux_data_in_progress_;
+        uint32_t            aux_data_expected_size_;
+
+        sp<AAH_DecoderPump> decoder_;
+
+        static int64_t      kAboutToUnderflowThreshold;
+
+        DISALLOW_EVIL_CONSTRUCTORS(Substream);
+    };
+
+    typedef DefaultKeyedVector< uint32_t, sp<Substream> > SubstreamVec;
+
+    status_t            startWorkThread();
+    void                stopWorkThread();
+    virtual bool        threadLoop();
+    bool                setupSocket();
+    void                cleanupSocket();
+    void                resetPipeline();
+    void                reset_l();
+    bool                processRX(PacketBuffer* pb);
+    void                processRingBuffer();
+    void                processCommandPacket(PacketBuffer* pb);
+    bool                processGaps();
+    int                 computeNextGapRetransmitTimeout();
+    void                fetchAudioFlinger();
+
+    PipeEvent           wakeup_work_thread_evt_;
+    sp<ThreadWrapper>   thread_wrapper_;
+    Mutex               api_lock_;
+    bool                is_playing_;
+    bool                data_source_set_;
+
+    struct sockaddr_in  listen_addr_;
+    int                 sock_fd_;
+    bool                multicast_joined_;
+
+    struct sockaddr_in  transmitter_addr_;
+    bool                transmitter_known_;
+
+    uint32_t            current_epoch_;
+    bool                current_epoch_known_;
+
+    SeqNoGap            current_gap_;
+    GapStatus           current_gap_status_;
+    uint64_t            next_retrans_req_time_;
+
+    RXRingBuffer        ring_buffer_;
+    SubstreamVec        substreams_;
+    OMXClient           omx_;
+    CCHelper            cc_helper_;
+
+    // Connection to audio flinger used to hack a path to setMasterVolume.
+    sp<IAudioFlinger>   audio_flinger_;
+
+    static const uint32_t kRTPRingBufferSize;
+    static const uint32_t kRetransRequestMagic;
+    static const uint32_t kFastStartRequestMagic;
+    static const uint32_t kRetransNAKMagic;
+    static const uint32_t kGapRerequestTimeoutUSec;
+    static const uint32_t kFastStartTimeoutUSec;
+    static const uint32_t kRTPActivityTimeoutUSec;
+
+    static const uint32_t INVOKE_GET_MASTER_VOLUME = 3;
+    static const uint32_t INVOKE_SET_MASTER_VOLUME = 4;
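+
+    // Parcel layout used by invoke() for the methods above: the request
+    // carries { int32 magic (0x12345), int32 method ID, then a float volume
+    // for SET }; the reply carries { int32 status, then a float volume for
+    // GET }.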
+
+    static uint64_t monotonicUSecNow();
+
+    DISALLOW_EVIL_CONSTRUCTORS(AAH_RXPlayer);
+};
+
+}  // namespace android
+
+#endif  // __AAH_RX_PLAYER_H__
diff --git a/media/libaah_rtp/aah_rx_player_core.cpp b/media/libaah_rtp/aah_rx_player_core.cpp
new file mode 100644
index 0000000..d6b31fd
--- /dev/null
+++ b/media/libaah_rtp/aah_rx_player_core.cpp
@@ -0,0 +1,809 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <time.h>
+#include <utils/misc.h>
+
+#include <media/stagefright/Utils.h>
+
+#include "aah_rx_player.h"
+#include "aah_tx_packet.h"
+
+namespace android {
+
+const uint32_t AAH_RXPlayer::kRetransRequestMagic =
+    FOURCC('T','r','e','q');
+const uint32_t AAH_RXPlayer::kRetransNAKMagic =
+    FOURCC('T','n','a','k');
+const uint32_t AAH_RXPlayer::kFastStartRequestMagic =
+    FOURCC('T','f','s','t');
+const uint32_t AAH_RXPlayer::kGapRerequestTimeoutUSec = 75000;
+const uint32_t AAH_RXPlayer::kFastStartTimeoutUSec = 800000;
+const uint32_t AAH_RXPlayer::kRTPActivityTimeoutUSec = 10000000;
+
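+// Helpers which reinterpret big-endian (network order) fields from a packet
+// payload as signed integers (U16_AT/U32_AT/U64_AT read big-endian values).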
+static inline int16_t fetchInt16(uint8_t* data) {
+    return static_cast<int16_t>(U16_AT(data));
+}
+
+static inline int32_t fetchInt32(uint8_t* data) {
+    return static_cast<int32_t>(U32_AT(data));
+}
+
+static inline int64_t fetchInt64(uint8_t* data) {
+    return static_cast<int64_t>(U64_AT(data));
+}
+
+uint64_t AAH_RXPlayer::monotonicUSecNow() {
+    struct timespec now;
+    int res = clock_gettime(CLOCK_MONOTONIC, &now);
+    CHECK(res >= 0);
+
+    uint64_t ret = static_cast<uint64_t>(now.tv_sec) * 1000000;
+    ret += now.tv_nsec / 1000;
+
+    return ret;
+}
+
+status_t AAH_RXPlayer::startWorkThread() {
+    status_t res;
+    stopWorkThread();
+    res = thread_wrapper_->run("TRX_Player", PRIORITY_AUDIO);
+
+    if (res != OK) {
+        ALOGE("Failed to start work thread (res = %d)", res);
+    }
+
+    return res;
+}
+
+void AAH_RXPlayer::stopWorkThread() {
+    thread_wrapper_->requestExit();  // set the exit pending flag
+    wakeup_work_thread_evt_.setEvent();
+
+    status_t res;
+    res = thread_wrapper_->requestExitAndWait(); // block until thread exit.
+    if (res != OK) {
+        ALOGE("Failed to stop work thread (res = %d)", res);
+    }
+
+    wakeup_work_thread_evt_.clearPendingEvents();
+}
+
+void AAH_RXPlayer::cleanupSocket() {
+    if (sock_fd_ >= 0) {
+        if (multicast_joined_) {
+            int res;
+            struct ip_mreq mreq;
+            mreq.imr_multiaddr = listen_addr_.sin_addr;
+            mreq.imr_interface.s_addr = htonl(INADDR_ANY);
+            res = setsockopt(sock_fd_,
+                             IPPROTO_IP,
+                             IP_DROP_MEMBERSHIP,
+                             &mreq, sizeof(mreq));
+            if (res < 0) {
+                ALOGW("Failed to leave multicast group. (%d, %d)", res, errno);
+            }
+            multicast_joined_ = false;
+        }
+
+        close(sock_fd_);
+        sock_fd_ = -1;
+    }
+
+    resetPipeline();
+}
+
+void AAH_RXPlayer::resetPipeline() {
+    ring_buffer_.reset();
+
+    // Explicitly shut down all of the active substreams, then clear out the
+    // collection.  Failure to clear out a substream can result in its decoder
+    // holding a reference to itself and therefore not going away when the
+    // collection is cleared.
+    for (size_t i = 0; i < substreams_.size(); ++i)
+        substreams_.valueAt(i)->shutdown();
+
+    substreams_.clear();
+
+    current_gap_status_ = kGS_NoGap;
+}
+
+bool AAH_RXPlayer::setupSocket() {
+    long flags;
+    int  res, buf_size;
+    socklen_t opt_size;
+
+    cleanupSocket();
+    CHECK(sock_fd_ < 0);
+
+    // Make the socket
+    sock_fd_ = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+    if (sock_fd_ < 0) {
+        ALOGE("Failed to create listen socket (errno %d)", errno);
+        goto bailout;
+    }
+
+    // Set non-blocking operation
+    flags = fcntl(sock_fd_, F_GETFL);
+    res   = fcntl(sock_fd_, F_SETFL, flags | O_NONBLOCK);
+    if (res < 0) {
+        ALOGE("Failed to set socket (%d) to non-blocking mode (errno %d)",
+              sock_fd_, errno);
+        goto bailout;
+    }
+
+    // Bind to our port
+    struct sockaddr_in bind_addr;
+    memset(&bind_addr, 0, sizeof(bind_addr));
+    bind_addr.sin_family = AF_INET;
+    bind_addr.sin_addr.s_addr = INADDR_ANY;
+    bind_addr.sin_port = listen_addr_.sin_port;
+    res = bind(sock_fd_,
+               reinterpret_cast<const sockaddr*>(&bind_addr),
+               sizeof(bind_addr));
+    if (res < 0) {
+        uint32_t a = ntohl(bind_addr.sin_addr.s_addr);
+        uint16_t p = ntohs(bind_addr.sin_port);
+        ALOGE("Failed to bind socket (%d) to %d.%d.%d.%d:%hd. (errno %d)",
+              sock_fd_,
+              (a >> 24) & 0xFF,
+              (a >> 16) & 0xFF,
+              (a >>  8) & 0xFF,
+              (a      ) & 0xFF,
+              p,
+              errno);
+
+        goto bailout;
+    }
+
+    buf_size = 1 << 16;   // 64k
+    res = setsockopt(sock_fd_,
+                     SOL_SOCKET, SO_RCVBUF,
+                     &buf_size, sizeof(buf_size));
+    if (res < 0) {
+        ALOGW("Failed to increase socket buffer size to %d.  (errno %d)",
+              buf_size, errno);
+    }
+
+    buf_size = 0;
+    opt_size = sizeof(buf_size);
+    res = getsockopt(sock_fd_,
+                     SOL_SOCKET, SO_RCVBUF,
+                     &buf_size, &opt_size);
+    if (res < 0) {
+        ALOGW("Failed to fetch socket buffer size.  (errno %d)", errno);
+    } else {
+        ALOGI("RX socket buffer size is now %d bytes",  buf_size);
+    }
+
+    if (listen_addr_.sin_addr.s_addr) {
+        // Join the multicast group and we should be good to go.
+        struct ip_mreq mreq;
+        mreq.imr_multiaddr = listen_addr_.sin_addr;
+        mreq.imr_interface.s_addr = htonl(INADDR_ANY);
+        res = setsockopt(sock_fd_,
+                         IPPROTO_IP,
+                         IP_ADD_MEMBERSHIP,
+                         &mreq, sizeof(mreq));
+        if (res < 0) {
+            ALOGE("Failed to join multicast group. (errno %d)", errno);
+            goto bailout;
+        }
+        multicast_joined_ = true;
+    }
+
+    return true;
+
+bailout:
+    cleanupSocket();
+    return false;
+}
+
+bool AAH_RXPlayer::threadLoop() {
+    struct pollfd poll_fds[2];
+    bool process_more_right_now = false;
+
+    if (!setupSocket()) {
+        sendEvent(MEDIA_ERROR);
+        goto bailout;
+    }
+
+    while (!thread_wrapper_->exitPending()) {
+        // Step 1: Wait until there is something to do.
+        int gap_timeout = computeNextGapRetransmitTimeout();
+        int ring_timeout = ring_buffer_.computeInactivityTimeout();
+        int timeout = -1;
+
+        if (!ring_timeout) {
+            ALOGW("RTP inactivity timeout reached, resetting pipeline.");
+            resetPipeline();
+            timeout = gap_timeout;
+        } else {
+            if (gap_timeout < 0) {
+                timeout = ring_timeout;
+            } else if (ring_timeout < 0) {
+                timeout = gap_timeout;
+            } else {
+                timeout = (gap_timeout < ring_timeout) ? gap_timeout
+                                                       : ring_timeout;
+            }
+        }
+
+        if ((0 != timeout) && (!process_more_right_now)) {
+            // Set up the events to wait on.  Start with the wakeup pipe.
+            memset(&poll_fds, 0, sizeof(poll_fds));
+            poll_fds[0].fd     = wakeup_work_thread_evt_.getWakeupHandle();
+            poll_fds[0].events = POLLIN;
+
+            // Add the RX socket.
+            poll_fds[1].fd     = sock_fd_;
+            poll_fds[1].events = POLLIN;
+
+            // Wait for something interesting to happen.
+            int poll_res = poll(poll_fds, NELEM(poll_fds), timeout);
+            if (poll_res < 0) {
+                ALOGE("Fatal error (%d,%d) while waiting on events",
+                      poll_res, errno);
+                sendEvent(MEDIA_ERROR);
+                goto bailout;
+            }
+        }
+
+        if (thread_wrapper_->exitPending()) {
+            break;
+        }
+
+        wakeup_work_thread_evt_.clearPendingEvents();
+        process_more_right_now = false;
+
+        // Step 2: Do we have data waiting in the socket?  If so, drain the
+        // socket moving valid RTP information into the ring buffer to be
+        // processed.
+        if (poll_fds[1].revents) {
+            struct sockaddr_in from;
+            socklen_t from_len;
+
+            ssize_t res = 0;
+            while (!thread_wrapper_->exitPending()) {
+                // Check the size of any pending packet.
+                res = recv(sock_fd_, NULL, 0, MSG_PEEK | MSG_TRUNC);
+
+                // Error?
+                if (res < 0) {
+                    // If the error is anything other than would block,
+                    // something has gone very wrong.
+                    if ((errno != EAGAIN) && (errno != EWOULDBLOCK)) {
+                        ALOGE("Fatal socket error during recvfrom (%d, %d)",
+                              (int)res, errno);
+                        goto bailout;
+                    }
+
+                    // Socket is out of data, just break out of processing and
+                    // wait for more.
+                    break;
+                }
+
+                // Allocate a payload.
+                PacketBuffer* pb = PacketBuffer::allocate(res);
+                if (NULL == pb) {
+                    ALOGE("Fatal error, failed to allocate packet buffer of"
+                          " length %u", static_cast<uint32_t>(res));
+                    goto bailout;
+                }
+
+                // Fetch the data.
+                from_len = sizeof(from);
+                res = recvfrom(sock_fd_, pb->data_, pb->length_, 0,
+                               reinterpret_cast<struct sockaddr*>(&from),
+                               &from_len);
+                if (res != pb->length_) {
+                    ALOGE("Fatal error, fetched packet length (%d) does not"
+                          " match peeked packet length (%u).  This should never"
+                          " happen.  (errno = %d)",
+                          static_cast<int>(res),
+                          static_cast<uint32_t>(pb->length_),
+                          errno);
+                }
+
+                bool drop_packet = false;
+                if (transmitter_known_) {
+                    if (from.sin_addr.s_addr !=
+                        transmitter_addr_.sin_addr.s_addr) {
+                        uint32_t a = ntohl(from.sin_addr.s_addr);
+                        uint16_t p = ntohs(from.sin_port);
+                        ALOGV("Dropping packet from unknown transmitter"
+                              " %u.%u.%u.%u:%hu",
+                              ((a >> 24) & 0xFF),
+                              ((a >> 16) & 0xFF),
+                              ((a >>  8) & 0xFF),
+                              ( a        & 0xFF),
+                              p);
+
+                        drop_packet = true;
+                    } else {
+                        transmitter_addr_.sin_port = from.sin_port;
+                    }
+                } else {
+                    memcpy(&transmitter_addr_, &from, sizeof(from));
+                    transmitter_known_ = true;
+                }
+
+                if (!drop_packet) {
+                    bool serious_error = !processRX(pb);
+
+                    if (serious_error) {
+                        // Something went "seriously wrong".  Currently, the
+                        // only trigger for this should be a ring buffer
+                        // overflow.  The current failsafe behavior for when
+                        // something goes seriously wrong is to just reset the
+                        // pipeline.  The system should behave as if this
+                        // AAH_RXPlayer was just set up for the first time.
+                        ALOGE("Something just went seriously wrong with the"
+                              " pipeline.  Resetting.");
+                        resetPipeline();
+                    }
+                } else {
+                    PacketBuffer::destroy(pb);
+                }
+            }
+        }
+
+        // Step 3: Process any data we may have accumulated in the ring buffer
+        // so far.
+        if (!thread_wrapper_->exitPending()) {
+            processRingBuffer();
+        }
+
+        // Step 4: At this point in time, the ring buffer should either be
+        // empty, or stalled in front of a gap caused by some dropped packets.
+        // Check on the current gap situation and deal with it in an appropriate
+        // fashion.  If processGaps returns true, it means that it has given up
+        // on a gap and that we should try to process some more data
+        // immediately.
+        if (!thread_wrapper_->exitPending()) {
+            process_more_right_now = processGaps();
+        }
+
+        // Step 5: Check for fatal errors.  If any of our substreams has
+        // encountered a fatal, unrecoverable, error, then propagate the error
+        // up to user level and shut down.
+        for (size_t i = 0; i < substreams_.size(); ++i) {
+            status_t status;
+            CHECK(substreams_.valueAt(i) != NULL);
+
+            status = substreams_.valueAt(i)->getStatus();
+            if (OK != status) {
+                ALOGE("Substream index %d has encountered an unrecoverable"
+                      " error (%d).  Signalling application level and shutting"
+                      " down.", i, status);
+                sendEvent(MEDIA_ERROR);
+                goto bailout;
+            }
+        }
+    }
+
+bailout:
+    cleanupSocket();
+    return false;
+}
+
+bool AAH_RXPlayer::processRX(PacketBuffer* pb) {
+    CHECK(NULL != pb);
+
+    uint8_t* data = pb->data_;
+    ssize_t  amt  = pb->length_;
+    uint32_t nak_magic;
+    uint16_t seq_no;
+    uint32_t epoch;
+
+    // Every packet either starts with an RTP header which is at least 12 bytes
+    // long or is a retry NAK which is 14 bytes long.  If there are fewer than
+    // 12 bytes here, this cannot be a proper RTP packet.
+    if (amt < 12) {
+        ALOGV("Dropping packet, too short to contain RTP header (%u bytes)",
+              static_cast<uint32_t>(amt));
+        goto drop_packet;
+    }
+
+    // Check to see if this is the special case of a NAK packet.
+    nak_magic = ntohl(*(reinterpret_cast<uint32_t*>(data)));
+    if (nak_magic == kRetransNAKMagic) {
+        // Looks like a NAK packet; make sure it's long enough.
+
+        if (amt < static_cast<ssize_t>(sizeof(RetransRequest))) {
+            ALOGV("Dropping packet, too short to contain NAK payload"
+                  " (%u bytes)", static_cast<uint32_t>(amt));
+            goto drop_packet;
+        }
+
+        SeqNoGap gap;
+        RetransRequest* rtr = reinterpret_cast<RetransRequest*>(data);
+        gap.start_seq_ = ntohs(rtr->start_seq_);
+        gap.end_seq_   = ntohs(rtr->end_seq_);
+
+        ALOGV("Process NAK for gap at [%hu, %hu]",
+                gap.start_seq_, gap.end_seq_);
+        ring_buffer_.processNAK(&gap);
+
+        return true;
+    }
+
+    // According to the TRTP spec, version should be 2, padding should be 0,
+    // extension should be 0 and CSRCCnt should be 0.  If any of these tests
+    // fail, we chuck the packet.
+    if (data[0] != 0x80) {
+        ALOGV("Dropping packet, bad V/P/X/CSRCCnt field (0x%02x)",
+              data[0]);
+        goto drop_packet;
+    }
+
+    // Check the payload type.  For TRTP, it should always be 100.
+    if ((data[1] & 0x7F) != 100) {
+        ALOGV("Dropping packet, bad payload type. (%u)",
+              data[1] & 0x7F);
+        goto drop_packet;
+    }
+
+    // Check whether the transmitter has begun a new epoch.
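+    // (The 32-bit field at offset 8 appears to pack the epoch into its upper
+    // 22 bits; the lower bits carry the SSRC/program information, see
+    // Substream::getProgramID().)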
+    epoch = (U32_AT(data + 8) >> 10) & 0x3FFFFF;
+    if (current_epoch_known_) {
+        if (epoch != current_epoch_) {
+            ALOGV("%s: new epoch %u", __PRETTY_FUNCTION__, epoch);
+            current_epoch_ = epoch;
+            resetPipeline();
+        }
+    } else {
+        current_epoch_ = epoch;
+        current_epoch_known_ = true;
+    }
+
+    // Extract the sequence number and hand the packet off to the ring buffer
+    // for dropped packet detection and later processing.
+    seq_no = U16_AT(data + 2);
+    return ring_buffer_.pushBuffer(pb, seq_no);
+
+drop_packet:
+    PacketBuffer::destroy(pb);
+    return true;
+}
+
+void AAH_RXPlayer::processRingBuffer() {
+    PacketBuffer* pb;
+    bool is_discon;
+    sp<Substream> substream;
+    LinearTransform trans;
+    bool foundTrans = false;
+
+    while (NULL != (pb = ring_buffer_.fetchBuffer(&is_discon))) {
+        if (is_discon) {
+            // Abort all partially assembled payloads.
+            for (size_t i = 0; i < substreams_.size(); ++i) {
+                CHECK(substreams_.valueAt(i) != NULL);
+                substreams_.valueAt(i)->cleanupBufferInProgress();
+            }
+        }
+
+        uint8_t* data = pb->data_;
+        ssize_t  amt  = pb->length_;
+
+        // Should not have any non-RTP packets in the ring buffer.  RTP packets
+        // must be at least 12 bytes long.
+        CHECK(amt >= 12);
+
+        // Extract the marker bit and the SSRC field.
+        bool     marker = (data[1] & 0x80) != 0;
+        uint32_t ssrc   = U32_AT(data + 8);
+
+        // Is this the start of a new TRTP payload?  If so, the marker bit
+        // should be set and there are some things we should be checking for.
+        if (marker) {
+            // TRTP headers need to have at least a byte for version, a byte for
+            // payload type and flags, and 4 bytes for length.
+            if (amt < 18) {
+                ALOGV("Dropping packet, too short to contain TRTP header"
+                      " (%u bytes)", static_cast<uint32_t>(amt));
+                goto process_next_packet;
+            }
+
+            // Check the TRTP version and extract the payload type/flags.
+            uint8_t trtp_version =  data[12];
+            uint8_t payload_type = (data[13] >> 4) & 0xF;
+            uint8_t trtp_flags   =  data[13]       & 0xF;
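+
+            // Apparent TRTP header layout relative to the RTP header: byte 12
+            // is the TRTP version, the high nibble of byte 13 is the payload
+            // type, the low nibble of byte 13 holds the flags, and (per the
+            // comment above) the remaining 4 bytes carry the payload length.
+            // Flag 0x01 indicates an extra 4-byte timestamp field and flag
+            // 0x02 a 24-byte timestamp transformation, as parsed below.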
+
+            if (1 != trtp_version) {
+                ALOGV("Dropping packet, bad trtp version %hhu", trtp_version);
+                goto process_next_packet;
+            }
+
+            // Is there a timestamp transformation present on this packet?  If
+            // so, extract it and pass it to the appropriate substreams.
+            if (trtp_flags & 0x02) {
+                ssize_t offset = 18 + ((trtp_flags & 0x01) ? 4 : 0);
+                if (amt < (offset + 24)) {
+                    ALOGV("Dropping packet, too short to contain TRTP Timestamp"
+                          " Transformation (%u bytes)",
+                          static_cast<uint32_t>(amt));
+                    goto process_next_packet;
+                }
+
+                trans.a_zero = fetchInt64(data + offset);
+                trans.b_zero = fetchInt64(data + offset + 16);
+                trans.a_to_b_numer = static_cast<int32_t>(
+                        fetchInt32 (data + offset + 8));
+                trans.a_to_b_denom = U32_AT(data + offset + 12);
+                foundTrans = true;
+
+                uint32_t program_id = (ssrc >> 5) & 0x1F;
+                for (size_t i = 0; i < substreams_.size(); ++i) {
+                    sp<Substream> iter = substreams_.valueAt(i);
+                    CHECK(iter != NULL);
+
+                    if (iter->getProgramID() == program_id) {
+                        iter->processTSTransform(trans);
+                    }
+                }
+            }
+
+            // Is this a command packet?  If so, it's not necessarily
+            // associated with one particular substream.  Just give it to the
+            // command packet handler and then move on.
+            if (4 == payload_type) {
+                processCommandPacket(pb);
+                goto process_next_packet;
+            }
+        }
+
+        // If we got to here, then we are a normal packet.  Find (or allocate)
+        // the substream we belong to and send the packet off to be processed.
+        substream = substreams_.valueFor(ssrc);
+        if (substream == NULL) {
+            substream = new Substream(ssrc, omx_);
+            if (substream == NULL) {
+                ALOGE("Failed to allocate substream for SSRC 0x%08x", ssrc);
+                goto process_next_packet;
+            }
+            substreams_.add(ssrc, substream);
+
+            if (foundTrans) {
+                substream->processTSTransform(trans);
+            }
+        }
+
+        CHECK(substream != NULL);
+
+        if (marker) {
+            // Start of a new TRTP payload for this substream.  Extract the
+            // lower 32 bits of the timestamp and hand the buffer to the
+            // substream for processing.
+            uint32_t ts_lower = U32_AT(data + 4);
+            substream->processPayloadStart(data + 12, amt - 12, ts_lower);
+        } else {
+            // Continuation of an existing TRTP payload.  Just hand it off to
+            // the substream for processing.
+            substream->processPayloadCont(data + 12, amt - 12);
+        }
+
+process_next_packet:
+        PacketBuffer::destroy(pb);
+    }  // end of main processing while loop.
+}
+
+void AAH_RXPlayer::processCommandPacket(PacketBuffer* pb) {
+    CHECK(NULL != pb);
+
+    uint8_t* data = pb->data_;
+    ssize_t  amt  = pb->length_;
+
+    // verify that this packet meets the minimum length of a command packet
+    if (amt < 20) {
+        return;
+    }
+
+    uint8_t trtp_version =  data[12];
+    uint8_t trtp_flags   =  data[13]       & 0xF;
+
+    if (1 != trtp_version) {
+        ALOGV("Dropping packet, bad trtp version %hhu", trtp_version);
+        return;
+    }
+
+    // calculate the start of the command payload
+    ssize_t offset = 18;
+    if (trtp_flags & 0x01) {
+        // timestamp is present (4 bytes)
+        offset += 4;
+    }
+    if (trtp_flags & 0x02) {
+        // transform is present (24 bytes)
+        offset += 24;
+    }
+
+    // the packet must contain 2 bytes of command payload beyond the TRTP header
+    if (amt < offset + 2) {
+        return;
+    }
+
+    uint16_t command_id = U16_AT(data + offset);
+
+    switch (command_id) {
+        case TRTPControlPacket::kCommandNop:
+            break;
+
+        case TRTPControlPacket::kCommandEOS:
+        case TRTPControlPacket::kCommandFlush: {
+            uint16_t program_id = (U32_AT(data + 8) >> 5) & 0x1F;
+            ALOGI("*** %s flushing program_id=%d",
+                  __PRETTY_FUNCTION__, program_id);
+
+            Vector<uint32_t> substreams_to_remove;
+            for (size_t i = 0; i < substreams_.size(); ++i) {
+                sp<Substream> iter = substreams_.valueAt(i);
+                if (iter->getProgramID() == program_id) {
+                    iter->shutdown();
+                    substreams_to_remove.add(iter->getSSRC());
+                }
+            }
+
+            for (size_t i = 0; i < substreams_to_remove.size(); ++i) {
+                substreams_.removeItem(substreams_to_remove[i]);
+            }
+        } break;
+    }
+}
+
+bool AAH_RXPlayer::processGaps() {
+    // Deal with the current gap situation.  Specifically...
+    //
+    // 1) If a new gap has shown up, send a retransmit request to the
+    //    transmitter.
+    // 2) If a gap we were working on has had a packet in the middle or at
+    //    the end filled in, send another retransmit request for the beginning
+    //    portion of the gap.  TRTP was designed for LANs where packet
+    //    re-ordering is very unlikely, so seeing the middle or end of a gap
+    //    filled in before the beginning is an almost certain indication that
+    //    a retransmission packet was also dropped.
+    // 3) If we have been working on a gap for a while and it still has not
+    //    been filled in, send another retransmit request.
+    // 4) If there are no more gaps in the ring, clear the current_gap_status_
+    //    flag to indicate that all is well again.
+
+    // Start by fetching the active gap status.
+    SeqNoGap gap;
+    bool send_retransmit_request = false;
+    bool ret_val = false;
+    GapStatus gap_status;
+    if (kGS_NoGap != (gap_status = ring_buffer_.fetchCurrentGap(&gap))) {
+        // Note: checking for a change in the end sequence number should cover
+        // moving on to an entirely new gap for case #1 as well as resending the
+        // beginning of a gap range for case #2.
+        send_retransmit_request = (kGS_NoGap == current_gap_status_) ||
+                                  (current_gap_.end_seq_ != gap.end_seq_);
+
+        // If this is the same gap we have been working on, and it has timed
+        // out, then check to see if our substreams are about to underflow.  If
+        // so, instead of sending another retransmit request, just give up on
+        // this gap and move on.
+        if (!send_retransmit_request &&
+           (kGS_NoGap != current_gap_status_) &&
+           (0 == computeNextGapRetransmitTimeout())) {
+
+            // If our current gap is the fast-start gap, don't bother to skip
+            // it just because substreams look like they are about to
+            // underflow.
+            if ((kGS_FastStartGap != gap_status) ||
+                (current_gap_.end_seq_ != gap.end_seq_)) {
+                for (size_t i = 0; i < substreams_.size(); ++i) {
+                    if (substreams_.valueAt(i)->isAboutToUnderflow()) {
+                        ALOGV("About to underflow, giving up on gap [%hu, %hu]",
+                              gap.start_seq_, gap.end_seq_);
+                        ring_buffer_.processNAK();
+                        current_gap_status_ = kGS_NoGap;
+                        return true;
+                    }
+                }
+            }
+
+            // Looks like no one is about to underflow.  Just go ahead and send
+            // the request.
+            send_retransmit_request = true;
+        }
+    } else {
+        current_gap_status_ = kGS_NoGap;
+    }
+
+    if (send_retransmit_request) {
+        // If we have been working on a fast start, and it is still not filled
+        // in, even after the extended retransmit time out, give up and skip it.
+        // The system should fall back into its normal slow-start behavior.
+        if ((kGS_FastStartGap == current_gap_status_) &&
+            (current_gap_.end_seq_ == gap.end_seq_)) {
+            ALOGV("Fast start is taking forever; giving up.");
+            ring_buffer_.processNAK();
+            current_gap_status_ = kGS_NoGap;
+            return true;
+        }
+
+        // Send the request.
+        RetransRequest req;
+        uint32_t magic  = (kGS_FastStartGap == gap_status)
+                        ? kFastStartRequestMagic
+                        : kRetransRequestMagic;
+        req.magic_      = htonl(magic);
+        req.mcast_ip_   = listen_addr_.sin_addr.s_addr;
+        req.mcast_port_ = listen_addr_.sin_port;
+        req.start_seq_  = htons(gap.start_seq_);
+        req.end_seq_    = htons(gap.end_seq_);
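+
+        // The request goes out as the packed RetransRequest structure declared
+        // in aah_rx_player.h (magic, multicast ip/port, start/end sequence
+        // numbers), with all multi-byte fields in network byte order.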
+
+        {
+            uint32_t a = ntohl(transmitter_addr_.sin_addr.s_addr);
+            uint16_t p = ntohs(transmitter_addr_.sin_port);
+            ALOGV("Sending to transmitter %u.%u.%u.%u:%hu",
+                    ((a >> 24) & 0xFF),
+                    ((a >> 16) & 0xFF),
+                    ((a >>  8) & 0xFF),
+                    ( a        & 0xFF),
+                    p);
+        }
+
+        int res = sendto(sock_fd_, &req, sizeof(req), 0,
+                         reinterpret_cast<struct sockaddr*>(&transmitter_addr_),
+                         sizeof(transmitter_addr_));
+        if (res < 0) {
+            ALOGE("Error when sending retransmit request (%d)", errno);
+        } else {
+            ALOGV("%s request for range [%hu, %hu] sent",
+                  (kGS_FastStartGap == gap_status) ? "Fast Start"
+                                                   : "Retransmit",
+                  gap.start_seq_, gap.end_seq_);
+        }
+
+        // Update the current gap info.
+        current_gap_ = gap;
+        current_gap_status_ = gap_status;
+        next_retrans_req_time_ = monotonicUSecNow() +
+                               ((kGS_FastStartGap == current_gap_status_)
+                                ? kFastStartTimeoutUSec
+                                : kGapRerequestTimeoutUSec);
+    }
+
+    return false;
+}
+
+// Compute when it's time to send the next gap retransmission, in milliseconds.
+// Returns < 0 for an infinite timeout (no gap) and 0 if it's time to retransmit
+// right now.
+int AAH_RXPlayer::computeNextGapRetransmitTimeout() {
+    if (kGS_NoGap == current_gap_status_) {
+        return -1;
+    }
+
+    int64_t timeout_delta = next_retrans_req_time_ - monotonicUSecNow();
+
+    timeout_delta /= 1000;
+    if (timeout_delta <= 0) {
+        return 0;
+    }
+
+    return static_cast<uint32_t>(timeout_delta);
+}
+
+}  // namespace android
diff --git a/media/libaah_rtp/aah_rx_player_ring_buffer.cpp b/media/libaah_rtp/aah_rx_player_ring_buffer.cpp
new file mode 100644
index 0000000..779405e
--- /dev/null
+++ b/media/libaah_rtp/aah_rx_player_ring_buffer.cpp
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "aah_rx_player.h"
+
+namespace android {
+
+AAH_RXPlayer::RXRingBuffer::RXRingBuffer(uint32_t capacity) {
+    capacity_ = capacity;
+    rd_ = wr_ = 0;
+    ring_ = new PacketBuffer*[capacity];
+    memset(ring_, 0, sizeof(PacketBuffer*) * capacity);
+    reset();
+}
+
+AAH_RXPlayer::RXRingBuffer::~RXRingBuffer() {
+    reset();
+    delete[] ring_;
+}
+
+void AAH_RXPlayer::RXRingBuffer::reset() {
+    AutoMutex lock(&lock_);
+
+    if (NULL != ring_) {
+        while (rd_ != wr_) {
+            CHECK(rd_ < capacity_);
+            if (NULL != ring_[rd_]) {
+                PacketBuffer::destroy(ring_[rd_]);
+                ring_[rd_] = NULL;
+            }
+            rd_ = (rd_ + 1) % capacity_;
+        }
+    }
+
+    rd_ = wr_ = 0;
+    rd_seq_known_ = false;
+    waiting_for_fast_start_ = true;
+    fetched_first_packet_ = false;
+    rtp_activity_timeout_valid_ = false;
+}
+
+bool AAH_RXPlayer::RXRingBuffer::pushBuffer(PacketBuffer* buf,
+                                                uint16_t seq) {
+    AutoMutex lock(&lock_);
+    CHECK(NULL != ring_);
+    CHECK(NULL != buf);
+
+    rtp_activity_timeout_valid_ = true;
+    rtp_activity_timeout_ = monotonicUSecNow() + kRTPActivityTimeoutUSec;
+
+    // If the ring buffer is totally reset (we have never received a single
+    // payload) then we don't know the rd sequence number and this should be
+    // simple.  We just store the payload, advance the wr pointer and record the
+    // initial sequence number.
+    if (!rd_seq_known_) {
+        CHECK(rd_ == wr_);
+        CHECK(NULL == ring_[wr_]);
+        CHECK(wr_ < capacity_);
+
+        ring_[wr_] = buf;
+        wr_ = (wr_ + 1) % capacity_;
+        rd_seq_ = seq;
+        rd_seq_known_ = true;
+        return true;
+    }
+
+    // Compute the sequence number of this payload and of the write pointer,
+    // normalized around the read pointer.  IOW - transform the payload seq no
+    // and the wr pointer seq no into a space where the rd pointer seq no is
+    // zero.  This will define 4 cases we can consider...
+    //
+    // 1) norm_seq == norm_wr_seq
+    //    This payload is contiguous with the last.  All is good.
+    //
+    // 2)  ((norm_seq <  norm_wr_seq) && (norm_seq >= norm_rd_seq))
+    // aka ((norm_seq <  norm_wr_seq) && (norm_seq >= 0))
+    //    This payload is in the past, in the unprocessed region of the ring
+    //    buffer.  It is probably a retransmit intended to fill in a dropped
+    //    payload; it may be a duplicate.
+    //
+    // 3) ((norm_seq - norm_wr_seq) & 0x8000) != 0
+    //    This payload is in the past compared to the write pointer (or so very
+    //    far in the future that it has wrapped the seq no space), but not in
+    //    the unprocessed region of the ring buffer.  This could be a duplicate
+    //    retransmit; we just drop these payloads unless we are waiting for our
+    //    first fast start packet.  If we are waiting for fast start, then this
+    //    packet is probably the first packet of the fast start retransmission.
+    //    If it will fit in the buffer, back up the read pointer to its position
+    //    and clear the fast start flag, otherwise just drop it.
+    //
+    // 4) ((norm_seq - norm_wr_seq) & 0x8000) == 0
+    //    This payload is ahead of the next write pointer.  This indicates
+    //    that we have missed some payloads and need to request a retransmit.
+    //    If norm_seq >= (capacity - 1), then the gap is so large that it would
+    //    overflow the ring buffer and we should probably start to panic.
+
+    uint16_t norm_wr_seq = ((wr_ + capacity_ - rd_) % capacity_);
+    uint16_t norm_seq    = seq - rd_seq_;
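+
+    // Worked example (illustration only): with rd_seq_ = 1000, a payload with
+    // seq = 1003 normalizes to norm_seq = 3, while a late retransmit with
+    // seq = 998 wraps to norm_seq = 0xFFFE, whose set high bit identifies it
+    // as being in the past relative to the read pointer.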
+
+    // Check for overflow first.
+    if ((!(norm_seq & 0x8000)) && (norm_seq >= (capacity_ - 1))) {
+        ALOGW("Ring buffer overflow; cap = %u, [rd, wr] = [%hu, %hu],"
+              " seq = %hu", capacity_, rd_seq_, norm_wr_seq + rd_seq_, seq);
+        PacketBuffer::destroy(buf);
+        return false;
+    }
+
+    // Check for case #1
+    if (norm_seq == norm_wr_seq) {
+        CHECK(wr_ < capacity_);
+        CHECK(NULL == ring_[wr_]);
+
+        ring_[wr_] = buf;
+        wr_ = (wr_ + 1) % capacity_;
+
+        CHECK(wr_ != rd_);
+        return true;
+    }
+
+    // Check case #2
+    uint32_t ring_pos = (rd_ + norm_seq) % capacity_;
+    if ((norm_seq < norm_wr_seq) && (!(norm_seq & 0x8000))) {
+        // Do we already have a payload for this slot?  If so, then this looks
+        // like a duplicate retransmit.  Just ignore it.
+        if (NULL != ring_[ring_pos]) {
+            ALOGD("RXed duplicate retransmit, seq = %hu", seq);
+            PacketBuffer::destroy(buf);
+        } else {
+            // Looks like we were missing this payload.  Go ahead and store it.
+            ring_[ring_pos] = buf;
+        }
+
+        return true;
+    }
+
+    // Check case #3
+    if ((norm_seq - norm_wr_seq) & 0x8000) {
+        if (!waiting_for_fast_start_) {
+            ALOGD("RXed duplicate retransmit from before rd pointer, seq = %hu",
+                  seq);
+            PacketBuffer::destroy(buf);
+        } else {
+            // Looks like a fast start fill-in.  Go ahead and store it, assuming
+            // that we can fit it in the buffer.
+            uint32_t implied_ring_size = static_cast<uint32_t>(norm_wr_seq)
+                                       + (rd_seq_ - seq);
+
+            if (implied_ring_size >= (capacity_ - 1)) {
+                ALOGD("RXed what looks like a fast start packet (seq = %hu),"
+                      " but packet is too far in the past to fit into the ring"
+                      "  buffer.  Dropping.", seq);
+                PacketBuffer::destroy(buf);
+            } else {
+                ring_pos = (rd_ + capacity_ + seq - rd_seq_) % capacity_;
+                rd_seq_ = seq;
+                rd_ = ring_pos;
+                waiting_for_fast_start_ = false;
+
+                CHECK(ring_pos < capacity_);
+                CHECK(NULL == ring_[ring_pos]);
+                ring_[ring_pos] = buf;
+            }
+
+        }
+        return true;
+    }
+
+    // Must be in case #4 with no overflow.  This packet fits in the current
+    // ring buffer, but is discontiguous.  Advance the write pointer leaving a
+    // gap behind.
+    uint32_t gap_len = (ring_pos + capacity_ - wr_) % capacity_;
+    ALOGD("Drop detected; %u packets, seq_range [%hu, %hu]",
+          gap_len,
+          rd_seq_ + norm_wr_seq,
+          rd_seq_ + norm_wr_seq + gap_len - 1);
+
+    CHECK(NULL == ring_[ring_pos]);
+    ring_[ring_pos] = buf;
+    wr_ = (ring_pos + 1) % capacity_;
+    CHECK(wr_ != rd_);
+
+    return true;
+}
+
+AAH_RXPlayer::PacketBuffer*
+AAH_RXPlayer::RXRingBuffer::fetchBuffer(bool* is_discon) {
+    AutoMutex lock(&lock_);
+    CHECK(NULL != ring_);
+    CHECK(NULL != is_discon);
+
+    // If the read sequence number is not known, then this ring buffer has not
+    // received a packet since being reset and there cannot be any packets to
+    // return.  If we are still waiting for the first fast start packet to show
+    // up, we don't want to let any buffer be consumed yet because we expect
+    // packets from before the initial read sequence number to show up shortly.
+    if (!rd_seq_known_ || waiting_for_fast_start_) {
+        *is_discon = false;
+        return NULL;
+    }
+
+    PacketBuffer* ret = NULL;
+    *is_discon = !fetched_first_packet_;
+
+    while ((rd_ != wr_) && (NULL == ret)) {
+        CHECK(rd_ < capacity_);
+
+        // If we hit a gap, stall and do not advance the read pointer.  Let the
+        // higher level code deal with requesting retries and/or deciding to
+        // skip the current gap.
+        ret = ring_[rd_];
+        if (NULL == ret) {
+            break;
+        }
+
+        ring_[rd_] = NULL;
+        rd_ = (rd_ + 1) % capacity_;
+        ++rd_seq_;
+    }
+
+    if (NULL != ret) {
+        fetched_first_packet_ = true;
+    }
+
+    return ret;
+}
+
+AAH_RXPlayer::GapStatus
+AAH_RXPlayer::RXRingBuffer::fetchCurrentGap(SeqNoGap* gap) {
+    AutoMutex lock(&lock_);
+    CHECK(NULL != ring_);
+    CHECK(NULL != gap);
+
+    // If the read sequence number is not known, then this ring buffer has not
+    // received a packet since being reset and there cannot be any gaps.
+    if (!rd_seq_known_) {
+        return kGS_NoGap;
+    }
+
+    // If we are waiting for fast start, then the current gap is a fast start
+    // gap and it includes all packets before the read sequence number.
+    if (waiting_for_fast_start_) {
+        gap->start_seq_ =
+        gap->end_seq_   = rd_seq_ - 1;
+        return kGS_FastStartGap;
+    }
+
+    // If rd == wr, then the buffer is empty and there cannot be any gaps.
+    if (rd_ == wr_) {
+        return kGS_NoGap;
+    }
+
+    // If rd_ is currently pointing at an unprocessed packet, then there is no
+    // current gap.
+    CHECK(rd_ < capacity_);
+    if (NULL != ring_[rd_]) {
+        return kGS_NoGap;
+    }
+
+    // Looks like there must be a gap here.  The start of the gap is the current
+    // rd sequence number, all we need to do now is determine its length in
+    // order to compute the end sequence number.
+    gap->start_seq_ = rd_seq_;
+    uint16_t end = rd_seq_;
+    uint32_t tmp = (rd_ + 1) % capacity_;
+    while ((tmp != wr_) && (NULL == ring_[tmp])) {
+        ++end;
+        tmp = (tmp + 1) % capacity_;
+    }
+    gap->end_seq_ = end;
+
+    return kGS_NormalGap;
+}
+
+void AAH_RXPlayer::RXRingBuffer::processNAK(const SeqNoGap* nak) {
+    AutoMutex lock(&lock_);
+    CHECK(NULL != ring_);
+
+    // If we were waiting for our first fast start fill-in packet, and we
+    // received a NAK, then apparently we are not getting our fast start.  Just
+    // clear the waiting flag and go back to normal behavior.
+    if (waiting_for_fast_start_) {
+        waiting_for_fast_start_ = false;
+    }
+
+    // If we have not received a packet since last reset, or there is no data in
+    // the ring, then there is nothing to skip.
+    if ((!rd_seq_known_) || (rd_ == wr_)) {
+        return;
+    }
+
+    // If rd_ is currently pointing at an unprocessed packet, then there is no
+    // gap to skip.
+    CHECK(rd_ < capacity_);
+    if (NULL != ring_[rd_]) {
+        return;
+    }
+
+    // Looks like there must be a gap here.  Advance rd until we have passed
+    // over the portion of it indicated by nak (or all of the gap if nak is
+    // NULL).  Then reset fetched_first_packet_ so that the next read will show
+    // up as being discontiguous.
+    uint16_t seq_after_gap = (NULL == nak) ? 0 : nak->end_seq_ + 1;
+    while ((rd_ != wr_) &&
+           (NULL == ring_[rd_]) &&
+          ((NULL == nak) || (seq_after_gap != rd_seq_))) {
+        rd_ = (rd_ + 1) % capacity_;
+        ++rd_seq_;
+    }
+    fetched_first_packet_ = false;
+}
+
+int AAH_RXPlayer::RXRingBuffer::computeInactivityTimeout() {
+    AutoMutex lock(&lock_);
+
+    if (!rtp_activity_timeout_valid_) {
+        return -1;
+    }
+
+    uint64_t now = monotonicUSecNow();
+    if (rtp_activity_timeout_ <= now) {
+        return 0;
+    }
+
+    return (rtp_activity_timeout_ - now) / 1000;
+}
+
+AAH_RXPlayer::PacketBuffer*
+AAH_RXPlayer::PacketBuffer::allocate(ssize_t length) {
+    if (length <= 0) {
+        return NULL;
+    }
+
+    uint32_t alloc_len = sizeof(PacketBuffer) + length;
+    PacketBuffer* ret = reinterpret_cast<PacketBuffer*>(
+                        new uint8_t[alloc_len]);
+
+    if (NULL != ret) {
+        ret->length_ = length;
+    }
+
+    return ret;
+}
+
+void AAH_RXPlayer::PacketBuffer::destroy(PacketBuffer* pb) {
+    uint8_t* kill_me = reinterpret_cast<uint8_t*>(pb);
+    delete[] kill_me;
+}
+
+}  // namespace android
diff --git a/media/libaah_rtp/aah_rx_player_substream.cpp b/media/libaah_rtp/aah_rx_player_substream.cpp
new file mode 100644
index 0000000..18b0e2b
--- /dev/null
+++ b/media/libaah_rtp/aah_rx_player_substream.cpp
@@ -0,0 +1,677 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include <include/avc_utils.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/Utils.h>
+
+#include "aah_rx_player.h"
+#include "aah_tx_packet.h"
+
+inline uint32_t min(uint32_t a, uint32_t b) {
+    return (a < b ? a : b);
+}
+
+namespace android {
+
+int64_t AAH_RXPlayer::Substream::kAboutToUnderflowThreshold =
+    50ull * 1000;
+
+AAH_RXPlayer::Substream::Substream(uint32_t ssrc, OMXClient& omx) {
+    ssrc_ = ssrc;
+    substream_details_known_ = false;
+    buffer_in_progress_ = NULL;
+    status_ = OK;
+    codec_mime_type_ = "";
+
+    decoder_ = new AAH_DecoderPump(omx);
+    if (decoder_ == NULL) {
+        ALOGE("%s failed to allocate decoder pump!", __PRETTY_FUNCTION__);
+    }
+    if (OK != decoder_->initCheck()) {
+        ALOGE("%s failed to initialize decoder pump!", __PRETTY_FUNCTION__);
+    }
+
+    // cleanupBufferInProgress will reset most of the internal state variables.
+    // Just need to make sure that buffer_in_progress_ is NULL before calling.
+    cleanupBufferInProgress();
+}
+
+AAH_RXPlayer::Substream::~Substream() {
+    shutdown();
+}
+
+void AAH_RXPlayer::Substream::shutdown() {
+    substream_meta_ = NULL;
+    status_ = OK;
+    cleanupBufferInProgress();
+    cleanupDecoder();
+}
+
+void AAH_RXPlayer::Substream::cleanupBufferInProgress() {
+    if (NULL != buffer_in_progress_) {
+        buffer_in_progress_->release();
+        buffer_in_progress_ = NULL;
+    }
+
+    expected_buffer_size_ = 0;
+    buffer_filled_ = 0;
+    waiting_for_rap_ = true;
+
+    aux_data_in_progress_.clear();
+    aux_data_expected_size_ = 0;
+}
+
+void AAH_RXPlayer::Substream::cleanupDecoder() {
+    if (decoder_ != NULL) {
+        decoder_->shutdown();
+    }
+}
+
+bool AAH_RXPlayer::Substream::shouldAbort(const char* log_tag) {
+    // If we have already encountered a fatal error, do nothing.  We are just
+    // waiting for our owner to shut us down now.
+    if (OK != status_) {
+        ALOGV("Skipping %s, substream has encountered fatal error (%d).",
+                log_tag, status_);
+        return true;
+    }
+
+    return false;
+}
+
+void AAH_RXPlayer::Substream::processPayloadStart(uint8_t* buf,
+                                                  uint32_t amt,
+                                                  int32_t ts_lower) {
+    uint32_t min_length = 6;
+
+    if (shouldAbort(__PRETTY_FUNCTION__)) {
+        return;
+    }
+
+    // Do we have a buffer in progress already?  If so, abort the buffer.  In
+    // theory, this should never happen.  If there were a discontinuity in the
+    // stream, the discontinuity in the seq_nos at the RTP level should have
+    // already triggered a cleanup of the buffer in progress.  To see a problem
+    // at this level is an indication either of a bug in the transmitter, or
+    // some form of terrible corruption/tampering on the wire.
+    if (NULL != buffer_in_progress_) {
+        ALOGE("processPayloadStart is aborting payload already in progress.");
+        cleanupBufferInProgress();
+    }
+
+    // Parse enough of the header to know where we stand.  Since this is a
+    // payload start, it should begin with a TRTP header which has to be at
+    // least 6 bytes long.
+    if (amt < min_length) {
+        ALOGV("Discarding payload too short to contain TRTP header (len = %u)",
+                amt);
+        return;
+    }
+
+    // Check the TRTP version number.
+    if (0x01 != buf[0]) {
+        ALOGV("Unexpected TRTP version (%u) in header.  Expected %u.",
+                buf[0], 1);
+        return;
+    }
+
+    // Extract the substream type field and make sure it's one we understand
+    // (and one that does not conflict with any previously received substream
+    // type).
+    uint8_t header_type = (buf[1] >> 4) & 0xF;
+    switch (header_type) {
+        case TRTPPacket::kHeaderTypeAudio:
+            // Audio, yay!  Just break.  We understand audio payloads.
+            break;
+        case TRTPPacket::kHeaderTypeVideo:
+            ALOGV("RXed packet with unhandled TRTP header type (Video).");
+            return;
+        case TRTPPacket::kHeaderTypeSubpicture:
+            ALOGV("RXed packet with unhandled TRTP header type (Subpicture).");
+            return;
+        case TRTPPacket::kHeaderTypeControl:
+            ALOGV("RXed packet with unhandled TRTP header type (Control).");
+            return;
+        default:
+            ALOGV("RXed packet with unhandled TRTP header type (%u).",
+                    header_type);
+            return;
+    }
+
+    if (substream_details_known_ && (header_type != substream_type_)) {
+        ALOGV("RXed TRTP Payload for SSRC=0x%08x where header type (%u) does"
+              " not match previously received header type (%u)",
+              ssrc_, header_type, substream_type_);
+        return;
+    }
+
+    // Check the flags to see if there is another 32 bits of timestamp present.
+    uint32_t trtp_header_len = 6;
+    bool ts_valid = buf[1] & TRTPPacket::kFlag_TSValid;
+    if (ts_valid) {
+        min_length += 4;
+        trtp_header_len += 4;
+        if (amt < min_length) {
+            ALOGV("Discarding payload too short to contain TRTP timestamp"
+                  " (len = %u)", amt);
+            return;
+        }
+    }
+
+    // Extract the TRTP length field and sanity check it.
+    uint32_t trtp_len = U32_AT(buf + 2);
+    if (trtp_len < min_length) {
+        ALOGV("TRTP length (%u) is too short to be valid.  Must be at least %u"
+              " bytes.", trtp_len, min_length);
+        return;
+    }
+
+    // Extract the rest of the timestamp field if valid.
+    int64_t ts = 0;
+    uint32_t parse_offset = 6;
+    if (ts_valid) {
+        uint32_t ts_upper = U32_AT(buf + parse_offset);
+        parse_offset += 4;
+        // Cast the lower bits to uint32_t so that sign extension does not
+        // clobber the upper 32 bits.
+        ts = (static_cast<int64_t>(ts_upper) << 32) |
+             static_cast<uint32_t>(ts_lower);
+    }
+
+    // Check the flags to see if there is another 24 bytes of timestamp
+    // transformation present.
+    if (buf[1] & TRTPPacket::kFlag_TSTransformPresent) {
+        min_length += 24;
+        parse_offset += 24;
+        trtp_header_len += 24;
+        if (amt < min_length) {
+            ALOGV("Discarding payload too short to contain TRTP timestamp"
+                  " transformation (len = %u)", amt);
+            return;
+        }
+    }
+
+    // TODO : break the parsing into individual parsers for the different
+    // payload types (audio, video, etc).
+    //
+    // At this point in time, we know that this is audio.  Go ahead and parse
+    // the basic header, check the codec type, and find the payload portion of
+    // the packet.
+    min_length += 3;
+    if (trtp_len < min_length) {
+        ALOGV("TRTP length (%u) is too short to be a valid audio payload.  Must"
+              " be at least %u bytes.", trtp_len, min_length);
+        return;
+    }
+
+    if (amt < min_length) {
+        ALOGV("TRTP porttion of RTP payload (%u bytes) too small to contain"
+              " entire TRTP header.  TRTP does not currently support"
+              " fragmenting TRTP headers across RTP payloads", amt);
+        return;
+    }
+
+    uint8_t codec_type = buf[parse_offset    ];
+    uint8_t flags      = buf[parse_offset + 1];
+    uint8_t volume     = buf[parse_offset + 2];
+    parse_offset += 3;
+    trtp_header_len += 3;
+
+    if (!setupSubstreamType(header_type, codec_type)) {
+        return;
+    }
+
+    if (decoder_ != NULL) {
+        decoder_->setRenderVolume(volume);
+    }
+
+    if (waiting_for_rap_ && !(flags & TRTPAudioPacket::kFlag_RandomAccessPoint)) {
+        ALOGV("Dropping non-RAP TRTP Audio Payload while waiting for RAP.");
+        return;
+    }
+
+    // Check for the presence of codec aux data.
+    if (flags & TRTPAudioPacket::kFlag_AuxLengthPresent) {
+        min_length += 4;
+        trtp_header_len += 4;
+
+        if (trtp_len < min_length) {
+            ALOGV("TRTP length (%u) is too short to be a valid audio payload.  "
+                  "Must be at least %u bytes.", trtp_len, min_length);
+            return;
+        }
+
+        if (amt < min_length) {
+            ALOGV("TRTP porttion of RTP payload (%u bytes) too small to contain"
+                  " entire TRTP header.  TRTP does not currently support"
+                  " fragmenting TRTP headers across RTP payloads", amt);
+            return;
+        }
+
+        aux_data_expected_size_ = U32_AT(buf + parse_offset);
+        aux_data_in_progress_.clear();
+        if (aux_data_in_progress_.capacity() < aux_data_expected_size_) {
+            aux_data_in_progress_.setCapacity(aux_data_expected_size_);
+        }
+    } else {
+        aux_data_expected_size_ = 0;
+    }
+
+    if ((aux_data_expected_size_ + trtp_header_len) > trtp_len) {
+        ALOGV("Expected codec aux data length (%u) and TRTP header overhead"
+              " (%u) too large for total TRTP payload length (%u).",
+             aux_data_expected_size_, trtp_header_len, trtp_len);
+        return;
+    }
+
+    // OK - everything left is just payload.  Compute the payload size, start
+    // the buffer in progress and pack as much payload as we can into it.  If
+    // the payload is finished once we are done, go ahead and send the payload
+    // to the decoder.
+    expected_buffer_size_ = trtp_len
+                          - trtp_header_len
+                          - aux_data_expected_size_;
+    if (!expected_buffer_size_) {
+        ALOGV("Dropping TRTP Audio Payload with 0 Access Unit length");
+        return;
+    }
+
+    CHECK(amt >= trtp_header_len);
+    uint32_t todo = amt - trtp_header_len;
+    if ((expected_buffer_size_ + aux_data_expected_size_) < todo) {
+        ALOGV("Extra data (%u > %u) present in initial TRTP Audio Payload;"
+              " dropping payload.", todo,
+              expected_buffer_size_ + aux_data_expected_size_);
+        return;
+    }
+
+    buffer_filled_ = 0;
+    buffer_in_progress_ = new MediaBuffer(expected_buffer_size_);
+    if ((NULL == buffer_in_progress_) ||
+            (NULL == buffer_in_progress_->data())) {
+        ALOGV("Failed to allocate MediaBuffer of length %u",
+                expected_buffer_size_);
+        cleanupBufferInProgress();
+        return;
+    }
+
+    sp<MetaData> meta = buffer_in_progress_->meta_data();
+    if (meta == NULL) {
+        ALOGV("Missing metadata structure in allocated MediaBuffer; dropping"
+              " payload");
+        cleanupBufferInProgress();
+        return;
+    }
+
+    meta->setCString(kKeyMIMEType, codec_mime_type_);
+    if (ts_valid) {
+        meta->setInt64(kKeyTime, ts);
+    }
+
+    // Skip over the header we have already extracted.
+    amt -= trtp_header_len;
+    buf += trtp_header_len;
+
+    // Extract as much of the expected aux data as we can.
+    todo = min(aux_data_expected_size_, amt);
+    if (todo) {
+        aux_data_in_progress_.appendArray(buf, todo);
+        buf += todo;
+        amt -= todo;
+    }
+
+    // Extract as much of the expected payload as we can.
+    todo = min(expected_buffer_size_, amt);
+    if (todo > 0) {
+        uint8_t* tgt =
+            reinterpret_cast<uint8_t*>(buffer_in_progress_->data());
+        memcpy(tgt, buf, todo);
+        buffer_filled_ = todo;
+        buf += todo;
+        amt -= todo;
+    }
+
+    if (buffer_filled_ >= expected_buffer_size_) {
+        processCompletedBuffer();
+    }
+}
+
+void AAH_RXPlayer::Substream::processPayloadCont(uint8_t* buf,
+                                                 uint32_t amt) {
+    if (shouldAbort(__PRETTY_FUNCTION__)) {
+        return;
+    }
+
+    if (NULL == buffer_in_progress_) {
+        ALOGV("TRTP Receiver skipping payload continuation; no buffer currently"
+              " in progress.");
+        return;
+    }
+
+    CHECK(aux_data_in_progress_.size() <= aux_data_expected_size_);
+    uint32_t aux_left = aux_data_expected_size_ - aux_data_in_progress_.size();
+    if (aux_left) {
+        uint32_t todo = min(aux_left, amt);
+        aux_data_in_progress_.appendArray(buf, todo);
+        amt -= todo;
+        buf += todo;
+
+        if (!amt)
+            return;
+    }
+
+    CHECK(buffer_filled_ < expected_buffer_size_);
+    uint32_t buffer_left = expected_buffer_size_ - buffer_filled_;
+    if (amt > buffer_left) {
+        ALOGV("Extra data (%u > %u) present in continued TRTP Audio Payload;"
+              " dropping payload.", amt, buffer_left);
+        cleanupBufferInProgress();
+        return;
+    }
+
+    if (amt > 0) {
+        uint8_t* tgt =
+            reinterpret_cast<uint8_t*>(buffer_in_progress_->data());
+        memcpy(tgt + buffer_filled_, buf, amt);
+        buffer_filled_ += amt;
+    }
+
+    if (buffer_filled_ >= expected_buffer_size_) {
+        processCompletedBuffer();
+    }
+}
+
+void AAH_RXPlayer::Substream::processCompletedBuffer() {
+    status_t res;
+
+    CHECK(NULL != buffer_in_progress_);
+
+    if (decoder_ == NULL) {
+        ALOGV("Dropping complete buffer, no decoder pump allocated");
+        goto bailout;
+    }
+
+    // Make sure our metadata used to initialize the decoder has been properly
+    // set up.
+    if (!setupSubstreamMeta())
+        goto bailout;
+
+    // If our decoder has not been set up yet, do so now.
+    res = decoder_->init(substream_meta_);
+    if (OK != res) {
+        ALOGE("Failed to init decoder (res = %d)", res);
+        cleanupDecoder();
+        substream_meta_ = NULL;
+        goto bailout;
+    }
+
+    // Queue the payload for decode.
+    res = decoder_->queueForDecode(buffer_in_progress_);
+
+    if (res != OK) {
+        ALOGD("Failed to queue payload for decode, resetting decoder pump!"
+             " (res = %d)", res);
+        status_ = res;
+        cleanupDecoder();
+        cleanupBufferInProgress();
+    }
+
+    // NULL out buffer_in_progress before calling the cleanup helper.
+    //
+    // MediaBuffers use something of a hybrid ref-counting pattern which
+    // prevents the AAH_DecoderPump's input queue from adding its own reference
+    // to the MediaBuffer.  MediaBuffers start life with a reference count of 0,
+    // as well as an observer which starts as NULL.  Before being given an
+    // observer, the ref count cannot be allowed to become non-zero as it will
+    // cause calls to release() to assert.  Basically, before a MediaBuffer has
+    // an observer, they behave like non-ref-counted objects where release()
+    // serves the role of delete.  After a MediaBuffer has an observer, they
+    // become more like ref counted objects where add ref and release can be
+    // used, and when the ref count hits zero, the MediaBuffer is handed off to
+    // the observer.
+    //
+    // Given all of this, when we give the buffer to the decoder pump to wait in
+    // the to-be-processed queue, the decoder cannot add a ref to the buffer as
+    // it would in a traditional ref counting system.  Instead it needs to
+    // "steal" the non-existent ref.  In the case of queue failure, we need to
+    // make certain to release this non-existent reference so that the buffer is
+    // cleaned up during the cleanupBufferInProgress helper.  In the case of a
+    // successful queue operation, we need to make certain that the
+    // cleanupBufferInProgress helper does not release the buffer since it needs
+    // to remain alive in the queue.  We accomplish this by NULLing out the
+    // buffer pointer before calling the cleanup helper.
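+    //
+    // In short: when queueForDecode() fails, the cleanup above releases the
+    // buffer; when it succeeds, the decoder pump owns the buffer, so we clear
+    // the pointer here to keep the bailout cleanup from releasing it.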
+    buffer_in_progress_ = NULL;
+
+bailout:
+    cleanupBufferInProgress();
+}
+
+bool AAH_RXPlayer::Substream::setupSubstreamMeta() {
+    switch (codec_type_) {
+        case TRTPAudioPacket::kCodecMPEG1Audio:
+            codec_mime_type_ = MEDIA_MIMETYPE_AUDIO_MPEG;
+            return setupMP3SubstreamMeta();
+
+        case TRTPAudioPacket::kCodecAACAudio:
+            codec_mime_type_ = MEDIA_MIMETYPE_AUDIO_AAC;
+            return setupAACSubstreamMeta();
+
+        default:
+            ALOGV("Failed to setup substream metadata for unsupported codec"
+                  " type (%u)", codec_type_);
+            break;
+    }
+
+    return false;
+}
+
+bool AAH_RXPlayer::Substream::setupMP3SubstreamMeta() {
+    const uint8_t* buffer_data = NULL;
+    int sample_rate;
+    int channel_count;
+    size_t frame_size;
+    status_t res;
+
+    buffer_data = reinterpret_cast<const uint8_t*>(buffer_in_progress_->data());
+    if (buffer_in_progress_->size() < 4) {
+        ALOGV("MP3 payload too short to contain header, dropping payload.");
+        return false;
+    }
+
+    // Extract the channel count and the sample rate from the MP3 header.  The
+    // stagefright MP3 decoder requires that these be delivered before decoding
+    // can begin.
+    if (!GetMPEGAudioFrameSize(U32_AT(buffer_data),
+                               &frame_size,
+                               &sample_rate,
+                               &channel_count,
+                               NULL,
+                               NULL)) {
+        ALOGV("Failed to parse MP3 header in payload, droping payload.");
+        return false;
+    }
+
+    // Make sure that our substream metadata is set up properly.  If there has
+    // been a format change, be sure to reset the underlying decoder.  In
+    // stagefright, it seems like the only way to do this is to destroy and
+    // recreate the decoder.
+    if (substream_meta_ == NULL) {
+        substream_meta_ = new MetaData();
+
+        if (substream_meta_ == NULL) {
+            ALOGE("Failed to allocate MetaData structure for MP3 substream");
+            return false;
+        }
+
+        substream_meta_->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
+        substream_meta_->setInt32  (kKeyChannelCount, channel_count);
+        substream_meta_->setInt32  (kKeySampleRate,   sample_rate);
+    } else {
+        int32_t prev_sample_rate;
+        int32_t prev_channel_count;
+        substream_meta_->findInt32(kKeySampleRate,   &prev_sample_rate);
+        substream_meta_->findInt32(kKeyChannelCount, &prev_channel_count);
+
+        if ((prev_channel_count != channel_count) ||
+            (prev_sample_rate   != sample_rate)) {
+            ALOGW("MP3 format change detected, forcing decoder reset.");
+            cleanupDecoder();
+
+            substream_meta_->setInt32(kKeyChannelCount, channel_count);
+            substream_meta_->setInt32(kKeySampleRate,   sample_rate);
+        }
+    }
+
+    return true;
+}
+
+bool AAH_RXPlayer::Substream::setupAACSubstreamMeta() {
+    int32_t sample_rate, channel_cnt;
+    static const size_t overhead = sizeof(sample_rate)
+                                 + sizeof(channel_cnt);
+
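+    // The AAC codec aux data is expected to be laid out as a big-endian 32-bit
+    // sample rate, a big-endian 32-bit channel count, and then the raw ESDS
+    // chunk (this matches the packing done by the transmitter in
+    // AAH_TXPlayer::onPrepareAsyncEvent).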
+    if (aux_data_in_progress_.size() < overhead) {
+        ALOGE("Not enough aux data (%u) to initialize AAC substream decoder",
+                aux_data_in_progress_.size());
+        return false;
+    }
+
+    const uint8_t* aux_data = aux_data_in_progress_.array();
+    size_t aux_data_size = aux_data_in_progress_.size();
+    sample_rate = U32_AT(aux_data);
+    channel_cnt = U32_AT(aux_data + sizeof(sample_rate));
+
+    const uint8_t* esds_data = NULL;
+    size_t esds_data_size = 0;
+    if (aux_data_size > overhead) {
+        esds_data = aux_data + overhead;
+        esds_data_size = aux_data_size - overhead;
+    }
+
+    // Do we already have metadata?  If so, has it changed at all?  If not, then
+    // there should be nothing else to do.  Otherwise, release our old stream
+    // metadata and make new metadata.
+    if (substream_meta_ != NULL) {
+        uint32_t type;
+        const void* data;
+        size_t size;
+        int32_t prev_sample_rate;
+        int32_t prev_channel_count;
+        bool res;
+
+        res = substream_meta_->findInt32(kKeySampleRate,   &prev_sample_rate);
+        CHECK(res);
+        res = substream_meta_->findInt32(kKeyChannelCount, &prev_channel_count);
+        CHECK(res);
+
+        // If nothing has changed about the codec aux data (esds, sample rate,
+        // channel count), then we can just do nothing and get out.  Otherwise,
+        // we will need to reset the decoder and make a new metadata object to
+        // deal with the format change.
+        bool hasData = (esds_data != NULL);
+        bool hadData = substream_meta_->findData(kKeyESDS, &type, &data, &size);
+        bool esds_change = (hadData != hasData);
+
+        if (!esds_change && hasData)
+            esds_change = ((size != esds_data_size) ||
+                           memcmp(data, esds_data, size));
+
+        if (!esds_change &&
+            (prev_sample_rate   == sample_rate) &&
+            (prev_channel_count == channel_cnt)) {
+            return true;  // no change, just get out.
+        }
+
+        ALOGW("AAC format change detected, forcing decoder reset.");
+        cleanupDecoder();
+        substream_meta_ = NULL;
+    }
+
+    CHECK(substream_meta_ == NULL);
+
+    substream_meta_ = new MetaData();
+    if (substream_meta_ == NULL) {
+        ALOGE("Failed to allocate MetaData structure for AAC substream");
+        return false;
+    }
+
+    substream_meta_->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC);
+    substream_meta_->setInt32  (kKeySampleRate,   sample_rate);
+    substream_meta_->setInt32  (kKeyChannelCount, channel_cnt);
+
+    if (esds_data) {
+        substream_meta_->setData(kKeyESDS, kTypeESDS,
+                                 esds_data, esds_data_size);
+    }
+
+    return true;
+}
+
+void AAH_RXPlayer::Substream::processTSTransform(const LinearTransform& trans) {
+    if (decoder_ != NULL) {
+        decoder_->setRenderTSTransform(trans);
+    }
+}
+
+bool AAH_RXPlayer::Substream::isAboutToUnderflow() {
+    if (decoder_ == NULL) {
+        return false;
+    }
+
+    return decoder_->isAboutToUnderflow(kAboutToUnderflowThreshold);
+}
+
+bool AAH_RXPlayer::Substream::setupSubstreamType(uint8_t substream_type,
+                                                 uint8_t codec_type) {
+    // Sanity check the codec type.  Right now we only support MP3 and AAC.
+    // Also check for conflicts with previously delivered codec types.
+    if (substream_details_known_) {
+        if (codec_type != codec_type_) {
+            ALOGV("RXed TRTP Payload for SSRC=0x%08x where codec type (%u) does"
+                  " not match previously received codec type (%u)",
+                 ssrc_, codec_type, codec_type_);
+            return false;
+        }
+
+        return true;
+    }
+
+    switch (codec_type) {
+        // MP3 and AAC are all we support right now.
+        case TRTPAudioPacket::kCodecMPEG1Audio:
+        case TRTPAudioPacket::kCodecAACAudio:
+            break;
+
+        default:
+            ALOGV("RXed TRTP Audio Payload for SSRC=0x%08x with unsupported"
+                  " codec type (%u)", ssrc_, codec_type);
+            return false;
+    }
+
+    substream_type_ = substream_type;
+    codec_type_ = codec_type;
+    substream_details_known_ = true;
+
+    return true;
+}
+
+}  // namespace android
diff --git a/media/libaah_rtp/aah_tx_packet.cpp b/media/libaah_rtp/aah_tx_packet.cpp
new file mode 100644
index 0000000..4cd6e47
--- /dev/null
+++ b/media/libaah_rtp/aah_tx_packet.cpp
@@ -0,0 +1,344 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+#include <utils/Log.h>
+
+#include <arpa/inet.h>
+#include <string.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+
+#include "aah_tx_packet.h"
+
+namespace android {
+
+const int TRTPPacket::kRTPHeaderLen;
+const uint32_t TRTPPacket::kTRTPEpochMask;
+
+TRTPPacket::~TRTPPacket() {
+    delete mPacket;
+}
+
+/*** TRTP packet properties ***/
+
+void TRTPPacket::setSeqNumber(uint16_t val) {
+    mSeqNumber = val;
+
+    if (mIsPacked) {
+        const int kTRTPSeqNumberOffset = 2;
+        uint16_t* buf = reinterpret_cast<uint16_t*>(
+            mPacket + kTRTPSeqNumberOffset);
+        *buf = htons(mSeqNumber);
+    }
+}
+
+uint16_t TRTPPacket::getSeqNumber() const {
+    return mSeqNumber;
+}
+
+void TRTPPacket::setPTS(int64_t val) {
+    CHECK(!mIsPacked);
+    mPTS = val;
+    mPTSValid = true;
+}
+
+int64_t TRTPPacket::getPTS() const {
+    return mPTS;
+}
+
+void TRTPPacket::setEpoch(uint32_t val) {
+    mEpoch = val;
+
+    if (mIsPacked) {
+        const int kTRTPEpochOffset = 8;
+        uint32_t* buf = reinterpret_cast<uint32_t*>(
+            mPacket + kTRTPEpochOffset);
+        uint32_t packed = ntohl(*buf);
+        packed &= ~(kTRTPEpochMask << kTRTPEpochShift);
+        packed |= (mEpoch & kTRTPEpochMask) << kTRTPEpochShift;
+        *buf = htonl(packed);
+    }
+}
+
+void TRTPPacket::setProgramID(uint16_t val) {
+    CHECK(!mIsPacked);
+    mProgramID = val;
+}
+
+void TRTPPacket::setSubstreamID(uint16_t val) {
+    CHECK(!mIsPacked);
+    mSubstreamID = val;
+}
+
+void TRTPPacket::setClockTransform(const LinearTransform& trans) {
+    CHECK(!mIsPacked);
+    mClockTranform = trans;
+    mClockTranformValid = true;
+}
+
+uint8_t* TRTPPacket::getPacket() const {
+    CHECK(mIsPacked);
+    return mPacket;
+}
+
+int TRTPPacket::getPacketLen() const {
+    CHECK(mIsPacked);
+    return mPacketLen;
+}
+
+void TRTPPacket::setExpireTime(nsecs_t val) {
+    CHECK(!mIsPacked);
+    mExpireTime = val;
+}
+
+nsecs_t TRTPPacket::getExpireTime() const {
+    return mExpireTime;
+}
+
+/*** TRTP audio packet properties ***/
+
+void TRTPAudioPacket::setCodecType(TRTPAudioCodecType val) {
+    CHECK(!mIsPacked);
+    mCodecType = val;
+}
+
+void TRTPAudioPacket::setRandomAccessPoint(bool val) {
+    CHECK(!mIsPacked);
+    mRandomAccessPoint = val;
+}
+
+void TRTPAudioPacket::setDropable(bool val) {
+    CHECK(!mIsPacked);
+    mDropable = val;
+}
+
+void TRTPAudioPacket::setDiscontinuity(bool val) {
+    CHECK(!mIsPacked);
+    mDiscontinuity = val;
+}
+
+void TRTPAudioPacket::setEndOfStream(bool val) {
+    CHECK(!mIsPacked);
+    mEndOfStream = val;
+}
+
+void TRTPAudioPacket::setVolume(uint8_t val) {
+    CHECK(!mIsPacked);
+    mVolume = val;
+}
+
+void TRTPAudioPacket::setAccessUnitData(const void* data, size_t len) {
+    CHECK(!mIsPacked);
+    mAccessUnitData = data;
+    mAccessUnitLen = len;
+}
+
+void TRTPAudioPacket::setAuxData(const void* data, size_t len) {
+    CHECK(!mIsPacked);
+    mAuxData = data;
+    mAuxDataLen = len;
+}
+
+/*** TRTP control packet properties ***/
+
+void TRTPControlPacket::setCommandID(TRTPCommandID val) {
+    CHECK(!mIsPacked);
+    mCommandID = val;
+}
+
+/*** TRTP packet serializers ***/
+
+void TRTPPacket::writeU8(uint8_t*& buf, uint8_t val) {
+    *buf = val;
+    buf++;
+}
+
+void TRTPPacket::writeU16(uint8_t*& buf, uint16_t val) {
+    *reinterpret_cast<uint16_t*>(buf) = htons(val);
+    buf += 2;
+}
+
+void TRTPPacket::writeU32(uint8_t*& buf, uint32_t val) {
+    *reinterpret_cast<uint32_t*>(buf) = htonl(val);
+    buf += 4;
+}
+
+void TRTPPacket::writeU64(uint8_t*& buf, uint64_t val) {
+    buf[0] = static_cast<uint8_t>(val >> 56);
+    buf[1] = static_cast<uint8_t>(val >> 48);
+    buf[2] = static_cast<uint8_t>(val >> 40);
+    buf[3] = static_cast<uint8_t>(val >> 32);
+    buf[4] = static_cast<uint8_t>(val >> 24);
+    buf[5] = static_cast<uint8_t>(val >> 16);
+    buf[6] = static_cast<uint8_t>(val >>  8);
+    buf[7] = static_cast<uint8_t>(val);
+    buf += 8;
+}
+
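+// Serialized layout produced by writeTRTPHeader() (all multi-byte fields are
+// big-endian):
+//
+//   RTP header (12 bytes):
+//     byte  0    : V(2) | P(1) | X(1) | CC(4)
+//     byte  1    : M(1, set on the first fragment) | PT(7)
+//     bytes 2-3  : sequence number
+//     bytes 4-7  : lower 32 bits of the PTS on the first fragment, else 0
+//     bytes 8-11 : epoch(22) | program ID(5) | substream ID(5)
+//
+//   TRTP header:
+//     byte  0    : TRTP version
+//     byte  1    : header type(4) | transform-present flag | TS-valid flag
+//     bytes 2-5  : TRTP length (total packet length minus the RTP header)
+//     + 4 bytes  : upper 32 bits of the PTS, present only when the PTS is valid
+//     + 24 bytes : clock transform (a_zero, a_to_b_numer, a_to_b_denom,
+//                  b_zero), present only when a transform has been set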
+void TRTPPacket::writeTRTPHeader(uint8_t*& buf,
+                                 bool isFirstFragment,
+                                 int totalPacketLen) {
+    // RTP header
+    writeU8(buf,
+            ((mVersion & 0x03) << 6) |
+            (static_cast<int>(mPadding) << 5) |
+            (static_cast<int>(mExtension) << 4) |
+            (mCsrcCount & 0x0F));
+    writeU8(buf,
+            (static_cast<int>(isFirstFragment) << 7) |
+            (mPayloadType & 0x7F));
+    writeU16(buf, mSeqNumber);
+    if (isFirstFragment && mPTSValid) {
+        writeU32(buf, mPTS & 0xFFFFFFFF);
+    } else {
+        writeU32(buf, 0);
+    }
+    writeU32(buf,
+            ((mEpoch & kTRTPEpochMask) << kTRTPEpochShift) |
+            ((mProgramID & 0x1F) << 5) |
+            (mSubstreamID & 0x1F));
+
+    // TRTP header
+    writeU8(buf, mTRTPVersion);
+    writeU8(buf,
+            ((mTRTPHeaderType & 0x0F) << 4) |
+            (mClockTranformValid ? 0x02 : 0x00) |
+            (mPTSValid ? 0x01 : 0x00));
+    writeU32(buf, totalPacketLen - kRTPHeaderLen);
+    if (mPTSValid) {
+        writeU32(buf, mPTS >> 32);
+    }
+
+    if (mClockTranformValid) {
+        writeU64(buf, mClockTranform.a_zero);
+        writeU32(buf, mClockTranform.a_to_b_numer);
+        writeU32(buf, mClockTranform.a_to_b_denom);
+        writeU64(buf, mClockTranform.b_zero);
+    }
+}
+
+bool TRTPAudioPacket::pack() {
+    if (mIsPacked) {
+        return false;
+    }
+
+    int packetLen = kRTPHeaderLen +
+                    mAuxDataLen +
+                    mAccessUnitLen +
+                    TRTPHeaderLen();
+
+    // TODO : support multiple fragments
+    const int kMaxUDPPayloadLen = 65507;
+    if (packetLen > kMaxUDPPayloadLen) {
+        return false;
+    }
+
+    mPacket = new uint8_t[packetLen];
+    if (!mPacket) {
+        return false;
+    }
+
+    mPacketLen = packetLen;
+
+    uint8_t* cur = mPacket;
+    bool hasAux = mAuxData && mAuxDataLen;
+    uint8_t flags = (static_cast<int>(hasAux) << 4) |
+                    (static_cast<int>(mRandomAccessPoint) << 3) |
+                    (static_cast<int>(mDropable) << 2) |
+                    (static_cast<int>(mDiscontinuity) << 1) |
+                    (static_cast<int>(mEndOfStream));
+
+    writeTRTPHeader(cur, true, packetLen);
+    writeU8(cur, mCodecType);
+    writeU8(cur, flags);
+    writeU8(cur, mVolume);
+
+    if (hasAux) {
+        writeU32(cur, mAuxDataLen);
+        memcpy(cur, mAuxData, mAuxDataLen);
+        cur += mAuxDataLen;
+    }
+
+    memcpy(cur, mAccessUnitData, mAccessUnitLen);
+
+    mIsPacked = true;
+    return true;
+}
+
+int TRTPPacket::TRTPHeaderLen() const {
+    // 6 bytes for version, payload type, flags and length.  An additional 4 if
+    // there are upper timestamp bits present and another 24 if there is a clock
+    // transformation present.
+    return 6 +
+           (mClockTranformValid ? 24 : 0) +
+           (mPTSValid ? 4 : 0);
+}
+
+int TRTPAudioPacket::TRTPHeaderLen() const {
+    // TRTPPacket::TRTPHeaderLen() for the base TRTP header, plus 3 bytes for
+    // audio's codec type, flags and volume fields.  Another 5 bytes if the
+    // codec type is PCM and we are sending sample rate/channel count, plus a
+    // 4-byte aux data length field when aux data is present (the aux data
+    // itself is accounted for separately in pack()).
+
+    int pcmParamLength;
+    switch(mCodecType) {
+        case kCodecPCMBigEndian:
+        case kCodecPCMLittleEndian:
+            pcmParamLength = 5;
+            break;
+
+        default:
+            pcmParamLength = 0;
+            break;
+    }
+
+    int auxDataLenField = (NULL != mAuxData) ? sizeof(uint32_t) : 0;
+    return TRTPPacket::TRTPHeaderLen() +
+           3 +
+           auxDataLenField +
+           pcmParamLength;
+}
+
+bool TRTPControlPacket::pack() {
+    if (mIsPacked) {
+        return false;
+    }
+
+    // command packets contain a 2-byte command ID
+    int packetLen = kRTPHeaderLen +
+                    TRTPHeaderLen() +
+                    2;
+
+    mPacket = new uint8_t[packetLen];
+    if (!mPacket) {
+        return false;
+    }
+
+    mPacketLen = packetLen;
+
+    uint8_t* cur = mPacket;
+
+    writeTRTPHeader(cur, true, packetLen);
+    writeU16(cur, mCommandID);
+
+    mIsPacked = true;
+    return true;
+}
+
+}  // namespace android
diff --git a/media/libaah_rtp/aah_tx_packet.h b/media/libaah_rtp/aah_tx_packet.h
new file mode 100644
index 0000000..7f78ea0
--- /dev/null
+++ b/media/libaah_rtp/aah_tx_packet.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __AAH_TX_PACKET_H__
+#define __AAH_TX_PACKET_H__
+
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/LinearTransform.h>
+#include <utils/RefBase.h>
+#include <utils/Timers.h>
+
+namespace android {
+
+class TRTPPacket : public RefBase {
+  public:
+    enum TRTPHeaderType {
+        kHeaderTypeAudio = 1,
+        kHeaderTypeVideo = 2,
+        kHeaderTypeSubpicture = 3,
+        kHeaderTypeControl = 4,
+    };
+
+    enum TRTPPayloadFlags {
+        kFlag_TSTransformPresent = 0x02,
+        kFlag_TSValid = 0x01,
+    };
+
+  protected:
+    TRTPPacket(TRTPHeaderType headerType)
+        : mIsPacked(false)
+        , mVersion(2)
+        , mPadding(false)
+        , mExtension(false)
+        , mCsrcCount(0)
+        , mPayloadType(100)
+        , mSeqNumber(0)
+        , mPTSValid(false)
+        , mPTS(0)
+        , mEpoch(0)
+        , mProgramID(0)
+        , mSubstreamID(0)
+        , mClockTranformValid(false)
+        , mTRTPVersion(1)
+        , mTRTPLength(0)
+        , mTRTPHeaderType(headerType)
+        , mPacket(NULL)
+        , mPacketLen(0) { }
+
+  public:
+    virtual ~TRTPPacket();
+
+    void setSeqNumber(uint16_t val);
+    uint16_t getSeqNumber() const;
+
+    void setPTS(int64_t val);
+    int64_t getPTS() const;
+
+    void setEpoch(uint32_t val);
+    void setProgramID(uint16_t val);
+    void setSubstreamID(uint16_t val);
+    void setClockTransform(const LinearTransform& trans);
+
+    uint8_t* getPacket() const;
+    int getPacketLen() const;
+
+    void setExpireTime(nsecs_t val);
+    nsecs_t getExpireTime() const;
+
+    virtual bool pack() = 0;
+
+    // mask and shift for the 22-bit TRTP epoch field, which is packed into the
+    // high bits of the RTP SSRC word along with the program and substream IDs
+    static const uint32_t kTRTPEpochMask = (1 << 22) - 1;
+    static const int kTRTPEpochShift = 10;
+
+  protected:
+    static const int kRTPHeaderLen = 12;
+    virtual int TRTPHeaderLen() const;
+
+    void writeTRTPHeader(uint8_t*& buf,
+                         bool isFirstFragment,
+                         int totalPacketLen);
+
+    void writeU8(uint8_t*& buf, uint8_t val);
+    void writeU16(uint8_t*& buf, uint16_t val);
+    void writeU32(uint8_t*& buf, uint32_t val);
+    void writeU64(uint8_t*& buf, uint64_t val);
+
+    bool mIsPacked;
+
+    uint8_t mVersion;
+    bool mPadding;
+    bool mExtension;
+    uint8_t mCsrcCount;
+    uint8_t mPayloadType;
+    uint16_t mSeqNumber;
+    bool mPTSValid;
+    int64_t  mPTS;
+    uint32_t mEpoch;
+    uint16_t mProgramID;
+    uint16_t mSubstreamID;
+    LinearTransform mClockTranform;
+    bool mClockTranformValid;
+    uint8_t mTRTPVersion;
+    uint32_t mTRTPLength;
+    TRTPHeaderType mTRTPHeaderType;
+
+    uint8_t* mPacket;
+    int mPacketLen;
+
+    nsecs_t mExpireTime;
+
+    DISALLOW_EVIL_CONSTRUCTORS(TRTPPacket);
+};
+
+class TRTPAudioPacket : public TRTPPacket {
+  public:
+    enum AudioPayloadFlags {
+        kFlag_AuxLengthPresent = 0x10,
+        kFlag_RandomAccessPoint = 0x08,
+        kFlag_Dropable = 0x04,
+        kFlag_Discontinuity = 0x02,
+        kFlag_EndOfStream = 0x01,
+    };
+
+    TRTPAudioPacket()
+        : TRTPPacket(kHeaderTypeAudio)
+        , mCodecType(kCodecInvalid)
+        , mRandomAccessPoint(false)
+        , mDropable(false)
+        , mDiscontinuity(false)
+        , mEndOfStream(false)
+        , mVolume(0)
+        , mAccessUnitData(NULL)
+        , mAccessUnitLen(0)
+        , mAuxData(NULL)
+        , mAuxDataLen(0) { }
+
+    enum TRTPAudioCodecType {
+        kCodecInvalid = 0,
+        kCodecPCMBigEndian = 1,
+        kCodecPCMLittleEndian = 2,
+        kCodecMPEG1Audio = 3,
+        kCodecAACAudio = 4,
+    };
+
+    void setCodecType(TRTPAudioCodecType val);
+    void setRandomAccessPoint(bool val);
+    void setDropable(bool val);
+    void setDiscontinuity(bool val);
+    void setEndOfStream(bool val);
+    void setVolume(uint8_t val);
+    void setAccessUnitData(const void* data, size_t len);
+    void setAuxData(const void* data, size_t len);
+
+    virtual bool pack();
+
+  protected:
+    virtual int TRTPHeaderLen() const;
+
+  private:
+    TRTPAudioCodecType mCodecType;
+    bool mRandomAccessPoint;
+    bool mDropable;
+    bool mDiscontinuity;
+    bool mEndOfStream;
+    uint8_t mVolume;
+
+    const void* mAccessUnitData;
+    size_t mAccessUnitLen;
+    const void* mAuxData;
+    size_t mAuxDataLen;
+
+    DISALLOW_EVIL_CONSTRUCTORS(TRTPAudioPacket);
+};
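+
+// Illustrative only: a minimal sketch of how a TRTPAudioPacket is built and
+// serialized.  The real transmit path builds packets in AAH_TXPlayer and hands
+// them to AAH_TXSender; 'mediaTimePTS', 'auData', 'auLen', 'sock' and 'addr'
+// below are hypothetical.
+//
+//   sp<TRTPAudioPacket> pkt = new TRTPAudioPacket();
+//   pkt->setCodecType(TRTPAudioPacket::kCodecMPEG1Audio);
+//   pkt->setPTS(mediaTimePTS);
+//   pkt->setAccessUnitData(auData, auLen);
+//   if (pkt->pack()) {
+//       sendto(sock, pkt->getPacket(), pkt->getPacketLen(), 0,
+//              reinterpret_cast<const sockaddr*>(&addr), sizeof(addr));
+//   }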
+
+class TRTPControlPacket : public TRTPPacket {
+  public:
+    TRTPControlPacket()
+        : TRTPPacket(kHeaderTypeControl)
+        , mCommandID(kCommandNop) {}
+
+    enum TRTPCommandID {
+        kCommandNop   = 1,
+        kCommandFlush = 2,
+        kCommandEOS   = 3,
+    };
+
+    void setCommandID(TRTPCommandID val);
+
+    virtual bool pack();
+
+  private:
+    TRTPCommandID mCommandID;
+
+    DISALLOW_EVIL_CONSTRUCTORS(TRTPControlPacket);
+};
+
+}  // namespace android
+
+#endif  // __AAH_TX_PACKET_H__
diff --git a/media/libaah_rtp/aah_tx_player.cpp b/media/libaah_rtp/aah_tx_player.cpp
new file mode 100644
index 0000000..974805b
--- /dev/null
+++ b/media/libaah_rtp/aah_tx_player.cpp
@@ -0,0 +1,1177 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+#include <utils/Log.h>
+
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+#include <netdb.h>
+#include <netinet/ip.h>
+
+#include <common_time/cc_helper.h>
+#include <media/IMediaPlayer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/FileSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <utils/Timers.h>
+
+#include "aah_tx_packet.h"
+#include "aah_tx_player.h"
+
+namespace android {
+
+static int64_t kLowWaterMarkUs = 2000000ll;  // 2secs
+static int64_t kHighWaterMarkUs = 10000000ll;  // 10secs
+static const size_t kLowWaterMarkBytes = 40000;
+static const size_t kHighWaterMarkBytes = 200000;
+
+// When we start up, how much lead time should we put on the first access unit?
+static const int64_t kAAHStartupLeadTimeUs = 300000LL;
+
+// How much time do we attempt to lead the clock by in steady state?
+static const int64_t kAAHBufferTimeUs = 1000000LL;
+
+// How long we keep data in our retransmit buffer after sending it.
+const int64_t AAH_TXPlayer::kAAHRetryKeepAroundTimeNs =
+    kAAHBufferTimeUs * 1100;
+
+sp<MediaPlayerBase> createAAH_TXPlayer() {
+    sp<MediaPlayerBase> ret = new AAH_TXPlayer();
+    return ret;
+}
+
+template <typename T> static T clamp(T val, T min, T max) {
+    if (val < min) {
+        return min;
+    } else if (val > max) {
+        return max;
+    } else {
+        return val;
+    }
+}
+
+struct AAH_TXEvent : public TimedEventQueue::Event {
+    AAH_TXEvent(AAH_TXPlayer *player,
+                void (AAH_TXPlayer::*method)()) : mPlayer(player)
+                                                , mMethod(method) {}
+
+  protected:
+    virtual ~AAH_TXEvent() {}
+
+    virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) {
+        (mPlayer->*mMethod)();
+    }
+
+  private:
+    AAH_TXPlayer *mPlayer;
+    void (AAH_TXPlayer::*mMethod)();
+
+    AAH_TXEvent(const AAH_TXEvent &);
+    AAH_TXEvent& operator=(const AAH_TXEvent &);
+};
+
+AAH_TXPlayer::AAH_TXPlayer()
+        : mQueueStarted(false)
+        , mFlags(0)
+        , mExtractorFlags(0) {
+    DataSource::RegisterDefaultSniffers();
+
+    mBufferingEvent = new AAH_TXEvent(this, &AAH_TXPlayer::onBufferingUpdate);
+    mBufferingEventPending = false;
+
+    mPumpAudioEvent = new AAH_TXEvent(this, &AAH_TXPlayer::onPumpAudio);
+    mPumpAudioEventPending = false;
+
+    mAudioCodecData = NULL;
+
+    reset_l();
+}
+
+AAH_TXPlayer::~AAH_TXPlayer() {
+    if (mQueueStarted) {
+        mQueue.stop();
+    }
+
+    reset_l();
+}
+
+void AAH_TXPlayer::cancelPlayerEvents(bool keepBufferingGoing) {
+    if (!keepBufferingGoing) {
+        mQueue.cancelEvent(mBufferingEvent->eventID());
+        mBufferingEventPending = false;
+
+        mQueue.cancelEvent(mPumpAudioEvent->eventID());
+        mPumpAudioEventPending = false;
+    }
+}
+
+status_t AAH_TXPlayer::initCheck() {
+    // Check for the presence of the common time service by attempting to query
+    // for CommonTime's frequency.  If we get an error back, we cannot talk to
+    // the service at all and should abort now.
+    status_t res;
+    uint64_t freq;
+    res = mCCHelper.getCommonFreq(&freq);
+    if (OK != res) {
+        ALOGE("Failed to connect to common time service! (res %d)", res);
+        return res;
+    }
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::setDataSource(
+        const char *url,
+        const KeyedVector<String8, String8> *headers) {
+    Mutex::Autolock autoLock(mLock);
+    return setDataSource_l(url, headers);
+}
+
+status_t AAH_TXPlayer::setDataSource_l(
+        const char *url,
+        const KeyedVector<String8, String8> *headers) {
+    reset_l();
+
+    mUri.setTo(url);
+
+    if (headers) {
+        mUriHeaders = *headers;
+
+        ssize_t index = mUriHeaders.indexOfKey(String8("x-hide-urls-from-log"));
+        if (index >= 0) {
+            // Browser is in "incognito" mode, suppress logging URLs.
+
+            // This isn't something that should be passed to the server.
+            mUriHeaders.removeItemsAt(index);
+
+            mFlags |= INCOGNITO;
+        }
+    }
+
+    // The URL may optionally contain a "#" character followed by a Skyjam
+    // cookie.  Ideally the cookie header should just be passed in the headers
+    // argument, but the Java API for supplying headers is apparently not yet
+    // exposed in the SDK used by application developers.
+    const char kSkyjamCookieDelimiter = '#';
+    char* skyjamCookie = strrchr(mUri.string(), kSkyjamCookieDelimiter);
+    if (skyjamCookie) {
+        skyjamCookie++;
+        mUriHeaders.add(String8("Cookie"), String8(skyjamCookie));
+        mUri = String8(mUri.string(), skyjamCookie - mUri.string());
+    }
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::setDataSource(int fd, int64_t offset, int64_t length) {
+    Mutex::Autolock autoLock(mLock);
+
+    reset_l();
+
+    sp<DataSource> dataSource = new FileSource(dup(fd), offset, length);
+
+    status_t err = dataSource->initCheck();
+
+    if (err != OK) {
+        return err;
+    }
+
+    mFileSource = dataSource;
+
+    sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
+
+    if (extractor == NULL) {
+        return UNKNOWN_ERROR;
+    }
+
+    return setDataSource_l(extractor);
+}
+
+status_t AAH_TXPlayer::setVideoSurface(const sp<Surface>& surface) {
+    return OK;
+}
+
+status_t AAH_TXPlayer::setVideoSurfaceTexture(
+        const sp<ISurfaceTexture>& surfaceTexture) {
+    return OK;
+}
+
+status_t AAH_TXPlayer::prepare() {
+    return INVALID_OPERATION;
+}
+
+status_t AAH_TXPlayer::prepareAsync() {
+    Mutex::Autolock autoLock(mLock);
+
+    return prepareAsync_l();
+}
+
+status_t AAH_TXPlayer::prepareAsync_l() {
+    if (mFlags & PREPARING) {
+        return UNKNOWN_ERROR;  // async prepare already pending
+    }
+
+    mAAH_Sender = AAH_TXSender::GetInstance();
+    if (mAAH_Sender == NULL) {
+        return NO_MEMORY;
+    }
+
+    if (!mQueueStarted) {
+        mQueue.start();
+        mQueueStarted = true;
+    }
+
+    mFlags |= PREPARING;
+    mAsyncPrepareEvent = new AAH_TXEvent(
+            this, &AAH_TXPlayer::onPrepareAsyncEvent);
+
+    mQueue.postEvent(mAsyncPrepareEvent);
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::finishSetDataSource_l() {
+    sp<DataSource> dataSource;
+
+    if (!strncasecmp("http://",  mUri.string(), 7) ||
+        !strncasecmp("https://", mUri.string(), 8)) {
+
+        mConnectingDataSource = HTTPBase::Create(
+                (mFlags & INCOGNITO)
+                    ? HTTPBase::kFlagIncognito
+                    : 0);
+
+        mLock.unlock();
+        status_t err = mConnectingDataSource->connect(mUri, &mUriHeaders);
+        mLock.lock();
+
+        if (err != OK) {
+            mConnectingDataSource.clear();
+
+            ALOGI("mConnectingDataSource->connect() returned %d", err);
+            return err;
+        }
+
+        mCachedSource = new NuCachedSource2(mConnectingDataSource);
+        mConnectingDataSource.clear();
+
+        dataSource = mCachedSource;
+
+        // We're going to prefill the cache before trying to instantiate
+        // the extractor below, as the latter is an operation that otherwise
+        // could block on the datasource for a significant amount of time.
+        // During that time we'd be unable to abort the preparation phase
+        // without this prefill.
+
+        mLock.unlock();
+
+        for (;;) {
+            status_t finalStatus;
+            size_t cachedDataRemaining =
+                mCachedSource->approxDataRemaining(&finalStatus);
+
+            if (finalStatus != OK ||
+                cachedDataRemaining >= kHighWaterMarkBytes ||
+                (mFlags & PREPARE_CANCELLED)) {
+                break;
+            }
+
+            usleep(200000);
+        }
+
+        mLock.lock();
+
+        if (mFlags & PREPARE_CANCELLED) {
+            ALOGI("Prepare cancelled while waiting for initial cache fill.");
+            return UNKNOWN_ERROR;
+        }
+    } else {
+        dataSource = DataSource::CreateFromURI(mUri.string(), &mUriHeaders);
+    }
+
+    if (dataSource == NULL) {
+        return UNKNOWN_ERROR;
+    }
+
+    sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
+
+    if (extractor == NULL) {
+        return UNKNOWN_ERROR;
+    }
+
+    return setDataSource_l(extractor);
+}
+
+status_t AAH_TXPlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
+    // Attempt to approximate the overall stream bitrate by summing the
+    // individual track bitrates.  If any track does not advertise a bitrate,
+    // treat the total as unknown.
+
+    int64_t totalBitRate = 0;
+
+    for (size_t i = 0; i < extractor->countTracks(); ++i) {
+        sp<MetaData> meta = extractor->getTrackMetaData(i);
+
+        int32_t bitrate;
+        if (!meta->findInt32(kKeyBitRate, &bitrate)) {
+            totalBitRate = -1;
+            break;
+        }
+
+        totalBitRate += bitrate;
+    }
+
+    mBitrate = totalBitRate;
+
+    ALOGV("mBitrate = %lld bits/sec", mBitrate);
+
+    bool haveAudio = false;
+    for (size_t i = 0; i < extractor->countTracks(); ++i) {
+        sp<MetaData> meta = extractor->getTrackMetaData(i);
+
+        const char *mime;
+        CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+        if (!strncasecmp(mime, "audio/", 6)) {
+            mAudioSource = extractor->getTrack(i);
+            CHECK(mAudioSource != NULL);
+            haveAudio = true;
+            break;
+        }
+    }
+
+    if (!haveAudio) {
+        return UNKNOWN_ERROR;
+    }
+
+    mExtractorFlags = extractor->flags();
+
+    return OK;
+}
+
+void AAH_TXPlayer::abortPrepare(status_t err) {
+    CHECK(err != OK);
+
+    notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+
+    mPrepareResult = err;
+    mFlags &= ~(PREPARING|PREPARE_CANCELLED|PREPARING_CONNECTED);
+    mPreparedCondition.broadcast();
+}
+
+void AAH_TXPlayer::onPrepareAsyncEvent() {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mFlags & PREPARE_CANCELLED) {
+        ALOGI("prepare was cancelled before doing anything");
+        abortPrepare(UNKNOWN_ERROR);
+        return;
+    }
+
+    if (mUri.size() > 0) {
+        status_t err = finishSetDataSource_l();
+
+        if (err != OK) {
+            abortPrepare(err);
+            return;
+        }
+    }
+
+    mAudioFormat = mAudioSource->getFormat();
+    if (!mAudioFormat->findInt64(kKeyDuration, &mDurationUs))
+        mDurationUs = 1;
+
+    const char* mime_type = NULL;
+    if (!mAudioFormat->findCString(kKeyMIMEType, &mime_type)) {
+        ALOGE("Failed to find audio substream MIME type during prepare.");
+        abortPrepare(BAD_VALUE);
+        return;
+    }
+
+    if (!strcmp(mime_type, MEDIA_MIMETYPE_AUDIO_MPEG)) {
+        mAudioCodec = TRTPAudioPacket::kCodecMPEG1Audio;
+    } else
+    if (!strcmp(mime_type, MEDIA_MIMETYPE_AUDIO_AAC)) {
+        mAudioCodec = TRTPAudioPacket::kCodecAACAudio;
+
+        uint32_t type;
+        int32_t  sample_rate;
+        int32_t  channel_count;
+        const void* esds_data;
+        size_t esds_len;
+
+        if (!mAudioFormat->findInt32(kKeySampleRate, &sample_rate)) {
+            ALOGE("Failed to find sample rate for AAC substream.");
+            abortPrepare(BAD_VALUE);
+            return;
+        }
+
+        if (!mAudioFormat->findInt32(kKeyChannelCount, &channel_count)) {
+            ALOGE("Failed to find channel count for AAC substream.");
+            abortPrepare(BAD_VALUE);
+            return;
+        }
+
+        if (!mAudioFormat->findData(kKeyESDS, &type, &esds_data, &esds_len)) {
+            ALOGE("Failed to find codec init data for AAC substream.");
+            abortPrepare(BAD_VALUE);
+            return;
+        }
+
+        CHECK(NULL == mAudioCodecData);
+        mAudioCodecDataSize = esds_len
+                            + sizeof(sample_rate)
+                            + sizeof(channel_count);
+        mAudioCodecData = new uint8_t[mAudioCodecDataSize];
+        if (NULL == mAudioCodecData) {
+            ALOGE("Failed to allocate %u bytes for AAC substream codec aux"
+                  " data.", mAudioCodecDataSize);
+            mAudioCodecDataSize = 0;
+            abortPrepare(BAD_VALUE);
+            return;
+        }
+
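+        // Pack the codec aux data as a big-endian 32-bit sample rate, a
+        // big-endian 32-bit channel count, and then the raw ESDS chunk; the
+        // receiver unpacks this layout in Substream::setupAACSubstreamMeta.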
+        uint8_t* tmp = mAudioCodecData;
+        tmp[0] = static_cast<uint8_t>((sample_rate   >> 24) & 0xFF);
+        tmp[1] = static_cast<uint8_t>((sample_rate   >> 16) & 0xFF);
+        tmp[2] = static_cast<uint8_t>((sample_rate   >>  8) & 0xFF);
+        tmp[3] = static_cast<uint8_t>((sample_rate        ) & 0xFF);
+        tmp[4] = static_cast<uint8_t>((channel_count >> 24) & 0xFF);
+        tmp[5] = static_cast<uint8_t>((channel_count >> 16) & 0xFF);
+        tmp[6] = static_cast<uint8_t>((channel_count >>  8) & 0xFF);
+        tmp[7] = static_cast<uint8_t>((channel_count      ) & 0xFF);
+
+        memcpy(tmp + 8, esds_data, esds_len);
+    } else {
+        ALOGE("Unsupported MIME type \"%s\" in audio substream", mime_type);
+        abortPrepare(BAD_VALUE);
+        return;
+    }
+
+    status_t err = mAudioSource->start();
+    if (err != OK) {
+        ALOGI("failed to start audio source, err=%d", err);
+        abortPrepare(err);
+        return;
+    }
+
+    mFlags |= PREPARING_CONNECTED;
+
+    if (mCachedSource != NULL) {
+        postBufferingEvent_l();
+    } else {
+        finishAsyncPrepare_l();
+    }
+}
+
+void AAH_TXPlayer::finishAsyncPrepare_l() {
+    notifyListener_l(MEDIA_PREPARED);
+
+    mPrepareResult = OK;
+    mFlags &= ~(PREPARING|PREPARE_CANCELLED|PREPARING_CONNECTED);
+    mFlags |= PREPARED;
+    mPreparedCondition.broadcast();
+}
+
+status_t AAH_TXPlayer::start() {
+    Mutex::Autolock autoLock(mLock);
+
+    mFlags &= ~CACHE_UNDERRUN;
+
+    return play_l();
+}
+
+status_t AAH_TXPlayer::play_l() {
+    if (mFlags & PLAYING) {
+        return OK;
+    }
+
+    if (!(mFlags & PREPARED)) {
+        return INVALID_OPERATION;
+    }
+
+    {
+        Mutex::Autolock lock(mEndpointLock);
+        if (!mEndpointValid) {
+            return INVALID_OPERATION;
+        }
+        if (!mEndpointRegistered) {
+            mProgramID = mAAH_Sender->registerEndpoint(mEndpoint);
+            mEndpointRegistered = true;
+        }
+    }
+
+    mFlags |= PLAYING;
+
+    updateClockTransform_l(false);
+
+    postPumpAudioEvent_l(-1);
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::stop() {
+    status_t ret = pause();
+    sendEOS_l();
+    return ret;
+}
+
+status_t AAH_TXPlayer::pause() {
+    Mutex::Autolock autoLock(mLock);
+
+    mFlags &= ~CACHE_UNDERRUN;
+
+    return pause_l();
+}
+
+status_t AAH_TXPlayer::pause_l(bool doClockUpdate) {
+    if (!(mFlags & PLAYING)) {
+        return OK;
+    }
+
+    cancelPlayerEvents(true /* keepBufferingGoing */);
+
+    mFlags &= ~PLAYING;
+
+    if (doClockUpdate) {
+        updateClockTransform_l(true);
+    }
+
+    return OK;
+}
+
+void AAH_TXPlayer::updateClockTransform_l(bool pause) {
+    // record the new pause status so that onPumpAudio knows what rate to apply
+    // when it initializes the transform
+    mPlayRateIsPaused = pause;
+
+    // if we haven't yet established a valid clock transform, then we can't
+    // do anything here
+    if (!mCurrentClockTransformValid) {
+        return;
+    }
+
+    // sample the current common time
+    int64_t commonTimeNow;
+    if (OK != mCCHelper.getCommonTime(&commonTimeNow)) {
+        ALOGE("updateClockTransform_l get common time failed");
+        mCurrentClockTransformValid = false;
+        return;
+    }
+
+    // convert the current common time to media time using the old
+    // transform
+    int64_t mediaTimeNow;
+    if (!mCurrentClockTransform.doReverseTransform(
+            commonTimeNow, &mediaTimeNow)) {
+        ALOGE("updateClockTransform_l reverse transform failed");
+        mCurrentClockTransformValid = false;
+        return;
+    }
+
+    // calculate a new transform that preserves the old transform's
+    // result for the current time
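+    // Under the usual utils/LinearTransform convention, media time (a) maps to
+    // common time (b) as b = b_zero + (a - a_zero) * a_to_b_numer /
+    // a_to_b_denom, so re-anchoring a_zero/b_zero at "now" keeps the mapping
+    // continuous across the rate change, and a denominator of 0 freezes media
+    // time, which is how pause is represented.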
+    mCurrentClockTransform.a_zero = mediaTimeNow;
+    mCurrentClockTransform.b_zero = commonTimeNow;
+    mCurrentClockTransform.a_to_b_numer = 1;
+    mCurrentClockTransform.a_to_b_denom = pause ? 0 : 1;
+
+    // send a packet announcing the new transform
+    sp<TRTPControlPacket> packet = new TRTPControlPacket();
+    packet->setClockTransform(mCurrentClockTransform);
+    packet->setCommandID(TRTPControlPacket::kCommandNop);
+    queuePacketToSender_l(packet);
+}
+
+void AAH_TXPlayer::sendEOS_l() {
+    sp<TRTPControlPacket> packet = new TRTPControlPacket();
+    packet->setCommandID(TRTPControlPacket::kCommandEOS);
+    queuePacketToSender_l(packet);
+}
+
+bool AAH_TXPlayer::isPlaying() {
+    return (mFlags & PLAYING) || (mFlags & CACHE_UNDERRUN);
+}
+
+status_t AAH_TXPlayer::seekTo(int msec) {
+    if (mExtractorFlags & MediaExtractor::CAN_SEEK) {
+        Mutex::Autolock autoLock(mLock);
+        return seekTo_l(static_cast<int64_t>(msec) * 1000);
+    }
+
+    notifyListener_l(MEDIA_SEEK_COMPLETE);
+    return OK;
+}
+
+status_t AAH_TXPlayer::seekTo_l(int64_t timeUs) {
+    mIsSeeking = true;
+    mSeekTimeUs = timeUs;
+
+    mCurrentClockTransformValid = false;
+    mLastQueuedMediaTimePTSValid = false;
+
+    // send a flush command packet
+    sp<TRTPControlPacket> packet = new TRTPControlPacket();
+    packet->setCommandID(TRTPControlPacket::kCommandFlush);
+    queuePacketToSender_l(packet);
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::getCurrentPosition(int *msec) {
+    if (!msec) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mLock);
+
+    int position;
+
+    if (mIsSeeking) {
+        position = mSeekTimeUs / 1000;
+    } else if (mCurrentClockTransformValid) {
+        // sample the current common time
+        int64_t commonTimeNow;
+        if (OK != mCCHelper.getCommonTime(&commonTimeNow)) {
+            ALOGE("getCurrentPosition get common time failed");
+            return INVALID_OPERATION;
+        }
+
+        int64_t mediaTimeNow;
+        if (!mCurrentClockTransform.doReverseTransform(commonTimeNow,
+                    &mediaTimeNow)) {
+            ALOGE("getCurrentPosition reverse transform failed");
+            return INVALID_OPERATION;
+        }
+
+        position = static_cast<int>(mediaTimeNow / 1000);
+    } else {
+        position = 0;
+    }
+
+    int duration;
+    if (getDuration_l(&duration) == OK) {
+        *msec = clamp(position, 0, duration);
+    } else {
+        *msec = (position >= 0) ? position : 0;
+    }
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::getDuration(int* msec) {
+    if (!msec) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mLock);
+
+    return getDuration_l(msec);
+}
+
+status_t AAH_TXPlayer::getDuration_l(int* msec) {
+    if (mDurationUs < 0) {
+        return UNKNOWN_ERROR;
+    }
+
+    *msec = (mDurationUs + 500) / 1000;
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::reset() {
+    Mutex::Autolock autoLock(mLock);
+    reset_l();
+    return OK;
+}
+
+void AAH_TXPlayer::reset_l() {
+    if (mFlags & PREPARING) {
+        mFlags |= PREPARE_CANCELLED;
+        if (mConnectingDataSource != NULL) {
+            ALOGI("interrupting the connection process");
+            mConnectingDataSource->disconnect();
+        }
+
+        if (mFlags & PREPARING_CONNECTED) {
+            // We are basically done preparing; we're just buffering enough
+            // data to start playback, and we can safely interrupt that.
+            finishAsyncPrepare_l();
+        }
+    }
+
+    while (mFlags & PREPARING) {
+        mPreparedCondition.wait(mLock);
+    }
+
+    cancelPlayerEvents();
+
+    sendEOS_l();
+
+    mCachedSource.clear();
+
+    if (mAudioSource != NULL) {
+        mAudioSource->stop();
+    }
+    mAudioSource.clear();
+    mAudioCodec = TRTPAudioPacket::kCodecInvalid;
+    mAudioFormat = NULL;
+    delete[] mAudioCodecData;
+    mAudioCodecData = NULL;
+    mAudioCodecDataSize = 0;
+
+    mFlags = 0;
+    mExtractorFlags = 0;
+
+    mDurationUs = -1;
+    mIsSeeking = false;
+    mSeekTimeUs = 0;
+
+    mUri.setTo("");
+    mUriHeaders.clear();
+
+    mFileSource.clear();
+
+    mBitrate = -1;
+
+    {
+        Mutex::Autolock lock(mEndpointLock);
+        if (mAAH_Sender != NULL && mEndpointRegistered) {
+            mAAH_Sender->unregisterEndpoint(mEndpoint);
+        }
+        mEndpointRegistered = false;
+        mEndpointValid = false;
+    }
+
+    mProgramID = 0;
+
+    mAAH_Sender.clear();
+    mLastQueuedMediaTimePTSValid = false;
+    mCurrentClockTransformValid = false;
+    mPlayRateIsPaused = false;
+
+    mTRTPVolume = 255;
+}
+
+status_t AAH_TXPlayer::setLooping(int loop) {
+    return OK;
+}
+
+player_type AAH_TXPlayer::playerType() {
+    return AAH_TX_PLAYER;
+}
+
+status_t AAH_TXPlayer::setParameter(int key, const Parcel &request) {
+    return ERROR_UNSUPPORTED;
+}
+
+status_t AAH_TXPlayer::getParameter(int key, Parcel *reply) {
+    return ERROR_UNSUPPORTED;
+}
+
+status_t AAH_TXPlayer::invoke(const Parcel& request, Parcel *reply) {
+    return INVALID_OPERATION;
+}
+
+status_t AAH_TXPlayer::getMetadata(const media::Metadata::Filter& ids,
+                                   Parcel* records) {
+    using media::Metadata;
+
+    Metadata metadata(records);
+
+    metadata.appendBool(Metadata::kPauseAvailable, true);
+    metadata.appendBool(Metadata::kSeekBackwardAvailable, false);
+    metadata.appendBool(Metadata::kSeekForwardAvailable, false);
+    metadata.appendBool(Metadata::kSeekAvailable, false);
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::setVolume(float leftVolume, float rightVolume) {
+    if (leftVolume != rightVolume) {
+        ALOGE("%s does not support per channel volume: %f, %f",
+              __PRETTY_FUNCTION__, leftVolume, rightVolume);
+    }
+
+    float volume = clamp(leftVolume, 0.0f, 1.0f);
+
+    Mutex::Autolock lock(mLock);
+    mTRTPVolume = static_cast<uint8_t>((volume * 255.0) + 0.5);
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::setAudioStreamType(audio_stream_type_t streamType) {
+    return OK;
+}
+
+status_t AAH_TXPlayer::setRetransmitEndpoint(
+        const struct sockaddr_in* endpoint) {
+    Mutex::Autolock lock(mLock);
+
+    if (NULL == endpoint)
+        return BAD_VALUE;
+
+    // Once the endpoint has been registered, it may not be changed.
+    if (mEndpointRegistered)
+        return INVALID_OPERATION;
+
+    mEndpoint.addr = endpoint->sin_addr.s_addr;
+    mEndpoint.port = endpoint->sin_port;
+    mEndpointValid = true;
+
+    return OK;
+}
+
+void AAH_TXPlayer::notifyListener_l(int msg, int ext1, int ext2) {
+    sendEvent(msg, ext1, ext2);
+}
+
+bool AAH_TXPlayer::getBitrate_l(int64_t *bitrate) {
+    off64_t size;
+    if (mDurationUs >= 0 &&
+        mCachedSource != NULL &&
+        mCachedSource->getSize(&size) == OK) {
+        *bitrate = size * 8000000ll / mDurationUs;  // in bits/sec
+        return true;
+    }
+
+    if (mBitrate >= 0) {
+        *bitrate = mBitrate;
+        return true;
+    }
+
+    *bitrate = 0;
+
+    return false;
+}
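+
+// Worked example of the estimate above: a 4,000,000 byte cached source with
+// mDurationUs = 200,000,000 (200 seconds) gives
+// 4,000,000 * 8000000 / 200,000,000 = 160,000 bits/sec (160 kbps).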
+
+// Returns true iff cached duration is available/applicable.
+bool AAH_TXPlayer::getCachedDuration_l(int64_t *durationUs, bool *eos) {
+    int64_t bitrate;
+
+    if (mCachedSource != NULL && getBitrate_l(&bitrate)) {
+        status_t finalStatus;
+        size_t cachedDataRemaining = mCachedSource->approxDataRemaining(
+                                        &finalStatus);
+        *durationUs = cachedDataRemaining * 8000000ll / bitrate;
+        *eos = (finalStatus != OK);
+        return true;
+    }
+
+    return false;
+}
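+
+// Worked example: 100,000 bytes of cached data remaining at an estimated
+// 160,000 bits/sec works out to 100,000 * 8000000 / 160,000 = 5,000,000 us,
+// i.e. roughly five seconds of playback left in the cache.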
+
+void AAH_TXPlayer::ensureCacheIsFetching_l() {
+    if (mCachedSource != NULL) {
+        mCachedSource->resumeFetchingIfNecessary();
+    }
+}
+
+void AAH_TXPlayer::postBufferingEvent_l() {
+    if (mBufferingEventPending) {
+        return;
+    }
+    mBufferingEventPending = true;
+    mQueue.postEventWithDelay(mBufferingEvent, 1000000ll);
+}
+
+void AAH_TXPlayer::postPumpAudioEvent_l(int64_t delayUs) {
+    if (mPumpAudioEventPending) {
+        return;
+    }
+    mPumpAudioEventPending = true;
+    mQueue.postEventWithDelay(mPumpAudioEvent, delayUs < 0 ? 10000 : delayUs);
+}
+
+void AAH_TXPlayer::onBufferingUpdate() {
+    Mutex::Autolock autoLock(mLock);
+    if (!mBufferingEventPending) {
+        return;
+    }
+    mBufferingEventPending = false;
+
+    if (mCachedSource != NULL) {
+        status_t finalStatus;
+        size_t cachedDataRemaining = mCachedSource->approxDataRemaining(
+                                        &finalStatus);
+        bool eos = (finalStatus != OK);
+
+        if (eos) {
+            if (finalStatus == ERROR_END_OF_STREAM) {
+                notifyListener_l(MEDIA_BUFFERING_UPDATE, 100);
+            }
+            if (mFlags & PREPARING) {
+                ALOGV("cache has reached EOS, prepare is done.");
+                finishAsyncPrepare_l();
+            }
+        } else {
+            int64_t bitrate;
+            if (getBitrate_l(&bitrate)) {
+                size_t cachedSize = mCachedSource->cachedSize();
+                int64_t cachedDurationUs = cachedSize * 8000000ll / bitrate;
+
+                int percentage = (100.0 * (double) cachedDurationUs)
+                               / mDurationUs;
+                if (percentage > 100) {
+                    percentage = 100;
+                }
+
+                notifyListener_l(MEDIA_BUFFERING_UPDATE, percentage);
+            } else {
+                // We don't know the bitrate of the stream, use absolute size
+                // limits to maintain the cache.
+
+                if ((mFlags & PLAYING) &&
+                    !eos &&
+                    (cachedDataRemaining < kLowWaterMarkBytes)) {
+                    ALOGI("cache is running low (< %d) , pausing.",
+                          kLowWaterMarkBytes);
+                    mFlags |= CACHE_UNDERRUN;
+                    pause_l();
+                    ensureCacheIsFetching_l();
+                    notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_START);
+                } else if (eos || cachedDataRemaining > kHighWaterMarkBytes) {
+                    if (mFlags & CACHE_UNDERRUN) {
+                        ALOGI("cache has filled up (> %d), resuming.",
+                              kHighWaterMarkBytes);
+                        mFlags &= ~CACHE_UNDERRUN;
+                        play_l();
+                        notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_END);
+                    } else if (mFlags & PREPARING) {
+                        ALOGV("cache has filled up (> %d), prepare is done",
+                              kHighWaterMarkBytes);
+                        finishAsyncPrepare_l();
+                    }
+                }
+            }
+        }
+    }
+
+    int64_t cachedDurationUs;
+    bool eos;
+    if (getCachedDuration_l(&cachedDurationUs, &eos)) {
+        ALOGV("cachedDurationUs = %.2f secs, eos=%d",
+              cachedDurationUs / 1E6, eos);
+
+        if ((mFlags & PLAYING) &&
+            !eos &&
+            (cachedDurationUs < kLowWaterMarkUs)) {
+            ALOGI("cache is running low (%.2f secs) , pausing.",
+                  cachedDurationUs / 1E6);
+            mFlags |= CACHE_UNDERRUN;
+            pause_l();
+            ensureCacheIsFetching_l();
+            notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_START);
+        } else if (eos || cachedDurationUs > kHighWaterMarkUs) {
+            if (mFlags & CACHE_UNDERRUN) {
+                ALOGI("cache has filled up (%.2f secs), resuming.",
+                      cachedDurationUs / 1E6);
+                mFlags &= ~CACHE_UNDERRUN;
+                play_l();
+                notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_END);
+            } else if (mFlags & PREPARING) {
+                ALOGV("cache has filled up (%.2f secs), prepare is done",
+                        cachedDurationUs / 1E6);
+                finishAsyncPrepare_l();
+            }
+        }
+    }
+
+    postBufferingEvent_l();
+}
+
+void AAH_TXPlayer::onPumpAudio() {
+    while (true) {
+        Mutex::Autolock autoLock(mLock);
+        // If this flag is clear, it's because someone has externally canceled
+        // this pump operation (probably because we are resetting/shutting down).
+        // Get out immediately, do not reschedule ourselves.
+        if (!mPumpAudioEventPending) {
+            return;
+        }
+
+        // Start by checking if there is still work to be done.  If we have
+        // never queued a payload (so we don't know what the last queued PTS is)
+        // or we have never established a MediaTime->CommonTime transformation,
+        // then we have work to do (one time through this loop should establish
+        // both).  Otherwise, we want to keep a fixed amount of presentation
+        // time worth of data buffered.  If we cannot get common time (the
+        // service is unavailable, or common time is undefined), then we don't
+        // have a lot of good options here.  For now, signal an error up to the
+        // app level and shut down the transmission pump.
+        int64_t commonTimeNow;
+        if (OK != mCCHelper.getCommonTime(&commonTimeNow)) {
+            // Failed to get common time; either the service is down or common
+            // time is not synced.  Raise an error and shut down the player.
+            ALOGE("*** Cannot pump audio, unable to fetch common time."
+                  "  Shutting down.");
+            notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, UNKNOWN_ERROR);
+            mPumpAudioEventPending = false;
+            break;
+        }
+
+        if (mCurrentClockTransformValid && mLastQueuedMediaTimePTSValid) {
+            int64_t mediaTimeNow;
+            bool conversionResult = mCurrentClockTransform.doReverseTransform(
+                                        commonTimeNow,
+                                        &mediaTimeNow);
+            CHECK(conversionResult);
+
+            if ((mediaTimeNow +
+                 kAAHBufferTimeUs -
+                 mLastQueuedMediaTimePTS) <= 0) {
+                break;
+            }
+        }
+
+        MediaSource::ReadOptions options;
+        if (mIsSeeking) {
+            options.setSeekTo(mSeekTimeUs);
+        }
+
+        MediaBuffer* mediaBuffer;
+        status_t err = mAudioSource->read(&mediaBuffer, &options);
+        if (err != NO_ERROR) {
+            if (err == ERROR_END_OF_STREAM) {
+                ALOGI("*** %s reached end of stream", __PRETTY_FUNCTION__);
+                notifyListener_l(MEDIA_BUFFERING_UPDATE, 100);
+                notifyListener_l(MEDIA_PLAYBACK_COMPLETE);
+                pause_l(false);
+                sendEOS_l();
+            } else {
+                ALOGE("*** %s read failed err=%d", __PRETTY_FUNCTION__, err);
+            }
+            return;
+        }
+
+        if (mIsSeeking) {
+            mIsSeeking = false;
+            notifyListener_l(MEDIA_SEEK_COMPLETE);
+        }
+
+        uint8_t* data = (static_cast<uint8_t*>(mediaBuffer->data()) +
+                mediaBuffer->range_offset());
+        ALOGV("*** %s got media buffer data=[%02hhx %02hhx %02hhx %02hhx]"
+              " offset=%d length=%d", __PRETTY_FUNCTION__,
+              data[0], data[1], data[2], data[3],
+              mediaBuffer->range_offset(), mediaBuffer->range_length());
+
+        int64_t mediaTimeUs;
+        CHECK(mediaBuffer->meta_data()->findInt64(kKeyTime, &mediaTimeUs));
+        ALOGV("*** timeUs=%lld", mediaTimeUs);
+
+        if (!mCurrentClockTransformValid) {
+            if (OK == mCCHelper.getCommonTime(&commonTimeNow)) {
+                mCurrentClockTransform.a_zero = mediaTimeUs;
+                mCurrentClockTransform.b_zero = commonTimeNow +
+                                                kAAHStartupLeadTimeUs;
+                mCurrentClockTransform.a_to_b_numer = 1;
+                mCurrentClockTransform.a_to_b_denom = mPlayRateIsPaused ? 0 : 1;
+                mCurrentClockTransformValid = true;
+            } else {
+                // Failed to get common time; either the service is down or
+                // common time is not synced.  Raise an error and shut down
+                // the player.
+                ALOGE("*** Cannot begin transmission, unable to fetch common"
+                      " time. Dropping sample with pts=%lld", mediaTimeUs);
+                notifyListener_l(MEDIA_ERROR,
+                                 MEDIA_ERROR_UNKNOWN,
+                                 UNKNOWN_ERROR);
+                mPumpAudioEventPending = false;
+                break;
+            }
+        }
+
+        ALOGV("*** transmitting packet with pts=%lld", mediaTimeUs);
+
+        sp<TRTPAudioPacket> packet = new TRTPAudioPacket();
+        packet->setPTS(mediaTimeUs);
+        packet->setSubstreamID(1);
+
+        packet->setCodecType(mAudioCodec);
+        packet->setVolume(mTRTPVolume);
+        // TODO : introduce a throttle for this so we can control the
+        // frequency with which transforms get sent.
+        packet->setClockTransform(mCurrentClockTransform);
+        packet->setAccessUnitData(data, mediaBuffer->range_length());
+
+        // TODO : while it's pretty much universally true that audio ES payloads
+        // are all RAPs across all codecs, it might be a good idea to throttle
+        // the frequency with which we send codec out of band data to the RXers.
+        // If/when we do, we need to flag only those payloads which have
+        // required out of band data attached to them as RAPs.
+        packet->setRandomAccessPoint(true);
+
+        if (mAudioCodecData && mAudioCodecDataSize) {
+            packet->setAuxData(mAudioCodecData, mAudioCodecDataSize);
+        }
+
+        queuePacketToSender_l(packet);
+        mediaBuffer->release();
+
+        mLastQueuedMediaTimePTSValid = true;
+        mLastQueuedMediaTimePTS = mediaTimeUs;
+    }
+
+    { // Explicit scope for the autolock pattern.
+        Mutex::Autolock autoLock(mLock);
+
+        // If someone has externally cleared this flag, it's because we should be
+        // shutting down.  Do not reschedule ourselves.
+        if (!mPumpAudioEventPending) {
+            return;
+        }
+
+        // Looks like no one canceled us explicitly.  Clear our flag and post a
+        // new event to ourselves.
+        mPumpAudioEventPending = false;
+        postPumpAudioEvent_l(10000);
+    }
+}
+
+void AAH_TXPlayer::queuePacketToSender_l(const sp<TRTPPacket>& packet) {
+    if (mAAH_Sender == NULL) {
+        return;
+    }
+
+    sp<AMessage> message = new AMessage(AAH_TXSender::kWhatSendPacket,
+                                        mAAH_Sender->handlerID());
+
+    {
+        Mutex::Autolock lock(mEndpointLock);
+        if (!mEndpointValid) {
+            return;
+        }
+
+        message->setInt32(AAH_TXSender::kSendPacketIPAddr, mEndpoint.addr);
+        message->setInt32(AAH_TXSender::kSendPacketPort, mEndpoint.port);
+    }
+
+    packet->setProgramID(mProgramID);
+    packet->setExpireTime(systemTime() + kAAHRetryKeepAroundTimeNs);
+    packet->pack();
+
+    message->setObject(AAH_TXSender::kSendPacketTRTPPacket, packet);
+
+    message->post();
+}
+
+}  // namespace android
diff --git a/media/libaah_rtp/aah_tx_player.h b/media/libaah_rtp/aah_tx_player.h
new file mode 100644
index 0000000..2e4b1f7
--- /dev/null
+++ b/media/libaah_rtp/aah_tx_player.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __AAH_TX_PLAYER_H__
+#define __AAH_TX_PLAYER_H__
+
+#include <common_time/cc_helper.h>
+#include <libstagefright/include/HTTPBase.h>
+#include <libstagefright/include/NuCachedSource2.h>
+#include <libstagefright/include/TimedEventQueue.h>
+#include <media/MediaPlayerInterface.h>
+#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaSource.h>
+#include <utils/LinearTransform.h>
+#include <utils/String8.h>
+#include <utils/threads.h>
+
+#include "aah_tx_sender.h"
+
+namespace android {
+
+class AAH_TXPlayer : public MediaPlayerHWInterface {
+  public:
+    AAH_TXPlayer();
+
+    virtual status_t    initCheck();
+    virtual status_t    setDataSource(const char *url,
+                                      const KeyedVector<String8, String8>*
+                                      headers);
+    virtual status_t    setDataSource(int fd, int64_t offset, int64_t length);
+    virtual status_t    setVideoSurface(const sp<Surface>& surface);
+    virtual status_t    setVideoSurfaceTexture(const sp<ISurfaceTexture>&
+                                               surfaceTexture);
+    virtual status_t    prepare();
+    virtual status_t    prepareAsync();
+    virtual status_t    start();
+    virtual status_t    stop();
+    virtual status_t    pause();
+    virtual bool        isPlaying();
+    virtual status_t    seekTo(int msec);
+    virtual status_t    getCurrentPosition(int *msec);
+    virtual status_t    getDuration(int *msec);
+    virtual status_t    reset();
+    virtual status_t    setLooping(int loop);
+    virtual player_type playerType();
+    virtual status_t    setParameter(int key, const Parcel &request);
+    virtual status_t    getParameter(int key, Parcel *reply);
+    virtual status_t    invoke(const Parcel& request, Parcel *reply);
+    virtual status_t    getMetadata(const media::Metadata::Filter& ids,
+                                    Parcel* records);
+    virtual status_t    setVolume(float leftVolume, float rightVolume);
+    virtual status_t    setAudioStreamType(audio_stream_type_t streamType);
+    virtual status_t    setRetransmitEndpoint(
+                            const struct sockaddr_in* endpoint);
+
+    static const int64_t kAAHRetryKeepAroundTimeNs;
+
+  protected:
+    virtual ~AAH_TXPlayer();
+
+  private:
+    friend struct AwesomeEvent;
+
+    enum {
+        PLAYING             = 1,
+        PREPARING           = 8,
+        PREPARED            = 16,
+        PREPARE_CANCELLED   = 64,
+        CACHE_UNDERRUN      = 128,
+
+        // We are basically done preparing but are currently buffering
+        // sufficient data to begin playback and finish the preparation
+        // phase for good.
+        PREPARING_CONNECTED = 2048,
+
+        INCOGNITO           = 32768,
+    };
+
+    status_t setDataSource_l(const char *url,
+                             const KeyedVector<String8, String8> *headers);
+    status_t setDataSource_l(const sp<MediaExtractor>& extractor);
+    status_t finishSetDataSource_l();
+    status_t prepareAsync_l();
+    void onPrepareAsyncEvent();
+    void finishAsyncPrepare_l();
+    void abortPrepare(status_t err);
+    status_t play_l();
+    status_t pause_l(bool doClockUpdate = true);
+    status_t seekTo_l(int64_t timeUs);
+    void updateClockTransform_l(bool pause);
+    void sendEOS_l();
+    void cancelPlayerEvents(bool keepBufferingGoing = false);
+    void reset_l();
+    void notifyListener_l(int msg, int ext1 = 0, int ext2 = 0);
+    bool getBitrate_l(int64_t* bitrate);
+    status_t getDuration_l(int* msec);
+    bool getCachedDuration_l(int64_t* durationUs, bool* eos);
+    void ensureCacheIsFetching_l();
+    void postBufferingEvent_l();
+    void postPumpAudioEvent_l(int64_t delayUs);
+    void onBufferingUpdate();
+    void onPumpAudio();
+    void queuePacketToSender_l(const sp<TRTPPacket>& packet);
+
+    Mutex mLock;
+
+    TimedEventQueue mQueue;
+    bool mQueueStarted;
+
+    sp<TimedEventQueue::Event> mBufferingEvent;
+    bool mBufferingEventPending;
+
+    uint32_t mFlags;
+    uint32_t mExtractorFlags;
+
+    String8 mUri;
+    KeyedVector<String8, String8> mUriHeaders;
+
+    sp<DataSource> mFileSource;
+
+    sp<TimedEventQueue::Event> mAsyncPrepareEvent;
+    Condition mPreparedCondition;
+    status_t mPrepareResult;
+
+    bool mIsSeeking;
+    int64_t mSeekTimeUs;
+
+    sp<TimedEventQueue::Event> mPumpAudioEvent;
+    bool mPumpAudioEventPending;
+
+    sp<HTTPBase> mConnectingDataSource;
+    sp<NuCachedSource2> mCachedSource;
+
+    sp<MediaSource> mAudioSource;
+    TRTPAudioPacket::TRTPAudioCodecType mAudioCodec;
+    sp<MetaData> mAudioFormat;
+    uint8_t* mAudioCodecData;
+    size_t mAudioCodecDataSize;
+
+    int64_t mDurationUs;
+    int64_t mBitrate;
+
+    sp<AAH_TXSender> mAAH_Sender;
+    LinearTransform  mCurrentClockTransform;
+    bool             mCurrentClockTransformValid;
+    int64_t          mLastQueuedMediaTimePTS;
+    bool             mLastQueuedMediaTimePTSValid;
+    bool             mPlayRateIsPaused;
+    CCHelper         mCCHelper;
+
+    Mutex mEndpointLock;
+    AAH_TXSender::Endpoint mEndpoint;
+    bool mEndpointValid;
+    bool mEndpointRegistered;
+    uint16_t mProgramID;
+    uint8_t mTRTPVolume;
+
+    DISALLOW_EVIL_CONSTRUCTORS(AAH_TXPlayer);
+};
+
+}  // namespace android
+
+#endif  // __AAH_TX_PLAYER_H__
diff --git a/media/libaah_rtp/aah_tx_sender.cpp b/media/libaah_rtp/aah_tx_sender.cpp
new file mode 100644
index 0000000..08e32d2
--- /dev/null
+++ b/media/libaah_rtp/aah_tx_sender.cpp
@@ -0,0 +1,603 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+#include <media/stagefright/foundation/ADebug.h>
+
+#include <netinet/in.h>
+#include <poll.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <media/stagefright/foundation/AMessage.h>
+#include <utils/misc.h>
+
+#include "aah_tx_player.h"
+#include "aah_tx_sender.h"
+
+namespace android {
+
+const char* AAH_TXSender::kSendPacketIPAddr = "ipaddr";
+const char* AAH_TXSender::kSendPacketPort = "port";
+const char* AAH_TXSender::kSendPacketTRTPPacket = "trtp";
+
+const int AAH_TXSender::kRetryTrimIntervalUs = 100000;
+const int AAH_TXSender::kHeartbeatIntervalUs = 1000000;
+const int AAH_TXSender::kRetryBufferCapacity = 100;
+const nsecs_t AAH_TXSender::kHeartbeatTimeout = 600ull * 1000000000ull;
+
+Mutex AAH_TXSender::sLock;
+wp<AAH_TXSender> AAH_TXSender::sInstance;
+uint32_t AAH_TXSender::sNextEpoch;
+bool AAH_TXSender::sNextEpochValid = false;
+
+AAH_TXSender::AAH_TXSender() : mSocket(-1) {
+    mLastSentPacketTime = systemTime();
+}
+
+sp<AAH_TXSender> AAH_TXSender::GetInstance() {
+    Mutex::Autolock autoLock(sLock);
+
+    sp<AAH_TXSender> sender = sInstance.promote();
+
+    if (sender == NULL) {
+        sender = new AAH_TXSender();
+        if (sender == NULL) {
+            return NULL;
+        }
+
+        sender->mLooper = new ALooper();
+        if (sender->mLooper == NULL) {
+            return NULL;
+        }
+
+        sender->mReflector = new AHandlerReflector<AAH_TXSender>(sender.get());
+        if (sender->mReflector == NULL) {
+            return NULL;
+        }
+
+        sender->mSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+        if (sender->mSocket == -1) {
+            ALOGW("%s unable to create socket", __PRETTY_FUNCTION__);
+            return NULL;
+        }
+
+        struct sockaddr_in bind_addr;
+        memset(&bind_addr, 0, sizeof(bind_addr));
+        bind_addr.sin_family = AF_INET;
+        if (bind(sender->mSocket,
+                 reinterpret_cast<const sockaddr*>(&bind_addr),
+                 sizeof(bind_addr)) < 0) {
+            ALOGW("%s unable to bind socket (errno %d)",
+                  __PRETTY_FUNCTION__, errno);
+            return NULL;
+        }
+
+        sender->mRetryReceiver = new RetryReceiver(sender.get());
+        if (sender->mRetryReceiver == NULL) {
+            return NULL;
+        }
+
+        sender->mLooper->setName("AAH_TXSender");
+        sender->mLooper->registerHandler(sender->mReflector);
+        sender->mLooper->start(false, false, PRIORITY_AUDIO);
+
+        if (sender->mRetryReceiver->run("AAH_TXSenderRetry", PRIORITY_AUDIO)
+                != OK) {
+            ALOGW("%s unable to start retry thread", __PRETTY_FUNCTION__);
+            return NULL;
+        }
+
+        sInstance = sender;
+    }
+
+    return sender;
+}
+
+AAH_TXSender::~AAH_TXSender() {
+    mLooper->stop();
+    mLooper->unregisterHandler(mReflector->id());
+
+    if (mRetryReceiver != NULL) {
+        mRetryReceiver->requestExit();
+        mRetryReceiver->mWakeupEvent.setEvent();
+        if (mRetryReceiver->requestExitAndWait() != OK) {
+            ALOGW("%s shutdown of retry receiver failed", __PRETTY_FUNCTION__);
+        }
+        mRetryReceiver->mSender = NULL;
+        mRetryReceiver.clear();
+    }
+
+    if (mSocket != -1) {
+        close(mSocket);
+    }
+}
+
+// Return the next epoch number usable for a newly instantiated endpoint.
+uint32_t AAH_TXSender::getNextEpoch() {
+    Mutex::Autolock autoLock(sLock);
+
+    if (sNextEpochValid) {
+        sNextEpoch = (sNextEpoch + 1) & TRTPPacket::kTRTPEpochMask;
+    } else {
+        sNextEpoch = ns2ms(systemTime()) & TRTPPacket::kTRTPEpochMask;
+        sNextEpochValid = true;
+    }
+
+    return sNextEpoch;
+}
+
+// Notify the sender that a player has started sending to this endpoint.
+// Returns a program ID for use by the calling player.
+uint16_t AAH_TXSender::registerEndpoint(const Endpoint& endpoint) {
+    Mutex::Autolock lock(mEndpointLock);
+
+    EndpointState* eps = mEndpointMap.valueFor(endpoint);
+    if (eps) {
+        eps->playerRefCount++;
+    } else {
+        eps = new EndpointState(getNextEpoch());
+        mEndpointMap.add(endpoint, eps);
+    }
+
+    // if this is the first registered endpoint, then send a message to start
+    // trimming retry buffers and a message to start sending heartbeats.
+    if (mEndpointMap.size() == 1) {
+        sp<AMessage> trimMessage = new AMessage(kWhatTrimRetryBuffers,
+                                                handlerID());
+        trimMessage->post(kRetryTrimIntervalUs);
+
+        sp<AMessage> heartbeatMessage = new AMessage(kWhatSendHeartbeats,
+                                                     handlerID());
+        heartbeatMessage->post(kHeartbeatIntervalUs);
+    }
+
+    eps->nextProgramID++;
+    return eps->nextProgramID;
+}
+
+// Notify the sender that a player has ceased sending to this endpoint.
+// An endpoint's state cannot be deleted until all of the endpoint's
+// registered players have called unregisterEndpoint.
+void AAH_TXSender::unregisterEndpoint(const Endpoint& endpoint) {
+    Mutex::Autolock lock(mEndpointLock);
+
+    EndpointState* eps = mEndpointMap.valueFor(endpoint);
+    if (eps) {
+        eps->playerRefCount--;
+        CHECK(eps->playerRefCount >= 0);
+    }
+}
+
+void AAH_TXSender::onMessageReceived(const sp<AMessage>& msg) {
+    switch (msg->what()) {
+        case kWhatSendPacket:
+            onSendPacket(msg);
+            break;
+
+        case kWhatTrimRetryBuffers:
+            trimRetryBuffers();
+            break;
+
+        case kWhatSendHeartbeats:
+            sendHeartbeats();
+            break;
+
+        default:
+            TRESPASS();
+            break;
+    }
+}
+
+void AAH_TXSender::onSendPacket(const sp<AMessage>& msg) {
+    sp<RefBase> obj;
+    CHECK(msg->findObject(kSendPacketTRTPPacket, &obj));
+    sp<TRTPPacket> packet = static_cast<TRTPPacket*>(obj.get());
+
+    uint32_t ipAddr;
+    CHECK(msg->findInt32(kSendPacketIPAddr,
+                         reinterpret_cast<int32_t*>(&ipAddr)));
+
+    int32_t port32;
+    CHECK(msg->findInt32(kSendPacketPort, &port32));
+    uint16_t port = port32;
+
+    Mutex::Autolock lock(mEndpointLock);
+    doSendPacket_l(packet, Endpoint(ipAddr, port));
+    mLastSentPacketTime = systemTime();
+}
+
+void AAH_TXSender::doSendPacket_l(const sp<TRTPPacket>& packet,
+                                  const Endpoint& endpoint) {
+    EndpointState* eps = mEndpointMap.valueFor(endpoint);
+    if (!eps) {
+        // the endpoint state has disappeared, so the player that sent this
+        // packet must be dead.
+        return;
+    }
+
+    // assign the packet's sequence number
+    packet->setEpoch(eps->epoch);
+    packet->setSeqNumber(eps->trtpSeqNumber++);
+
+    // add the packet to the retry buffer
+    RetryBuffer& retry = eps->retry;
+    retry.push_back(packet);
+
+    // send the packet
+    struct sockaddr_in addr;
+    memset(&addr, 0, sizeof(addr));
+    addr.sin_family = AF_INET;
+    addr.sin_addr.s_addr = endpoint.addr;
+    addr.sin_port = endpoint.port;
+
+    ssize_t result = sendto(mSocket,
+                            packet->getPacket(),
+                            packet->getPacketLen(),
+                            0,
+                            (const struct sockaddr *) &addr,
+                            sizeof(addr));
+    if (result == -1) {
+        ALOGW("%s sendto failed", __PRETTY_FUNCTION__);
+    }
+}
+
+void AAH_TXSender::trimRetryBuffers() {
+    Mutex::Autolock lock(mEndpointLock);
+
+    nsecs_t localTimeNow = systemTime();
+
+    Vector<Endpoint> endpointsToRemove;
+
+    for (size_t i = 0; i < mEndpointMap.size(); i++) {
+        EndpointState* eps = mEndpointMap.editValueAt(i);
+        RetryBuffer& retry = eps->retry;
+
+        while (!retry.isEmpty()) {
+            if (retry[0]->getExpireTime() < localTimeNow) {
+                retry.pop_front();
+            } else {
+                break;
+            }
+        }
+
+        if (retry.isEmpty() && eps->playerRefCount == 0) {
+            endpointsToRemove.add(mEndpointMap.keyAt(i));
+        }
+    }
+
+    // remove the state for any endpoints that are no longer in use
+    for (size_t i = 0; i < endpointsToRemove.size(); i++) {
+        Endpoint& e = endpointsToRemove.editItemAt(i);
+        ALOGD("*** %s removing endpoint addr=%08x",
+                __PRETTY_FUNCTION__, e.addr);
+        size_t index = mEndpointMap.indexOfKey(e);
+        delete mEndpointMap.valueAt(index);
+        mEndpointMap.removeItemsAt(index);
+    }
+
+    // schedule the next trim
+    if (mEndpointMap.size()) {
+        sp<AMessage> trimMessage = new AMessage(kWhatTrimRetryBuffers,
+                                                handlerID());
+        trimMessage->post(kRetryTrimIntervalUs);
+    }
+}
+
+void AAH_TXSender::sendHeartbeats() {
+    Mutex::Autolock lock(mEndpointLock);
+
+    if (shouldSendHeartbeats_l()) {
+        for (size_t i = 0; i < mEndpointMap.size(); i++) {
+            EndpointState* eps = mEndpointMap.editValueAt(i);
+            const Endpoint& ep = mEndpointMap.keyAt(i);
+
+            sp<TRTPControlPacket> packet = new TRTPControlPacket();
+            packet->setCommandID(TRTPControlPacket::kCommandNop);
+
+            packet->setExpireTime(systemTime() +
+                                  AAH_TXPlayer::kAAHRetryKeepAroundTimeNs);
+            packet->pack();
+
+            doSendPacket_l(packet, ep);
+        }
+    }
+
+    // schedule the next heartbeat
+    if (mEndpointMap.size()) {
+        sp<AMessage> heartbeatMessage = new AMessage(kWhatSendHeartbeats,
+                                                     handlerID());
+        heartbeatMessage->post(kHeartbeatIntervalUs);
+    }
+}
+
+bool AAH_TXSender::shouldSendHeartbeats_l() {
+    // assert(holding endpoint lock)
+    return (systemTime() < (mLastSentPacketTime + kHeartbeatTimeout));
+}
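+
+// Note: kHeartbeatTimeout is 600 * 10^9 ns (see the constants above), so
+// heartbeats keep flowing to the endpoints in the map until 10 minutes have
+// passed since the last outbound packet, and resume once onSendPacket()
+// refreshes mLastSentPacketTime.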
+
+// Receiver
+
+// initial 4-byte ID of a retry request packet
+const uint32_t AAH_TXSender::RetryReceiver::kRetryRequestID = 'Treq';
+
+// initial 4-byte ID of a retry NAK packet
+const uint32_t AAH_TXSender::RetryReceiver::kRetryNakID = 'Tnak';
+
+// initial 4-byte ID of a fast start request packet
+const uint32_t AAH_TXSender::RetryReceiver::kFastStartRequestID = 'Tfst';
+
+AAH_TXSender::RetryReceiver::RetryReceiver(AAH_TXSender* sender)
+        : Thread(false)
+        , mSender(sender) {}
+
+AAH_TXSender::RetryReceiver::~RetryReceiver() {
+    mWakeupEvent.clearPendingEvents();
+}
+
+// Returns true if val is within the interval bounded inclusively by
+// start and end.  Also handles the case where there is a rollover of the
+// range between start and end.
+template <typename T>
+static inline bool withinIntervalWithRollover(T val, T start, T end) {
+    return ((start <= end && val >= start && val <= end) ||
+            (start > end && (val >= start || val <= end)));
+}
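+
+// Examples with uint16_t sequence numbers (illustrative only):
+//   withinIntervalWithRollover<uint16_t>(7, 5, 10)         -> true  (no rollover)
+//   withinIntervalWithRollover<uint16_t>(3, 65500, 10)     -> true  (rolled over)
+//   withinIntervalWithRollover<uint16_t>(30000, 65500, 10) -> false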
+
+bool AAH_TXSender::RetryReceiver::threadLoop() {
+    struct pollfd pollFds[2];
+    pollFds[0].fd = mSender->mSocket;
+    pollFds[0].events = POLLIN;
+    pollFds[0].revents = 0;
+    pollFds[1].fd = mWakeupEvent.getWakeupHandle();
+    pollFds[1].events = POLLIN;
+    pollFds[1].revents = 0;
+
+    int pollResult = poll(pollFds, NELEM(pollFds), -1);
+    if (pollResult == -1) {
+        ALOGE("%s poll failed", __PRETTY_FUNCTION__);
+        return false;
+    }
+
+    if (exitPending()) {
+        ALOGI("*** %s exiting", __PRETTY_FUNCTION__);
+        return false;
+    }
+
+    if (pollFds[0].revents) {
+        handleRetryRequest();
+    }
+
+    return true;
+}
+
+void AAH_TXSender::RetryReceiver::handleRetryRequest() {
+    ALOGV("*** RX %s start", __PRETTY_FUNCTION__);
+
+    RetryPacket request;
+    struct sockaddr requestSrcAddr;
+    socklen_t requestSrcAddrLen = sizeof(requestSrcAddr);
+
+    ssize_t result = recvfrom(mSender->mSocket, &request, sizeof(request), 0,
+                              &requestSrcAddr, &requestSrcAddrLen);
+    if (result == -1) {
+        ALOGE("%s recvfrom failed, errno=%d", __PRETTY_FUNCTION__, errno);
+        return;
+    }
+
+    if (static_cast<size_t>(result) < sizeof(RetryPacket)) {
+        ALOGW("%s short packet received", __PRETTY_FUNCTION__);
+        return;
+    }
+
+    uint32_t host_request_id = ntohl(request.id);
+    if ((host_request_id != kRetryRequestID) &&
+        (host_request_id != kFastStartRequestID)) {
+        ALOGW("%s received retry request with bogus ID (%08x)",
+                __PRETTY_FUNCTION__, host_request_id);
+        return;
+    }
+
+    Endpoint endpoint(request.endpointIP, request.endpointPort);
+
+    Mutex::Autolock lock(mSender->mEndpointLock);
+
+    EndpointState* eps = mSender->mEndpointMap.valueFor(endpoint);
+
+    if (eps == NULL || eps->retry.isEmpty()) {
+        // we have no retry buffer or an empty retry buffer for this endpoint,
+        // so NAK the entire request
+        RetryPacket nak = request;
+        nak.id = htonl(kRetryNakID);
+        result = sendto(mSender->mSocket, &nak, sizeof(nak), 0,
+                        &requestSrcAddr, requestSrcAddrLen);
+        if (result == -1) {
+            ALOGW("%s sendto failed", __PRETTY_FUNCTION__);
+        }
+        return;
+    }
+
+    RetryBuffer& retry = eps->retry;
+
+    uint16_t startSeq = ntohs(request.seqStart);
+    uint16_t endSeq = ntohs(request.seqEnd);
+
+    uint16_t retryFirstSeq = retry[0]->getSeqNumber();
+    uint16_t retryLastSeq = retry[retry.size() - 1]->getSeqNumber();
+
+    // If this is a fast start, then force the start of the retry to match the
+    // start of the retransmit ring buffer (unless the end of the retransmit
+    // ring buffer is already past the point of fast start)
+    if ((host_request_id == kFastStartRequestID) &&
+        !((startSeq - retryFirstSeq) & 0x8000)) {
+        startSeq = retryFirstSeq;
+    }
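+    // The check above treats (startSeq - retryFirstSeq) as a signed 16-bit
+    // delta: bit 15 is clear when the requested fast start point is at or
+    // after the first packet still held in the retry buffer.  Illustrative
+    // values: startSeq = 5 with retryFirstSeq = 65530 is a delta of +11, so
+    // startSeq is pulled back to 65530; startSeq = 65520 is a delta of -10
+    // (bit 15 set), so it is left untouched.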
+
+    int startIndex;
+    if (withinIntervalWithRollover(startSeq, retryFirstSeq, retryLastSeq)) {
+        startIndex = static_cast<uint16_t>(startSeq - retryFirstSeq);
+    } else {
+        startIndex = -1;
+    }
+
+    int endIndex;
+    if (withinIntervalWithRollover(endSeq, retryFirstSeq, retryLastSeq)) {
+        endIndex = static_cast<uint16_t>(endSeq - retryFirstSeq);
+    } else {
+        endIndex = -1;
+    }
+
+    if (startIndex == -1 && endIndex == -1) {
+        // no part of the request range is found in the retry buffer
+        RetryPacket nak = request;
+        nak.id = htonl(kRetryNakID);
+        result = sendto(mSender->mSocket, &nak, sizeof(nak), 0,
+                        &requestSrcAddr, requestSrcAddrLen);
+        if (result == -1) {
+            ALOGW("%s sendto failed", __PRETTY_FUNCTION__);
+        }
+        return;
+    }
+
+    if (startIndex == -1) {
+        // NAK a subrange at the front of the request range
+        RetryPacket nak = request;
+        nak.id = htonl(kRetryNakID);
+        nak.seqEnd = htons(retryFirstSeq - 1);
+        result = sendto(mSender->mSocket, &nak, sizeof(nak), 0,
+                        &requestSrcAddr, requestSrcAddrLen);
+        if (result == -1) {
+            ALOGW("%s sendto failed", __PRETTY_FUNCTION__);
+        }
+
+        startIndex = 0;
+    } else if (endIndex == -1) {
+        // NAK a subrange at the back of the request range
+        RetryPacket nak = request;
+        nak.id = htonl(kRetryNakID);
+        nak.seqStart = htons(retryLastSeq + 1);
+        result = sendto(mSender->mSocket, &nak, sizeof(nak), 0,
+                        &requestSrcAddr, requestSrcAddrLen);
+        if (result == -1) {
+            ALOGW("%s sendto failed", __PRETTY_FUNCTION__);
+        }
+
+        endIndex = retry.size() - 1;
+    }
+
+    // send the retry packets
+    for (int i = startIndex; i <= endIndex; i++) {
+        const sp<TRTPPacket>& replyPacket = retry[i];
+
+        result = sendto(mSender->mSocket,
+                        replyPacket->getPacket(),
+                        replyPacket->getPacketLen(),
+                        0,
+                        &requestSrcAddr,
+                        requestSrcAddrLen);
+
+        if (result == -1) {
+            ALOGW("%s sendto failed", __PRETTY_FUNCTION__);
+        }
+    }
+}
+
+// Endpoint
+
+AAH_TXSender::Endpoint::Endpoint()
+        : addr(0)
+        , port(0) { }
+
+AAH_TXSender::Endpoint::Endpoint(uint32_t a, uint16_t p)
+        : addr(a)
+        , port(p) {}
+
+bool AAH_TXSender::Endpoint::operator<(const Endpoint& other) const {
+    return ((addr < other.addr) ||
+            (addr == other.addr && port < other.port));
+}
+
+// EndpointState
+
+AAH_TXSender::EndpointState::EndpointState(uint32_t _epoch)
+    : retry(kRetryBufferCapacity)
+    , playerRefCount(1)
+    , trtpSeqNumber(0)
+    , nextProgramID(0)
+    , epoch(_epoch) { }
+
+// CircularBuffer
+
+template <typename T>
+CircularBuffer<T>::CircularBuffer(size_t capacity)
+        : mCapacity(capacity)
+        , mHead(0)
+        , mTail(0)
+        , mFillCount(0) {
+    mBuffer = new T[capacity];
+}
+
+template <typename T>
+CircularBuffer<T>::~CircularBuffer() {
+    delete [] mBuffer;
+}
+
+template <typename T>
+void CircularBuffer<T>::push_back(const T& item) {
+    if (this->isFull()) {
+        this->pop_front();
+    }
+    mBuffer[mHead] = item;
+    mHead = (mHead + 1) % mCapacity;
+    mFillCount++;
+}
+
+template <typename T>
+void CircularBuffer<T>::pop_front() {
+    CHECK(!isEmpty());
+    mBuffer[mTail] = T();
+    mTail = (mTail + 1) % mCapacity;
+    mFillCount--;
+}
+
+template <typename T>
+size_t CircularBuffer<T>::size() const {
+    return mFillCount;
+}
+
+template <typename T>
+bool CircularBuffer<T>::isFull() const {
+    return (mFillCount == mCapacity);
+}
+
+template <typename T>
+bool CircularBuffer<T>::isEmpty() const {
+    return (mFillCount == 0);
+}
+
+template <typename T>
+const T& CircularBuffer<T>::itemAt(size_t index) const {
+    CHECK(index < mFillCount);
+    return mBuffer[(mTail + index) % mCapacity];
+}
+
+template <typename T>
+const T& CircularBuffer<T>::operator[](size_t index) const {
+    return itemAt(index);
+}
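+
+// Usage sketch (illustrative only, not part of the sender): a capacity-3
+// buffer silently evicts its oldest element once it is full.
+//
+//   CircularBuffer<int> cb(3);
+//   cb.push_back(1); cb.push_back(2); cb.push_back(3);
+//   cb.push_back(4);    // evicts 1
+//   // now cb.size() == 3, cb[0] == 2, cb[1] == 3, cb[2] == 4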
+
+}  // namespace android
diff --git a/media/libaah_rtp/aah_tx_sender.h b/media/libaah_rtp/aah_tx_sender.h
new file mode 100644
index 0000000..74206c4
--- /dev/null
+++ b/media/libaah_rtp/aah_tx_sender.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __AAH_TX_SENDER_H__
+#define __AAH_TX_SENDER_H__
+
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AHandlerReflector.h>
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+
+#include "aah_tx_packet.h"
+#include "pipe_event.h"
+
+namespace android {
+
+template <typename T> class CircularBuffer {
+  public:
+    CircularBuffer(size_t capacity);
+    ~CircularBuffer();
+    void push_back(const T& item);
+    void pop_front();
+    size_t size() const;
+    bool isFull() const;
+    bool isEmpty() const;
+    const T& itemAt(size_t index) const;
+    const T& operator[](size_t index) const;
+
+  private:
+    T* mBuffer;
+    size_t mCapacity;
+    size_t mHead;
+    size_t mTail;
+    size_t mFillCount;
+
+    DISALLOW_EVIL_CONSTRUCTORS(CircularBuffer);
+};
+
+class AAH_TXSender : public virtual RefBase {
+  public:
+    ~AAH_TXSender();
+
+    static sp<AAH_TXSender> GetInstance();
+
+    ALooper::handler_id handlerID() { return mReflector->id(); }
+
+    // an IP address and port
+    struct Endpoint {
+        Endpoint();
+        Endpoint(uint32_t a, uint16_t p);
+        bool operator<(const Endpoint& other) const;
+
+        uint32_t addr;
+        uint16_t port;
+    };
+
+    uint16_t registerEndpoint(const Endpoint& endpoint);
+    void unregisterEndpoint(const Endpoint& endpoint);
+
+    enum {
+        kWhatSendPacket,
+        kWhatTrimRetryBuffers,
+        kWhatSendHeartbeats,
+    };
+
+    // fields for SendPacket messages
+    static const char* kSendPacketIPAddr;
+    static const char* kSendPacketPort;
+    static const char* kSendPacketTRTPPacket;
+
+  private:
+    AAH_TXSender();
+
+    static Mutex sLock;
+    static wp<AAH_TXSender> sInstance;
+    static uint32_t sNextEpoch;
+    static bool sNextEpochValid;
+
+    static uint32_t getNextEpoch();
+
+    typedef CircularBuffer<sp<TRTPPacket> > RetryBuffer;
+
+    // state maintained on a per-endpoint basis
+    struct EndpointState {
+        EndpointState(uint32_t epoch);
+        RetryBuffer retry;
+        int playerRefCount;
+        uint16_t trtpSeqNumber;
+        uint16_t nextProgramID;
+        uint32_t epoch;
+    };
+
+    friend class AHandlerReflector<AAH_TXSender>;
+    void onMessageReceived(const sp<AMessage>& msg);
+    void onSendPacket(const sp<AMessage>& msg);
+    void doSendPacket_l(const sp<TRTPPacket>& packet,
+                        const Endpoint& endpoint);
+    void trimRetryBuffers();
+    void sendHeartbeats();
+    bool shouldSendHeartbeats_l();
+
+    sp<ALooper> mLooper;
+    sp<AHandlerReflector<AAH_TXSender> > mReflector;
+
+    int mSocket;
+    nsecs_t mLastSentPacketTime;
+
+    DefaultKeyedVector<Endpoint, EndpointState*> mEndpointMap;
+    Mutex mEndpointLock;
+
+    static const int kRetryTrimIntervalUs;
+    static const int kHeartbeatIntervalUs;
+    static const int kRetryBufferCapacity;
+    static const nsecs_t kHeartbeatTimeout;
+
+    class RetryReceiver : public Thread {
+      private:
+        friend class AAH_TXSender;
+
+        RetryReceiver(AAH_TXSender* sender);
+        virtual ~RetryReceiver();
+        virtual bool threadLoop();
+        void handleRetryRequest();
+
+        static const int kMaxReceiverPacketLen;
+        static const uint32_t kRetryRequestID;
+        static const uint32_t kFastStartRequestID;
+        static const uint32_t kRetryNakID;
+
+        AAH_TXSender* mSender;
+        PipeEvent mWakeupEvent;
+    };
+
+    sp<RetryReceiver> mRetryReceiver;
+
+    DISALLOW_EVIL_CONSTRUCTORS(AAH_TXSender);
+};
+
+struct RetryPacket {
+    uint32_t id;
+    uint32_t endpointIP;
+    uint16_t endpointPort;
+    uint16_t seqStart;
+    uint16_t seqEnd;
+} __attribute__((packed));
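+
+// With the packed attribute the wire format is exactly 14 bytes:
+// 4 (id) + 4 (endpointIP) + 2 (endpointPort) + 2 (seqStart) + 2 (seqEnd).
+// The retry handling code in aah_tx_sender.cpp treats all of these fields as
+// network byte order.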
+
+}  // namespace android
+
+#endif  // __AAH_TX_SENDER_H__
diff --git a/media/libaah_rtp/pipe_event.cpp b/media/libaah_rtp/pipe_event.cpp
new file mode 100644
index 0000000..b8e6960
--- /dev/null
+++ b/media/libaah_rtp/pipe_event.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+#include <utils/Log.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <unistd.h>
+
+#include "pipe_event.h"
+
+namespace android {
+
+PipeEvent::PipeEvent() {
+    pipe_[0] = -1;
+    pipe_[1] = -1;
+
+    // Create the pipe.
+    if (pipe(pipe_) >= 0) {
+        // Set non-blocking mode on the read side of the pipe so we can
+        // easily drain it whenever we wakeup.
+        fcntl(pipe_[0], F_SETFL, O_NONBLOCK);
+    } else {
+        ALOGE("Failed to create pipe event %d %d %d",
+              pipe_[0], pipe_[1], errno);
+        pipe_[0] = -1;
+        pipe_[1] = -1;
+    }
+}
+
+PipeEvent::~PipeEvent() {
+    if (pipe_[0] >= 0) {
+        close(pipe_[0]);
+    }
+
+    if (pipe_[1] >= 0) {
+        close(pipe_[1]);
+    }
+}
+
+void PipeEvent::clearPendingEvents() {
+    char drain_buffer[16];
+    while (read(pipe_[0], drain_buffer, sizeof(drain_buffer)) > 0) {
+        // No body.
+    }
+}
+
+bool PipeEvent::wait(int timeout) {
+    struct pollfd wait_fd;
+
+    wait_fd.fd = getWakeupHandle();
+    wait_fd.events = POLLIN;
+    wait_fd.revents = 0;
+
+    int res = poll(&wait_fd, 1, timeout);
+
+    if (res < 0) {
+        ALOGE("Wait error in PipeEvent; sleeping to prevent overload!");
+        usleep(1000);
+    }
+
+    return (res > 0);
+}
+
+void PipeEvent::setEvent() {
+    char foo = 'q';
+    write(pipe_[1], &foo, 1);
+}
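+
+// Typical wakeup pattern (illustrative sketch): one thread blocks in poll()
+// on getWakeupHandle() -- as RetryReceiver::threadLoop() does in
+// aah_tx_sender.cpp -- while another thread calls setEvent() to wake it up;
+// the woken thread drains the pipe with clearPendingEvents() before waiting
+// again.
+//
+//   PipeEvent event;
+//   // waiter:  if (event.wait(5000)) { event.clearPendingEvents(); ... }
+//   // waker:   event.setEvent();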
+
+}  // namespace android
+
diff --git a/media/libaah_rtp/pipe_event.h b/media/libaah_rtp/pipe_event.h
new file mode 100644
index 0000000..e53b0fd
--- /dev/null
+++ b/media/libaah_rtp/pipe_event.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __PIPE_EVENT_H__
+#define __PIPE_EVENT_H__
+
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+class PipeEvent {
+  public:
+    PipeEvent();
+   ~PipeEvent();
+
+    bool initCheck() const {
+        return ((pipe_[0] >= 0) && (pipe_[1] >= 0));
+    }
+
+    int getWakeupHandle() const { return pipe_[0]; }
+
+    // block until the event fires; returns true if the event fired and false if
+    // the wait timed out.  Timeout is expressed in milliseconds; negative
+    // values mean wait forever.
+    bool wait(int timeout = -1);
+
+    void clearPendingEvents();
+    void setEvent();
+
+  private:
+    int pipe_[2];
+
+    DISALLOW_EVIL_CONSTRUCTORS(PipeEvent);
+};
+
+}  // namespace android
+
+#endif  // __PIPE_EVENT_H__
diff --git a/media/libeffects/data/audio_effects.conf b/media/libeffects/data/audio_effects.conf
index b8fa487..ce25bc8 100644
--- a/media/libeffects/data/audio_effects.conf
+++ b/media/libeffects/data/audio_effects.conf
@@ -50,11 +50,11 @@
   }
   volume {
     library bundle
-    uuid 119341a0-8469-11df-81f9- 0002a5d5c51b
+    uuid 119341a0-8469-11df-81f9-0002a5d5c51b
   }
   reverb_env_aux {
     library reverb
-    uuid 4a387fc0-8ab3-11df-8bad- 0002a5d5c51b
+    uuid 4a387fc0-8ab3-11df-8bad-0002a5d5c51b
   }
   reverb_env_ins {
     library reverb
diff --git a/media/libeffects/downmix/Android.mk b/media/libeffects/downmix/Android.mk
new file mode 100644
index 0000000..95ca6fd
--- /dev/null
+++ b/media/libeffects/downmix/Android.mk
@@ -0,0 +1,28 @@
+LOCAL_PATH:= $(call my-dir)
+
+# Multichannel downmix effect library
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+	EffectDownmix.c
+
+LOCAL_SHARED_LIBRARIES := \
+	libcutils
+
+LOCAL_MODULE:= libdownmix
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/soundfx
+
+ifeq ($(TARGET_OS)-$(TARGET_SIMULATOR),linux-true)
+LOCAL_LDLIBS += -ldl
+endif
+
+LOCAL_C_INCLUDES := \
+	$(call include-path-for, audio-effects) \
+	$(call include-path-for, audio-utils)
+
+LOCAL_PRELINK_MODULE := false
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/downmix/EffectDownmix.c b/media/libeffects/downmix/EffectDownmix.c
new file mode 100644
index 0000000..a325172
--- /dev/null
+++ b/media/libeffects/downmix/EffectDownmix.c
@@ -0,0 +1,889 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectDownmix"
+#define LOG_NDEBUG 0
+#include <cutils/log.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+#include "EffectDownmix.h"
+
+#define MINUS_3_DB_IN_Q19_12 2896 // -3dB = 0.707 * 2^12 = 2896
+
+// effect_handle_t interface implementation for downmix effect
+const struct effect_interface_s gDownmixInterface = {
+        Downmix_Process,
+        Downmix_Command,
+        Downmix_GetDescriptor,
+        NULL /* no process_reverse function, no reference stream needed */
+};
+
+audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
+    tag : AUDIO_EFFECT_LIBRARY_TAG,
+    version : EFFECT_LIBRARY_API_VERSION,
+    name : "Downmix Library",
+    implementor : "The Android Open Source Project",
+    query_num_effects : DownmixLib_QueryNumberEffects,
+    query_effect : DownmixLib_QueryEffect,
+    create_effect : DownmixLib_Create,
+    release_effect : DownmixLib_Release,
+    get_descriptor : DownmixLib_GetDescriptor,
+};
+
+
+// AOSP insert downmix UUID: 93f04452-e4fe-41cc-91f9-e475b6d1d69f
+static const effect_descriptor_t gDownmixDescriptor = {
+        EFFECT_UIID_DOWNMIX__, //type
+        {0x93f04452, 0xe4fe, 0x41cc, 0x91f9, {0xe4, 0x75, 0xb6, 0xd1, 0xd6, 0x9f}}, // uuid
+        EFFECT_CONTROL_API_VERSION,
+        EFFECT_FLAG_TYPE_INSERT | EFFECT_FLAG_INSERT_FIRST,
+        0, //FIXME what value should be reported? // cpu load
+        0, //FIXME what value should be reported? // memory usage
+        "Multichannel Downmix To Stereo", // human readable effect name
+        "The Android Open Source Project" // human readable effect implementor name
+};
+
+// gDescriptors contains pointers to all defined effect descriptor in this library
+static const effect_descriptor_t * const gDescriptors[] = {
+        &gDownmixDescriptor
+};
+
+// number of effects in this library
+const int kNbEffects = sizeof(gDescriptors) / sizeof(const effect_descriptor_t *);
+
+
+/*----------------------------------------------------------------------------
+ * Effect API implementation
+ *--------------------------------------------------------------------------*/
+
+/*--- Effect Library Interface Implementation ---*/
+
+int32_t DownmixLib_QueryNumberEffects(uint32_t *pNumEffects) {
+    ALOGV("DownmixLib_QueryNumberEffects()");
+    *pNumEffects = kNbEffects;
+    return 0;
+}
+
+int32_t DownmixLib_QueryEffect(uint32_t index, effect_descriptor_t *pDescriptor) {
+    ALOGV("DownmixLib_QueryEffect() index=%d", index);
+    if (pDescriptor == NULL) {
+        return -EINVAL;
+    }
+    if (index >= (uint32_t)kNbEffects) {
+        return -EINVAL;
+    }
+    memcpy(pDescriptor, gDescriptors[index], sizeof(effect_descriptor_t));
+    return 0;
+}
+
+
+int32_t DownmixLib_Create(const effect_uuid_t *uuid,
+        int32_t sessionId,
+        int32_t ioId,
+        effect_handle_t *pHandle) {
+    int ret;
+    int i;
+    downmix_module_t *module;
+    const effect_descriptor_t *desc;
+
+    ALOGV("DownmixLib_Create()");
+
+    if (pHandle == NULL || uuid == NULL) {
+        return -EINVAL;
+    }
+
+    for (i = 0 ; i < kNbEffects ; i++) {
+        desc = gDescriptors[i];
+        if (memcmp(uuid, &desc->uuid, sizeof(effect_uuid_t)) == 0) {
+            break;
+        }
+    }
+
+    if (i == kNbEffects) {
+        return -ENOENT;
+    }
+
+    module = malloc(sizeof(downmix_module_t));
+    if (module == NULL) {
+        return -ENOMEM;
+    }
+
+    module->itfe = &gDownmixInterface;
+
+    module->context.state = DOWNMIX_STATE_UNINITIALIZED;
+
+    ret = Downmix_Init(module);
+    if (ret < 0) {
+        ALOGW("DownmixLib_Create() init failed");
+        free(module);
+        return ret;
+    }
+
+    *pHandle = (effect_handle_t) module;
+
+    ALOGV("DownmixLib_Create() %p , size %d", module, sizeof(downmix_module_t));
+
+    return 0;
+}
+
+
+int32_t DownmixLib_Release(effect_handle_t handle) {
+    downmix_module_t *pDwmModule = (downmix_module_t *)handle;
+
+    ALOGV("DownmixLib_Release() %p", handle);
+    if (handle == NULL) {
+        return -EINVAL;
+    }
+
+    pDwmModule->context.state = DOWNMIX_STATE_UNINITIALIZED;
+
+    free(pDwmModule);
+    return 0;
+}
+
+
+int32_t DownmixLib_GetDescriptor(const effect_uuid_t *uuid, effect_descriptor_t *pDescriptor) {
+    ALOGV("DownmixLib_GetDescriptor()");
+    int i;
+
+    if (pDescriptor == NULL || uuid == NULL){
+        ALOGE("DownmixLib_Create() called with NULL pointer");
+        return -EINVAL;
+    }
+    ALOGV("DownmixLib_GetDescriptor() nb effects=%d", kNbEffects);
+    for (i = 0; i < kNbEffects; i++) {
+        ALOGV("DownmixLib_GetDescriptor() i=%d", i);
+        if (memcmp(uuid, &gDescriptors[i]->uuid, sizeof(effect_uuid_t)) == 0) {
+            memcpy(pDescriptor, gDescriptors[i], sizeof(effect_descriptor_t));
+            ALOGV("EffectGetDescriptor - UUID matched downmix type %d, UUID = %x",
+                 i, gDescriptors[i]->uuid.timeLow);
+            return 0;
+        }
+    }
+
+    return -EINVAL;
+}
+
+
+/*--- Effect Control Interface Implementation ---*/
+
+static int Downmix_Process(effect_handle_t self,
+        audio_buffer_t *inBuffer, audio_buffer_t *outBuffer) {
+
+    downmix_object_t *pDownmixer;
+    int16_t *pSrc, *pDst;
+    downmix_module_t *pDwmModule = (downmix_module_t *)self;
+
+    if (pDwmModule == NULL) {
+        return -EINVAL;
+    }
+
+    if (inBuffer == NULL || inBuffer->raw == NULL ||
+        outBuffer == NULL || outBuffer->raw == NULL ||
+        inBuffer->frameCount != outBuffer->frameCount) {
+        return -EINVAL;
+    }
+
+    pDownmixer = (downmix_object_t*) &pDwmModule->context;
+
+    if (pDownmixer->state == DOWNMIX_STATE_UNINITIALIZED) {
+        ALOGE("Downmix_Process error: trying to use an uninitialized downmixer");
+        return -EINVAL;
+    } else if (pDownmixer->state == DOWNMIX_STATE_INITIALIZED) {
+        ALOGE("Downmix_Process error: trying to use a non-configured downmixer");
+        return -ENODATA;
+    }
+
+    pSrc = inBuffer->s16;
+    pDst = outBuffer->s16;
+    size_t numFrames = outBuffer->frameCount;
+
+    const bool accumulate =
+            (pDwmModule->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
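+    // in accumulate mode the downmixed samples are mixed (added with saturation) into the
+    // existing contents of the output buffer; otherwise they overwrite it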
+
+    switch(pDownmixer->type) {
+
+      case DOWNMIX_TYPE_STRIP:
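+          // strip mode: keep the first two interleaved samples of each frame (FL/FR)
+          // and drop the remaining input channels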
+          if (accumulate) {
+              while (numFrames) {
+                  pDst[0] = clamp16(pDst[0] + pSrc[0]);
+                  pDst[1] = clamp16(pDst[1] + pSrc[1]);
+                  pSrc += pDownmixer->input_channel_count;
+                  pDst += 2;
+                  numFrames--;
+              }
+          } else {
+              while (numFrames) {
+                  pDst[0] = pSrc[0];
+                  pDst[1] = pSrc[1];
+                  pSrc += pDownmixer->input_channel_count;
+                  pDst += 2;
+                  numFrames--;
+              }
+          }
+          break;
+
+      case DOWNMIX_TYPE_FOLD:
+        // optimize for the common formats
+        switch(pDwmModule->config.inputCfg.channels) {
+        case AUDIO_CHANNEL_OUT_QUAD:
+            Downmix_foldFromQuad(pSrc, pDst, numFrames, accumulate);
+            break;
+        case AUDIO_CHANNEL_OUT_SURROUND:
+            Downmix_foldFromSurround(pSrc, pDst, numFrames, accumulate);
+            break;
+        case AUDIO_CHANNEL_OUT_5POINT1:
+            Downmix_foldFrom5Point1(pSrc, pDst, numFrames, accumulate);
+            break;
+        case AUDIO_CHANNEL_OUT_7POINT1:
+            Downmix_foldFrom7Point1(pSrc, pDst, numFrames, accumulate);
+            break;
+        default:
+            // FIXME implement generic downmix
+            ALOGE("Multichannel configurations other than quad, 4.0, 5.1 and 7.1 are not supported");
+            break;
+        }
+        break;
+
+      default:
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+
+static int Downmix_Command(effect_handle_t self, uint32_t cmdCode, uint32_t cmdSize,
+        void *pCmdData, uint32_t *replySize, void *pReplyData) {
+
+    downmix_module_t *pDwmModule = (downmix_module_t *) self;
+    downmix_object_t *pDownmixer;
+    int retsize;
+
+    if (pDwmModule == NULL || pDwmModule->context.state == DOWNMIX_STATE_UNINITIALIZED) {
+        return -EINVAL;
+    }
+
+    pDownmixer = (downmix_object_t*) &pDwmModule->context;
+
+    ALOGV("Downmix_Command command %d cmdSize %d",cmdCode, cmdSize);
+
+    switch (cmdCode) {
+    case EFFECT_CMD_INIT:
+        if (pReplyData == NULL || *replySize != sizeof(int)) {
+            return -EINVAL;
+        }
+        *(int *) pReplyData = Downmix_Init(pDwmModule);
+        break;
+
+    case EFFECT_CMD_SET_CONFIG:
+        if (pCmdData == NULL || cmdSize != sizeof(effect_config_t)
+                || pReplyData == NULL || *replySize != sizeof(int)) {
+            return -EINVAL;
+        }
+        *(int *) pReplyData = Downmix_Configure(pDwmModule,
+                (effect_config_t *)pCmdData, false);
+        break;
+
+    case EFFECT_CMD_RESET:
+        Downmix_Reset(pDownmixer, false);
+        break;
+
+    case EFFECT_CMD_GET_PARAM:
+        ALOGV("Downmix_Command EFFECT_CMD_GET_PARAM pCmdData %p, *replySize %d, pReplyData: %p",
+                pCmdData, *replySize, pReplyData);
+        if (pCmdData == NULL || cmdSize < (int)(sizeof(effect_param_t) + sizeof(int32_t)) ||
+                pReplyData == NULL ||
+                *replySize < (int) sizeof(effect_param_t) + 2 * sizeof(int32_t)) {
+            return -EINVAL;
+        }
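+        // reply layout: effect_param_t header, followed by the 32-bit parameter id,
+        // followed by the parameter value (rep->vsize bytes)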
+        effect_param_t *rep = (effect_param_t *) pReplyData;
+        memcpy(pReplyData, pCmdData, sizeof(effect_param_t) + sizeof(int32_t));
+        ALOGV("Downmix_Command EFFECT_CMD_GET_PARAM param %d, replySize %d",
+                *(int32_t *)rep->data, rep->vsize);
+        rep->status = Downmix_getParameter(pDownmixer, *(int32_t *)rep->data, &rep->vsize,
+                rep->data + sizeof(int32_t));
+        *replySize = sizeof(effect_param_t) + sizeof(int32_t) + rep->vsize;
+        break;
+
+    case EFFECT_CMD_SET_PARAM:
+        ALOGV("Downmix_Command EFFECT_CMD_SET_PARAM cmdSize %d pCmdData %p, *replySize %d, " \
+                "pReplyData %p", cmdSize, pCmdData, *replySize, pReplyData);
+        if (pCmdData == NULL || (cmdSize < (int)(sizeof(effect_param_t) + sizeof(int32_t)))
+                || pReplyData == NULL || *replySize != (int)sizeof(int32_t)) {
+            return -EINVAL;
+        }
+        effect_param_t *cmd = (effect_param_t *) pCmdData;
+        *(int *)pReplyData = Downmix_setParameter(pDownmixer, *(int32_t *)cmd->data,
+                cmd->vsize, cmd->data + sizeof(int32_t));
+        break;
+
+    case EFFECT_CMD_SET_PARAM_DEFERRED:
+        //FIXME implement
+        ALOGW("Downmix_Command command EFFECT_CMD_SET_PARAM_DEFERRED not supported, FIXME");
+        break;
+
+    case EFFECT_CMD_SET_PARAM_COMMIT:
+        //FIXME implement
+        ALOGW("Downmix_Command command EFFECT_CMD_SET_PARAM_COMMIT not supported, FIXME");
+        break;
+
+    case EFFECT_CMD_ENABLE:
+        if (pReplyData == NULL || *replySize != sizeof(int)) {
+            return -EINVAL;
+        }
+        if (pDownmixer->state != DOWNMIX_STATE_INITIALIZED) {
+            return -ENOSYS;
+        }
+        pDownmixer->state = DOWNMIX_STATE_ACTIVE;
+        ALOGV("EFFECT_CMD_ENABLE() OK");
+        *(int *)pReplyData = 0;
+        break;
+
+    case EFFECT_CMD_DISABLE:
+        if (pReplyData == NULL || *replySize != sizeof(int)) {
+            return -EINVAL;
+        }
+        if (pDownmixer->state != DOWNMIX_STATE_ACTIVE) {
+            return -ENOSYS;
+        }
+        pDownmixer->state = DOWNMIX_STATE_INITIALIZED;
+        ALOGV("EFFECT_CMD_DISABLE() OK");
+        *(int *)pReplyData = 0;
+        break;
+
+    case EFFECT_CMD_SET_DEVICE:
+        if (pCmdData == NULL || cmdSize != (int)sizeof(uint32_t)) {
+            return -EINVAL;
+        }
+        // FIXME change type if playing on headset vs speaker
+        ALOGV("Downmix_Command EFFECT_CMD_SET_DEVICE: 0x%08x", *(uint32_t *)pCmdData);
+        break;
+
+    case EFFECT_CMD_SET_VOLUME: {
+        // audio output is always stereo => 2 channel volumes
+        if (pCmdData == NULL || cmdSize != (int)sizeof(uint32_t) * 2) {
+            return -EINVAL;
+        }
+        // FIXME change volume
+        ALOGW("Downmix_Command command EFFECT_CMD_SET_VOLUME not supported, FIXME");
+        float left = (float)(*(uint32_t *)pCmdData) / (1 << 24);
+        float right = (float)(*((uint32_t *)pCmdData + 1)) / (1 << 24);
+        ALOGV("Downmix_Command EFFECT_CMD_SET_VOLUME: left %f, right %f ", left, right);
+        break;
+    }
+
+    case EFFECT_CMD_SET_AUDIO_MODE:
+        if (pCmdData == NULL || cmdSize != (int)sizeof(uint32_t)) {
+            return -EINVAL;
+        }
+        ALOGV("Downmix_Command EFFECT_CMD_SET_AUDIO_MODE: %d", *(uint32_t *)pCmdData);
+        break;
+
+    case EFFECT_CMD_SET_CONFIG_REVERSE:
+    case EFFECT_CMD_SET_INPUT_DEVICE:
+        // these commands are ignored by a downmix effect
+        break;
+
+    default:
+        ALOGW("Downmix_Command invalid command %d",cmdCode);
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+
+int Downmix_GetDescriptor(effect_handle_t self, effect_descriptor_t *pDescriptor)
+{
+    downmix_module_t *pDwnmxModule = (downmix_module_t *) self;
+
+    if (pDwnmxModule == NULL ||
+            pDwnmxModule->context.state == DOWNMIX_STATE_UNINITIALIZED) {
+        return -EINVAL;
+    }
+
+    memcpy(pDescriptor, &gDownmixDescriptor, sizeof(effect_descriptor_t));
+
+    return 0;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Downmix internal functions
+ *--------------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------------
+ * Downmix_Init()
+ *----------------------------------------------------------------------------
+ * Purpose:
+ * Initialize downmix context and apply default parameters
+ *
+ * Inputs:
+ *  pDwmModule    pointer to downmix effect module
+ *
+ * Outputs:
+ *
+ * Returns:
+ *  0             indicates success
+ *
+ * Side Effects:
+ *  updates:
+ *           pDwmModule->context.type
+ *           pDwmModule->context.apply_volume_correction
+ *           pDwmModule->config.inputCfg
+ *           pDwmModule->config.outputCfg
+ *           pDwmModule->config.inputCfg.samplingRate
+ *           pDwmModule->config.outputCfg.samplingRate
+ *           pDwmModule->context.state
+ *  doesn't set:
+ *           pDwmModule->itfe
+ *
+ *----------------------------------------------------------------------------
+ */
+
+int Downmix_Init(downmix_module_t *pDwmModule) {
+
+    ALOGV("Downmix_Init module %p", pDwmModule);
+    int ret = 0;
+
+    memset(&pDwmModule->context, 0, sizeof(downmix_object_t));
+
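+    // default configuration until the host sends EFFECT_CMD_SET_CONFIG:
+    // fold 7.1, 16-bit PCM at 44100 Hz down to stereo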
+    pDwmModule->config.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+    pDwmModule->config.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+    pDwmModule->config.inputCfg.channels = AUDIO_CHANNEL_OUT_7POINT1;
+    pDwmModule->config.inputCfg.bufferProvider.getBuffer = NULL;
+    pDwmModule->config.inputCfg.bufferProvider.releaseBuffer = NULL;
+    pDwmModule->config.inputCfg.bufferProvider.cookie = NULL;
+    pDwmModule->config.inputCfg.mask = EFFECT_CONFIG_ALL;
+
+    pDwmModule->config.inputCfg.samplingRate = 44100;
+    pDwmModule->config.outputCfg.samplingRate = pDwmModule->config.inputCfg.samplingRate;
+
+    // set a default value for the access mode; the caller is expected to overwrite it
+    pDwmModule->config.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
+    pDwmModule->config.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+    pDwmModule->config.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+    pDwmModule->config.outputCfg.bufferProvider.getBuffer = NULL;
+    pDwmModule->config.outputCfg.bufferProvider.releaseBuffer = NULL;
+    pDwmModule->config.outputCfg.bufferProvider.cookie = NULL;
+    pDwmModule->config.outputCfg.mask = EFFECT_CONFIG_ALL;
+
+    ret = Downmix_Configure(pDwmModule, &pDwmModule->config, true);
+    if (ret != 0) {
+        ALOGV("Downmix_Init error %d on module %p", ret, pDwmModule);
+    } else {
+        pDwmModule->context.state = DOWNMIX_STATE_INITIALIZED;
+    }
+
+    return ret;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Downmix_Configure()
+ *----------------------------------------------------------------------------
+ * Purpose:
+ *  Set input and output audio configuration.
+ *
+ * Inputs:
+ *  pDwmModule  pointer to downmix effect module
+ *  pConfig     pointer to effect_config_t structure containing input
+ *                  and output audio parameters configuration
+ *  init        true if called from init function
+ *
+ * Outputs:
+ *
+ * Returns:
+ *  0           indicates success
+ *
+ * Side Effects:
+ *
+ *----------------------------------------------------------------------------
+ */
+
+int Downmix_Configure(downmix_module_t *pDwmModule, effect_config_t *pConfig, bool init) {
+
+    downmix_object_t *pDownmixer = &pDwmModule->context;
+
+    // Check configuration compatibility with build options, and effect capabilities
+    if (pConfig->inputCfg.samplingRate != pConfig->outputCfg.samplingRate
+        || pConfig->outputCfg.channels != DOWNMIX_OUTPUT_CHANNELS
+        || pConfig->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT
+        || pConfig->outputCfg.format != AUDIO_FORMAT_PCM_16_BIT) {
+        ALOGE("Downmix_Configure error: invalid config");
+        return -EINVAL;
+    }
+
+    memcpy(&pDwmModule->config, pConfig, sizeof(effect_config_t));
+
+    if (init) {
+        pDownmixer->type = DOWNMIX_TYPE_FOLD;
+        pDownmixer->apply_volume_correction = false;
+        pDownmixer->input_channel_count = 8; // matches default input of AUDIO_CHANNEL_OUT_7POINT1
+    } else {
+        // when configuring the effect, do not allow a blank channel mask
+        if (pConfig->inputCfg.channels == 0) {
+            ALOGE("Downmix_Configure error: input channel mask can't be 0");
+            return -EINVAL;
+        }
+        pDownmixer->input_channel_count = popcount(pConfig->inputCfg.channels);
+    }
+
+    Downmix_Reset(pDownmixer, init);
+
+    return 0;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Downmix_Reset()
+ *----------------------------------------------------------------------------
+ * Purpose:
+ *  Reset internal states.
+ *
+ * Inputs:
+ *  pDownmixer   pointer to downmix context
+ *  init         true if called from init function
+ *
+ * Outputs:
+ *
+ * Returns:
+ *  0            indicates success
+ *
+ * Side Effects:
+ *
+ *----------------------------------------------------------------------------
+ */
+
+int Downmix_Reset(downmix_object_t *pDownmixer, bool init) {
+    // nothing to do here
+    return 0;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Downmix_setParameter()
+ *----------------------------------------------------------------------------
+ * Purpose:
+ * Set a Downmix parameter
+ *
+ * Inputs:
+ *  pDownmixer    handle to instance data
+ *  param         parameter
+ *  pValue        pointer to parameter value
+ *  size          value size
+ *
+ * Outputs:
+ *
+ * Returns:
+ *  0             indicates success
+ *
+ * Side Effects:
+ *
+ *----------------------------------------------------------------------------
+ */
+int Downmix_setParameter(downmix_object_t *pDownmixer, int32_t param, size_t size, void *pValue) {
+
+    int16_t value16;
+    ALOGV("Downmix_setParameter, context %p, param %d, value16 %d, value32 %d",
+            pDownmixer, param, *(int16_t *)pValue, *(int32_t *)pValue);
+
+    switch (param) {
+
+      case DOWNMIX_PARAM_TYPE:
+        if (size != sizeof(downmix_type_t)) {
+            ALOGE("Downmix_setParameter(DOWNMIX_PARAM_TYPE) invalid size %d, should be %d",
+                    size, sizeof(downmix_type_t));
+            return -EINVAL;
+        }
+        value16 = *(int16_t *)pValue;
+        ALOGV("set DOWNMIX_PARAM_TYPE, type %d", value16);
+        if (!((value16 > DOWNMIX_TYPE_INVALID) && (value16 < DOWNMIX_TYPE_LAST))) {
+            ALOGE("Downmix_setParameter invalid DOWNMIX_PARAM_TYPE value %d", value16);
+            return -EINVAL;
+        } else {
+            pDownmixer->type = (downmix_type_t) value16;
+        }
+        break;
+
+      default:
+        ALOGE("Downmix_setParameter unknown parameter %d", param);
+        return -EINVAL;
+    }
+
+    return 0;
+} /* end Downmix_setParameter */
+
+
+/*----------------------------------------------------------------------------
+ * Downmix_getParameter()
+ *----------------------------------------------------------------------------
+ * Purpose:
+ * Get a Downmix parameter
+ *
+ * Inputs:
+ *  pDownmixer    handle to instance data
+ *  param         parameter
+ *  pValue        pointer to variable to hold retrieved value
+ *  pSize         pointer to value size: maximum size as input
+ *
+ * Outputs:
+ *  *pValue updated with parameter value
+ *  *pSize updated with actual value size
+ *
+ * Returns:
+ *  0             indicates success
+ *
+ * Side Effects:
+ *
+ *----------------------------------------------------------------------------
+ */
+int Downmix_getParameter(downmix_object_t *pDownmixer, int32_t param, size_t *pSize, void *pValue) {
+    int16_t *pValue16;
+
+    switch (param) {
+
+    case DOWNMIX_PARAM_TYPE:
+      if (*pSize < sizeof(int16_t)) {
+          ALOGE("Downmix_getParameter invalid parameter size %d for DOWNMIX_PARAM_TYPE", *pSize);
+          return -EINVAL;
+      }
+      pValue16 = (int16_t *)pValue;
+      *pValue16 = (int16_t) pDownmixer->type;
+      *pSize = sizeof(int16_t);
+      ALOGV("Downmix_getParameter DOWNMIX_PARAM_TYPE is %d", *pValue16);
+      break;
+
+    default:
+      ALOGE("Downmix_getParameter unknown parameter %d", param);
+      return -EINVAL;
+    }
+
+    return 0;
+} /* end Downmix_getParameter */
+
+
+/*----------------------------------------------------------------------------
+ * Downmix_foldFromQuad()
+ *----------------------------------------------------------------------------
+ * Purpose:
+ * downmix a quad signal to stereo
+ *
+ * Inputs:
+ *  pSrc       quad audio samples to downmix
+ *  numFrames  the number of quad frames to downmix
+ *
+ * Outputs:
+ *  pDst       downmixed stereo audio samples
+ *
+ *----------------------------------------------------------------------------
+ */
+void Downmix_foldFromQuad(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
+    // sample at index 0 is FL
+    // sample at index 1 is FR
+    // sample at index 2 is RL
+    // sample at index 3 is RR
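+    // fold formula (per frame): Lt = FL + RL, Rt = FR + RR, saturated to 16 bits
+    // (in accumulate mode the result is additionally mixed into the existing pDst samples)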
+    if (accumulate) {
+        while (numFrames) {
+            // FL + RL
+            pDst[0] = clamp16(pDst[0] + pSrc[0] + pSrc[2]);
+            // FR + RR
+            pDst[1] = clamp16(pDst[1] + pSrc[1] + pSrc[3]);
+            pSrc += 4;
+            pDst += 2;
+            numFrames--;
+        }
+    } else { // same code as above but without adding and clamping pDst[i] to itself
+        while (numFrames) {
+            // FL + RL
+            pDst[0] = clamp16(pSrc[0] + pSrc[2]);
+            // FR + RR
+            pDst[1] = clamp16(pSrc[1] + pSrc[3]);
+            pSrc += 4;
+            pDst += 2;
+            numFrames--;
+        }
+    }
+}
+
+
+/*----------------------------------------------------------------------------
+ * Downmix_foldFromSurround()
+ *----------------------------------------------------------------------------
+ * Purpose:
+ * downmix a "surround sound" (mono rear) signal to stereo
+ *
+ * Inputs:
+ *  pSrc       surround signal to downmix
+ *  numFrames  the number of surround frames to downmix
+ *
+ * Outputs:
+ *  pDst       downmixed stereo audio samples
+ *
+ *----------------------------------------------------------------------------
+ */
+void Downmix_foldFromSurround(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
+    int32_t lt, rt, centerPlusRearContrib; // samples in Q19.12 format
+    // sample at index 0 is FL
+    // sample at index 1 is FR
+    // sample at index 2 is FC
+    // sample at index 3 is RC
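+    // fold formula (per frame), computed in Q19.12:
+    //   Lt = FL + k*(FC + RC), Rt = FR + k*(FC + RC)
+    // where k = MINUS_3_DB_IN_Q19_12, i.e. -3 dB (~0.707) expressed in Q19.12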
+    if (accumulate) {
+        while (numFrames) {
+            // centerPlusRearContrib = FC(-3dB) + RC(-3dB)
+            centerPlusRearContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12) + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
+            // FL + centerPlusRearContrib
+            lt = (pSrc[0] << 12) + centerPlusRearContrib;
+            // FR + centerPlusRearContrib
+            rt = (pSrc[1] << 12) + centerPlusRearContrib;
+            pDst[0] = clamp16(pDst[0] + (lt >> 12));
+            pDst[1] = clamp16(pDst[1] + (rt >> 12));
+            pSrc += 4;
+            pDst += 2;
+            numFrames--;
+        }
+    } else { // same code as above but without adding and clamping pDst[i] to itself
+        while (numFrames) {
+            // centerPlusRearContrib = FC(-3dB) + RC(-3dB)
+            centerPlusRearContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12) + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
+            // FL + centerPlusRearContrib
+            lt = (pSrc[0] << 12) + centerPlusRearContrib;
+            // FR + centerPlusRearContrib
+            rt = (pSrc[1] << 12) + centerPlusRearContrib;
+            pDst[0] = clamp16(lt >> 12);
+            pDst[1] = clamp16(rt >> 12);
+            pSrc += 4;
+            pDst += 2;
+            numFrames--;
+        }
+    }
+}
+
+
+/*----------------------------------------------------------------------------
+ * Downmix_foldFrom5Point1()
+ *----------------------------------------------------------------------------
+ * Purpose:
+ * downmix a 5.1 signal to stereo
+ *
+ * Inputs:
+ *  pSrc       5.1 audio samples to downmix
+ *  numFrames  the number of 5.1 frames to downmix
+ *
+ * Outputs:
+ *  pDst       downmixed stereo audio samples
+ *
+ *----------------------------------------------------------------------------
+ */
+void Downmix_foldFrom5Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
+    int32_t lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
+    // sample at index 0 is FL
+    // sample at index 1 is FR
+    // sample at index 2 is FC
+    // sample at index 3 is LFE
+    // sample at index 4 is RL
+    // sample at index 5 is RR
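+    // fold formula (per frame), computed in Q19.12:
+    //   Lt = FL + k*(FC + LFE) + RL, Rt = FR + k*(FC + LFE) + RR, with k = -3 dB in Q19.12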
+    if (accumulate) {
+        while (numFrames) {
+            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
+            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
+                    + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
+            // FL + centerPlusLfeContrib + RL
+            lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[4] << 12);
+            // FR + centerPlusLfeContrib + RR
+            rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[5] << 12);
+            pDst[0] = clamp16(pDst[0] + (lt >> 12));
+            pDst[1] = clamp16(pDst[1] + (rt >> 12));
+            pSrc += 6;
+            pDst += 2;
+            numFrames--;
+        }
+    } else { // same code as above but without adding and clamping pDst[i] to itself
+        while (numFrames) {
+            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
+            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
+                    + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
+            // FL + centerPlusLfeContrib + RL
+            lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[4] << 12);
+            // FR + centerPlusLfeContrib + RR
+            rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[5] << 12);
+            pDst[0] = clamp16(lt >> 12);
+            pDst[1] = clamp16(rt >> 12);
+            pSrc += 6;
+            pDst += 2;
+            numFrames--;
+        }
+    }
+}
+
+
+/*----------------------------------------------------------------------------
+ * Downmix_foldFrom7Point1()
+ *----------------------------------------------------------------------------
+ * Purpose:
+ * downmix a 7.1 signal to stereo
+ *
+ * Inputs:
+ *  pSrc       7.1 audio samples to downmix
+ *  numFrames  the number of 7.1 frames to downmix
+ *
+ * Outputs:
+ *  pDst       downmixed stereo audio samples
+ *
+ *----------------------------------------------------------------------------
+ */
+void Downmix_foldFrom7Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
+    int32_t lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
+    // sample at index 0 is FL
+    // sample at index 1 is FR
+    // sample at index 2 is FC
+    // sample at index 3 is LFE
+    // sample at index 4 is RL
+    // sample at index 5 is RR
+    // sample at index 6 is SL
+    // sample at index 7 is SR
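+    // fold formula (per frame), computed in Q19.12:
+    //   Lt = FL + k*(FC + LFE) + SL + RL, Rt = FR + k*(FC + LFE) + SR + RR, with k = -3 dB in Q19.12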
+    if (accumulate) {
+        while (numFrames) {
+            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
+            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
+                    + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
+            // FL + centerPlusLfeContrib + SL + RL
+            lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[6] << 12) + (pSrc[4] << 12);
+            // FR + centerPlusLfeContrib + SR + RR
+            rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[7] << 12) + (pSrc[5] << 12);
+            pDst[0] = clamp16(pDst[0] + (lt >> 12));
+            pDst[1] = clamp16(pDst[1] + (rt >> 12));
+            pSrc += 8;
+            pDst += 2;
+            numFrames--;
+        }
+    } else { // same code as above but without adding and clamping pDst[i] to itself
+        while (numFrames) {
+            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
+            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
+                    + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
+            // FL + centerPlusLfeContrib + SL + RL
+            lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[6] << 12) + (pSrc[4] << 12);
+            // FR + centerPlusLfeContrib + SR + RR
+            rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[7] << 12) + (pSrc[5] << 12);
+            pDst[0] = clamp16(lt >> 12);
+            pDst[1] = clamp16(rt >> 12);
+            pSrc += 8;
+            pDst += 2;
+            numFrames--;
+        }
+    }
+}
+
diff --git a/media/libeffects/downmix/EffectDownmix.h b/media/libeffects/downmix/EffectDownmix.h
new file mode 100644
index 0000000..4176b5a
--- /dev/null
+++ b/media/libeffects/downmix/EffectDownmix.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_EFFECTDOWNMIX_H_
+#define ANDROID_EFFECTDOWNMIX_H_
+
+#include <audio_effects/effect_downmix.h>
+#include <audio_utils/primitives.h>
+#include <system/audio.h>
+
+/*------------------------------------
+ * definitions
+ *------------------------------------
+*/
+
+#define DOWNMIX_OUTPUT_CHANNELS AUDIO_CHANNEL_OUT_STEREO
+
+typedef enum {
+    DOWNMIX_STATE_UNINITIALIZED,
+    DOWNMIX_STATE_INITIALIZED,
+    DOWNMIX_STATE_ACTIVE,
+} downmix_state_t;
+
+/* parameters for each downmixer */
+typedef struct {
+    downmix_state_t state;
+    downmix_type_t type;
+    bool apply_volume_correction;
+    uint8_t input_channel_count;
+} downmix_object_t;
+
+
+typedef struct downmix_module_s {
+    const struct effect_interface_s *itfe;
+    effect_config_t config;
+    downmix_object_t context;
+} downmix_module_t;
+
+
+/*------------------------------------
+ * Effect API
+ *------------------------------------
+*/
+int32_t DownmixLib_QueryNumberEffects(uint32_t *pNumEffects);
+int32_t DownmixLib_QueryEffect(uint32_t index,
+        effect_descriptor_t *pDescriptor);
+int32_t DownmixLib_Create(const effect_uuid_t *uuid,
+        int32_t sessionId,
+        int32_t ioId,
+        effect_handle_t *pHandle);
+int32_t DownmixLib_Release(effect_handle_t handle);
+int32_t DownmixLib_GetDescriptor(const effect_uuid_t *uuid,
+        effect_descriptor_t *pDescriptor);
+
+static int Downmix_Process(effect_handle_t self,
+        audio_buffer_t *inBuffer,
+        audio_buffer_t *outBuffer);
+static int Downmix_Command(effect_handle_t self,
+        uint32_t cmdCode,
+        uint32_t cmdSize,
+        void *pCmdData,
+        uint32_t *replySize,
+        void *pReplyData);
+static int Downmix_GetDescriptor(effect_handle_t self,
+        effect_descriptor_t *pDescriptor);
+
+
+/*------------------------------------
+ * internal functions
+ *------------------------------------
+*/
+int Downmix_Init(downmix_module_t *pDwmModule);
+int Downmix_Configure(downmix_module_t *pDwmModule, effect_config_t *pConfig, bool init);
+int Downmix_Reset(downmix_object_t *pDownmixer, bool init);
+int Downmix_setParameter(downmix_object_t *pDownmixer, int32_t param, size_t size, void *pValue);
+int Downmix_getParameter(downmix_object_t *pDownmixer, int32_t param, size_t *pSize, void *pValue);
+
+void Downmix_foldFromQuad(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
+void Downmix_foldFromSurround(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
+void Downmix_foldFrom5Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
+void Downmix_foldFrom7Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
+
+#endif /*ANDROID_EFFECTDOWNMIX_H_*/
diff --git a/media/libeffects/factory/Android.mk b/media/libeffects/factory/Android.mk
index 2f2b974..6e69151 100644
--- a/media/libeffects/factory/Android.mk
+++ b/media/libeffects/factory/Android.mk
@@ -15,6 +15,6 @@
 LOCAL_SHARED_LIBRARIES += libdl
 
 LOCAL_C_INCLUDES := \
-    system/media/audio_effects/include
+    $(call include-path-for, audio-effects)
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/factory/EffectsFactory.c b/media/libeffects/factory/EffectsFactory.c
index 9f6599f..59cd9e3 100644
--- a/media/libeffects/factory/EffectsFactory.c
+++ b/media/libeffects/factory/EffectsFactory.c
@@ -53,8 +53,8 @@
 static lib_entry_t *getLibrary(const char *path);
 static void resetEffectEnumeration();
 static uint32_t updateNumEffects();
-static int findEffect(effect_uuid_t *type,
-               effect_uuid_t *uuid,
+static int findEffect(const effect_uuid_t *type,
+               const effect_uuid_t *uuid,
                lib_entry_t **lib,
                effect_descriptor_t **desc);
 static void dumpEffectDescriptor(effect_descriptor_t *desc, char *str, size_t len);
@@ -236,7 +236,7 @@
     return ret;
 }
 
-int EffectGetDescriptor(effect_uuid_t *uuid, effect_descriptor_t *pDescriptor)
+int EffectGetDescriptor(const effect_uuid_t *uuid, effect_descriptor_t *pDescriptor)
 {
     lib_entry_t *l = NULL;
     effect_descriptor_t *d = NULL;
@@ -257,7 +257,7 @@
     return ret;
 }
 
-int EffectCreate(effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle)
+int EffectCreate(const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle)
 {
     list_elem_t *e = gLibraryList;
     lib_entry_t *l = NULL;
@@ -372,7 +372,7 @@
     return ret;
 }
 
-int EffectIsNullUuid(effect_uuid_t *uuid)
+int EffectIsNullUuid(const effect_uuid_t *uuid)
 {
     if (memcmp(uuid, EFFECT_UUID_NULL, sizeof(effect_uuid_t))) {
         return 0;
@@ -628,8 +628,8 @@
     return cnt;
 }
 
-int findEffect(effect_uuid_t *type,
-               effect_uuid_t *uuid,
+int findEffect(const effect_uuid_t *type,
+               const effect_uuid_t *uuid,
                lib_entry_t **lib,
                effect_descriptor_t **desc)
 {
diff --git a/media/libeffects/lvm/wrapper/Android.mk b/media/libeffects/lvm/wrapper/Android.mk
index f097dd0..4313424 100644
--- a/media/libeffects/lvm/wrapper/Android.mk
+++ b/media/libeffects/lvm/wrapper/Android.mk
@@ -26,7 +26,7 @@
 	$(LOCAL_PATH)/Bundle \
 	$(LOCAL_PATH)/../lib/Common/lib/ \
 	$(LOCAL_PATH)/../lib/Bundle/lib/ \
-	system/media/audio_effects/include
+	$(call include-path-for, audio-effects)
 
 
 include $(BUILD_SHARED_LIBRARY)
@@ -55,6 +55,6 @@
     $(LOCAL_PATH)/Reverb \
     $(LOCAL_PATH)/../lib/Common/lib/ \
     $(LOCAL_PATH)/../lib/Reverb/lib/ \
-    system/media/audio_effects/include
+    $(call include-path-for, audio-effects)
 
-include $(BUILD_SHARED_LIBRARY)
\ No newline at end of file
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 62be78c..ca93ce5 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -24,7 +24,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <new>
-#include <EffectBundle.h>
+#include "EffectBundle.h"
 
 
 // effect_handle_t interface implementation for bass boost
@@ -133,7 +133,8 @@
 int  LvmEffect_enable          (EffectContext *pContext);
 int  LvmEffect_disable         (EffectContext *pContext);
 void LvmEffect_free            (EffectContext *pContext);
-int  Effect_configure          (EffectContext *pContext, effect_config_t *pConfig);
+int  Effect_setConfig          (EffectContext *pContext, effect_config_t *pConfig);
+void Effect_getConfig          (EffectContext *pContext, effect_config_t *pConfig);
 int  BassBoost_setParameter    (EffectContext *pContext, void *pParam, void *pValue);
 int  BassBoost_getParameter    (EffectContext *pContext,
                                void           *pParam,
@@ -194,7 +195,7 @@
     return 0;
 }     /* end EffectQueryEffect */
 
-extern "C" int EffectCreate(effect_uuid_t       *uuid,
+extern "C" int EffectCreate(const effect_uuid_t *uuid,
                             int32_t             sessionId,
                             int32_t             ioId,
                             effect_handle_t  *pHandle){
@@ -470,7 +471,7 @@
 
 } /* end EffectRelease */
 
-extern "C" int EffectGetDescriptor(effect_uuid_t       *uuid,
+extern "C" int EffectGetDescriptor(const effect_uuid_t *uuid,
                                    effect_descriptor_t *pDescriptor) {
     const effect_descriptor_t *desc = NULL;
 
@@ -936,7 +937,7 @@
 }    /* end LvmEffect_free */
 
 //----------------------------------------------------------------------------
-// Effect_configure()
+// Effect_setConfig()
 //----------------------------------------------------------------------------
 // Purpose: Set input and output audio configuration.
 //
@@ -949,9 +950,9 @@
 //
 //----------------------------------------------------------------------------
 
-int Effect_configure(EffectContext *pContext, effect_config_t *pConfig){
+int Effect_setConfig(EffectContext *pContext, effect_config_t *pConfig){
     LVM_Fs_en   SampleRate;
-    //ALOGV("\tEffect_configure start");
+    //ALOGV("\tEffect_setConfig start");
 
     CHECK_ARG(pContext != NULL);
     CHECK_ARG(pConfig != NULL);
@@ -992,7 +993,7 @@
         pContext->pBundledContext->SamplesPerSecond = 48000*2; // 2 secs Stereo
         break;
     default:
-        ALOGV("\tEffect_Configure invalid sampling rate %d", pConfig->inputCfg.samplingRate);
+        ALOGV("\tEffect_setConfig invalid sampling rate %d", pConfig->inputCfg.samplingRate);
         return -EINVAL;
     }
 
@@ -1001,28 +1002,47 @@
         LVM_ControlParams_t     ActiveParams;
         LVM_ReturnStatus_en     LvmStatus = LVM_SUCCESS;
 
-        ALOGV("\tEffect_configure change sampling rate to %d", SampleRate);
+        ALOGV("\tEffect_setConfig change sampling rate to %d", SampleRate);
 
         /* Get the current settings */
         LvmStatus = LVM_GetControlParameters(pContext->pBundledContext->hInstance,
                                          &ActiveParams);
 
-        LVM_ERROR_CHECK(LvmStatus, "LVM_GetControlParameters", "Effect_configure")
+        LVM_ERROR_CHECK(LvmStatus, "LVM_GetControlParameters", "Effect_setConfig")
         if(LvmStatus != LVM_SUCCESS) return -EINVAL;
 
         LvmStatus = LVM_SetControlParameters(pContext->pBundledContext->hInstance, &ActiveParams);
 
-        LVM_ERROR_CHECK(LvmStatus, "LVM_SetControlParameters", "Effect_configure")
-        ALOGV("\tEffect_configure Succesfully called LVM_SetControlParameters\n");
+        LVM_ERROR_CHECK(LvmStatus, "LVM_SetControlParameters", "Effect_setConfig")
+        ALOGV("\tEffect_setConfig Succesfully called LVM_SetControlParameters\n");
         pContext->pBundledContext->SampleRate = SampleRate;
 
     }else{
-        //ALOGV("\tEffect_configure keep sampling rate at %d", SampleRate);
+        //ALOGV("\tEffect_setConfig keep sampling rate at %d", SampleRate);
     }
 
-    //ALOGV("\tEffect_configure End....");
+    //ALOGV("\tEffect_setConfig End....");
     return 0;
-}   /* end Effect_configure */
+}   /* end Effect_setConfig */
+
+//----------------------------------------------------------------------------
+// Effect_getConfig()
+//----------------------------------------------------------------------------
+// Purpose: Get input and output audio configuration.
+//
+// Inputs:
+//  pContext:   effect engine context
+//  pConfig:    pointer to effect_config_t structure holding input and output
+//      configuration parameters
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+void Effect_getConfig(EffectContext *pContext, effect_config_t *pConfig)
+{
+    memcpy(pConfig, &pContext->config, sizeof(effect_config_t));
+}   /* end Effect_getConfig */
 
 //----------------------------------------------------------------------------
 // BassGetStrength()
@@ -2778,23 +2798,34 @@
             }
             break;
 
-        case EFFECT_CMD_CONFIGURE:
-            //ALOGV("\tEffect_command cmdCode Case: EFFECT_CMD_CONFIGURE start");
+        case EFFECT_CMD_SET_CONFIG:
+            //ALOGV("\tEffect_command cmdCode Case: EFFECT_CMD_SET_CONFIG start");
             if (pCmdData    == NULL||
                 cmdSize     != sizeof(effect_config_t)||
                 pReplyData  == NULL||
                 *replySize  != sizeof(int)){
                 ALOGV("\tLVM_ERROR : Effect_command cmdCode Case: "
-                        "EFFECT_CMD_CONFIGURE: ERROR");
+                        "EFFECT_CMD_SET_CONFIG: ERROR");
                 return -EINVAL;
             }
-            *(int *) pReplyData = android::Effect_configure(pContext, (effect_config_t *) pCmdData);
-            //ALOGV("\tEffect_command cmdCode Case: EFFECT_CMD_CONFIGURE end");
+            *(int *) pReplyData = android::Effect_setConfig(pContext, (effect_config_t *) pCmdData);
+            //ALOGV("\tEffect_command cmdCode Case: EFFECT_CMD_SET_CONFIG end");
+            break;
+
+        case EFFECT_CMD_GET_CONFIG:
+            if (pReplyData == NULL ||
+                *replySize != sizeof(effect_config_t)) {
+                ALOGV("\tLVM_ERROR : Effect_command cmdCode Case: "
+                        "EFFECT_CMD_GET_CONFIG: ERROR");
+                return -EINVAL;
+            }
+
+            android::Effect_getConfig(pContext, (effect_config_t *)pReplyData);
             break;
 
         case EFFECT_CMD_RESET:
             //ALOGV("\tEffect_command cmdCode Case: EFFECT_CMD_RESET start");
-            android::Effect_configure(pContext, &pContext->config);
+            android::Effect_setConfig(pContext, &pContext->config);
             //ALOGV("\tEffect_command cmdCode Case: EFFECT_CMD_RESET end");
             break;
 
@@ -3078,20 +3109,20 @@
 
                     if (pContext->pBundledContext->bBassEnabled == LVM_TRUE) {
                         ALOGV("\tEFFECT_CMD_SET_DEVICE disable LVM_BASS_BOOST %d",
-                              *(int32_t *)pCmdData);
+                             *(int32_t *)pCmdData);
                         android::LvmEffect_disable(pContext);
                     }
                     pContext->pBundledContext->bBassTempDisabled = LVM_TRUE;
                 } else {
                     ALOGV("\tEFFECT_CMD_SET_DEVICE device is valid for LVM_BASS_BOOST %d",
-                          *(int32_t *)pCmdData);
+                         *(int32_t *)pCmdData);
 
                     // If a device supports bassboost and the effect has been temporarily disabled
                     // previously then re-enable it
 
                     if (pContext->pBundledContext->bBassEnabled == LVM_TRUE) {
                         ALOGV("\tEFFECT_CMD_SET_DEVICE re-enable LVM_BASS_BOOST %d",
-                              *(int32_t *)pCmdData);
+                             *(int32_t *)pCmdData);
                         android::LvmEffect_enable(pContext);
                     }
                     pContext->pBundledContext->bBassTempDisabled = LVM_FALSE;
diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
index 1825aab..9599dcc 100755
--- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
@@ -24,8 +24,9 @@
 #include <stdlib.h>
 #include <string.h>
 #include <new>
-#include <EffectReverb.h>
-#include <LVREV.h>
+#include "EffectReverb.h"
+// from Reverb/lib
+#include "LVREV.h"
 
 // effect_handle_t interface implementation for reverb
 extern "C" const struct effect_interface_s gReverbInterface;
@@ -175,7 +176,8 @@
 //--- local function prototypes
 int  Reverb_init            (ReverbContext *pContext);
 void Reverb_free            (ReverbContext *pContext);
-int  Reverb_configure       (ReverbContext *pContext, effect_config_t *pConfig);
+int  Reverb_setConfig       (ReverbContext *pContext, effect_config_t *pConfig);
+void Reverb_getConfig       (ReverbContext *pContext, effect_config_t *pConfig);
 int  Reverb_setParameter    (ReverbContext *pContext, void *pParam, void *pValue);
 int  Reverb_getParameter    (ReverbContext *pContext,
                              void          *pParam,
@@ -209,7 +211,7 @@
     return 0;
 }     /* end EffectQueryEffect */
 
-extern "C" int EffectCreate(effect_uuid_t       *uuid,
+extern "C" int EffectCreate(const effect_uuid_t *uuid,
                             int32_t             sessionId,
                             int32_t             ioId,
                             effect_handle_t  *pHandle){
@@ -316,7 +318,7 @@
     return 0;
 } /* end EffectRelease */
 
-extern "C" int EffectGetDescriptor(effect_uuid_t       *uuid,
+extern "C" int EffectGetDescriptor(const effect_uuid_t *uuid,
                                    effect_descriptor_t *pDescriptor) {
     int i;
     int length = sizeof(gDescriptors) / sizeof(const effect_descriptor_t *);
@@ -609,7 +611,7 @@
 }    /* end Reverb_free */
 
 //----------------------------------------------------------------------------
-// Reverb_configure()
+// Reverb_setConfig()
 //----------------------------------------------------------------------------
 // Purpose: Set input and output audio configuration.
 //
@@ -622,9 +624,9 @@
 //
 //----------------------------------------------------------------------------
 
-int Reverb_configure(ReverbContext *pContext, effect_config_t *pConfig){
+int Reverb_setConfig(ReverbContext *pContext, effect_config_t *pConfig){
     LVM_Fs_en   SampleRate;
-    //ALOGV("\tReverb_configure start");
+    //ALOGV("\tReverb_setConfig start");
 
     CHECK_ARG(pContext != NULL);
     CHECK_ARG(pConfig != NULL);
@@ -642,7 +644,7 @@
         return -EINVAL;
     }
 
-    //ALOGV("\tReverb_configure calling memcpy");
+    //ALOGV("\tReverb_setConfig calling memcpy");
     memcpy(&pContext->config, pConfig, sizeof(effect_config_t));
 
 
@@ -666,7 +668,7 @@
         SampleRate = LVM_FS_48000;
         break;
     default:
-        ALOGV("\rReverb_Configure invalid sampling rate %d", pConfig->inputCfg.samplingRate);
+        ALOGV("\rReverb_setConfig invalid sampling rate %d", pConfig->inputCfg.samplingRate);
         return -EINVAL;
     }
 
@@ -675,28 +677,46 @@
         LVREV_ControlParams_st    ActiveParams;
         LVREV_ReturnStatus_en     LvmStatus = LVREV_SUCCESS;
 
-        //ALOGV("\tReverb_configure change sampling rate to %d", SampleRate);
+        //ALOGV("\tReverb_setConfig change sampling rate to %d", SampleRate);
 
         /* Get the current settings */
         LvmStatus = LVREV_GetControlParameters(pContext->hInstance,
                                          &ActiveParams);
 
-        LVM_ERROR_CHECK(LvmStatus, "LVREV_GetControlParameters", "Reverb_configure")
+        LVM_ERROR_CHECK(LvmStatus, "LVREV_GetControlParameters", "Reverb_setConfig")
         if(LvmStatus != LVREV_SUCCESS) return -EINVAL;
 
         LvmStatus = LVREV_SetControlParameters(pContext->hInstance, &ActiveParams);
 
-        LVM_ERROR_CHECK(LvmStatus, "LVREV_SetControlParameters", "Reverb_configure")
-        //ALOGV("\tReverb_configure Succesfully called LVREV_SetControlParameters\n");
+        LVM_ERROR_CHECK(LvmStatus, "LVREV_SetControlParameters", "Reverb_setConfig")
+        //ALOGV("\tReverb_setConfig Succesfully called LVREV_SetControlParameters\n");
 
     }else{
-        //ALOGV("\tReverb_configure keep sampling rate at %d", SampleRate);
+        //ALOGV("\tReverb_setConfig keep sampling rate at %d", SampleRate);
     }
 
-    //ALOGV("\tReverb_configure End");
+    //ALOGV("\tReverb_setConfig End");
     return 0;
-}   /* end Reverb_configure */
+}   /* end Reverb_setConfig */
 
+//----------------------------------------------------------------------------
+// Reverb_getConfig()
+//----------------------------------------------------------------------------
+// Purpose: Get input and output audio configuration.
+//
+// Inputs:
+//  pContext:   effect engine context
+//  pConfig:    pointer to effect_config_t structure holding input and output
+//      configuration parameters
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+void Reverb_getConfig(ReverbContext *pContext, effect_config_t *pConfig)
+{
+    memcpy(pConfig, &pContext->config, sizeof(effect_config_t));
+}   /* end Reverb_getConfig */
 
 //----------------------------------------------------------------------------
 // Reverb_init()
@@ -1924,24 +1944,36 @@
             *(int *) pReplyData = 0;
             break;
 
-        case EFFECT_CMD_CONFIGURE:
+        case EFFECT_CMD_SET_CONFIG:
             //ALOGV("\tReverb_command cmdCode Case: "
-            //        "EFFECT_CMD_CONFIGURE start");
-            if (pCmdData    == NULL||
-                cmdSize     != sizeof(effect_config_t)||
-                pReplyData  == NULL||
-                *replySize  != sizeof(int)){
+            //        "EFFECT_CMD_SET_CONFIG start");
+            if (pCmdData == NULL ||
+                cmdSize != sizeof(effect_config_t) ||
+                pReplyData == NULL ||
+                *replySize != sizeof(int)) {
                 ALOGV("\tLVM_ERROR : Reverb_command cmdCode Case: "
-                        "EFFECT_CMD_CONFIGURE: ERROR");
+                        "EFFECT_CMD_SET_CONFIG: ERROR");
                 return -EINVAL;
             }
-            *(int *) pReplyData = Reverb_configure(pContext, (effect_config_t *) pCmdData);
+            *(int *) pReplyData = android::Reverb_setConfig(pContext,
+                                                            (effect_config_t *) pCmdData);
+            break;
+
+        case EFFECT_CMD_GET_CONFIG:
+            if (pReplyData == NULL ||
+                *replySize != sizeof(effect_config_t)) {
+                ALOGV("\tLVM_ERROR : Reverb_command cmdCode Case: "
+                        "EFFECT_CMD_GET_CONFIG: ERROR");
+                return -EINVAL;
+            }
+
+            android::Reverb_getConfig(pContext, (effect_config_t *)pReplyData);
             break;
 
         case EFFECT_CMD_RESET:
             //ALOGV("\tReverb_command cmdCode Case: "
             //        "EFFECT_CMD_RESET start");
-            Reverb_configure(pContext, &pContext->config);
+            Reverb_setConfig(pContext, &pContext->config);
             break;
 
         case EFFECT_CMD_GET_PARAM:{
diff --git a/media/libeffects/preprocessing/Android.mk b/media/libeffects/preprocessing/Android.mk
index 77d40b6..c13b9d4 100755
--- a/media/libeffects/preprocessing/Android.mk
+++ b/media/libeffects/preprocessing/Android.mk
@@ -13,8 +13,8 @@
 LOCAL_C_INCLUDES += \
     external/webrtc/src \
     external/webrtc/src/modules/interface \
-    external/webrtc/src/modules/audio_processing/main/interface \
-    system/media/audio_effects/include
+    external/webrtc/src/modules/audio_processing/interface \
+    $(call include-path-for, audio-effects)
 
 LOCAL_C_INCLUDES += $(call include-path-for, speex)
 
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index 6267d1d..1d76f62 100755
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -24,8 +24,8 @@
 #include <audio_effects/effect_aec.h>
 #include <audio_effects/effect_agc.h>
 #include <audio_effects/effect_ns.h>
-#include "modules/interface/module_common_types.h"
-#include "modules/audio_processing/main/interface/audio_processing.h"
+#include <module_common_types.h>
+#include <audio_processing.h>
 #include "speex/speex_resampler.h"
 
 
@@ -220,8 +220,8 @@
 // Automatic Gain Control (AGC)
 //------------------------------------------------------------------------------
 
-static const int kAgcDefaultTargetLevel = 0;
-static const int kAgcDefaultCompGain = 90;
+static const int kAgcDefaultTargetLevel = 3;
+static const int kAgcDefaultCompGain = 9;
 static const bool kAgcDefaultLimiter = true;
 
 int  AgcInit (preproc_effect_t *effect)
@@ -845,6 +845,17 @@
          config->inputCfg.samplingRate, config->inputCfg.channels);
     int status;
 
+    // if at least one process is enabled, do not accept configuration changes
+    if (session->enabledMsk) {
+        if (session->samplingRate != config->inputCfg.samplingRate ||
+                session->inChannelCount != inCnl ||
+                session->outChannelCount != outCnl) {
+            return -ENOSYS;
+        } else {
+            return 0;
+        }
+    }
+
     // AEC implementation is limited to 16kHz
     if (config->inputCfg.samplingRate >= 32000 && !(session->createdMsk & (1 << PREPROC_AEC))) {
         session->apmSamplingRate = 32000;
@@ -940,6 +951,18 @@
     return 0;
 }
 
+void Session_GetConfig(preproc_session_t *session, effect_config_t *config)
+{
+    memset(config, 0, sizeof(effect_config_t));
+    config->inputCfg.samplingRate = config->outputCfg.samplingRate = session->samplingRate;
+    config->inputCfg.format = config->outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+    config->inputCfg.channels = audio_channel_in_mask_from_count(session->inChannelCount);
+    // "out" doesn't mean output device, so this is the correct API to convert channel count to mask
+    config->outputCfg.channels = audio_channel_in_mask_from_count(session->outChannelCount);
+    config->inputCfg.mask = config->outputCfg.mask =
+            (EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS | EFFECT_CONFIG_FORMAT);
+}
+
 int Session_SetReverseConfig(preproc_session_t *session, effect_config_t *config)
 {
     if (config->inputCfg.samplingRate != config->outputCfg.samplingRate ||
@@ -969,6 +992,17 @@
     return 0;
 }
 
+void Session_GetReverseConfig(preproc_session_t *session, effect_config_t *config)
+{
+    memset(config, 0, sizeof(effect_config_t));
+    config->inputCfg.samplingRate = config->outputCfg.samplingRate = session->samplingRate;
+    config->inputCfg.format = config->outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+    config->inputCfg.channels = config->outputCfg.channels =
+            audio_channel_in_mask_from_count(session->revChannelCount);
+    config->inputCfg.mask = config->outputCfg.mask =
+            (EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS | EFFECT_CONFIG_FORMAT);
+}
+
 void Session_SetProcEnabled(preproc_session_t *session, uint32_t procId, bool enabled)
 {
     if (enabled) {
@@ -1048,7 +1082,7 @@
     return sInitStatus;
 }
 
-const effect_descriptor_t *PreProc_GetDescriptor(effect_uuid_t *uuid)
+const effect_descriptor_t *PreProc_GetDescriptor(const effect_uuid_t *uuid)
 {
     size_t i;
     for (i = 0; i < PREPROC_NUM_EFFECTS; i++) {
@@ -1250,29 +1284,42 @@
             *(int *)pReplyData = 0;
             break;
 
-        case EFFECT_CMD_CONFIGURE:
+        case EFFECT_CMD_SET_CONFIG:
             if (pCmdData    == NULL||
                 cmdSize     != sizeof(effect_config_t)||
                 pReplyData  == NULL||
                 *replySize  != sizeof(int)){
                 ALOGV("PreProcessingFx_Command cmdCode Case: "
-                        "EFFECT_CMD_CONFIGURE: ERROR");
+                        "EFFECT_CMD_SET_CONFIG: ERROR");
                 return -EINVAL;
             }
             *(int *)pReplyData = Session_SetConfig(effect->session, (effect_config_t *)pCmdData);
             if (*(int *)pReplyData != 0) {
                 break;
             }
-            *(int *)pReplyData = Effect_SetState(effect, PREPROC_EFFECT_STATE_CONFIG);
+            if (effect->state != PREPROC_EFFECT_STATE_ACTIVE) {
+                *(int *)pReplyData = Effect_SetState(effect, PREPROC_EFFECT_STATE_CONFIG);
+            }
             break;
 
-        case EFFECT_CMD_CONFIGURE_REVERSE:
-            if (pCmdData    == NULL||
-                cmdSize     != sizeof(effect_config_t)||
-                pReplyData  == NULL||
-                *replySize  != sizeof(int)){
+        case EFFECT_CMD_GET_CONFIG:
+            if (pReplyData == NULL ||
+                *replySize != sizeof(effect_config_t)) {
+                ALOGV("\tLVM_ERROR : PreProcessingFx_Command cmdCode Case: "
+                        "EFFECT_CMD_GET_CONFIG: ERROR");
+                return -EINVAL;
+            }
+
+            Session_GetConfig(effect->session, (effect_config_t *)pReplyData);
+            break;
+
+        case EFFECT_CMD_SET_CONFIG_REVERSE:
+            if (pCmdData == NULL ||
+                cmdSize != sizeof(effect_config_t) ||
+                pReplyData == NULL ||
+                *replySize != sizeof(int)) {
                 ALOGV("PreProcessingFx_Command cmdCode Case: "
-                        "EFFECT_CMD_CONFIGURE_REVERSE: ERROR");
+                        "EFFECT_CMD_SET_CONFIG_REVERSE: ERROR");
                 return -EINVAL;
             }
             *(int *)pReplyData = Session_SetReverseConfig(effect->session,
@@ -1282,6 +1329,16 @@
             }
             break;
 
+        case EFFECT_CMD_GET_CONFIG_REVERSE:
+            if (pReplyData == NULL ||
+                *replySize != sizeof(effect_config_t)){
+                ALOGV("PreProcessingFx_Command cmdCode Case: "
+                        "EFFECT_CMD_GET_CONFIG_REVERSE: ERROR");
+                return -EINVAL;
+            }
+            Session_GetReverseConfig(effect->session, (effect_config_t *)pReplyData);
+            break;
+
         case EFFECT_CMD_RESET:
             if (effect->ops->reset) {
                 effect->ops->reset(effect);
@@ -1523,7 +1580,7 @@
     return 0;
 }
 
-int PreProcessingLib_Create(effect_uuid_t       *uuid,
+int PreProcessingLib_Create(const effect_uuid_t *uuid,
                             int32_t             sessionId,
                             int32_t             ioId,
                             effect_handle_t  *pInterface)
@@ -1575,7 +1632,7 @@
     return Session_ReleaseEffect(fx->session, fx);
 }
 
-int PreProcessingLib_GetDescriptor(effect_uuid_t       *uuid,
+int PreProcessingLib_GetDescriptor(const effect_uuid_t *uuid,
                                    effect_descriptor_t *pDescriptor) {
 
     if (pDescriptor == NULL || uuid == NULL){
diff --git a/media/libeffects/testlibs/Android.mk_ b/media/libeffects/testlibs/Android.mk_
index 249ebf4..2954908 100644
--- a/media/libeffects/testlibs/Android.mk_
+++ b/media/libeffects/testlibs/Android.mk_
@@ -23,7 +23,7 @@
 endif
 
 LOCAL_C_INCLUDES := \
-	system/media/audio_effects/include \
+	$(call include-path-for, audio-effects) \
 	$(call include-path-for, graphics corecg)
 
 LOCAL_MODULE_TAGS := optional
@@ -60,7 +60,7 @@
 
 LOCAL_C_INCLUDES := \
 	$(call include-path-for, graphics corecg) \
-	system/media/audio_effects/include
+	$(call include-path-for, audio-effects)
 
 LOCAL_MODULE_TAGS := optional
 
diff --git a/media/libeffects/testlibs/AudioBiquadFilter.cpp b/media/libeffects/testlibs/AudioBiquadFilter.cpp
index 72917a3..16dd1c5 100644
--- a/media/libeffects/testlibs/AudioBiquadFilter.cpp
+++ b/media/libeffects/testlibs/AudioBiquadFilter.cpp
@@ -17,12 +17,10 @@
 
 #include <string.h>
 #include <assert.h>
+#include <cutils/compiler.h>
 
 #include "AudioBiquadFilter.h"
 
-#define LIKELY( exp )       (__builtin_expect( (exp) != 0, true  ))
-#define UNLIKELY( exp )     (__builtin_expect( (exp) != 0, false ))
-
 namespace android {
 
 const audio_coef_t AudioBiquadFilter::IDENTITY_COEFS[AudioBiquadFilter::NUM_COEFS] = { AUDIO_COEF_ONE, 0, 0, 0, 0 };
@@ -55,7 +53,7 @@
 void AudioBiquadFilter::setCoefs(const audio_coef_t coefs[NUM_COEFS], bool immediate) {
     memcpy(mTargetCoefs, coefs, sizeof(mTargetCoefs));
     if (mState & STATE_ENABLED_MASK) {
-        if (UNLIKELY(immediate)) {
+        if (CC_UNLIKELY(immediate)) {
             memcpy(mCoefs, coefs, sizeof(mCoefs));
             setState(STATE_NORMAL);
         } else {
@@ -70,7 +68,7 @@
 }
 
 void AudioBiquadFilter::enable(bool immediate) {
-    if (UNLIKELY(immediate)) {
+    if (CC_UNLIKELY(immediate)) {
         memcpy(mCoefs, mTargetCoefs, sizeof(mCoefs));
         setState(STATE_NORMAL);
     } else {
@@ -79,7 +77,7 @@
 }
 
 void AudioBiquadFilter::disable(bool immediate) {
-    if (UNLIKELY(immediate)) {
+    if (CC_UNLIKELY(immediate)) {
         memcpy(mCoefs, IDENTITY_COEFS, sizeof(mCoefs));
         setState(STATE_BYPASS);
     } else {
@@ -142,7 +140,7 @@
                                        audio_sample_t * out,
                                        int frameCount) {
     // The common case is in-place processing, because this is what the EQ does.
-    if (UNLIKELY(in != out)) {
+    if (CC_UNLIKELY(in != out)) {
         memcpy(out, in, frameCount * mNumChannels * sizeof(audio_sample_t));
     }
 }
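
The hunks above (and the ones that follow) replace per-file LIKELY/UNLIKELY macros with the shared CC_LIKELY/CC_UNLIKELY hints from <cutils/compiler.h>. A minimal, self-contained sketch of the branch-hint pattern; the macro bodies below are an assumption about what that header provides (the removed per-file macros expanded __builtin_expect((exp) != 0, ...)), and the example function is illustrative rather than the real filter code.

    // Sketch only: stand-ins for the <cutils/compiler.h> hints, assumed to
    // wrap __builtin_expect the same way the removed per-file macros did.
    #define CC_LIKELY(exp)   (__builtin_expect(!!(exp), true))
    #define CC_UNLIKELY(exp) (__builtin_expect(!!(exp), false))

    #include <cstring>

    // Hypothetical bypass path: the common case is in-place processing, so
    // the copy branch is marked unlikely, matching the last hunk above.
    static void bypass(const short* in, short* out, int frameCount, int channels) {
        if (CC_UNLIKELY(in != out)) {
            memcpy(out, in, frameCount * channels * sizeof(short));
        }
    }
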
diff --git a/media/libeffects/testlibs/AudioCoefInterpolator.cpp b/media/libeffects/testlibs/AudioCoefInterpolator.cpp
index 039ab9f..6b56922 100644
--- a/media/libeffects/testlibs/AudioCoefInterpolator.cpp
+++ b/media/libeffects/testlibs/AudioCoefInterpolator.cpp
@@ -16,10 +16,10 @@
  */
 
 #include <string.h>
-#include "AudioCoefInterpolator.h"
 
-#define LIKELY( exp )       (__builtin_expect( (exp) != 0, true  ))
-#define UNLIKELY( exp )     (__builtin_expect( (exp) != 0, false ))
+#include <cutils/compiler.h>
+
+#include "AudioCoefInterpolator.h"
 
 namespace android {
 
@@ -44,9 +44,9 @@
     size_t index = 0;
     size_t dim = mNumInDims;
     while (dim-- > 0) {
-        if (UNLIKELY(intCoord[dim] < 0)) {
+        if (CC_UNLIKELY(intCoord[dim] < 0)) {
             fracCoord[dim] = 0;
-        } else if (UNLIKELY(intCoord[dim] >= (int)mInDims[dim] - 1)) {
+        } else if (CC_UNLIKELY(intCoord[dim] >= (int)mInDims[dim] - 1)) {
             fracCoord[dim] = 0;
             index += mInDimOffsets[dim] * (mInDims[dim] - 1);
         } else {
@@ -63,7 +63,7 @@
         memcpy(out, mTable + index, mNumOutDims * sizeof(audio_coef_t));
     } else {
         getCoefRecurse(index, fracCoord, out, dim + 1);
-        if (LIKELY(fracCoord != 0)) {
+        if (CC_LIKELY(fracCoord != 0)) {
            audio_coef_t tempCoef[MAX_OUT_DIMS];
            getCoefRecurse(index + mInDimOffsets[dim], fracCoord, tempCoef,
                            dim + 1);
diff --git a/media/libeffects/testlibs/AudioCommon.h b/media/libeffects/testlibs/AudioCommon.h
index 444f93a..e8080dc 100644
--- a/media/libeffects/testlibs/AudioCommon.h
+++ b/media/libeffects/testlibs/AudioCommon.h
@@ -20,6 +20,7 @@
 
 #include <stdint.h>
 #include <stddef.h>
+#include <cutils/compiler.h>
 
 namespace android {
 
@@ -76,9 +77,9 @@
 // Convert a audio_sample_t sample to S15 (with clipping)
 inline int16_t audio_sample_t_to_s15_clip(audio_sample_t sample) {
     // TODO: optimize for targets supporting this as an atomic operation.
-    if (__builtin_expect(sample >= (0x7FFF << 9), 0)) {
+    if (CC_UNLIKELY(sample >= (0x7FFF << 9))) {
         return 0x7FFF;
-    } else if (__builtin_expect(sample <= -(0x8000 << 9), 0)) {
+    } else if (CC_UNLIKELY(sample <= -(0x8000 << 9))) {
         return 0x8000;
     } else {
         return audio_sample_t_to_s15(sample);
diff --git a/media/libeffects/testlibs/AudioPeakingFilter.cpp b/media/libeffects/testlibs/AudioPeakingFilter.cpp
index 60fefe6..99323ac 100644
--- a/media/libeffects/testlibs/AudioPeakingFilter.cpp
+++ b/media/libeffects/testlibs/AudioPeakingFilter.cpp
@@ -21,9 +21,7 @@
 
 #include <new>
 #include <assert.h>
-
-#define LIKELY( exp )       (__builtin_expect( (exp) != 0, true  ))
-#define UNLIKELY( exp )     (__builtin_expect( (exp) != 0, false ))
+#include <cutils/compiler.h>
 
 namespace android {
 // Format of the coefficient table:
@@ -66,12 +64,12 @@
 
 void AudioPeakingFilter::setFrequency(uint32_t millihertz) {
     mNominalFrequency = millihertz;
-    if (UNLIKELY(millihertz > mNiquistFreq / 2)) {
+    if (CC_UNLIKELY(millihertz > mNiquistFreq / 2)) {
         millihertz = mNiquistFreq / 2;
     }
     uint32_t normFreq = static_cast<uint32_t>(
             (static_cast<uint64_t>(millihertz) * mFrequencyFactor) >> 10);
-    if (LIKELY(normFreq > (1 << 23))) {
+    if (CC_LIKELY(normFreq > (1 << 23))) {
         mFrequency = (Effects_log2(normFreq) - ((32-9) << 15)) << (FREQ_PRECISION_BITS - 15);
     } else {
         mFrequency = 0;
@@ -107,11 +105,11 @@
     int32_t halfBW = (((mBandwidth + 1) / 2) << 15) / 1200;
 
     low = static_cast<uint32_t>((static_cast<uint64_t>(mNominalFrequency) * Effects_exp2(-halfBW + (16 << 15))) >> 16);
-    if (UNLIKELY(halfBW >= (16 << 15))) {
+    if (CC_UNLIKELY(halfBW >= (16 << 15))) {
         high = mNiquistFreq;
     } else {
         high = static_cast<uint32_t>((static_cast<uint64_t>(mNominalFrequency) * Effects_exp2(halfBW + (16 << 15))) >> 16);
-        if (UNLIKELY(high > mNiquistFreq)) {
+        if (CC_UNLIKELY(high > mNiquistFreq)) {
             high = mNiquistFreq;
         }
     }
diff --git a/media/libeffects/testlibs/AudioShelvingFilter.cpp b/media/libeffects/testlibs/AudioShelvingFilter.cpp
index b8650ba..e031287 100644
--- a/media/libeffects/testlibs/AudioShelvingFilter.cpp
+++ b/media/libeffects/testlibs/AudioShelvingFilter.cpp
@@ -21,9 +21,7 @@
 
 #include <new>
 #include <assert.h>
-
-#define LIKELY( exp )       (__builtin_expect( (exp) != 0, true  ))
-#define UNLIKELY( exp )     (__builtin_expect( (exp) != 0, false ))
+#include <cutils/compiler.h>
 
 namespace android {
 // Format of the coefficient tables:
@@ -71,13 +69,13 @@
 
 void AudioShelvingFilter::setFrequency(uint32_t millihertz) {
     mNominalFrequency = millihertz;
-    if (UNLIKELY(millihertz > mNiquistFreq / 2)) {
+    if (CC_UNLIKELY(millihertz > mNiquistFreq / 2)) {
         millihertz = mNiquistFreq / 2;
     }
     uint32_t normFreq = static_cast<uint32_t>(
             (static_cast<uint64_t>(millihertz) * mFrequencyFactor) >> 10);
     uint32_t log2minFreq = (mType == kLowShelf ? (32-10) : (32-2));
-    if (LIKELY(normFreq > (1U << log2minFreq))) {
+    if (CC_LIKELY(normFreq > (1U << log2minFreq))) {
         mFrequency = (Effects_log2(normFreq) - (log2minFreq << 15)) << (FREQ_PRECISION_BITS - 15);
     } else {
         mFrequency = 0;
diff --git a/media/libeffects/testlibs/EffectEqualizer.cpp b/media/libeffects/testlibs/EffectEqualizer.cpp
index 43f34de..35a4a61 100644
--- a/media/libeffects/testlibs/EffectEqualizer.cpp
+++ b/media/libeffects/testlibs/EffectEqualizer.cpp
@@ -114,7 +114,7 @@
 //--- local function prototypes
 
 int Equalizer_init(EqualizerContext *pContext);
-int Equalizer_configure(EqualizerContext *pContext, effect_config_t *pConfig);
+int Equalizer_setConfig(EqualizerContext *pContext, effect_config_t *pConfig);
 int Equalizer_getParameter(AudioEqualizer * pEqualizer, int32_t *pParam, size_t *pValueSize, void *pValue);
 int Equalizer_setParameter(AudioEqualizer * pEqualizer, int32_t *pParam, void *pValue);
 
@@ -140,7 +140,7 @@
     return 0;
 } /* end EffectQueryNext */
 
-extern "C" int EffectCreate(effect_uuid_t *uuid,
+extern "C" int EffectCreate(const effect_uuid_t *uuid,
                             int32_t sessionId,
                             int32_t ioId,
                             effect_handle_t *pHandle) {
@@ -195,7 +195,7 @@
     return 0;
 } /* end EffectRelease */
 
-extern "C" int EffectGetDescriptor(effect_uuid_t       *uuid,
+extern "C" int EffectGetDescriptor(const effect_uuid_t *uuid,
                                    effect_descriptor_t *pDescriptor) {
 
     if (pDescriptor == NULL || uuid == NULL){
@@ -224,7 +224,7 @@
 }
 
 //----------------------------------------------------------------------------
-// Equalizer_configure()
+// Equalizer_setConfig()
 //----------------------------------------------------------------------------
 // Purpose: Set input and output audio configuration.
 //
@@ -237,9 +237,9 @@
 //
 //----------------------------------------------------------------------------
 
-int Equalizer_configure(EqualizerContext *pContext, effect_config_t *pConfig)
+int Equalizer_setConfig(EqualizerContext *pContext, effect_config_t *pConfig)
 {
-    ALOGV("Equalizer_configure start");
+    ALOGV("Equalizer_setConfig start");
 
     CHECK_ARG(pContext != NULL);
     CHECK_ARG(pConfig != NULL);
@@ -272,7 +272,26 @@
                         pConfig->outputCfg.accessMode);
 
     return 0;
-}   // end Equalizer_configure
+}   // end Equalizer_setConfig
+
+//----------------------------------------------------------------------------
+// Equalizer_getConfig()
+//----------------------------------------------------------------------------
+// Purpose: Get input and output audio configuration.
+//
+// Inputs:
+//  pContext:   effect engine context
+//  pConfig:    pointer to effect_config_t structure holding input and output
+//      configuration parameters
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+void Equalizer_getConfig(EqualizerContext *pContext, effect_config_t *pConfig)
+{
+    memcpy(pConfig, &pContext->config, sizeof(effect_config_t));
+}   // end Equalizer_getConfig
 
 
 //----------------------------------------------------------------------------
@@ -332,7 +351,7 @@
 
     pContext->pEqualizer->enable(true);
 
-    Equalizer_configure(pContext, &pContext->config);
+    Equalizer_setConfig(pContext, &pContext->config);
 
     return 0;
 }   // end Equalizer_init
@@ -643,16 +662,22 @@
         }
         *(int *) pReplyData = Equalizer_init(pContext);
         break;
-    case EFFECT_CMD_CONFIGURE:
+    case EFFECT_CMD_SET_CONFIG:
         if (pCmdData == NULL || cmdSize != sizeof(effect_config_t)
                 || pReplyData == NULL || *replySize != sizeof(int)) {
             return -EINVAL;
         }
-        *(int *) pReplyData = Equalizer_configure(pContext,
+        *(int *) pReplyData = Equalizer_setConfig(pContext,
                 (effect_config_t *) pCmdData);
         break;
+    case EFFECT_CMD_GET_CONFIG:
+        if (pReplyData == NULL || *replySize != sizeof(effect_config_t)) {
+            return -EINVAL;
+        }
+        Equalizer_getConfig(pContext, (effect_config_t *) pReplyData);
+        break;
     case EFFECT_CMD_RESET:
-        Equalizer_configure(pContext, &pContext->config);
+        Equalizer_setConfig(pContext, &pContext->config);
         break;
     case EFFECT_CMD_GET_PARAM: {
         if (pCmdData == NULL || cmdSize < (int)(sizeof(effect_param_t) + sizeof(int32_t)) ||
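
The new EFFECT_CMD_GET_CONFIG cases mirror EFFECT_CMD_SET_CONFIG in reverse: the caller supplies a reply buffer of sizeof(effect_config_t) and the engine copies its current configuration into pReplyData (the command buffer is not used for GET). A self-contained sketch of that dispatch shape, using simplified stand-in types, names, and command codes rather than the real hardware/audio_effect.h definitions:

    #include <cstdint>
    #include <cstring>
    #include <cerrno>

    // Hypothetical, simplified stand-ins for the effect framework types.
    struct effect_config_t { uint32_t inSamplingRate; uint32_t outSamplingRate; };
    enum { EFFECT_CMD_SET_CONFIG, EFFECT_CMD_GET_CONFIG };  // stand-in codes, not the real enum

    struct EffectContext { effect_config_t config; };

    static int Effect_command_sketch(EffectContext* ctx, uint32_t cmdCode,
                                     uint32_t cmdSize, void* pCmdData,
                                     uint32_t* replySize, void* pReplyData) {
        switch (cmdCode) {
        case EFFECT_CMD_SET_CONFIG:
            if (pCmdData == nullptr || cmdSize != sizeof(effect_config_t) ||
                pReplyData == nullptr || *replySize != sizeof(int)) {
                return -EINVAL;
            }
            ctx->config = *static_cast<effect_config_t*>(pCmdData);
            *static_cast<int*>(pReplyData) = 0;     // SET reports status in the reply
            break;
        case EFFECT_CMD_GET_CONFIG:
            if (pReplyData == nullptr || *replySize != sizeof(effect_config_t)) {
                return -EINVAL;
            }
            // GET writes the configuration into the reply buffer.
            memcpy(pReplyData, &ctx->config, sizeof(effect_config_t));
            break;
        default:
            return -EINVAL;
        }
        return 0;
    }
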
diff --git a/media/libeffects/testlibs/EffectReverb.c b/media/libeffects/testlibs/EffectReverb.c
index d22868a..8351712 100644
--- a/media/libeffects/testlibs/EffectReverb.c
+++ b/media/libeffects/testlibs/EffectReverb.c
@@ -111,7 +111,7 @@
     return 0;
 }
 
-int EffectCreate(effect_uuid_t *uuid,
+int EffectCreate(const effect_uuid_t *uuid,
         int32_t sessionId,
         int32_t ioId,
         effect_handle_t *pHandle) {
@@ -182,7 +182,7 @@
     return 0;
 }
 
-int EffectGetDescriptor(effect_uuid_t       *uuid,
+int EffectGetDescriptor(const effect_uuid_t *uuid,
                         effect_descriptor_t *pDescriptor) {
     int i;
     int length = sizeof(gDescriptors) / sizeof(const effect_descriptor_t *);
@@ -318,14 +318,20 @@
             pRvbModule->context.mState = REVERB_STATE_INITIALIZED;
         }
         break;
-    case EFFECT_CMD_CONFIGURE:
+    case EFFECT_CMD_SET_CONFIG:
         if (pCmdData == NULL || cmdSize != sizeof(effect_config_t)
                 || pReplyData == NULL || *replySize != sizeof(int)) {
             return -EINVAL;
         }
-        *(int *) pReplyData = Reverb_Configure(pRvbModule,
+        *(int *) pReplyData = Reverb_setConfig(pRvbModule,
                 (effect_config_t *)pCmdData, false);
         break;
+    case EFFECT_CMD_GET_CONFIG:
+        if (pReplyData == NULL || *replySize != sizeof(effect_config_t)) {
+            return -EINVAL;
+        }
+        Reverb_getConfig(pRvbModule, (effect_config_t *) pReplyData);
+        break;
     case EFFECT_CMD_RESET:
         Reverb_Reset(pReverb, false);
         break;
@@ -492,7 +498,7 @@
     pRvbModule->config.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
     pRvbModule->config.outputCfg.mask = EFFECT_CONFIG_ALL;
 
-    ret = Reverb_Configure(pRvbModule, &pRvbModule->config, true);
+    ret = Reverb_setConfig(pRvbModule, &pRvbModule->config, true);
     if (ret < 0) {
         ALOGV("Reverb_Init error %d on module %p", ret, pRvbModule);
     }
@@ -501,7 +507,7 @@
 }
 
 /*----------------------------------------------------------------------------
- * Reverb_Init()
+ * Reverb_setConfig()
  *----------------------------------------------------------------------------
  * Purpose:
  *  Set input and output audio configuration.
@@ -518,7 +524,7 @@
  *----------------------------------------------------------------------------
  */
 
-int Reverb_Configure(reverb_module_t *pRvbModule, effect_config_t *pConfig,
+int Reverb_setConfig(reverb_module_t *pRvbModule, effect_config_t *pConfig,
         bool init) {
     reverb_object_t *pReverb = &pRvbModule->context;
     int bufferSizeInSamples;
@@ -531,12 +537,12 @@
         || pConfig->outputCfg.channels != OUTPUT_CHANNELS
         || pConfig->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT
         || pConfig->outputCfg.format != AUDIO_FORMAT_PCM_16_BIT) {
-        ALOGV("Reverb_Configure invalid config");
+        ALOGV("Reverb_setConfig invalid config");
         return -EINVAL;
     }
     if ((pReverb->m_Aux && (pConfig->inputCfg.channels != AUDIO_CHANNEL_OUT_MONO)) ||
         (!pReverb->m_Aux && (pConfig->inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO))) {
-        ALOGV("Reverb_Configure invalid config");
+        ALOGV("Reverb_setConfig invalid config");
         return -EINVAL;
     }
 
@@ -576,7 +582,7 @@
         pReverb->m_nCosWT_5KHz = 25997;
         break;
     default:
-        ALOGV("Reverb_Configure invalid sampling rate %d", pReverb->m_nSamplingRate);
+        ALOGV("Reverb_setConfig invalid sampling rate %d", pReverb->m_nSamplingRate);
         return -EINVAL;
     }
 
@@ -620,6 +626,28 @@
 }
 
 /*----------------------------------------------------------------------------
+ * Reverb_getConfig()
+ *----------------------------------------------------------------------------
+ * Purpose:
+ *  Get input and output audio configuration.
+ *
+ * Inputs:
+ *  pRvbModule    - pointer to reverb effect module
+ *  pConfig       - pointer to effect_config_t structure containing input
+ *              and output audio parameters configuration
+ * Outputs:
+ *
+ * Side Effects:
+ *
+ *----------------------------------------------------------------------------
+ */
+
+void Reverb_getConfig(reverb_module_t *pRvbModule, effect_config_t *pConfig)
+{
+    memcpy(pConfig, &pRvbModule->config, sizeof(effect_config_t));
+}
+
+/*----------------------------------------------------------------------------
  * Reverb_Reset()
  *----------------------------------------------------------------------------
  * Purpose:
@@ -844,7 +872,7 @@
             if (param == REVERB_PARAM_ROOM_HF_LEVEL) {
                 break;
             }
-            pValue32 = &pProperties->decayTime;
+            pValue32 = (int32_t *)&pProperties->decayTime;
             /* FALL THROUGH */
 
         case REVERB_PARAM_DECAY_TIME:
@@ -916,7 +944,7 @@
             if (param == REVERB_PARAM_REFLECTIONS_LEVEL) {
                 break;
             }
-            pValue32 = &pProperties->reflectionsDelay;
+            pValue32 = (int32_t *)&pProperties->reflectionsDelay;
             /* FALL THROUGH */
 
         case REVERB_PARAM_REFLECTIONS_DELAY:
@@ -940,7 +968,7 @@
             if (param == REVERB_PARAM_REVERB_LEVEL) {
                 break;
             }
-            pValue32 = &pProperties->reverbDelay;
+            pValue32 = (int32_t *)&pProperties->reverbDelay;
             /* FALL THROUGH */
 
         case REVERB_PARAM_REVERB_DELAY:
diff --git a/media/libeffects/testlibs/EffectReverb.h b/media/libeffects/testlibs/EffectReverb.h
index 8e2cc31..1fb14a7 100644
--- a/media/libeffects/testlibs/EffectReverb.h
+++ b/media/libeffects/testlibs/EffectReverb.h
@@ -303,12 +303,12 @@
 int EffectQueryNumberEffects(uint32_t *pNumEffects);
 int EffectQueryEffect(uint32_t index,
                       effect_descriptor_t *pDescriptor);
-int EffectCreate(effect_uuid_t *effectUID,
+int EffectCreate(const effect_uuid_t *effectUID,
                  int32_t sessionId,
                  int32_t ioId,
                  effect_handle_t *pHandle);
 int EffectRelease(effect_handle_t handle);
-int EffectGetDescriptor(effect_uuid_t       *uuid,
+int EffectGetDescriptor(const effect_uuid_t *uuid,
                         effect_descriptor_t *pDescriptor);
 
 static int Reverb_Process(effect_handle_t self,
@@ -329,7 +329,8 @@
 */
 
 int Reverb_Init(reverb_module_t *pRvbModule, int aux, int preset);
-int Reverb_Configure(reverb_module_t *pRvbModule, effect_config_t *pConfig, bool init);
+int Reverb_setConfig(reverb_module_t *pRvbModule, effect_config_t *pConfig, bool init);
+void Reverb_getConfig(reverb_module_t *pRvbModule, effect_config_t *pConfig);
 void Reverb_Reset(reverb_object_t *pReverb, bool init);
 
 int Reverb_setParameter (reverb_object_t *pReverb, int32_t param, size_t size, void *pValue);
diff --git a/media/libeffects/visualizer/Android.mk b/media/libeffects/visualizer/Android.mk
index 2160177..76b5110 100644
--- a/media/libeffects/visualizer/Android.mk
+++ b/media/libeffects/visualizer/Android.mk
@@ -17,7 +17,7 @@
 
 LOCAL_C_INCLUDES := \
 	$(call include-path-for, graphics corecg) \
-	system/media/audio_effects/include
+	$(call include-path-for, audio-effects)
 
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp
index c441710..51c8b68 100644
--- a/media/libeffects/visualizer/EffectVisualizer.cpp
+++ b/media/libeffects/visualizer/EffectVisualizer.cpp
@@ -78,7 +78,7 @@
 }
 
 //----------------------------------------------------------------------------
-// Visualizer_configure()
+// Visualizer_setConfig()
 //----------------------------------------------------------------------------
 // Purpose: Set input and output audio configuration.
 //
@@ -91,9 +91,9 @@
 //
 //----------------------------------------------------------------------------
 
-int Visualizer_configure(VisualizerContext *pContext, effect_config_t *pConfig)
+int Visualizer_setConfig(VisualizerContext *pContext, effect_config_t *pConfig)
 {
-    ALOGV("Visualizer_configure start");
+    ALOGV("Visualizer_setConfig start");
 
     if (pConfig->inputCfg.samplingRate != pConfig->outputCfg.samplingRate) return -EINVAL;
     if (pConfig->inputCfg.channels != pConfig->outputCfg.channels) return -EINVAL;
@@ -112,6 +112,26 @@
 
 
 //----------------------------------------------------------------------------
+// Visualizer_getConfig()
+//----------------------------------------------------------------------------
+// Purpose: Get input and output audio configuration.
+//
+// Inputs:
+//  pContext:   effect engine context
+//  pConfig:    pointer to effect_config_t structure holding input and output
+//      configuration parameters
+//
+// Outputs:
+//
+//----------------------------------------------------------------------------
+
+void Visualizer_getConfig(VisualizerContext *pContext, effect_config_t *pConfig)
+{
+    memcpy(pConfig, &pContext->mConfig, sizeof(effect_config_t));
+}
+
+
+//----------------------------------------------------------------------------
 // Visualizer_init()
 //----------------------------------------------------------------------------
 // Purpose: Initialize engine with default configuration.
@@ -144,7 +164,7 @@
 
     pContext->mCaptureSize = VISUALIZER_CAPTURE_SIZE_MAX;
 
-    Visualizer_configure(pContext, &pContext->mConfig);
+    Visualizer_setConfig(pContext, &pContext->mConfig);
 
     return 0;
 }
@@ -170,7 +190,7 @@
     return 0;
 }
 
-int VisualizerLib_Create(effect_uuid_t *uuid,
+int VisualizerLib_Create(const effect_uuid_t *uuid,
                          int32_t sessionId,
                          int32_t ioId,
                          effect_handle_t *pHandle) {
@@ -220,7 +240,7 @@
     return 0;
 }
 
-int VisualizerLib_GetDescriptor(effect_uuid_t       *uuid,
+int VisualizerLib_GetDescriptor(const effect_uuid_t *uuid,
                                 effect_descriptor_t *pDescriptor) {
 
     if (pDescriptor == NULL || uuid == NULL){
@@ -337,14 +357,21 @@
         }
         *(int *) pReplyData = Visualizer_init(pContext);
         break;
-    case EFFECT_CMD_CONFIGURE:
+    case EFFECT_CMD_SET_CONFIG:
         if (pCmdData == NULL || cmdSize != sizeof(effect_config_t)
                 || pReplyData == NULL || *replySize != sizeof(int)) {
             return -EINVAL;
         }
-        *(int *) pReplyData = Visualizer_configure(pContext,
+        *(int *) pReplyData = Visualizer_setConfig(pContext,
                 (effect_config_t *) pCmdData);
         break;
+    case EFFECT_CMD_GET_CONFIG:
+        if (pReplyData == NULL ||
+            *replySize != sizeof(effect_config_t)) {
+            return -EINVAL;
+        }
+        Visualizer_getConfig(pContext, (effect_config_t *)pReplyData);
+        break;
     case EFFECT_CMD_RESET:
         Visualizer_reset(pContext);
         break;
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 7af4a87..21e8f29 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -43,13 +43,12 @@
     IEffectClient.cpp \
     AudioEffect.cpp \
     Visualizer.cpp \
-    MemoryLeakTrackUtil.cpp \
-    fixedfft.cpp.arm
+    MemoryLeakTrackUtil.cpp
 
 LOCAL_SHARED_LIBRARIES := \
 	libui libcutils libutils libbinder libsonivox libicuuc libexpat \
         libcamera_client libstagefright_foundation \
-        libgui libdl
+        libgui libdl libaudioutils libmedia_native
 
 LOCAL_WHOLE_STATIC_LIBRARY := libmedia_helper
 
@@ -58,9 +57,10 @@
 LOCAL_C_INCLUDES := \
     $(JNI_H_INCLUDE) \
     $(call include-path-for, graphics corecg) \
-    $(TOP)/frameworks/base/include/media/stagefright/openmax \
+    $(TOP)/frameworks/native/include/media/openmax \
     external/icu4c/common \
     external/expat/lib \
-    system/media/audio_effects/include
+    $(call include-path-for, audio-effects) \
+    $(call include-path-for, audio-utils)
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index 6639d06..34451ca 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -159,7 +159,7 @@
     mCblk->buffer = (uint8_t *)mCblk + bufOffset;
 
     iEffect->asBinder()->linkToDeath(mIEffectClient);
-    ALOGV("set() %p OK effect: %s id: %d status %d enabled %d, ", this, mDescriptor.name, mId, mStatus, mEnabled);
+    ALOGV("set() %p OK effect: %s id: %d status %d enabled %d", this, mDescriptor.name, mId, mStatus, mEnabled);
 
     return mStatus;
 }
@@ -174,7 +174,7 @@
             mIEffect->disconnect();
             mIEffect->asBinder()->unlinkToDeath(mIEffectClient);
         }
-         IPCThreadState::self()->flushCommands();
+        IPCThreadState::self()->flushCommands();
     }
     mIEffect.clear();
     mIEffectClient.clear();
@@ -202,7 +202,7 @@
 status_t AudioEffect::setEnabled(bool enabled)
 {
     if (mStatus != NO_ERROR) {
-        return INVALID_OPERATION;
+        return (mStatus == ALREADY_EXISTS) ? (status_t) INVALID_OPERATION : mStatus;
     }
 
     status_t status = NO_ERROR;
@@ -231,7 +231,7 @@
 {
     if (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS) {
         ALOGV("command() bad status %d", mStatus);
-        return INVALID_OPERATION;
+        return mStatus;
     }
 
     if (cmdCode == EFFECT_CMD_ENABLE || cmdCode == EFFECT_CMD_DISABLE) {
@@ -263,7 +263,7 @@
 status_t AudioEffect::setParameter(effect_param_t *param)
 {
     if (mStatus != NO_ERROR) {
-        return INVALID_OPERATION;
+        return (mStatus == ALREADY_EXISTS) ? (status_t) INVALID_OPERATION : mStatus;
     }
 
     if (param == NULL || param->psize == 0 || param->vsize == 0) {
@@ -281,7 +281,7 @@
 status_t AudioEffect::setParameterDeferred(effect_param_t *param)
 {
     if (mStatus != NO_ERROR) {
-        return INVALID_OPERATION;
+        return (mStatus == ALREADY_EXISTS) ? (status_t) INVALID_OPERATION : mStatus;
     }
 
     if (param == NULL || param->psize == 0 || param->vsize == 0) {
@@ -307,7 +307,7 @@
 status_t AudioEffect::setParameterCommit()
 {
     if (mStatus != NO_ERROR) {
-        return INVALID_OPERATION;
+        return (mStatus == ALREADY_EXISTS) ? (status_t) INVALID_OPERATION : mStatus;
     }
 
     Mutex::Autolock _l(mCblk->lock);
@@ -321,7 +321,7 @@
 status_t AudioEffect::getParameter(effect_param_t *param)
 {
     if (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS) {
-        return INVALID_OPERATION;
+        return mStatus;
     }
 
     if (param == NULL || param->psize == 0 || param->vsize == 0) {
@@ -341,8 +341,8 @@
 void AudioEffect::binderDied()
 {
     ALOGW("IEffect died");
-    mStatus = NO_INIT;
-    if (mCbf) {
+    mStatus = DEAD_OBJECT;
+    if (mCbf != NULL) {
         status_t status = DEAD_OBJECT;
         mCbf(EVENT_ERROR, mUserData, &status);
     }
@@ -363,7 +363,7 @@
             mStatus = ALREADY_EXISTS;
         }
     }
-    if (mCbf) {
+    if (mCbf != NULL) {
         mCbf(EVENT_CONTROL_STATUS_CHANGED, mUserData, &controlGranted);
     }
 }
@@ -373,7 +373,7 @@
     ALOGV("enableStatusChanged %p enabled %d mCbf %p", this, enabled, mCbf);
     if (mStatus == ALREADY_EXISTS) {
         mEnabled = enabled;
-        if (mCbf) {
+        if (mCbf != NULL) {
             mCbf(EVENT_ENABLE_STATUS_CHANGED, mUserData, &enabled);
         }
     }
@@ -389,7 +389,7 @@
         return;
     }
 
-    if (mCbf && cmdCode == EFFECT_CMD_SET_PARAM) {
+    if (mCbf != NULL && cmdCode == EFFECT_CMD_SET_PARAM) {
         effect_param_t *cmd = (effect_param_t *)cmdData;
         cmd->status = *(int32_t *)replyData;
         mCbf(EVENT_PARAMETER_CHANGED, mUserData, cmd);
@@ -412,7 +412,8 @@
     return af->queryEffect(index, descriptor);
 }
 
-status_t AudioEffect::getEffectDescriptor(effect_uuid_t *uuid, effect_descriptor_t *descriptor)
+status_t AudioEffect::getEffectDescriptor(const effect_uuid_t *uuid,
+        effect_descriptor_t *descriptor) /*const*/
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
@@ -479,4 +480,3 @@
 
 
 }; // namespace android
-
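
The AudioEffect changes above stop collapsing every construction failure into INVALID_OPERATION: mutating calls (setEnabled, setParameter, ...) still report INVALID_OPERATION when the client merely lacks control of the effect (ALREADY_EXISTS), while read-only calls accept that state and everything else now surfaces the original mStatus, such as DEAD_OBJECT after binderDied(). A small sketch of that mapping, with placeholder status values rather than the real utils/Errors.h codes:

    // Placeholder values, not the real Errors.h codes.
    enum status_t { NO_ERROR = 0, ALREADY_EXISTS, INVALID_OPERATION, DEAD_OBJECT };

    // Mutating entry points: a client without control gets INVALID_OPERATION;
    // real failures (e.g. DEAD_OBJECT) pass through unchanged.
    static status_t gateMutation(status_t mStatus) {
        if (mStatus != NO_ERROR) {
            return (mStatus == ALREADY_EXISTS) ? INVALID_OPERATION : mStatus;
        }
        return NO_ERROR;
    }

    // Read-only entry points: ALREADY_EXISTS is acceptable; anything else is
    // returned as-is instead of being mapped to INVALID_OPERATION.
    static status_t gateQuery(status_t mStatus) {
        if (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS) {
            return mStatus;
        }
        return NO_ERROR;
    }
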
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 34a5eb7..05ade75 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -39,9 +39,7 @@
 
 #include <system/audio.h>
 #include <cutils/bitops.h>
-
-#define LIKELY( exp )       (__builtin_expect( (exp) != 0, true  ))
-#define UNLIKELY( exp )     (__builtin_expect( (exp) != 0, false ))
+#include <cutils/compiler.h>
 
 namespace android {
 // ---------------------------------------------------------------------------
@@ -50,7 +48,7 @@
 status_t AudioRecord::getMinFrameCount(
         int* frameCount,
         uint32_t sampleRate,
-        int format,
+        audio_format_t format,
         int channelCount)
 {
     size_t size = 0;
@@ -80,22 +78,24 @@
 // ---------------------------------------------------------------------------
 
 AudioRecord::AudioRecord()
-    : mStatus(NO_INIT), mSessionId(0)
+    : mStatus(NO_INIT), mSessionId(0),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
 {
 }
 
 AudioRecord::AudioRecord(
-        int inputSource,
+        audio_source_t inputSource,
         uint32_t sampleRate,
-        int format,
+        audio_format_t format,
         uint32_t channelMask,
         int frameCount,
-        uint32_t flags,
+        record_flags flags,
         callback_t cbf,
         void* user,
         int notificationFrames,
         int sessionId)
-    : mStatus(NO_INIT), mSessionId(0)
+    : mStatus(NO_INIT), mSessionId(0),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
 {
     mStatus = set(inputSource, sampleRate, format, channelMask,
             frameCount, flags, cbf, user, notificationFrames, sessionId);
@@ -119,12 +119,12 @@
 }
 
 status_t AudioRecord::set(
-        int inputSource,
+        audio_source_t inputSource,
         uint32_t sampleRate,
-        int format,
+        audio_format_t format,
         uint32_t channelMask,
         int frameCount,
-        uint32_t flags,
+        record_flags flags,
         callback_t cbf,
         void* user,
         int notificationFrames,
@@ -148,7 +148,7 @@
         sampleRate = DEFAULT_SAMPLE_RATE;
     }
     // these below should probably come from the audioFlinger too...
-    if (format == 0) {
+    if (format == AUDIO_FORMAT_DEFAULT) {
         format = AUDIO_FORMAT_PCM_16_BIT;
     }
     // validate parameters
@@ -206,11 +206,8 @@
         return status;
     }
 
-    if (cbf != 0) {
+    if (cbf != NULL) {
         mClientRecordThread = new ClientRecordThread(*this, threadCanCallJava);
-        if (mClientRecordThread == 0) {
-            return NO_INIT;
-        }
     }
 
     mStatus = NO_ERROR;
@@ -231,7 +228,7 @@
     mMarkerReached = false;
     mNewPosition = 0;
     mUpdatePeriod = 0;
-    mInputSource = (uint8_t)inputSource;
+    mInputSource = inputSource;
     mFlags = flags;
     mInput = input;
     AudioSystem::acquireAudioSessionId(mSessionId);
@@ -251,7 +248,7 @@
     return mLatency;
 }
 
-int AudioRecord::format() const
+audio_format_t AudioRecord::format() const
 {
     return mFormat;
 }
@@ -266,7 +263,7 @@
     return mFrameCount;
 }
 
-int AudioRecord::frameSize() const
+size_t AudioRecord::frameSize() const
 {
     if (audio_is_linear_pcm(mFormat)) {
         return channelCount()*audio_bytes_per_sample(mFormat);
@@ -275,9 +272,9 @@
     }
 }
 
-int AudioRecord::inputSource() const
+audio_source_t AudioRecord::inputSource() const
 {
-    return (int)mInputSource;
+    return mInputSource;
 }
 
 // -------------------------------------------------------------------------
@@ -296,22 +293,36 @@
                 return WOULD_BLOCK;
             }
         }
-        t->mLock.lock();
-     }
+    }
 
     AutoMutex lock(mLock);
     // acquire a strong reference on the IAudioRecord and IMemory so that they cannot be destroyed
     // while we are accessing the cblk
-    sp <IAudioRecord> audioRecord = mAudioRecord;
-    sp <IMemory> iMem = mCblkMemory;
+    sp<IAudioRecord> audioRecord = mAudioRecord;
+    sp<IMemory> iMem = mCblkMemory;
     audio_track_cblk_t* cblk = mCblk;
     if (mActive == 0) {
         mActive = 1;
 
+        pid_t tid;
+        if (t != 0) {
+            mReadyToRun = WOULD_BLOCK;
+            t->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
+            tid = t->getTid();  // pid_t is unknown until run()
+            ALOGV("getTid=%d", tid);
+            if (tid == -1) {
+                tid = 0;
+            }
+            // thread blocks in readyToRun()
+        } else {
+            tid = 0;    // not gettid()
+        }
+
         cblk->lock.lock();
         if (!(cblk->flags & CBLK_INVALID_MSK)) {
             cblk->lock.unlock();
-            ret = mAudioRecord->start();
+            ALOGV("mAudioRecord->start(tid=%d)", tid);
+            ret = mAudioRecord->start(tid);
             cblk->lock.lock();
             if (ret == DEAD_OBJECT) {
                 android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
@@ -326,19 +337,22 @@
             cblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
             cblk->waitTimeMs = 0;
             if (t != 0) {
-               t->run("ClientRecordThread", ANDROID_PRIORITY_AUDIO);
+                // thread unblocks in readyToRun() and returns NO_ERROR
+                mReadyToRun = NO_ERROR;
+                mCondition.signal();
             } else {
-                setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_AUDIO);
+                mPreviousPriority = getpriority(PRIO_PROCESS, 0);
+                mPreviousSchedulingGroup = androidGetThreadSchedulingGroup(0);
+                androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
             }
         } else {
             mActive = 0;
+            // thread unblocks in readyToRun() and returns NO_INIT
+            mReadyToRun = NO_INIT;
+            mCondition.signal();
         }
     }
 
-    if (t != 0) {
-        t->mLock.unlock();
-    }
-
     return ret;
 }
 
@@ -348,10 +362,6 @@
 
     ALOGV("stop");
 
-    if (t != 0) {
-        t->mLock.lock();
-    }
-
     AutoMutex lock(mLock);
     if (mActive == 1) {
         mActive = 0;
@@ -363,14 +373,11 @@
         if (t != 0) {
             t->requestExit();
         } else {
-            setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL);
+            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
+            androidSetThreadSchedulingGroup(0, mPreviousSchedulingGroup);
         }
     }
 
-    if (t != 0) {
-        t->mLock.unlock();
-    }
-
     return NO_ERROR;
 }
 
@@ -379,7 +386,7 @@
     return !mActive;
 }
 
-uint32_t AudioRecord::getSampleRate()
+uint32_t AudioRecord::getSampleRate() const
 {
     AutoMutex lock(mLock);
     return mCblk->sampleRate;
@@ -387,7 +394,7 @@
 
 status_t AudioRecord::setMarkerPosition(uint32_t marker)
 {
-    if (mCbf == 0) return INVALID_OPERATION;
+    if (mCbf == NULL) return INVALID_OPERATION;
 
     mMarkerPosition = marker;
     mMarkerReached = false;
@@ -395,9 +402,9 @@
     return NO_ERROR;
 }
 
-status_t AudioRecord::getMarkerPosition(uint32_t *marker)
+status_t AudioRecord::getMarkerPosition(uint32_t *marker) const
 {
-    if (marker == 0) return BAD_VALUE;
+    if (marker == NULL) return BAD_VALUE;
 
     *marker = mMarkerPosition;
 
@@ -406,7 +413,7 @@
 
 status_t AudioRecord::setPositionUpdatePeriod(uint32_t updatePeriod)
 {
-    if (mCbf == 0) return INVALID_OPERATION;
+    if (mCbf == NULL) return INVALID_OPERATION;
 
     uint32_t curPosition;
     getPosition(&curPosition);
@@ -416,18 +423,18 @@
     return NO_ERROR;
 }
 
-status_t AudioRecord::getPositionUpdatePeriod(uint32_t *updatePeriod)
+status_t AudioRecord::getPositionUpdatePeriod(uint32_t *updatePeriod) const
 {
-    if (updatePeriod == 0) return BAD_VALUE;
+    if (updatePeriod == NULL) return BAD_VALUE;
 
     *updatePeriod = mUpdatePeriod;
 
     return NO_ERROR;
 }
 
-status_t AudioRecord::getPosition(uint32_t *position)
+status_t AudioRecord::getPosition(uint32_t *position) const
 {
-    if (position == 0) return BAD_VALUE;
+    if (position == NULL) return BAD_VALUE;
 
     AutoMutex lock(mLock);
     *position = mCblk->user;
@@ -435,7 +442,7 @@
     return NO_ERROR;
 }
 
-unsigned int AudioRecord::getInputFramesLost()
+unsigned int AudioRecord::getInputFramesLost() const
 {
     if (mActive)
         return AudioSystem::getInputFramesLost(mInput);
@@ -448,7 +455,7 @@
 // must be called with mLock held
 status_t AudioRecord::openRecord_l(
         uint32_t sampleRate,
-        uint32_t format,
+        audio_format_t format,
         uint32_t channelMask,
         int frameCount,
         uint32_t flags,
@@ -508,11 +515,11 @@
         goto start_loop_here;
         while (framesReady == 0) {
             active = mActive;
-            if (UNLIKELY(!active)) {
+            if (CC_UNLIKELY(!active)) {
                 cblk->lock.unlock();
                 return NO_MORE_BUFFERS;
             }
-            if (UNLIKELY(!waitCount)) {
+            if (CC_UNLIKELY(!waitCount)) {
                 cblk->lock.unlock();
                 return WOULD_BLOCK;
             }
@@ -529,13 +536,13 @@
             if (cblk->flags & CBLK_INVALID_MSK) {
                 goto create_new_record;
             }
-            if (__builtin_expect(result!=NO_ERROR, false)) {
+            if (CC_UNLIKELY(result != NO_ERROR)) {
                 cblk->waitTimeMs += waitTimeMs;
                 if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) {
                     ALOGW(   "obtainBuffer timed out (is the CPU pegged?) "
                             "user=%08x, server=%08x", cblk->user, cblk->server);
                     cblk->lock.unlock();
-                    result = mAudioRecord->start();
+                    result = mAudioRecord->start(0);    // callback thread hasn't changed
                     cblk->lock.lock();
                     if (result == DEAD_OBJECT) {
                         android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
@@ -590,7 +597,7 @@
     mCblk->stepUser(audioBuffer->frameCount);
 }
 
-audio_io_handle_t AudioRecord::getInput()
+audio_io_handle_t AudioRecord::getInput() const
 {
     AutoMutex lock(mLock);
     return mInput;
@@ -608,7 +615,7 @@
     return mInput;
 }
 
-int AudioRecord::getSessionId()
+int AudioRecord::getSessionId() const
 {
     return mSessionId;
 }
@@ -631,8 +638,8 @@
     mLock.lock();
     // acquire a strong reference on the IAudioRecord and IMemory so that they cannot be destroyed
     // while we are accessing the cblk
-    sp <IAudioRecord> audioRecord = mAudioRecord;
-    sp <IMemory> iMem = mCblkMemory;
+    sp<IAudioRecord> audioRecord = mAudioRecord;
+    sp<IMemory> iMem = mCblkMemory;
     mLock.unlock();
 
     do {
@@ -677,8 +684,8 @@
     mLock.lock();
     // acquire a strong reference on the IAudioRecord and IMemory so that they cannot be destroyed
     // while we are accessing the cblk
-    sp <IAudioRecord> audioRecord = mAudioRecord;
-    sp <IMemory> iMem = mCblkMemory;
+    sp<IAudioRecord> audioRecord = mAudioRecord;
+    sp<IMemory> iMem = mCblkMemory;
     audio_track_cblk_t* cblk = mCblk;
     mLock.unlock();
 
@@ -773,7 +780,7 @@
         result = openRecord_l(cblk->sampleRate, mFormat, mChannelMask,
                 mFrameCount, mFlags, getInput_l());
         if (result == NO_ERROR) {
-            result = mAudioRecord->start();
+            result = mAudioRecord->start(0);    // callback thread hasn't changed
         }
         if (result != NO_ERROR) {
             mActive = false;
@@ -799,7 +806,7 @@
         }
     }
     ALOGV("restoreRecord_l() status %d mActive %d cblk %p, old cblk %p flags %08x old flags %08x",
-         result, mActive, mCblk, cblk, mCblk->flags, cblk->flags);
+        result, mActive, mCblk, cblk, mCblk->flags, cblk->flags);
 
     if (result == NO_ERROR) {
         // from now on we switch to the newly created cblk
@@ -824,7 +831,15 @@
     return mReceiver.processAudioBuffer(this);
 }
 
+status_t AudioRecord::ClientRecordThread::readyToRun()
+{
+    AutoMutex _l(mReceiver.mLock);
+    while (mReceiver.mReadyToRun == WOULD_BLOCK) {
+        mReceiver.mCondition.wait(mReceiver.mLock);
+    }
+    return mReceiver.mReadyToRun;
+}
+
 // -------------------------------------------------------------------------
 
 }; // namespace android
-
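
AudioRecord::start() now launches the callback thread before the IAudioRecord::start() binder call so it can pass the thread's tid, and parks that thread in readyToRun() until start() has published whether the record session actually came up. A self-contained sketch of the handshake using standard-library primitives instead of the Android Mutex/Condition classes; the names and status values are illustrative, not the real members.

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    // Hypothetical stand-in for the AudioRecord <-> ClientRecordThread handshake.
    struct Recorder {
        std::mutex lock;
        std::condition_variable cond;
        int readyToRun = 1;      // 1 ~ WOULD_BLOCK: "decision pending"

        // Equivalent of ClientRecordThread::readyToRun(): block until start()
        // has published the outcome, then return it (0 ~ NO_ERROR, <0 ~ error).
        int waitUntilReady() {
            std::unique_lock<std::mutex> l(lock);
            cond.wait(l, [this] { return readyToRun != 1; });
            return readyToRun;
        }

        // Equivalent of the tail of start(): publish the outcome and wake the
        // callback thread, which was already running before the IPC call.
        void publish(int status) {
            { std::lock_guard<std::mutex> l(lock); readyToRun = status; }
            cond.notify_one();
        }
    };

    int main() {
        Recorder r;
        std::thread cb([&] { r.waitUntilReady(); });  // callback thread parks here
        r.publish(0);                                 // start() succeeded
        cb.join();
        return 0;
    }
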
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index f7f129c..33c7d03 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -35,12 +35,13 @@
 sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient;
 audio_error_callback AudioSystem::gAudioErrorCallback = NULL;
 // Cached values
-DefaultKeyedVector<int, audio_io_handle_t> AudioSystem::gStreamOutputMap(0);
+
+DefaultKeyedVector<audio_stream_type_t, audio_io_handle_t> AudioSystem::gStreamOutputMap(0);
 DefaultKeyedVector<audio_io_handle_t, AudioSystem::OutputDescriptor *> AudioSystem::gOutputs(0);
 
-// Cached values for recording queries
+// Cached values for recording queries, all protected by gLock
 uint32_t AudioSystem::gPrevInSamplingRate = 16000;
-int AudioSystem::gPrevInFormat = AUDIO_FORMAT_PCM_16_BIT;
+audio_format_t AudioSystem::gPrevInFormat = AUDIO_FORMAT_PCM_16_BIT;
 int AudioSystem::gPrevInChannelCount = 1;
 size_t AudioSystem::gInBuffSize = 0;
 
@@ -49,7 +50,7 @@
 const sp<IAudioFlinger>& AudioSystem::get_audio_flinger()
 {
     Mutex::Autolock _l(gLock);
-    if (gAudioFlinger.get() == 0) {
+    if (gAudioFlinger == 0) {
         sp<IServiceManager> sm = defaultServiceManager();
         sp<IBinder> binder;
         do {
@@ -58,14 +59,14 @@
                 break;
             ALOGW("AudioFlinger not published, waiting...");
             usleep(500000); // 0.5 s
-        } while(true);
+        } while (true);
         if (gAudioFlingerClient == NULL) {
             gAudioFlingerClient = new AudioFlingerClient();
         } else {
             if (gAudioErrorCallback) {
                 gAudioErrorCallback(NO_ERROR);
             }
-         }
+        }
         binder->linkToDeath(gAudioFlingerClient);
         gAudioFlinger = interface_cast<IAudioFlinger>(binder);
         gAudioFlinger->registerClient(gAudioFlingerClient);
@@ -120,7 +121,8 @@
     return NO_ERROR;
 }
 
-status_t AudioSystem::setStreamVolume(int stream, float value, int output)
+status_t AudioSystem::setStreamVolume(audio_stream_type_t stream, float value,
+        audio_io_handle_t output)
 {
     if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE;
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
@@ -129,7 +131,7 @@
     return NO_ERROR;
 }
 
-status_t AudioSystem::setStreamMute(int stream, bool mute)
+status_t AudioSystem::setStreamMute(audio_stream_type_t stream, bool mute)
 {
     if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE;
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
@@ -138,7 +140,8 @@
     return NO_ERROR;
 }
 
-status_t AudioSystem::getStreamVolume(int stream, float* volume, int output)
+status_t AudioSystem::getStreamVolume(audio_stream_type_t stream, float* volume,
+        audio_io_handle_t output)
 {
     if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE;
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
@@ -147,7 +150,7 @@
     return NO_ERROR;
 }
 
-status_t AudioSystem::getStreamMute(int stream, bool* mute)
+status_t AudioSystem::getStreamMute(audio_stream_type_t stream, bool* mute)
 {
     if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE;
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
@@ -156,9 +159,9 @@
     return NO_ERROR;
 }
 
-status_t AudioSystem::setMode(int mode)
+status_t AudioSystem::setMode(audio_mode_t mode)
 {
-    if (mode >= AUDIO_MODE_CNT) return BAD_VALUE;
+    if (uint32_t(mode) >= AUDIO_MODE_CNT) return BAD_VALUE;
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
     return af->setMode(mode);
@@ -203,7 +206,12 @@
     return volume ? 100 - int(dBConvertInverse * log(volume) + 0.5) : 0;
 }
 
-status_t AudioSystem::getOutputSamplingRate(int* samplingRate, int streamType)
+// DEPRECATED
+status_t AudioSystem::getOutputSamplingRate(int* samplingRate, int streamType) {
+    return getOutputSamplingRate(samplingRate, (audio_stream_type_t)streamType);
+}
+
+status_t AudioSystem::getOutputSamplingRate(int* samplingRate, audio_stream_type_t streamType)
 {
     OutputDescriptor *outputDesc;
     audio_io_handle_t output;
@@ -212,14 +220,14 @@
         streamType = AUDIO_STREAM_MUSIC;
     }
 
-    output = getOutput((audio_stream_type_t)streamType);
+    output = getOutput(streamType);
     if (output == 0) {
         return PERMISSION_DENIED;
     }
 
     gLock.lock();
     outputDesc = AudioSystem::gOutputs.valueFor(output);
-    if (outputDesc == 0) {
+    if (outputDesc == NULL) {
         ALOGV("getOutputSamplingRate() no output descriptor for output %d in gOutputs", output);
         gLock.unlock();
         const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
@@ -236,7 +244,12 @@
     return NO_ERROR;
 }
 
-status_t AudioSystem::getOutputFrameCount(int* frameCount, int streamType)
+// DEPRECATED
+status_t AudioSystem::getOutputFrameCount(int* frameCount, int streamType) {
+    return getOutputFrameCount(frameCount, (audio_stream_type_t)streamType);
+}
+
+status_t AudioSystem::getOutputFrameCount(int* frameCount, audio_stream_type_t streamType)
 {
     OutputDescriptor *outputDesc;
     audio_io_handle_t output;
@@ -245,14 +258,14 @@
         streamType = AUDIO_STREAM_MUSIC;
     }
 
-    output = getOutput((audio_stream_type_t)streamType);
+    output = getOutput(streamType);
     if (output == 0) {
         return PERMISSION_DENIED;
     }
 
     gLock.lock();
     outputDesc = AudioSystem::gOutputs.valueFor(output);
-    if (outputDesc == 0) {
+    if (outputDesc == NULL) {
         gLock.unlock();
         const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
         if (af == 0) return PERMISSION_DENIED;
@@ -267,7 +280,7 @@
     return NO_ERROR;
 }
 
-status_t AudioSystem::getOutputLatency(uint32_t* latency, int streamType)
+status_t AudioSystem::getOutputLatency(uint32_t* latency, audio_stream_type_t streamType)
 {
     OutputDescriptor *outputDesc;
     audio_io_handle_t output;
@@ -276,14 +289,14 @@
         streamType = AUDIO_STREAM_MUSIC;
     }
 
-    output = getOutput((audio_stream_type_t)streamType);
+    output = getOutput(streamType);
     if (output == 0) {
         return PERMISSION_DENIED;
     }
 
     gLock.lock();
     outputDesc = AudioSystem::gOutputs.valueFor(output);
-    if (outputDesc == 0) {
+    if (outputDesc == NULL) {
         gLock.unlock();
         const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
         if (af == 0) return PERMISSION_DENIED;
@@ -298,25 +311,30 @@
     return NO_ERROR;
 }
 
-status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, int format, int channelCount,
+status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, audio_format_t format, int channelCount,
     size_t* buffSize)
 {
+    gLock.lock();
     // Do we have a stale gInBufferSize or are we requesting the input buffer size for new values
-    if ((gInBuffSize == 0) || (sampleRate != gPrevInSamplingRate) || (format != gPrevInFormat)
+    size_t inBuffSize = gInBuffSize;
+    if ((inBuffSize == 0) || (sampleRate != gPrevInSamplingRate) || (format != gPrevInFormat)
         || (channelCount != gPrevInChannelCount)) {
+        gLock.unlock();
+        const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+        if (af == 0) {
+            return PERMISSION_DENIED;
+        }
+        inBuffSize = af->getInputBufferSize(sampleRate, format, channelCount);
+        gLock.lock();
         // save the request params
         gPrevInSamplingRate = sampleRate;
         gPrevInFormat = format;
         gPrevInChannelCount = channelCount;
 
-        gInBuffSize = 0;
-        const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
-        if (af == 0) {
-            return PERMISSION_DENIED;
-        }
-        gInBuffSize = af->getInputBufferSize(sampleRate, format, channelCount);
+        gInBuffSize = inBuffSize;
     }
-    *buffSize = gInBuffSize;
+    gLock.unlock();
+    *buffSize = inBuffSize;
 
     return NO_ERROR;
 }
@@ -328,7 +346,7 @@
     return af->setVoiceVolume(value);
 }
 
-status_t AudioSystem::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, int stream)
+status_t AudioSystem::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, audio_stream_type_t stream)
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
@@ -337,7 +355,7 @@
         stream = AUDIO_STREAM_MUSIC;
     }
 
-    return af->getRenderPosition(halFrames, dspFrames, getOutput((audio_stream_type_t)stream));
+    return af->getRenderPosition(halFrames, dspFrames, getOutput(stream));
 }
 
 unsigned int AudioSystem::getInputFramesLost(audio_io_handle_t ioHandle) {
@@ -386,10 +404,11 @@
     ALOGW("AudioFlinger server died!");
 }
 
-void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, int ioHandle, void *param2) {
+void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, audio_io_handle_t ioHandle,
+        const void *param2) {
     ALOGV("ioConfigChanged() event %d", event);
-    OutputDescriptor *desc;
-    uint32_t stream;
+    const OutputDescriptor *desc;
+    audio_stream_type_t stream;
 
     if (ioHandle == 0) return;
 
@@ -397,8 +416,8 @@
 
     switch (event) {
     case STREAM_CONFIG_CHANGED:
-        if (param2 == 0) break;
-        stream = *(uint32_t *)param2;
+        if (param2 == NULL) break;
+        stream = *(const audio_stream_type_t *)param2;
         ALOGV("ioConfigChanged() STREAM_CONFIG_CHANGED stream %d, output %d", stream, ioHandle);
         if (gStreamOutputMap.indexOfKey(stream) >= 0) {
             gStreamOutputMap.replaceValueFor(stream, ioHandle);
@@ -409,8 +428,8 @@
             ALOGV("ioConfigChanged() opening already existing output! %d", ioHandle);
             break;
         }
-        if (param2 == 0) break;
-        desc = (OutputDescriptor *)param2;
+        if (param2 == NULL) break;
+        desc = (const OutputDescriptor *)param2;
 
         OutputDescriptor *outputDesc =  new OutputDescriptor(*desc);
         gOutputs.add(ioHandle, outputDesc);
@@ -438,8 +457,8 @@
             ALOGW("ioConfigChanged() modifying unknow output! %d", ioHandle);
             break;
         }
-        if (param2 == 0) break;
-        desc = (OutputDescriptor *)param2;
+        if (param2 == NULL) break;
+        desc = (const OutputDescriptor *)param2;
 
         ALOGV("ioConfigChanged() new config for output %d samplingRate %d, format %d channels %d frameCount %d latency %d",
                 ioHandle, desc->samplingRate, desc->format,
@@ -462,8 +481,8 @@
     gAudioErrorCallback = cb;
 }
 
-bool AudioSystem::routedToA2dpOutput(int streamType) {
-    switch(streamType) {
+bool AudioSystem::routedToA2dpOutput(audio_stream_type_t streamType) {
+    switch (streamType) {
     case AUDIO_STREAM_MUSIC:
     case AUDIO_STREAM_VOICE_CALL:
     case AUDIO_STREAM_BLUETOOTH_SCO:
@@ -484,7 +503,7 @@
 const sp<IAudioPolicyService>& AudioSystem::get_audio_policy_service()
 {
     gLock.lock();
-    if (gAudioPolicyService.get() == 0) {
+    if (gAudioPolicyService == 0) {
         sp<IServiceManager> sm = defaultServiceManager();
         sp<IBinder> binder;
         do {
@@ -493,7 +512,7 @@
                 break;
             ALOGW("AudioPolicyService not published, waiting...");
             usleep(500000); // 0.5 s
-        } while(true);
+        } while (true);
         if (gAudioPolicyServiceClient == NULL) {
             gAudioPolicyServiceClient = new AudioPolicyServiceClient();
         }
@@ -531,21 +550,15 @@
     return aps->getDeviceConnectionState(device, device_address);
 }
 
-status_t AudioSystem::setPhoneState(int state)
+status_t AudioSystem::setPhoneState(audio_mode_t state)
 {
+    if (uint32_t(state) >= AUDIO_MODE_CNT) return BAD_VALUE;
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
 
     return aps->setPhoneState(state);
 }
 
-status_t AudioSystem::setRingerMode(uint32_t mode, uint32_t mask)
-{
-    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
-    if (aps == 0) return PERMISSION_DENIED;
-    return aps->setRingerMode(mode, mask);
-}
-
 status_t AudioSystem::setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
@@ -563,7 +576,7 @@
 
 audio_io_handle_t AudioSystem::getOutput(audio_stream_type_t stream,
                                     uint32_t samplingRate,
-                                    uint32_t format,
+                                    audio_format_t format,
                                     uint32_t channels,
                                     audio_policy_output_flags_t flags)
 {
@@ -621,9 +634,9 @@
     aps->releaseOutput(output);
 }
 
-audio_io_handle_t AudioSystem::getInput(int inputSource,
+audio_io_handle_t AudioSystem::getInput(audio_source_t inputSource,
                                     uint32_t samplingRate,
-                                    uint32_t format,
+                                    audio_format_t format,
                                     uint32_t channels,
                                     audio_in_acoustics_t acoustics,
                                     int sessionId)
@@ -663,18 +676,22 @@
     return aps->initStreamVolume(stream, indexMin, indexMax);
 }
 
-status_t AudioSystem::setStreamVolumeIndex(audio_stream_type_t stream, int index)
+status_t AudioSystem::setStreamVolumeIndex(audio_stream_type_t stream,
+                                           int index,
+                                           audio_devices_t device)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    return aps->setStreamVolumeIndex(stream, index);
+    return aps->setStreamVolumeIndex(stream, index, device);
 }
 
-status_t AudioSystem::getStreamVolumeIndex(audio_stream_type_t stream, int *index)
+status_t AudioSystem::getStreamVolumeIndex(audio_stream_type_t stream,
+                                           int *index,
+                                           audio_devices_t device)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    return aps->getStreamVolumeIndex(stream, index);
+    return aps->getStreamVolumeIndex(stream, index, device);
 }
 
 uint32_t AudioSystem::getStrategyForStream(audio_stream_type_t stream)
@@ -684,10 +701,10 @@
     return aps->getStrategyForStream(stream);
 }
 
-uint32_t AudioSystem::getDevicesForStream(audio_stream_type_t stream)
+audio_devices_t AudioSystem::getDevicesForStream(audio_stream_type_t stream)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
-    if (aps == 0) return 0;
+    if (aps == 0) return (audio_devices_t)0;
     return aps->getDevicesForStream(stream);
 }
 
@@ -723,7 +740,7 @@
     return aps->setEffectEnabled(id, enabled);
 }
 
-status_t AudioSystem::isStreamActive(int stream, bool* state, uint32_t inPastMs)
+status_t AudioSystem::isStreamActive(audio_stream_type_t stream, bool* state, uint32_t inPastMs)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
@@ -751,4 +768,3 @@
 }
 
 }; // namespace android
-
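
AudioSystem::getInputBufferSize() now reads and updates its cached values under gLock but deliberately drops the lock around the binder call, so a slow AudioFlinger query cannot stall other AudioSystem callers; the result is published back into the cache once the lock is re-taken. A standalone sketch of that "unlock around the RPC, re-lock to publish" pattern, with a hypothetical queryServer() standing in for IAudioFlinger::getInputBufferSize():

    #include <cstddef>
    #include <mutex>

    // Hypothetical slow query standing in for the AudioFlinger binder call.
    static size_t queryServer(unsigned rate, int format, int channels) {
        return (rate / 100) * channels * 2;   // placeholder value
    }

    static std::mutex gLock;
    static unsigned gPrevRate; static int gPrevFormat, gPrevChannels;
    static size_t gInBuffSize;   // 0 means "not cached yet"

    size_t getInputBufferSize(unsigned rate, int format, int channels) {
        std::unique_lock<std::mutex> l(gLock);
        size_t size = gInBuffSize;
        if (size == 0 || rate != gPrevRate || format != gPrevFormat ||
                channels != gPrevChannels) {
            l.unlock();                               // don't hold gLock across the RPC
            size = queryServer(rate, format, channels);
            l.lock();
            gPrevRate = rate; gPrevFormat = format; gPrevChannels = channels;
            gInBuffSize = size;                       // publish under the lock
        }
        return size;                                  // return the local copy
    }
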
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 3a8bfa3..048be1d 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -1,4 +1,4 @@
-/* //device/extlibs/pv/android/AudioTrack.cpp
+/*
 **
 ** Copyright 2007, The Android Open Source Project
 **
@@ -38,12 +38,12 @@
 #include <utils/Atomic.h>
 
 #include <cutils/bitops.h>
+#include <cutils/compiler.h>
 
 #include <system/audio.h>
 #include <system/audio_policy.h>
 
-#define LIKELY( exp )       (__builtin_expect( (exp) != 0, true  ))
-#define UNLIKELY( exp )     (__builtin_expect( (exp) != 0, false ))
+#include <audio_utils/primitives.h>
 
 namespace android {
 // ---------------------------------------------------------------------------
@@ -51,7 +51,7 @@
 // static
 status_t AudioTrack::getMinFrameCount(
         int* frameCount,
-        int streamType,
+        audio_stream_type_t streamType,
         uint32_t sampleRate)
 {
     int afSampleRate;
@@ -72,18 +72,42 @@
     if (minBufCount < 2) minBufCount = 2;
 
     *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
-              afFrameCount * minBufCount * sampleRate / afSampleRate;
+            afFrameCount * minBufCount * sampleRate / afSampleRate;
     return NO_ERROR;
 }
 
 // ---------------------------------------------------------------------------
 
 AudioTrack::AudioTrack()
-    : mStatus(NO_INIT)
+    : mStatus(NO_INIT),
+      mIsTimed(false),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
 {
 }
 
 AudioTrack::AudioTrack(
+        audio_stream_type_t streamType,
+        uint32_t sampleRate,
+        audio_format_t format,
+        int channelMask,
+        int frameCount,
+        audio_policy_output_flags_t flags,
+        callback_t cbf,
+        void* user,
+        int notificationFrames,
+        int sessionId)
+    : mStatus(NO_INIT),
+      mIsTimed(false),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
+{
+    mStatus = set(streamType, sampleRate, format, channelMask,
+            frameCount, flags, cbf, user, notificationFrames,
+            0, false, sessionId);
+}
+
+AudioTrack::AudioTrack(
         int streamType,
         uint32_t sampleRate,
         int format,
@@ -94,25 +118,30 @@
         void* user,
         int notificationFrames,
         int sessionId)
-    : mStatus(NO_INIT)
+    : mStatus(NO_INIT),
+      mIsTimed(false),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
 {
-    mStatus = set(streamType, sampleRate, format, channelMask,
-            frameCount, flags, cbf, user, notificationFrames,
+    mStatus = set((audio_stream_type_t)streamType, sampleRate, (audio_format_t)format, channelMask,
+            frameCount, (audio_policy_output_flags_t)flags, cbf, user, notificationFrames,
             0, false, sessionId);
 }
 
 AudioTrack::AudioTrack(
-        int streamType,
+        audio_stream_type_t streamType,
         uint32_t sampleRate,
-        int format,
+        audio_format_t format,
         int channelMask,
         const sp<IMemory>& sharedBuffer,
-        uint32_t flags,
+        audio_policy_output_flags_t flags,
         callback_t cbf,
         void* user,
         int notificationFrames,
         int sessionId)
-    : mStatus(NO_INIT)
+    : mStatus(NO_INIT),
+      mIsTimed(false),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             0, flags, cbf, user, notificationFrames,
@@ -139,12 +168,12 @@
 }
 
 status_t AudioTrack::set(
-        int streamType,
+        audio_stream_type_t streamType,
         uint32_t sampleRate,
-        int format,
+        audio_format_t format,
         int channelMask,
         int frameCount,
-        uint32_t flags,
+        audio_policy_output_flags_t flags,
         callback_t cbf,
         void* user,
         int notificationFrames,
@@ -178,7 +207,7 @@
         sampleRate = afSampleRate;
     }
     // these below should probably come from the audioFlinger too...
-    if (format == 0) {
+    if (format == AUDIO_FORMAT_DEFAULT) {
         format = AUDIO_FORMAT_PCM_16_BIT;
     }
     if (channelMask == 0) {
@@ -193,7 +222,7 @@
 
     // force direct flag if format is not linear PCM
     if (!audio_is_linear_pcm(format)) {
-        flags |= AUDIO_POLICY_OUTPUT_FLAG_DIRECT;
+        flags = (audio_policy_output_flags_t) (flags | AUDIO_POLICY_OUTPUT_FLAG_DIRECT);
     }
 
     if (!audio_is_output_channel(channelMask)) {
@@ -203,9 +232,9 @@
     uint32_t channelCount = popcount(channelMask);
 
     audio_io_handle_t output = AudioSystem::getOutput(
-                                    (audio_stream_type_t)streamType,
-                                    sampleRate,format, channelMask,
-                                    (audio_policy_output_flags_t)flags);
+                                    streamType,
+                                    sampleRate, format, channelMask,
+                                    flags);
 
     if (output == 0) {
         ALOGE("Could not get audio output for stream type %d", streamType);
@@ -214,7 +243,7 @@
 
     mVolume[LEFT] = 1.0f;
     mVolume[RIGHT] = 1.0f;
-    mSendLevel = 0;
+    mSendLevel = 0.0f;
     mFrameCount = frameCount;
     mNotificationFramesReq = notificationFrames;
     mSessionId = sessionId;
@@ -223,7 +252,7 @@
     // create the IAudioTrack
     status_t status = createTrack_l(streamType,
                                   sampleRate,
-                                  (uint32_t)format,
+                                  format,
                                   (uint32_t)channelMask,
                                   frameCount,
                                   flags,
@@ -235,23 +264,19 @@
         return status;
     }
 
-    if (cbf != 0) {
+    if (cbf != NULL) {
         mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
-        if (mAudioTrackThread == 0) {
-          ALOGE("Could not create callback thread");
-          return NO_INIT;
-        }
     }
 
     mStatus = NO_ERROR;
 
     mStreamType = streamType;
-    mFormat = (uint32_t)format;
+    mFormat = format;
     mChannelMask = (uint32_t)channelMask;
     mChannelCount = channelCount;
     mSharedBuffer = sharedBuffer;
     mMuted = false;
-    mActive = 0;
+    mActive = false;
     mCbf = cbf;
     mUserData = user;
     mLoopCount = 0;
@@ -278,12 +303,12 @@
     return mLatency;
 }
 
-int AudioTrack::streamType() const
+audio_stream_type_t AudioTrack::streamType() const
 {
     return mStreamType;
 }
 
-int AudioTrack::format() const
+audio_format_t AudioTrack::format() const
 {
     return mFormat;
 }
@@ -298,7 +323,7 @@
     return mCblk->frameCount;
 }
 
-int AudioTrack::frameSize() const
+size_t AudioTrack::frameSize() const
 {
     if (audio_is_linear_pcm(mFormat)) {
         return channelCount()*audio_bytes_per_sample(mFormat);
@@ -327,34 +352,43 @@
                 return;
             }
         }
-        t->mLock.lock();
-     }
+    }
 
     AutoMutex lock(mLock);
     // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
     // while we are accessing the cblk
-    sp <IAudioTrack> audioTrack = mAudioTrack;
-    sp <IMemory> iMem = mCblkMemory;
+    sp<IAudioTrack> audioTrack = mAudioTrack;
+    sp<IMemory> iMem = mCblkMemory;
     audio_track_cblk_t* cblk = mCblk;
 
-    if (mActive == 0) {
+    if (!mActive) {
         mFlushed = false;
-        mActive = 1;
+        mActive = true;
         mNewPosition = cblk->server + mUpdatePeriod;
         cblk->lock.lock();
         cblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
         cblk->waitTimeMs = 0;
         android_atomic_and(~CBLK_DISABLED_ON, &cblk->flags);
+        pid_t tid;
         if (t != 0) {
-           t->run("AudioTrackThread", ANDROID_PRIORITY_AUDIO);
+            t->run("AudioTrack", ANDROID_PRIORITY_AUDIO);
+            tid = t->getTid();  // pid_t is unknown until run()
+            ALOGV("getTid=%d", tid);
+            if (tid == -1) {
+                tid = 0;
+            }
         } else {
-            setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_AUDIO);
+            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
+            mPreviousSchedulingGroup = androidGetThreadSchedulingGroup(0);
+            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
+            tid = 0;    // not gettid()
         }
 
         ALOGV("start %p before lock cblk %p", this, mCblk);
         if (!(cblk->flags & CBLK_INVALID_MSK)) {
             cblk->lock.unlock();
-            status = mAudioTrack->start();
+            ALOGV("mAudioTrack->start(tid=%d)", tid);
+            status = mAudioTrack->start(tid);
             cblk->lock.lock();
             if (status == DEAD_OBJECT) {
                 android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
@@ -366,18 +400,16 @@
         cblk->lock.unlock();
         if (status != NO_ERROR) {
             ALOGV("start() failed");
-            mActive = 0;
+            mActive = false;
             if (t != 0) {
                 t->requestExit();
             } else {
-                setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL);
+                setpriority(PRIO_PROCESS, 0, mPreviousPriority);
+                androidSetThreadSchedulingGroup(0, mPreviousSchedulingGroup);
             }
         }
     }
 
-    if (t != 0) {
-        t->mLock.unlock();
-    }
 }
 
 void AudioTrack::stop()
@@ -385,13 +417,10 @@
     sp<AudioTrackThread> t = mAudioTrackThread;
 
     ALOGV("stop %p", this);
-    if (t != 0) {
-        t->mLock.lock();
-    }
 
     AutoMutex lock(mLock);
-    if (mActive == 1) {
-        mActive = 0;
+    if (mActive) {
+        mActive = false;
         mCblk->cv.signal();
         mAudioTrack->stop();
         // Cancel loops (If we are in the middle of a loop, playback
@@ -408,18 +437,17 @@
         if (t != 0) {
             t->requestExit();
         } else {
-            setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL);
+            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
+            androidSetThreadSchedulingGroup(0, mPreviousSchedulingGroup);
         }
     }
 
-    if (t != 0) {
-        t->mLock.unlock();
-    }
 }
 
 bool AudioTrack::stopped() const
 {
-    return !mActive;
+    AutoMutex lock(mLock);
+    return stopped_l();
 }
 
 void AudioTrack::flush()
@@ -451,8 +479,8 @@
 {
     ALOGV("pause");
     AutoMutex lock(mLock);
-    if (mActive == 1) {
-        mActive = 0;
+    if (mActive) {
+        mActive = false;
         mAudioTrack->pause();
     }
 }
@@ -470,7 +498,7 @@
 
 status_t AudioTrack::setVolume(float left, float right)
 {
-    if (left > 1.0f || right > 1.0f) {
+    if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) {
         return BAD_VALUE;
     }
 
@@ -478,13 +506,12 @@
     mVolume[LEFT] = left;
     mVolume[RIGHT] = right;
 
-    // write must be atomic
-    mCblk->volumeLR = (uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000);
+    mCblk->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
 
     return NO_ERROR;
 }
 
-void AudioTrack::getVolume(float* left, float* right)
+void AudioTrack::getVolume(float* left, float* right) const
 {
     if (left != NULL) {
         *left  = mVolume[LEFT];
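For reference, the packed volume word stored via setVolumeLR() above carries both channel gains in U4.12 fixed point so the mixer can pick them up with a single 32-bit read. A minimal sketch of the packing, showing only the arithmetic visible in this change (not the audio_track_cblk_t implementation itself):

    // Each gain is clamped to [0.0, 1.0] by setVolume() and converted to
    // unsigned 4.12 fixed point (1.0f -> 0x1000); right occupies the high
    // 16 bits, left the low 16 bits.
    static inline uint32_t packVolumeLR(float left, float right) {
        return (uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000);
    }
    // packVolumeLR(1.0f, 1.0f) == 0x10001000, which is also the default
    // mVolumeLR value in the audio_track_cblk_t constructor further down.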
@@ -497,19 +524,19 @@
 status_t AudioTrack::setAuxEffectSendLevel(float level)
 {
     ALOGV("setAuxEffectSendLevel(%f)", level);
-    if (level > 1.0f) {
+    if (level < 0.0f || level > 1.0f) {
         return BAD_VALUE;
     }
     AutoMutex lock(mLock);
 
     mSendLevel = level;
 
-    mCblk->sendLevel = uint16_t(level * 0x1000);
+    mCblk->setSendLevel(level);
 
     return NO_ERROR;
 }
 
-void AudioTrack::getAuxEffectSendLevel(float* level)
+void AudioTrack::getAuxEffectSendLevel(float* level) const
 {
     if (level != NULL) {
         *level  = mSendLevel;
@@ -520,6 +547,10 @@
 {
     int afSamplingRate;
 
+    if (mIsTimed) {
+        return INVALID_OPERATION;
+    }
+
     if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
         return NO_INIT;
     }
@@ -531,8 +562,12 @@
     return NO_ERROR;
 }
 
-uint32_t AudioTrack::getSampleRate()
+uint32_t AudioTrack::getSampleRate() const
 {
+    if (mIsTimed) {
+        return INVALID_OPERATION;
+    }
+
     AutoMutex lock(mLock);
     return mCblk->sampleRate;
 }
@@ -558,6 +593,10 @@
         return NO_ERROR;
     }
 
+    if (mIsTimed) {
+        return INVALID_OPERATION;
+    }
+
     if (loopStart >= loopEnd ||
         loopEnd - loopStart > cblk->frameCount ||
         cblk->server > loopStart) {
@@ -579,29 +618,9 @@
     return NO_ERROR;
 }
 
-status_t AudioTrack::getLoop(uint32_t *loopStart, uint32_t *loopEnd, int *loopCount)
-{
-    AutoMutex lock(mLock);
-    if (loopStart != 0) {
-        *loopStart = mCblk->loopStart;
-    }
-    if (loopEnd != 0) {
-        *loopEnd = mCblk->loopEnd;
-    }
-    if (loopCount != 0) {
-        if (mCblk->loopCount < 0) {
-            *loopCount = -1;
-        } else {
-            *loopCount = mCblk->loopCount;
-        }
-    }
-
-    return NO_ERROR;
-}
-
 status_t AudioTrack::setMarkerPosition(uint32_t marker)
 {
-    if (mCbf == 0) return INVALID_OPERATION;
+    if (mCbf == NULL) return INVALID_OPERATION;
 
     mMarkerPosition = marker;
     mMarkerReached = false;
@@ -609,9 +628,9 @@
     return NO_ERROR;
 }
 
-status_t AudioTrack::getMarkerPosition(uint32_t *marker)
+status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
 {
-    if (marker == 0) return BAD_VALUE;
+    if (marker == NULL) return BAD_VALUE;
 
     *marker = mMarkerPosition;
 
@@ -620,7 +639,7 @@
 
 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
 {
-    if (mCbf == 0) return INVALID_OPERATION;
+    if (mCbf == NULL) return INVALID_OPERATION;
 
     uint32_t curPosition;
     getPosition(&curPosition);
@@ -630,9 +649,9 @@
     return NO_ERROR;
 }
 
-status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod)
+status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
 {
-    if (updatePeriod == 0) return BAD_VALUE;
+    if (updatePeriod == NULL) return BAD_VALUE;
 
     *updatePeriod = mUpdatePeriod;
 
@@ -641,10 +660,13 @@
 
 status_t AudioTrack::setPosition(uint32_t position)
 {
-    AutoMutex lock(mLock);
-    Mutex::Autolock _l(mCblk->lock);
+    if (mIsTimed) return INVALID_OPERATION;
 
-    if (!stopped()) return INVALID_OPERATION;
+    AutoMutex lock(mLock);
+
+    if (!stopped_l()) return INVALID_OPERATION;
+
+    Mutex::Autolock _l(mCblk->lock);
 
     if (position > mCblk->user) return BAD_VALUE;
 
@@ -656,7 +678,7 @@
 
 status_t AudioTrack::getPosition(uint32_t *position)
 {
-    if (position == 0) return BAD_VALUE;
+    if (position == NULL) return BAD_VALUE;
     AutoMutex lock(mLock);
     *position = mFlushed ? 0 : mCblk->server;
 
@@ -667,7 +689,7 @@
 {
     AutoMutex lock(mLock);
 
-    if (!stopped()) return INVALID_OPERATION;
+    if (!stopped_l()) return INVALID_OPERATION;
 
     flush_l();
 
@@ -685,11 +707,11 @@
 // must be called with mLock held
 audio_io_handle_t AudioTrack::getOutput_l()
 {
-    return AudioSystem::getOutput((audio_stream_type_t)mStreamType,
-            mCblk->sampleRate, mFormat, mChannelMask, (audio_policy_output_flags_t)mFlags);
+    return AudioSystem::getOutput(mStreamType,
+            mCblk->sampleRate, mFormat, mChannelMask, mFlags);
 }
 
-int AudioTrack::getSessionId()
+int AudioTrack::getSessionId() const
 {
     return mSessionId;
 }
@@ -708,12 +730,12 @@
 
 // must be called with mLock held
 status_t AudioTrack::createTrack_l(
-        int streamType,
+        audio_stream_type_t streamType,
         uint32_t sampleRate,
-        uint32_t format,
+        audio_format_t format,
         uint32_t channelMask,
         int frameCount,
-        uint32_t flags,
+        audio_policy_output_flags_t flags,
         const sp<IMemory>& sharedBuffer,
         audio_io_handle_t output,
         bool enforceFrameCount)
@@ -721,8 +743,8 @@
     status_t status;
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
     if (audioFlinger == 0) {
-       ALOGE("Could not get audioflinger");
-       return NO_INIT;
+        ALOGE("Could not get audioflinger");
+        return NO_INIT;
     }
 
     int afSampleRate;
@@ -768,7 +790,7 @@
                 frameCount = minFrameCount;
             }
         } else {
-            // Ensure that buffer alignment matches channelcount
+            // Ensure that buffer alignment matches channelCount
             int channelCount = popcount(channelMask);
             if (((uint32_t)sharedBuffer->pointer() & (channelCount | 1)) != 0) {
                 ALOGE("Invalid buffer alignement: address %p, channelCount %d", sharedBuffer->pointer(), channelCount);
@@ -787,6 +809,7 @@
                                                       ((uint16_t)flags) << 16,
                                                       sharedBuffer,
                                                       output,
+                                                      mIsTimed,
                                                       &mSessionId,
                                                       &status);
 
@@ -799,9 +822,7 @@
         ALOGE("Could not get control block");
         return NO_INIT;
     }
-    mAudioTrack.clear();
     mAudioTrack = track;
-    mCblkMemory.clear();
     mCblkMemory = cblk;
     mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer());
     android_atomic_or(CBLK_DIRECTION_OUT, &mCblk->flags);
@@ -809,12 +830,12 @@
         mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
     } else {
         mCblk->buffers = sharedBuffer->pointer();
-         // Force buffer full condition as data is already present in shared memory
+        // Force buffer full condition as data is already present in shared memory
         mCblk->stepUser(mCblk->frameCount);
     }
 
-    mCblk->volumeLR = (uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) | uint16_t(mVolume[LEFT] * 0x1000);
-    mCblk->sendLevel = uint16_t(mSendLevel * 0x1000);
+    mCblk->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) | uint16_t(mVolume[LEFT] * 0x1000));
+    mCblk->setSendLevel(mSendLevel);
     mAudioTrack->attachAuxEffect(mAuxEffectId);
     mCblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
     mCblk->waitTimeMs = 0;
@@ -826,7 +847,7 @@
 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
 {
     AutoMutex lock(mLock);
-    int active;
+    bool active;
     status_t result = NO_ERROR;
     audio_track_cblk_t* cblk = mCblk;
     uint32_t framesReq = audioBuffer->frameCount;
@@ -848,12 +869,12 @@
         goto start_loop_here;
         while (framesAvail == 0) {
             active = mActive;
-            if (UNLIKELY(!active)) {
+            if (CC_UNLIKELY(!active)) {
                 ALOGV("Not active and NO_MORE_BUFFERS");
                 cblk->lock.unlock();
                 return NO_MORE_BUFFERS;
             }
-            if (UNLIKELY(!waitCount)) {
+            if (CC_UNLIKELY(!waitCount)) {
                 cblk->lock.unlock();
                 return WOULD_BLOCK;
             }
@@ -862,7 +883,7 @@
                 result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
                 cblk->lock.unlock();
                 mLock.lock();
-                if (mActive == 0) {
+                if (!mActive) {
                     return status_t(STOPPED);
                 }
                 cblk->lock.lock();
@@ -871,7 +892,7 @@
             if (cblk->flags & CBLK_INVALID_MSK) {
                 goto create_new_track;
             }
-            if (__builtin_expect(result!=NO_ERROR, false)) {
+            if (CC_UNLIKELY(result != NO_ERROR)) {
                 cblk->waitTimeMs += waitTimeMs;
                 if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) {
                     // timing out when a loop has been set and we have already written upto loop end
@@ -881,7 +902,7 @@
                                 "user=%08x, server=%08x", this, cblk->user, cblk->server);
                         //unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140)
                         cblk->lock.unlock();
-                        result = mAudioTrack->start();
+                        result = mAudioTrack->start(0); // callback thread hasn't changed
                         cblk->lock.lock();
                         if (result == DEAD_OBJECT) {
                             android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
@@ -913,7 +934,7 @@
     if (mActive && (cblk->flags & CBLK_DISABLED_MSK)) {
         android_atomic_and(~CBLK_DISABLED_ON, &cblk->flags);
         ALOGW("obtainBuffer() track %p disabled, restarting", this);
-        mAudioTrack->start();
+        mAudioTrack->start(0);  // callback thread hasn't changed
     }
 
     cblk->waitTimeMs = 0;
@@ -955,9 +976,11 @@
 {
 
     if (mSharedBuffer != 0) return INVALID_OPERATION;
+    if (mIsTimed) return INVALID_OPERATION;
 
     if (ssize_t(userSize) < 0) {
-        // sanity-check. user is most-likely passing an error code.
+        // Sanity-check: user is most-likely passing an error code, and it would
+        // make the return value ambiguous (actualSize vs error).
         ALOGE("AudioTrack::write(buffer=%p, size=%u (%d)",
                 buffer, userSize, userSize);
         return BAD_VALUE;
@@ -968,20 +991,18 @@
     // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
     // while we are accessing the cblk
     mLock.lock();
-    sp <IAudioTrack> audioTrack = mAudioTrack;
-    sp <IMemory> iMem = mCblkMemory;
+    sp<IAudioTrack> audioTrack = mAudioTrack;
+    sp<IMemory> iMem = mCblkMemory;
     mLock.unlock();
 
     ssize_t written = 0;
     const int8_t *src = (const int8_t *)buffer;
     Buffer audioBuffer;
-    size_t frameSz = (size_t)frameSize();
+    size_t frameSz = frameSize();
 
     do {
         audioBuffer.frameCount = userSize/frameSz;
 
-        // Calling obtainBuffer() with a negative wait count causes
-        // an (almost) infinite wait time.
         status_t err = obtainBuffer(&audioBuffer, -1);
         if (err < 0) {
             // out of buffers, return #bytes written
@@ -995,12 +1016,7 @@
         if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_POLICY_OUTPUT_FLAG_DIRECT)) {
             // Divide capacity by 2 to take expansion into account
             toWrite = audioBuffer.size>>1;
-            // 8 to 16 bit conversion
-            int count = toWrite;
-            int16_t *dst = (int16_t *)(audioBuffer.i8);
-            while(count--) {
-                *dst++ = (int16_t)(*src++^0x80) << 8;
-            }
+            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) src, toWrite);
         } else {
             toWrite = audioBuffer.size;
             memcpy(audioBuffer.i8, src, toWrite);
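The open-coded 8-to-16-bit loop removed above is replaced by memcpy_to_i16_from_u8() from <audio_utils/primitives.h>. A minimal sketch of the conversion that helper is expected to perform, equivalent to the removed loop (the helper's actual implementation is not part of this change):

    // Unsigned 8-bit PCM is biased around 0x80 (silence), so removing the
    // bias recenters each sample at zero and the left shift scales it to
    // signed 16-bit full scale.
    static inline void to_i16_from_u8(int16_t *dst, const uint8_t *src, size_t count) {
        while (count--) {
            *dst++ = (int16_t)((*src++ - 0x80) << 8);
        }
    }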
@@ -1017,6 +1033,59 @@
 
 // -------------------------------------------------------------------------
 
+TimedAudioTrack::TimedAudioTrack() {
+    mIsTimed = true;
+}
+
+status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
+{
+    status_t result = UNKNOWN_ERROR;
+
+    // If the track is not already invalid, try to allocate a buffer.  If the
+    // allocation fails, indicating that the server is dead, flag the track as
+    // invalid so we can attempt to restore it in just a bit.
+    if (!(mCblk->flags & CBLK_INVALID_MSK)) {
+        result = mAudioTrack->allocateTimedBuffer(size, buffer);
+        if (result == DEAD_OBJECT) {
+            android_atomic_or(CBLK_INVALID_ON, &mCblk->flags);
+        }
+    }
+
+    // If the track is invalid at this point, attempt to restore it and try the
+    // allocation one more time.
+    if (mCblk->flags & CBLK_INVALID_MSK) {
+        mCblk->lock.lock();
+        result = restoreTrack_l(mCblk, false);
+        mCblk->lock.unlock();
+
+        if (result == OK)
+            result = mAudioTrack->allocateTimedBuffer(size, buffer);
+    }
+
+    return result;
+}
+
+status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
+                                           int64_t pts)
+{
+    // restart track if it was disabled by audioflinger due to previous underrun
+    if (mActive && (mCblk->flags & CBLK_DISABLED_MSK)) {
+        android_atomic_and(~CBLK_DISABLED_ON, &mCblk->flags);
+        ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
+        mAudioTrack->start(0);
+    }
+
+    return mAudioTrack->queueTimedBuffer(buffer, pts);
+}
+
+status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
+                                                TargetTimeline target)
+{
+    return mAudioTrack->setMediaTimeTransform(xform, target);
+}
+
+// -------------------------------------------------------------------------
+
 bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
 {
     Buffer audioBuffer;
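The new TimedAudioTrack subclass above pairs server-allocated buffers with presentation timestamps. A rough client-side usage sketch (bufferBytes and pts are illustrative placeholders, not part of this change):

    sp<TimedAudioTrack> track = new TimedAudioTrack();
    // configure it like a regular AudioTrack first, e.g.
    // track->set(AUDIO_STREAM_MUSIC, sampleRate, AUDIO_FORMAT_PCM_16_BIT, ...);
    sp<IMemory> buf;
    if (track->allocateTimedBuffer(bufferBytes, &buf) == NO_ERROR) {
        // fill buf->pointer() with PCM data, then queue it for media time pts
        track->queueTimedBuffer(buf, pts);
    }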
@@ -1026,13 +1095,14 @@
     mLock.lock();
     // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
     // while we are accessing the cblk
-    sp <IAudioTrack> audioTrack = mAudioTrack;
-    sp <IMemory> iMem = mCblkMemory;
+    sp<IAudioTrack> audioTrack = mAudioTrack;
+    sp<IMemory> iMem = mCblkMemory;
     audio_track_cblk_t* cblk = mCblk;
+    bool active = mActive;
     mLock.unlock();
 
     // Manage underrun callback
-    if (mActive && (cblk->framesAvailable() == cblk->frameCount)) {
+    if (active && (cblk->framesAvailable() == cblk->frameCount)) {
         ALOGV("Underrun user: %x, server: %x, flags %04x", cblk->user, cblk->server, cblk->flags);
         if (!(android_atomic_or(CBLK_UNDERRUN_ON, &cblk->flags) & CBLK_UNDERRUN_MSK)) {
             mCbf(EVENT_UNDERRUN, mUserData, 0);
@@ -1075,6 +1145,9 @@
         frames = mRemainingFrames;
     }
 
+    // See the description of the waitCount parameter at the declaration of obtainBuffer().
+    // The logic below prevents us from getting stuck in obtainBuffer() below,
+    // unable to handle timed events (position, markers, loops).
     int32_t waitCount = -1;
     if (mUpdatePeriod || (!mMarkerReached && mMarkerPosition) || mLoopCount) {
         waitCount = 1;
@@ -1084,9 +1157,6 @@
 
         audioBuffer.frameCount = frames;
 
-        // Calling obtainBuffer() with a wait count of 1
-        // limits wait time to WAIT_PERIOD_MS. This prevents from being
-        // stuck here not being able to handle timed events (position, markers, loops).
         status_t err = obtainBuffer(&audioBuffer, waitCount);
         if (err < NO_ERROR) {
             if (err != TIMED_OUT) {
@@ -1120,19 +1190,14 @@
         if (writtenSize > reqSize) writtenSize = reqSize;
 
         if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_POLICY_OUTPUT_FLAG_DIRECT)) {
-            // 8 to 16 bit conversion
-            const int8_t *src = audioBuffer.i8 + writtenSize-1;
-            int count = writtenSize;
-            int16_t *dst = audioBuffer.i16 + writtenSize-1;
-            while(count--) {
-                *dst-- = (int16_t)(*src--^0x80) << 8;
-            }
+            // 8 to 16 bit conversion, note that source and destination are the same address
+            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
             writtenSize <<= 1;
         }
 
         audioBuffer.size = writtenSize;
         // NOTE: mCblk->frameSize is not equal to AudioTrack::frameSize() for
-        // 8 bit PCM data: in this case,  mCblk->frameSize is based on a sampel size of
+        // 8 bit PCM data: in this case,  mCblk->frameSize is based on a sample size of
         // 16 bit.
         audioBuffer.frameCount = writtenSize/mCblk->frameSize;
 
@@ -1159,7 +1224,7 @@
 
     if (!(android_atomic_or(CBLK_RESTORING_ON, &cblk->flags) & CBLK_RESTORING_MSK)) {
         ALOGW("dead IAudioTrack, creating a new one from %s TID %d",
-             fromStart ? "start()" : "obtainBuffer()", gettid());
+            fromStart ? "start()" : "obtainBuffer()", gettid());
 
         // signal old cblk condition so that other threads waiting for available buffers stop
         // waiting now
@@ -1213,7 +1278,7 @@
                 }
             }
             if (mActive) {
-                result = mAudioTrack->start();
+                result = mAudioTrack->start(0); // callback thread hasn't changed
                 ALOGW_IF(result != NO_ERROR, "restoreTrack_l() start() failed status %d", result);
             }
             if (fromStart && result == NO_ERROR) {
@@ -1245,7 +1310,7 @@
         }
     }
     ALOGV("restoreTrack_l() status %d mActive %d cblk %p, old cblk %p flags %08x old flags %08x",
-         result, mActive, mCblk, cblk, mCblk->flags, cblk->flags);
+        result, mActive, mCblk, cblk, mCblk->flags, cblk->flags);
 
     if (result == NO_ERROR) {
         // from now on we switch to the newly created cblk
@@ -1304,15 +1369,15 @@
 
 audio_track_cblk_t::audio_track_cblk_t()
     : lock(Mutex::SHARED), cv(Condition::SHARED), user(0), server(0),
-    userBase(0), serverBase(0), buffers(0), frameCount(0),
-    loopStart(UINT_MAX), loopEnd(UINT_MAX), loopCount(0), volumeLR(0),
-    sendLevel(0), flags(0)
+    userBase(0), serverBase(0), buffers(NULL), frameCount(0),
+    loopStart(UINT_MAX), loopEnd(UINT_MAX), loopCount(0), mVolumeLR(0x10001000),
+    mSendLevel(0), flags(0)
 {
 }
 
 uint32_t audio_track_cblk_t::stepUser(uint32_t frameCount)
 {
-    uint32_t u = this->user;
+    uint32_t u = user;
 
     u += frameCount;
     // Ensure that user is never ahead of server for AudioRecord
@@ -1321,16 +1386,16 @@
         if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS-1) {
             bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
         }
-    } else if (u > this->server) {
-        ALOGW("stepServer occured after track reset");
-        u = this->server;
+    } else if (u > server) {
+        ALOGW("stepServer occurred after track reset");
+        u = server;
     }
 
     if (u >= userBase + this->frameCount) {
         userBase += this->frameCount;
     }
 
-    this->user = u;
+    user = u;
 
     // Clear flow control error condition as new data has been written/read to/from buffer.
     if (flags & CBLK_UNDERRUN_MSK) {
@@ -1347,7 +1412,7 @@
         return false;
     }
 
-    uint32_t s = this->server;
+    uint32_t s = server;
 
     s += frameCount;
     if (flags & CBLK_DIRECTION_MSK) {
@@ -1360,9 +1425,9 @@
         // while the mixer is processing a block: in this case,
         // stepServer() is called After the flush() has reset u & s and
         // we have s > u
-        if (s > this->user) {
-            ALOGW("stepServer occured after track reset");
-            s = this->user;
+        if (s > user) {
+            ALOGW("stepServer occurred after track reset");
+            s = user;
         }
     }
 
@@ -1378,7 +1443,7 @@
         serverBase += this->frameCount;
     }
 
-    this->server = s;
+    server = s;
 
     if (!(flags & CBLK_INVALID_MSK)) {
         cv.signal();
@@ -1389,7 +1454,7 @@
 
 void* audio_track_cblk_t::buffer(uint32_t offset) const
 {
-    return (int8_t *)this->buffers + (offset - userBase) * this->frameSize;
+    return (int8_t *)buffers + (offset - userBase) * frameSize;
 }
 
 uint32_t audio_track_cblk_t::framesAvailable()
@@ -1400,8 +1465,8 @@
 
 uint32_t audio_track_cblk_t::framesAvailable_l()
 {
-    uint32_t u = this->user;
-    uint32_t s = this->server;
+    uint32_t u = user;
+    uint32_t s = server;
 
     if (flags & CBLK_DIRECTION_MSK) {
         uint32_t limit = (s < loopStart) ? s : loopStart;
@@ -1413,8 +1478,8 @@
 
 uint32_t audio_track_cblk_t::framesReady()
 {
-    uint32_t u = this->user;
-    uint32_t s = this->server;
+    uint32_t u = user;
+    uint32_t s = server;
 
     if (flags & CBLK_DIRECTION_MSK) {
         if (u < loopEnd) {
@@ -1459,4 +1524,3 @@
 // -------------------------------------------------------------------------
 
 }; // namespace android
-
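The IAudioFlinger changes that follow apply one pattern throughout: strongly typed enums and handles keep their C++ types in the interface, but travel over Binder as int32_t with explicit casts on each side. A minimal sketch of the proxy side of that pattern (setFoo and FOO are made-up names, not part of this interface):

    virtual status_t setFoo(audio_stream_type_t stream, audio_io_handle_t output)
    {
        Parcel data, reply;
        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
        data.writeInt32((int32_t) stream);   // typed enum -> int32 on the wire
        data.writeInt32((int32_t) output);   // opaque I/O handle -> int32
        remote()->transact(FOO, data, &reply);
        return reply.readInt32();
    }
    // ...and the BnAudioFlinger side casts back before dispatching:
    //     audio_stream_type_t stream = (audio_stream_type_t) data.readInt32();
    //     audio_io_handle_t output  = (audio_io_handle_t) data.readInt32();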
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index abd491f..07b12e4 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -82,14 +82,15 @@
 
     virtual sp<IAudioTrack> createTrack(
                                 pid_t pid,
-                                int streamType,
+                                audio_stream_type_t streamType,
                                 uint32_t sampleRate,
-                                uint32_t format,
+                                audio_format_t format,
                                 uint32_t channelMask,
                                 int frameCount,
                                 uint32_t flags,
                                 const sp<IMemory>& sharedBuffer,
-                                int output,
+                                audio_io_handle_t output,
+                                bool isTimed,
                                 int *sessionId,
                                 status_t *status)
     {
@@ -97,14 +98,15 @@
         sp<IAudioTrack> track;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32(pid);
-        data.writeInt32(streamType);
+        data.writeInt32((int32_t) streamType);
         data.writeInt32(sampleRate);
         data.writeInt32(format);
         data.writeInt32(channelMask);
         data.writeInt32(frameCount);
         data.writeInt32(flags);
         data.writeStrongBinder(sharedBuffer->asBinder());
-        data.writeInt32(output);
+        data.writeInt32((int32_t) output);
+        data.writeInt32(isTimed);
         int lSessionId = 0;
         if (sessionId != NULL) {
             lSessionId = *sessionId;
@@ -129,9 +131,9 @@
 
     virtual sp<IAudioRecord> openRecord(
                                 pid_t pid,
-                                int input,
+                                audio_io_handle_t input,
                                 uint32_t sampleRate,
-                                uint32_t format,
+                                audio_format_t format,
                                 uint32_t channelMask,
                                 int frameCount,
                                 uint32_t flags,
@@ -142,7 +144,7 @@
         sp<IAudioRecord> record;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32(pid);
-        data.writeInt32(input);
+        data.writeInt32((int32_t) input);
         data.writeInt32(sampleRate);
         data.writeInt32(format);
         data.writeInt32(channelMask);
@@ -170,47 +172,47 @@
         return record;
     }
 
-    virtual uint32_t sampleRate(int output) const
+    virtual uint32_t sampleRate(audio_io_handle_t output) const
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(output);
+        data.writeInt32((int32_t) output);
         remote()->transact(SAMPLE_RATE, data, &reply);
         return reply.readInt32();
     }
 
-    virtual int channelCount(int output) const
+    virtual int channelCount(audio_io_handle_t output) const
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(output);
+        data.writeInt32((int32_t) output);
         remote()->transact(CHANNEL_COUNT, data, &reply);
         return reply.readInt32();
     }
 
-    virtual uint32_t format(int output) const
+    virtual audio_format_t format(audio_io_handle_t output) const
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(output);
+        data.writeInt32((int32_t) output);
         remote()->transact(FORMAT, data, &reply);
-        return reply.readInt32();
+        return (audio_format_t) reply.readInt32();
     }
 
-    virtual size_t frameCount(int output) const
+    virtual size_t frameCount(audio_io_handle_t output) const
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(output);
+        data.writeInt32((int32_t) output);
         remote()->transact(FRAME_COUNT, data, &reply);
         return reply.readInt32();
     }
 
-    virtual uint32_t latency(int output) const
+    virtual uint32_t latency(audio_io_handle_t output) const
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(output);
+        data.writeInt32((int32_t) output);
         remote()->transact(LATENCY, data, &reply);
         return reply.readInt32();
     }
@@ -249,47 +251,48 @@
         return reply.readInt32();
     }
 
-    virtual status_t setStreamVolume(int stream, float value, int output)
+    virtual status_t setStreamVolume(audio_stream_type_t stream, float value,
+            audio_io_handle_t output)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(stream);
+        data.writeInt32((int32_t) stream);
         data.writeFloat(value);
-        data.writeInt32(output);
+        data.writeInt32((int32_t) output);
         remote()->transact(SET_STREAM_VOLUME, data, &reply);
         return reply.readInt32();
     }
 
-    virtual status_t setStreamMute(int stream, bool muted)
+    virtual status_t setStreamMute(audio_stream_type_t stream, bool muted)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(stream);
+        data.writeInt32((int32_t) stream);
         data.writeInt32(muted);
         remote()->transact(SET_STREAM_MUTE, data, &reply);
         return reply.readInt32();
     }
 
-    virtual float streamVolume(int stream, int output) const
+    virtual float streamVolume(audio_stream_type_t stream, audio_io_handle_t output) const
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(stream);
-        data.writeInt32(output);
+        data.writeInt32((int32_t) stream);
+        data.writeInt32((int32_t) output);
         remote()->transact(STREAM_VOLUME, data, &reply);
         return reply.readFloat();
     }
 
-    virtual bool streamMute(int stream) const
+    virtual bool streamMute(audio_stream_type_t stream) const
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(stream);
+        data.writeInt32((int32_t) stream);
         remote()->transact(STREAM_MUTE, data, &reply);
         return reply.readInt32();
     }
 
-    virtual status_t setMode(int mode)
+    virtual status_t setMode(audio_mode_t mode)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
@@ -315,21 +318,21 @@
         return reply.readInt32();
     }
 
-    virtual status_t setParameters(int ioHandle, const String8& keyValuePairs)
+    virtual status_t setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(ioHandle);
+        data.writeInt32((int32_t) ioHandle);
         data.writeString8(keyValuePairs);
         remote()->transact(SET_PARAMETERS, data, &reply);
         return reply.readInt32();
     }
 
-    virtual String8 getParameters(int ioHandle, const String8& keys)
+    virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) const
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(ioHandle);
+        data.writeInt32((int32_t) ioHandle);
         data.writeString8(keys);
         remote()->transact(GET_PARAMETERS, data, &reply);
         return reply.readString8();
@@ -343,7 +346,7 @@
         remote()->transact(REGISTER_CLIENT, data, &reply);
     }
 
-    virtual size_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount)
+    virtual size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format, int channelCount) const
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
@@ -354,17 +357,17 @@
         return reply.readInt32();
     }
 
-    virtual int openOutput(uint32_t *pDevices,
+    virtual audio_io_handle_t openOutput(uint32_t *pDevices,
                             uint32_t *pSamplingRate,
-                            uint32_t *pFormat,
+                            audio_format_t *pFormat,
                             uint32_t *pChannels,
                             uint32_t *pLatencyMs,
-                            uint32_t flags)
+                            audio_policy_output_flags_t flags)
     {
         Parcel data, reply;
         uint32_t devices = pDevices ? *pDevices : 0;
         uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
-        uint32_t format = pFormat ? *pFormat : 0;
+        audio_format_t format = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
         uint32_t channels = pChannels ? *pChannels : 0;
         uint32_t latency = pLatencyMs ? *pLatencyMs : 0;
 
@@ -374,15 +377,15 @@
         data.writeInt32(format);
         data.writeInt32(channels);
         data.writeInt32(latency);
-        data.writeInt32(flags);
+        data.writeInt32((int32_t) flags);
         remote()->transact(OPEN_OUTPUT, data, &reply);
-        int  output = reply.readInt32();
-        ALOGV("openOutput() returned output, %p", output);
+        audio_io_handle_t output = (audio_io_handle_t) reply.readInt32();
+        ALOGV("openOutput() returned output, %d", output);
         devices = reply.readInt32();
         if (pDevices) *pDevices = devices;
         samplingRate = reply.readInt32();
         if (pSamplingRate) *pSamplingRate = samplingRate;
-        format = reply.readInt32();
+        format = (audio_format_t) reply.readInt32();
         if (pFormat) *pFormat = format;
         channels = reply.readInt32();
         if (pChannels) *pChannels = channels;
@@ -391,53 +394,54 @@
         return output;
     }
 
-    virtual int openDuplicateOutput(int output1, int output2)
+    virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
+            audio_io_handle_t output2)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(output1);
-        data.writeInt32(output2);
+        data.writeInt32((int32_t) output1);
+        data.writeInt32((int32_t) output2);
         remote()->transact(OPEN_DUPLICATE_OUTPUT, data, &reply);
-        return reply.readInt32();
+        return (audio_io_handle_t) reply.readInt32();
     }
 
-    virtual status_t closeOutput(int output)
+    virtual status_t closeOutput(audio_io_handle_t output)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(output);
+        data.writeInt32((int32_t) output);
         remote()->transact(CLOSE_OUTPUT, data, &reply);
         return reply.readInt32();
     }
 
-    virtual status_t suspendOutput(int output)
+    virtual status_t suspendOutput(audio_io_handle_t output)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(output);
+        data.writeInt32((int32_t) output);
         remote()->transact(SUSPEND_OUTPUT, data, &reply);
         return reply.readInt32();
     }
 
-    virtual status_t restoreOutput(int output)
+    virtual status_t restoreOutput(audio_io_handle_t output)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(output);
+        data.writeInt32((int32_t) output);
         remote()->transact(RESTORE_OUTPUT, data, &reply);
         return reply.readInt32();
     }
 
-    virtual int openInput(uint32_t *pDevices,
+    virtual audio_io_handle_t openInput(uint32_t *pDevices,
                             uint32_t *pSamplingRate,
-                            uint32_t *pFormat,
+                            audio_format_t *pFormat,
                             uint32_t *pChannels,
-                            uint32_t acoustics)
+                            audio_in_acoustics_t acoustics)
     {
         Parcel data, reply;
         uint32_t devices = pDevices ? *pDevices : 0;
         uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
-        uint32_t format = pFormat ? *pFormat : 0;
+        audio_format_t format = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
         uint32_t channels = pChannels ? *pChannels : 0;
 
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
@@ -445,14 +449,14 @@
         data.writeInt32(samplingRate);
         data.writeInt32(format);
         data.writeInt32(channels);
-        data.writeInt32(acoustics);
+        data.writeInt32((int32_t) acoustics);
         remote()->transact(OPEN_INPUT, data, &reply);
-        int input = reply.readInt32();
+        audio_io_handle_t input = (audio_io_handle_t) reply.readInt32();
         devices = reply.readInt32();
         if (pDevices) *pDevices = devices;
         samplingRate = reply.readInt32();
         if (pSamplingRate) *pSamplingRate = samplingRate;
-        format = reply.readInt32();
+        format = (audio_format_t) reply.readInt32();
         if (pFormat) *pFormat = format;
         channels = reply.readInt32();
         if (pChannels) *pChannels = channels;
@@ -468,12 +472,12 @@
         return reply.readInt32();
     }
 
-    virtual status_t setStreamOutput(uint32_t stream, int output)
+    virtual status_t setStreamOutput(audio_stream_type_t stream, audio_io_handle_t output)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(stream);
-        data.writeInt32(output);
+        data.writeInt32((int32_t) stream);
+        data.writeInt32((int32_t) output);
         remote()->transact(SET_STREAM_OUTPUT, data, &reply);
         return reply.readInt32();
     }
@@ -487,11 +491,12 @@
         return reply.readInt32();
     }
 
-    virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, int output)
+    virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
+            audio_io_handle_t output) const
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(output);
+        data.writeInt32((int32_t) output);
         remote()->transact(GET_RENDER_POSITION, data, &reply);
         status_t status = reply.readInt32();
         if (status == NO_ERROR) {
@@ -507,11 +512,11 @@
         return status;
     }
 
-    virtual unsigned int getInputFramesLost(int ioHandle)
+    virtual unsigned int getInputFramesLost(audio_io_handle_t ioHandle) const
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(ioHandle);
+        data.writeInt32((int32_t) ioHandle);
         remote()->transact(GET_INPUT_FRAMES_LOST, data, &reply);
         return reply.readInt32();
     }
@@ -544,7 +549,7 @@
         remote()->transact(RELEASE_AUDIO_SESSION_ID, data, &reply);
     }
 
-    virtual status_t queryNumberEffects(uint32_t *numEffects)
+    virtual status_t queryNumberEffects(uint32_t *numEffects) const
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
@@ -556,13 +561,13 @@
         if (status != NO_ERROR) {
             return status;
         }
-        if (numEffects) {
+        if (numEffects != NULL) {
             *numEffects = (uint32_t)reply.readInt32();
         }
         return NO_ERROR;
     }
 
-    virtual status_t queryEffect(uint32_t index, effect_descriptor_t *pDescriptor)
+    virtual status_t queryEffect(uint32_t index, effect_descriptor_t *pDescriptor) const
     {
         if (pDescriptor == NULL) {
             return BAD_VALUE;
@@ -582,7 +587,8 @@
         return NO_ERROR;
     }
 
-    virtual status_t getEffectDescriptor(effect_uuid_t *pUuid, effect_descriptor_t *pDescriptor)
+    virtual status_t getEffectDescriptor(const effect_uuid_t *pUuid,
+            effect_descriptor_t *pDescriptor) const
     {
         if (pUuid == NULL || pDescriptor == NULL) {
             return BAD_VALUE;
@@ -606,7 +612,7 @@
                                     effect_descriptor_t *pDesc,
                                     const sp<IEffectClient>& client,
                                     int32_t priority,
-                                    int output,
+                                    audio_io_handle_t output,
                                     int sessionId,
                                     status_t *status,
                                     int *id,
@@ -616,18 +622,18 @@
         sp<IEffect> effect;
 
         if (pDesc == NULL) {
-             return effect;
-             if (status) {
-                 *status = BAD_VALUE;
-             }
-         }
+            if (status != NULL) {
+                *status = BAD_VALUE;
+            }
+            return effect;
+        }
 
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32(pid);
         data.write(pDesc, sizeof(effect_descriptor_t));
         data.writeStrongBinder(client->asBinder());
         data.writeInt32(priority);
-        data.writeInt32(output);
+        data.writeInt32((int32_t) output);
         data.writeInt32(sessionId);
 
         status_t lStatus = remote()->transact(CREATE_EFFECT, data, &reply);
@@ -640,7 +646,7 @@
                 *id = tmp;
             }
             tmp = reply.readInt32();
-            if (enabled) {
+            if (enabled != NULL) {
                 *enabled = tmp;
             }
             effect = interface_cast<IEffect>(reply.readStrongBinder());
@@ -653,13 +659,14 @@
         return effect;
     }
 
-    virtual status_t moveEffects(int session, int srcOutput, int dstOutput)
+    virtual status_t moveEffects(int session, audio_io_handle_t srcOutput,
+            audio_io_handle_t dstOutput)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32(session);
-        data.writeInt32(srcOutput);
-        data.writeInt32(dstOutput);
+        data.writeInt32((int32_t) srcOutput);
+        data.writeInt32((int32_t) dstOutput);
         remote()->transact(MOVE_EFFECTS, data, &reply);
         return reply.readInt32();
     }
@@ -672,23 +679,24 @@
 status_t BnAudioFlinger::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
-    switch(code) {
+    switch (code) {
         case CREATE_TRACK: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             pid_t pid = data.readInt32();
             int streamType = data.readInt32();
             uint32_t sampleRate = data.readInt32();
-            int format = data.readInt32();
+            audio_format_t format = (audio_format_t) data.readInt32();
             int channelCount = data.readInt32();
             size_t bufferCount = data.readInt32();
             uint32_t flags = data.readInt32();
             sp<IMemory> buffer = interface_cast<IMemory>(data.readStrongBinder());
-            int output = data.readInt32();
+            audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
+            bool isTimed = data.readInt32();
             int sessionId = data.readInt32();
             status_t status;
             sp<IAudioTrack> track = createTrack(pid,
-                    streamType, sampleRate, format,
-                    channelCount, bufferCount, flags, buffer, output, &sessionId, &status);
+                    (audio_stream_type_t) streamType, sampleRate, format,
+                    channelCount, bufferCount, flags, buffer, output, isTimed, &sessionId, &status);
             reply->writeInt32(sessionId);
             reply->writeInt32(status);
             reply->writeStrongBinder(track->asBinder());
@@ -697,9 +705,9 @@
         case OPEN_RECORD: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             pid_t pid = data.readInt32();
-            int input = data.readInt32();
+            audio_io_handle_t input = (audio_io_handle_t) data.readInt32();
             uint32_t sampleRate = data.readInt32();
-            int format = data.readInt32();
+            audio_format_t format = (audio_format_t) data.readInt32();
             int channelCount = data.readInt32();
             size_t bufferCount = data.readInt32();
             uint32_t flags = data.readInt32();
@@ -714,30 +722,30 @@
         } break;
         case SAMPLE_RATE: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32( sampleRate(data.readInt32()) );
+            reply->writeInt32( sampleRate((audio_io_handle_t) data.readInt32()) );
             return NO_ERROR;
         } break;
         case CHANNEL_COUNT: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32( channelCount(data.readInt32()) );
+            reply->writeInt32( channelCount((audio_io_handle_t) data.readInt32()) );
             return NO_ERROR;
         } break;
         case FORMAT: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32( format(data.readInt32()) );
+            reply->writeInt32( format((audio_io_handle_t) data.readInt32()) );
             return NO_ERROR;
         } break;
         case FRAME_COUNT: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32( frameCount(data.readInt32()) );
+            reply->writeInt32( frameCount((audio_io_handle_t) data.readInt32()) );
             return NO_ERROR;
         } break;
         case LATENCY: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32( latency(data.readInt32()) );
+            reply->writeInt32( latency((audio_io_handle_t) data.readInt32()) );
             return NO_ERROR;
         } break;
-         case SET_MASTER_VOLUME: {
+        case SET_MASTER_VOLUME: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             reply->writeInt32( setMasterVolume(data.readFloat()) );
             return NO_ERROR;
@@ -761,32 +769,32 @@
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             int stream = data.readInt32();
             float volume = data.readFloat();
-            int output = data.readInt32();
-            reply->writeInt32( setStreamVolume(stream, volume, output) );
+            audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
+            reply->writeInt32( setStreamVolume((audio_stream_type_t) stream, volume, output) );
             return NO_ERROR;
         } break;
         case SET_STREAM_MUTE: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             int stream = data.readInt32();
-            reply->writeInt32( setStreamMute(stream, data.readInt32()) );
+            reply->writeInt32( setStreamMute((audio_stream_type_t) stream, data.readInt32()) );
             return NO_ERROR;
         } break;
         case STREAM_VOLUME: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             int stream = data.readInt32();
             int output = data.readInt32();
-            reply->writeFloat( streamVolume(stream, output) );
+            reply->writeFloat( streamVolume((audio_stream_type_t) stream, output) );
             return NO_ERROR;
         } break;
         case STREAM_MUTE: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             int stream = data.readInt32();
-            reply->writeInt32( streamMute(stream) );
+            reply->writeInt32( streamMute((audio_stream_type_t) stream) );
             return NO_ERROR;
         } break;
         case SET_MODE: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int mode = data.readInt32();
+            audio_mode_t mode = (audio_mode_t) data.readInt32();
             reply->writeInt32( setMode(mode) );
             return NO_ERROR;
         } break;
@@ -803,18 +811,18 @@
         } break;
         case SET_PARAMETERS: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int ioHandle = data.readInt32();
+            audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32();
             String8 keyValuePairs(data.readString8());
             reply->writeInt32(setParameters(ioHandle, keyValuePairs));
             return NO_ERROR;
-         } break;
+        } break;
         case GET_PARAMETERS: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int ioHandle = data.readInt32();
+            audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32();
             String8 keys(data.readString8());
             reply->writeString8(getParameters(ioHandle, keys));
             return NO_ERROR;
-         } break;
+        } break;
 
         case REGISTER_CLIENT: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
@@ -825,7 +833,7 @@
         case GET_INPUTBUFFERSIZE: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             uint32_t sampleRate = data.readInt32();
-            int format = data.readInt32();
+            audio_format_t format = (audio_format_t) data.readInt32();
             int channelCount = data.readInt32();
             reply->writeInt32( getInputBufferSize(sampleRate, format, channelCount) );
             return NO_ERROR;
@@ -834,18 +842,18 @@
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             uint32_t devices = data.readInt32();
             uint32_t samplingRate = data.readInt32();
-            uint32_t format = data.readInt32();
+            audio_format_t format = (audio_format_t) data.readInt32();
             uint32_t channels = data.readInt32();
             uint32_t latency = data.readInt32();
-            uint32_t flags = data.readInt32();
-            int output = openOutput(&devices,
+            audio_policy_output_flags_t flags = (audio_policy_output_flags_t) data.readInt32();
+            audio_io_handle_t output = openOutput(&devices,
                                      &samplingRate,
                                      &format,
                                      &channels,
                                      &latency,
                                      flags);
             ALOGV("OPEN_OUTPUT output, %p", output);
-            reply->writeInt32(output);
+            reply->writeInt32((int32_t) output);
             reply->writeInt32(devices);
             reply->writeInt32(samplingRate);
             reply->writeInt32(format);
@@ -855,40 +863,40 @@
         } break;
         case OPEN_DUPLICATE_OUTPUT: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int output1 = data.readInt32();
-            int output2 = data.readInt32();
-            reply->writeInt32(openDuplicateOutput(output1, output2));
+            audio_io_handle_t output1 = (audio_io_handle_t) data.readInt32();
+            audio_io_handle_t output2 = (audio_io_handle_t) data.readInt32();
+            reply->writeInt32((int32_t) openDuplicateOutput(output1, output2));
             return NO_ERROR;
         } break;
         case CLOSE_OUTPUT: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32(closeOutput(data.readInt32()));
+            reply->writeInt32(closeOutput((audio_io_handle_t) data.readInt32()));
             return NO_ERROR;
         } break;
         case SUSPEND_OUTPUT: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32(suspendOutput(data.readInt32()));
+            reply->writeInt32(suspendOutput((audio_io_handle_t) data.readInt32()));
             return NO_ERROR;
         } break;
         case RESTORE_OUTPUT: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32(restoreOutput(data.readInt32()));
+            reply->writeInt32(restoreOutput((audio_io_handle_t) data.readInt32()));
             return NO_ERROR;
         } break;
         case OPEN_INPUT: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             uint32_t devices = data.readInt32();
             uint32_t samplingRate = data.readInt32();
-            uint32_t format = data.readInt32();
+            audio_format_t format = (audio_format_t) data.readInt32();
             uint32_t channels = data.readInt32();
-            uint32_t acoutics = data.readInt32();
+            audio_in_acoustics_t acoustics = (audio_in_acoustics_t) data.readInt32();
 
-            int input = openInput(&devices,
+            audio_io_handle_t input = openInput(&devices,
                                      &samplingRate,
                                      &format,
                                      &channels,
-                                     acoutics);
-            reply->writeInt32(input);
+                                     acoustics);
+            reply->writeInt32((int32_t) input);
             reply->writeInt32(devices);
             reply->writeInt32(samplingRate);
             reply->writeInt32(format);
@@ -897,14 +905,14 @@
         } break;
         case CLOSE_INPUT: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32(closeInput(data.readInt32()));
+            reply->writeInt32(closeInput((audio_io_handle_t) data.readInt32()));
             return NO_ERROR;
         } break;
         case SET_STREAM_OUTPUT: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             uint32_t stream = data.readInt32();
-            int output = data.readInt32();
-            reply->writeInt32(setStreamOutput(stream, output));
+            audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
+            reply->writeInt32(setStreamOutput((audio_stream_type_t) stream, output));
             return NO_ERROR;
         } break;
         case SET_VOICE_VOLUME: {
@@ -915,7 +923,7 @@
         } break;
         case GET_RENDER_POSITION: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int output = data.readInt32();
+            audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
             uint32_t halFrames;
             uint32_t dspFrames;
             status_t status = getRenderPosition(&halFrames, &dspFrames, output);
@@ -928,7 +936,7 @@
         }
         case GET_INPUT_FRAMES_LOST: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int ioHandle = data.readInt32();
+            audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32();
             reply->writeInt32(getInputFramesLost(ioHandle));
             return NO_ERROR;
         } break;
@@ -988,7 +996,7 @@
             data.read(&desc, sizeof(effect_descriptor_t));
             sp<IEffectClient> client = interface_cast<IEffectClient>(data.readStrongBinder());
             int32_t priority = data.readInt32();
-            int output = data.readInt32();
+            audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
             int sessionId = data.readInt32();
             status_t status;
             int id;
@@ -1005,8 +1013,8 @@
         case MOVE_EFFECTS: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
             int session = data.readInt32();
-            int srcOutput = data.readInt32();
-            int dstOutput = data.readInt32();
+            audio_io_handle_t srcOutput = (audio_io_handle_t) data.readInt32();
+            audio_io_handle_t dstOutput = (audio_io_handle_t) data.readInt32();
             reply->writeInt32(moveEffects(session, srcOutput, dstOutput));
             return NO_ERROR;
         } break;
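
The IAudioFlinger hunks above all apply the same idiom: values still cross the Binder wire as plain int32, and the typed enums (audio_stream_type_t, audio_format_t, audio_io_handle_t, audio_policy_output_flags_t) are recovered by an explicit cast on the receiving side, so the wire format itself is unchanged. A minimal sketch of that idiom; the readEnum helper is hypothetical and not part of the real interface:

    // Sketch of the "write as int32, cast on read" idiom used in the hunks above.
    #include <binder/Parcel.h>
    #include <system/audio.h>   // audio_format_t, audio_io_handle_t, ... (as used in this tree)

    using namespace android;

    template <typename E>
    static inline E readEnum(const Parcel& data) {
        // the wire format stays int32_t, so old and new binaries still agree
        return static_cast<E>(data.readInt32());
    }

    // e.g. inside an onTransact() case:
    //   audio_format_t format    = readEnum<audio_format_t>(data);
    //   audio_io_handle_t output = readEnum<audio_io_handle_t>(data);
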
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp
index 5a3f250..4178b29 100644
--- a/media/libmedia/IAudioFlingerClient.cpp
+++ b/media/libmedia/IAudioFlingerClient.cpp
@@ -39,18 +39,18 @@
     {
     }
 
-    void ioConfigChanged(int event, int ioHandle, void *param2)
+    void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlingerClient::getInterfaceDescriptor());
         data.writeInt32(event);
-        data.writeInt32(ioHandle);
+        data.writeInt32((int32_t) ioHandle);
         if (event == AudioSystem::STREAM_CONFIG_CHANGED) {
-            uint32_t stream = *(uint32_t *)param2;
+            uint32_t stream = *(const uint32_t *)param2;
             ALOGV("ioConfigChanged stream %d", stream);
             data.writeInt32(stream);
         } else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) {
-            AudioSystem::OutputDescriptor *desc = (AudioSystem::OutputDescriptor *)param2;
+            const AudioSystem::OutputDescriptor *desc = (const AudioSystem::OutputDescriptor *)param2;
             data.writeInt32(desc->samplingRate);
             data.writeInt32(desc->format);
             data.writeInt32(desc->channels);
@@ -68,12 +68,12 @@
 status_t BnAudioFlingerClient::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
-    switch(code) {
+    switch (code) {
     case IO_CONFIG_CHANGED: {
             CHECK_INTERFACE(IAudioFlingerClient, data, reply);
             int event = data.readInt32();
-            int ioHandle = data.readInt32();
-            void *param2 = 0;
+            audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32();
+            const void *param2 = NULL;
             AudioSystem::OutputDescriptor desc;
             uint32_t stream;
             if (event == AudioSystem::STREAM_CONFIG_CHANGED) {
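
The client callback above keeps the same field-by-field serialization of the output descriptor; the change is only that param2 becomes const and the io handle is typed. A small sketch of the field-by-field pattern, using a hypothetical MyDescriptor rather than AudioSystem::OutputDescriptor; the only rule it illustrates is that write order and read order must match exactly:

    // Sketch: field-by-field Parcel serialization of a small descriptor.
    #include <stdint.h>
    #include <binder/Parcel.h>

    using namespace android;

    struct MyDescriptor {
        uint32_t samplingRate;
        int32_t  format;
        uint32_t channels;
        uint32_t frameCount;
        uint32_t latency;
    };

    static void writeDescriptor(Parcel& data, const MyDescriptor& desc) {
        data.writeInt32(desc.samplingRate);   // each field is written individually...
        data.writeInt32(desc.format);
        data.writeInt32(desc.channels);
        data.writeInt32(desc.frameCount);
        data.writeInt32(desc.latency);
    }

    static void readDescriptor(const Parcel& data, MyDescriptor* desc) {
        desc->samplingRate = data.readInt32();   // ...and read back in the same order
        desc->format       = data.readInt32();
        desc->channels     = data.readInt32();
        desc->frameCount   = data.readInt32();
        desc->latency      = data.readInt32();
    }
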
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 50b4855..5040bd9 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -33,7 +33,7 @@
     SET_DEVICE_CONNECTION_STATE = IBinder::FIRST_CALL_TRANSACTION,
     GET_DEVICE_CONNECTION_STATE,
     SET_PHONE_STATE,
-    SET_RINGER_MODE,
+    SET_RINGER_MODE,    // reserved, no longer used
     SET_FORCE_USE,
     GET_FORCE_USE,
     GET_OUTPUT,
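
SET_RINGER_MODE is kept in the enum rather than deleted because Binder transaction codes are assigned positionally from IBinder::FIRST_CALL_TRANSACTION; removing an enumerator would renumber every later code and break a client and service built from different revisions. A tiny sketch of the reserved-slot pattern, with hypothetical names:

    // Sketch: retiring a Binder method while keeping its transaction code.
    #include <binder/IBinder.h>

    enum {
        FIRST_METHOD = android::IBinder::FIRST_CALL_TRANSACTION,
        RETIRED_METHOD,   // reserved, no longer handled by onTransact()
        NEXT_METHOD,      // still FIRST_CALL_TRANSACTION + 2 on both sides of the wire
    };
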
@@ -91,7 +91,7 @@
         return static_cast <audio_policy_dev_state_t>(reply.readInt32());
     }
 
-    virtual status_t setPhoneState(int state)
+    virtual status_t setPhoneState(audio_mode_t state)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -100,16 +100,6 @@
         return static_cast <status_t> (reply.readInt32());
     }
 
-    virtual status_t setRingerMode(uint32_t mode, uint32_t mask)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
-        data.writeInt32(mode);
-        data.writeInt32(mask);
-        remote()->transact(SET_RINGER_MODE, data, &reply);
-        return static_cast <status_t> (reply.readInt32());
-    }
-
     virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config)
     {
         Parcel data, reply;
@@ -132,7 +122,7 @@
     virtual audio_io_handle_t getOutput(
                                         audio_stream_type_t stream,
                                         uint32_t samplingRate,
-                                        uint32_t format,
+                                        audio_format_t format,
                                         uint32_t channels,
                                         audio_policy_output_flags_t flags)
     {
@@ -154,7 +144,7 @@
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(output);
-        data.writeInt32(stream);
+        data.writeInt32((int32_t) stream);
         data.writeInt32(session);
         remote()->transact(START_OUTPUT, data, &reply);
         return static_cast <status_t> (reply.readInt32());
@@ -167,7 +157,7 @@
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(output);
-        data.writeInt32(stream);
+        data.writeInt32((int32_t) stream);
         data.writeInt32(session);
         remote()->transact(STOP_OUTPUT, data, &reply);
         return static_cast <status_t> (reply.readInt32());
@@ -182,16 +172,16 @@
     }
 
     virtual audio_io_handle_t getInput(
-                                    int inputSource,
+                                    audio_source_t inputSource,
                                     uint32_t samplingRate,
-                                    uint32_t format,
+                                    audio_format_t format,
                                     uint32_t channels,
                                     audio_in_acoustics_t acoustics,
                                     int audioSession)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
-        data.writeInt32(inputSource);
+        data.writeInt32((int32_t) inputSource);
         data.writeInt32(samplingRate);
         data.writeInt32(static_cast <uint32_t>(format));
         data.writeInt32(channels);
@@ -240,21 +230,28 @@
         return static_cast <status_t> (reply.readInt32());
     }
 
-    virtual status_t setStreamVolumeIndex(audio_stream_type_t stream, int index)
+    virtual status_t setStreamVolumeIndex(audio_stream_type_t stream,
+                                          int index,
+                                          audio_devices_t device)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(static_cast <uint32_t>(stream));
         data.writeInt32(index);
+        data.writeInt32(static_cast <uint32_t>(device));
         remote()->transact(SET_STREAM_VOLUME, data, &reply);
         return static_cast <status_t> (reply.readInt32());
     }
 
-    virtual status_t getStreamVolumeIndex(audio_stream_type_t stream, int *index)
+    virtual status_t getStreamVolumeIndex(audio_stream_type_t stream,
+                                          int *index,
+                                          audio_devices_t device)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(static_cast <uint32_t>(stream));
+        data.writeInt32(static_cast <uint32_t>(device));
+
         remote()->transact(GET_STREAM_VOLUME, data, &reply);
         int lIndex = reply.readInt32();
         if (index) *index = lIndex;
@@ -270,13 +267,13 @@
         return reply.readInt32();
     }
 
-    virtual uint32_t getDevicesForStream(audio_stream_type_t stream)
+    virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(static_cast <uint32_t>(stream));
         remote()->transact(GET_DEVICES_FOR_STREAM, data, &reply);
-        return (uint32_t) reply.readInt32();
+        return (audio_devices_t) reply.readInt32();
     }
 
     virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc)
@@ -324,11 +321,11 @@
         return static_cast <status_t> (reply.readInt32());
     }
 
-    virtual bool isStreamActive(int stream, uint32_t inPastMs) const
+    virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
-        data.writeInt32(stream);
+        data.writeInt32((int32_t) stream);
         data.writeInt32(inPastMs);
         remote()->transact(IS_STREAM_ACTIVE, data, &reply);
         return reply.readInt32();
@@ -368,7 +365,7 @@
 status_t BnAudioPolicyService::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
-    switch(code) {
+    switch (code) {
         case SET_DEVICE_CONNECTION_STATE: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_devices_t device =
@@ -394,15 +391,7 @@
 
         case SET_PHONE_STATE: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            reply->writeInt32(static_cast <uint32_t>(setPhoneState(data.readInt32())));
-            return NO_ERROR;
-        } break;
-
-        case SET_RINGER_MODE: {
-            CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            uint32_t mode = data.readInt32();
-            uint32_t mask = data.readInt32();
-            reply->writeInt32(static_cast <uint32_t>(setRingerMode(mode, mask)));
+            reply->writeInt32(static_cast <uint32_t>(setPhoneState((audio_mode_t) data.readInt32())));
             return NO_ERROR;
         } break;
 
@@ -427,7 +416,7 @@
             audio_stream_type_t stream =
                     static_cast <audio_stream_type_t>(data.readInt32());
             uint32_t samplingRate = data.readInt32();
-            uint32_t format = data.readInt32();
+            audio_format_t format = (audio_format_t) data.readInt32();
             uint32_t channels = data.readInt32();
             audio_policy_output_flags_t flags =
                     static_cast <audio_policy_output_flags_t>(data.readInt32());
@@ -472,9 +461,9 @@
 
         case GET_INPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            int inputSource = data.readInt32();
+            audio_source_t inputSource = (audio_source_t) data.readInt32();
             uint32_t samplingRate = data.readInt32();
-            uint32_t format = data.readInt32();
+            audio_format_t format = (audio_format_t) data.readInt32();
             uint32_t channels = data.readInt32();
             audio_in_acoustics_t acoustics =
                     static_cast <audio_in_acoustics_t>(data.readInt32());
@@ -525,7 +514,10 @@
             audio_stream_type_t stream =
                     static_cast <audio_stream_type_t>(data.readInt32());
             int index = data.readInt32();
-            reply->writeInt32(static_cast <uint32_t>(setStreamVolumeIndex(stream, index)));
+            audio_devices_t device = static_cast <audio_devices_t>(data.readInt32());
+            reply->writeInt32(static_cast <uint32_t>(setStreamVolumeIndex(stream,
+                                                                          index,
+                                                                          device)));
             return NO_ERROR;
         } break;
 
@@ -533,8 +525,9 @@
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_stream_type_t stream =
                     static_cast <audio_stream_type_t>(data.readInt32());
+            audio_devices_t device = static_cast <audio_devices_t>(data.readInt32());
             int index;
-            status_t status = getStreamVolumeIndex(stream, &index);
+            status_t status = getStreamVolumeIndex(stream, &index, device);
             reply->writeInt32(index);
             reply->writeInt32(static_cast <uint32_t>(status));
             return NO_ERROR;
@@ -598,9 +591,9 @@
 
         case IS_STREAM_ACTIVE: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            int stream = data.readInt32();
+            audio_stream_type_t stream = (audio_stream_type_t) data.readInt32();
             uint32_t inPastMs = (uint32_t)data.readInt32();
-            reply->writeInt32( isStreamActive(stream, inPastMs) );
+            reply->writeInt32( isStreamActive(stream, inPastMs) );
             return NO_ERROR;
         } break;
 
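
The volume-index methods above now carry an audio_devices_t, and the proxy/stub pairs must agree on the wire order: (stream, index, device) for the setter and (stream, device) for the getter. A short caller-side sketch, assuming an already-connected IAudioPolicyService proxy; the index and device values are arbitrary examples:

    // Sketch: exercising the per-device volume index methods added above.
    #include <media/IAudioPolicyService.h>
    #include <system/audio.h>

    using namespace android;

    static status_t setAndReadBackMusicVolume(const sp<IAudioPolicyService>& aps) {
        // proxy writes (stream, index, device); the stub reads them back in
        // the same order in the SET_STREAM_VOLUME case above
        status_t err = aps->setStreamVolumeIndex(AUDIO_STREAM_MUSIC, 10,
                                                 AUDIO_DEVICE_OUT_SPEAKER);
        if (err != NO_ERROR) {
            return err;
        }
        int index = 0;
        return aps->getStreamVolumeIndex(AUDIO_STREAM_MUSIC, &index,
                                         AUDIO_DEVICE_OUT_SPEAKER);
    }
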
diff --git a/media/libmedia/IAudioRecord.cpp b/media/libmedia/IAudioRecord.cpp
index 8c7a960..377b9a8 100644
--- a/media/libmedia/IAudioRecord.cpp
+++ b/media/libmedia/IAudioRecord.cpp
@@ -2,16 +2,16 @@
 **
 ** Copyright 2007, The Android Open Source Project
 **
-** Licensed under the Apache License, Version 2.0 (the "License"); 
-** you may not use this file except in compliance with the License. 
-** You may obtain a copy of the License at 
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
 **
-**     http://www.apache.org/licenses/LICENSE-2.0 
+**     http://www.apache.org/licenses/LICENSE-2.0
 **
-** Unless required by applicable law or agreed to in writing, software 
-** distributed under the License is distributed on an "AS IS" BASIS, 
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-** See the License for the specific language governing permissions and 
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
 ** limitations under the License.
 */
 
@@ -41,11 +41,12 @@
         : BpInterface<IAudioRecord>(impl)
     {
     }
-    
-    virtual status_t start()
+
+    virtual status_t start(pid_t tid)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioRecord::getInterfaceDescriptor());
+        data.writeInt32(tid);
         status_t status = remote()->transact(START, data, &reply);
         if (status == NO_ERROR) {
             status = reply.readInt32();
@@ -54,14 +55,14 @@
         }
         return status;
     }
-    
+
     virtual void stop()
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioRecord::getInterfaceDescriptor());
         remote()->transact(STOP, data, &reply);
     }
-    
+
     virtual sp<IMemory> getCblk() const
     {
         Parcel data, reply;
@@ -72,7 +73,7 @@
             cblk = interface_cast<IMemory>(reply.readStrongBinder());
         }
         return cblk;
-    }    
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioRecord, "android.media.IAudioRecord");
@@ -82,15 +83,15 @@
 status_t BnAudioRecord::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
-    switch(code) {
-       case GET_CBLK: {
+    switch (code) {
+        case GET_CBLK: {
             CHECK_INTERFACE(IAudioRecord, data, reply);
             reply->writeStrongBinder(getCblk()->asBinder());
             return NO_ERROR;
         } break;
         case START: {
             CHECK_INTERFACE(IAudioRecord, data, reply);
-            reply->writeInt32(start());
+            reply->writeInt32(start(data.readInt32()));
             return NO_ERROR;
         } break;
         case STOP: {
@@ -104,4 +105,3 @@
 }
 
 }; // namespace android
-
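
start() on IAudioRecord now carries a thread id, sent as an int32 in the START transaction, so the service can identify the client thread driving the record callback. Which thread id the real client passes is not visible in this diff; the sketch below is a hypothetical wrapper that simply sends the calling thread's id via bionic's gettid():

    // Sketch: hypothetical wrapper sending the calling thread's id with start().
    #include <unistd.h>              // gettid() (bionic)
    #include <media/IAudioRecord.h>

    using namespace android;

    static status_t startRecording(const sp<IAudioRecord>& record) {
        // the tid crosses the wire as a plain int32 in the START transaction
        return record->start(gettid());
    }
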
diff --git a/media/libmedia/IAudioTrack.cpp b/media/libmedia/IAudioTrack.cpp
index 0b372f3..09f31a7 100644
--- a/media/libmedia/IAudioTrack.cpp
+++ b/media/libmedia/IAudioTrack.cpp
@@ -1,17 +1,17 @@
-/* //device/extlibs/pv/android/IAudioTrack.cpp
+/*
 **
 ** Copyright 2007, The Android Open Source Project
 **
-** Licensed under the Apache License, Version 2.0 (the "License"); 
-** you may not use this file except in compliance with the License. 
-** You may obtain a copy of the License at 
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
 **
-**     http://www.apache.org/licenses/LICENSE-2.0 
+**     http://www.apache.org/licenses/LICENSE-2.0
 **
-** Unless required by applicable law or agreed to in writing, software 
-** distributed under the License is distributed on an "AS IS" BASIS, 
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-** See the License for the specific language governing permissions and 
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
 ** limitations under the License.
 */
 
@@ -35,7 +35,10 @@
     FLUSH,
     MUTE,
     PAUSE,
-    ATTACH_AUX_EFFECT
+    ATTACH_AUX_EFFECT,
+    ALLOCATE_TIMED_BUFFER,
+    QUEUE_TIMED_BUFFER,
+    SET_MEDIA_TIME_TRANSFORM,
 };
 
 class BpAudioTrack : public BpInterface<IAudioTrack>
@@ -45,11 +48,24 @@
         : BpInterface<IAudioTrack>(impl)
     {
     }
-    
-    virtual status_t start()
+
+    virtual sp<IMemory> getCblk() const
+    {
+        Parcel data, reply;
+        sp<IMemory> cblk;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_CBLK, data, &reply);
+        if (status == NO_ERROR) {
+            cblk = interface_cast<IMemory>(reply.readStrongBinder());
+        }
+        return cblk;
+    }
+
+    virtual status_t start(pid_t tid)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeInt32(tid);
         status_t status = remote()->transact(START, data, &reply);
         if (status == NO_ERROR) {
             status = reply.readInt32();
@@ -58,14 +74,14 @@
         }
         return status;
     }
-    
+
     virtual void stop()
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
         remote()->transact(STOP, data, &reply);
     }
-    
+
     virtual void flush()
     {
         Parcel data, reply;
@@ -80,25 +96,13 @@
         data.writeInt32(e);
         remote()->transact(MUTE, data, &reply);
     }
-    
+
     virtual void pause()
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
         remote()->transact(PAUSE, data, &reply);
     }
-    
-    virtual sp<IMemory> getCblk() const
-    {
-        Parcel data, reply;
-        sp<IMemory> cblk;
-        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_CBLK, data, &reply);
-        if (status == NO_ERROR) {
-            cblk = interface_cast<IMemory>(reply.readStrongBinder());
-        }
-        return cblk;
-    }
 
     virtual status_t attachAuxEffect(int effectId)
     {
@@ -113,6 +117,52 @@
         }
         return status;
     }
+
+    virtual status_t allocateTimedBuffer(size_t size, sp<IMemory>* buffer) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeInt32(size);
+        status_t status = remote()->transact(ALLOCATE_TIMED_BUFFER,
+                                             data, &reply);
+        if (status == NO_ERROR) {
+            status = reply.readInt32();
+            if (status == NO_ERROR) {
+                *buffer = interface_cast<IMemory>(reply.readStrongBinder());
+            }
+        }
+        return status;
+    }
+
+    virtual status_t queueTimedBuffer(const sp<IMemory>& buffer,
+                                      int64_t pts) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeStrongBinder(buffer->asBinder());
+        data.writeInt64(pts);
+        status_t status = remote()->transact(QUEUE_TIMED_BUFFER,
+                                             data, &reply);
+        if (status == NO_ERROR) {
+            status = reply.readInt32();
+        }
+        return status;
+    }
+
+    virtual status_t setMediaTimeTransform(const LinearTransform& xform,
+                                           int target) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeInt64(xform.a_zero);
+        data.writeInt64(xform.b_zero);
+        data.writeInt32(xform.a_to_b_numer);
+        data.writeInt32(xform.a_to_b_denom);
+        data.writeInt32(target);
+        status_t status = remote()->transact(SET_MEDIA_TIME_TRANSFORM,
+                                             data, &reply);
+        if (status == NO_ERROR) {
+            status = reply.readInt32();
+        }
+        return status;
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioTrack, "android.media.IAudioTrack");
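
The new proxy methods above add a timed-buffer path: the service allocates shared memory, the client fills it and queues it back together with a presentation timestamp. A hypothetical caller-side helper showing that round trip (minimal error handling; the helper name is not from the real tree):

    // Sketch: allocate, fill, and queue one timed buffer.
    #include <string.h>
    #include <binder/IMemory.h>
    #include <media/IAudioTrack.h>

    using namespace android;

    static status_t pushTimedBuffer(const sp<IAudioTrack>& track,
                                    const void* src, size_t size, int64_t pts) {
        sp<IMemory> buffer;
        status_t err = track->allocateTimedBuffer(size, &buffer);  // shared memory from the service
        if (err != NO_ERROR) {
            return err;
        }
        memcpy(buffer->pointer(), src, size);          // fill it locally
        return track->queueTimedBuffer(buffer, pts);   // hand it back with its timestamp
    }
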
@@ -122,15 +172,15 @@
 status_t BnAudioTrack::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
-    switch(code) {
-       case GET_CBLK: {
+    switch (code) {
+        case GET_CBLK: {
             CHECK_INTERFACE(IAudioTrack, data, reply);
             reply->writeStrongBinder(getCblk()->asBinder());
             return NO_ERROR;
         } break;
         case START: {
             CHECK_INTERFACE(IAudioTrack, data, reply);
-            reply->writeInt32(start());
+            reply->writeInt32(start(data.readInt32()));
             return NO_ERROR;
         } break;
         case STOP: {
@@ -158,10 +208,38 @@
             reply->writeInt32(attachAuxEffect(data.readInt32()));
             return NO_ERROR;
         } break;
+        case ALLOCATE_TIMED_BUFFER: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            sp<IMemory> buffer;
+            status_t status = allocateTimedBuffer(data.readInt32(), &buffer);
+            reply->writeInt32(status);
+            if (status == NO_ERROR) {
+                reply->writeStrongBinder(buffer->asBinder());
+            }
+            return NO_ERROR;
+        } break;
+        case QUEUE_TIMED_BUFFER: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            sp<IMemory> buffer = interface_cast<IMemory>(
+                data.readStrongBinder());
+            int64_t pts = data.readInt64();
+            reply->writeInt32(queueTimedBuffer(buffer, pts));
+            return NO_ERROR;
+        } break;
+        case SET_MEDIA_TIME_TRANSFORM: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            LinearTransform xform;
+            xform.a_zero = data.readInt64();
+            xform.b_zero = data.readInt64();
+            xform.a_to_b_numer = data.readInt32();
+            xform.a_to_b_denom = data.readInt32();
+            int target = data.readInt32();
+            reply->writeInt32(setMediaTimeTransform(xform, target));
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
 }
 
 }; // namespace android
-
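
On the stub side, SET_MEDIA_TIME_TRANSFORM rebuilds the LinearTransform in exactly the order the proxy wrote it: two int64 fields, two int32 fields, then the int32 target. A sketch of a caller building an identity transform; the meaning of 'target' is not defined anywhere in this diff, so it is passed through here as an opaque value:

    // Sketch: an identity media-time transform for setMediaTimeTransform().
    #include <utils/LinearTransform.h>   // assumed location of LinearTransform
    #include <media/IAudioTrack.h>

    using namespace android;

    static status_t setIdentityTransform(const sp<IAudioTrack>& track) {
        LinearTransform xform;
        xform.a_zero       = 0;   // written with writeInt64
        xform.b_zero       = 0;   // written with writeInt64
        xform.a_to_b_numer = 1;   // written with writeInt32
        xform.a_to_b_denom = 1;   // written with writeInt32
        return track->setMediaTimeTransform(xform, 0 /* target, opaque int32 here */);
    }
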
diff --git a/media/libmedia/IEffect.cpp b/media/libmedia/IEffect.cpp
index d469e28..a303a8f 100644
--- a/media/libmedia/IEffect.cpp
+++ b/media/libmedia/IEffect.cpp
@@ -83,8 +83,15 @@
             size = *pReplySize;
         }
         data.writeInt32(size);
-        remote()->transact(COMMAND, data, &reply);
-        status_t status = reply.readInt32();
+
+        status_t status = remote()->transact(COMMAND, data, &reply);
+        if (status != NO_ERROR) {
+            if (pReplySize != NULL)
+                *pReplySize = 0;
+            return status;
+        }
+
+        status = reply.readInt32();
         size = reply.readInt32();
         if (size != 0 && pReplyData != NULL && pReplySize != NULL) {
             reply.read(pReplyData, size);
@@ -122,7 +129,7 @@
 status_t BnEffect::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
-    switch(code) {
+    switch (code) {
         case ENABLE: {
             ALOGV("ENABLE");
             CHECK_INTERFACE(IEffect, data, reply);
@@ -179,10 +186,10 @@
         } break;
 
         case GET_CBLK: {
-             CHECK_INTERFACE(IEffect, data, reply);
-             reply->writeStrongBinder(getCblk()->asBinder());
-             return NO_ERROR;
-         } break;
+            CHECK_INTERFACE(IEffect, data, reply);
+            reply->writeStrongBinder(getCblk()->asBinder());
+            return NO_ERROR;
+        } break;
 
         default:
             return BBinder::onTransact(code, data, reply, flags);
@@ -192,4 +199,3 @@
 // ----------------------------------------------------------------------------
 
 }; // namespace android
-
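
The IEffect change above is a robustness fix: the proxy now checks the status_t returned by remote()->transact() and bails out (zeroing *pReplySize) before reading anything from the reply, because the reply Parcel is not meaningful when the transaction itself failed. The same pattern in isolation, with a hypothetical interface descriptor:

    // Sketch: check the transport status before reading the reply.
    #include <binder/IBinder.h>
    #include <binder/Parcel.h>
    #include <utils/String16.h>

    using namespace android;

    static status_t callRemote(const sp<IBinder>& binder, uint32_t code) {
        Parcel data, reply;
        data.writeInterfaceToken(String16("sketch.IFoo"));   // hypothetical descriptor
        status_t status = binder->transact(code, data, &reply);
        if (status != NO_ERROR) {
            return status;          // reply is not meaningful on transport failure
        }
        return reply.readInt32();   // only now read the remote method's own status
    }
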
diff --git a/media/libmedia/IEffectClient.cpp b/media/libmedia/IEffectClient.cpp
index 4693b45..aef4371 100644
--- a/media/libmedia/IEffectClient.cpp
+++ b/media/libmedia/IEffectClient.cpp
@@ -94,7 +94,7 @@
 status_t BnEffectClient::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
-    switch(code) {
+    switch (code) {
         case CONTROL_STATUS_CHANGED: {
             ALOGV("CONTROL_STATUS_CHANGED");
             CHECK_INTERFACE(IEffectClient, data, reply);
@@ -142,4 +142,3 @@
 // ----------------------------------------------------------------------------
 
 }; // namespace android
-
diff --git a/media/libmedia/IMediaDeathNotifier.cpp b/media/libmedia/IMediaDeathNotifier.cpp
index 8525482..9199db6 100644
--- a/media/libmedia/IMediaDeathNotifier.cpp
+++ b/media/libmedia/IMediaDeathNotifier.cpp
@@ -36,17 +36,17 @@
 {
     ALOGV("getMediaPlayerService");
     Mutex::Autolock _l(sServiceLock);
-    if (sMediaPlayerService.get() == 0) {
+    if (sMediaPlayerService == 0) {
         sp<IServiceManager> sm = defaultServiceManager();
         sp<IBinder> binder;
         do {
             binder = sm->getService(String16("media.player"));
             if (binder != 0) {
                 break;
-             }
-             ALOGW("Media player service not published, waiting...");
-             usleep(500000); // 0.5 s
-        } while(true);
+            }
+            ALOGW("Media player service not published, waiting...");
+            usleep(500000); // 0.5 s
+        } while (true);
 
         if (sDeathNotifier == NULL) {
         sDeathNotifier = new DeathNotifier();
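
Only indentation changes here, but the loop is worth calling out: getMediaPlayerService() polls the service manager until "media.player" appears, sleeping 0.5 s between attempts, because getService() returns NULL when the service is not (yet) registered. The same pattern as a standalone helper (a sketch; the sleep interval is the one used above):

    // Sketch: generic wait-for-service helper following the loop above.
    #include <unistd.h>
    #include <binder/IServiceManager.h>
    #include <utils/String16.h>

    using namespace android;

    static sp<IBinder> waitForService(const String16& name) {
        sp<IServiceManager> sm = defaultServiceManager();
        sp<IBinder> binder;
        do {
            binder = sm->getService(name);  // NULL while the service is unregistered
            if (binder != 0) {
                break;
            }
            usleep(500000);                 // 0.5 s, then retry
        } while (true);
        return binder;
    }
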
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index 9c1e6b7..0bb237d 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -15,6 +15,7 @@
 ** limitations under the License.
 */
 
+#include <arpa/inet.h>
 #include <stdint.h>
 #include <sys/types.h>
 
@@ -23,8 +24,6 @@
 #include <media/IMediaPlayer.h>
 #include <media/IStreamSource.h>
 
-#include <surfaceflinger/ISurface.h>
-#include <surfaceflinger/Surface.h>
 #include <gui/ISurfaceTexture.h>
 #include <utils/String8.h>
 
@@ -55,6 +54,8 @@
     SET_VIDEO_SURFACETEXTURE,
     SET_PARAMETER,
     GET_PARAMETER,
+    SET_RETRANSMIT_ENDPOINT,
+    SET_NEXT_PLAYER,
 };
 
 class BpMediaPlayer: public BpInterface<IMediaPlayer>
@@ -198,11 +199,11 @@
         return reply.readInt32();
     }
 
-    status_t setAudioStreamType(int type)
+    status_t setAudioStreamType(audio_stream_type_t stream)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
-        data.writeInt32(type);
+        data.writeInt32((int32_t) stream);
         remote()->transact(SET_AUDIO_STREAM_TYPE, data, &reply);
         return reply.readInt32();
     }
@@ -291,6 +292,33 @@
         return remote()->transact(GET_PARAMETER, data, reply);
     }
 
+    status_t setRetransmitEndpoint(const struct sockaddr_in* endpoint) {
+        Parcel data, reply;
+        status_t err;
+
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+        if (NULL != endpoint) {
+            data.writeInt32(sizeof(*endpoint));
+            data.write(endpoint, sizeof(*endpoint));
+        } else {
+            data.writeInt32(0);
+        }
+
+        err = remote()->transact(SET_RETRANSMIT_ENDPOINT, data, &reply);
+        if (OK != err) {
+            return err;
+        }
+        return reply.readInt32();
+    }
+
+    status_t setNextPlayer(const sp<IMediaPlayer>& player) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+        sp<IBinder> b(player->asBinder());
+        data.writeStrongBinder(b);
+        remote()->transact(SET_NEXT_PLAYER, data, &reply);
+        return reply.readInt32();
+    }
 };
 
 IMPLEMENT_META_INTERFACE(MediaPlayer, "android.media.IMediaPlayer");
@@ -300,7 +328,7 @@
 status_t BnMediaPlayer::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
-    switch(code) {
+    switch (code) {
         case DISCONNECT: {
             CHECK_INTERFACE(IMediaPlayer, data, reply);
             disconnect();
@@ -397,7 +425,7 @@
         } break;
         case SET_AUDIO_STREAM_TYPE: {
             CHECK_INTERFACE(IMediaPlayer, data, reply);
-            reply->writeInt32(setAudioStreamType(data.readInt32()));
+            reply->writeInt32(setAudioStreamType((audio_stream_type_t) data.readInt32()));
             return NO_ERROR;
         } break;
         case SET_LOOPING: {
@@ -459,6 +487,24 @@
             CHECK_INTERFACE(IMediaPlayer, data, reply);
             return getParameter(data.readInt32(), reply);
         } break;
+        case SET_RETRANSMIT_ENDPOINT: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+
+            struct sockaddr_in endpoint;
+            int amt = data.readInt32();
+            if (amt == sizeof(endpoint)) {
+                data.read(&endpoint, sizeof(struct sockaddr_in));
+                reply->writeInt32(setRetransmitEndpoint(&endpoint));
+            } else {
+                reply->writeInt32(setRetransmitEndpoint(NULL));
+            }
+            return NO_ERROR;
+        } break;
+        case SET_NEXT_PLAYER: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+            reply->writeInt32(setNextPlayer(interface_cast<IMediaPlayer>(data.readStrongBinder())));
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
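
setRetransmitEndpoint() marshals an optional struct with a length prefix: a non-NULL endpoint is written as sizeof(sockaddr_in) followed by the raw bytes, and NULL is written as a zero length, which the stub above turns back into a NULL pointer. A hypothetical caller filling an endpoint; the address and port are arbitrary examples:

    // Sketch: hypothetical caller enabling retransmission with example values.
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <string.h>
    #include <media/IMediaPlayer.h>

    using namespace android;

    static status_t enableRetransmit(const sp<IMediaPlayer>& player) {
        struct sockaddr_in endpoint;
        memset(&endpoint, 0, sizeof(endpoint));
        endpoint.sin_family = AF_INET;
        endpoint.sin_port   = htons(5150);              // example port
        inet_aton("192.168.1.10", &endpoint.sin_addr);  // example address

        // passing NULL instead serializes a zero length; the stub forwards it as NULL
        return player->setRetransmitEndpoint(&endpoint);
    }
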
diff --git a/media/libmedia/IMediaPlayerClient.cpp b/media/libmedia/IMediaPlayerClient.cpp
index 1f135c4..a670c96 100644
--- a/media/libmedia/IMediaPlayerClient.cpp
+++ b/media/libmedia/IMediaPlayerClient.cpp
@@ -56,7 +56,7 @@
 status_t BnMediaPlayerClient::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
-    switch(code) {
+    switch (code) {
         case NOTIFY: {
             CHECK_INTERFACE(IMediaPlayerClient, data, reply);
             int msg = data.readInt32();
diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp
index 8e4dd04..f5fccef 100644
--- a/media/libmedia/IMediaPlayerService.cpp
+++ b/media/libmedia/IMediaPlayerService.cpp
@@ -78,7 +78,7 @@
         return interface_cast<IMediaRecorder>(reply.readStrongBinder());
     }
 
-    virtual sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, int* pFormat)
+    virtual sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
@@ -86,11 +86,11 @@
         remote()->transact(DECODE_URL, data, &reply);
         *pSampleRate = uint32_t(reply.readInt32());
         *pNumChannels = reply.readInt32();
-        *pFormat = reply.readInt32();
+        *pFormat = (audio_format_t) reply.readInt32();
         return interface_cast<IMemory>(reply.readStrongBinder());
     }
 
-    virtual sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, int* pFormat)
+    virtual sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
@@ -100,7 +100,7 @@
         remote()->transact(DECODE_FD, data, &reply);
         *pSampleRate = uint32_t(reply.readInt32());
         *pNumChannels = reply.readInt32();
-        *pFormat = reply.readInt32();
+        *pFormat = (audio_format_t) reply.readInt32();
         return interface_cast<IMemory>(reply.readStrongBinder());
     }
 
@@ -132,7 +132,7 @@
 status_t BnMediaPlayerService::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
-    switch(code) {
+    switch (code) {
         case CREATE: {
             CHECK_INTERFACE(IMediaPlayerService, data, reply);
             pid_t pid = data.readInt32();
@@ -148,11 +148,11 @@
             const char* url = data.readCString();
             uint32_t sampleRate;
             int numChannels;
-            int format;
+            audio_format_t format;
             sp<IMemory> player = decode(url, &sampleRate, &numChannels, &format);
             reply->writeInt32(sampleRate);
             reply->writeInt32(numChannels);
-            reply->writeInt32(format);
+            reply->writeInt32((int32_t) format);
             reply->writeStrongBinder(player->asBinder());
             return NO_ERROR;
         } break;
@@ -163,11 +163,11 @@
             int64_t length = data.readInt64();
             uint32_t sampleRate;
             int numChannels;
-            int format;
+            audio_format_t format;
             sp<IMemory> player = decode(fd, offset, length, &sampleRate, &numChannels, &format);
             reply->writeInt32(sampleRate);
             reply->writeInt32(numChannels);
-            reply->writeInt32(format);
+            reply->writeInt32((int32_t) format);
             reply->writeStrongBinder(player->asBinder());
             return NO_ERROR;
         } break;
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index 42f55c2..a710fd7 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -19,10 +19,10 @@
 #define LOG_TAG "IMediaRecorder"
 #include <utils/Log.h>
 #include <binder/Parcel.h>
-#include <surfaceflinger/Surface.h>
 #include <camera/ICamera.h>
 #include <media/IMediaRecorderClient.h>
 #include <media/IMediaRecorder.h>
+#include <gui/Surface.h>
 #include <gui/ISurfaceTexture.h>
 #include <unistd.h>
 
@@ -289,7 +289,7 @@
 status_t BnMediaRecorder::onTransact(
                                      uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
-    switch(code) {
+    switch (code) {
         case RELEASE: {
             ALOGV("RELEASE");
             CHECK_INTERFACE(IMediaRecorder, data, reply);
diff --git a/media/libmedia/IMediaRecorderClient.cpp b/media/libmedia/IMediaRecorderClient.cpp
index ff235c9..e7907e3 100644
--- a/media/libmedia/IMediaRecorderClient.cpp
+++ b/media/libmedia/IMediaRecorderClient.cpp
@@ -53,7 +53,7 @@
 status_t BnMediaRecorderClient::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
-    switch(code) {
+    switch (code) {
         case NOTIFY: {
             CHECK_INTERFACE(IMediaRecorderClient, data, reply);
             int msg = data.readInt32();
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index d2f5f71..48e427a 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -22,8 +22,6 @@
 #include <binder/Parcel.h>
 #include <media/IOMX.h>
 #include <media/stagefright/foundation/ADebug.h>
-#include <surfaceflinger/ISurface.h>
-#include <surfaceflinger/Surface.h>
 
 namespace android {
 
@@ -59,9 +57,10 @@
         : BpInterface<IOMX>(impl) {
     }
 
-    virtual bool livesLocally(pid_t pid) {
+    virtual bool livesLocally(node_id node, pid_t pid) {
         Parcel data, reply;
         data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
+        data.writeIntPtr((intptr_t)node);
         data.writeInt32(pid);
         remote()->transact(LIVES_LOCALLY, data, &reply);
 
@@ -417,7 +416,9 @@
         case LIVES_LOCALLY:
         {
             CHECK_INTERFACE(IOMX, data, reply);
-            reply->writeInt32(livesLocally((pid_t)data.readInt32()));
+            node_id node = (void *)data.readIntPtr();
+            pid_t pid = (pid_t)data.readInt32();
+            reply->writeInt32(livesLocally(node, pid));
 
             return OK;
         }
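
livesLocally() now identifies the node as well as the caller: the node_id (a pointer-sized handle in IOMX) goes over the wire with writeIntPtr()/readIntPtr(), and the pid as an int32. A sketch of the caller-side question this answers, assuming 'omx' and 'node' were obtained elsewhere (e.g. from IOMX::allocateNode()):

    // Sketch: ask whether a given OMX node is hosted in this process.
    #include <unistd.h>
    #include <media/IOMX.h>

    using namespace android;

    static bool nodeIsInProcess(const sp<IOMX>& omx, IOMX::node_id node) {
        // the proxy marshals the node with writeIntPtr() and the pid as an int32
        return omx->livesLocally(node, getpid());
    }
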
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
index fa5b67a..f1f62f7 100644
--- a/media/libmedia/JetPlayer.cpp
+++ b/media/libmedia/JetPlayer.cpp
@@ -74,14 +74,14 @@
 
     // init the EAS library
     result = EAS_Init(&mEasData);
-    if( result != EAS_SUCCESS) {
+    if (result != EAS_SUCCESS) {
         ALOGE("JetPlayer::init(): Error initializing Sonivox EAS library, aborting.");
         mState = EAS_STATE_ERROR;
         return result;
     }
     // init the JET library with the default app event controller range
     result = JET_Init(mEasData, NULL, sizeof(S_JET_CONFIG));
-    if( result != EAS_SUCCESS) {
+    if (result != EAS_SUCCESS) {
         ALOGE("JetPlayer::init(): Error initializing JET library, aborting.");
         mState = EAS_STATE_ERROR;
         return result;
@@ -91,16 +91,17 @@
     mAudioTrack = new AudioTrack();
     mAudioTrack->set(AUDIO_STREAM_MUSIC,  //TODO parametrize this
             pLibConfig->sampleRate,
-            1, // format = PCM 16bits per sample,
-            (pLibConfig->numChannels == 2) ? AUDIO_CHANNEL_OUT_STEREO : AUDIO_CHANNEL_OUT_MONO,
+            AUDIO_FORMAT_PCM_16_BIT,
+            audio_channel_out_mask_from_count(pLibConfig->numChannels),
             mTrackBufferSize,
-            0);
+            AUDIO_POLICY_OUTPUT_FLAG_NONE);
 
     // create render and playback thread
     {
         Mutex::Autolock l(mMutex);
         ALOGV("JetPlayer::init(): trying to start render thread");
-        createThreadEtc(renderThread, this, "jetRenderThread", ANDROID_PRIORITY_AUDIO);
+        mThread = new JetPlayerThread(this);
+        mThread->run("jetRenderThread", ANDROID_PRIORITY_AUDIO);
         mCondition.wait(mMutex);
     }
     if (mTid > 0) {
@@ -150,18 +151,12 @@
         mAudioBuffer = NULL;
     }
     mEasData = NULL;
-    
+
     return EAS_SUCCESS;
 }
 
 
 //-------------------------------------------------------------------------------------------------
-int JetPlayer::renderThread(void* p) {
-
-    return ((JetPlayer*)p)->render();
-}
-
-//-------------------------------------------------------------------------------------------------
 int JetPlayer::render() {
     EAS_RESULT result = EAS_FAILURE;
     EAS_I32 count;
@@ -171,12 +166,8 @@
     ALOGV("JetPlayer::render(): entering");
 
     // allocate render buffer
-    mAudioBuffer = 
+    mAudioBuffer =
         new EAS_PCM[pLibConfig->mixBufferSize * pLibConfig->numChannels * MIX_NUM_BUFFERS];
-    if (!mAudioBuffer) {
-        ALOGE("JetPlayer::render(): mAudioBuffer allocate failed");
-        goto threadExit;
-    }
 
     // signal main thread that we started
     {
@@ -186,8 +177,8 @@
         mCondition.signal();
     }
 
-   while (1) {
-    
+    while (1) {
+
         mMutex.lock(); // [[[[[[[[ LOCK ---------------------------------------
 
         if (mEasData == NULL) {
@@ -195,20 +186,20 @@
             ALOGV("JetPlayer::render(): NULL EAS data, exiting render.");
             goto threadExit;
         }
-            
+
         // nothing to render, wait for client thread to wake us up
         while (!mRender)
         {
             ALOGV("JetPlayer::render(): signal wait");
-            if (audioStarted) { 
-                mAudioTrack->pause(); 
+            if (audioStarted) {
+                mAudioTrack->pause();
                 // we have to restart the playback once we start rendering again
                 audioStarted = false;
             }
             mCondition.wait(mMutex);
             ALOGV("JetPlayer::render(): signal rx'd");
         }
-        
+
         // render midi data into the input buffer
         int num_output = 0;
         EAS_PCM* p = mAudioBuffer;
@@ -219,8 +210,8 @@
             }
             p += count * pLibConfig->numChannels;
             num_output += count * pLibConfig->numChannels * sizeof(EAS_PCM);
-            
-             // send events that were generated (if any) to the event callback
+
+            // send events that were generated (if any) to the event callback
             fireEventsFromJetQueue();
         }
 
@@ -255,14 +246,12 @@
     }//while (1)
 
 threadExit:
-    if (mAudioTrack) {
+    if (mAudioTrack != NULL) {
         mAudioTrack->stop();
         mAudioTrack->flush();
     }
-    if (mAudioBuffer) {
-        delete [] mAudioBuffer;
-        mAudioBuffer = NULL;
-    }
+    delete [] mAudioBuffer;
+    mAudioBuffer = NULL;
     mMutex.lock();
     mTid = -1;
     mCondition.signal();
@@ -276,9 +265,9 @@
 // precondition: mMutex locked
 void JetPlayer::fireUpdateOnStatusChange()
 {
-    if(  (mJetStatus.currentUserID      != mPreviousJetStatus.currentUserID)
+    if ( (mJetStatus.currentUserID      != mPreviousJetStatus.currentUserID)
        ||(mJetStatus.segmentRepeatCount != mPreviousJetStatus.segmentRepeatCount) ) {
-        if(mEventCallback)  {
+        if (mEventCallback)  {
             mEventCallback(
                 JetPlayer::JET_USERID_UPDATE,
                 mJetStatus.currentUserID,
@@ -289,8 +278,8 @@
         mPreviousJetStatus.segmentRepeatCount = mJetStatus.segmentRepeatCount;
     }
 
-    if(mJetStatus.numQueuedSegments != mPreviousJetStatus.numQueuedSegments) {
-        if(mEventCallback)  {
+    if (mJetStatus.numQueuedSegments != mPreviousJetStatus.numQueuedSegments) {
+        if (mEventCallback)  {
             mEventCallback(
                 JetPlayer::JET_NUMQUEUEDSEGMENT_UPDATE,
                 mJetStatus.numQueuedSegments,
@@ -300,8 +289,8 @@
         mPreviousJetStatus.numQueuedSegments  = mJetStatus.numQueuedSegments;
     }
 
-    if(mJetStatus.paused != mPreviousJetStatus.paused) {
-        if(mEventCallback)  {
+    if (mJetStatus.paused != mPreviousJetStatus.paused) {
+        if (mEventCallback)  {
             mEventCallback(JetPlayer::JET_PAUSE_UPDATE,
                 mJetStatus.paused,
                 -1,
@@ -318,7 +307,7 @@
 // precondition: mMutex locked
 void JetPlayer::fireEventsFromJetQueue()
 {
-    if(!mEventCallback) {
+    if (!mEventCallback) {
         // no callback, just empty the event queue
         while (JET_GetEvent(mEasData, NULL, NULL)) { }
         return;
@@ -343,8 +332,8 @@
     Mutex::Autolock lock(mMutex);
 
     mEasJetFileLoc = (EAS_FILE_LOCATOR) malloc(sizeof(EAS_FILE));
-    memset(mJetFilePath, 0, 256);
-    strncpy(mJetFilePath, path, strlen(path));
+    strncpy(mJetFilePath, path, sizeof(mJetFilePath));
+    mJetFilePath[sizeof(mJetFilePath) - 1] = '\0';
     mEasJetFileLoc->path = mJetFilePath;
 
     mEasJetFileLoc->fd = 0;
@@ -352,7 +341,7 @@
     mEasJetFileLoc->offset = 0;
 
     EAS_RESULT result = JET_OpenFile(mEasData, mEasJetFileLoc);
-    if(result != EAS_SUCCESS)
+    if (result != EAS_SUCCESS)
         mState = EAS_STATE_ERROR;
     else
         mState = EAS_STATE_OPEN;
@@ -364,7 +353,7 @@
 int JetPlayer::loadFromFD(const int fd, const long long offset, const long long length)
 {
     ALOGV("JetPlayer::loadFromFD(): fd=%d offset=%lld length=%lld", fd, offset, length);
-    
+
     Mutex::Autolock lock(mMutex);
 
     mEasJetFileLoc = (EAS_FILE_LOCATOR) malloc(sizeof(EAS_FILE));
@@ -372,9 +361,9 @@
     mEasJetFileLoc->offset = offset;
     mEasJetFileLoc->length = length;
     mEasJetFileLoc->path = NULL;
-    
+
     EAS_RESULT result = JET_OpenFile(mEasData, mEasJetFileLoc);
-    if(result != EAS_SUCCESS)
+    if (result != EAS_SUCCESS)
         mState = EAS_STATE_ERROR;
     else
         mState = EAS_STATE_OPEN;
@@ -403,7 +392,7 @@
 
     JET_Status(mEasData, &mJetStatus);
     this->dumpJetStatus(&mJetStatus);
-    
+
     fireUpdateOnStatusChange();
 
     // wake up render thread
@@ -479,7 +468,7 @@
 
 void JetPlayer::dumpJetStatus(S_JET_STATUS* pJetStatus)
 {
-    if(pJetStatus!=NULL)
+    if (pJetStatus!=NULL)
         ALOGV(">> current JET player status: userID=%d segmentRepeatCount=%d numQueuedSegments=%d paused=%d",
                 pJetStatus->currentUserID, pJetStatus->segmentRepeatCount,
                 pJetStatus->numQueuedSegments, pJetStatus->paused);
@@ -489,4 +478,3 @@
 
 
 } // end namespace android
-
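
The render thread is now started through a Thread subclass run at ANDROID_PRIORITY_AUDIO instead of the old createThreadEtc() trampoline, which is why the static renderThread() function disappears. The JetPlayerThread declaration itself lives in JetPlayer.h and is not part of this diff, so the sketch below only shows what such a wrapper typically looks like; it is written standalone and assumes a header location and an accessible render() method (in the real header the thread is a nested class, which is how it reaches render()):

    // Sketch: a JetPlayerThread-style wrapper around android::Thread.
    #include <utils/threads.h>
    #include <media/JetPlayer.h>   // assumed header location for JetPlayer

    namespace android {

    class SketchJetPlayerThread : public Thread {
    public:
        SketchJetPlayerThread(JetPlayer* player) : mPlayer(player) {}

    private:
        // Thread re-invokes threadLoop() while it returns true; returning false
        // after one pass matches the old one-shot renderThread() trampoline.
        virtual bool threadLoop() {
            mPlayer->render();
            return false;
        }

        JetPlayer* mPlayer;
    };

    }  // namespace android

    // usage, as in init() above:
    //   sp<SketchJetPlayerThread> t = new SketchJetPlayerThread(this);
    //   t->run("jetRenderThread", ANDROID_PRIORITY_AUDIO);
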
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index c905762..c224f06 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -25,8 +25,8 @@
 #include <cutils/properties.h>
 #include <expat.h>
 #include <media/MediaProfiles.h>
-#include <media/stagefright/MediaDebug.h>
-#include <media/stagefright/openmax/OMX_Video.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <OMX_Video.h>
 
 namespace android {
 
@@ -349,7 +349,7 @@
 {
     CHECK(!strcmp("quality", atts[0]));
     int quality = atoi(atts[1]);
-    ALOGV("%s: cameraId=%d, quality=%d\n", __func__, cameraId, quality);
+    ALOGV("%s: cameraId=%d, quality=%d", __func__, cameraId, quality);
     ImageEncodingQualityLevels *levels = findImageEncodingQualityLevels(cameraId);
 
     if (levels == NULL) {
@@ -1099,12 +1099,12 @@
                                                   camcorder_quality quality) const
 {
     ALOGV("getCamcorderProfileParamByName: %s for camera %d, quality %d",
-         name, cameraId, quality);
+        name, cameraId, quality);
 
     int index = getCamcorderProfileIndex(cameraId, quality);
     if (index == -1) {
         ALOGE("The given camcorder profile camera %d quality %d is not found",
-             cameraId, quality);
+            cameraId, quality);
         return -1;
     }
 
diff --git a/media/libmedia/MediaScanner.cpp b/media/libmedia/MediaScanner.cpp
index 79cab74..28b5aa7 100644
--- a/media/libmedia/MediaScanner.cpp
+++ b/media/libmedia/MediaScanner.cpp
@@ -54,7 +54,7 @@
 void MediaScanner::loadSkipList() {
     mSkipList = (char *)malloc(PROPERTY_VALUE_MAX * sizeof(char));
     if (mSkipList) {
-      property_get("testing.mediascanner.skiplist", mSkipList, "");
+        property_get("testing.mediascanner.skiplist", mSkipList, "");
     }
     if (!mSkipList || (strlen(mSkipList) == 0)) {
         free(mSkipList);
@@ -135,15 +135,15 @@
     struct dirent* entry;
 
     if (shouldSkipDirectory(path)) {
-      ALOGD("Skipping: %s", path);
-      return MEDIA_SCAN_RESULT_OK;
+        ALOGD("Skipping: %s", path);
+        return MEDIA_SCAN_RESULT_OK;
     }
 
     // Treat all files as non-media in directories that contain a  ".nomedia" file
     if (pathRemaining >= 8 /* strlen(".nomedia") */ ) {
         strcpy(fileSpot, ".nomedia");
         if (access(path, F_OK) == 0) {
-            ALOGV("found .nomedia, setting noMedia flag\n");
+            ALOGV("found .nomedia, setting noMedia flag");
             noMedia = true;
         }
 
diff --git a/media/libmedia/MediaScannerClient.cpp b/media/libmedia/MediaScannerClient.cpp
index 40b8188..e1e3348 100644
--- a/media/libmedia/MediaScannerClient.cpp
+++ b/media/libmedia/MediaScannerClient.cpp
@@ -142,12 +142,12 @@
 
         UConverter *conv = ucnv_open(enc, &status);
         if (U_FAILURE(status)) {
-            ALOGE("could not create UConverter for %s\n", enc);
+            ALOGE("could not create UConverter for %s", enc);
             return;
         }
         UConverter *utf8Conv = ucnv_open("UTF-8", &status);
         if (U_FAILURE(status)) {
-            ALOGE("could not create UConverter for UTF-8\n");
+            ALOGE("could not create UConverter for UTF-8");
             ucnv_close(conv);
             return;
         }
@@ -173,6 +173,7 @@
             const char* source = mValues->getEntry(i);
             int targetLength = len * 3 + 1;
             char* buffer = new char[targetLength];
+            // don't normally check for NULL, but in this case targetLength may be large
             if (!buffer)
                 break;
             char* target = buffer;
@@ -180,7 +181,7 @@
             ucnv_convertEx(utf8Conv, conv, &target, target + targetLength,
                     &source, (const char *)dest, NULL, NULL, NULL, NULL, TRUE, TRUE, &status);
             if (U_FAILURE(status)) {
-                ALOGE("ucnv_convertEx failed: %d\n", status);
+                ALOGE("ucnv_convertEx failed: %d", status);
                 mValues->setEntry(i, "???");
             } else {
                 // zero terminate
@@ -227,4 +228,3 @@
 }
 
 }  // namespace android
-
diff --git a/media/libmedia/Metadata.cpp b/media/libmedia/Metadata.cpp
index 546a9b0..ef8a9ed 100644
--- a/media/libmedia/Metadata.cpp
+++ b/media/libmedia/Metadata.cpp
@@ -57,7 +57,7 @@
 
 Metadata::Metadata(Parcel *p)
     :mData(p),
-     mBegin(p->dataPosition()) { }
+      mBegin(p->dataPosition()) { }
 
 Metadata::~Metadata() { }
 
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 35dfbb8..717d316 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -268,8 +268,8 @@
           repeatCnt: 0,
           repeatSegment: 0 },                            // TONE_CDMA_CALL_SIGNAL_ISDN_SP_PRI
         { segments: { { duration: 0,  waveFreq: { 0 }, 0, 0} },
-           repeatCnt: 0,
-           repeatSegment: 0 },                            // TONE_CDMA_CALL_SIGNAL_ISDN_PAT3
+          repeatCnt: 0,
+          repeatSegment: 0 },                            // TONE_CDMA_CALL_SIGNAL_ISDN_PAT3
         { segments: { { duration: 32, waveFreq: { 2091, 0 }, 0, 0 },
                       { duration: 64, waveFreq: { 2556, 0 }, 4, 0 },
                       { duration: 20, waveFreq: { 2091, 0 }, 0, 0 },
@@ -751,7 +751,7 @@
 
 // Used by ToneGenerator::getToneForRegion() to convert user specified supervisory tone type
 // to actual tone for current region.
-const unsigned char ToneGenerator::sToneMappingTable[NUM_REGIONS-1][NUM_SUP_TONES] = {
+const unsigned char /*tone_type*/ ToneGenerator::sToneMappingTable[NUM_REGIONS-1][NUM_SUP_TONES] = {
         {   // ANSI
             TONE_ANSI_DIAL,             // TONE_SUP_DIAL
             TONE_ANSI_BUSY,             // TONE_SUP_BUSY
@@ -791,16 +791,16 @@
 //        generators, instantiates output audio track.
 //
 //    Input:
-//        streamType:        Type of stream used for tone playback (enum AudioTrack::stream_type)
+//        streamType:        Type of stream used for tone playback
 //        volume:            volume applied to tone (0.0 to 1.0)
 //
 //    Output:
 //        none
 //
 ////////////////////////////////////////////////////////////////////////////////
-ToneGenerator::ToneGenerator(int streamType, float volume, bool threadCanCallJava) {
+ToneGenerator::ToneGenerator(audio_stream_type_t streamType, float volume, bool threadCanCallJava) {
 
-    ALOGV("ToneGenerator constructor: streamType=%d, volume=%f\n", streamType, volume);
+    ALOGV("ToneGenerator constructor: streamType=%d, volume=%f", streamType, volume);
 
     mState = TONE_IDLE;
 
@@ -811,9 +811,9 @@
     mThreadCanCallJava = threadCanCallJava;
     mStreamType = streamType;
     mVolume = volume;
-    mpAudioTrack = 0;
-    mpToneDesc = 0;
-    mpNewToneDesc = 0;
+    mpAudioTrack = NULL;
+    mpToneDesc = NULL;
+    mpNewToneDesc = NULL;
     // Generate tone by chunks of 20 ms to keep cadencing precision
     mProcessSize = (mSamplingRate * 20) / 1000;
 
@@ -829,9 +829,9 @@
     }
 
     if (initAudioTrack()) {
-        ALOGV("ToneGenerator INIT OK, time: %d\n", (unsigned int)(systemTime()/1000000));
+        ALOGV("ToneGenerator INIT OK, time: %d", (unsigned int)(systemTime()/1000000));
     } else {
-        ALOGV("!!!ToneGenerator INIT FAILED!!!\n");
+        ALOGV("!!!ToneGenerator INIT FAILED!!!");
     }
 }
 
@@ -853,11 +853,11 @@
 //
 ////////////////////////////////////////////////////////////////////////////////
 ToneGenerator::~ToneGenerator() {
-    ALOGV("ToneGenerator destructor\n");
+    ALOGV("ToneGenerator destructor");
 
-    if (mpAudioTrack) {
+    if (mpAudioTrack != NULL) {
         stopTone();
-        ALOGV("Delete Track: %p\n", mpAudioTrack);
+        ALOGV("Delete Track: %p", mpAudioTrack);
         delete mpAudioTrack;
     }
 }
@@ -878,7 +878,7 @@
 //        none
 //
 ////////////////////////////////////////////////////////////////////////////////
-bool ToneGenerator::startTone(int toneType, int durationMs) {
+bool ToneGenerator::startTone(tone_type toneType, int durationMs) {
     bool lResult = false;
     status_t lStatus;
 
@@ -892,7 +892,7 @@
         }
     }
 
-    ALOGV("startTone\n");
+    ALOGV("startTone");
 
     mLock.lock();
 
@@ -915,7 +915,7 @@
 
     if (mState == TONE_INIT) {
         if (prepareWave()) {
-            ALOGV("Immediate start, time %d\n", (unsigned int)(systemTime()/1000000));
+            ALOGV("Immediate start, time %d", (unsigned int)(systemTime()/1000000));
             lResult = true;
             mState = TONE_STARTING;
             mLock.unlock();
@@ -934,7 +934,7 @@
             mState = TONE_IDLE;
         }
     } else {
-        ALOGV("Delayed start\n");
+        ALOGV("Delayed start");
         mState = TONE_RESTARTING;
         lStatus = mWaitCbkCond.waitRelative(mLock, seconds(3));
         if (lStatus == NO_ERROR) {
@@ -949,8 +949,8 @@
     }
     mLock.unlock();
 
-    ALOGV_IF(lResult, "Tone started, time %d\n", (unsigned int)(systemTime()/1000000));
-    ALOGW_IF(!lResult, "Tone start failed!!!, time %d\n", (unsigned int)(systemTime()/1000000));
+    ALOGV_IF(lResult, "Tone started, time %d", (unsigned int)(systemTime()/1000000));
+    ALOGW_IF(!lResult, "Tone start failed!!!, time %d", (unsigned int)(systemTime()/1000000));
 
     return lResult;
 }
@@ -1012,23 +1012,19 @@
 
     if (mpAudioTrack) {
         delete mpAudioTrack;
-        mpAudioTrack = 0;
+        mpAudioTrack = NULL;
     }
 
-   // Open audio track in mono, PCM 16bit, default sampling rate, default buffer size
+    // Open audio track in mono, PCM 16bit, default sampling rate, default buffer size
     mpAudioTrack = new AudioTrack();
-    if (mpAudioTrack == 0) {
-        ALOGE("AudioTrack allocation failed");
-        goto initAudioTrack_exit;
-    }
-    ALOGV("Create Track: %p\n", mpAudioTrack);
+    ALOGV("Create Track: %p", mpAudioTrack);
 
     mpAudioTrack->set(mStreamType,
                       0,
                       AUDIO_FORMAT_PCM_16_BIT,
                       AUDIO_CHANNEL_OUT_MONO,
                       0,
-                      0,
+                      AUDIO_POLICY_OUTPUT_FLAG_NONE,
                       audioCallback,
                       this,
                       0,
@@ -1049,10 +1045,10 @@
 initAudioTrack_exit:
 
     // Cleanup
-    if (mpAudioTrack) {
-        ALOGV("Delete Track I: %p\n", mpAudioTrack);
+    if (mpAudioTrack != NULL) {
+        ALOGV("Delete Track I: %p", mpAudioTrack);
         delete mpAudioTrack;
-        mpAudioTrack = 0;
+        mpAudioTrack = NULL;
     }
 
     return false;
@@ -1145,7 +1141,7 @@
         if (lpToneGen->mTotalSmp > lpToneGen->mNextSegSmp) {
             // Time to go to next sequence segment
 
-            ALOGV("End Segment, time: %d\n", (unsigned int)(systemTime()/1000000));
+            ALOGV("End Segment, time: %d", (unsigned int)(systemTime()/1000000));
 
             lGenSmp = lReqSmp;
 
@@ -1160,13 +1156,13 @@
                     lpWaveGen->getSamples(lpOut, lGenSmp, lWaveCmd);
                     lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[++lFreqIdx];
                 }
-                ALOGV("ON->OFF, lGenSmp: %d, lReqSmp: %d\n", lGenSmp, lReqSmp);
+                ALOGV("ON->OFF, lGenSmp: %d, lReqSmp: %d", lGenSmp, lReqSmp);
             }
 
             // check if we need to loop and loop for the reqd times
             if (lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt) {
                 if (lpToneGen->mLoopCounter < lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt) {
-                    ALOGV ("in if loop loopCnt(%d) loopctr(%d), CurSeg(%d) \n",
+                    ALOGV ("in if loop loopCnt(%d) loopctr(%d), CurSeg(%d)",
                           lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt,
                           lpToneGen->mLoopCounter,
                           lpToneGen->mCurSegment);
@@ -1176,14 +1172,14 @@
                     // completed loop. go to next segment
                     lpToneGen->mLoopCounter = 0;
                     lpToneGen->mCurSegment++;
-                    ALOGV ("in else loop loopCnt(%d) loopctr(%d), CurSeg(%d) \n",
+                    ALOGV ("in else loop loopCnt(%d) loopctr(%d), CurSeg(%d)",
                           lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt,
                           lpToneGen->mLoopCounter,
                           lpToneGen->mCurSegment);
                 }
             } else {
                 lpToneGen->mCurSegment++;
-                ALOGV ("Goto next seg loopCnt(%d) loopctr(%d), CurSeg(%d) \n",
+                ALOGV ("Goto next seg loopCnt(%d) loopctr(%d), CurSeg(%d)",
                       lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt,
                       lpToneGen->mLoopCounter,
                       lpToneGen->mCurSegment);
@@ -1192,32 +1188,32 @@
 
             // Handle loop if last segment reached
             if (lpToneDesc->segments[lpToneGen->mCurSegment].duration == 0) {
-                ALOGV("Last Seg: %d\n", lpToneGen->mCurSegment);
+                ALOGV("Last Seg: %d", lpToneGen->mCurSegment);
 
                 // Pre increment loop count and restart if total count not reached. Stop sequence otherwise
                 if (++lpToneGen->mCurCount <= lpToneDesc->repeatCnt) {
-                    ALOGV("Repeating Count: %d\n", lpToneGen->mCurCount);
+                    ALOGV("Repeating Count: %d", lpToneGen->mCurCount);
 
                     lpToneGen->mCurSegment = lpToneDesc->repeatSegment;
                     if (lpToneDesc->segments[lpToneDesc->repeatSegment].waveFreq[0] != 0) {
                         lWaveCmd = WaveGenerator::WAVEGEN_START;
                     }
 
-                    ALOGV("New segment %d, Next Time: %d\n", lpToneGen->mCurSegment,
+                    ALOGV("New segment %d, Next Time: %d", lpToneGen->mCurSegment,
                             (lpToneGen->mNextSegSmp*1000)/lpToneGen->mSamplingRate);
 
                 } else {
                     lGenSmp = 0;
-                    ALOGV("End repeat, time: %d\n", (unsigned int)(systemTime()/1000000));
+                    ALOGV("End repeat, time: %d", (unsigned int)(systemTime()/1000000));
                 }
             } else {
-                ALOGV("New segment %d, Next Time: %d\n", lpToneGen->mCurSegment,
+                ALOGV("New segment %d, Next Time: %d", lpToneGen->mCurSegment,
                         (lpToneGen->mNextSegSmp*1000)/lpToneGen->mSamplingRate);
                 if (lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[0] != 0) {
                     // If next segment is not silent,  OFF -> ON transition : reset wave generator
                     lWaveCmd = WaveGenerator::WAVEGEN_START;
 
-                    ALOGV("OFF->ON, lGenSmp: %d, lReqSmp: %d\n", lGenSmp, lReqSmp);
+                    ALOGV("OFF->ON, lGenSmp: %d, lReqSmp: %d", lGenSmp, lReqSmp);
                 } else {
                     lGenSmp = 0;
                 }
@@ -1255,13 +1251,13 @@
 
         switch (lpToneGen->mState) {
         case TONE_RESTARTING:
-            ALOGV("Cbk restarting track\n");
+            ALOGV("Cbk restarting track");
             if (lpToneGen->prepareWave()) {
                 lpToneGen->mState = TONE_STARTING;
                 // must reload lpToneDesc as prepareWave() may change mpToneDesc
                 lpToneDesc = lpToneGen->mpToneDesc;
             } else {
-                ALOGW("Cbk restarting prepareWave() failed\n");
+                ALOGW("Cbk restarting prepareWave() failed");
                 lpToneGen->mState = TONE_IDLE;
                 lpToneGen->mpAudioTrack->stop();
                 // Force loop exit
@@ -1270,14 +1266,14 @@
             lSignal = true;
             break;
         case TONE_STOPPING:
-            ALOGV("Cbk Stopping\n");
+            ALOGV("Cbk Stopping");
             lpToneGen->mState = TONE_STOPPED;
             // Force loop exit
             lNumSmp = 0;
             break;
         case TONE_STOPPED:
             lpToneGen->mState = TONE_INIT;
-            ALOGV("Cbk Stopped track\n");
+            ALOGV("Cbk Stopped track");
             lpToneGen->mpAudioTrack->stop();
             // Force loop exit
             lNumSmp = 0;
@@ -1285,12 +1281,12 @@
             lSignal = true;
             break;
         case TONE_STARTING:
-            ALOGV("Cbk starting track\n");
+            ALOGV("Cbk starting track");
             lpToneGen->mState = TONE_PLAYING;
             lSignal = true;
-           break;
+            break;
         case TONE_PLAYING:
-           break;
+            break;
         default:
             // Force loop exit
             lNumSmp = 0;
@@ -1321,7 +1317,7 @@
 bool ToneGenerator::prepareWave() {
     unsigned int segmentIdx = 0;
 
-    if (!mpNewToneDesc) {
+    if (mpNewToneDesc == NULL) {
         return false;
     }
 
@@ -1353,9 +1349,6 @@
                         new ToneGenerator::WaveGenerator((unsigned short)mSamplingRate,
                                 frequency,
                                 TONEGEN_GAIN/lNumWaves);
-                if (lpWaveGen == 0) {
-                    goto prepareWave_exit;
-                }
                 mWaveGens.add(frequency, lpWaveGen);
             }
             frequency = mpNewToneDesc->segments[segmentIdx].waveFreq[++freqIdx];
@@ -1375,12 +1368,6 @@
     }
 
     return true;
-
-prepareWave_exit:
-
-    clearWaveGens();
-
-    return false;
 }
 
 
@@ -1447,13 +1434,13 @@
 //        none
 //
 ////////////////////////////////////////////////////////////////////////////////
-int ToneGenerator::getToneForRegion(int toneType) {
-    int regionTone;
+ToneGenerator::tone_type ToneGenerator::getToneForRegion(tone_type toneType) {
+    tone_type regionTone;
 
     if (mRegion == CEPT || toneType < FIRST_SUP_TONE || toneType > LAST_SUP_TONE) {
         regionTone = toneType;
     } else {
-        regionTone = sToneMappingTable[mRegion][toneType - FIRST_SUP_TONE];
+        regionTone = (tone_type) sToneMappingTable[mRegion][toneType - FIRST_SUP_TONE];
     }
 
     ALOGV("getToneForRegion, tone %d, region %d, regionTone %d", toneType, mRegion, regionTone);
@@ -1504,7 +1491,7 @@
         d0 = 32767;
     mA1_Q14 = (short) d0;
 
-    ALOGV("WaveGenerator init, mA1_Q14: %d, mS2_0: %d, mAmplitude_Q15: %d\n",
+    ALOGV("WaveGenerator init, mA1_Q14: %d, mS2_0: %d, mAmplitude_Q15: %d",
             mA1_Q14, mS2_0, mAmplitude_Q15);
 }
 
@@ -1591,4 +1578,3 @@
 }
 
 }  // end namespace android
-
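
A caller-side sketch of the retyped ToneGenerator API may help here: the constructor now takes an audio_stream_type_t and startTone() a tone_type, as introduced in the hunks above. The header path, the choice of AUDIO_STREAM_MUSIC and TONE_SUP_DIAL, and the helper name are illustrative assumptions, not part of this change.

    #include <media/ToneGenerator.h>   // assumed header location
    #include <system/audio.h>

    using android::ToneGenerator;

    // Plays the region-mapped dial tone for one second on the music stream.
    static void playDialTone() {
        ToneGenerator toneGen(AUDIO_STREAM_MUSIC,   // audio_stream_type_t, no longer a bare int
                              1.0f /* volume */,
                              false /* threadCanCallJava */);
        // TONE_SUP_DIAL is routed through sToneMappingTable by getToneForRegion().
        if (toneGen.startTone(ToneGenerator::TONE_SUP_DIAL, 1000 /* durationMs */)) {
            // ... let the tone play (sleep or wait in higher-level logic) ...
        }
        toneGen.stopTone();
    }
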
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index d08ffa5..bcd6ae4 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -27,8 +27,7 @@
 #include <cutils/bitops.h>
 
 #include <media/Visualizer.h>
-
-extern void fixed_fft_real(int n, int32_t *v);
+#include <audio_utils/fixedfft.h>
 
 namespace android {
 
@@ -54,7 +53,7 @@
 
 status_t Visualizer::setEnabled(bool enabled)
 {
-    Mutex::Autolock _l(mLock);
+    Mutex::Autolock _l(mCaptureLock);
 
     sp<CaptureThread> t = mCaptureThread;
     if (t != 0) {
@@ -67,14 +66,14 @@
             }
         }
         t->mLock.lock();
-     }
+    }
 
     status_t status = AudioEffect::setEnabled(enabled);
 
     if (status == NO_ERROR) {
         if (t != 0) {
             if (enabled) {
-                t->run("AudioTrackThread");
+                t->run("Visualizer");
             } else {
                 t->requestExit();
             }
@@ -93,7 +92,7 @@
     if (rate > CAPTURE_RATE_MAX) {
         return BAD_VALUE;
     }
-    Mutex::Autolock _l(mLock);
+    Mutex::Autolock _l(mCaptureLock);
 
     if (mEnabled) {
         return INVALID_OPERATION;
@@ -115,10 +114,6 @@
 
     if (cbk != NULL) {
         mCaptureThread = new CaptureThread(*this, rate, ((flags & CAPTURE_CALL_JAVA) != 0));
-        if (mCaptureThread == 0) {
-            ALOGE("Could not create callback thread");
-            return NO_INIT;
-        }
     }
     ALOGV("setCaptureCallBack() rate: %d thread %p flags 0x%08x",
             rate, mCaptureThread.get(), mCaptureFlags);
@@ -133,7 +128,7 @@
         return BAD_VALUE;
     }
 
-    Mutex::Autolock _l(mLock);
+    Mutex::Autolock _l(mCaptureLock);
     if (mEnabled) {
         return INVALID_OPERATION;
     }
@@ -173,7 +168,7 @@
         uint32_t replySize = mCaptureSize;
         status = command(VISUALIZER_CMD_CAPTURE, 0, NULL, &replySize, waveform);
         ALOGV("getWaveForm() command returned %d", status);
-        if (replySize == 0) {
+        if ((status == NO_ERROR) && (replySize == 0)) {
             status = NOT_ENOUGH_DATA;
         }
     } else {
@@ -235,7 +230,7 @@
 
 void Visualizer::periodicCapture()
 {
-    Mutex::Autolock _l(mLock);
+    Mutex::Autolock _l(mCaptureLock);
     ALOGV("periodicCapture() %p mCaptureCallBack %p mCaptureFlags 0x%08x",
             this, mCaptureCallBack, mCaptureFlags);
     if (mCaptureCallBack != NULL &&
@@ -325,4 +320,3 @@
 }
 
 }; // namespace android
-
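
The capture error path changes subtly here: getWaveForm() now reports NOT_ENOUGH_DATA only when the capture command itself returned NO_ERROR but came back empty. A hedged caller-side sketch of the resulting contract follows; the capture size and helper name are placeholders, and it assumes the public getWaveForm(uint8_t*) member whose body is patched above.

    #include <media/Visualizer.h>      // same header the patched file uses

    using namespace android;

    static const uint32_t kCaptureSize = 128;   // placeholder; must match the configured capture size

    // Returns true when a fresh waveform was copied into buf.
    static bool pollWaveform(Visualizer& visualizer, uint8_t* buf /* kCaptureSize bytes */) {
        status_t st = visualizer.getWaveForm(buf);
        if (st == NOT_ENOUGH_DATA) {
            return false;   // enabled but nothing captured yet: benign, poll again later
        }
        return st == NO_ERROR;
    }
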
diff --git a/media/libmedia/autodetect.cpp b/media/libmedia/autodetect.cpp
index dfcc6a0..be5c3b2 100644
--- a/media/libmedia/autodetect.cpp
+++ b/media/libmedia/autodetect.cpp
@@ -16,7 +16,7 @@
 
 #include "autodetect.h"
 
-typedef struct CharRange {
+struct CharRange {
     uint16_t first;
     uint16_t last;
 };
diff --git a/media/libmedia/fixedfft.cpp b/media/libmedia/fixedfft.cpp
deleted file mode 100644
index 2b495e6..0000000
--- a/media/libmedia/fixedfft.cpp
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* A Fixed point implementation of Fast Fourier Transform (FFT). Complex numbers
- * are represented by 32-bit integers, where higher 16 bits are real part and
- * lower ones are imaginary part. Few compromises are made between efficiency,
- * accuracy, and maintainability. To make it fast, arithmetic shifts are used
- * instead of divisions, and bitwise inverses are used instead of negates. To
- * keep it small, only radix-2 Cooley-Tukey algorithm is implemented, and only
- * half of the twiddle factors are stored. Although there are still ways to make
- * it even faster or smaller, it costs too much on one of the aspects.
- */
-
-#include <stdio.h>
-#include <stdint.h>
-#ifdef __arm__
-#include <machine/cpu-features.h>
-#endif
-
-#define LOG_FFT_SIZE 10
-#define MAX_FFT_SIZE (1 << LOG_FFT_SIZE)
-
-static const int32_t twiddle[MAX_FFT_SIZE / 4] = {
-    0x00008000, 0xff378001, 0xfe6e8002, 0xfda58006, 0xfcdc800a, 0xfc13800f,
-    0xfb4a8016, 0xfa81801e, 0xf9b88027, 0xf8ef8032, 0xf827803e, 0xf75e804b,
-    0xf6958059, 0xf5cd8068, 0xf5058079, 0xf43c808b, 0xf374809e, 0xf2ac80b2,
-    0xf1e480c8, 0xf11c80de, 0xf05580f6, 0xef8d8110, 0xeec6812a, 0xedff8146,
-    0xed388163, 0xec718181, 0xebab81a0, 0xeae481c1, 0xea1e81e2, 0xe9588205,
-    0xe892822a, 0xe7cd824f, 0xe7078276, 0xe642829d, 0xe57d82c6, 0xe4b982f1,
-    0xe3f4831c, 0xe3308349, 0xe26d8377, 0xe1a983a6, 0xe0e683d6, 0xe0238407,
-    0xdf61843a, 0xde9e846e, 0xdddc84a3, 0xdd1b84d9, 0xdc598511, 0xdb998549,
-    0xdad88583, 0xda1885be, 0xd95885fa, 0xd8988637, 0xd7d98676, 0xd71b86b6,
-    0xd65c86f6, 0xd59e8738, 0xd4e1877b, 0xd42487c0, 0xd3678805, 0xd2ab884c,
-    0xd1ef8894, 0xd13488dd, 0xd0798927, 0xcfbe8972, 0xcf0489be, 0xce4b8a0c,
-    0xcd928a5a, 0xccd98aaa, 0xcc218afb, 0xcb698b4d, 0xcab28ba0, 0xc9fc8bf5,
-    0xc9468c4a, 0xc8908ca1, 0xc7db8cf8, 0xc7278d51, 0xc6738dab, 0xc5c08e06,
-    0xc50d8e62, 0xc45b8ebf, 0xc3a98f1d, 0xc2f88f7d, 0xc2488fdd, 0xc198903e,
-    0xc0e990a1, 0xc03a9105, 0xbf8c9169, 0xbedf91cf, 0xbe329236, 0xbd86929e,
-    0xbcda9307, 0xbc2f9371, 0xbb8593dc, 0xbadc9448, 0xba3394b5, 0xb98b9523,
-    0xb8e39592, 0xb83c9603, 0xb7969674, 0xb6f196e6, 0xb64c9759, 0xb5a897ce,
-    0xb5059843, 0xb46298b9, 0xb3c09930, 0xb31f99a9, 0xb27f9a22, 0xb1df9a9c,
-    0xb1409b17, 0xb0a29b94, 0xb0059c11, 0xaf689c8f, 0xaecc9d0e, 0xae319d8e,
-    0xad979e0f, 0xacfd9e91, 0xac659f14, 0xabcd9f98, 0xab36a01c, 0xaaa0a0a2,
-    0xaa0aa129, 0xa976a1b0, 0xa8e2a238, 0xa84fa2c2, 0xa7bda34c, 0xa72ca3d7,
-    0xa69ca463, 0xa60ca4f0, 0xa57ea57e, 0xa4f0a60c, 0xa463a69c, 0xa3d7a72c,
-    0xa34ca7bd, 0xa2c2a84f, 0xa238a8e2, 0xa1b0a976, 0xa129aa0a, 0xa0a2aaa0,
-    0xa01cab36, 0x9f98abcd, 0x9f14ac65, 0x9e91acfd, 0x9e0fad97, 0x9d8eae31,
-    0x9d0eaecc, 0x9c8faf68, 0x9c11b005, 0x9b94b0a2, 0x9b17b140, 0x9a9cb1df,
-    0x9a22b27f, 0x99a9b31f, 0x9930b3c0, 0x98b9b462, 0x9843b505, 0x97ceb5a8,
-    0x9759b64c, 0x96e6b6f1, 0x9674b796, 0x9603b83c, 0x9592b8e3, 0x9523b98b,
-    0x94b5ba33, 0x9448badc, 0x93dcbb85, 0x9371bc2f, 0x9307bcda, 0x929ebd86,
-    0x9236be32, 0x91cfbedf, 0x9169bf8c, 0x9105c03a, 0x90a1c0e9, 0x903ec198,
-    0x8fddc248, 0x8f7dc2f8, 0x8f1dc3a9, 0x8ebfc45b, 0x8e62c50d, 0x8e06c5c0,
-    0x8dabc673, 0x8d51c727, 0x8cf8c7db, 0x8ca1c890, 0x8c4ac946, 0x8bf5c9fc,
-    0x8ba0cab2, 0x8b4dcb69, 0x8afbcc21, 0x8aaaccd9, 0x8a5acd92, 0x8a0cce4b,
-    0x89becf04, 0x8972cfbe, 0x8927d079, 0x88ddd134, 0x8894d1ef, 0x884cd2ab,
-    0x8805d367, 0x87c0d424, 0x877bd4e1, 0x8738d59e, 0x86f6d65c, 0x86b6d71b,
-    0x8676d7d9, 0x8637d898, 0x85fad958, 0x85beda18, 0x8583dad8, 0x8549db99,
-    0x8511dc59, 0x84d9dd1b, 0x84a3dddc, 0x846ede9e, 0x843adf61, 0x8407e023,
-    0x83d6e0e6, 0x83a6e1a9, 0x8377e26d, 0x8349e330, 0x831ce3f4, 0x82f1e4b9,
-    0x82c6e57d, 0x829de642, 0x8276e707, 0x824fe7cd, 0x822ae892, 0x8205e958,
-    0x81e2ea1e, 0x81c1eae4, 0x81a0ebab, 0x8181ec71, 0x8163ed38, 0x8146edff,
-    0x812aeec6, 0x8110ef8d, 0x80f6f055, 0x80def11c, 0x80c8f1e4, 0x80b2f2ac,
-    0x809ef374, 0x808bf43c, 0x8079f505, 0x8068f5cd, 0x8059f695, 0x804bf75e,
-    0x803ef827, 0x8032f8ef, 0x8027f9b8, 0x801efa81, 0x8016fb4a, 0x800ffc13,
-    0x800afcdc, 0x8006fda5, 0x8002fe6e, 0x8001ff37,
-};
-
-/* Returns the multiplication of \conj{a} and {b}. */
-static inline int32_t mult(int32_t a, int32_t b)
-{
-#if __ARM_ARCH__ >= 6
-    int32_t t = b;
-    __asm__("smuad  %0, %0, %1"          : "+r" (t) : "r" (a));
-    __asm__("smusdx %0, %0, %1"          : "+r" (b) : "r" (a));
-    __asm__("pkhtb  %0, %0, %1, ASR #16" : "+r" (t) : "r" (b));
-    return t;
-#else
-    return (((a >> 16) * (b >> 16) + (int16_t)a * (int16_t)b) & ~0xFFFF) |
-        ((((a >> 16) * (int16_t)b - (int16_t)a * (b >> 16)) >> 16) & 0xFFFF);
-#endif
-}
-
-static inline int32_t half(int32_t a)
-{
-#if __ARM_ARCH__ >= 6
-    __asm__("shadd16 %0, %0, %1" : "+r" (a) : "r" (0));
-    return a;
-#else
-    return ((a >> 1) & ~0x8000) | (a & 0x8000);
-#endif
-}
-
-void fixed_fft(int n, int32_t *v)
-{
-    int scale = LOG_FFT_SIZE, i, p, r;
-
-    for (r = 0, i = 1; i < n; ++i) {
-        for (p = n; !(p & r); p >>= 1, r ^= p);
-        if (i < r) {
-            int32_t t = v[i];
-            v[i] = v[r];
-            v[r] = t;
-        }
-    }
-
-    for (p = 1; p < n; p <<= 1) {
-        --scale;
-
-        for (i = 0; i < n; i += p << 1) {
-            int32_t x = half(v[i]);
-            int32_t y = half(v[i + p]);
-            v[i] = x + y;
-            v[i + p] = x - y;
-        }
-
-        for (r = 1; r < p; ++r) {
-            int32_t w = MAX_FFT_SIZE / 4 - (r << scale);
-            i = w >> 31;
-            w = twiddle[(w ^ i) - i] ^ (i << 16);
-            for (i = r; i < n; i += p << 1) {
-                int32_t x = half(v[i]);
-                int32_t y = mult(w, v[i + p]);
-                v[i] = x - y;
-                v[i + p] = x + y;
-            }
-        }
-    }
-}
-
-void fixed_fft_real(int n, int32_t *v)
-{
-    int scale = LOG_FFT_SIZE, m = n >> 1, i;
-
-    fixed_fft(n, v);
-    for (i = 1; i <= n; i <<= 1, --scale);
-    v[0] = mult(~v[0], 0x80008000);
-    v[m] = half(v[m]);
-
-    for (i = 1; i < n >> 1; ++i) {
-        int32_t x = half(v[i]);
-        int32_t z = half(v[n - i]);
-        int32_t y = z - (x ^ 0xFFFF);
-        x = half(x + (z ^ 0xFFFF));
-        y = mult(y, twiddle[i << scale]);
-        v[i] = x - y;
-        v[n - i] = (x + y) ^ 0xFFFF;
-    }
-}
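
The fixed-point FFT moves out of libmedia; callers now include <audio_utils/fixedfft.h> (see the Visualizer.cpp hunk above) instead of re-declaring the function locally. A minimal usage sketch, assuming the relocated header keeps the fixed_fft_real(int, int32_t*) entry point shown in the deleted file; the helper name and buffer sizing are illustrative.

    #include <stdint.h>
    #include <audio_utils/fixedfft.h>   // replaces the local extern declaration

    // In-place real FFT of 2*n 16-bit samples. Two consecutive real samples are
    // packed into one int32_t, matching how fixed_fft_real() untwiddles its
    // half-length complex transform: even sample in the high 16 bits (real
    // slot), odd sample in the low 16 bits (imaginary slot). n must be a power
    // of two and no larger than MAX_FFT_SIZE (1024).
    static void realFftInPlace(const int16_t* pcm, int32_t* v, int n) {
        for (int i = 0; i < 2 * n; i += 2) {
            v[i >> 1] = ((int32_t)pcm[i] << 16) | (uint16_t)pcm[i + 1];
        }
        fixed_fft_real(n, v);   // results stay in the same high/low 16-bit packing
    }
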
diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp
index 88e269f..b0241aa 100644
--- a/media/libmedia/mediametadataretriever.cpp
+++ b/media/libmedia/mediametadataretriever.cpp
@@ -35,7 +35,7 @@
 const sp<IMediaPlayerService>& MediaMetadataRetriever::getService()
 {
     Mutex::Autolock lock(sServiceLock);
-    if (sService.get() == 0) {
+    if (sService == 0) {
         sp<IServiceManager> sm = defaultServiceManager();
         sp<IBinder> binder;
         do {
@@ -45,7 +45,7 @@
             }
             ALOGW("MediaPlayerService not published, waiting...");
             usleep(500000); // 0.5 s
-        } while(true);
+        } while (true);
         if (sDeathNotifier == NULL) {
             sDeathNotifier = new DeathNotifier();
         }
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 2284927..b52a37d 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -1,4 +1,4 @@
-/* mediaplayer.cpp
+/*
 **
 ** Copyright 2006, The Android Open Source Project
 **
@@ -30,9 +30,7 @@
 #include <gui/SurfaceTextureClient.h>
 
 #include <media/mediaplayer.h>
-#include <media/AudioTrack.h>
-
-#include <surfaceflinger/Surface.h>
+#include <media/AudioSystem.h>
 
 #include <binder/MemoryBase.h>
 
@@ -63,6 +61,7 @@
     mAudioSessionId = AudioSystem::newAudioSessionId();
     AudioSystem::acquireAudioSessionId(mAudioSessionId);
     mSendLevel = 0;
+    mRetransmitEndpointValid = false;
 }
 
 MediaPlayer::~MediaPlayer()
@@ -95,6 +94,7 @@
     mCurrentPosition = -1;
     mSeekPosition = -1;
     mVideoWidth = mVideoHeight = 0;
+    mRetransmitEndpointValid = false;
 }
 
 status_t MediaPlayer::setListener(const sp<MediaPlayerListener>& listener)
@@ -146,7 +146,8 @@
         const sp<IMediaPlayerService>& service(getMediaPlayerService());
         if (service != 0) {
             sp<IMediaPlayer> player(service->create(getpid(), this, mAudioSessionId));
-            if (NO_ERROR != player->setDataSource(url, headers)) {
+            if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
+                (NO_ERROR != player->setDataSource(url, headers))) {
                 player.clear();
             }
             err = attachNewPlayer(player);
@@ -162,7 +163,8 @@
     const sp<IMediaPlayerService>& service(getMediaPlayerService());
     if (service != 0) {
         sp<IMediaPlayer> player(service->create(getpid(), this, mAudioSessionId));
-        if (NO_ERROR != player->setDataSource(fd, offset, length)) {
+        if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
+            (NO_ERROR != player->setDataSource(fd, offset, length))) {
             player.clear();
         }
         err = attachNewPlayer(player);
@@ -177,7 +179,8 @@
     const sp<IMediaPlayerService>& service(getMediaPlayerService());
     if (service != 0) {
         sp<IMediaPlayer> player(service->create(getpid(), this, mAudioSessionId));
-        if (NO_ERROR != player->setDataSource(source)) {
+        if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
+            (NO_ERROR != player->setDataSource(source))) {
             player.clear();
         }
         err = attachNewPlayer(player);
@@ -192,8 +195,8 @@
             (mCurrentState != MEDIA_PLAYER_STATE_ERROR) &&
             ((mCurrentState & MEDIA_PLAYER_IDLE) != MEDIA_PLAYER_IDLE);
     if ((mPlayer != NULL) && hasBeenInitialized) {
-         ALOGV("invoke %d", request.dataSize());
-         return  mPlayer->invoke(request, reply);
+        ALOGV("invoke %d", request.dataSize());
+        return  mPlayer->invoke(request, reply);
     }
     ALOGE("invoke failed: wrong state %X", mCurrentState);
     return INVALID_OPERATION;
@@ -471,6 +474,20 @@
     return NO_ERROR;
 }
 
+status_t MediaPlayer::doSetRetransmitEndpoint(const sp<IMediaPlayer>& player) {
+    Mutex::Autolock _l(mLock);
+
+    if (player == NULL) {
+        return UNKNOWN_ERROR;
+    }
+
+    if (mRetransmitEndpointValid) {
+        return player->setRetransmitEndpoint(&mRetransmitEndpoint);
+    }
+
+    return OK;
+}
+
 status_t MediaPlayer::reset()
 {
     ALOGV("reset");
@@ -478,7 +495,7 @@
     return reset_l();
 }
 
-status_t MediaPlayer::setAudioStreamType(int type)
+status_t MediaPlayer::setAudioStreamType(audio_stream_type_t type)
 {
     ALOGV("MediaPlayer::setAudioStreamType");
     Mutex::Autolock _l(mLock);
@@ -539,9 +556,9 @@
         return BAD_VALUE;
     }
     if (sessionId != mAudioSessionId) {
-      AudioSystem::releaseAudioSessionId(mAudioSessionId);
-      AudioSystem::acquireAudioSessionId(sessionId);
-      mAudioSessionId = sessionId;
+        AudioSystem::releaseAudioSessionId(mAudioSessionId);
+        AudioSystem::acquireAudioSessionId(sessionId);
+        mAudioSessionId = sessionId;
     }
     return NO_ERROR;
 }
@@ -593,12 +610,40 @@
     ALOGV("MediaPlayer::getParameter(%d)", key);
     Mutex::Autolock _l(mLock);
     if (mPlayer != NULL) {
-         return  mPlayer->getParameter(key, reply);
+        return  mPlayer->getParameter(key, reply);
     }
     ALOGV("getParameter: no active player");
     return INVALID_OPERATION;
 }
 
+status_t MediaPlayer::setRetransmitEndpoint(const char* addrString,
+                                            uint16_t port) {
+    ALOGV("MediaPlayer::setRetransmitEndpoint(%s:%hu)",
+            addrString ? addrString : "(null)", port);
+
+    Mutex::Autolock _l(mLock);
+    if ((mPlayer != NULL) || (mCurrentState != MEDIA_PLAYER_IDLE))
+        return INVALID_OPERATION;
+
+    if (NULL == addrString) {
+        mRetransmitEndpointValid = false;
+        return OK;
+    }
+
+    struct in_addr saddr;
+    if (!inet_aton(addrString, &saddr)) {
+        return BAD_VALUE;
+    }
+
+    memset(&mRetransmitEndpoint, 0, sizeof(mRetransmitEndpoint));
+    mRetransmitEndpoint.sin_family = AF_INET;
+    mRetransmitEndpoint.sin_addr   = saddr;
+    mRetransmitEndpoint.sin_port   = htons(port);
+    mRetransmitEndpointValid       = true;
+
+    return OK;
+}
+
 void MediaPlayer::notify(int msg, int ext1, int ext2, const Parcel *obj)
 {
     ALOGV("message received msg=%d, ext1=%d, ext2=%d", msg, ext1, ext2);
@@ -613,7 +658,7 @@
     // and seekTo within the same process.
     // FIXME: Remember, this is a hack, it's not even a hack that is applied
     // consistently for all use-cases, this needs to be revisited.
-     if (mLockThreadId != getThreadId()) {
+    if (mLockThreadId != getThreadId()) {
         mLock.lock();
         locked = true;
     }
@@ -709,7 +754,7 @@
     }
 }
 
-/*static*/ sp<IMemory> MediaPlayer::decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, int* pFormat)
+/*static*/ sp<IMemory> MediaPlayer::decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat)
 {
     ALOGV("decode(%s)", url);
     sp<IMemory> p;
@@ -729,7 +774,7 @@
     notify(MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED, 0);
 }
 
-/*static*/ sp<IMemory> MediaPlayer::decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, int* pFormat)
+/*static*/ sp<IMemory> MediaPlayer::decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat)
 {
     ALOGV("decode(%d, %lld, %lld)", fd, offset, length);
     sp<IMemory> p;
@@ -743,4 +788,11 @@
 
 }
 
+status_t MediaPlayer::setNextMediaPlayer(const sp<MediaPlayer>& next) {
+    if (mPlayer == NULL) {
+        return NO_INIT;
+    }
+    return mPlayer->setNextPlayer(next == NULL ? NULL : next->mPlayer);
+}
+
 }; // namespace android
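
Two client-facing additions land in mediaplayer.cpp: setRetransmitEndpoint(), which must be called while the player is still idle because it drives player selection inside setDataSource(), and setNextMediaPlayer(), which needs an already attached player. A hedged sketch of the call ordering; the endpoint, URLs, and helper names are placeholders, and the two snippets are independent features shown together only for brevity.

    #include <media/mediaplayer.h>

    using namespace android;

    // (1) Retransmission: set the endpoint before setDataSource(); it is cached
    //     locally and forwarded by doSetRetransmitEndpoint() once the
    //     service-side player has been created.
    static status_t startRetransmitPlayback(const sp<MediaPlayer>& mp) {
        status_t err = mp->setRetransmitEndpoint("10.0.0.2", 5004);   // placeholder endpoint
        if (err != OK) return err;
        return mp->setDataSource("http://example.com/a.mp3", NULL /* headers */);
    }

    // (2) Gapless chaining: both players must already have a data source, since
    //     setNextMediaPlayer() returns NO_INIT without an attached IMediaPlayer.
    static status_t chainForGapless(const sp<MediaPlayer>& current,
                                    const sp<MediaPlayer>& next) {
        return current->setNextMediaPlayer(next);
    }
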
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 8d947d8..9541015 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -18,7 +18,6 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MediaRecorder"
 #include <utils/Log.h>
-#include <surfaceflinger/Surface.h>
 #include <media/mediarecorder.h>
 #include <binder/IServiceManager.h>
 #include <utils/String8.h>
@@ -32,7 +31,7 @@
 status_t MediaRecorder::setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy)
 {
     ALOGV("setCamera(%p,%p)", camera.get(), proxy.get());
-    if(mMediaRecorder == NULL) {
+    if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
     }
@@ -53,7 +52,7 @@
 status_t MediaRecorder::setPreviewSurface(const sp<Surface>& surface)
 {
     ALOGV("setPreviewSurface(%p)", surface.get());
-    if(mMediaRecorder == NULL) {
+    if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
     }
@@ -78,7 +77,7 @@
 status_t MediaRecorder::init()
 {
     ALOGV("init");
-    if(mMediaRecorder == NULL) {
+    if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
     }
@@ -108,7 +107,7 @@
 status_t MediaRecorder::setVideoSource(int vs)
 {
     ALOGV("setVideoSource(%d)", vs);
-    if(mMediaRecorder == NULL) {
+    if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
     }
@@ -143,7 +142,7 @@
 status_t MediaRecorder::setAudioSource(int as)
 {
     ALOGV("setAudioSource(%d)", as);
-    if(mMediaRecorder == NULL) {
+    if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
     }
@@ -176,7 +175,7 @@
 status_t MediaRecorder::setOutputFormat(int of)
 {
     ALOGV("setOutputFormat(%d)", of);
-    if(mMediaRecorder == NULL) {
+    if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
     }
@@ -202,7 +201,7 @@
 status_t MediaRecorder::setVideoEncoder(int ve)
 {
     ALOGV("setVideoEncoder(%d)", ve);
-    if(mMediaRecorder == NULL) {
+    if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
     }
@@ -232,7 +231,7 @@
 status_t MediaRecorder::setAudioEncoder(int ae)
 {
     ALOGV("setAudioEncoder(%d)", ae);
-    if(mMediaRecorder == NULL) {
+    if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
     }
@@ -262,7 +261,7 @@
 status_t MediaRecorder::setOutputFile(const char* path)
 {
     ALOGV("setOutputFile(%s)", path);
-    if(mMediaRecorder == NULL) {
+    if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
     }
@@ -288,7 +287,7 @@
 status_t MediaRecorder::setOutputFile(int fd, int64_t offset, int64_t length)
 {
     ALOGV("setOutputFile(%d, %lld, %lld)", fd, offset, length);
-    if(mMediaRecorder == NULL) {
+    if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
     }
@@ -325,7 +324,7 @@
 status_t MediaRecorder::setVideoSize(int width, int height)
 {
     ALOGV("setVideoSize(%d, %d)", width, height);
-    if(mMediaRecorder == NULL) {
+    if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
     }
@@ -368,7 +367,7 @@
 status_t MediaRecorder::setVideoFrameRate(int frames_per_second)
 {
     ALOGV("setVideoFrameRate(%d)", frames_per_second);
-    if(mMediaRecorder == NULL) {
+    if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
     }
@@ -392,7 +391,7 @@
 
 status_t MediaRecorder::setParameters(const String8& params) {
     ALOGV("setParameters(%s)", params.string());
-    if(mMediaRecorder == NULL) {
+    if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
     }
@@ -420,7 +419,7 @@
 status_t MediaRecorder::prepare()
 {
     ALOGV("prepare");
-    if(mMediaRecorder == NULL) {
+    if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
     }
@@ -459,7 +458,7 @@
 status_t MediaRecorder::getMaxAmplitude(int* max)
 {
     ALOGV("getMaxAmplitude");
-    if(mMediaRecorder == NULL) {
+    if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
     }
@@ -537,7 +536,7 @@
 
     doCleanUp();
     status_t ret = UNKNOWN_ERROR;
-    switch(mCurrentState) {
+    switch (mCurrentState) {
         case MEDIA_RECORDER_IDLE:
             ret = OK;
             break;
@@ -548,7 +547,7 @@
         case MEDIA_RECORDER_ERROR: {
             ret = doReset();
             if (OK != ret) {
-               return ret;  // No need to continue
+                return ret;  // No need to continue
             }
         }  // Intentional fall through
         case MEDIA_RECORDER_INITIALIZED:
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index a3e2517..ba5c776 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -23,25 +23,27 @@
 	libvorbisidec         			\
 	libsonivox            			\
 	libmedia              			\
+	libmedia_native       			\
 	libcamera_client      			\
 	libandroid_runtime    			\
 	libstagefright        			\
 	libstagefright_omx    			\
 	libstagefright_foundation       \
 	libgui                          \
-	libdl
+	libdl                           \
+	libaah_rtp
 
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_nuplayer                 \
         libstagefright_rtsp                     \
 
-LOCAL_C_INCLUDES :=                                                 \
+LOCAL_C_INCLUDES :=                                               \
 	$(JNI_H_INCLUDE)                                                \
 	$(call include-path-for, graphics corecg)                       \
-	$(TOP)/frameworks/base/include/media/stagefright/openmax \
 	$(TOP)/frameworks/base/media/libstagefright/include             \
 	$(TOP)/frameworks/base/media/libstagefright/rtsp                \
-        $(TOP)/external/tremolo/Tremolo \
+	$(TOP)/frameworks/native/include/media/openmax                  \
+	$(TOP)/external/tremolo/Tremolo
 
 LOCAL_MODULE:= libmediaplayerservice
 
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 72678b9..8f62ee4 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -70,6 +70,11 @@
 
 #include <OMX.h>
 
+namespace android {
+sp<MediaPlayerBase> createAAH_TXPlayer();
+sp<MediaPlayerBase> createAAH_RXPlayer();
+}
+
 namespace {
 using android::media::Metadata;
 using android::status_t;
@@ -487,6 +492,7 @@
     mStatus = NO_INIT;
     mAudioSessionId = audioSessionId;
     mUID = uid;
+    mRetransmitEndpointValid = false;
 
 #if CALLBACK_ANTAGONIZER
     ALOGD("create Antagonizer");
@@ -593,6 +599,10 @@
         return NU_PLAYER;
     }
 
+    if (!strncasecmp("aahRX://", url, 8)) {
+        return AAH_RX_PLAYER;
+    }
+
     // use MidiFile for MIDI extensions
     int lenURL = strlen(url);
     for (int i = 0; i < NELEM(FILE_EXTS); ++i) {
@@ -608,6 +618,44 @@
     return getDefaultPlayerType();
 }
 
+player_type MediaPlayerService::Client::getPlayerType(int fd,
+                                                      int64_t offset,
+                                                      int64_t length)
+{
+    // Until re-transmit functionality is added to the existing core Android
+    // players, we use the special AAH TX player whenever we have been
+    // configured for retransmission.
+    if (mRetransmitEndpointValid) {
+        return AAH_TX_PLAYER;
+    }
+
+    return android::getPlayerType(fd, offset, length);
+}
+
+player_type MediaPlayerService::Client::getPlayerType(const char* url)
+{
+    // Until re-transmit functionality is added to the existing core Android
+    // players, we use the special AAH TX player whenever we have been
+    // configured for retransmission.
+    if (mRetransmitEndpointValid) {
+        return AAH_TX_PLAYER;
+    }
+
+    return android::getPlayerType(url);
+}
+
+player_type MediaPlayerService::Client::getPlayerType(
+        const sp<IStreamSource> &source) {
+    // Until re-transmit functionality is added to the existing core Android
+    // players, we use the special AAH TX player whenever we have been
+    // configured for retransmission.
+    if (mRetransmitEndpointValid) {
+        return AAH_TX_PLAYER;
+    }
+
+    return NU_PLAYER;
+}
+
 static sp<MediaPlayerBase> createPlayer(player_type playerType, void* cookie,
         notify_callback_f notifyFunc)
 {
@@ -629,6 +677,14 @@
             ALOGV("Create Test Player stub");
             p = new TestPlayerStub();
             break;
+        case AAH_RX_PLAYER:
+            ALOGV(" create A@H RX Player");
+            p = createAAH_RXPlayer();
+            break;
+        case AAH_TX_PLAYER:
+            ALOGV(" create A@H TX Player");
+            p = createAAH_TXPlayer();
+            break;
         default:
             ALOGE("Unknown player type: %d", playerType);
             return NULL;
@@ -665,6 +721,49 @@
     return p;
 }
 
+sp<MediaPlayerBase> MediaPlayerService::Client::setDataSource_pre(
+        player_type playerType)
+{
+    ALOGV("player type = %d", playerType);
+
+    // create the right type of player
+    sp<MediaPlayerBase> p = createPlayer(playerType);
+    if (p == NULL) {
+        return p;
+    }
+
+    if (!p->hardwareOutput()) {
+        mAudioOutput = new AudioOutput(mAudioSessionId);
+        static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
+    }
+
+    return p;
+}
+
+void MediaPlayerService::Client::setDataSource_post(
+        const sp<MediaPlayerBase>& p,
+        status_t status)
+{
+    ALOGV(" setDataSource");
+    mStatus = status;
+    if (mStatus != OK) {
+        ALOGE("  error: %d", mStatus);
+        return;
+    }
+
+    // Set the re-transmission endpoint if one was chosen.
+    if (mRetransmitEndpointValid) {
+        mStatus = p->setRetransmitEndpoint(&mRetransmitEndpoint);
+        if (mStatus != NO_ERROR) {
+            ALOGE("setRetransmitEndpoint error: %d", mStatus);
+        }
+    }
+
+    if (mStatus == OK) {
+        mPlayer = p;
+    }
+}
+
 status_t MediaPlayerService::Client::setDataSource(
         const char *url, const KeyedVector<String8, String8> *headers)
 {
@@ -696,25 +795,12 @@
         return mStatus;
     } else {
         player_type playerType = getPlayerType(url);
-        ALOGV("player type = %d", playerType);
-
-        // create the right type of player
-        sp<MediaPlayerBase> p = createPlayer(playerType);
-        if (p == NULL) return NO_INIT;
-
-        if (!p->hardwareOutput()) {
-            mAudioOutput = new AudioOutput(mAudioSessionId);
-            static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
+        sp<MediaPlayerBase> p = setDataSource_pre(playerType);
+        if (p == NULL) {
+            return NO_INIT;
         }
 
-        // now set data source
-        ALOGV(" setDataSource");
-        mStatus = p->setDataSource(url, headers);
-        if (mStatus == NO_ERROR) {
-            mPlayer = p;
-        } else {
-            ALOGE("  error: %d", mStatus);
-        }
+        setDataSource_post(p, p->setDataSource(url, headers));
         return mStatus;
     }
 }
@@ -745,46 +831,34 @@
         ALOGV("calculated length = %lld", length);
     }
 
+    // Until re-transmit functionality is added to the existing core Android
+    // players, we use the special AAH TX player whenever we have been configured
+    // for retransmission.
     player_type playerType = getPlayerType(fd, offset, length);
-    ALOGV("player type = %d", playerType);
-
-    // create the right type of player
-    sp<MediaPlayerBase> p = createPlayer(playerType);
-    if (p == NULL) return NO_INIT;
-
-    if (!p->hardwareOutput()) {
-        mAudioOutput = new AudioOutput(mAudioSessionId);
-        static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
+    sp<MediaPlayerBase> p = setDataSource_pre(playerType);
+    if (p == NULL) {
+        return NO_INIT;
     }
 
     // now set data source
-    mStatus = p->setDataSource(fd, offset, length);
-    if (mStatus == NO_ERROR) mPlayer = p;
-
+    setDataSource_post(p, p->setDataSource(fd, offset, length));
     return mStatus;
 }
 
 status_t MediaPlayerService::Client::setDataSource(
         const sp<IStreamSource> &source) {
     // create the right type of player
-    sp<MediaPlayerBase> p = createPlayer(NU_PLAYER);
-
+    // Until re-transmit functionality is added to the existing core Android
+    // players, we use the special AAH TX player whenever we have been configured
+    // for retransmission.
+    player_type playerType = getPlayerType(source);
+    sp<MediaPlayerBase> p = setDataSource_pre(playerType);
     if (p == NULL) {
         return NO_INIT;
     }
 
-    if (!p->hardwareOutput()) {
-        mAudioOutput = new AudioOutput(mAudioSessionId);
-        static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
-    }
-
     // now set data source
-    mStatus = p->setDataSource(source);
-
-    if (mStatus == OK) {
-        mPlayer = p;
-    }
-
+    setDataSource_post(p, p->setDataSource(source));
     return mStatus;
 }
 
@@ -994,6 +1068,20 @@
     return ret;
 }
 
+status_t MediaPlayerService::Client::setNextPlayer(const sp<IMediaPlayer>& player) {
+    ALOGV("setNextPlayer");
+    Mutex::Autolock l(mLock);
+    sp<Client> c = static_cast<Client*>(player.get());
+    mNextClient = c;
+    if (mAudioOutput != NULL && c != NULL) {
+        mAudioOutput->setNextOutput(c->mAudioOutput);
+    } else {
+        ALOGE("no current audio output");
+    }
+    return OK;
+}
+
+
 status_t MediaPlayerService::Client::seekTo(int msec)
 {
     ALOGV("[%d] seekTo(%d)", mConnId, msec);
@@ -1005,12 +1093,13 @@
 status_t MediaPlayerService::Client::reset()
 {
     ALOGV("[%d] reset", mConnId);
+    mRetransmitEndpointValid = false;
     sp<MediaPlayerBase> p = getPlayer();
     if (p == 0) return UNKNOWN_ERROR;
     return p->reset();
 }
 
-status_t MediaPlayerService::Client::setAudioStreamType(int type)
+status_t MediaPlayerService::Client::setAudioStreamType(audio_stream_type_t type)
 {
     ALOGV("[%d] setAudioStreamType(%d)", mConnId, type);
     // TODO: for hardware output, call player instead
@@ -1031,9 +1120,21 @@
 status_t MediaPlayerService::Client::setVolume(float leftVolume, float rightVolume)
 {
     ALOGV("[%d] setVolume(%f, %f)", mConnId, leftVolume, rightVolume);
-    // TODO: for hardware output, call player instead
-    Mutex::Autolock l(mLock);
-    if (mAudioOutput != 0) mAudioOutput->setVolume(leftVolume, rightVolume);
+
+    // for hardware output, call player instead
+    sp<MediaPlayerBase> p = getPlayer();
+    {
+        Mutex::Autolock l(mLock);
+        if (p != 0 && p->hardwareOutput()) {
+            MediaPlayerHWInterface* hwp =
+                    reinterpret_cast<MediaPlayerHWInterface*>(p.get());
+            return hwp->setVolume(leftVolume, rightVolume);
+        } else {
+            if (mAudioOutput != 0) mAudioOutput->setVolume(leftVolume, rightVolume);
+            return NO_ERROR;
+        }
+    }
+
     return NO_ERROR;
 }
 
@@ -1067,11 +1168,50 @@
     return p->getParameter(key, reply);
 }
 
+status_t MediaPlayerService::Client::setRetransmitEndpoint(
+        const struct sockaddr_in* endpoint) {
+
+    if (NULL != endpoint) {
+        uint32_t a = ntohl(endpoint->sin_addr.s_addr);
+        uint16_t p = ntohs(endpoint->sin_port);
+        ALOGV("[%d] setRetransmitEndpoint(%u.%u.%u.%u:%hu)", mConnId,
+                (a >> 24), (a >> 16) & 0xFF, (a >> 8) & 0xFF, (a & 0xFF), p);
+    } else {
+        ALOGV("[%d] setRetransmitEndpoint = <none>", mConnId);
+    }
+
+    sp<MediaPlayerBase> p = getPlayer();
+
+    // Right now, the only valid time to set a retransmit endpoint is before
+    // player selection has been made (since the presence or absence of a
+    // retransmit endpoint is going to determine which player is selected during
+    // setDataSource).
+    if (p != 0) return INVALID_OPERATION;
+
+    if (NULL != endpoint) {
+        mRetransmitEndpoint = *endpoint;
+        mRetransmitEndpointValid = true;
+    } else {
+        mRetransmitEndpointValid = false;
+    }
+
+    return NO_ERROR;
+}
+
 void MediaPlayerService::Client::notify(
         void* cookie, int msg, int ext1, int ext2, const Parcel *obj)
 {
     Client* client = static_cast<Client*>(cookie);
 
+    {
+        Mutex::Autolock l(client->mLock);
+        if (msg == MEDIA_PLAYBACK_COMPLETE && client->mNextClient != NULL) {
+            client->mAudioOutput->switchToNextOutput();
+            client->mNextClient->start();
+            client->mNextClient->mClient->notify(MEDIA_INFO, MEDIA_INFO_STARTED_AS_NEXT, 0, obj);
+        }
+    }
+
     if (MEDIA_INFO == msg &&
         MEDIA_INFO_METADATA_UPDATE == ext1) {
         const media::Metadata::Type metadata_type = ext2;
@@ -1149,7 +1289,7 @@
 
 static size_t kDefaultHeapSize = 1024 * 1024; // 1MB
 
-sp<IMemory> MediaPlayerService::decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, int* pFormat)
+sp<IMemory> MediaPlayerService::decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat)
 {
     ALOGV("decode(%s)", url);
     sp<MemoryBase> mem;
@@ -1197,7 +1337,7 @@
     mem = new MemoryBase(cache->getHeap(), 0, cache->size());
     *pSampleRate = cache->sampleRate();
     *pNumChannels = cache->channelCount();
-    *pFormat = (int)cache->format();
+    *pFormat = cache->format();
     ALOGV("return memory @ %p, sampleRate=%u, channelCount = %d, format = %d", mem->pointer(), *pSampleRate, *pNumChannels, *pFormat);
 
 Exit:
@@ -1205,7 +1345,7 @@
     return mem;
 }
 
-sp<IMemory> MediaPlayerService::decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, int* pFormat)
+sp<IMemory> MediaPlayerService::decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat)
 {
     ALOGV("decode(%d, %lld, %lld)", fd, offset, length);
     sp<MemoryBase> mem;
@@ -1259,12 +1399,16 @@
 MediaPlayerService::AudioOutput::AudioOutput(int sessionId)
     : mCallback(NULL),
       mCallbackCookie(NULL),
+      mCallbackData(NULL),
       mSessionId(sessionId) {
     ALOGV("AudioOutput(%d)", sessionId);
     mTrack = 0;
+    mRecycledTrack = 0;
     mStreamType = AUDIO_STREAM_MUSIC;
     mLeftVolume = 1.0;
     mRightVolume = 1.0;
+    mPlaybackRatePermille = 1000;
+    mSampleRateHz = 0;
     mMsecsPerFrame = 0;
     mAuxEffectId = 0;
     mSendLevel = 0.0;
@@ -1274,6 +1418,8 @@
 MediaPlayerService::AudioOutput::~AudioOutput()
 {
     close();
+    delete mRecycledTrack;
+    delete mCallbackData;
 }
 
 void MediaPlayerService::AudioOutput::setMinBufferCount()
@@ -1339,7 +1485,8 @@
 }
 
 status_t MediaPlayerService::AudioOutput::open(
-        uint32_t sampleRate, int channelCount, int format, int bufferCount,
+        uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
+        audio_format_t format, int bufferCount,
         AudioCallback cb, void *cookie)
 {
     mCallback = cb;
@@ -1351,8 +1498,8 @@
         bufferCount = mMinBufferCount;
 
     }
-    ALOGV("open(%u, %d, %d, %d, %d)", sampleRate, channelCount, format, bufferCount,mSessionId);
-    if (mTrack) close();
+    ALOGV("open(%u, %d, 0x%x, %d, %d, %d)", sampleRate, channelCount, channelMask,
+            format, bufferCount, mSessionId);
     int afSampleRate;
     int afFrameCount;
     int frameCount;
@@ -1366,17 +1513,64 @@
 
     frameCount = (sampleRate*afFrameCount*bufferCount)/afSampleRate;
 
+    if (channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER) {
+        channelMask = audio_channel_out_mask_from_count(channelCount);
+        if (0 == channelMask) {
+            ALOGE("open() error, can\'t derive mask for %d audio channels", channelCount);
+            return NO_INIT;
+        }
+    }
+    if (mRecycledTrack) {
+        // check if the existing track can be reused as-is, or if a new track needs to be created.
+
+        bool reuse = true;
+        if ((mCallbackData == NULL && mCallback != NULL) ||
+                (mCallbackData != NULL && mCallback == NULL)) {
+            // recycled track uses callbacks but the caller wants to use writes, or vice versa
+            ALOGV("can't chain callback and write");
+            reuse = false;
+        } else if ((mRecycledTrack->getSampleRate() != sampleRate) ||
+                (mRecycledTrack->channelCount() != channelCount) ||
+                (mRecycledTrack->frameCount() != frameCount)) {
+            ALOGV("samplerate, channelcount or framecount differ");
+            reuse = false;
+        }
+        if (reuse) {
+            ALOGV("chaining to next output");
+            close();
+            mTrack = mRecycledTrack;
+            mRecycledTrack = NULL;
+            if (mCallbackData != NULL) {
+                mCallbackData->setOutput(this);
+            }
+            return OK;
+        }
+
+        // if we're not going to reuse the track, unblock and flush it
+        if (mCallbackData != NULL) {
+            mCallbackData->setOutput(NULL);
+            mCallbackData->endTrackSwitch();
+        }
+        mRecycledTrack->flush();
+        delete mRecycledTrack;
+        mRecycledTrack = NULL;
+        delete mCallbackData;
+        mCallbackData = NULL;
+        close();
+    }
+
     AudioTrack *t;
     if (mCallback != NULL) {
+        mCallbackData = new CallbackData(this);
         t = new AudioTrack(
                 mStreamType,
                 sampleRate,
                 format,
-                (channelCount == 2) ? AUDIO_CHANNEL_OUT_STEREO : AUDIO_CHANNEL_OUT_MONO,
+                channelMask,
                 frameCount,
-                0 /* flags */,
+                AUDIO_POLICY_OUTPUT_FLAG_NONE,
                 CallbackWrapper,
-                this,
+                mCallbackData,
                 0,
                 mSessionId);
     } else {
@@ -1384,9 +1578,9 @@
                 mStreamType,
                 sampleRate,
                 format,
-                (channelCount == 2) ? AUDIO_CHANNEL_OUT_STEREO : AUDIO_CHANNEL_OUT_MONO,
+                channelMask,
                 frameCount,
-                0,
+                AUDIO_POLICY_OUTPUT_FLAG_NONE,
                 NULL,
                 NULL,
                 0,
@@ -1402,9 +1596,14 @@
     ALOGV("setVolume");
     t->setVolume(mLeftVolume, mRightVolume);
 
-    mMsecsPerFrame = 1.e3 / (float) sampleRate;
+    mSampleRateHz = sampleRate;
+    mMsecsPerFrame = mPlaybackRatePermille / (float) sampleRate;
     mTrack = t;
 
+    status_t res = t->setSampleRate(mPlaybackRatePermille * mSampleRateHz / 1000);
+    if (res != NO_ERROR) {
+        return res;
+    }
     t->setAuxEffectSendLevel(mSendLevel);
     return t->attachAuxEffect(mAuxEffectId);;
 }
@@ -1412,6 +1611,9 @@
 void MediaPlayerService::AudioOutput::start()
 {
     ALOGV("start");
+    if (mCallbackData != NULL) {
+        mCallbackData->endTrackSwitch();
+    }
     if (mTrack) {
         mTrack->setVolume(mLeftVolume, mRightVolume);
         mTrack->setAuxEffectSendLevel(mSendLevel);
@@ -1419,8 +1621,27 @@
     }
 }
 
+void MediaPlayerService::AudioOutput::setNextOutput(const sp<AudioOutput>& nextOutput) {
+    mNextOutput = nextOutput;
+}
 
 
+void MediaPlayerService::AudioOutput::switchToNextOutput() {
+    ALOGV("switchToNextOutput");
+    if (mNextOutput != NULL) {
+        if (mCallbackData != NULL) {
+            mCallbackData->beginTrackSwitch();
+        }
+        delete mNextOutput->mCallbackData;
+        mNextOutput->mCallbackData = mCallbackData;
+        mCallbackData = NULL;
+        mNextOutput->mRecycledTrack = mTrack;
+        mTrack = NULL;
+        mNextOutput->mSampleRateHz = mSampleRateHz;
+        mNextOutput->mMsecsPerFrame = mMsecsPerFrame;
+    }
+}
+
 ssize_t MediaPlayerService::AudioOutput::write(const void* buffer, size_t size)
 {
     LOG_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
@@ -1468,6 +1689,22 @@
     }
 }
 
+status_t MediaPlayerService::AudioOutput::setPlaybackRatePermille(int32_t ratePermille)
+{
+    ALOGV("setPlaybackRatePermille(%d)", ratePermille);
+    status_t res = NO_ERROR;
+    if (mTrack) {
+        res = mTrack->setSampleRate(ratePermille * mSampleRateHz / 1000);
+    } else {
+        res = NO_INIT;
+    }
+    mPlaybackRatePermille = ratePermille;
+    if (mSampleRateHz != 0) {
+        mMsecsPerFrame = mPlaybackRatePermille / (float) mSampleRateHz;
+    }
+    return res;
+}
+
 status_t MediaPlayerService::AudioOutput::setAuxEffectSendLevel(float level)
 {
     ALOGV("setAuxEffectSendLevel(%f)", level);
@@ -1496,13 +1733,22 @@
         return;
     }
 
-    AudioOutput *me = (AudioOutput *)cookie;
+    CallbackData *data = (CallbackData*)cookie;
+    data->lock();
+    AudioOutput *me = data->getOutput();
     AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
+    if (me == NULL) {
+        // no output set, likely because the track was scheduled to be reused
+        // by another player, but the format turned out to be incompatible.
+        data->unlock();
+        buffer->size = 0;
+        return;
+    }
 
     size_t actualSize = (*me->mCallback)(
             me, buffer->raw, buffer->size, me->mCallbackCookie);
 
-    if (actualSize == 0 && buffer->size > 0) {
+    if (actualSize == 0 && buffer->size > 0 && me->mNextOutput == NULL) {
         // We've reached EOS but the audio track is not stopped yet,
         // keep playing silence.
 
@@ -1511,6 +1757,7 @@
     }
 
     buffer->size = actualSize;
+    data->unlock();
 }
 
 int MediaPlayerService::AudioOutput::getSessionId()
@@ -1610,17 +1857,18 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 status_t MediaPlayerService::AudioCache::open(
-        uint32_t sampleRate, int channelCount, int format, int bufferCount,
+        uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
+        audio_format_t format, int bufferCount,
         AudioCallback cb, void *cookie)
 {
-    ALOGV("open(%u, %d, %d, %d)", sampleRate, channelCount, format, bufferCount);
+    ALOGV("open(%u, %d, 0x%x, %d, %d)", sampleRate, channelCount, channelMask, format, bufferCount);
     if (mHeap->getHeapID() < 0) {
         return NO_INIT;
     }
 
     mSampleRate = sampleRate;
     mChannelCount = (uint16_t)channelCount;
-    mFormat = (uint16_t)format;
+    mFormat = format;
     mMsecsPerFrame = 1.e3 / (float) sampleRate;
 
     if (cb != NULL) {
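
The track-recycling path added to AudioOutput::open() above reduces to one compatibility check before a track handed over by switchToNextOutput() is adopted: the delivery mode (callback vs. write) and the PCM geometry must match, otherwise the recycled track is flushed and deleted. A standalone sketch of that decision; TrackParams is a hypothetical helper, not part of this change.

    // Hypothetical summary of the reuse test performed inline in open() above.
    struct TrackParams {
        uint32_t sampleRate;
        int      channelCount;
        int      frameCount;
        bool     usesCallback;   // true when data is pulled via CallbackWrapper
    };

    static bool canReuseRecycledTrack(const TrackParams& recycled,
                                      const TrackParams& requested) {
        if (recycled.usesCallback != requested.usesCallback) {
            return false;   // "can't chain callback and write"
        }
        // "samplerate, channelcount or framecount differ" -> create a new track
        return recycled.sampleRate   == requested.sampleRate &&
               recycled.channelCount == requested.channelCount &&
               recycled.frameCount   == requested.frameCount;
    }
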
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index b04fddb..d4e0eb1 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -18,6 +18,8 @@
 #ifndef ANDROID_MEDIAPLAYERSERVICE_H
 #define ANDROID_MEDIAPLAYERSERVICE_H
 
+#include <arpa/inet.h>
+
 #include <utils/Log.h>
 #include <utils/threads.h>
 #include <utils/List.h>
@@ -29,11 +31,13 @@
 #include <media/IMediaPlayerService.h>
 #include <media/MediaPlayerInterface.h>
 #include <media/Metadata.h>
+#include <media/stagefright/foundation/ABase.h>
 
 #include <system/audio.h>
 
 namespace android {
 
+class AudioTrack;
 class IMediaRecorder;
 class IMediaMetadataRetriever;
 class IOMX;
@@ -66,7 +70,9 @@
 
     class AudioOutput : public MediaPlayerBase::AudioSink
     {
-    public:
+        class CallbackData;
+
+    public:
                                 AudioOutput(int sessionId);
         virtual                 ~AudioOutput();
 
@@ -82,8 +88,8 @@
         virtual int             getSessionId();
 
         virtual status_t        open(
-                uint32_t sampleRate, int channelCount,
-                int format, int bufferCount,
+                uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
+                audio_format_t format, int bufferCount,
                 AudioCallback cb, void *cookie);
 
         virtual void            start();
@@ -92,25 +98,35 @@
         virtual void            flush();
         virtual void            pause();
         virtual void            close();
-                void            setAudioStreamType(int streamType) { mStreamType = streamType; }
+                void            setAudioStreamType(audio_stream_type_t streamType) { mStreamType = streamType; }
                 void            setVolume(float left, float right);
+        virtual status_t        setPlaybackRatePermille(int32_t ratePermille);
                 status_t        setAuxEffectSendLevel(float level);
                 status_t        attachAuxEffect(int effectId);
         virtual status_t        dump(int fd, const Vector<String16>& args) const;
 
         static bool             isOnEmulator();
         static int              getMinBufferCount();
+                void            setNextOutput(const sp<AudioOutput>& nextOutput);
+                void            switchToNextOutput();
+        virtual bool            needsTrailingPadding() { return mNextOutput == NULL; }
+
     private:
         static void             setMinBufferCount();
         static void             CallbackWrapper(
                 int event, void *me, void *info);
 
         AudioTrack*             mTrack;
+        AudioTrack*             mRecycledTrack;
+        sp<AudioOutput>         mNextOutput;
         AudioCallback           mCallback;
         void *                  mCallbackCookie;
-        int                     mStreamType;
+        CallbackData *          mCallbackData;
+        audio_stream_type_t     mStreamType;
         float                   mLeftVolume;
         float                   mRightVolume;
+        int32_t                 mPlaybackRatePermille;
+        uint32_t                mSampleRateHz; // sample rate of the content, as set in open()
         float                   mMsecsPerFrame;
         int                     mSessionId;
         float                   mSendLevel;
@@ -118,7 +134,38 @@
         static bool             mIsOnEmulator;
         static int              mMinBufferCount;  // 12 for emulator; otherwise 4
 
-    };
+        // CallbackData is what is passed to the AudioTrack as the "user" data.
+        // We need to be able to target this to a different Output on the fly,
+        // so we can't use the Output itself for this.
+        class CallbackData {
+        public:
+            CallbackData(AudioOutput *cookie) {
+                mData = cookie;
+                mSwitching = false;
+            }
+            AudioOutput *   getOutput() { return mData;}
+            void            setOutput(AudioOutput* newcookie) { mData = newcookie; }
+            // lock/unlock are used by the callback before accessing the payload of this object
+            void            lock() { mLock.lock(); }
+            void            unlock() { mLock.unlock(); }
+            // beginTrackSwitch/endTrackSwitch are used when this object is being handed over
+            // to the next sink.
+            void            beginTrackSwitch() { mLock.lock(); mSwitching = true; }
+            void            endTrackSwitch() {
+                if (mSwitching) {
+                    mLock.unlock();
+                }
+                mSwitching = false;
+            }
+        private:
+            AudioOutput *   mData;
+            mutable Mutex   mLock;
+            bool            mSwitching;
+            DISALLOW_EVIL_CONSTRUCTORS(CallbackData);
+        };
+
+    }; // AudioOutput
+
 
     class AudioCache : public MediaPlayerBase::AudioSink
     {
@@ -138,8 +185,8 @@
         virtual int             getSessionId();
 
         virtual status_t        open(
-                uint32_t sampleRate, int channelCount, int format,
-                int bufferCount = 1,
+                uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
+                audio_format_t format, int bufferCount = 1,
                 AudioCallback cb = NULL, void *cookie = NULL);
 
         virtual void            start();
@@ -148,10 +195,11 @@
         virtual void            flush() {}
         virtual void            pause() {}
         virtual void            close() {}
-                void            setAudioStreamType(int streamType) {}
+                void            setAudioStreamType(audio_stream_type_t streamType) {}
                 void            setVolume(float left, float right) {}
+        virtual status_t        setPlaybackRatePermille(int32_t ratePermille) { return INVALID_OPERATION; }
                 uint32_t        sampleRate() const { return mSampleRate; }
-                uint32_t        format() const { return (uint32_t)mFormat; }
+                audio_format_t  format() const { return mFormat; }
                 size_t          size() const { return mSize; }
                 status_t        wait();
 
@@ -169,7 +217,7 @@
         sp<MemoryHeapBase>  mHeap;
         float               mMsecsPerFrame;
         uint16_t            mChannelCount;
-        uint16_t            mFormat;
+        audio_format_t      mFormat;
         ssize_t             mFrameCount;
         uint32_t            mSampleRate;
         uint32_t            mSize;
@@ -177,7 +225,7 @@
         bool                mCommandComplete;
 
         sp<Thread>          mCallbackThread;
-    };
+    }; // AudioCache
 
 public:
     static  void                instantiate();
@@ -189,8 +237,8 @@
 
     virtual sp<IMediaPlayer>    create(pid_t pid, const sp<IMediaPlayerClient>& client, int audioSessionId);
 
-    virtual sp<IMemory>         decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, int* pFormat);
-    virtual sp<IMemory>         decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, int* pFormat);
+    virtual sp<IMemory>         decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat);
+    virtual sp<IMemory>         decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat);
     virtual sp<IOMX>            getOMX();
 
     virtual status_t            dump(int fd, const Vector<String16>& args);
@@ -258,7 +306,7 @@
         virtual status_t        getCurrentPosition(int* msec);
         virtual status_t        getDuration(int* msec);
         virtual status_t        reset();
-        virtual status_t        setAudioStreamType(int type);
+        virtual status_t        setAudioStreamType(audio_stream_type_t type);
         virtual status_t        setLooping(int loop);
         virtual status_t        setVolume(float leftVolume, float rightVolume);
         virtual status_t        invoke(const Parcel& request, Parcel *reply);
@@ -270,6 +318,8 @@
         virtual status_t        attachAuxEffect(int effectId);
         virtual status_t        setParameter(int key, const Parcel &request);
         virtual status_t        getParameter(int key, Parcel *reply);
+        virtual status_t        setRetransmitEndpoint(const struct sockaddr_in* endpoint);
+        virtual status_t        setNextPlayer(const sp<IMediaPlayer>& player);
 
         sp<MediaPlayerBase>     createPlayer(player_type playerType);
 
@@ -281,6 +331,14 @@
 
         virtual status_t        setDataSource(const sp<IStreamSource> &source);
 
+        sp<MediaPlayerBase>     setDataSource_pre(player_type playerType);
+        void                    setDataSource_post(const sp<MediaPlayerBase>& p,
+                                                   status_t status);
+
+        player_type             getPlayerType(int fd, int64_t offset, int64_t length);
+        player_type             getPlayerType(const char* url);
+        player_type             getPlayerType(const sp<IStreamSource> &source);
+
         static  void            notify(void* cookie, int msg,
                                        int ext1, int ext2, const Parcel *obj);
 
@@ -332,6 +390,9 @@
                     uid_t                       mUID;
                     sp<ANativeWindow>           mConnectedWindow;
                     sp<IBinder>                 mConnectedWindowBinder;
+                    struct sockaddr_in          mRetransmitEndpoint;
+                    bool                        mRetransmitEndpointValid;
+                    sp<Client>                  mNextClient;
 
         // Metadata filters.
         media::Metadata::Filter mMetadataAllow;  // protected by mLock
@@ -346,7 +407,7 @@
 #if CALLBACK_ANTAGONIZER
                     Antagonizer*                mAntagonizer;
 #endif
-    };
+    }; // Client
 
 // ----------------------------------------------------------------------------
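
The CallbackData class introduced above exists so the AudioTrack callback cookie can be locked and retargeted while a recycled track is handed from one AudioOutput to the next (setNextOutput()/switchToNextOutput()). A rough standalone analogue of that idea, using std::mutex in place of the framework Mutex; the Cookie and Output names are made up for illustration:

#include <cstdio>
#include <mutex>

struct Output { const char *name; };

// Made-up stand-in for the CallbackData idea: the audio callback locks the
// cookie and reads the current target, while the owner may clear or swap the
// target when a recycled track is handed to the next player.
class Cookie {
public:
    explicit Cookie(Output *o) : mTarget(o) {}
    void retarget(Output *o) {
        std::lock_guard<std::mutex> g(mLock);
        mTarget = o;
    }
    void onCallback() {
        std::lock_guard<std::mutex> g(mLock);
        if (mTarget == nullptr) {
            std::puts("no target: produce no data");  // analogue of the buffer->size = 0 path
            return;
        }
        std::printf("deliver audio to %s\n", mTarget->name);
    }
private:
    std::mutex mLock;
    Output *mTarget;
};

int main() {
    Output a{"player A"}, b{"player B"};
    Cookie cookie(&a);
    cookie.onCallback();        // deliver audio to player A
    cookie.retarget(nullptr);   // track pulled for recycling
    cookie.onCallback();        // no target: produce no data
    cookie.retarget(&b);        // switchToNextOutput() analogue
    cookie.onCallback();        // deliver audio to player B
    return 0;
}
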
 
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index d219fc2..beda945 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -33,8 +33,6 @@
 
 #include <utils/String16.h>
 
-#include <media/AudioTrack.h>
-
 #include <system/audio.h>
 
 #include "MediaRecorderClient.h"
diff --git a/media/libmediaplayerservice/MidiFile.cpp b/media/libmediaplayerservice/MidiFile.cpp
index d89b5f4..8db5b9b 100644
--- a/media/libmediaplayerservice/MidiFile.cpp
+++ b/media/libmediaplayerservice/MidiFile.cpp
@@ -86,7 +86,8 @@
     // create playback thread
     {
         Mutex::Autolock l(mMutex);
-        createThreadEtc(renderThread, this, "midithread", ANDROID_PRIORITY_AUDIO);
+        mThread = new MidiFileThread(this);
+        mThread->run("midithread", ANDROID_PRIORITY_AUDIO);
         mCondition.wait(mMutex);
         ALOGV("thread started");
     }
@@ -420,18 +421,14 @@
 }
 
 status_t MidiFile::createOutputTrack() {
-    if (mAudioSink->open(pLibConfig->sampleRate, pLibConfig->numChannels, AUDIO_FORMAT_PCM_16_BIT, 2) != NO_ERROR) {
+    if (mAudioSink->open(pLibConfig->sampleRate, pLibConfig->numChannels,
+            CHANNEL_MASK_USE_CHANNEL_ORDER, AUDIO_FORMAT_PCM_16_BIT, 2) != NO_ERROR) {
         ALOGE("mAudioSink open failed");
         return ERROR_OPEN_FAILED;
     }
     return NO_ERROR;
 }
 
-int MidiFile::renderThread(void* p) {
-
-    return ((MidiFile*)p)->render();
-}
-
 int MidiFile::render() {
     EAS_RESULT result = EAS_FAILURE;
     EAS_I32 count;
diff --git a/media/libmediaplayerservice/MidiFile.h b/media/libmediaplayerservice/MidiFile.h
index 3469389..f6f8f7b 100644
--- a/media/libmediaplayerservice/MidiFile.h
+++ b/media/libmediaplayerservice/MidiFile.h
@@ -19,11 +19,11 @@
 #define ANDROID_MIDIFILE_H
 
 #include <media/MediaPlayerInterface.h>
-#include <media/AudioTrack.h>
 #include <libsonivox/eas.h>
 
 namespace android {
 
+// Note that the name MidiFile is misleading; this actually represents a MIDI file player
 class MidiFile : public MediaPlayerInterface {
 public:
                         MidiFile();
@@ -65,7 +65,6 @@
 private:
             status_t    createOutputTrack();
             status_t    reset_nosync();
-    static  int         renderThread(void*);
             int         render();
             void        updateState(){ EAS_State(mEasData, mEasHandle, &mState); }
 
@@ -78,12 +77,35 @@
     EAS_I32             mDuration;
     EAS_STATE           mState;
     EAS_FILE            mFileLocator;
-    int                 mStreamType;
+    audio_stream_type_t mStreamType;
     bool                mLoop;
     volatile bool       mExit;
     bool                mPaused;
     volatile bool       mRender;
     pid_t               mTid;
+
+    class MidiFileThread : public Thread {
+    public:
+        MidiFileThread(MidiFile *midiPlayer) : mMidiFile(midiPlayer) {
+        }
+
+    protected:
+        virtual ~MidiFileThread() {}
+
+    private:
+        MidiFile *mMidiFile;
+
+        bool threadLoop() {
+            int result;
+            result = mMidiFile->render();
+            return false;
+        }
+
+        MidiFileThread(const MidiFileThread &);
+        MidiFileThread &operator=(const MidiFileThread &);
+    };
+
+    sp<MidiFileThread> mThread;
 };
 
 }; // namespace android
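
The MidiFileThread change above replaces a static entry function handed to createThreadEtc() with a thread object whose threadLoop() runs render() once and returns false. A loose standalone analogue using std::thread rather than the framework Thread class (the Player name is illustrative):

#include <cstdio>
#include <thread>

// The render loop lives in an object that owns the thread and is joined
// before the owner goes away, instead of a free function entry point.
class Player {
public:
    ~Player() { stop(); }
    void start() { mThread = std::thread([this] { render(); }); }
    void stop() {
        if (mThread.joinable()) {
            mThread.join();
        }
    }
private:
    void render() { std::puts("render once, then exit"); }  // threadLoop() returning false
    std::thread mThread;
};

int main() {
    Player player;
    player.start();
    player.stop();
    return 0;
}
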
diff --git a/media/libmediaplayerservice/StagefrightPlayer.cpp b/media/libmediaplayerservice/StagefrightPlayer.cpp
index 6d7771a..619c149 100644
--- a/media/libmediaplayerservice/StagefrightPlayer.cpp
+++ b/media/libmediaplayerservice/StagefrightPlayer.cpp
@@ -166,7 +166,8 @@
 }
 
 status_t StagefrightPlayer::invoke(const Parcel &request, Parcel *reply) {
-    return INVALID_OPERATION;
+    ALOGV("invoke()");
+    return mPlayer->invoke(request, reply);
 }
 
 void StagefrightPlayer::setAudioSink(const sp<AudioSink> &audioSink) {
@@ -176,7 +177,7 @@
 }
 
 status_t StagefrightPlayer::setParameter(int key, const Parcel &request) {
-    ALOGV("setParameter");
+    ALOGV("setParameter(key=%d)", key);
     return mPlayer->setParameter(key, request);
 }
 
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 4632016..ca79657 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -24,6 +24,7 @@
 #include <binder/IServiceManager.h>
 
 #include <media/IMediaPlayerService.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/AudioSource.h>
 #include <media/stagefright/AMRWriter.h>
 #include <media/stagefright/AACWriter.h>
@@ -31,7 +32,6 @@
 #include <media/stagefright/CameraSourceTimeLapse.h>
 #include <media/stagefright/MPEG2TSWriter.h>
 #include <media/stagefright/MPEG4Writer.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/OMXClient.h>
@@ -40,7 +40,7 @@
 #include <media/MediaProfiles.h>
 #include <camera/ICamera.h>
 #include <camera/CameraParameters.h>
-#include <surfaceflinger/Surface.h>
+#include <gui/Surface.h>
 
 #include <utils/Errors.h>
 #include <sys/types.h>
@@ -241,8 +241,8 @@
 status_t StagefrightRecorder::setOutputFile(int fd, int64_t offset, int64_t length) {
     ALOGV("setOutputFile: %d, %lld, %lld", fd, offset, length);
     // These don't make any sense, do they?
-    CHECK_EQ(offset, 0);
-    CHECK_EQ(length, 0);
+    CHECK_EQ(offset, 0ll);
+    CHECK_EQ(length, 0ll);
 
     if (fd < 0) {
         ALOGE("Invalid file descriptor: %d", fd);
@@ -734,7 +734,7 @@
 }
 
 status_t StagefrightRecorder::start() {
-    CHECK(mOutputFd >= 0);
+    CHECK_GE(mOutputFd, 0);
 
     if (mWriter != NULL) {
         ALOGE("File writer is not available");
@@ -837,7 +837,7 @@
     }
 
     OMXClient client;
-    CHECK_EQ(client.connect(), OK);
+    CHECK_EQ(client.connect(), (status_t)OK);
 
     sp<MediaSource> audioEncoder =
         OMXCodec::Create(client.interface(), encMeta,
@@ -850,9 +850,9 @@
 status_t StagefrightRecorder::startAACRecording() {
     // FIXME:
     // Add support for OUTPUT_FORMAT_AAC_ADIF
-    CHECK(mOutputFormat == OUTPUT_FORMAT_AAC_ADTS);
+    CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_AAC_ADTS);
 
-    CHECK(mAudioEncoder == AUDIO_ENCODER_AAC);
+    CHECK_EQ(mAudioEncoder, AUDIO_ENCODER_AAC);
     CHECK(mAudioSource != AUDIO_SOURCE_CNT);
 
     mWriter = new AACWriter(mOutputFd);
@@ -1291,6 +1291,12 @@
     videoSize.width = mVideoWidth;
     videoSize.height = mVideoHeight;
     if (mCaptureTimeLapse) {
+        if (mTimeBetweenTimeLapseFrameCaptureUs < 0) {
+            ALOGE("Invalid mTimeBetweenTimeLapseFrameCaptureUs value: %lld",
+                mTimeBetweenTimeLapseFrameCaptureUs);
+            return BAD_VALUE;
+        }
+
         mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera(
                 mCamera, mCameraProxy, mCameraId,
                 videoSize, mFrameRate, mPreviewSurface,
@@ -1386,7 +1392,7 @@
     }
 
     OMXClient client;
-    CHECK_EQ(client.connect(), OK);
+    CHECK_EQ(client.connect(), (status_t)OK);
 
     uint32_t encoder_flags = 0;
     if (mIsMetaDataStoredInVideoBuffers) {
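
The asserts above gain explicit operand types (0ll, (status_t)OK) and a few CHECK(...) calls become CHECK_EQ/CHECK_GE. In a templated assert both operands go through one comparison and one failure message, so keeping the types aligned avoids signed/width mismatches, and the two-operand forms can report both values, which a plain CHECK(a == b) cannot. The macro internals are not part of this diff; the sketch below is only a minimal stand-in in that spirit:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Minimal stand-in for a templated equality assert (not the ADebug macros):
// matching operand types, as the diff does with 0ll and (status_t)OK, keeps
// the single comparison below unambiguous.
template <typename A, typename B>
static void checkEq(const A &a, const B &b, const char *expr) {
    if (!(a == b)) {
        std::fprintf(stderr, "assertion failed: %s\n", expr);
        std::abort();
    }
}
#define MY_CHECK_EQ(a, b) checkEq((a), (b), #a " == " #b)

int main() {
    int64_t offset = 0;
    MY_CHECK_EQ(offset, 0ll);  // both operands are 64-bit signed
    std::puts("offset check passed");
    return 0;
}
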
diff --git a/media/libmediaplayerservice/nuplayer/Android.mk b/media/libmediaplayerservice/nuplayer/Android.mk
index 33e2f93..9b485d7 100644
--- a/media/libmediaplayerservice/nuplayer/Android.mk
+++ b/media/libmediaplayerservice/nuplayer/Android.mk
@@ -12,11 +12,11 @@
         StreamingSource.cpp             \
 
 LOCAL_C_INCLUDES := \
-        $(TOP)/frameworks/base/include/media/stagefright/openmax        \
+	$(TOP)/frameworks/base/media/libstagefright/httplive            \
 	$(TOP)/frameworks/base/media/libstagefright/include             \
-        $(TOP)/frameworks/base/media/libstagefright/mpeg2ts             \
-        $(TOP)/frameworks/base/media/libstagefright/httplive            \
-        $(TOP)/frameworks/base/media/libstagefright/rtsp                \
+	$(TOP)/frameworks/base/media/libstagefright/mpeg2ts             \
+	$(TOP)/frameworks/base/media/libstagefright/rtsp                \
+	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_MODULE:= libstagefright_nuplayer
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index a00aaa5..526120a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -38,7 +38,6 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
-#include <surfaceflinger/Surface.h>
 #include <gui/ISurfaceTexture.h>
 
 #include "avc_utils.h"
@@ -337,6 +336,7 @@
                     CHECK_EQ(mAudioSink->open(
                                 sampleRate,
                                 numChannels,
+                                CHANNEL_MASK_USE_CHANNEL_ORDER,
                                 AUDIO_FORMAT_PCM_16_BIT,
                                 8 /* bufferCount */),
                              (status_t)OK);
@@ -387,10 +387,10 @@
                      audio ? "audio" : "video");
 
                 mRenderer->queueEOS(audio, UNKNOWN_ERROR);
-            } else {
-                CHECK_EQ((int)what, (int)ACodec::kWhatDrainThisBuffer);
-
+            } else if (what == ACodec::kWhatDrainThisBuffer) {
                 renderBuffer(audio, codecRequest);
+            } else {
+                ALOGV("Unhandled codec notification %d.", what);
             }
 
             break;
@@ -480,7 +480,7 @@
                 // completed.
 
                 ALOGV("postponing reset mFlushingAudio=%d, mFlushingVideo=%d",
-                        mFlushingAudio, mFlushingVideo);
+                      mFlushingAudio, mFlushingVideo);
 
                 mResetPostponed = true;
                 break;
@@ -690,7 +690,7 @@
                 bool timeChange = (type & ATSParser::DISCONTINUITY_TIME) != 0;
 
                 ALOGI("%s discontinuity (formatChange=%d, time=%d)",
-                      audio ? "audio" : "video", formatChange, timeChange);
+                     audio ? "audio" : "video", formatChange, timeChange);
 
                 if (audio) {
                     mSkipRenderingAudioUntilMediaTimeUs = -1;
@@ -768,7 +768,7 @@
          mediaTimeUs / 1E6);
 #endif
 
-    reply->setObject("buffer", accessUnit);
+    reply->setBuffer("buffer", accessUnit);
     reply->post();
 
     return OK;
@@ -793,10 +793,8 @@
         return;
     }
 
-    sp<RefBase> obj;
-    CHECK(msg->findObject("buffer", &obj));
-
-    sp<ABuffer> buffer = static_cast<ABuffer *>(obj.get());
+    sp<ABuffer> buffer;
+    CHECK(msg->findBuffer("buffer", &buffer));
 
     int64_t &skipUntilMediaTimeUs =
         audio
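
In the hunks above, setObject()/findObject() followed by a static_cast are replaced by typed setBuffer()/findBuffer() accessors on AMessage. A rough standalone analogue of the shape of that change; Message and Buffer here are made-up illustration types, with std::shared_ptr standing in for sp<>:

#include <cstdio>
#include <map>
#include <memory>
#include <string>

struct Buffer { size_t size; };

// The point is only that a typed findBuffer() replaces a generic lookup plus
// an unchecked cast at every call site.
class Message {
public:
    void setBuffer(const std::string &name, std::shared_ptr<Buffer> buffer) {
        mBuffers[name] = std::move(buffer);
    }
    bool findBuffer(const std::string &name, std::shared_ptr<Buffer> *out) const {
        auto it = mBuffers.find(name);
        if (it == mBuffers.end()) {
            return false;
        }
        *out = it->second;
        return true;
    }
private:
    std::map<std::string, std::shared_ptr<Buffer> > mBuffers;
};

int main() {
    Message msg;
    msg.setBuffer("buffer", std::make_shared<Buffer>(Buffer{4096}));
    std::shared_ptr<Buffer> buffer;
    if (msg.findBuffer("buffer", &buffer)) {
        std::printf("buffer size %zu\n", buffer->size);
    }
    return 0;
}
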
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index ffc710e..6be14be 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -21,8 +21,6 @@
 #include <media/MediaPlayerInterface.h>
 #include <media/stagefright/foundation/AHandler.h>
 #include <media/stagefright/NativeWindowWrapper.h>
-#include <gui/SurfaceTextureClient.h>
-#include <surfaceflinger/Surface.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 56c2773..460fc98 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -29,8 +29,6 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
-#include <surfaceflinger/Surface.h>
-#include <gui/ISurfaceTexture.h>
 
 namespace android {
 
@@ -214,8 +212,6 @@
 
         buffer->meta()->setInt32("csd", true);
         mCSD.push(buffer);
-
-        msg->setObject("csd", buffer);
     } else if (meta->findData(kKeyESDS, &type, &data, &size)) {
         ESDS esds((const char *)data, size);
         CHECK_EQ(esds.InitCheck(), (status_t)OK);
@@ -242,9 +238,8 @@
     CHECK(msg->findMessage("reply", &reply));
 
 #if 0
-    sp<RefBase> obj;
-    CHECK(msg->findObject("buffer", &obj));
-    sp<ABuffer> outBuffer = static_cast<ABuffer *>(obj.get());
+    sp<ABuffer> outBuffer;
+    CHECK(msg->findBuffer("buffer", &outBuffer));
 #else
     sp<ABuffer> outBuffer;
 #endif
@@ -253,7 +248,7 @@
         outBuffer = mCSD.editItemAt(mCSDIndex++);
         outBuffer->meta()->setInt64("timeUs", 0);
 
-        reply->setObject("buffer", outBuffer);
+        reply->setBuffer("buffer", outBuffer);
         reply->post();
         return;
     }
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 074cb4f..5738ecb 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -60,7 +60,7 @@
         const sp<AMessage> &notifyConsumed) {
     sp<AMessage> msg = new AMessage(kWhatQueueBuffer, id());
     msg->setInt32("audio", static_cast<int32_t>(audio));
-    msg->setObject("buffer", buffer);
+    msg->setBuffer("buffer", buffer);
     msg->setMessage("notifyConsumed", notifyConsumed);
     msg->post();
 }
@@ -376,7 +376,7 @@
     bool tooLate = (mVideoLateByUs > 40000);
 
     if (tooLate) {
-        ALOGV("video late by %lld us (%.2f secs)", lateByUs, lateByUs / 1E6);
+        ALOGV("video late by %lld us (%.2f secs)", mVideoLateByUs, mVideoLateByUs / 1E6);
     } else {
         ALOGV("rendering video at media time %.2f secs", mediaTimeUs / 1E6);
     }
@@ -411,9 +411,8 @@
         return;
     }
 
-    sp<RefBase> obj;
-    CHECK(msg->findObject("buffer", &obj));
-    sp<ABuffer> buffer = static_cast<ABuffer *>(obj.get());
+    sp<ABuffer> buffer;
+    CHECK(msg->findBuffer("buffer", &buffer));
 
     sp<AMessage> notifyConsumed;
     CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
@@ -629,7 +628,7 @@
     }
 
     ALOGV("now paused audio queue has %d entries, video has %d entries",
-         mAudioQueue.size(), mVideoQueue.size());
+          mAudioQueue.size(), mVideoQueue.size());
 
     mPaused = true;
 }
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 6eb0d07..4c65b65 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -218,10 +218,8 @@
             CHECK(msg->findSize("trackIndex", &trackIndex));
             CHECK_LT(trackIndex, mTracks.size());
 
-            sp<RefBase> obj;
-            CHECK(msg->findObject("accessUnit", &obj));
-
-            sp<ABuffer> accessUnit = static_cast<ABuffer *>(obj.get());
+            sp<ABuffer> accessUnit;
+            CHECK(msg->findBuffer("accessUnit", &accessUnit));
 
             int32_t damaged;
             if (accessUnit->meta()->findInt32("damaged", &damaged)
diff --git a/media/libstagefright/AACExtractor.cpp b/media/libstagefright/AACExtractor.cpp
index 52b1200..4d1072f 100644
--- a/media/libstagefright/AACExtractor.cpp
+++ b/media/libstagefright/AACExtractor.cpp
@@ -22,9 +22,10 @@
 #include "include/avc_utils.h"
 
 #include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
@@ -131,18 +132,28 @@
     return frameSize;
 }
 
-AACExtractor::AACExtractor(const sp<DataSource> &source)
+AACExtractor::AACExtractor(
+        const sp<DataSource> &source, const sp<AMessage> &_meta)
     : mDataSource(source),
       mInitCheck(NO_INIT),
       mFrameDurationUs(0) {
-    String8 mimeType;
-    float confidence;
-    if (!SniffAAC(mDataSource, &mimeType, &confidence, NULL)) {
-        return;
+    sp<AMessage> meta = _meta;
+
+    if (meta == NULL) {
+        String8 mimeType;
+        float confidence;
+        sp<AMessage> _meta;
+
+        if (!SniffAAC(mDataSource, &mimeType, &confidence, &meta)) {
+            return;
+        }
     }
 
+    int64_t offset;
+    CHECK(meta->findInt64("offset", &offset));
+
     uint8_t profile, sf_index, channel, header[2];
-    if (mDataSource->readAt(2, &header, 2) < 2) {
+    if (mDataSource->readAt(offset + 2, &header, 2) < 2) {
         return;
     }
 
@@ -156,7 +167,6 @@
 
     mMeta = MakeAACCodecSpecificData(profile, sf_index, channel);
 
-    off64_t offset = 0;
     off64_t streamSize, numFrames = 0;
     size_t frameSize = 0;
     int64_t duration = 0;
@@ -245,7 +255,12 @@
 status_t AACSource::start(MetaData *params) {
     CHECK(!mStarted);
 
-    mOffset = 0;
+    if (mOffsetVector.empty()) {
+        mOffset = 0;
+    } else {
+        mOffset = mOffsetVector.itemAt(0);
+    }
+
     mCurrentTimeUs = 0;
     mGroup = new MediaBufferGroup;
     mGroup->add_buffer(new MediaBuffer(kMaxFrameSize));
@@ -318,10 +333,39 @@
 
 bool SniffAAC(
         const sp<DataSource> &source, String8 *mimeType, float *confidence,
-        sp<AMessage> *) {
+        sp<AMessage> *meta) {
+    off64_t pos = 0;
+
+    for (;;) {
+        uint8_t id3header[10];
+        if (source->readAt(pos, id3header, sizeof(id3header))
+                < (ssize_t)sizeof(id3header)) {
+            return false;
+        }
+
+        if (memcmp("ID3", id3header, 3)) {
+            break;
+        }
+
+        // Skip the ID3v2 header.
+
+        size_t len =
+            ((id3header[6] & 0x7f) << 21)
+            | ((id3header[7] & 0x7f) << 14)
+            | ((id3header[8] & 0x7f) << 7)
+            | (id3header[9] & 0x7f);
+
+        len += 10;
+
+        pos += len;
+
+        ALOGV("skipped ID3 tag, new starting offset is %lld (0x%016llx)",
+             pos, pos);
+    }
+
     uint8_t header[2];
 
-    if (source->readAt(0, &header, 2) != 2) {
+    if (source->readAt(pos, &header, 2) != 2) {
         return false;
     }
 
@@ -329,6 +373,10 @@
     if ((header[0] == 0xff) && ((header[1] & 0xf6) == 0xf0)) {
         *mimeType = MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
         *confidence = 0.2;
+
+        *meta = new AMessage;
+        (*meta)->setInt64("offset", pos);
+
         return true;
     }
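
SniffAAC() above now skips any leading ID3v2 tags before probing for the ADTS sync word. The tag length in the 10-byte ID3 header is a syncsafe integer (four bytes, seven significant bits each, high bit clear), and the skip also covers the header itself. A standalone sketch of that size computation; the helper name is illustrative:

#include <cstdint>
#include <cstdio>

// Number of bytes to skip: the syncsafe payload size at offsets 6..9 plus the
// fixed 10-byte ID3 header, as in the loop in SniffAAC() above.
static uint32_t id3v2BytesToSkip(const uint8_t header[10]) {
    uint32_t len = ((header[6] & 0x7f) << 21)
                 | ((header[7] & 0x7f) << 14)
                 | ((header[8] & 0x7f) << 7)
                 |  (header[9] & 0x7f);
    return len + 10;
}

int main() {
    // "ID3", version 2.3.0, flags 0, syncsafe size 0x00 0x00 0x02 0x01 == 257
    const uint8_t header[10] = {'I', 'D', '3', 3, 0, 0, 0x00, 0x00, 0x02, 0x01};
    std::printf("skip %u bytes before probing for ADTS\n", id3v2BytesToSkip(header));  // 267
    return 0;
}
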
 
diff --git a/media/libstagefright/AACWriter.cpp b/media/libstagefright/AACWriter.cpp
index 1673ccd..9cdb463 100644
--- a/media/libstagefright/AACWriter.cpp
+++ b/media/libstagefright/AACWriter.cpp
@@ -60,7 +60,7 @@
 
 AACWriter::~AACWriter() {
     if (mStarted) {
-        stop();
+        reset();
     }
 
     if (mFd != -1) {
@@ -152,7 +152,7 @@
     return OK;
 }
 
-status_t AACWriter::stop() {
+status_t AACWriter::reset() {
     if (!mStarted) {
         return OK;
     }
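
AACWriter above now keeps its teardown logic in a non-virtual reset() that the destructor calls, instead of invoking stop() from ~AACWriter(). The diff does not state the motivation, but a common reason for this pattern is that virtual dispatch inside a constructor or destructor stops at the class currently being built or torn down, as this standalone illustration shows:

#include <cstdio>

// During ~Base(), the object's dynamic type is Base, so the virtual call
// resolves to Base::stop() and never reaches an override.
struct Base {
    virtual ~Base() { stop(); }
    virtual void stop() { std::puts("Base::stop"); }
};

struct Derived : Base {
    void stop() override { std::puts("Derived::stop"); }
};

int main() {
    Base *writer = new Derived;
    delete writer;  // prints "Base::stop"
    return 0;
}
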
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index ca44ea3..e5ad4b7 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -26,14 +26,12 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 
+#include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/NativeWindowWrapper.h>
 #include <media/stagefright/OMXClient.h>
 #include <media/stagefright/OMXCodec.h>
 
-#include <surfaceflinger/Surface.h>
-#include <gui/SurfaceTextureClient.h>
-
 #include <OMX_Component.h>
 
 namespace android {
@@ -168,15 +166,36 @@
 
 protected:
     virtual bool onMessageReceived(const sp<AMessage> &msg);
+    virtual void stateEntered();
 
 private:
     void onSetup(const sp<AMessage> &msg);
+    bool onAllocateComponent(const sp<AMessage> &msg);
 
     DISALLOW_EVIL_CONSTRUCTORS(UninitializedState);
 };
 
 ////////////////////////////////////////////////////////////////////////////////
 
+struct ACodec::LoadedState : public ACodec::BaseState {
+    LoadedState(ACodec *codec);
+
+protected:
+    virtual bool onMessageReceived(const sp<AMessage> &msg);
+    virtual void stateEntered();
+
+private:
+    friend struct ACodec::UninitializedState;
+
+    bool onConfigureComponent(const sp<AMessage> &msg);
+    void onStart();
+    void onShutdown(bool keepComponentAllocated);
+
+    DISALLOW_EVIL_CONSTRUCTORS(LoadedState);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
 struct ACodec::LoadedToIdleState : public ACodec::BaseState {
     LoadedToIdleState(ACodec *codec);
 
@@ -265,6 +284,8 @@
 private:
     void changeStateIfWeOwnAllBuffers();
 
+    bool mComponentNowIdle;
+
     DISALLOW_EVIL_CONSTRUCTORS(ExecutingToIdleState);
 };
 
@@ -308,9 +329,13 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 ACodec::ACodec()
-    : mNode(NULL),
-      mSentFormat(false) {
+    : mQuirks(0),
+      mNode(NULL),
+      mSentFormat(false),
+      mIsEncoder(false),
+      mShutdownInProgress(false) {
     mUninitializedState = new UninitializedState(this);
+    mLoadedState = new LoadedState(this);
     mLoadedToIdleState = new LoadedToIdleState(this);
     mIdleToExecutingState = new IdleToExecutingState(this);
     mExecutingState = new ExecutingState(this);
@@ -341,6 +366,22 @@
     msg->post();
 }
 
+void ACodec::initiateAllocateComponent(const sp<AMessage> &msg) {
+    msg->setWhat(kWhatAllocateComponent);
+    msg->setTarget(id());
+    msg->post();
+}
+
+void ACodec::initiateConfigureComponent(const sp<AMessage> &msg) {
+    msg->setWhat(kWhatConfigureComponent);
+    msg->setTarget(id());
+    msg->post();
+}
+
+void ACodec::initiateStart() {
+    (new AMessage(kWhatStart, id()))->post();
+}
+
 void ACodec::signalFlush() {
     ALOGV("[%s] signalFlush", mComponentName.c_str());
     (new AMessage(kWhatFlush, id()))->post();
@@ -350,8 +391,10 @@
     (new AMessage(kWhatResume, id()))->post();
 }
 
-void ACodec::initiateShutdown() {
-    (new AMessage(kWhatShutdown, id()))->post();
+void ACodec::initiateShutdown(bool keepComponentAllocated) {
+    sp<AMessage> msg = new AMessage(kWhatShutdown, id());
+    msg->setInt32("keepComponentAllocated", keepComponentAllocated);
+    msg->post();
 }
 
 status_t ACodec::allocateBuffersOnPort(OMX_U32 portIndex) {
@@ -360,62 +403,71 @@
     CHECK(mDealer[portIndex] == NULL);
     CHECK(mBuffers[portIndex].isEmpty());
 
+    status_t err;
     if (mNativeWindow != NULL && portIndex == kPortIndexOutput) {
-        return allocateOutputBuffersFromNativeWindow();
+        err = allocateOutputBuffersFromNativeWindow();
+    } else {
+        OMX_PARAM_PORTDEFINITIONTYPE def;
+        InitOMXParams(&def);
+        def.nPortIndex = portIndex;
+
+        err = mOMX->getParameter(
+                mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+
+        if (err == OK) {
+            ALOGV("[%s] Allocating %lu buffers of size %lu on %s port",
+                    mComponentName.c_str(),
+                    def.nBufferCountActual, def.nBufferSize,
+                    portIndex == kPortIndexInput ? "input" : "output");
+
+            size_t totalSize = def.nBufferCountActual * def.nBufferSize;
+            mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
+
+            for (OMX_U32 i = 0; i < def.nBufferCountActual; ++i) {
+                sp<IMemory> mem = mDealer[portIndex]->allocate(def.nBufferSize);
+                CHECK(mem.get() != NULL);
+
+                IOMX::buffer_id buffer;
+
+                uint32_t requiresAllocateBufferBit =
+                    (portIndex == kPortIndexInput)
+                        ? OMXCodec::kRequiresAllocateBufferOnInputPorts
+                        : OMXCodec::kRequiresAllocateBufferOnOutputPorts;
+
+                if (mQuirks & requiresAllocateBufferBit) {
+                    err = mOMX->allocateBufferWithBackup(
+                            mNode, portIndex, mem, &buffer);
+                } else {
+                    err = mOMX->useBuffer(mNode, portIndex, mem, &buffer);
+                }
+
+                BufferInfo info;
+                info.mBufferID = buffer;
+                info.mStatus = BufferInfo::OWNED_BY_US;
+                info.mData = new ABuffer(mem->pointer(), def.nBufferSize);
+                mBuffers[portIndex].push(info);
+            }
+        }
     }
 
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-    def.nPortIndex = portIndex;
-
-    status_t err = mOMX->getParameter(
-            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-
     if (err != OK) {
         return err;
     }
 
-    ALOGV("[%s] Allocating %lu buffers of size %lu on %s port",
-            mComponentName.c_str(),
-            def.nBufferCountActual, def.nBufferSize,
-            portIndex == kPortIndexInput ? "input" : "output");
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", ACodec::kWhatBuffersAllocated);
 
-    size_t totalSize = def.nBufferCountActual * def.nBufferSize;
-    mDealer[portIndex] = new MemoryDealer(totalSize, "OMXCodec");
+    notify->setInt32("portIndex", portIndex);
+    for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
+        AString name = StringPrintf("buffer-id_%d", i);
+        notify->setPointer(name.c_str(), mBuffers[portIndex][i].mBufferID);
 
-    for (OMX_U32 i = 0; i < def.nBufferCountActual; ++i) {
-        sp<IMemory> mem = mDealer[portIndex]->allocate(def.nBufferSize);
-        CHECK(mem.get() != NULL);
-
-        IOMX::buffer_id buffer;
-
-        if (!strcasecmp(
-                    mComponentName.c_str(), "OMX.TI.DUCATI1.VIDEO.DECODER")) {
-            if (portIndex == kPortIndexInput && i == 0) {
-                // Only log this warning once per allocation round.
-
-                ALOGW("OMX.TI.DUCATI1.VIDEO.DECODER requires the use of "
-                     "OMX_AllocateBuffer instead of the preferred "
-                     "OMX_UseBuffer. Vendor must fix this.");
-            }
-
-            err = mOMX->allocateBufferWithBackup(
-                    mNode, portIndex, mem, &buffer);
-        } else {
-            err = mOMX->useBuffer(mNode, portIndex, mem, &buffer);
-        }
-
-        if (err != OK) {
-            return err;
-        }
-
-        BufferInfo info;
-        info.mBufferID = buffer;
-        info.mStatus = BufferInfo::OWNED_BY_US;
-        info.mData = new ABuffer(mem->pointer(), def.nBufferSize);
-        mBuffers[portIndex].push(info);
+        name = StringPrintf("data_%d", i);
+        notify->setBuffer(name.c_str(), mBuffers[portIndex][i].mData);
     }
 
+    notify->post();
+
     return OK;
 }
 
@@ -671,7 +723,7 @@
     return NULL;
 }
 
-void ACodec::setComponentRole(
+status_t ACodec::setComponentRole(
         bool isEncoder, const char *mime) {
     struct MimeToRole {
         const char *mime;
@@ -694,12 +746,18 @@
             "audio_decoder.aac", "audio_encoder.aac" },
         { MEDIA_MIMETYPE_AUDIO_VORBIS,
             "audio_decoder.vorbis", "audio_encoder.vorbis" },
+        { MEDIA_MIMETYPE_AUDIO_G711_MLAW,
+            "audio_decoder.g711mlaw", "audio_encoder.g711mlaw" },
+        { MEDIA_MIMETYPE_AUDIO_G711_ALAW,
+            "audio_decoder.g711alaw", "audio_encoder.g711alaw" },
         { MEDIA_MIMETYPE_VIDEO_AVC,
             "video_decoder.avc", "video_encoder.avc" },
         { MEDIA_MIMETYPE_VIDEO_MPEG4,
             "video_decoder.mpeg4", "video_encoder.mpeg4" },
         { MEDIA_MIMETYPE_VIDEO_H263,
             "video_decoder.h263", "video_encoder.h263" },
+        { MEDIA_MIMETYPE_VIDEO_VPX,
+            "video_decoder.vpx", "video_encoder.vpx" },
     };
 
     static const size_t kNumMimeToRole =
@@ -713,7 +771,7 @@
     }
 
     if (i == kNumMimeToRole) {
-        return;
+        return ERROR_UNSUPPORTED;
     }
 
     const char *role =
@@ -736,50 +794,79 @@
         if (err != OK) {
             ALOGW("[%s] Failed to set standard component role '%s'.",
                  mComponentName.c_str(), role);
+
+            return err;
         }
     }
+
+    return OK;
 }
 
-void ACodec::configureCodec(
+status_t ACodec::configureCodec(
         const char *mime, const sp<AMessage> &msg) {
-    setComponentRole(false /* isEncoder */, mime);
+    int32_t encoder;
+    if (!msg->findInt32("encoder", &encoder)) {
+        encoder = false;
+    }
+
+    mIsEncoder = encoder;
+
+    status_t err = setComponentRole(encoder /* isEncoder */, mime);
+
+    if (err != OK) {
+        return err;
+    }
+
+    int32_t bitRate = 0;
+    if (encoder && !msg->findInt32("bitrate", &bitRate)) {
+        return INVALID_OPERATION;
+    }
 
     if (!strncasecmp(mime, "video/", 6)) {
-        int32_t width, height;
-        CHECK(msg->findInt32("width", &width));
-        CHECK(msg->findInt32("height", &height));
-
-        CHECK_EQ(setupVideoDecoder(mime, width, height),
-                 (status_t)OK);
+        if (encoder) {
+            err = setupVideoEncoder(mime, msg);
+        } else {
+            int32_t width, height;
+            if (!msg->findInt32("width", &width)
+                    || !msg->findInt32("height", &height)) {
+                err = INVALID_OPERATION;
+            } else {
+                err = setupVideoDecoder(mime, width, height);
+            }
+        }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
         int32_t numChannels, sampleRate;
-        CHECK(msg->findInt32("channel-count", &numChannels));
-        CHECK(msg->findInt32("sample-rate", &sampleRate));
-
-        CHECK_EQ(setupAACDecoder(numChannels, sampleRate), (status_t)OK);
+        if (!msg->findInt32("channel-count", &numChannels)
+                || !msg->findInt32("sample-rate", &sampleRate)) {
+            err = INVALID_OPERATION;
+        } else {
+            err = setupAACCodec(encoder, numChannels, sampleRate, bitRate);
+        }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) {
-        CHECK_EQ(setupAMRDecoder(false /* isWAMR */), (status_t)OK);
+        err = setupAMRCodec(encoder, false /* isWAMR */, bitRate);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB)) {
-        CHECK_EQ(setupAMRDecoder(true /* isWAMR */), (status_t)OK);
+        err = setupAMRCodec(encoder, true /* isWAMR */, bitRate);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_G711_ALAW)
             || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_G711_MLAW)) {
         // These are PCM-like formats with a fixed sample rate but
         // a variable number of channels.
 
         int32_t numChannels;
-        CHECK(msg->findInt32("channel-count", &numChannels));
-
-        CHECK_EQ(setupG711Decoder(numChannels), (status_t)OK);
+        if (!msg->findInt32("channel-count", &numChannels)) {
+            err = INVALID_OPERATION;
+        } else {
+            err = setupG711Codec(encoder, numChannels);
+        }
     }
 
     int32_t maxInputSize;
     if (msg->findInt32("max-input-size", &maxInputSize)) {
-        CHECK_EQ(setMinBufferSize(kPortIndexInput, (size_t)maxInputSize),
-                 (status_t)OK);
+        err = setMinBufferSize(kPortIndexInput, (size_t)maxInputSize);
     } else if (!strcmp("OMX.Nvidia.aac.decoder", mComponentName.c_str())) {
-        CHECK_EQ(setMinBufferSize(kPortIndexInput, 8192),  // XXX
-                 (status_t)OK);
+        err = setMinBufferSize(kPortIndexInput, 8192);  // XXX
     }
+
+    return err;
 }
 
 status_t ACodec::setMinBufferSize(OMX_U32 portIndex, size_t size) {
@@ -819,12 +906,113 @@
     return OK;
 }
 
-status_t ACodec::setupAACDecoder(int32_t numChannels, int32_t sampleRate) {
+status_t ACodec::selectAudioPortFormat(
+        OMX_U32 portIndex, OMX_AUDIO_CODINGTYPE desiredFormat) {
+    OMX_AUDIO_PARAM_PORTFORMATTYPE format;
+    InitOMXParams(&format);
+
+    format.nPortIndex = portIndex;
+    for (OMX_U32 index = 0;; ++index) {
+        format.nIndex = index;
+
+        status_t err = mOMX->getParameter(
+                mNode, OMX_IndexParamAudioPortFormat,
+                &format, sizeof(format));
+
+        if (err != OK) {
+            return err;
+        }
+
+        if (format.eEncoding == desiredFormat) {
+            break;
+        }
+    }
+
+    return mOMX->setParameter(
+            mNode, OMX_IndexParamAudioPortFormat, &format, sizeof(format));
+}
+
+status_t ACodec::setupAACCodec(
+        bool encoder,
+        int32_t numChannels, int32_t sampleRate, int32_t bitRate) {
+    status_t err = setupRawAudioFormat(
+            encoder ? kPortIndexInput : kPortIndexOutput,
+            sampleRate,
+            numChannels);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (encoder) {
+        err = selectAudioPortFormat(kPortIndexOutput, OMX_AUDIO_CodingAAC);
+
+        if (err != OK) {
+            return err;
+        }
+
+        OMX_PARAM_PORTDEFINITIONTYPE def;
+        InitOMXParams(&def);
+        def.nPortIndex = kPortIndexOutput;
+
+        err = mOMX->getParameter(
+                mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+
+        if (err != OK) {
+            return err;
+        }
+
+        def.format.audio.bFlagErrorConcealment = OMX_TRUE;
+        def.format.audio.eEncoding = OMX_AUDIO_CodingAAC;
+
+        err = mOMX->setParameter(
+                mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+
+        if (err != OK) {
+            return err;
+        }
+
+        OMX_AUDIO_PARAM_AACPROFILETYPE profile;
+        InitOMXParams(&profile);
+        profile.nPortIndex = kPortIndexOutput;
+
+        err = mOMX->getParameter(
+                mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
+
+        if (err != OK) {
+            return err;
+        }
+
+        profile.nChannels = numChannels;
+
+        profile.eChannelMode =
+            (numChannels == 1)
+                ? OMX_AUDIO_ChannelModeMono: OMX_AUDIO_ChannelModeStereo;
+
+        profile.nSampleRate = sampleRate;
+        profile.nBitRate = bitRate;
+        profile.nAudioBandWidth = 0;
+        profile.nFrameLength = 0;
+        profile.nAACtools = OMX_AUDIO_AACToolAll;
+        profile.nAACERtools = OMX_AUDIO_AACERNone;
+        profile.eAACProfile = OMX_AUDIO_AACObjectLC;
+        profile.eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4FF;
+
+        err = mOMX->setParameter(
+                mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
+
+        if (err != OK) {
+            return err;
+        }
+
+        return err;
+    }
+
     OMX_AUDIO_PARAM_AACPROFILETYPE profile;
     InitOMXParams(&profile);
     profile.nPortIndex = kPortIndexInput;
 
-    status_t err = mOMX->getParameter(
+    err = mOMX->getParameter(
             mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
 
     if (err != OK) {
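
selectAudioPortFormat() above walks the component's supported audio encodings by increasing nIndex until getParameter() fails or the desired coding type appears, then commits that entry with setParameter(). The same enumerate-until-match idea in a self-contained sketch; plain containers stand in for the OMX calls and the names are illustrative:

#include <cstdio>
#include <vector>

// Probe index 0, 1, 2, ... until the desired coding type shows up (or the
// enumeration runs out), then commit it -- the shape of the loop above.
static bool selectFormat(const std::vector<int> &supported, int desired, int *chosen) {
    for (size_t index = 0; index < supported.size(); ++index) {
        if (supported[index] == desired) {
            *chosen = supported[index];  // the setParameter() step
            return true;
        }
    }
    return false;  // the component never reported the desired encoding
}

int main() {
    std::vector<int> supported;
    supported.push_back(1);
    supported.push_back(3);
    supported.push_back(7);  // stand-ins for OMX_AUDIO_CODINGTYPE values

    int chosen = 0;
    if (selectFormat(supported, 7, &chosen)) {
        std::printf("selected coding type %d\n", chosen);
    }
    return 0;
}
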
@@ -835,16 +1023,59 @@
     profile.nSampleRate = sampleRate;
     profile.eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4ADTS;
 
-    err = mOMX->setParameter(
+    return mOMX->setParameter(
             mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile));
-
-    return err;
 }
 
-status_t ACodec::setupAMRDecoder(bool isWAMR) {
+static OMX_AUDIO_AMRBANDMODETYPE pickModeFromBitRate(
+        bool isAMRWB, int32_t bps) {
+    if (isAMRWB) {
+        if (bps <= 6600) {
+            return OMX_AUDIO_AMRBandModeWB0;
+        } else if (bps <= 8850) {
+            return OMX_AUDIO_AMRBandModeWB1;
+        } else if (bps <= 12650) {
+            return OMX_AUDIO_AMRBandModeWB2;
+        } else if (bps <= 14250) {
+            return OMX_AUDIO_AMRBandModeWB3;
+        } else if (bps <= 15850) {
+            return OMX_AUDIO_AMRBandModeWB4;
+        } else if (bps <= 18250) {
+            return OMX_AUDIO_AMRBandModeWB5;
+        } else if (bps <= 19850) {
+            return OMX_AUDIO_AMRBandModeWB6;
+        } else if (bps <= 23050) {
+            return OMX_AUDIO_AMRBandModeWB7;
+        }
+
+        // 23850 bps
+        return OMX_AUDIO_AMRBandModeWB8;
+    } else {  // AMRNB
+        if (bps <= 4750) {
+            return OMX_AUDIO_AMRBandModeNB0;
+        } else if (bps <= 5150) {
+            return OMX_AUDIO_AMRBandModeNB1;
+        } else if (bps <= 5900) {
+            return OMX_AUDIO_AMRBandModeNB2;
+        } else if (bps <= 6700) {
+            return OMX_AUDIO_AMRBandModeNB3;
+        } else if (bps <= 7400) {
+            return OMX_AUDIO_AMRBandModeNB4;
+        } else if (bps <= 7950) {
+            return OMX_AUDIO_AMRBandModeNB5;
+        } else if (bps <= 10200) {
+            return OMX_AUDIO_AMRBandModeNB6;
+        }
+
+        // 12200 bps
+        return OMX_AUDIO_AMRBandModeNB7;
+    }
+}
+
+status_t ACodec::setupAMRCodec(bool encoder, bool isWAMR, int32_t bitrate) {
     OMX_AUDIO_PARAM_AMRTYPE def;
     InitOMXParams(&def);
-    def.nPortIndex = kPortIndexInput;
+    def.nPortIndex = encoder ? kPortIndexOutput : kPortIndexInput;
 
     status_t err =
         mOMX->getParameter(mNode, OMX_IndexParamAudioAmr, &def, sizeof(def));
@@ -854,14 +1085,24 @@
     }
 
     def.eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
+    def.eAMRBandMode = pickModeFromBitRate(isWAMR, bitrate);
 
-    def.eAMRBandMode =
-        isWAMR ? OMX_AUDIO_AMRBandModeWB0 : OMX_AUDIO_AMRBandModeNB0;
+    err = mOMX->setParameter(
+            mNode, OMX_IndexParamAudioAmr, &def, sizeof(def));
 
-    return mOMX->setParameter(mNode, OMX_IndexParamAudioAmr, &def, sizeof(def));
+    if (err != OK) {
+        return err;
+    }
+
+    return setupRawAudioFormat(
+            encoder ? kPortIndexInput : kPortIndexOutput,
+            isWAMR ? 16000 : 8000 /* sampleRate */,
+            1 /* numChannels */);
 }
 
-status_t ACodec::setupG711Decoder(int32_t numChannels) {
+status_t ACodec::setupG711Codec(bool encoder, int32_t numChannels) {
+    CHECK(!encoder);  // XXX TODO
+
     return setupRawAudioFormat(
             kPortIndexInput, 8000 /* sampleRate */, numChannels);
 }
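
pickModeFromBitRate() above buckets the requested bit rate into the lowest AMR band mode whose nominal rate is at least that high, clamping to the top mode. The same bucketing over the narrowband rates as a standalone sketch; the rate values are copied from the table in the diff and the returned index maps to NB0..NB7:

#include <cstdio>

// Narrowband AMR nominal rates in bps, in the order band modes NB0..NB7
// cover them.
static const int kAmrNbRates[8] = {4750, 5150, 5900, 6700, 7400, 7950, 10200, 12200};

// Index of the lowest mode whose rate covers bps, or 7 (the 12200 bps mode)
// if none of the lower ones do -- the same shape as pickModeFromBitRate().
static int pickNbModeIndex(int bps) {
    for (int i = 0; i < 7; ++i) {
        if (bps <= kAmrNbRates[i]) {
            return i;
        }
    }
    return 7;
}

int main() {
    std::printf("%d\n", pickNbModeIndex(5000));   // 1 -> OMX_AUDIO_AMRBandModeNB1
    std::printf("%d\n", pickNbModeIndex(8000));   // 6 -> OMX_AUDIO_AMRBandModeNB6
    std::printf("%d\n", pickNbModeIndex(12200));  // 7 -> OMX_AUDIO_AMRBandModeNB7
    return 0;
}
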
@@ -1001,22 +1242,36 @@
             &format, sizeof(format));
 }
 
-status_t ACodec::setupVideoDecoder(
-        const char *mime, int32_t width, int32_t height) {
-    OMX_VIDEO_CODINGTYPE compressionFormat = OMX_VIDEO_CodingUnused;
+static status_t GetVideoCodingTypeFromMime(
+        const char *mime, OMX_VIDEO_CODINGTYPE *codingType) {
     if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
-        compressionFormat = OMX_VIDEO_CodingAVC;
+        *codingType = OMX_VIDEO_CodingAVC;
     } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
-        compressionFormat = OMX_VIDEO_CodingMPEG4;
+        *codingType = OMX_VIDEO_CodingMPEG4;
     } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
-        compressionFormat = OMX_VIDEO_CodingH263;
+        *codingType = OMX_VIDEO_CodingH263;
     } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG2, mime)) {
-        compressionFormat = OMX_VIDEO_CodingMPEG2;
+        *codingType = OMX_VIDEO_CodingMPEG2;
+    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_VPX, mime)) {
+        *codingType = OMX_VIDEO_CodingVPX;
     } else {
-        TRESPASS();
+        *codingType = OMX_VIDEO_CodingUnused;
+        return ERROR_UNSUPPORTED;
     }
 
-    status_t err = setVideoPortFormatType(
+    return OK;
+}
+
+status_t ACodec::setupVideoDecoder(
+        const char *mime, int32_t width, int32_t height) {
+    OMX_VIDEO_CODINGTYPE compressionFormat;
+    status_t err = GetVideoCodingTypeFromMime(mime, &compressionFormat);
+
+    if (err != OK) {
+        return err;
+    }
+
+    err = setVideoPortFormatType(
             kPortIndexInput, compressionFormat, OMX_COLOR_FormatUnused);
 
     if (err != OK) {
@@ -1046,6 +1301,489 @@
     return OK;
 }
 
+status_t ACodec::setupVideoEncoder(const char *mime, const sp<AMessage> &msg) {
+    int32_t tmp;
+    if (!msg->findInt32("color-format", &tmp)) {
+        return INVALID_OPERATION;
+    }
+
+    OMX_COLOR_FORMATTYPE colorFormat =
+        static_cast<OMX_COLOR_FORMATTYPE>(tmp);
+
+    status_t err = setVideoPortFormatType(
+            kPortIndexInput, OMX_VIDEO_CodingUnused, colorFormat);
+
+    if (err != OK) {
+        ALOGE("[%s] does not support color format %d",
+              mComponentName.c_str(), colorFormat);
+
+        return err;
+    }
+
+    /* Input port configuration */
+
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+
+    OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video;
+
+    def.nPortIndex = kPortIndexInput;
+
+    err = mOMX->getParameter(
+            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+
+    if (err != OK) {
+        return err;
+    }
+
+    int32_t width, height, bitrate;
+    if (!msg->findInt32("width", &width)
+            || !msg->findInt32("height", &height)
+            || !msg->findInt32("bitrate", &bitrate)) {
+        return INVALID_OPERATION;
+    }
+
+    video_def->nFrameWidth = width;
+    video_def->nFrameHeight = height;
+
+    int32_t stride;
+    if (!msg->findInt32("stride", &stride)) {
+        stride = width;
+    }
+
+    video_def->nStride = stride;
+
+    int32_t sliceHeight;
+    if (!msg->findInt32("slice-height", &sliceHeight)) {
+        sliceHeight = height;
+    }
+
+    video_def->nSliceHeight = sliceHeight;
+
+    def.nBufferSize = (video_def->nStride * video_def->nSliceHeight * 3) / 2;
+
+    float frameRate;
+    if (!msg->findFloat("frame-rate", &frameRate)) {
+        int32_t tmp;
+        if (!msg->findInt32("frame-rate", &tmp)) {
+            return INVALID_OPERATION;
+        }
+        frameRate = (float)tmp;
+    }
+
+    video_def->xFramerate = (OMX_U32)(frameRate * 65536.0f);
+    video_def->eCompressionFormat = OMX_VIDEO_CodingUnused;
+    video_def->eColorFormat = colorFormat;
+
+    err = mOMX->setParameter(
+            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+
+    if (err != OK) {
+        ALOGE("[%s] failed to set input port definition parameters.",
+              mComponentName.c_str());
+
+        return err;
+    }
+
+    /* Output port configuration */
+
+    OMX_VIDEO_CODINGTYPE compressionFormat;
+    err = GetVideoCodingTypeFromMime(mime, &compressionFormat);
+
+    if (err != OK) {
+        return err;
+    }
+
+    err = setVideoPortFormatType(
+            kPortIndexOutput, compressionFormat, OMX_COLOR_FormatUnused);
+
+    if (err != OK) {
+        ALOGE("[%s] does not support compression format %d",
+             mComponentName.c_str(), compressionFormat);
+
+        return err;
+    }
+
+    def.nPortIndex = kPortIndexOutput;
+
+    err = mOMX->getParameter(
+            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+
+    if (err != OK) {
+        return err;
+    }
+
+    video_def->nFrameWidth = width;
+    video_def->nFrameHeight = height;
+    video_def->xFramerate = 0;
+    video_def->nBitrate = bitrate;
+    video_def->eCompressionFormat = compressionFormat;
+    video_def->eColorFormat = OMX_COLOR_FormatUnused;
+
+    err = mOMX->setParameter(
+            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+
+    if (err != OK) {
+        ALOGE("[%s] failed to set output port definition parameters.",
+              mComponentName.c_str());
+
+        return err;
+    }
+
+    switch (compressionFormat) {
+        case OMX_VIDEO_CodingMPEG4:
+            err = setupMPEG4EncoderParameters(msg);
+            break;
+
+        case OMX_VIDEO_CodingH263:
+            err = setupH263EncoderParameters(msg);
+            break;
+
+        case OMX_VIDEO_CodingAVC:
+            err = setupAVCEncoderParameters(msg);
+            break;
+
+        default:
+            break;
+    }
+
+    ALOGI("setupVideoEncoder succeeded");
+
+    return err;
+}
+
+static OMX_U32 setPFramesSpacing(int32_t iFramesInterval, int32_t frameRate) {
+    if (iFramesInterval < 0) {
+        return 0xFFFFFFFF;
+    } else if (iFramesInterval == 0) {
+        return 0;
+    }
+    OMX_U32 ret = frameRate * iFramesInterval;
+    CHECK(ret > 1);
+    return ret;
+}
+
+status_t ACodec::setupMPEG4EncoderParameters(const sp<AMessage> &msg) {
+    int32_t bitrate, iFrameInterval;
+    if (!msg->findInt32("bitrate", &bitrate)
+            || !msg->findInt32("i-frame-interval", &iFrameInterval)) {
+        return INVALID_OPERATION;
+    }
+
+    float frameRate;
+    if (!msg->findFloat("frame-rate", &frameRate)) {
+        int32_t tmp;
+        if (!msg->findInt32("frame-rate", &tmp)) {
+            return INVALID_OPERATION;
+        }
+        frameRate = (float)tmp;
+    }
+
+    OMX_VIDEO_PARAM_MPEG4TYPE mpeg4type;
+    InitOMXParams(&mpeg4type);
+    mpeg4type.nPortIndex = kPortIndexOutput;
+
+    status_t err = mOMX->getParameter(
+            mNode, OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type));
+
+    if (err != OK) {
+        return err;
+    }
+
+    mpeg4type.nSliceHeaderSpacing = 0;
+    mpeg4type.bSVH = OMX_FALSE;
+    mpeg4type.bGov = OMX_FALSE;
+
+    mpeg4type.nAllowedPictureTypes =
+        OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP;
+
+    mpeg4type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate);
+    if (mpeg4type.nPFrames == 0) {
+        mpeg4type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI;
+    }
+    mpeg4type.nBFrames = 0;
+    mpeg4type.nIDCVLCThreshold = 0;
+    mpeg4type.bACPred = OMX_TRUE;
+    mpeg4type.nMaxPacketSize = 256;
+    mpeg4type.nTimeIncRes = 1000;
+    mpeg4type.nHeaderExtension = 0;
+    mpeg4type.bReversibleVLC = OMX_FALSE;
+
+    int32_t profile;
+    if (msg->findInt32("profile", &profile)) {
+        int32_t level;
+        if (!msg->findInt32("level", &level)) {
+            return INVALID_OPERATION;
+        }
+
+        err = verifySupportForProfileAndLevel(profile, level);
+
+        if (err != OK) {
+            return err;
+        }
+
+        mpeg4type.eProfile = static_cast<OMX_VIDEO_MPEG4PROFILETYPE>(profile);
+        mpeg4type.eLevel = static_cast<OMX_VIDEO_MPEG4LEVELTYPE>(level);
+    }
+
+    err = mOMX->setParameter(
+            mNode, OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type));
+
+    if (err != OK) {
+        return err;
+    }
+
+    err = configureBitrate(bitrate);
+
+    if (err != OK) {
+        return err;
+    }
+
+    return setupErrorCorrectionParameters();
+}
+
+status_t ACodec::setupH263EncoderParameters(const sp<AMessage> &msg) {
+    int32_t bitrate, iFrameInterval;
+    if (!msg->findInt32("bitrate", &bitrate)
+            || !msg->findInt32("i-frame-interval", &iFrameInterval)) {
+        return INVALID_OPERATION;
+    }
+
+    float frameRate;
+    if (!msg->findFloat("frame-rate", &frameRate)) {
+        int32_t tmp;
+        if (!msg->findInt32("frame-rate", &tmp)) {
+            return INVALID_OPERATION;
+        }
+        frameRate = (float)tmp;
+    }
+
+    OMX_VIDEO_PARAM_H263TYPE h263type;
+    InitOMXParams(&h263type);
+    h263type.nPortIndex = kPortIndexOutput;
+
+    status_t err = mOMX->getParameter(
+            mNode, OMX_IndexParamVideoH263, &h263type, sizeof(h263type));
+
+    if (err != OK) {
+        return err;
+    }
+
+    h263type.nAllowedPictureTypes =
+        OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP;
+
+    h263type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate);
+    if (h263type.nPFrames == 0) {
+        h263type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI;
+    }
+    h263type.nBFrames = 0;
+
+    int32_t profile;
+    if (msg->findInt32("profile", &profile)) {
+        int32_t level;
+        if (!msg->findInt32("level", &level)) {
+            return INVALID_OPERATION;
+        }
+
+        err = verifySupportForProfileAndLevel(profile, level);
+
+        if (err != OK) {
+            return err;
+        }
+
+        h263type.eProfile = static_cast<OMX_VIDEO_H263PROFILETYPE>(profile);
+        h263type.eLevel = static_cast<OMX_VIDEO_H263LEVELTYPE>(level);
+    }
+
+    h263type.bPLUSPTYPEAllowed = OMX_FALSE;
+    h263type.bForceRoundingTypeToZero = OMX_FALSE;
+    h263type.nPictureHeaderRepetition = 0;
+    h263type.nGOBHeaderInterval = 0;
+
+    err = mOMX->setParameter(
+            mNode, OMX_IndexParamVideoH263, &h263type, sizeof(h263type));
+
+    if (err != OK) {
+        return err;
+    }
+
+    err = configureBitrate(bitrate);
+
+    if (err != OK) {
+        return err;
+    }
+
+    return setupErrorCorrectionParameters();
+}
+
+status_t ACodec::setupAVCEncoderParameters(const sp<AMessage> &msg) {
+    int32_t bitrate, iFrameInterval;
+    if (!msg->findInt32("bitrate", &bitrate)
+            || !msg->findInt32("i-frame-interval", &iFrameInterval)) {
+        return INVALID_OPERATION;
+    }
+
+    float frameRate;
+    if (!msg->findFloat("frame-rate", &frameRate)) {
+        int32_t tmp;
+        if (!msg->findInt32("frame-rate", &tmp)) {
+            return INVALID_OPERATION;
+        }
+        frameRate = (float)tmp;
+    }
+
+    OMX_VIDEO_PARAM_AVCTYPE h264type;
+    InitOMXParams(&h264type);
+    h264type.nPortIndex = kPortIndexOutput;
+
+    status_t err = mOMX->getParameter(
+            mNode, OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
+
+    if (err != OK) {
+        return err;
+    }
+
+    h264type.nAllowedPictureTypes =
+        OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP;
+
+    int32_t profile;
+    if (msg->findInt32("profile", &profile)) {
+        int32_t level;
+        if (!msg->findInt32("level", &level)) {
+            return INVALID_OPERATION;
+        }
+
+        err = verifySupportForProfileAndLevel(profile, level);
+
+        if (err != OK) {
+            return err;
+        }
+
+        h264type.eProfile = static_cast<OMX_VIDEO_AVCPROFILETYPE>(profile);
+        h264type.eLevel = static_cast<OMX_VIDEO_AVCLEVELTYPE>(level);
+    }
+
+    // XXX: force the TI Ducati encoder to baseline profile for now.
+    if (!strncmp(mComponentName.c_str(), "OMX.TI.DUCATI1", 14)) {
+        h264type.eProfile = OMX_VIDEO_AVCProfileBaseline;
+    }
+
+    if (h264type.eProfile == OMX_VIDEO_AVCProfileBaseline) {
+        h264type.nSliceHeaderSpacing = 0;
+        h264type.bUseHadamard = OMX_TRUE;
+        h264type.nRefFrames = 1;
+        h264type.nBFrames = 0;
+        h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate);
+        if (h264type.nPFrames == 0) {
+            h264type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI;
+        }
+        h264type.nRefIdx10ActiveMinus1 = 0;
+        h264type.nRefIdx11ActiveMinus1 = 0;
+        h264type.bEntropyCodingCABAC = OMX_FALSE;
+        h264type.bWeightedPPrediction = OMX_FALSE;
+        h264type.bconstIpred = OMX_FALSE;
+        h264type.bDirect8x8Inference = OMX_FALSE;
+        h264type.bDirectSpatialTemporal = OMX_FALSE;
+        h264type.nCabacInitIdc = 0;
+    }
+
+    if (h264type.nBFrames != 0) {
+        h264type.nAllowedPictureTypes |= OMX_VIDEO_PictureTypeB;
+    }
+
+    h264type.bEnableUEP = OMX_FALSE;
+    h264type.bEnableFMO = OMX_FALSE;
+    h264type.bEnableASO = OMX_FALSE;
+    h264type.bEnableRS = OMX_FALSE;
+    h264type.bFrameMBsOnly = OMX_TRUE;
+    h264type.bMBAFF = OMX_FALSE;
+    h264type.eLoopFilterMode = OMX_VIDEO_AVCLoopFilterEnable;
+
+    if (!strcasecmp("OMX.Nvidia.h264.encoder", mComponentName.c_str())) {
+        h264type.eLevel = OMX_VIDEO_AVCLevelMax;
+    }
+
+    err = mOMX->setParameter(
+            mNode, OMX_IndexParamVideoAvc, &h264type, sizeof(h264type));
+
+    if (err != OK) {
+        return err;
+    }
+
+    return configureBitrate(bitrate);
+}
+
+status_t ACodec::verifySupportForProfileAndLevel(
+        int32_t profile, int32_t level) {
+    OMX_VIDEO_PARAM_PROFILELEVELTYPE params;
+    InitOMXParams(&params);
+    params.nPortIndex = kPortIndexOutput;
+
+    for (params.nProfileIndex = 0;; ++params.nProfileIndex) {
+        status_t err = mOMX->getParameter(
+                mNode,
+                OMX_IndexParamVideoProfileLevelQuerySupported,
+                &params,
+                sizeof(params));
+
+        if (err != OK) {
+            return err;
+        }
+
+        int32_t supportedProfile = static_cast<int32_t>(params.eProfile);
+        int32_t supportedLevel = static_cast<int32_t>(params.eLevel);
+
+        if (profile == supportedProfile && level <= supportedLevel) {
+            return OK;
+        }
+    }
+}
+
+status_t ACodec::configureBitrate(int32_t bitrate) {
+    OMX_VIDEO_PARAM_BITRATETYPE bitrateType;
+    InitOMXParams(&bitrateType);
+    bitrateType.nPortIndex = kPortIndexOutput;
+
+    status_t err = mOMX->getParameter(
+            mNode, OMX_IndexParamVideoBitrate,
+            &bitrateType, sizeof(bitrateType));
+
+    if (err != OK) {
+        return err;
+    }
+
+    bitrateType.eControlRate = OMX_Video_ControlRateVariable;
+    bitrateType.nTargetBitrate = bitrate;
+
+    return mOMX->setParameter(
+            mNode, OMX_IndexParamVideoBitrate,
+            &bitrateType, sizeof(bitrateType));
+}
+
+status_t ACodec::setupErrorCorrectionParameters() {
+    OMX_VIDEO_PARAM_ERRORCORRECTIONTYPE errorCorrectionType;
+    InitOMXParams(&errorCorrectionType);
+    errorCorrectionType.nPortIndex = kPortIndexOutput;
+
+    status_t err = mOMX->getParameter(
+            mNode, OMX_IndexParamVideoErrorCorrection,
+            &errorCorrectionType, sizeof(errorCorrectionType));
+
+    if (err != OK) {
+        return OK;  // Optional feature; ignore this failure.
+    }
+
+    errorCorrectionType.bEnableHEC = OMX_FALSE;
+    errorCorrectionType.bEnableResync = OMX_TRUE;
+    errorCorrectionType.nResynchMarkerSpacing = 256;
+    errorCorrectionType.bEnableDataPartitioning = OMX_FALSE;
+    errorCorrectionType.bEnableRVLC = OMX_FALSE;
+
+    return mOMX->setParameter(
+            mNode, OMX_IndexParamVideoErrorCorrection,
+            &errorCorrectionType, sizeof(errorCorrectionType));
+}
+
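All three encoder setup paths above derive nPFrames from the requested I-frame interval and frame rate through setPFramesSpacing(), whose body is not part of these hunks. The sketch below shows what such a helper typically computes; it is an illustration under that assumption, not the patch's actual implementation.

// Hypothetical sketch of a P-frame spacing helper: frames per GOP minus the
// leading I-frame, with a non-positive interval meaning "I-frames only".
static OMX_U32 sketchPFramesSpacing(int32_t iFramesIntervalSec, float frameRate) {
    if (iFramesIntervalSec <= 0) {
        return 0;                                  // every frame is an I-frame
    }
    OMX_U32 gopSize = (OMX_U32)(iFramesIntervalSec * frameRate + 0.5f);
    return gopSize > 1 ? gopSize - 1 : 0;          // P-frames between I-frames
}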
 status_t ACodec::setVideoFormatOnPort(
         OMX_U32 portIndex,
         int32_t width, int32_t height, OMX_VIDEO_CODINGTYPE compressionFormat) {
@@ -1166,6 +1904,9 @@
             notify->setString("mime", MEDIA_MIMETYPE_VIDEO_RAW);
             notify->setInt32("width", videoDef->nFrameWidth);
             notify->setInt32("height", videoDef->nFrameHeight);
+            notify->setInt32("stride", videoDef->nStride);
+            notify->setInt32("slice-height", videoDef->nSliceHeight);
+            notify->setInt32("color-format", videoDef->eColorFormat);
 
             OMX_CONFIG_RECTTYPE rect;
             InitOMXParams(&rect);
@@ -1241,10 +1982,11 @@
     mSentFormat = true;
 }
 
-void ACodec::signalError(OMX_ERRORTYPE error) {
+void ACodec::signalError(OMX_ERRORTYPE error, status_t internalError) {
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", ACodec::kWhatError);
     notify->setInt32("omx-error", error);
+    notify->setInt32("err", internalError);
     notify->post();
 }
 
@@ -1417,7 +2159,7 @@
     notify->setPointer("buffer-id", info->mBufferID);
 
     info->mData->meta()->clear();
-    notify->setObject("buffer", info->mData);
+    notify->setBuffer("buffer", info->mData);
 
     sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, mCodec->id());
     reply->setPointer("buffer-id", info->mBufferID);
@@ -1433,18 +2175,26 @@
     IOMX::buffer_id bufferID;
     CHECK(msg->findPointer("buffer-id", &bufferID));
 
-    sp<RefBase> obj;
+    sp<ABuffer> buffer;
     int32_t err = OK;
-    if (!msg->findObject("buffer", &obj)) {
+    bool eos = false;
+
+    if (!msg->findBuffer("buffer", &buffer)) {
         CHECK(msg->findInt32("err", &err));
 
         ALOGV("[%s] saw error %d instead of an input buffer",
              mCodec->mComponentName.c_str(), err);
 
-        obj.clear();
+        buffer.clear();
+
+        eos = true;
     }
 
-    sp<ABuffer> buffer = static_cast<ABuffer *>(obj.get());
+    int32_t tmp;
+    if (buffer != NULL && buffer->meta()->findInt32("eos", &tmp) && tmp) {
+        eos = true;
+        err = ERROR_END_OF_STREAM;
+    }
 
     BufferInfo *info = mCodec->findBufferByID(kPortIndexInput, bufferID);
     CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_UPSTREAM);
@@ -1456,7 +2206,7 @@
     switch (mode) {
         case KEEP_BUFFERS:
         {
-            if (buffer == NULL) {
+            if (eos) {
                 if (!mCodec->mPortEOS[kPortIndexInput]) {
                     mCodec->mPortEOS[kPortIndexInput] = true;
                     mCodec->mInputEOSResult = err;
@@ -1467,9 +2217,7 @@
 
         case RESUBMIT_BUFFERS:
         {
-            if (buffer != NULL) {
-                CHECK(!mCodec->mPortEOS[kPortIndexInput]);
-
+            if (buffer != NULL && !mCodec->mPortEOS[kPortIndexInput]) {
                 int64_t timeUs;
                 CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
 
@@ -1480,6 +2228,10 @@
                     flags |= OMX_BUFFERFLAG_CODECCONFIG;
                 }
 
+                if (eos) {
+                    flags |= OMX_BUFFERFLAG_EOS;
+                }
+
                 if (buffer != info->mData) {
                     if (0 && !(flags & OMX_BUFFERFLAG_CODECCONFIG)) {
                         ALOGV("[%s] Needs to copy input data.",
@@ -1493,6 +2245,9 @@
                 if (flags & OMX_BUFFERFLAG_CODECCONFIG) {
                     ALOGV("[%s] calling emptyBuffer %p w/ codec specific data",
                          mCodec->mComponentName.c_str(), bufferID);
+                } else if (flags & OMX_BUFFERFLAG_EOS) {
+                    ALOGV("[%s] calling emptyBuffer %p w/ EOS",
+                         mCodec->mComponentName.c_str(), bufferID);
                 } else {
                     ALOGV("[%s] calling emptyBuffer %p w/ time %lld us",
                          mCodec->mComponentName.c_str(), bufferID, timeUs);
@@ -1509,7 +2264,15 @@
 
                 info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
 
-                getMoreInputDataIfPossible();
+                if (!eos) {
+                    getMoreInputDataIfPossible();
+                } else {
+                    ALOGV("[%s] Signalled EOS on the input port",
+                         mCodec->mComponentName.c_str());
+
+                    mCodec->mPortEOS[kPortIndexInput] = true;
+                    mCodec->mInputEOSResult = err;
+                }
             } else if (!mCodec->mPortEOS[kPortIndexInput]) {
                 if (err != ERROR_END_OF_STREAM) {
                     ALOGV("[%s] Signalling EOS on the input port "
@@ -1582,8 +2345,8 @@
         int64_t timeUs,
         void *platformPrivate,
         void *dataPtr) {
-    ALOGV("[%s] onOMXFillBufferDone %p time %lld us",
-         mCodec->mComponentName.c_str(), bufferID, timeUs);
+    ALOGV("[%s] onOMXFillBufferDone %p time %lld us, flags = 0x%08lx",
+         mCodec->mComponentName.c_str(), bufferID, timeUs, flags);
 
     ssize_t index;
     BufferInfo *info =
@@ -1601,46 +2364,48 @@
 
         case RESUBMIT_BUFFERS:
         {
-            if (rangeLength == 0) {
-                if (!(flags & OMX_BUFFERFLAG_EOS)) {
-                    ALOGV("[%s] calling fillBuffer %p",
-                         mCodec->mComponentName.c_str(), info->mBufferID);
+            if (rangeLength == 0 && !(flags & OMX_BUFFERFLAG_EOS)) {
+                ALOGV("[%s] calling fillBuffer %p",
+                     mCodec->mComponentName.c_str(), info->mBufferID);
 
-                    CHECK_EQ(mCodec->mOMX->fillBuffer(
-                                mCodec->mNode, info->mBufferID),
-                             (status_t)OK);
+                CHECK_EQ(mCodec->mOMX->fillBuffer(
+                            mCodec->mNode, info->mBufferID),
+                         (status_t)OK);
 
-                    info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
-                }
-            } else {
-                if (!mCodec->mSentFormat) {
-                    mCodec->sendFormatChange();
-                }
-
-                if (mCodec->mNativeWindow == NULL) {
-                    info->mData->setRange(rangeOffset, rangeLength);
-                }
-
-                info->mData->meta()->setInt64("timeUs", timeUs);
-
-                sp<AMessage> notify = mCodec->mNotify->dup();
-                notify->setInt32("what", ACodec::kWhatDrainThisBuffer);
-                notify->setPointer("buffer-id", info->mBufferID);
-                notify->setObject("buffer", info->mData);
-
-                sp<AMessage> reply =
-                    new AMessage(kWhatOutputBufferDrained, mCodec->id());
-
-                reply->setPointer("buffer-id", info->mBufferID);
-
-                notify->setMessage("reply", reply);
-
-                notify->post();
-
-                info->mStatus = BufferInfo::OWNED_BY_DOWNSTREAM;
+                info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
+                break;
             }
 
+            if (!mCodec->mIsEncoder && !mCodec->mSentFormat) {
+                mCodec->sendFormatChange();
+            }
+
+            if (mCodec->mNativeWindow == NULL) {
+                info->mData->setRange(rangeOffset, rangeLength);
+            }
+
+            info->mData->meta()->setInt64("timeUs", timeUs);
+
+            sp<AMessage> notify = mCodec->mNotify->dup();
+            notify->setInt32("what", ACodec::kWhatDrainThisBuffer);
+            notify->setPointer("buffer-id", info->mBufferID);
+            notify->setBuffer("buffer", info->mData);
+            notify->setInt32("flags", flags);
+
+            sp<AMessage> reply =
+                new AMessage(kWhatOutputBufferDrained, mCodec->id());
+
+            reply->setPointer("buffer-id", info->mBufferID);
+
+            notify->setMessage("reply", reply);
+
+            notify->post();
+
+            info->mStatus = BufferInfo::OWNED_BY_DOWNSTREAM;
+
             if (flags & OMX_BUFFERFLAG_EOS) {
+                ALOGV("[%s] saw output EOS", mCodec->mComponentName.c_str());
+
                 sp<AMessage> notify = mCodec->mNotify->dup();
                 notify->setInt32("what", ACodec::kWhatEOS);
                 notify->setInt32("err", mCodec->mInputEOSResult);
@@ -1678,12 +2443,13 @@
             && msg->findInt32("render", &render) && render != 0) {
         // The client wants this buffer to be rendered.
 
-        if (mCodec->mNativeWindow->queueBuffer(
+        status_t err;
+        if ((err = mCodec->mNativeWindow->queueBuffer(
                     mCodec->mNativeWindow.get(),
-                    info->mGraphicBuffer.get()) == OK) {
+                    info->mGraphicBuffer.get())) == OK) {
             info->mStatus = BufferInfo::OWNED_BY_NATIVE_WINDOW;
         } else {
-            mCodec->signalError();
+            mCodec->signalError(OMX_ErrorUndefined, err);
             info->mStatus = BufferInfo::OWNED_BY_US;
         }
     } else {
@@ -1746,6 +2512,10 @@
     : BaseState(codec) {
 }
 
+void ACodec::UninitializedState::stateEntered() {
+    ALOGV("Now uninitialized");
+}
+
 bool ACodec::UninitializedState::onMessageReceived(const sp<AMessage> &msg) {
     bool handled = false;
 
@@ -1758,8 +2528,20 @@
             break;
         }
 
+        case ACodec::kWhatAllocateComponent:
+        {
+            onAllocateComponent(msg);
+            handled = true;
+            break;
+        }
+
         case ACodec::kWhatShutdown:
         {
+            int32_t keepComponentAllocated;
+            CHECK(msg->findInt32(
+                        "keepComponentAllocated", &keepComponentAllocated));
+            CHECK(!keepComponentAllocated);
+
             sp<AMessage> notify = mCodec->mNotify->dup();
             notify->setInt32("what", ACodec::kWhatShutdownCompleted);
             notify->post();
@@ -1787,30 +2569,60 @@
 
 void ACodec::UninitializedState::onSetup(
         const sp<AMessage> &msg) {
+    if (onAllocateComponent(msg)
+            && mCodec->mLoadedState->onConfigureComponent(msg)) {
+        mCodec->mLoadedState->onStart();
+    }
+}
+
+bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
+    ALOGV("onAllocateComponent");
+
+    CHECK(mCodec->mNode == NULL);
+
     OMXClient client;
     CHECK_EQ(client.connect(), (status_t)OK);
 
     sp<IOMX> omx = client.interface();
 
-    AString mime;
-    CHECK(msg->findString("mime", &mime));
-
     Vector<String8> matchingCodecs;
-    OMXCodec::findMatchingCodecs(
-            mime.c_str(),
-            false, // createEncoder
-            NULL,  // matchComponentName
-            0,     // flags
-            &matchingCodecs);
+    Vector<uint32_t> matchingCodecQuirks;
+
+    AString mime;
+
+    AString componentName;
+    uint32_t quirks;
+    if (msg->findString("componentName", &componentName)) {
+        matchingCodecs.push_back(String8(componentName.c_str()));
+
+        if (!OMXCodec::findCodecQuirks(componentName.c_str(), &quirks)) {
+            quirks = 0;
+        }
+        matchingCodecQuirks.push_back(quirks);
+    } else {
+        CHECK(msg->findString("mime", &mime));
+
+        int32_t encoder;
+        if (!msg->findInt32("encoder", &encoder)) {
+            encoder = false;
+        }
+
+        OMXCodec::findMatchingCodecs(
+                mime.c_str(),
+                encoder, // createEncoder
+                NULL,  // matchComponentName
+                0,     // flags
+                &matchingCodecs,
+                &matchingCodecQuirks);
+    }
 
     sp<CodecObserver> observer = new CodecObserver;
     IOMX::node_id node = NULL;
 
-    AString componentName;
-
     for (size_t matchIndex = 0; matchIndex < matchingCodecs.size();
             ++matchIndex) {
         componentName = matchingCodecs.itemAt(matchIndex).string();
+        quirks = matchingCodecQuirks.itemAt(matchIndex);
 
         pid_t tid = androidGetTid();
         int prevPriority = androidGetThreadPriority(tid);
@@ -1826,16 +2638,22 @@
     }
 
     if (node == NULL) {
-        ALOGE("Unable to instantiate a decoder for type '%s'.", mime.c_str());
+        if (!mime.empty()) {
+            ALOGE("Unable to instantiate a decoder for type '%s'.",
+                 mime.c_str());
+        } else {
+            ALOGE("Unable to instantiate decoder '%s'.", componentName.c_str());
+        }
 
         mCodec->signalError(OMX_ErrorComponentNotFound);
-        return;
+        return false;
     }
 
     sp<AMessage> notify = new AMessage(kWhatOMXMessage, mCodec->id());
     observer->setNotificationMessage(notify);
 
     mCodec->mComponentName = componentName;
+    mCodec->mQuirks = quirks;
     mCodec->mOMX = omx;
     mCodec->mNode = node;
 
@@ -1844,20 +2662,145 @@
 
     mCodec->mInputEOSResult = OK;
 
-    mCodec->configureCodec(mime.c_str(), msg);
+    {
+        sp<AMessage> notify = mCodec->mNotify->dup();
+        notify->setInt32("what", ACodec::kWhatComponentAllocated);
+        notify->setString("componentName", mCodec->mComponentName.c_str());
+        notify->post();
+    }
+
+    mCodec->changeState(mCodec->mLoadedState);
+
+    return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+ACodec::LoadedState::LoadedState(ACodec *codec)
+    : BaseState(codec) {
+}
+
+void ACodec::LoadedState::stateEntered() {
+    ALOGV("[%s] Now Loaded", mCodec->mComponentName.c_str());
+
+    if (mCodec->mShutdownInProgress) {
+        bool keepComponentAllocated = mCodec->mKeepComponentAllocated;
+
+        mCodec->mShutdownInProgress = false;
+        mCodec->mKeepComponentAllocated = false;
+
+        onShutdown(keepComponentAllocated);
+    }
+}
+
+void ACodec::LoadedState::onShutdown(bool keepComponentAllocated) {
+    if (!keepComponentAllocated) {
+        CHECK_EQ(mCodec->mOMX->freeNode(mCodec->mNode), (status_t)OK);
+
+        mCodec->mNativeWindow.clear();
+        mCodec->mNode = NULL;
+        mCodec->mOMX.clear();
+        mCodec->mQuirks = 0;
+        mCodec->mComponentName.clear();
+
+        mCodec->changeState(mCodec->mUninitializedState);
+    }
+
+    sp<AMessage> notify = mCodec->mNotify->dup();
+    notify->setInt32("what", ACodec::kWhatShutdownCompleted);
+    notify->post();
+}
+
+bool ACodec::LoadedState::onMessageReceived(const sp<AMessage> &msg) {
+    bool handled = false;
+
+    switch (msg->what()) {
+        case ACodec::kWhatConfigureComponent:
+        {
+            onConfigureComponent(msg);
+            handled = true;
+            break;
+        }
+
+        case ACodec::kWhatStart:
+        {
+            onStart();
+            handled = true;
+            break;
+        }
+
+        case ACodec::kWhatShutdown:
+        {
+            int32_t keepComponentAllocated;
+            CHECK(msg->findInt32(
+                        "keepComponentAllocated", &keepComponentAllocated));
+
+            onShutdown(keepComponentAllocated);
+
+            handled = true;
+            break;
+        }
+
+        case ACodec::kWhatFlush:
+        {
+            sp<AMessage> notify = mCodec->mNotify->dup();
+            notify->setInt32("what", ACodec::kWhatFlushCompleted);
+            notify->post();
+
+            handled = true;
+            break;
+        }
+
+        default:
+            return BaseState::onMessageReceived(msg);
+    }
+
+    return handled;
+}
+
+bool ACodec::LoadedState::onConfigureComponent(
+        const sp<AMessage> &msg) {
+    ALOGV("onConfigureComponent");
+
+    CHECK(mCodec->mNode != NULL);
+
+    AString mime;
+    CHECK(msg->findString("mime", &mime));
+
+    status_t err = mCodec->configureCodec(mime.c_str(), msg);
+
+    if (err != OK) {
+        ALOGE("[%s] configureCodec returning error %d",
+              mCodec->mComponentName.c_str(), err);
+
+        mCodec->signalError(OMX_ErrorUndefined, err);
+        return false;
+    }
 
     sp<RefBase> obj;
     if (msg->findObject("native-window", &obj)
-            && strncmp("OMX.google.", componentName.c_str(), 11)) {
+            && strncmp("OMX.google.", mCodec->mComponentName.c_str(), 11)) {
         sp<NativeWindowWrapper> nativeWindow(
                 static_cast<NativeWindowWrapper *>(obj.get()));
         CHECK(nativeWindow != NULL);
         mCodec->mNativeWindow = nativeWindow->getNativeWindow();
     }
-
     CHECK_EQ((status_t)OK, mCodec->initNativeWindow());
 
-    CHECK_EQ(omx->sendCommand(node, OMX_CommandStateSet, OMX_StateIdle),
+    {
+        sp<AMessage> notify = mCodec->mNotify->dup();
+        notify->setInt32("what", ACodec::kWhatComponentConfigured);
+        notify->post();
+    }
+
+    return true;
+}
+
+void ACodec::LoadedState::onStart() {
+    ALOGV("onStart");
+
+    CHECK_EQ(mCodec->mOMX->sendCommand(
+                mCodec->mNode, OMX_CommandStateSet, OMX_StateIdle),
              (status_t)OK);
 
     mCodec->changeState(mCodec->mLoadedToIdleState);
@@ -1878,7 +2821,7 @@
              "(error 0x%08x)",
              err);
 
-        mCodec->signalError();
+        mCodec->signalError(OMX_ErrorUndefined, err);
     }
 }
 
@@ -2042,6 +2985,13 @@
     switch (msg->what()) {
         case kWhatShutdown:
         {
+            int32_t keepComponentAllocated;
+            CHECK(msg->findInt32(
+                        "keepComponentAllocated", &keepComponentAllocated));
+
+            mCodec->mShutdownInProgress = true;
+            mCodec->mKeepComponentAllocated = keepComponentAllocated;
+
             mActive = false;
 
             CHECK_EQ(mCodec->mOMX->sendCommand(
@@ -2202,7 +3152,7 @@
                          "port reconfiguration (error 0x%08x)",
                          err);
 
-                    mCodec->signalError();
+                    mCodec->signalError(OMX_ErrorUndefined, err);
 
                     // This is technically not correct, since we were unable
                     // to allocate output buffers and therefore the output port
@@ -2240,7 +3190,8 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 ACodec::ExecutingToIdleState::ExecutingToIdleState(ACodec *codec)
-    : BaseState(codec) {
+    : BaseState(codec),
+      mComponentNowIdle(false) {
 }
 
 bool ACodec::ExecutingToIdleState::onMessageReceived(const sp<AMessage> &msg) {
@@ -2274,6 +3225,7 @@
 void ACodec::ExecutingToIdleState::stateEntered() {
     ALOGV("[%s] Now Executing->Idle", mCodec->mComponentName.c_str());
 
+    mComponentNowIdle = false;
     mCodec->mSentFormat = false;
 }
 
@@ -2285,6 +3237,8 @@
             CHECK_EQ(data1, (OMX_U32)OMX_CommandStateSet);
             CHECK_EQ(data2, (OMX_U32)OMX_StateIdle);
 
+            mComponentNowIdle = true;
+
             changeStateIfWeOwnAllBuffers();
 
             return true;
@@ -2303,7 +3257,7 @@
 }
 
 void ACodec::ExecutingToIdleState::changeStateIfWeOwnAllBuffers() {
-    if (mCodec->allYourBuffersAreBelongToUs()) {
+    if (mComponentNowIdle && mCodec->allYourBuffersAreBelongToUs()) {
         CHECK_EQ(mCodec->mOMX->sendCommand(
                     mCodec->mNode, OMX_CommandStateSet, OMX_StateLoaded),
                  (status_t)OK);
@@ -2375,20 +3329,7 @@
             CHECK_EQ(data1, (OMX_U32)OMX_CommandStateSet);
             CHECK_EQ(data2, (OMX_U32)OMX_StateLoaded);
 
-            ALOGV("[%s] Now Loaded", mCodec->mComponentName.c_str());
-
-            CHECK_EQ(mCodec->mOMX->freeNode(mCodec->mNode), (status_t)OK);
-
-            mCodec->mNativeWindow.clear();
-            mCodec->mNode = NULL;
-            mCodec->mOMX.clear();
-            mCodec->mComponentName.clear();
-
-            mCodec->changeState(mCodec->mUninitializedState);
-
-            sp<AMessage> notify = mCodec->mNotify->dup();
-            notify->setInt32("what", ACodec::kWhatShutdownCompleted);
-            notify->post();
+            mCodec->changeState(mCodec->mLoadedState);
 
             return true;
         }
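The Executing and Loaded state changes above thread a keepComponentAllocated flag through the kWhatShutdown message so a component can be parked in the Loaded state instead of being freed. A caller-side sketch of posting such a message, assuming an sp<ACodec> named codec and visibility of the kWhatShutdown constant:

// Illustrative only: building the shutdown request the handlers above expect.
sp<AMessage> msg = new AMessage(ACodec::kWhatShutdown, codec->id());
msg->setInt32("keepComponentAllocated", keepComponentAllocated ? 1 : 0);
msg->post();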
diff --git a/media/libstagefright/AMRExtractor.cpp b/media/libstagefright/AMRExtractor.cpp
index 5a28347..03dcbf9 100644
--- a/media/libstagefright/AMRExtractor.cpp
+++ b/media/libstagefright/AMRExtractor.cpp
@@ -20,9 +20,9 @@
 
 #include "include/AMRExtractor.h"
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
diff --git a/media/libstagefright/AMRWriter.cpp b/media/libstagefright/AMRWriter.cpp
index 6c4e307..ca85640 100644
--- a/media/libstagefright/AMRWriter.cpp
+++ b/media/libstagefright/AMRWriter.cpp
@@ -14,9 +14,9 @@
  * limitations under the License.
  */
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/AMRWriter.h>
 #include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
@@ -52,7 +52,7 @@
 
 AMRWriter::~AMRWriter() {
     if (mStarted) {
-        stop();
+        reset();
     }
 
     if (mFd != -1) {
@@ -152,7 +152,7 @@
     return OK;
 }
 
-status_t AMRWriter::stop() {
+status_t AMRWriter::reset() {
     if (!mStarted) {
         return OK;
     }
diff --git a/media/libstagefright/AVIExtractor.cpp b/media/libstagefright/AVIExtractor.cpp
index a3187b7..5a6211e 100644
--- a/media/libstagefright/AVIExtractor.cpp
+++ b/media/libstagefright/AVIExtractor.cpp
@@ -577,6 +577,7 @@
         case FOURCC('a', 'v', 'c', '1'):
         case FOURCC('d', 'a', 'v', 'c'):
         case FOURCC('x', '2', '6', '4'):
+        case FOURCC('H', '2', '6', '4'):
         case FOURCC('v', 's', 's', 'h'):
             return MEDIA_MIMETYPE_VIDEO_AVC;
 
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 690deac..77714f3 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -9,6 +9,7 @@
         AACWriter.cpp                     \
         AMRExtractor.cpp                  \
         AMRWriter.cpp                     \
+        AVIExtractor.cpp                  \
         AudioPlayer.cpp                   \
         AudioSource.cpp                   \
         AwesomePlayer.cpp                 \
@@ -28,12 +29,14 @@
         MPEG4Writer.cpp                   \
         MediaBuffer.cpp                   \
         MediaBufferGroup.cpp              \
+        MediaCodec.cpp                    \
+        MediaCodecList.cpp                \
         MediaDefs.cpp                     \
         MediaExtractor.cpp                \
         MediaSource.cpp                   \
-        MediaSourceSplitter.cpp           \
         MetaData.cpp                      \
         NuCachedSource2.cpp               \
+        NuMediaExtractor.cpp              \
         OMXClient.cpp                     \
         OMXCodec.cpp                      \
         OggExtractor.cpp                  \
@@ -54,31 +57,41 @@
 
 LOCAL_C_INCLUDES:= \
 	$(JNI_H_INCLUDE) \
-        $(TOP)/frameworks/base/include/media/stagefright/openmax \
+        $(TOP)/frameworks/base/include/media/stagefright/timedtext \
+        $(TOP)/frameworks/native/include/media/hardware \
+        $(TOP)/frameworks/native/include/media/openmax \
+        $(TOP)/external/expat/lib \
         $(TOP)/external/flac/include \
         $(TOP)/external/tremolo \
         $(TOP)/external/openssl/include \
 
 LOCAL_SHARED_LIBRARIES := \
-        libbinder         \
-        libmedia          \
-        libutils          \
-        libcutils         \
-        libui             \
-        libsonivox        \
-        libvorbisidec     \
-        libstagefright_yuv \
+        libbinder \
         libcamera_client \
-        libdrmframework  \
-        libcrypto        \
-        libssl           \
-        libgui           \
+        libchromium_net \
+        libcrypto \
+        libcutils \
+        libdl \
+        libdrmframework \
+        libexpat \
+        libgui \
+        libicui18n \
+        libicuuc \
+        liblog \
+        libmedia \
+        libmedia_native \
+        libsonivox \
+        libssl \
+        libstagefright_omx \
+        libstagefright_yuv \
+        libui \
+        libutils \
+        libvorbisidec \
+        libz \
 
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_color_conversion \
         libstagefright_aacenc \
-        libstagefright_amrnbenc \
-        libstagefright_amrwbenc \
         libstagefright_avcenc \
         libstagefright_m4vh263enc \
         libstagefright_matroska \
@@ -88,59 +101,15 @@
         libstagefright_httplive \
         libstagefright_id3 \
         libFLAC \
+        libstagefright_chromium_http \
 
-################################################################################
-
-# The following was shamelessly copied from external/webkit/Android.mk and
-# currently must follow the same logic to determine how webkit was built and
-# if it's safe to link against libchromium.net
-
-# V8 also requires an ARMv7 CPU, and since we must use jsc, we cannot
-# use the Chrome http stack either.
-ifneq ($(strip $(ARCH_ARM_HAVE_ARMV7A)),true)
-  USE_ALT_HTTP := true
-endif
-
-# See if the user has specified a stack they want to use
-HTTP_STACK = $(HTTP)
-# We default to the Chrome HTTP stack.
-DEFAULT_HTTP = chrome
-ALT_HTTP = android
-
-ifneq ($(HTTP_STACK),chrome)
-  ifneq ($(HTTP_STACK),android)
-    # No HTTP stack is specified, pickup the one we want as default.
-    ifeq ($(USE_ALT_HTTP),true)
-      HTTP_STACK = $(ALT_HTTP)
-    else
-      HTTP_STACK = $(DEFAULT_HTTP)
-    endif
-  endif
-endif
-
-ifeq ($(HTTP_STACK),chrome)
-
-LOCAL_SHARED_LIBRARIES += \
-        liblog           \
-        libicuuc         \
-        libicui18n       \
-        libz             \
-        libdl            \
-
-LOCAL_STATIC_LIBRARIES += \
-        libstagefright_chromium_http
-
-LOCAL_SHARED_LIBRARIES += libstlport libchromium_net
+LOCAL_SHARED_LIBRARIES += libstlport
 include external/stlport/libstlport.mk
 
+# TODO: Chromium is always available, so this flag can be removed.
 LOCAL_CPPFLAGS += -DCHROMIUM_AVAILABLE=1
 
-endif  # ifeq ($(HTTP_STACK),chrome)
-
-################################################################################
-
 LOCAL_SHARED_LIBRARIES += \
-        libstagefright_amrnb_common \
         libstagefright_enc_common \
         libstagefright_avc_common \
         libstagefright_foundation \
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index 13c624d..0f816e7 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -110,13 +110,18 @@
     success = format->findInt32(kKeySampleRate, &mSampleRate);
     CHECK(success);
 
-    int32_t numChannels;
+    int32_t numChannels, channelMask;
     success = format->findInt32(kKeyChannelCount, &numChannels);
     CHECK(success);
 
+    if (!format->findInt32(kKeyChannelMask, &channelMask)) {
+        ALOGW("source format didn't specify channel mask, using channel order");
+        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
+    }
+
     if (mAudioSink.get() != NULL) {
         status_t err = mAudioSink->open(
-                mSampleRate, numChannels, AUDIO_FORMAT_PCM_16_BIT,
+                mSampleRate, numChannels, channelMask, AUDIO_FORMAT_PCM_16_BIT,
                 DEFAULT_AUDIOSINK_BUFFERCOUNT,
                 &AudioPlayer::AudioSinkCallback, this);
         if (err != OK) {
@@ -137,12 +142,16 @@
 
         mAudioSink->start();
     } else {
+        // playing to an AudioTrack, set up mask if necessary
+        audio_channel_mask_t audioMask = channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER ?
+                audio_channel_out_mask_from_count(numChannels) : channelMask;
+        if (0 == audioMask) {
+            return BAD_VALUE;
+        }
+
         mAudioTrack = new AudioTrack(
-                AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT,
-                (numChannels == 2)
-                    ? AUDIO_CHANNEL_OUT_STEREO
-                    : AUDIO_CHANNEL_OUT_MONO,
-                0, 0, &AudioCallback, this, 0);
+                AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask,
+                0, AUDIO_POLICY_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);
 
         if ((err = mAudioTrack->initCheck()) != OK) {
             delete mAudioTrack;
@@ -268,6 +277,16 @@
     return mReachedEOS;
 }
 
+status_t AudioPlayer::setPlaybackRatePermille(int32_t ratePermille) {
+    if (mAudioSink.get() != NULL) {
+        return mAudioSink->setPlaybackRatePermille(ratePermille);
+    } else if (mAudioTrack != NULL){
+        return mAudioTrack->setSampleRate(ratePermille * mSampleRate / 1000);
+    } else {
+        return NO_INIT;
+    }
+}
+
 // static
 size_t AudioPlayer::AudioSinkCallback(
         MediaPlayerBase::AudioSink *audioSink,
@@ -400,7 +419,11 @@
                          timeToCompletionUs, timeToCompletionUs / 1E6);
 
                     postEOS = true;
-                    postEOSDelayUs = timeToCompletionUs + mLatencyUs;
+                    if (mAudioSink->needsTrailingPadding()) {
+                        postEOSDelayUs = timeToCompletionUs + mLatencyUs;
+                    } else {
+                        postEOSDelayUs = 0;
+                    }
                 }
 
                 mReachedEOS = true;
@@ -418,8 +441,11 @@
                         kKeyTime, &mPositionTimeMediaUs));
 
             mPositionTimeRealUs =
-                ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
+                -mLatencyUs + ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
                     / mSampleRate;
+            if (mPositionTimeRealUs < 0) {
+                mPositionTimeRealUs = 0;
+            }
 
             ALOGV("buffer->size() = %d, "
                  "mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
@@ -474,7 +500,9 @@
 int64_t AudioPlayer::getRealTimeUsLocked() const {
     CHECK(mStarted);
     CHECK_NE(mSampleRate, 0);
-    return -mLatencyUs + (mNumFramesPlayed * 1000000) / mSampleRate;
+    int64_t t = -mLatencyUs + (mNumFramesPlayed * 1000000) / mSampleRate;
+    if (t < 0) return 0;
+    return t;
 }
 
 int64_t AudioPlayer::getMediaTimeUs() {
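setPlaybackRatePermille() above maps a permille playback rate onto AudioTrack::setSampleRate() by scaling the source sample rate; the snippet below restates that arithmetic with concrete numbers.

// Same computation as in the hunk: 500 permille (half speed) at a 44100 Hz
// source yields an effective track rate of 22050 Hz.
int32_t ratePermille = 500;
uint32_t sourceSampleRate = 44100;
uint32_t effectiveRate = ratePermille * sourceSampleRate / 1000;  // 22050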
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 2172cc0..0f1d841 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -47,21 +47,22 @@
 }
 
 AudioSource::AudioSource(
-        int inputSource, uint32_t sampleRate, uint32_t channels)
+        audio_source_t inputSource, uint32_t sampleRate, uint32_t channelCount)
     : mStarted(false),
       mSampleRate(sampleRate),
       mPrevSampleTimeUs(0),
       mNumFramesReceived(0),
       mNumClientOwnedBuffers(0) {
 
-    ALOGV("sampleRate: %d, channels: %d", sampleRate, channels);
-    CHECK(channels == 1 || channels == 2);
-    uint32_t flags = AudioRecord::RECORD_AGC_ENABLE |
+    ALOGV("sampleRate: %d, channelCount: %d", sampleRate, channelCount);
+    CHECK(channelCount == 1 || channelCount == 2);
+    AudioRecord::record_flags flags = (AudioRecord::record_flags)
+                    (AudioRecord::RECORD_AGC_ENABLE |
                      AudioRecord::RECORD_NS_ENABLE  |
-                     AudioRecord::RECORD_IIR_ENABLE;
+                     AudioRecord::RECORD_IIR_ENABLE);
     mRecord = new AudioRecord(
                 inputSource, sampleRate, AUDIO_FORMAT_PCM_16_BIT,
-                channels > 1? AUDIO_CHANNEL_IN_STEREO: AUDIO_CHANNEL_IN_MONO,
+                audio_channel_in_mask_from_count(channelCount),
                 4 * kMaxBufferSize / sizeof(int16_t), /* Enable ping-pong buffers */
                 flags,
                 AudioRecordCallbackFunction,
@@ -72,7 +73,7 @@
 
 AudioSource::~AudioSource() {
     if (mStarted) {
-        stop();
+        reset();
     }
 
     delete mRecord;
@@ -130,7 +131,7 @@
     }
 }
 
-status_t AudioSource::stop() {
+status_t AudioSource::reset() {
     Mutex::Autolock autoLock(mLock);
     if (!mStarted) {
         return UNKNOWN_ERROR;
@@ -282,8 +283,6 @@
         mPrevSampleTimeUs = mStartTimeUs;
     }
 
-    int64_t timestampUs = mPrevSampleTimeUs;
-
     size_t numLostBytes = 0;
     if (mNumFramesReceived > 0) {  // Ignore earlier frame lost
         // getInputFramesLost() returns the number of lost frames.
@@ -293,37 +292,58 @@
 
     CHECK_EQ(numLostBytes & 1, 0u);
     CHECK_EQ(audioBuffer.size & 1, 0u);
-    size_t bufferSize = numLostBytes + audioBuffer.size;
-    MediaBuffer *buffer = new MediaBuffer(bufferSize);
     if (numLostBytes > 0) {
-        memset(buffer->data(), 0, numLostBytes);
-        memcpy((uint8_t *) buffer->data() + numLostBytes,
-                    audioBuffer.i16, audioBuffer.size);
-    } else {
-        if (audioBuffer.size == 0) {
-            ALOGW("Nothing is available from AudioRecord callback buffer");
-            buffer->release();
-            return OK;
-        }
-        memcpy((uint8_t *) buffer->data(),
-                audioBuffer.i16, audioBuffer.size);
+        // Loss of audio frames should happen rarely; thus this ALOGW should
+        // not cause log spam.
+        ALOGW("Lost audio record data: %d bytes", numLostBytes);
     }
 
+    while (numLostBytes > 0) {
+        size_t bufferSize = numLostBytes;
+        if (numLostBytes > kMaxBufferSize) {
+            numLostBytes -= kMaxBufferSize;
+            bufferSize = kMaxBufferSize;
+        } else {
+            numLostBytes = 0;
+        }
+        MediaBuffer *lostAudioBuffer = new MediaBuffer(bufferSize);
+        memset(lostAudioBuffer->data(), 0, bufferSize);
+        lostAudioBuffer->set_range(0, bufferSize);
+        queueInputBuffer_l(lostAudioBuffer, timeUs);
+    }
+
+    if (audioBuffer.size == 0) {
+        ALOGW("Nothing is available from AudioRecord callback buffer");
+        return OK;
+    }
+
+    const size_t bufferSize = audioBuffer.size;
+    MediaBuffer *buffer = new MediaBuffer(bufferSize);
+    memcpy((uint8_t *) buffer->data(),
+            audioBuffer.i16, audioBuffer.size);
     buffer->set_range(0, bufferSize);
-    timestampUs += ((1000000LL * (bufferSize >> 1)) +
-                    (mSampleRate >> 1)) / mSampleRate;
+    queueInputBuffer_l(buffer, timeUs);
+    return OK;
+}
+
+void AudioSource::queueInputBuffer_l(MediaBuffer *buffer, int64_t timeUs) {
+    const size_t bufferSize = buffer->range_length();
+    const size_t frameSize = mRecord->frameSize();
+    const int64_t timestampUs =
+                mPrevSampleTimeUs +
+                    ((1000000LL * (bufferSize / frameSize)) +
+                        (mSampleRate >> 1)) / mSampleRate;
 
     if (mNumFramesReceived == 0) {
         buffer->meta_data()->setInt64(kKeyAnchorTime, mStartTimeUs);
     }
+
     buffer->meta_data()->setInt64(kKeyTime, mPrevSampleTimeUs);
     buffer->meta_data()->setInt64(kKeyDriftTime, timeUs - mInitialReadTimeUs);
     mPrevSampleTimeUs = timestampUs;
-    mNumFramesReceived += buffer->range_length() / sizeof(int16_t);
+    mNumFramesReceived += bufferSize / frameSize;
     mBuffersReceived.push_back(buffer);
     mFrameAvailableCondition.signal();
-
-    return OK;
 }
 
 void AudioSource::trackMaxAmplitude(int16_t *data, int nSamples) {
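queueInputBuffer_l() above advances mPrevSampleTimeUs by the buffer's duration, adding (mSampleRate >> 1) before the division so the result rounds to the nearest microsecond instead of truncating. With assumed example values of a 2048-byte buffer, 2-byte frames (16-bit mono) and 8000 Hz:

// Rounded buffer duration, as computed in the hunk (example values assumed).
const size_t bufferSize = 2048;        // bytes in the buffer
const size_t frameSize = 2;            // 16-bit mono
const uint32_t sampleRate = 8000;
int64_t durationUs =
        ((1000000LL * (bufferSize / frameSize)) + (sampleRate >> 1)) / sampleRate;
// (1e6 * 1024 + 4000) / 8000 = 128000 us, i.e. 128 ms of audio.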
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index d0cb7ff..f96a4df 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -30,13 +30,12 @@
 #include "include/MPEG2TSExtractor.h"
 #include "include/WVMExtractor.h"
 
-#include "timedtext/TimedTextPlayer.h"
-
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <media/IMediaPlayerService.h>
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/timedtext/TimedTextDriver.h>
 #include <media/stagefright/AudioPlayer.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/FileSource.h>
@@ -47,10 +46,8 @@
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/OMXCodec.h>
 
-#include <surfaceflinger/Surface.h>
 #include <gui/ISurfaceTexture.h>
 #include <gui/SurfaceTextureClient.h>
-#include <surfaceflinger/ISurfaceComposer.h>
 
 #include <media/stagefright/foundation/AMessage.h>
 
@@ -192,7 +189,7 @@
       mVideoBuffer(NULL),
       mDecryptHandle(NULL),
       mLastVideoTimeUs(-1),
-      mTextPlayer(NULL) {
+      mTextDriver(NULL) {
     CHECK_EQ(mClient.connect(), (status_t)OK);
 
     DataSource::RegisterDefaultSniffers();
@@ -335,6 +332,14 @@
         return UNKNOWN_ERROR;
     }
 
+    if (extractor->getDrmFlag()) {
+        checkDrmStatus(dataSource);
+    }
+
+    return setDataSource_l(extractor);
+}
+
+void AwesomePlayer::checkDrmStatus(const sp<DataSource>& dataSource) {
     dataSource->getDrmInfo(mDecryptHandle, &mDrmManagerClient);
     if (mDecryptHandle != NULL) {
         CHECK(mDrmManagerClient);
@@ -342,8 +347,6 @@
             notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_DRM_NO_LICENSE);
         }
     }
-
-    return setDataSource_l(extractor);
 }
 
 status_t AwesomePlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
@@ -524,9 +527,9 @@
     delete mAudioPlayer;
     mAudioPlayer = NULL;
 
-    if (mTextPlayer != NULL) {
-        delete mTextPlayer;
-        mTextPlayer = NULL;
+    if (mTextDriver != NULL) {
+        delete mTextDriver;
+        mTextDriver = NULL;
     }
 
     mVideoRenderer.clear();
@@ -1111,8 +1114,8 @@
         modifyFlags(AUDIO_RUNNING, CLEAR);
     }
 
-    if (mFlags & TEXTPLAYER_STARTED) {
-        mTextPlayer->pause();
+    if (mFlags & TEXTPLAYER_INITIALIZED) {
+        mTextDriver->pause();
         modifyFlags(TEXT_RUNNING, CLEAR);
     }
 
@@ -1265,32 +1268,6 @@
     return OK;
 }
 
-status_t AwesomePlayer::setTimedTextTrackIndex(int32_t index) {
-    if (mTextPlayer != NULL) {
-        if (index >= 0) { // to turn on a text track
-            status_t err = mTextPlayer->setTimedTextTrackIndex(index);
-            if (err != OK) {
-                return err;
-            }
-
-            modifyFlags(TEXT_RUNNING, SET);
-            modifyFlags(TEXTPLAYER_STARTED, SET);
-            return OK;
-        } else { // to turn off the text track display
-            if (mFlags  & TEXT_RUNNING) {
-                modifyFlags(TEXT_RUNNING, CLEAR);
-            }
-            if (mFlags  & TEXTPLAYER_STARTED) {
-                modifyFlags(TEXTPLAYER_STARTED, CLEAR);
-            }
-
-            return mTextPlayer->setTimedTextTrackIndex(index);
-        }
-    } else {
-        return INVALID_OPERATION;
-    }
-}
-
 status_t AwesomePlayer::seekTo_l(int64_t timeUs) {
     if (mFlags & CACHE_UNDERRUN) {
         modifyFlags(CACHE_UNDERRUN, CLEAR);
@@ -1312,8 +1289,8 @@
 
     seekAudioIfNecessary_l();
 
-    if (mFlags & TEXTPLAYER_STARTED) {
-        mTextPlayer->seekTo(mSeekTimeUs);
+    if (mFlags & TEXTPLAYER_INITIALIZED) {
+        mTextDriver->seekToAsync(mSeekTimeUs);
     }
 
     if (!(mFlags & PLAYING)) {
@@ -1354,15 +1331,15 @@
     mAudioTrack = source;
 }
 
-void AwesomePlayer::addTextSource(sp<MediaSource> source) {
+void AwesomePlayer::addTextSource(const sp<MediaSource>& source) {
     Mutex::Autolock autoLock(mTimedTextLock);
     CHECK(source != NULL);
 
-    if (mTextPlayer == NULL) {
-        mTextPlayer = new TimedTextPlayer(this, mListener, &mQueue);
+    if (mTextDriver == NULL) {
+        mTextDriver = new TimedTextDriver(mListener);
     }
 
-    mTextPlayer->addTextSource(source);
+    mTextDriver->addInBandTextSource(source);
 }
 
 status_t AwesomePlayer::initAudioDecoder() {
@@ -1603,7 +1580,7 @@
                     mSeekTimeUs,
                     mSeeking == SEEK_VIDEO_ONLY
                         ? MediaSource::ReadOptions::SEEK_NEXT_SYNC
-                        : MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
+                        : MediaSource::ReadOptions::SEEK_CLOSEST);
         }
         for (;;) {
             status_t err = mVideoSource->read(&mVideoBuffer, &options);
@@ -1688,8 +1665,8 @@
         }
     }
 
-    if ((mFlags & TEXTPLAYER_STARTED) && !(mFlags & (TEXT_RUNNING | SEEK_PREVIEW))) {
-        mTextPlayer->resume();
+    if ((mFlags & TEXTPLAYER_INITIALIZED) && !(mFlags & (TEXT_RUNNING | SEEK_PREVIEW))) {
+        mTextDriver->start();
         modifyFlags(TEXT_RUNNING, SET);
     }
 
@@ -2095,7 +2072,7 @@
         String8 mimeType;
         float confidence;
         sp<AMessage> dummy;
-        bool success = SniffDRM(dataSource, &mimeType, &confidence, &dummy);
+        bool success = SniffWVM(dataSource, &mimeType, &confidence, &dummy);
 
         if (!success
                 || strcasecmp(
@@ -2105,6 +2082,8 @@
 
         mWVMExtractor = new WVMExtractor(dataSource);
         mWVMExtractor->setAdaptiveStreamingMode(true);
+        if (mUIDValid)
+            mWVMExtractor->setUID(mUID);
         extractor = mWVMExtractor;
     } else {
         extractor = MediaExtractor::Create(
@@ -2115,13 +2094,8 @@
         }
     }
 
-    dataSource->getDrmInfo(mDecryptHandle, &mDrmManagerClient);
-
-    if (mDecryptHandle != NULL) {
-        CHECK(mDrmManagerClient);
-        if (RightsStatus::RIGHTS_VALID != mDecryptHandle->status) {
-            notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_DRM_NO_LICENSE);
-        }
+    if (extractor->getDrmFlag()) {
+        checkDrmStatus(dataSource);
     }
 
     status_t err = setDataSource_l(extractor);
@@ -2232,24 +2206,18 @@
 
 status_t AwesomePlayer::setParameter(int key, const Parcel &request) {
     switch (key) {
-        case KEY_PARAMETER_TIMED_TEXT_TRACK_INDEX:
-        {
-            Mutex::Autolock autoLock(mTimedTextLock);
-            return setTimedTextTrackIndex(request.readInt32());
-        }
-        case KEY_PARAMETER_TIMED_TEXT_ADD_OUT_OF_BAND_SOURCE:
-        {
-            Mutex::Autolock autoLock(mTimedTextLock);
-            if (mTextPlayer == NULL) {
-                mTextPlayer = new TimedTextPlayer(this, mListener, &mQueue);
-            }
-
-            return mTextPlayer->setParameter(key, request);
-        }
         case KEY_PARAMETER_CACHE_STAT_COLLECT_FREQ_MS:
         {
             return setCacheStatCollectFreq(request);
         }
+        case KEY_PARAMETER_PLAYBACK_RATE_PERMILLE:
+        {
+            if (mAudioPlayer != NULL) {
+                return mAudioPlayer->setPlaybackRatePermille(request.readInt32());
+            } else {
+                return NO_INIT;
+            }
+        }
         default:
         {
             return ERROR_UNSUPPORTED;
@@ -2286,6 +2254,90 @@
     }
 }
 
+status_t AwesomePlayer::invoke(const Parcel &request, Parcel *reply) {
+    if (NULL == reply) {
+        return android::BAD_VALUE;
+    }
+    int32_t methodId;
+    status_t ret = request.readInt32(&methodId);
+    if (ret != android::OK) {
+        return ret;
+    }
+    switch(methodId) {
+        case INVOKE_ID_GET_TRACK_INFO:
+        {
+            Mutex::Autolock autoLock(mTimedTextLock);
+            if (mTextDriver == NULL) {
+                return INVALID_OPERATION;
+            }
+            mTextDriver->getTrackInfo(reply);
+            return OK;
+        }
+        case INVOKE_ID_ADD_EXTERNAL_SOURCE:
+        {
+            Mutex::Autolock autoLock(mTimedTextLock);
+            if (mTextDriver == NULL) {
+                mTextDriver = new TimedTextDriver(mListener);
+            }
+            // String values written in Parcel are UTF-16 values.
+            String8 uri(request.readString16());
+            String8 mimeType(request.readString16());
+            return mTextDriver->addOutOfBandTextSource(uri, mimeType);
+        }
+        case INVOKE_ID_ADD_EXTERNAL_SOURCE_FD:
+        {
+            Mutex::Autolock autoLock(mTimedTextLock);
+            if (mTextDriver == NULL) {
+                mTextDriver = new TimedTextDriver(mListener);
+            }
+            int fd         = request.readFileDescriptor();
+            off64_t offset = request.readInt64();
+            size_t length  = request.readInt64();
+            String8 mimeType(request.readString16());
+            return mTextDriver->addOutOfBandTextSource(
+                    fd, offset, length, mimeType);
+        }
+        case INVOKE_ID_SELECT_TRACK:
+        {
+            Mutex::Autolock autoLock(mTimedTextLock);
+            if (mTextDriver == NULL) {
+                return INVALID_OPERATION;
+            }
+
+            status_t err = mTextDriver->selectTrack(
+                    request.readInt32());
+            if (err == OK) {
+                modifyFlags(TEXTPLAYER_INITIALIZED, SET);
+                if (mFlags & PLAYING && !(mFlags & TEXT_RUNNING)) {
+                    mTextDriver->start();
+                    modifyFlags(TEXT_RUNNING, SET);
+                }
+            }
+            return err;
+        }
+        case INVOKE_ID_UNSELECT_TRACK:
+        {
+            Mutex::Autolock autoLock(mTimedTextLock);
+            if (mTextDriver == NULL) {
+                return INVALID_OPERATION;
+            }
+            status_t err = mTextDriver->unselectTrack(
+                    request.readInt32());
+            if (err == OK) {
+                modifyFlags(TEXTPLAYER_INITIALIZED, CLEAR);
+                modifyFlags(TEXT_RUNNING, CLEAR);
+            }
+            return err;
+        }
+        default:
+        {
+            return ERROR_UNSUPPORTED;
+        }
+    }
+    // Not reached: every case above returns.
+    return OK;
+}
+
 bool AwesomePlayer::isStreamingHTTP() const {
     return mCachedSource != NULL || mWVMExtractor != NULL;
 }
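The new invoke() dispatcher above reads a method id from the request Parcel followed by method-specific arguments. A client-side sketch of selecting a timed-text track through this path, assuming the INVOKE_ID_* constants used in the hunk are visible and player refers to the object exposing this invoke():

// Sketch of the Parcel layout the INVOKE_ID_SELECT_TRACK case expects:
// the method id, then the track index, both written as int32.
Parcel request, reply;
request.writeInt32(INVOKE_ID_SELECT_TRACK);
request.writeInt32(trackIndex);                  // trackIndex is illustrative
status_t err = player->invoke(request, &reply);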
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 1850c9c..fd3f892 100755
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -20,14 +20,14 @@
 
 #include <OMX_Component.h>
 #include <binder/IPCThreadState.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/CameraSource.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <camera/Camera.h>
 #include <camera/CameraParameters.h>
-#include <surfaceflinger/Surface.h>
+#include <gui/Surface.h>
 #include <utils/String8.h>
 #include <cutils/properties.h>
 
@@ -114,7 +114,7 @@
     ALOGE("Uknown color format (%s), please add it to "
          "CameraSource::getColorFormat", colorFormat);
 
-    CHECK_EQ(0, "Unknown color format");
+    CHECK(!"Unknown color format");
 }
 
 CameraSource *CameraSource::Create() {
@@ -182,7 +182,7 @@
     int32_t cameraId) {
 
     if (camera == 0) {
-        mCamera = Camera::connect(cameraId);
+        mCamera = Camera::connect(cameraId, false, false);
         if (mCamera == 0) return -EBUSY;
         mCameraFlags &= ~FLAGS_HOT_CAMERA;
     } else {
@@ -515,9 +515,13 @@
         return err;
     }
 
-    // This CHECK is good, since we just passed the lock/unlock
-    // check earlier by calling mCamera->setParameters().
-    CHECK_EQ(OK, mCamera->setPreviewDisplay(mSurface));
+    // Set the preview display. Skip this if mSurface is null because
+    // applications may already set a surface to the camera.
+    if (mSurface != NULL) {
+        // This CHECK is good, since we just passed the lock/unlock
+        // check earlier by calling mCamera->setParameters().
+        CHECK_EQ((status_t)OK, mCamera->setPreviewDisplay(mSurface));
+    }
 
     // By default, do not store metadata in video buffers
     mIsMetaDataStoredInVideoBuffers = false;
@@ -548,7 +552,7 @@
 
 CameraSource::~CameraSource() {
     if (mStarted) {
-        stop();
+        reset();
     } else if (mInitCheck == OK) {
         // Camera is initialized but because start() is never called,
         // the lock on Camera is never released(). This makes sure
@@ -566,7 +570,8 @@
     if (mCameraFlags & FLAGS_HOT_CAMERA) {
         mCamera->unlock();
         mCamera.clear();
-        CHECK_EQ(OK, mCameraRecordingProxy->startRecording(new ProxyListener(this)));
+        CHECK_EQ((status_t)OK,
+            mCameraRecordingProxy->startRecording(new ProxyListener(this)));
     } else {
         mCamera->setListener(new CameraSourceListener(this));
         mCamera->startRecording();
@@ -632,8 +637,8 @@
     mCameraFlags = 0;
 }
 
-status_t CameraSource::stop() {
-    ALOGD("stop: E");
+status_t CameraSource::reset() {
+    ALOGD("reset: E");
     Mutex::Autolock autoLock(mLock);
     mStarted = false;
     mFrameAvailableCondition.signal();
@@ -670,7 +675,7 @@
     }
 
     CHECK_EQ(mNumFramesReceived, mNumFramesEncoded + mNumFramesDropped);
-    ALOGD("stop: X");
+    ALOGD("reset: X");
     return OK;
 }
 
@@ -718,7 +723,7 @@
             return;
         }
     }
-    CHECK_EQ(0, "signalBufferReturned: bogus buffer");
+    CHECK(!"signalBufferReturned: bogus buffer");
 }
 
 status_t CameraSource::read(
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 263ab50..26ce7ae 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -20,9 +20,9 @@
 #include <binder/IPCThreadState.h>
 #include <binder/MemoryBase.h>
 #include <binder/MemoryHeapBase.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/CameraSource.h>
 #include <media/stagefright/CameraSourceTimeLapse.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MetaData.h>
 #include <camera/Camera.h>
 #include <camera/CameraParameters.h>
@@ -87,6 +87,10 @@
 }
 
 CameraSourceTimeLapse::~CameraSourceTimeLapse() {
+    if (mLastReadBufferCopy) {
+        mLastReadBufferCopy->release();
+        mLastReadBufferCopy = NULL;
+    }
 }
 
 void CameraSourceTimeLapse::startQuickReadReturns() {
@@ -204,15 +208,6 @@
     }
 }
 
-void CameraSourceTimeLapse::stopCameraRecording() {
-    ALOGV("stopCameraRecording");
-    CameraSource::stopCameraRecording();
-    if (mLastReadBufferCopy) {
-        mLastReadBufferCopy->release();
-        mLastReadBufferCopy = NULL;
-    }
-}
-
 sp<IMemory> CameraSourceTimeLapse::createIMemoryCopy(
         const sp<IMemory> &source_data) {
 
diff --git a/media/libstagefright/DRMExtractor.cpp b/media/libstagefright/DRMExtractor.cpp
index 9452ab1..524c3aa 100644
--- a/media/libstagefright/DRMExtractor.cpp
+++ b/media/libstagefright/DRMExtractor.cpp
@@ -23,6 +23,7 @@
 
 #include <arpa/inet.h>
 #include <utils/String8.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/Utils.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaSource.h>
@@ -30,7 +31,6 @@
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDebug.h>
 
 #include <drm/drm_framework_common.h>
 #include <utils/Errors.h>
@@ -282,13 +282,13 @@
     if (decryptHandle != NULL) {
         if (decryptHandle->decryptApiType == DecryptApiType::CONTAINER_BASED) {
             *mimeType = String8("drm+container_based+") + decryptHandle->mimeType;
+            *confidence = 10.0f;
         } else if (decryptHandle->decryptApiType == DecryptApiType::ELEMENTARY_STREAM_BASED) {
             *mimeType = String8("drm+es_based+") + decryptHandle->mimeType;
-        } else if (decryptHandle->decryptApiType == DecryptApiType::WV_BASED) {
-            *mimeType = MEDIA_MIMETYPE_CONTAINER_WVM;
-            ALOGW("SniffWVM: found match\n");
+            *confidence = 10.0f;
+        } else {
+            return false;
         }
-        *confidence = 10.0f;
 
         return true;
     }
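
The DRM sniffing code above now assigns a confidence per recognized decryptApiType and returns false for anything else, with Widevine handled by the separate SniffWVM registered in the DataSource.cpp hunk that follows. A standalone sketch of that sniffer contract in plain C++; the registry, names and values below are invented for illustration, not the stagefright API.

#include <cstdio>
#include <string>
#include <vector>

typedef bool (*Sniffer)(const std::string &stream,
                        std::string *mimeType, float *confidence);

static bool SniffContainerBasedDrm(const std::string &stream,
                                   std::string *mimeType, float *confidence) {
    if (stream != "container") return false;       // unrecognized: let others try
    *mimeType = "drm+container_based+video/mp4";   // illustrative value only
    *confidence = 10.0f;
    return true;
}

int main() {
    std::vector<Sniffer> sniffers = { SniffContainerBasedDrm };
    std::string bestMime;
    float bestConfidence = 0.0f;
    for (Sniffer sniff : sniffers) {
        std::string mime;
        float confidence = 0.0f;
        if (sniff("container", &mime, &confidence) && confidence > bestConfidence) {
            bestConfidence = confidence;
            bestMime = mime;
        }
    }
    std::printf("best match: %s (%.1f)\n", bestMime.c_str(), bestConfidence);
    return 0;
}
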
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 43539bb..d0a7880 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -15,6 +15,12 @@
  */
 
 #include "include/AMRExtractor.h"
+#include "include/AVIExtractor.h"
+
+#if CHROMIUM_AVAILABLE
+#include "include/DataUriSource.h"
+#endif
+
 #include "include/MP3Extractor.h"
 #include "include/MPEG4Extractor.h"
 #include "include/WAVExtractor.h"
@@ -26,6 +32,7 @@
 #include "include/DRMExtractor.h"
 #include "include/FLACExtractor.h"
 #include "include/AACExtractor.h"
+#include "include/WVMExtractor.h"
 
 #include "matroska/MatroskaExtractor.h"
 
@@ -112,7 +119,9 @@
     RegisterSniffer(SniffMPEG2TS);
     RegisterSniffer(SniffMP3);
     RegisterSniffer(SniffAAC);
+    RegisterSniffer(SniffAVI);
     RegisterSniffer(SniffMPEG2PS);
+    RegisterSniffer(SniffWVM);
 
     char value[PROPERTY_VALUE_MAX];
     if (property_get("drm.service.enabled", value, NULL)
@@ -134,6 +143,10 @@
             return NULL;
         }
         source = new NuCachedSource2(httpSource);
+#if CHROMIUM_AVAILABLE
+    } else if (!strncasecmp("data:", uri, 5)) {
+        source = new DataUriSource(uri);
+#endif
     } else {
         // Assume it's a filename.
         source = new FileSource(uri);
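
CreateFromURI above now recognizes data: URIs (when CHROMIUM_AVAILABLE) alongside the existing http handling, and otherwise assumes the string is a file name. A standalone sketch of that case-insensitive prefix dispatch; the stagefright class names appear only in comments and the rest is plain C++.

#include <cstdio>
#include <cstring>
#include <strings.h>   // strncasecmp (POSIX)

enum SourceKind { kCachedHttp, kDataUri, kFile };

static SourceKind classifyUri(const char *uri) {
    if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
        return kCachedHttp;   // would become a cached HTTP source (NuCachedSource2)
    }
    if (!strncasecmp("data:", uri, 5)) {
        return kDataUri;      // would become DataUriSource in the diff
    }
    return kFile;             // assume it's a filename, as the diff does
}

int main() {
    std::printf("%d %d %d\n",
                classifyUri("HTTP://example.com/a.mp4"),
                classifyUri("data:audio/mp3;base64,AAAA"),
                classifyUri("/sdcard/clip.avi"));
    return 0;
}
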
diff --git a/media/libstagefright/FileSource.cpp b/media/libstagefright/FileSource.cpp
index 73cb48c..73c8d03 100644
--- a/media/libstagefright/FileSource.cpp
+++ b/media/libstagefright/FileSource.cpp
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/FileSource.h>
-#include <media/stagefright/MediaDebug.h>
 #include <sys/types.h>
 #include <unistd.h>
 #include <sys/types.h>
@@ -127,7 +127,7 @@
     return OK;
 }
 
-sp<DecryptHandle> FileSource::DrmInitialization() {
+sp<DecryptHandle> FileSource::DrmInitialization(const char *mime) {
     if (mDrmManagerClient == NULL) {
         mDrmManagerClient = new DrmManagerClient();
     }
@@ -138,7 +138,7 @@
 
     if (mDecryptHandle == NULL) {
         mDecryptHandle = mDrmManagerClient->openDecryptSession(
-                mFd, mOffset, mLength);
+                mFd, mOffset, mLength, mime);
     }
 
     if (mDecryptHandle == NULL) {
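
FileSource::DrmInitialization above gains a mime parameter that is forwarded to openDecryptSession, and both the DRM client and the decrypt handle are created lazily on first use. A standalone sketch of that lazy, hint-forwarding shape; the stub types below are hypothetical, not the real DrmManagerClient API.

#include <cstdio>
#include <memory>
#include <string>

struct DecryptHandle { std::string mime; };

struct DrmClientStub {                         // hypothetical stand-in
    std::shared_ptr<DecryptHandle> openDecryptSession(
            int fd, long offset, long length, const char *mime) {
        (void)fd; (void)offset; (void)length;
        auto handle = std::make_shared<DecryptHandle>();
        handle->mime = mime ? mime : "";
        return handle;
    }
};

class FileSourceLike {
    std::unique_ptr<DrmClientStub> mClient;
    std::shared_ptr<DecryptHandle> mHandle;
public:
    std::shared_ptr<DecryptHandle> drmInitialization(const char *mime) {
        if (!mClient) mClient.reset(new DrmClientStub);       // lazy client
        if (!mHandle) mHandle = mClient->openDecryptSession(3, 0, 0, mime);
        return mHandle;                                       // cached handle
    }
};

int main() {
    FileSourceLike src;
    std::printf("mime hint: %s\n", src.drmInitialization("video/wvm")->mime.c_str());
    return 0;
}
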
diff --git a/media/libstagefright/JPEGSource.cpp b/media/libstagefright/JPEGSource.cpp
index e818115..bafa4b2 100644
--- a/media/libstagefright/JPEGSource.cpp
+++ b/media/libstagefright/JPEGSource.cpp
@@ -18,10 +18,10 @@
 #define LOG_TAG "JPEGSource"
 #include <utils/Log.h>
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/JPEGSource.h>
 #include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
@@ -59,7 +59,7 @@
       mWidth(0),
       mHeight(0),
       mOffset(0) {
-    CHECK_EQ(parseJPEG(), OK);
+    CHECK_EQ(parseJPEG(), (status_t)OK);
     CHECK(mSource->getSize(&mSize) == OK);
 }
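
The JPEGSource hunk above, like several others in this change, adds an explicit (status_t)OK cast inside CHECK_EQ. A standalone sketch of one plausible reason (an assumption on my part, not a claim about the ADebug implementation): a comparison template that deduces a single operand type needs the constant cast to the function's return type before the two sides agree.

#include <cstdio>
#include <cstdlib>

typedef int status_t;
enum { OK = 0 };                        // OK is an unnamed-enum constant here

template <typename T>
static void check_eq(const T &a, const T &b) {
    if (!(a == b)) { std::fprintf(stderr, "CHECK_EQ failed\n"); std::abort(); }
}

static status_t parseSomething() { return OK; }   // hypothetical stand-in

int main() {
    // Without the cast, the first operand deduces T = status_t while the
    // second deduces the unnamed enum type, and deduction fails.
    check_eq(parseSomething(), (status_t)OK);
    return 0;
}
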
 
diff --git a/media/libstagefright/MP3Extractor.cpp b/media/libstagefright/MP3Extractor.cpp
index 2215c07..6abaf23 100644
--- a/media/libstagefright/MP3Extractor.cpp
+++ b/media/libstagefright/MP3Extractor.cpp
@@ -311,10 +311,25 @@
     mMeta->setInt32(kKeyBitRate, bitrate * 1000);
     mMeta->setInt32(kKeyChannelCount, num_channels);
 
-    mSeeker = XINGSeeker::CreateFromSource(mDataSource, mFirstFramePos);
+    sp<XINGSeeker> seeker = XINGSeeker::CreateFromSource(mDataSource, mFirstFramePos);
 
-    if (mSeeker == NULL) {
+    if (seeker == NULL) {
         mSeeker = VBRISeeker::CreateFromSource(mDataSource, post_id3_pos);
+    } else {
+        mSeeker = seeker;
+        int encd = seeker->getEncoderDelay();
+        int encp = seeker->getEncoderPadding();
+        if (encd != 0 || encp != 0) {
+            mMeta->setInt32(kKeyEncoderDelay, encd);
+            mMeta->setInt32(kKeyEncoderPadding, encp);
+        }
+    }
+
+    if (mSeeker != NULL) {
+        // While it is safe to send the XING/VBRI frame to the decoder, this will
+        // result in an extra 1152 samples being output. The real first frame to
+        // decode is after the XING/VBRI frame, so skip there.
+        mFirstFramePos += frame_size;
     }
 
     int64_t durationUs;
@@ -333,6 +348,37 @@
     }
 
     mInitCheck = OK;
+
+    // get iTunes-style gapless info if present
+    ID3 id3(mDataSource);
+    if (id3.isValid()) {
+        ID3::Iterator *com = new ID3::Iterator(id3, "COM");
+        if (com->done()) {
+            delete com;
+            com = new ID3::Iterator(id3, "COMM");
+        }
+        while(!com->done()) {
+            String8 commentdesc;
+            String8 commentvalue;
+            com->getString(&commentdesc, &commentvalue);
+            const char * desc = commentdesc.string();
+            const char * value = commentvalue.string();
+
+            // first 3 characters are the language, which we don't care about
+            if(strlen(desc) > 3 && strcmp(desc + 3, "iTunSMPB") == 0) {
+
+                int32_t delay, padding;
+                if (sscanf(value, " %*x %x %x %*x", &delay, &padding) == 2) {
+                    mMeta->setInt32(kKeyEncoderDelay, delay);
+                    mMeta->setInt32(kKeyEncoderPadding, padding);
+                }
+                break;
+            }
+            com->next();
+        }
+        delete com;
+        com = NULL;
+    }
 }
 
 size_t MP3Extractor::countTracks() {
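
The MP3Extractor additions above do two things: they advance mFirstFramePos past the XING/VBRI header frame so the decoder never emits its extra samples, and they pull iTunes-style gapless data out of an ID3 COM/COMM frame whose description ends in iTunSMPB. A standalone sketch of just the sscanf parse; the comment string below is made up for illustration.

#include <cstdio>

int main() {
    // Hypothetical iTunSMPB comment body: hex fields, of which the 2nd and
    // 3rd are the encoder delay and padding in samples.
    const char *value = " 00000000 00000840 00000380 0000000000A12345";
    int delay = 0, padding = 0;
    if (std::sscanf(value, " %*x %x %x %*x", &delay, &padding) == 2) {
        // 0x840 == 2112 samples of delay, 0x380 == 896 samples of padding
        std::printf("encoder delay = %d samples, padding = %d samples\n",
                    delay, padding);
    }
    return 0;
}
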
diff --git a/media/libstagefright/MPEG2TSWriter.cpp b/media/libstagefright/MPEG2TSWriter.cpp
index 36009ab..f702376 100644
--- a/media/libstagefright/MPEG2TSWriter.cpp
+++ b/media/libstagefright/MPEG2TSWriter.cpp
@@ -244,7 +244,7 @@
 
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", kNotifyBuffer);
-    notify->setObject("buffer", out);
+    notify->setBuffer("buffer", out);
     notify->setInt32("oob", true);
     notify->post();
 }
@@ -270,7 +270,7 @@
         copy->meta()->setInt32("isSync", true);
     }
 
-    notify->setObject("buffer", copy);
+    notify->setBuffer("buffer", copy);
     notify->post();
 }
 
@@ -351,7 +351,7 @@
 
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", kNotifyBuffer);
-    notify->setObject("buffer", mAACBuffer);
+    notify->setBuffer("buffer", mAACBuffer);
     notify->post();
 
     mAACBuffer.clear();
@@ -513,7 +513,7 @@
 
 MPEG2TSWriter::~MPEG2TSWriter() {
     if (mStarted) {
-        stop();
+        reset();
     }
 
     mLooper->unregisterHandler(mReflector->id());
@@ -564,7 +564,7 @@
     return OK;
 }
 
-status_t MPEG2TSWriter::stop() {
+status_t MPEG2TSWriter::reset() {
     CHECK(mStarted);
 
     for (size_t i = 0; i < mSources.size(); ++i) {
@@ -614,10 +614,8 @@
 
                 ++mNumSourcesDone;
             } else if (what == SourceInfo::kNotifyBuffer) {
-                sp<RefBase> obj;
-                CHECK(msg->findObject("buffer", &obj));
-
-                sp<ABuffer> buffer = static_cast<ABuffer *>(obj.get());
+                sp<ABuffer> buffer;
+                CHECK(msg->findBuffer("buffer", &buffer));
 
                 int32_t oob;
                 if (msg->findInt32("oob", &oob) && oob) {
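
The MPEG2TSWriter hunks above replace setObject()/findObject() plus a static_cast with the typed setBuffer()/findBuffer() accessors on AMessage. A standalone sketch in plain C++ (a toy message class, not the real AMessage) of how a typed accessor removes the downcast at the receiving end:

#include <cstdio>
#include <map>
#include <memory>
#include <string>
#include <utility>

struct Buffer { size_t size; };

class MessageSketch {
    std::map<std::string, std::shared_ptr<Buffer> > mBuffers;
public:
    void setBuffer(const std::string &name, std::shared_ptr<Buffer> b) {
        mBuffers[name] = std::move(b);
    }
    bool findBuffer(const std::string &name, std::shared_ptr<Buffer> *out) const {
        std::map<std::string, std::shared_ptr<Buffer> >::const_iterator it =
                mBuffers.find(name);
        if (it == mBuffers.end()) return false;
        *out = it->second;              // already the right type, no cast needed
        return true;
    }
};

int main() {
    MessageSketch msg;
    msg.setBuffer("buffer", std::make_shared<Buffer>(Buffer{188}));
    std::shared_ptr<Buffer> b;
    if (msg.findBuffer("buffer", &b)) std::printf("size %zu\n", b->size);
    return 0;
}
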
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 22bdd95..6c95d4e 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -20,7 +20,6 @@
 #include "include/MPEG4Extractor.h"
 #include "include/SampleTable.h"
 #include "include/ESDS.h"
-#include "timedtext/TimedTextPlayer.h"
 
 #include <arpa/inet.h>
 
@@ -1372,8 +1371,9 @@
 
             uint32_t type = ntohl(buffer);
             // For the 3GPP file format, the handler-type within the 'hdlr' box
-            // shall be 'text'
-            if (type == FOURCC('t', 'e', 'x', 't')) {
+            // shall be 'text'. We also want to support 'sbtl' handler type
+            // for practical reasons, as various MPEG4 containers use it.
+            if (type == FOURCC('t', 'e', 'x', 't') || type == FOURCC('s', 'b', 't', 'l')) {
                 mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_TEXT_3GPP);
             }
 
@@ -2429,4 +2429,3 @@
 }
 
 }  // namespace android
-
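
The MPEG4Extractor change above accepts an 'sbtl' handler type in addition to 'text' when deciding that an 'hdlr' box describes 3GPP timed text. A standalone sketch of the FOURCC comparison; the FOURCC macro below is my own definition for illustration, not the stagefright one.

#include <cstdint>
#include <cstdio>

#define FOURCC(a, b, c, d) \
    ((uint32_t(a) << 24) | (uint32_t(b) << 16) | (uint32_t(c) << 8) | uint32_t(d))

static bool isTimedTextHandler(uint32_t type) {
    return type == FOURCC('t', 'e', 'x', 't') || type == FOURCC('s', 'b', 't', 'l');
}

int main() {
    std::printf("%d %d %d\n",
                isTimedTextHandler(FOURCC('t', 'e', 'x', 't')),
                isTimedTextHandler(FOURCC('s', 'b', 't', 'l')),
                isTimedTextHandler(FOURCC('v', 'i', 'd', 'e')));
    return 0;
}
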
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 06dd875..7ebbe1d 100755
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -23,10 +23,10 @@
 #include <pthread.h>
 #include <sys/prctl.h>
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MPEG4Writer.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
@@ -70,6 +70,10 @@
     status_t dump(int fd, const Vector<String16>& args) const;
 
 private:
+    enum {
+        kMaxCttsOffsetTimeUs = 1000000LL,  // 1 second
+    };
+
     MPEG4Writer *mOwner;
     sp<MetaData> mMeta;
     sp<MediaSource> mSource;
@@ -137,11 +141,12 @@
             : sampleCount(count), sampleDuration(timescaledDur) {}
 
         uint32_t sampleCount;
-        int32_t sampleDuration;  // time scale based
+        uint32_t sampleDuration;  // time scale based
     };
-    bool          mHasNegativeCttsDeltaDuration;
     size_t        mNumCttsTableEntries;
     List<CttsTableEntry> mCttsTableEntries;
+    int64_t mMinCttsOffsetTimeUs;
+    int64_t mMaxCttsOffsetTimeUs;
 
     // Sequence parameter set or picture parameter set
     struct AVCParamSet {
@@ -172,6 +177,8 @@
     // Update the audio track's drift information.
     void updateDriftTime(const sp<MetaData>& meta);
 
+    int32_t getStartTimeOffsetScaledTime() const;
+
     static void *ThreadWrapper(void *me);
     status_t threadEntry();
 
@@ -282,7 +289,7 @@
 }
 
 MPEG4Writer::~MPEG4Writer() {
-    stop();
+    reset();
 
     while (!mTracks.empty()) {
         List<Track *>::iterator it = mTracks.begin();
@@ -471,7 +478,7 @@
         !param->findInt32(kKeyTimeScale, &mTimeScale)) {
         mTimeScale = 1000;
     }
-    CHECK(mTimeScale > 0);
+    CHECK_GT(mTimeScale, 0);
     ALOGV("movie time scale: %d", mTimeScale);
 
     mStreamableFile = true;
@@ -490,7 +497,7 @@
         }
         mEstimatedMoovBoxSize = estimateMoovBoxSize(bitRate);
     }
-    CHECK(mEstimatedMoovBoxSize >= 8);
+    CHECK_GE(mEstimatedMoovBoxSize, 8);
     lseek64(mFd, mFreeBoxOffset, SEEK_SET);
     writeInt32(mEstimatedMoovBoxSize);
     write("free", 4);
@@ -616,7 +623,7 @@
     mStarted = false;
 }
 
-status_t MPEG4Writer::stop() {
+status_t MPEG4Writer::reset() {
     if (mInitCheck != OK) {
         return OK;
     } else {
@@ -684,7 +691,7 @@
 
     mWriteMoovBoxToMemory = false;
     if (mStreamableFile) {
-        CHECK(mMoovBoxBufferOffset + 8 <= mEstimatedMoovBoxSize);
+        CHECK_LE(mMoovBoxBufferOffset + 8, mEstimatedMoovBoxSize);
 
         // Moov box
         lseek64(mFd, mFreeBoxOffset, SEEK_SET);
@@ -856,7 +863,7 @@
 
         mOffset += length + 4;
     } else {
-        CHECK(length < 65536);
+        CHECK_LT(length, 65536);
 
         uint8_t x = length >> 8;
         ::write(mFd, &x, 1);
@@ -1085,7 +1092,7 @@
 
 void MPEG4Writer::setStartTimestampUs(int64_t timeUs) {
     ALOGI("setStartTimestampUs: %lld", timeUs);
-    CHECK(timeUs >= 0);
+    CHECK_GE(timeUs, 0ll);
     Mutex::Autolock autoLock(mLock);
     if (mStartTimestampUs < 0 || mStartTimestampUs > timeUs) {
         mStartTimestampUs = timeUs;
@@ -1186,9 +1193,6 @@
     if (mIsAudio) {
         return;
     }
-    if (duration < 0 && !mHasNegativeCttsDeltaDuration) {
-        mHasNegativeCttsDeltaDuration = true;
-    }
     CttsTableEntry cttsEntry(sampleCount, duration);
     mCttsTableEntries.push_back(cttsEntry);
     ++mNumCttsTableEntries;
@@ -1218,7 +1222,7 @@
         mTimeScale = timeScale;
     }
 
-    CHECK(mTimeScale > 0);
+    CHECK_GT(mTimeScale, 0);
 }
 
 void MPEG4Writer::Track::getCodecSpecificDataFromInputFormatIfPossible() {
@@ -1299,7 +1303,7 @@
         }
     }
 
-    CHECK("Received a chunk for a unknown track" == 0);
+    CHECK(!"Received a chunk for an unknown track");
 }
 
 void MPEG4Writer::writeChunkToFile(Chunk* chunk) {
@@ -1509,7 +1513,6 @@
     mMdatSizeBytes = 0;
 
     mMaxChunkDurationUs = 0;
-    mHasNegativeCttsDeltaDuration = false;
 
     pthread_create(&mThread, &attr, ThreadWrapper, this);
     pthread_attr_destroy(&attr);
@@ -1833,29 +1836,18 @@
     int32_t nChunks = 0;
     int32_t nZeroLengthFrames = 0;
     int64_t lastTimestampUs = 0;      // Previous sample time stamp
-    int64_t lastCttsTimeUs = 0;       // Previous sample time stamp
     int64_t lastDurationUs = 0;       // Between the previous two samples
     int64_t currDurationTicks = 0;    // Timescale based ticks
     int64_t lastDurationTicks = 0;    // Timescale based ticks
     int32_t sampleCount = 1;          // Sample count in the current stts table entry
-    int64_t currCttsDurTicks = 0;     // Timescale based ticks
-    int64_t lastCttsDurTicks = 0;     // Timescale based ticks
-    int32_t cttsSampleCount = 1;      // Sample count in the current ctts table entry
-    uint32_t previousSampleSize = 0;      // Size of the previous sample
+    uint32_t previousSampleSize = 0;  // Size of the previous sample
     int64_t previousPausedDurationUs = 0;
     int64_t timestampUs = 0;
-    int64_t cttsDeltaTimeUs = 0;
-    bool hasBFrames = false;
+    int64_t cttsOffsetTimeUs = 0;
+    int64_t currCttsOffsetTimeTicks = 0;   // Timescale based ticks
+    int64_t lastCttsOffsetTimeTicks = -1;  // Timescale based ticks
+    int32_t cttsSampleCount = 0;           // Sample count in the current ctts table entry
 
-#if 1
-    // XXX: Samsung's video encoder's output buffer timestamp
-    // is not correct. see bug 4724339
-    char value[PROPERTY_VALUE_MAX];
-    if (property_get("rw.media.record.hasb", value, NULL) &&
-        (!strcasecmp(value, "true") || !strcasecmp(value, "1"))) {
-        hasBFrames = true;
-    }
-#endif
     if (mIsAudio) {
         prctl(PR_SET_NAME, (unsigned long)"AudioTrackEncoding", 0, 0, 0);
     } else {
@@ -1897,7 +1889,7 @@
                         (const uint8_t *)buffer->data()
                             + buffer->range_offset(),
                         buffer->range_length());
-                CHECK_EQ(OK, err);
+                CHECK_EQ((status_t)OK, err);
             } else if (mIsMPEG4) {
                 mCodecSpecificDataSize = buffer->range_length();
                 mCodecSpecificData = malloc(mCodecSpecificDataSize);
@@ -1963,32 +1955,64 @@
 
         if (mResumed) {
             int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
-            CHECK(durExcludingEarlierPausesUs >= 0);
+            CHECK_GE(durExcludingEarlierPausesUs, 0ll);
             int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
-            CHECK(pausedDurationUs >= lastDurationUs);
+            CHECK_GE(pausedDurationUs, lastDurationUs);
             previousPausedDurationUs += pausedDurationUs - lastDurationUs;
             mResumed = false;
         }
 
         timestampUs -= previousPausedDurationUs;
-        CHECK(timestampUs >= 0);
-        if (!mIsAudio && hasBFrames) {
+        CHECK_GE(timestampUs, 0ll);
+        if (!mIsAudio) {
             /*
              * Composition time: timestampUs
              * Decoding time: decodingTimeUs
-             * Composition time delta = composition time - decoding time
-             *
-             * We save picture decoding time stamp delta in stts table entries,
-             * and composition time delta duration in ctts table entries.
+             * Composition time offset = composition time - decoding time
              */
             int64_t decodingTimeUs;
             CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
             decodingTimeUs -= previousPausedDurationUs;
-            int64_t timeUs = decodingTimeUs;
-            cttsDeltaTimeUs = timestampUs - decodingTimeUs;
+            cttsOffsetTimeUs =
+                    timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
+            CHECK_GE(cttsOffsetTimeUs, 0ll);
             timestampUs = decodingTimeUs;
-            ALOGV("decoding time: %lld and ctts delta time: %lld",
-                timestampUs, cttsDeltaTimeUs);
+            ALOGV("decoding time: %lld and ctts offset time: %lld",
+                timestampUs, cttsOffsetTimeUs);
+
+            // Update ctts box table if necessary
+            currCttsOffsetTimeTicks =
+                    (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
+            CHECK_LE(currCttsOffsetTimeTicks, 0x0FFFFFFFFLL);
+            if (mNumSamples == 0) {
+                // Force the first ctts table entry to have one single entry
+                // so that we can do adjustment for the initial track start
+                // time offset easily in writeCttsBox().
+                lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+                addOneCttsTableEntry(1, currCttsOffsetTimeTicks);
+                cttsSampleCount = 0;      // No sample in ctts box is pending
+            } else {
+                if (currCttsOffsetTimeTicks != lastCttsOffsetTimeTicks) {
+                    addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
+                    lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+                    cttsSampleCount = 1;  // One sample in ctts box is pending
+                } else {
+                    ++cttsSampleCount;
+                }
+            }
+
+            // Update ctts time offset range
+            if (mNumSamples == 0) {
+                mMinCttsOffsetTimeUs = currCttsOffsetTimeTicks;
+                mMaxCttsOffsetTimeUs = currCttsOffsetTimeTicks;
+            } else {
+                if (currCttsOffsetTimeTicks > mMaxCttsOffsetTimeUs) {
+                    mMaxCttsOffsetTimeUs = currCttsOffsetTimeTicks;
+                } else if (currCttsOffsetTimeTicks < mMinCttsOffsetTimeUs) {
+                    mMinCttsOffsetTimeUs = currCttsOffsetTimeTicks;
+                }
+            }
+
         }
 
         if (mIsRealTimeRecording) {
@@ -1997,7 +2021,7 @@
             }
         }
 
-        CHECK(timestampUs >= 0);
+        CHECK_GE(timestampUs, 0ll);
         ALOGV("%s media time stamp: %lld and previous paused duration %lld",
                 mIsAudio? "Audio": "Video", timestampUs, previousPausedDurationUs);
         if (timestampUs > mTrackDurationUs) {
@@ -2012,6 +2036,7 @@
         currDurationTicks =
             ((timestampUs * mTimeScale + 500000LL) / 1000000LL -
                 (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
+        CHECK_GE(currDurationTicks, 0ll);
 
         mSampleSizes.push_back(sampleSize);
         ++mNumSamples;
@@ -2020,25 +2045,12 @@
             // Force the first sample to have its own stts entry so that
             // we can adjust its value later to maintain the A/V sync.
             if (mNumSamples == 3 || currDurationTicks != lastDurationTicks) {
-                ALOGV("%s lastDurationUs: %lld us, currDurationTicks: %lld us",
-                        mIsAudio? "Audio": "Video", lastDurationUs, currDurationTicks);
                 addOneSttsTableEntry(sampleCount, lastDurationTicks);
                 sampleCount = 1;
             } else {
                 ++sampleCount;
             }
 
-            if (!mIsAudio) {
-                currCttsDurTicks =
-                     ((cttsDeltaTimeUs * mTimeScale + 500000LL) / 1000000LL -
-                     (lastCttsTimeUs * mTimeScale + 500000LL) / 1000000LL);
-                if (currCttsDurTicks != lastCttsDurTicks) {
-                    addOneCttsTableEntry(cttsSampleCount, lastCttsDurTicks);
-                    cttsSampleCount = 1;
-                } else {
-                    ++cttsSampleCount;
-                }
-            }
         }
         if (mSamplesHaveSameSize) {
             if (mNumSamples >= 2 && previousSampleSize != sampleSize) {
@@ -2052,11 +2064,6 @@
         lastDurationTicks = currDurationTicks;
         lastTimestampUs = timestampUs;
 
-        if (!mIsAudio) {
-            lastCttsDurTicks = currCttsDurTicks;
-            lastCttsTimeUs = cttsDeltaTimeUs;
-        }
-
         if (isSync != 0) {
             addOneStssTableEntry(mNumSamples);
         }
@@ -2125,10 +2132,8 @@
     if (mNumSamples == 1) {
         lastDurationUs = 0;  // A single sample's duration
         lastDurationTicks = 0;
-        lastCttsDurTicks = 0;
     } else {
         ++sampleCount;  // Count for the last sample
-        ++cttsSampleCount;
     }
 
     if (mNumSamples <= 2) {
@@ -2140,7 +2145,14 @@
         addOneSttsTableEntry(sampleCount, lastDurationTicks);
     }
 
-    addOneCttsTableEntry(cttsSampleCount, lastCttsDurTicks);
+    // The last ctts table entry may not have been written yet; this
+    // makes sure that it gets written out.
+    if (currCttsOffsetTimeTicks == lastCttsOffsetTimeTicks) {
+        if (cttsSampleCount > 0) {
+            addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
+        }
+    }
+
     mTrackDurationUs += lastDurationUs;
     mReachedEOS = true;
 
@@ -2406,7 +2418,7 @@
     mOwner->writeInt16(0x18);        // depth
     mOwner->writeInt16(-1);          // predefined
 
-    CHECK(23 + mCodecSpecificDataSize < 128);
+    CHECK_LT(23 + mCodecSpecificDataSize, 128);
 
     if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
         writeMp4vEsdsBox();
@@ -2465,10 +2477,10 @@
 void MPEG4Writer::Track::writeMp4aEsdsBox() {
     mOwner->beginBox("esds");
     CHECK(mCodecSpecificData);
-    CHECK(mCodecSpecificDataSize > 0);
+    CHECK_GT(mCodecSpecificDataSize, 0);
 
     // Make sure all sizes encode to a single byte.
-    CHECK(mCodecSpecificDataSize + 23 < 128);
+    CHECK_LT(mCodecSpecificDataSize + 23, 128);
 
     mOwner->writeInt32(0);     // version=0, flags=0
     mOwner->writeInt8(0x03);   // ES_DescrTag
@@ -2502,7 +2514,7 @@
 
 void MPEG4Writer::Track::writeMp4vEsdsBox() {
     CHECK(mCodecSpecificData);
-    CHECK(mCodecSpecificDataSize > 0);
+    CHECK_GT(mCodecSpecificDataSize, 0);
     mOwner->beginBox("esds");
 
     mOwner->writeInt32(0);    // version=0, flags=0
@@ -2662,7 +2674,7 @@
 
 void MPEG4Writer::Track::writeAvccBox() {
     CHECK(mCodecSpecificData);
-    CHECK(mCodecSpecificDataSize >= 5);
+    CHECK_GE(mCodecSpecificDataSize, 5);
 
     // Patch avcc's lengthSize field to match the number
     // of bytes we use to indicate the size of a nal unit.
@@ -2690,23 +2702,26 @@
     mOwner->endBox();  // pasp
 }
 
+int32_t MPEG4Writer::Track::getStartTimeOffsetScaledTime() const {
+    int64_t trackStartTimeOffsetUs = 0;
+    int64_t moovStartTimeUs = mOwner->getStartTimestampUs();
+    if (mStartTimestampUs != moovStartTimeUs) {
+        CHECK_GT(mStartTimestampUs, moovStartTimeUs);
+        trackStartTimeOffsetUs = mStartTimestampUs - moovStartTimeUs;
+    }
+    return (trackStartTimeOffsetUs *  mTimeScale + 500000LL) / 1000000LL;
+}
+
 void MPEG4Writer::Track::writeSttsBox() {
     mOwner->beginBox("stts");
     mOwner->writeInt32(0);  // version=0, flags=0
     mOwner->writeInt32(mNumSttsTableEntries);
 
     // Compensate for small start time difference from different media tracks
-    int64_t trackStartTimeOffsetUs = 0;
-    int64_t moovStartTimeUs = mOwner->getStartTimestampUs();
-    if (mStartTimestampUs != moovStartTimeUs) {
-        CHECK(mStartTimestampUs > moovStartTimeUs);
-        trackStartTimeOffsetUs = mStartTimestampUs - moovStartTimeUs;
-    }
     List<SttsTableEntry>::iterator it = mSttsTableEntries.begin();
     CHECK(it != mSttsTableEntries.end() && it->sampleCount == 1);
     mOwner->writeInt32(it->sampleCount);
-    int32_t dur = (trackStartTimeOffsetUs * mTimeScale + 500000LL) / 1000000LL;
-    mOwner->writeInt32(dur + it->sampleDuration);
+    mOwner->writeInt32(getStartTimeOffsetScaledTime() + it->sampleDuration);
 
     int64_t totalCount = 1;
     while (++it != mSttsTableEntries.end()) {
@@ -2714,7 +2729,7 @@
         mOwner->writeInt32(it->sampleDuration);
         totalCount += it->sampleCount;
     }
-    CHECK(totalCount == mNumSamples);
+    CHECK_EQ(totalCount, mNumSamples);
     mOwner->endBox();  // stts
 }
 
@@ -2723,6 +2738,11 @@
         return;
     }
 
+    // There are no B frames at all
+    if (mMinCttsOffsetTimeUs == mMaxCttsOffsetTimeUs) {
+        return;
+    }
+
     // Do not write ctts box when there is no need to have it.
     if ((mNumCttsTableEntries == 1 &&
         mCttsTableEntries.begin()->sampleDuration == 0) ||
@@ -2730,24 +2750,29 @@
         return;
     }
 
-    ALOGV("ctts box has %d entries", mNumCttsTableEntries);
+    ALOGD("ctts box has %d entries with range [%lld, %lld]",
+            mNumCttsTableEntries, mMinCttsOffsetTimeUs, mMaxCttsOffsetTimeUs);
 
     mOwner->beginBox("ctts");
-    if (mHasNegativeCttsDeltaDuration) {
-        mOwner->writeInt32(0x00010000);  // version=1, flags=0
-    } else {
-        mOwner->writeInt32(0);  // version=0, flags=0
-    }
+    // Version 1 allows negative offset time values, but we are
+    // sticking to version 0 for now.
+    mOwner->writeInt32(0);  // version=0, flags=0
     mOwner->writeInt32(mNumCttsTableEntries);
 
-    int64_t totalCount = 0;
-    for (List<CttsTableEntry>::iterator it = mCttsTableEntries.begin();
-         it != mCttsTableEntries.end(); ++it) {
+    // Compensate for small start time difference from different media tracks
+    List<CttsTableEntry>::iterator it = mCttsTableEntries.begin();
+    CHECK(it != mCttsTableEntries.end() && it->sampleCount == 1);
+    mOwner->writeInt32(it->sampleCount);
+    mOwner->writeInt32(getStartTimeOffsetScaledTime() +
+            it->sampleDuration - mMinCttsOffsetTimeUs);
+
+    int64_t totalCount = 1;
+    while (++it != mCttsTableEntries.end()) {
         mOwner->writeInt32(it->sampleCount);
-        mOwner->writeInt32(it->sampleDuration);
+        mOwner->writeInt32(it->sampleDuration - mMinCttsOffsetTimeUs);
         totalCount += it->sampleCount;
     }
-    CHECK(totalCount == mNumSamples);
+    CHECK_EQ(totalCount, mNumSamples);
     mOwner->endBox();  // ctts
 }
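
The MPEG4Writer rework above replaces per-sample ctts delta tracking with a running composition-time offset: each offset is biased by kMaxCttsOffsetTimeUs so it stays non-negative, converted from microseconds to timescale ticks with rounding, and the minimum offset seen over the track is subtracted again when the version-0 ctts box is written (the first entry is forced to a single sample so the track start-time offset can be folded in). A standalone arithmetic sketch of that bookkeeping with made-up numbers:

#include <cstdint>
#include <cstdio>

int main() {
    const int64_t kMaxCttsOffsetTimeUs = 1000000LL;   // 1 second, as in the diff
    const int32_t timeScale = 90000;                  // a common video timescale

    // One video sample whose composition time lags its decoding time by ~33 ms.
    int64_t timestampUs    = 1000000;                 // composition time
    int64_t decodingTimeUs = 1033333;                 // decoding time (B-frame case)

    int64_t cttsOffsetTimeUs = timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
    int64_t ticks = (cttsOffsetTimeUs * timeScale + 500000LL) / 1000000LL;

    // If this were also the minimum offset seen on the track, the value written
    // into a version-0 ctts entry would be ticks minus that minimum, i.e. zero.
    std::printf("offsetUs=%lld ticks=%lld\n",
                (long long)cttsOffsetTimeUs, (long long)ticks);
    return 0;
}
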
 
diff --git a/media/libstagefright/MediaBuffer.cpp b/media/libstagefright/MediaBuffer.cpp
index 96271e4..11b80bf 100644
--- a/media/libstagefright/MediaBuffer.cpp
+++ b/media/libstagefright/MediaBuffer.cpp
@@ -22,8 +22,8 @@
 #include <stdlib.h>
 
 #include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MetaData.h>
 
 #include <ui/GraphicBuffer.h>
@@ -157,7 +157,7 @@
 }
 
 MediaBuffer::~MediaBuffer() {
-    CHECK_EQ(mObserver, NULL);
+    CHECK(mObserver == NULL);
 
     if (mOwnsData && mData != NULL) {
         free(mData);
@@ -188,7 +188,7 @@
 }
 
 MediaBuffer *MediaBuffer::clone() {
-    CHECK_EQ(mGraphicBuffer, NULL);
+    CHECK(mGraphicBuffer == NULL);
 
     MediaBuffer *buffer = new MediaBuffer(mData, mSize);
     buffer->set_range(mRangeOffset, mRangeLength);
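
The MediaBuffer hunks above turn CHECK_EQ(mObserver, NULL) and CHECK_EQ(mGraphicBuffer, NULL) into plain boolean CHECK()s. A standalone sketch of my reading of why (not a statement about the ADebug macros): a two-argument equality template has trouble mixing a pointer operand with the integer literal NULL, while the boolean form sidesteps the issue.

#include <cassert>
#include <cstddef>

// A two-parameter equality helper standing in for a CHECK_EQ-style macro.
template <typename A, typename B>
static bool equal(const A &a, const B &b) { return a == b; }

int main() {
    int *p = NULL;
    assert(equal(p, (int *)NULL));   // fine: both operands are int*
    // equal(p, NULL) would compare an int* against a deduced integer type and
    // fail to compile, which is why the boolean form CHECK(ptr == NULL) is used.
    assert(p == NULL);
    return 0;
}
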
diff --git a/media/libstagefright/MediaBufferGroup.cpp b/media/libstagefright/MediaBufferGroup.cpp
index c8d05f4..80aae51 100644
--- a/media/libstagefright/MediaBufferGroup.cpp
+++ b/media/libstagefright/MediaBufferGroup.cpp
@@ -17,9 +17,9 @@
 #define LOG_TAG "MediaBufferGroup"
 #include <utils/Log.h>
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDebug.h>
 
 namespace android {
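
The new MediaCodec.cpp that follows exposes blocking entry points (init, configure, start, dequeue, and so on) that each post an AMessage to a handler and wait for a reply whose "err" field becomes the return value (see PostAndAwaitResponse). A standalone sketch of that synchronous-over-asynchronous shape in plain C++, using std::thread and std::promise rather than the real ALooper/AMessage classes:

#include <cstdio>
#include <functional>
#include <future>
#include <thread>

typedef int status_t;
const status_t OK = 0;

// Hand "work" to a worker thread (standing in for the handler loop) and block
// until it replies with a status code.
static status_t postAndAwaitResponse(std::function<status_t()> work) {
    std::promise<status_t> reply;
    std::future<status_t> response = reply.get_future();
    std::thread handler([&reply, &work] { reply.set_value(work()); });
    status_t err = response.get();    // the caller blocks here for the reply
    handler.join();
    return err;
}

int main() {
    status_t err = postAndAwaitResponse([] { return OK; });  // e.g. a "start" call
    std::printf("err = %d\n", err);
    return 0;
}
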
 
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
new file mode 100644
index 0000000..a9e7f36
--- /dev/null
+++ b/media/libstagefright/MediaCodec.cpp
@@ -0,0 +1,1217 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaCodec"
+#include <utils/Log.h>
+
+#include <media/stagefright/MediaCodec.h>
+
+#include "include/SoftwareRenderer.h"
+
+#include <gui/SurfaceTextureClient.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/ACodec.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/NativeWindowWrapper.h>
+
+namespace android {
+
+// static
+sp<MediaCodec> MediaCodec::CreateByType(
+        const sp<ALooper> &looper, const char *mime, bool encoder) {
+    sp<MediaCodec> codec = new MediaCodec(looper);
+    if (codec->init(mime, true /* nameIsType */, encoder) != OK) {
+        return NULL;
+    }
+
+    return codec;
+}
+
+// static
+sp<MediaCodec> MediaCodec::CreateByComponentName(
+        const sp<ALooper> &looper, const char *name) {
+    sp<MediaCodec> codec = new MediaCodec(looper);
+    if (codec->init(name, false /* nameIsType */, false /* encoder */) != OK) {
+        return NULL;
+    }
+
+    return codec;
+}
+
+MediaCodec::MediaCodec(const sp<ALooper> &looper)
+    : mState(UNINITIALIZED),
+      mLooper(looper),
+      mCodec(new ACodec),
+      mFlags(0),
+      mSoftRenderer(NULL),
+      mDequeueInputTimeoutGeneration(0),
+      mDequeueInputReplyID(0),
+      mDequeueOutputTimeoutGeneration(0),
+      mDequeueOutputReplyID(0) {
+}
+
+MediaCodec::~MediaCodec() {
+    CHECK_EQ(mState, UNINITIALIZED);
+}
+
+// static
+status_t MediaCodec::PostAndAwaitResponse(
+        const sp<AMessage> &msg, sp<AMessage> *response) {
+    status_t err = msg->postAndAwaitResponse(response);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (!(*response)->findInt32("err", &err)) {
+        err = OK;
+    }
+
+    return err;
+}
+
+status_t MediaCodec::init(const char *name, bool nameIsType, bool encoder) {
+    // Current video decoders do not return from OMX_FillThisBuffer
+    // quickly, violating the OpenMAX specs, until that is remedied
+    // we need to invest in an extra looper to free the main event
+    // queue.
+    bool needDedicatedLooper = false;
+    if (nameIsType && !strncasecmp(name, "video/", 6)) {
+        needDedicatedLooper = true;
+    } else if (!nameIsType && !strncmp(name, "OMX.TI.DUCATI1.VIDEO.", 21)) {
+        needDedicatedLooper = true;
+    }
+
+    if (needDedicatedLooper) {
+        if (mCodecLooper == NULL) {
+            mCodecLooper = new ALooper;
+            mCodecLooper->setName("CodecLooper");
+            mCodecLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+        }
+
+        mCodecLooper->registerHandler(mCodec);
+    } else {
+        mLooper->registerHandler(mCodec);
+    }
+
+    mLooper->registerHandler(this);
+
+    mCodec->setNotificationMessage(new AMessage(kWhatCodecNotify, id()));
+
+    sp<AMessage> msg = new AMessage(kWhatInit, id());
+    msg->setString("name", name);
+    msg->setInt32("nameIsType", nameIsType);
+
+    if (nameIsType) {
+        msg->setInt32("encoder", encoder);
+    }
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t MediaCodec::configure(
+        const sp<AMessage> &format,
+        const sp<SurfaceTextureClient> &nativeWindow,
+        uint32_t flags) {
+    sp<AMessage> msg = new AMessage(kWhatConfigure, id());
+
+    msg->setMessage("format", format);
+    msg->setInt32("flags", flags);
+
+    if (nativeWindow != NULL) {
+        if (!(mFlags & kFlagIsSoftwareCodec)) {
+            msg->setObject(
+                    "native-window",
+                    new NativeWindowWrapper(nativeWindow));
+        } else {
+            mNativeWindow = nativeWindow;
+        }
+    }
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t MediaCodec::start() {
+    sp<AMessage> msg = new AMessage(kWhatStart, id());
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t MediaCodec::stop() {
+    sp<AMessage> msg = new AMessage(kWhatStop, id());
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t MediaCodec::release() {
+    sp<AMessage> msg = new AMessage(kWhatRelease, id());
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t MediaCodec::queueInputBuffer(
+        size_t index,
+        size_t offset,
+        size_t size,
+        int64_t presentationTimeUs,
+        uint32_t flags) {
+    sp<AMessage> msg = new AMessage(kWhatQueueInputBuffer, id());
+    msg->setSize("index", index);
+    msg->setSize("offset", offset);
+    msg->setSize("size", size);
+    msg->setInt64("timeUs", presentationTimeUs);
+    msg->setInt32("flags", flags);
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t MediaCodec::dequeueInputBuffer(size_t *index, int64_t timeoutUs) {
+    sp<AMessage> msg = new AMessage(kWhatDequeueInputBuffer, id());
+    msg->setInt64("timeoutUs", timeoutUs);
+
+    sp<AMessage> response;
+    status_t err;
+    if ((err = PostAndAwaitResponse(msg, &response)) != OK) {
+        return err;
+    }
+
+    CHECK(response->findSize("index", index));
+
+    return OK;
+}
+
+status_t MediaCodec::dequeueOutputBuffer(
+        size_t *index,
+        size_t *offset,
+        size_t *size,
+        int64_t *presentationTimeUs,
+        uint32_t *flags,
+        int64_t timeoutUs) {
+    sp<AMessage> msg = new AMessage(kWhatDequeueOutputBuffer, id());
+    msg->setInt64("timeoutUs", timeoutUs);
+
+    sp<AMessage> response;
+    status_t err;
+    if ((err = PostAndAwaitResponse(msg, &response)) != OK) {
+        return err;
+    }
+
+    CHECK(response->findSize("index", index));
+    CHECK(response->findSize("offset", offset));
+    CHECK(response->findSize("size", size));
+    CHECK(response->findInt64("timeUs", presentationTimeUs));
+    CHECK(response->findInt32("flags", (int32_t *)flags));
+
+    return OK;
+}
+
+status_t MediaCodec::renderOutputBufferAndRelease(size_t index) {
+    sp<AMessage> msg = new AMessage(kWhatReleaseOutputBuffer, id());
+    msg->setSize("index", index);
+    msg->setInt32("render", true);
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t MediaCodec::releaseOutputBuffer(size_t index) {
+    sp<AMessage> msg = new AMessage(kWhatReleaseOutputBuffer, id());
+    msg->setSize("index", index);
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t MediaCodec::getOutputFormat(sp<AMessage> *format) const {
+    sp<AMessage> msg = new AMessage(kWhatGetOutputFormat, id());
+
+    sp<AMessage> response;
+    status_t err;
+    if ((err = PostAndAwaitResponse(msg, &response)) != OK) {
+        return err;
+    }
+
+    CHECK(response->findMessage("format", format));
+
+    return OK;
+}
+
+status_t MediaCodec::getInputBuffers(Vector<sp<ABuffer> > *buffers) const {
+    sp<AMessage> msg = new AMessage(kWhatGetBuffers, id());
+    msg->setInt32("portIndex", kPortIndexInput);
+    msg->setPointer("buffers", buffers);
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t MediaCodec::getOutputBuffers(Vector<sp<ABuffer> > *buffers) const {
+    sp<AMessage> msg = new AMessage(kWhatGetBuffers, id());
+    msg->setInt32("portIndex", kPortIndexOutput);
+    msg->setPointer("buffers", buffers);
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t MediaCodec::flush() {
+    sp<AMessage> msg = new AMessage(kWhatFlush, id());
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void MediaCodec::cancelPendingDequeueOperations() {
+    if (mFlags & kFlagDequeueInputPending) {
+        sp<AMessage> response = new AMessage;
+        response->setInt32("err", INVALID_OPERATION);
+        response->postReply(mDequeueInputReplyID);
+
+        ++mDequeueInputTimeoutGeneration;
+        mDequeueInputReplyID = 0;
+        mFlags &= ~kFlagDequeueInputPending;
+    }
+
+    if (mFlags & kFlagDequeueOutputPending) {
+        sp<AMessage> response = new AMessage;
+        response->setInt32("err", INVALID_OPERATION);
+        response->postReply(mDequeueOutputReplyID);
+
+        ++mDequeueOutputTimeoutGeneration;
+        mDequeueOutputReplyID = 0;
+        mFlags &= ~kFlagDequeueOutputPending;
+    }
+}
+
+bool MediaCodec::handleDequeueInputBuffer(uint32_t replyID, bool newRequest) {
+    if (mState != STARTED
+            || (mFlags & kFlagStickyError)
+            || (newRequest && (mFlags & kFlagDequeueInputPending))) {
+        sp<AMessage> response = new AMessage;
+        response->setInt32("err", INVALID_OPERATION);
+
+        response->postReply(replyID);
+
+        return true;
+    }
+
+    ssize_t index = dequeuePortBuffer(kPortIndexInput);
+
+    if (index < 0) {
+        CHECK_EQ(index, -EAGAIN);
+        return false;
+    }
+
+    sp<AMessage> response = new AMessage;
+    response->setSize("index", index);
+    response->postReply(replyID);
+
+    return true;
+}
+
+bool MediaCodec::handleDequeueOutputBuffer(uint32_t replyID, bool newRequest) {
+    sp<AMessage> response = new AMessage;
+
+    if (mState != STARTED
+            || (mFlags & kFlagStickyError)
+            || (newRequest && (mFlags & kFlagDequeueOutputPending))) {
+        response->setInt32("err", INVALID_OPERATION);
+    } else if (mFlags & kFlagOutputBuffersChanged) {
+        response->setInt32("err", INFO_OUTPUT_BUFFERS_CHANGED);
+        mFlags &= ~kFlagOutputBuffersChanged;
+    } else if (mFlags & kFlagOutputFormatChanged) {
+        response->setInt32("err", INFO_FORMAT_CHANGED);
+        mFlags &= ~kFlagOutputFormatChanged;
+    } else {
+        ssize_t index = dequeuePortBuffer(kPortIndexOutput);
+
+        if (index < 0) {
+            CHECK_EQ(index, -EAGAIN);
+            return false;
+        }
+
+        const sp<ABuffer> &buffer =
+            mPortBuffers[kPortIndexOutput].itemAt(index).mData;
+
+        response->setSize("index", index);
+        response->setSize("offset", buffer->offset());
+        response->setSize("size", buffer->size());
+
+        int64_t timeUs;
+        CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+
+        response->setInt64("timeUs", timeUs);
+
+        int32_t omxFlags;
+        CHECK(buffer->meta()->findInt32("omxFlags", &omxFlags));
+
+        uint32_t flags = 0;
+        if (omxFlags & OMX_BUFFERFLAG_SYNCFRAME) {
+            flags |= BUFFER_FLAG_SYNCFRAME;
+        }
+        if (omxFlags & OMX_BUFFERFLAG_CODECCONFIG) {
+            flags |= BUFFER_FLAG_CODECCONFIG;
+        }
+        if (omxFlags & OMX_BUFFERFLAG_EOS) {
+            flags |= BUFFER_FLAG_EOS;
+        }
+
+        response->setInt32("flags", flags);
+    }
+
+    response->postReply(replyID);
+
+    return true;
+}
+
+void MediaCodec::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatCodecNotify:
+        {
+            int32_t what;
+            CHECK(msg->findInt32("what", &what));
+
+            switch (what) {
+                case ACodec::kWhatError:
+                {
+                    int32_t omxError, internalError;
+                    CHECK(msg->findInt32("omx-error", &omxError));
+                    CHECK(msg->findInt32("err", &internalError));
+
+                    ALOGE("Codec reported an error. "
+                          "(omx error 0x%08x, internalError %d)",
+                          omxError, internalError);
+
+                    bool sendErrorReponse = true;
+
+                    switch (mState) {
+                        case INITIALIZING:
+                        {
+                            setState(UNINITIALIZED);
+                            break;
+                        }
+
+                        case CONFIGURING:
+                        {
+                            setState(INITIALIZED);
+                            break;
+                        }
+
+                        case STARTING:
+                        {
+                            setState(CONFIGURED);
+                            break;
+                        }
+
+                        case STOPPING:
+                        case RELEASING:
+                        {
+                            // Ignore the error, assuming we'll still get
+                            // the shutdown complete notification.
+
+                            sendErrorReponse = false;
+                            break;
+                        }
+
+                        case FLUSHING:
+                        {
+                            setState(STARTED);
+                            break;
+                        }
+
+                        case STARTED:
+                        {
+                            sendErrorReponse = false;
+
+                            mFlags |= kFlagStickyError;
+
+                            cancelPendingDequeueOperations();
+                            break;
+                        }
+
+                        default:
+                        {
+                            sendErrorReponse = false;
+
+                            mFlags |= kFlagStickyError;
+                            break;
+                        }
+                    }
+
+                    if (sendErrorReponse) {
+                        sp<AMessage> response = new AMessage;
+                        response->setInt32("err", UNKNOWN_ERROR);
+
+                        response->postReply(mReplyID);
+                    }
+                    break;
+                }
+
+                case ACodec::kWhatComponentAllocated:
+                {
+                    CHECK_EQ(mState, INITIALIZING);
+                    setState(INITIALIZED);
+
+                    AString componentName;
+                    CHECK(msg->findString("componentName", &componentName));
+
+                    if (componentName.startsWith("OMX.google.")) {
+                        mFlags |= kFlagIsSoftwareCodec;
+                    } else {
+                        mFlags &= ~kFlagIsSoftwareCodec;
+                    }
+
+                    (new AMessage)->postReply(mReplyID);
+                    break;
+                }
+
+                case ACodec::kWhatComponentConfigured:
+                {
+                    CHECK_EQ(mState, CONFIGURING);
+                    setState(CONFIGURED);
+
+                    (new AMessage)->postReply(mReplyID);
+                    break;
+                }
+
+                case ACodec::kWhatBuffersAllocated:
+                {
+                    int32_t portIndex;
+                    CHECK(msg->findInt32("portIndex", &portIndex));
+
+                    ALOGV("%s buffers allocated",
+                          portIndex == kPortIndexInput ? "input" : "output");
+
+                    CHECK(portIndex == kPortIndexInput
+                            || portIndex == kPortIndexOutput);
+
+                    mPortBuffers[portIndex].clear();
+
+                    Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
+                    for (size_t i = 0;; ++i) {
+                        AString name = StringPrintf("buffer-id_%d", i);
+
+                        void *bufferID;
+                        if (!msg->findPointer(name.c_str(), &bufferID)) {
+                            break;
+                        }
+
+                        name = StringPrintf("data_%d", i);
+
+                        BufferInfo info;
+                        info.mBufferID = bufferID;
+                        info.mOwnedByClient = false;
+                        CHECK(msg->findBuffer(name.c_str(), &info.mData));
+
+                        buffers->push_back(info);
+                    }
+
+                    if (portIndex == kPortIndexOutput) {
+                        if (mState == STARTING) {
+                            // We're always allocating output buffers after
+                            // allocating input buffers, so this is a good
+                            // indication that now all buffers are allocated.
+                            setState(STARTED);
+                            (new AMessage)->postReply(mReplyID);
+                        } else {
+                            mFlags |= kFlagOutputBuffersChanged;
+                        }
+                    }
+                    break;
+                }
+
+                case ACodec::kWhatOutputFormatChanged:
+                {
+                    ALOGV("codec output format changed");
+
+                    if ((mFlags & kFlagIsSoftwareCodec)
+                            && mNativeWindow != NULL) {
+                        AString mime;
+                        CHECK(msg->findString("mime", &mime));
+
+                        if (!strncasecmp("video/", mime.c_str(), 6)) {
+                            delete mSoftRenderer;
+                            mSoftRenderer = NULL;
+
+                            int32_t width, height;
+                            CHECK(msg->findInt32("width", &width));
+                            CHECK(msg->findInt32("height", &height));
+
+                            int32_t colorFormat;
+                            CHECK(msg->findInt32(
+                                        "color-format", &colorFormat));
+
+                            sp<MetaData> meta = new MetaData;
+                            meta->setInt32(kKeyWidth, width);
+                            meta->setInt32(kKeyHeight, height);
+                            meta->setInt32(kKeyColorFormat, colorFormat);
+
+                            mSoftRenderer =
+                                new SoftwareRenderer(mNativeWindow, meta);
+                        }
+                    }
+
+                    mOutputFormat = msg;
+                    mFlags |= kFlagOutputFormatChanged;
+                    break;
+                }
+
+                case ACodec::kWhatFillThisBuffer:
+                {
+                    /* size_t index = */updateBuffers(kPortIndexInput, msg);
+
+                    if (mState == FLUSHING
+                            || mState == STOPPING
+                            || mState == RELEASING) {
+                        returnBuffersToCodecOnPort(kPortIndexInput);
+                        break;
+                    }
+
+                    if (mFlags & kFlagDequeueInputPending) {
+                        CHECK(handleDequeueInputBuffer(mDequeueInputReplyID));
+
+                        ++mDequeueInputTimeoutGeneration;
+                        mFlags &= ~kFlagDequeueInputPending;
+                        mDequeueInputReplyID = 0;
+                    }
+                    break;
+                }
+
+                case ACodec::kWhatDrainThisBuffer:
+                {
+                    /* size_t index = */updateBuffers(kPortIndexOutput, msg);
+
+                    if (mState == FLUSHING
+                            || mState == STOPPING
+                            || mState == RELEASING) {
+                        returnBuffersToCodecOnPort(kPortIndexOutput);
+                        break;
+                    }
+
+                    sp<ABuffer> buffer;
+                    CHECK(msg->findBuffer("buffer", &buffer));
+
+                    int32_t omxFlags;
+                    CHECK(msg->findInt32("flags", &omxFlags));
+
+                    buffer->meta()->setInt32("omxFlags", omxFlags);
+
+                    if (mFlags & kFlagDequeueOutputPending) {
+                        CHECK(handleDequeueOutputBuffer(mDequeueOutputReplyID));
+
+                        ++mDequeueOutputTimeoutGeneration;
+                        mFlags &= ~kFlagDequeueOutputPending;
+                        mDequeueOutputReplyID = 0;
+                    }
+                    break;
+                }
+
+                case ACodec::kWhatEOS:
+                {
+                    // We already notify the client of this by using the
+                    // corresponding flag in "onOutputBufferReady".
+                    break;
+                }
+
+                case ACodec::kWhatShutdownCompleted:
+                {
+                    if (mState == STOPPING) {
+                        setState(INITIALIZED);
+                    } else {
+                        CHECK_EQ(mState, RELEASING);
+                        setState(UNINITIALIZED);
+                    }
+
+                    (new AMessage)->postReply(mReplyID);
+                    break;
+                }
+
+                case ACodec::kWhatFlushCompleted:
+                {
+                    CHECK_EQ(mState, FLUSHING);
+                    setState(STARTED);
+
+                    mCodec->signalResume();
+
+                    (new AMessage)->postReply(mReplyID);
+                    break;
+                }
+
+                default:
+                    TRESPASS();
+            }
+            break;
+        }
+
+        case kWhatInit:
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            if (mState != UNINITIALIZED) {
+                sp<AMessage> response = new AMessage;
+                response->setInt32("err", INVALID_OPERATION);
+
+                response->postReply(replyID);
+                break;
+            }
+
+            mReplyID = replyID;
+            setState(INITIALIZING);
+
+            AString name;
+            CHECK(msg->findString("name", &name));
+
+            int32_t nameIsType;
+            int32_t encoder = false;
+            CHECK(msg->findInt32("nameIsType", &nameIsType));
+            if (nameIsType) {
+                CHECK(msg->findInt32("encoder", &encoder));
+            }
+
+            sp<AMessage> format = new AMessage;
+
+            if (nameIsType) {
+                format->setString("mime", name.c_str());
+                format->setInt32("encoder", encoder);
+            } else {
+                format->setString("componentName", name.c_str());
+            }
+
+            mCodec->initiateAllocateComponent(format);
+            break;
+        }
+
+        case kWhatConfigure:
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            if (mState != INITIALIZED) {
+                sp<AMessage> response = new AMessage;
+                response->setInt32("err", INVALID_OPERATION);
+
+                response->postReply(replyID);
+                break;
+            }
+
+            mReplyID = replyID;
+            setState(CONFIGURING);
+
+            sp<RefBase> obj;
+            if (!msg->findObject("native-window", &obj)) {
+                obj.clear();
+            }
+
+            sp<AMessage> format;
+            CHECK(msg->findMessage("format", &format));
+
+            if (obj != NULL) {
+                format->setObject("native-window", obj);
+            }
+
+            uint32_t flags;
+            CHECK(msg->findInt32("flags", (int32_t *)&flags));
+
+            if (flags & CONFIGURE_FLAG_ENCODE) {
+                format->setInt32("encoder", true);
+            }
+
+            mCodec->initiateConfigureComponent(format);
+            break;
+        }
+
+        case kWhatStart:
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            if (mState != CONFIGURED) {
+                sp<AMessage> response = new AMessage;
+                response->setInt32("err", INVALID_OPERATION);
+
+                response->postReply(replyID);
+                break;
+            }
+
+            mReplyID = replyID;
+            setState(STARTING);
+
+            mCodec->initiateStart();
+            break;
+        }
+
+        case kWhatStop:
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            if (mState != INITIALIZED
+                    && mState != CONFIGURED && mState != STARTED) {
+                sp<AMessage> response = new AMessage;
+                response->setInt32("err", INVALID_OPERATION);
+
+                response->postReply(replyID);
+                break;
+            }
+
+            mReplyID = replyID;
+            setState(STOPPING);
+
+            mCodec->initiateShutdown(true /* keepComponentAllocated */);
+            returnBuffersToCodec();
+            break;
+        }
+
+        case kWhatRelease:
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            if (mState != INITIALIZED
+                    && mState != CONFIGURED && mState != STARTED) {
+                sp<AMessage> response = new AMessage;
+                response->setInt32("err", INVALID_OPERATION);
+
+                response->postReply(replyID);
+                break;
+            }
+
+            mReplyID = replyID;
+            setState(RELEASING);
+
+            mCodec->initiateShutdown();
+            returnBuffersToCodec();
+            break;
+        }
+
+        case kWhatDequeueInputBuffer:
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            if (handleDequeueInputBuffer(replyID, true /* new request */)) {
+                break;
+            }
+
+            int64_t timeoutUs;
+            CHECK(msg->findInt64("timeoutUs", &timeoutUs));
+
+            if (timeoutUs == 0ll) {
+                sp<AMessage> response = new AMessage;
+                response->setInt32("err", -EAGAIN);
+                response->postReply(replyID);
+                break;
+            }
+
+            mFlags |= kFlagDequeueInputPending;
+            mDequeueInputReplyID = replyID;
+
+            if (timeoutUs > 0ll) {
+                sp<AMessage> timeoutMsg =
+                    new AMessage(kWhatDequeueInputTimedOut, id());
+                timeoutMsg->setInt32(
+                        "generation", ++mDequeueInputTimeoutGeneration);
+                timeoutMsg->post(timeoutUs);
+            }
+            break;
+        }
+
+        case kWhatDequeueInputTimedOut:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+
+            if (generation != mDequeueInputTimeoutGeneration) {
+                // Obsolete
+                break;
+            }
+
+            CHECK(mFlags & kFlagDequeueInputPending);
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", -EAGAIN);
+            response->postReply(mDequeueInputReplyID);
+
+            mFlags &= ~kFlagDequeueInputPending;
+            mDequeueInputReplyID = 0;
+            break;
+        }
+
+        case kWhatQueueInputBuffer:
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            if (mState != STARTED || (mFlags & kFlagStickyError)) {
+                sp<AMessage> response = new AMessage;
+                response->setInt32("err", INVALID_OPERATION);
+
+                response->postReply(replyID);
+                break;
+            }
+
+            status_t err = onQueueInputBuffer(msg);
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatDequeueOutputBuffer:
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            if (handleDequeueOutputBuffer(replyID, true /* new request */)) {
+                break;
+            }
+
+            int64_t timeoutUs;
+            CHECK(msg->findInt64("timeoutUs", &timeoutUs));
+
+            if (timeoutUs == 0ll) {
+                sp<AMessage> response = new AMessage;
+                response->setInt32("err", -EAGAIN);
+                response->postReply(replyID);
+                break;
+            }
+
+            mFlags |= kFlagDequeueOutputPending;
+            mDequeueOutputReplyID = replyID;
+
+            if (timeoutUs > 0ll) {
+                sp<AMessage> timeoutMsg =
+                    new AMessage(kWhatDequeueOutputTimedOut, id());
+                timeoutMsg->setInt32(
+                        "generation", ++mDequeueOutputTimeoutGeneration);
+                timeoutMsg->post(timeoutUs);
+            }
+            break;
+        }
+
+        case kWhatDequeueOutputTimedOut:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+
+            if (generation != mDequeueOutputTimeoutGeneration) {
+                // Obsolete
+                break;
+            }
+
+            CHECK(mFlags & kFlagDequeueOutputPending);
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", -EAGAIN);
+            response->postReply(mDequeueOutputReplyID);
+
+            mFlags &= ~kFlagDequeueOutputPending;
+            mDequeueOutputReplyID = 0;
+            break;
+        }
+
+        case kWhatReleaseOutputBuffer:
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            if (mState != STARTED || (mFlags & kFlagStickyError)) {
+                sp<AMessage> response = new AMessage;
+                response->setInt32("err", INVALID_OPERATION);
+
+                response->postReply(replyID);
+                break;
+            }
+
+            status_t err = onReleaseOutputBuffer(msg);
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatGetBuffers:
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            if (mState != STARTED || (mFlags & kFlagStickyError)) {
+                sp<AMessage> response = new AMessage;
+                response->setInt32("err", INVALID_OPERATION);
+
+                response->postReply(replyID);
+                break;
+            }
+
+            int32_t portIndex;
+            CHECK(msg->findInt32("portIndex", &portIndex));
+
+            Vector<sp<ABuffer> > *dstBuffers;
+            CHECK(msg->findPointer("buffers", (void **)&dstBuffers));
+
+            dstBuffers->clear();
+            const Vector<BufferInfo> &srcBuffers = mPortBuffers[portIndex];
+
+            for (size_t i = 0; i < srcBuffers.size(); ++i) {
+                const BufferInfo &info = srcBuffers.itemAt(i);
+
+                dstBuffers->push_back(info.mData);
+            }
+
+            (new AMessage)->postReply(replyID);
+            break;
+        }
+
+        case kWhatFlush:
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            if (mState != STARTED || (mFlags & kFlagStickyError)) {
+                sp<AMessage> response = new AMessage;
+                response->setInt32("err", INVALID_OPERATION);
+
+                response->postReply(replyID);
+                break;
+            }
+
+            mReplyID = replyID;
+            setState(FLUSHING);
+
+            mCodec->signalFlush();
+            returnBuffersToCodec();
+            break;
+        }
+
+        case kWhatGetOutputFormat:
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            if ((mState != STARTED && mState != FLUSHING)
+                    || (mFlags & kFlagStickyError)) {
+                sp<AMessage> response = new AMessage;
+                response->setInt32("err", INVALID_OPERATION);
+
+                response->postReply(replyID);
+                break;
+            }
+
+            sp<AMessage> response = new AMessage;
+            response->setMessage("format", mOutputFormat);
+            response->postReply(replyID);
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
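+// Entering UNINITIALIZED releases the software renderer, the native window
+// and the cached output format and clears the sticky-error and
+// format/buffers-changed flags; every state change also cancels any dequeue
+// operation that is still pending.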
+void MediaCodec::setState(State newState) {
+    if (newState == UNINITIALIZED) {
+        delete mSoftRenderer;
+        mSoftRenderer = NULL;
+
+        mNativeWindow.clear();
+
+        mOutputFormat.clear();
+        mFlags &= ~kFlagOutputFormatChanged;
+        mFlags &= ~kFlagOutputBuffersChanged;
+        mFlags &= ~kFlagStickyError;
+    }
+
+    mState = newState;
+
+    cancelPendingDequeueOperations();
+}
+
+void MediaCodec::returnBuffersToCodec() {
+    returnBuffersToCodecOnPort(kPortIndexInput);
+    returnBuffersToCodecOnPort(kPortIndexOutput);
+}
+
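+// Posts the stored reply for every buffer the codec is still waiting on,
+// flagging input buffers with ERROR_END_OF_STREAM, then empties the port's
+// available-buffer list.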
+void MediaCodec::returnBuffersToCodecOnPort(int32_t portIndex) {
+    CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
+
+    Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
+
+    for (size_t i = 0; i < buffers->size(); ++i) {
+        BufferInfo *info = &buffers->editItemAt(i);
+
+        if (info->mNotify != NULL) {
+            sp<AMessage> msg = info->mNotify;
+            info->mNotify = NULL;
+            info->mOwnedByClient = false;
+
+            if (portIndex == kPortIndexInput) {
+                msg->setInt32("err", ERROR_END_OF_STREAM);
+            }
+            msg->post();
+        }
+    }
+
+    mAvailPortBuffers[portIndex].clear();
+}
+
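+// Matches a buffer notification from the codec ("buffer-id") to its
+// BufferInfo slot, stores the reply message used to hand the buffer back,
+// and marks the slot as available for dequeueing by the client.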
+size_t MediaCodec::updateBuffers(
+        int32_t portIndex, const sp<AMessage> &msg) {
+    CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
+
+    void *bufferID;
+    CHECK(msg->findPointer("buffer-id", &bufferID));
+
+    Vector<BufferInfo> *buffers = &mPortBuffers[portIndex];
+
+    for (size_t i = 0; i < buffers->size(); ++i) {
+        BufferInfo *info = &buffers->editItemAt(i);
+
+        if (info->mBufferID == bufferID) {
+            CHECK(info->mNotify == NULL);
+            CHECK(msg->findMessage("reply", &info->mNotify));
+
+            mAvailPortBuffers[portIndex].push_back(i);
+
+            return i;
+        }
+    }
+
+    TRESPASS();
+
+    return 0;
+}
+
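+// Validates the client-supplied index, offset and size, attaches the
+// timestamp and any EOS/codec-config flags to the buffer's metadata and
+// posts the buffer back to the codec via the stored reply message.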
+status_t MediaCodec::onQueueInputBuffer(const sp<AMessage> &msg) {
+    size_t index;
+    size_t offset;
+    size_t size;
+    int64_t timeUs;
+    uint32_t flags;
+    CHECK(msg->findSize("index", &index));
+    CHECK(msg->findSize("offset", &offset));
+    CHECK(msg->findSize("size", &size));
+    CHECK(msg->findInt64("timeUs", &timeUs));
+    CHECK(msg->findInt32("flags", (int32_t *)&flags));
+
+    if (index >= mPortBuffers[kPortIndexInput].size()) {
+        return -ERANGE;
+    }
+
+    BufferInfo *info = &mPortBuffers[kPortIndexInput].editItemAt(index);
+
+    if (info->mNotify == NULL || !info->mOwnedByClient) {
+        return -EACCES;
+    }
+
+    if (offset + size > info->mData->capacity()) {
+        return -EINVAL;
+    }
+
+    sp<AMessage> reply = info->mNotify;
+    info->mNotify = NULL;
+    info->mOwnedByClient = false;
+
+    info->mData->setRange(offset, size);
+    info->mData->meta()->setInt64("timeUs", timeUs);
+
+    if (flags & BUFFER_FLAG_EOS) {
+        info->mData->meta()->setInt32("eos", true);
+    }
+
+    if (flags & BUFFER_FLAG_CODECCONFIG) {
+        info->mData->meta()->setInt32("csd", true);
+    }
+
+    reply->setBuffer("buffer", info->mData);
+    reply->post();
+
+    return OK;
+}
+
+status_t MediaCodec::onReleaseOutputBuffer(const sp<AMessage> &msg) {
+    size_t index;
+    CHECK(msg->findSize("index", &index));
+
+    int32_t render;
+    if (!msg->findInt32("render", &render)) {
+        render = 0;
+    }
+
+    if (mState != STARTED) {
+        return -EINVAL;
+    }
+
+    if (index >= mPortBuffers[kPortIndexOutput].size()) {
+        return -ERANGE;
+    }
+
+    BufferInfo *info = &mPortBuffers[kPortIndexOutput].editItemAt(index);
+
+    if (info->mNotify == NULL || !info->mOwnedByClient) {
+        return -EACCES;
+    }
+
+    if (render) {
+        info->mNotify->setInt32("render", true);
+
+        if (mSoftRenderer != NULL) {
+            mSoftRenderer->render(
+                    info->mData->data(), info->mData->size(), NULL);
+        }
+    }
+
+    info->mNotify->post();
+    info->mNotify = NULL;
+    info->mOwnedByClient = false;
+
+    return OK;
+}
+
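+// Pops the first free buffer index on the given port and marks it as owned
+// by the client; returns -EAGAIN if no buffer is currently available.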
+ssize_t MediaCodec::dequeuePortBuffer(int32_t portIndex) {
+    CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
+
+    List<size_t> *availBuffers = &mAvailPortBuffers[portIndex];
+
+    if (availBuffers->empty()) {
+        return -EAGAIN;
+    }
+
+    size_t index = *availBuffers->begin();
+    availBuffers->erase(availBuffers->begin());
+
+    BufferInfo *info = &mPortBuffers[portIndex].editItemAt(index);
+    CHECK(!info->mOwnedByClient);
+    info->mOwnedByClient = true;
+
+    return index;
+}
+
+}  // namespace android
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
new file mode 100644
index 0000000..6b64e21
--- /dev/null
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -0,0 +1,475 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaCodecList"
+#include <utils/Log.h>
+
+#include <media/stagefright/MediaCodecList.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/threads.h>
+
+#include <expat.h>
+
+namespace android {
+
+static Mutex sInitMutex;
+
+// static
+MediaCodecList *MediaCodecList::sCodecList;
+
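+// Lazily constructs the process-wide codec list under sInitMutex and returns
+// it only if parsing the configuration succeeded, NULL otherwise.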
+// static
+const MediaCodecList *MediaCodecList::getInstance() {
+    Mutex::Autolock autoLock(sInitMutex);
+
+    if (sCodecList == NULL) {
+        sCodecList = new MediaCodecList;
+    }
+
+    return sCodecList->initCheck() == OK ? sCodecList : NULL;
+}
+
+MediaCodecList::MediaCodecList()
+    : mInitCheck(NO_INIT) {
+    FILE *file = fopen("/etc/media_codecs.xml", "r");
+
+    if (file == NULL) {
+        ALOGW("unable to open media codecs configuration xml file.");
+        return;
+    }
+
+    parseXMLFile(file);
+
+    if (mInitCheck == OK) {
+        // These are currently still used by the video editing suite.
+
+        addMediaCodec(true /* encoder */, "AACEncoder", "audio/mp4a-latm");
+        addMediaCodec(true /* encoder */, "AVCEncoder", "video/avc");
+
+        addMediaCodec(true /* encoder */, "M4vH263Encoder");
+        addType("video/3gpp");
+        addType("video/mp4v-es");
+    }
+
+#if 0
+    for (size_t i = 0; i < mCodecInfos.size(); ++i) {
+        const CodecInfo &info = mCodecInfos.itemAt(i);
+
+        AString line = info.mName;
+        line.append(" supports ");
+        for (size_t j = 0; j < mTypes.size(); ++j) {
+            uint32_t value = mTypes.valueAt(j);
+
+            if (info.mTypes & (1ul << value)) {
+                line.append(mTypes.keyAt(j));
+                line.append(" ");
+            }
+        }
+
+        ALOGI("%s", line.c_str());
+    }
+#endif
+
+    fclose(file);
+    file = NULL;
+}
+
+MediaCodecList::~MediaCodecList() {
+}
+
+status_t MediaCodecList::initCheck() const {
+    return mInitCheck;
+}
+
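+// Streams the configuration file through an expat parser in 512-byte chunks,
+// recording the first parse or I/O failure in mInitCheck; on success, codec
+// entries that declare no media types are dropped, on failure all entries
+// and quirks are discarded.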
+void MediaCodecList::parseXMLFile(FILE *file) {
+    mInitCheck = OK;
+    mCurrentSection = SECTION_TOPLEVEL;
+    mDepth = 0;
+
+    XML_Parser parser = ::XML_ParserCreate(NULL);
+    CHECK(parser != NULL);
+
+    ::XML_SetUserData(parser, this);
+    ::XML_SetElementHandler(
+            parser, StartElementHandlerWrapper, EndElementHandlerWrapper);
+
+    const int BUFF_SIZE = 512;
+    while (mInitCheck == OK) {
+        void *buff = ::XML_GetBuffer(parser, BUFF_SIZE);
+        if (buff == NULL) {
+            ALOGE("failed to in call to XML_GetBuffer()");
+            mInitCheck = UNKNOWN_ERROR;
+            break;
+        }
+
+        int bytes_read = ::fread(buff, 1, BUFF_SIZE, file);
+        if (bytes_read < 0) {
+            ALOGE("failed in call to read");
+            mInitCheck = ERROR_IO;
+            break;
+        }
+
+        if (::XML_ParseBuffer(parser, bytes_read, bytes_read == 0)
+                != XML_STATUS_OK) {
+            mInitCheck = ERROR_MALFORMED;
+            break;
+        }
+
+        if (bytes_read == 0) {
+            break;
+        }
+    }
+
+    ::XML_ParserFree(parser);
+
+    if (mInitCheck == OK) {
+        for (size_t i = mCodecInfos.size(); i-- > 0;) {
+            CodecInfo *info = &mCodecInfos.editItemAt(i);
+
+            if (info->mTypes == 0) {
+                // No types supported by this component???
+
+                ALOGW("Component %s does not support any type of media?",
+                      info->mName.c_str());
+
+                mCodecInfos.removeAt(i);
+            }
+        }
+    }
+
+    if (mInitCheck != OK) {
+        mCodecInfos.clear();
+        mCodecQuirks.clear();
+    }
+}
+
+// static
+void MediaCodecList::StartElementHandlerWrapper(
+        void *me, const char *name, const char **attrs) {
+    static_cast<MediaCodecList *>(me)->startElementHandler(name, attrs);
+}
+
+// static
+void MediaCodecList::EndElementHandlerWrapper(void *me, const char *name) {
+    static_cast<MediaCodecList *>(me)->endElementHandler(name);
+}
+
+void MediaCodecList::startElementHandler(
+        const char *name, const char **attrs) {
+    if (mInitCheck != OK) {
+        return;
+    }
+
+    switch (mCurrentSection) {
+        case SECTION_TOPLEVEL:
+        {
+            if (!strcmp(name, "Decoders")) {
+                mCurrentSection = SECTION_DECODERS;
+            } else if (!strcmp(name, "Encoders")) {
+                mCurrentSection = SECTION_ENCODERS;
+            }
+            break;
+        }
+
+        case SECTION_DECODERS:
+        {
+            if (!strcmp(name, "MediaCodec")) {
+                mInitCheck =
+                    addMediaCodecFromAttributes(false /* encoder */, attrs);
+
+                mCurrentSection = SECTION_DECODER;
+            }
+            break;
+        }
+
+        case SECTION_ENCODERS:
+        {
+            if (!strcmp(name, "MediaCodec")) {
+                mInitCheck =
+                    addMediaCodecFromAttributes(true /* encoder */, attrs);
+
+                mCurrentSection = SECTION_ENCODER;
+            }
+            break;
+        }
+
+        case SECTION_DECODER:
+        case SECTION_ENCODER:
+        {
+            if (!strcmp(name, "Quirk")) {
+                mInitCheck = addQuirk(attrs);
+            } else if (!strcmp(name, "Type")) {
+                mInitCheck = addTypeFromAttributes(attrs);
+            }
+            break;
+        }
+
+        default:
+            break;
+    }
+
+    ++mDepth;
+}
+
+void MediaCodecList::endElementHandler(const char *name) {
+    if (mInitCheck != OK) {
+        return;
+    }
+
+    switch (mCurrentSection) {
+        case SECTION_DECODERS:
+        {
+            if (!strcmp(name, "Decoders")) {
+                mCurrentSection = SECTION_TOPLEVEL;
+            }
+            break;
+        }
+
+        case SECTION_ENCODERS:
+        {
+            if (!strcmp(name, "Encoders")) {
+                mCurrentSection = SECTION_TOPLEVEL;
+            }
+            break;
+        }
+
+        case SECTION_DECODER:
+        {
+            if (!strcmp(name, "MediaCodec")) {
+                mCurrentSection = SECTION_DECODERS;
+            }
+            break;
+        }
+
+        case SECTION_ENCODER:
+        {
+            if (!strcmp(name, "MediaCodec")) {
+                mCurrentSection = SECTION_ENCODERS;
+            }
+            break;
+        }
+
+        default:
+            break;
+    }
+
+    --mDepth;
+}
+
+status_t MediaCodecList::addMediaCodecFromAttributes(
+        bool encoder, const char **attrs) {
+    const char *name = NULL;
+    const char *type = NULL;
+
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (!strcmp(attrs[i], "name")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            name = attrs[i + 1];
+            ++i;
+        } else if (!strcmp(attrs[i], "type")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            type = attrs[i + 1];
+            ++i;
+        } else {
+            return -EINVAL;
+        }
+
+        ++i;
+    }
+
+    if (name == NULL) {
+        return -EINVAL;
+    }
+
+    addMediaCodec(encoder, name, type);
+
+    return OK;
+}
+
+void MediaCodecList::addMediaCodec(
+        bool encoder, const char *name, const char *type) {
+    mCodecInfos.push();
+    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+    info->mName = name;
+    info->mIsEncoder = encoder;
+    info->mTypes = 0;
+    info->mQuirks = 0;
+
+    if (type != NULL) {
+        addType(type);
+    }
+}
+
+status_t MediaCodecList::addQuirk(const char **attrs) {
+    const char *name = NULL;
+
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (!strcmp(attrs[i], "name")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            name = attrs[i + 1];
+            ++i;
+        } else {
+            return -EINVAL;
+        }
+
+        ++i;
+    }
+
+    if (name == NULL) {
+        return -EINVAL;
+    }
+
+    uint32_t bit;
+    ssize_t index = mCodecQuirks.indexOfKey(name);
+    if (index < 0) {
+        bit = mCodecQuirks.size();
+
+        if (bit == 32) {
+            ALOGW("Too many distinct quirk names in configuration.");
+            return OK;
+        }
+
+        mCodecQuirks.add(name, bit);
+    } else {
+        bit = mCodecQuirks.valueAt(index);
+    }
+
+    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+    info->mQuirks |= 1ul << bit;
+
+    return OK;
+}
+
+status_t MediaCodecList::addTypeFromAttributes(const char **attrs) {
+    const char *name = NULL;
+
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (!strcmp(attrs[i], "name")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            name = attrs[i + 1];
+            ++i;
+        } else {
+            return -EINVAL;
+        }
+
+        ++i;
+    }
+
+    if (name == NULL) {
+        return -EINVAL;
+    }
+
+    addType(name);
+
+    return OK;
+}
+
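+// Each distinct type name is interned into one of at most 32 bits; the bit
+// is then ORed into the mTypes mask of the most recently added codec entry.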
+void MediaCodecList::addType(const char *name) {
+    uint32_t bit;
+    ssize_t index = mTypes.indexOfKey(name);
+    if (index < 0) {
+        bit = mTypes.size();
+
+        if (bit == 32) {
+            ALOGW("Too many distinct type names in configuration.");
+            return;
+        }
+
+        mTypes.add(name, bit);
+    } else {
+        bit = mTypes.valueAt(index);
+    }
+
+    CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1);
+    info->mTypes |= 1ul << bit;
+}
+
+ssize_t MediaCodecList::findCodecByType(
+        const char *type, bool encoder, size_t startIndex) const {
+    ssize_t typeIndex = mTypes.indexOfKey(type);
+
+    if (typeIndex < 0) {
+        return -ENOENT;
+    }
+
+    uint32_t typeMask = 1ul << mTypes.valueAt(typeIndex);
+
+    while (startIndex < mCodecInfos.size()) {
+        const CodecInfo &info = mCodecInfos.itemAt(startIndex);
+
+        if (info.mIsEncoder == encoder && (info.mTypes & typeMask)) {
+            return startIndex;
+        }
+
+        ++startIndex;
+    }
+
+    return -ENOENT;
+}
+
+ssize_t MediaCodecList::findCodecByName(const char *name) const {
+    for (size_t i = 0; i < mCodecInfos.size(); ++i) {
+        const CodecInfo &info = mCodecInfos.itemAt(i);
+
+        if (info.mName == name) {
+            return i;
+        }
+    }
+
+    return -ENOENT;
+}
+
+const char *MediaCodecList::getCodecName(size_t index) const {
+    if (index >= mCodecInfos.size()) {
+        return NULL;
+    }
+
+    const CodecInfo &info = mCodecInfos.itemAt(index);
+    return info.mName.c_str();
+}
+
+bool MediaCodecList::codecHasQuirk(
+        size_t index, const char *quirkName) const {
+    if (index >= mCodecInfos.size()) {
+        return false;
+    }
+
+    const CodecInfo &info = mCodecInfos.itemAt(index);
+
+    if (info.mQuirks != 0) {
+        ssize_t index = mCodecQuirks.indexOfKey(quirkName);
+        if (index >= 0 && info.mQuirks & (1ul << mCodecQuirks.valueAt(index))) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+}  // namespace android
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp
index 444e823..2549de6 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libstagefright/MediaDefs.cpp
@@ -52,5 +52,6 @@
 const char *MEDIA_MIMETYPE_CONTAINER_WVM = "video/wvm";
 
 const char *MEDIA_MIMETYPE_TEXT_3GPP = "text/3gpp-tt";
+const char *MEDIA_MIMETYPE_TEXT_SUBRIP = "application/x-subrip";
 
 }  // namespace android
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 7b17d65..2171492 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -19,6 +19,7 @@
 #include <utils/Log.h>
 
 #include "include/AMRExtractor.h"
+#include "include/AVIExtractor.h"
 #include "include/MP3Extractor.h"
 #include "include/MPEG4Extractor.h"
 #include "include/WAVExtractor.h"
@@ -109,10 +110,12 @@
         ret = new MatroskaExtractor(source);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
         ret = new MPEG2TSExtractor(source);
+    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_AVI)) {
+        ret = new AVIExtractor(source);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_WVM)) {
         ret = new WVMExtractor(source);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC_ADTS)) {
-        ret = new AACExtractor(source);
+        ret = new AACExtractor(source, meta);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2PS)) {
         ret = new MPEG2PSExtractor(source);
     }
diff --git a/media/libstagefright/MediaSourceSplitter.cpp b/media/libstagefright/MediaSourceSplitter.cpp
deleted file mode 100644
index 8af0694..0000000
--- a/media/libstagefright/MediaSourceSplitter.cpp
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MediaSourceSplitter"
-#include <utils/Log.h>
-
-#include <media/stagefright/MediaSourceSplitter.h>
-#include <media/stagefright/MediaDebug.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MetaData.h>
-
-namespace android {
-
-MediaSourceSplitter::MediaSourceSplitter(sp<MediaSource> mediaSource) {
-    mNumberOfClients = 0;
-    mSource = mediaSource;
-    mSourceStarted = false;
-
-    mNumberOfClientsStarted = 0;
-    mNumberOfCurrentReads = 0;
-    mCurrentReadBit = 0;
-    mLastReadCompleted = true;
-}
-
-MediaSourceSplitter::~MediaSourceSplitter() {
-}
-
-sp<MediaSource> MediaSourceSplitter::createClient() {
-    Mutex::Autolock autoLock(mLock);
-
-    sp<MediaSource> client = new Client(this, mNumberOfClients++);
-    mClientsStarted.push(false);
-    mClientsDesiredReadBit.push(0);
-    return client;
-}
-
-status_t MediaSourceSplitter::start(int clientId, MetaData *params) {
-    Mutex::Autolock autoLock(mLock);
-
-    ALOGV("start client (%d)", clientId);
-    if (mClientsStarted[clientId]) {
-        return OK;
-    }
-
-    mNumberOfClientsStarted++;
-
-    if (!mSourceStarted) {
-        ALOGV("Starting real source from client (%d)", clientId);
-        status_t err = mSource->start(params);
-
-        if (err == OK) {
-            mSourceStarted = true;
-            mClientsStarted.editItemAt(clientId) = true;
-            mClientsDesiredReadBit.editItemAt(clientId) = !mCurrentReadBit;
-        }
-
-        return err;
-    } else {
-        mClientsStarted.editItemAt(clientId) = true;
-        if (mLastReadCompleted) {
-            // Last read was completed. So join in the threads for the next read.
-            mClientsDesiredReadBit.editItemAt(clientId) = !mCurrentReadBit;
-        } else {
-            // Last read is ongoing. So join in the threads for the current read.
-            mClientsDesiredReadBit.editItemAt(clientId) = mCurrentReadBit;
-        }
-        return OK;
-    }
-}
-
-status_t MediaSourceSplitter::stop(int clientId) {
-    Mutex::Autolock autoLock(mLock);
-
-    ALOGV("stop client (%d)", clientId);
-    CHECK(clientId >= 0 && clientId < mNumberOfClients);
-    CHECK(mClientsStarted[clientId]);
-
-    if (--mNumberOfClientsStarted == 0) {
-        ALOGV("Stopping real source from client (%d)", clientId);
-        status_t err = mSource->stop();
-        mSourceStarted = false;
-        mClientsStarted.editItemAt(clientId) = false;
-        return err;
-    } else {
-        mClientsStarted.editItemAt(clientId) = false;
-        if (!mLastReadCompleted && (mClientsDesiredReadBit[clientId] == mCurrentReadBit)) {
-            // !mLastReadCompleted implies that buffer has been read from source, but all
-            // clients haven't read it.
-            // mClientsDesiredReadBit[clientId] == mCurrentReadBit implies that this
-            // client would have wanted to read from this buffer. (i.e. it has not yet
-            // called read() for the current read buffer.)
-            // Since other threads may be waiting for all the clients' reads to complete,
-            // signal that this read has been aborted.
-            signalReadComplete_lock(true);
-        }
-        return OK;
-    }
-}
-
-sp<MetaData> MediaSourceSplitter::getFormat(int clientId) {
-    Mutex::Autolock autoLock(mLock);
-
-    ALOGV("getFormat client (%d)", clientId);
-    return mSource->getFormat();
-}
-
-status_t MediaSourceSplitter::read(int clientId,
-        MediaBuffer **buffer, const MediaSource::ReadOptions *options) {
-    Mutex::Autolock autoLock(mLock);
-
-    CHECK(clientId >= 0 && clientId < mNumberOfClients);
-
-    ALOGV("read client (%d)", clientId);
-    *buffer = NULL;
-
-    if (!mClientsStarted[clientId]) {
-        return OK;
-    }
-
-    if (mCurrentReadBit != mClientsDesiredReadBit[clientId]) {
-        // Desired buffer has not been read from source yet.
-
-        // If the current client is the special client with clientId = 0
-        // then read from source, else wait until the client 0 has finished
-        // reading from source.
-        if (clientId == 0) {
-            // Wait for all client's last read to complete first so as to not
-            // corrupt the buffer at mLastReadMediaBuffer.
-            waitForAllClientsLastRead_lock(clientId);
-
-            readFromSource_lock(options);
-            *buffer = mLastReadMediaBuffer;
-        } else {
-            waitForReadFromSource_lock(clientId);
-
-            *buffer = mLastReadMediaBuffer;
-            (*buffer)->add_ref();
-        }
-        CHECK(mCurrentReadBit == mClientsDesiredReadBit[clientId]);
-    } else {
-        // Desired buffer has already been read from source. Use the cached data.
-        CHECK(clientId != 0);
-
-        *buffer = mLastReadMediaBuffer;
-        (*buffer)->add_ref();
-    }
-
-    mClientsDesiredReadBit.editItemAt(clientId) = !mClientsDesiredReadBit[clientId];
-    signalReadComplete_lock(false);
-
-    return mLastReadStatus;
-}
-
-void MediaSourceSplitter::readFromSource_lock(const MediaSource::ReadOptions *options) {
-    mLastReadStatus = mSource->read(&mLastReadMediaBuffer , options);
-
-    mCurrentReadBit = !mCurrentReadBit;
-    mLastReadCompleted = false;
-    mReadFromSourceCondition.broadcast();
-}
-
-void MediaSourceSplitter::waitForReadFromSource_lock(int32_t clientId) {
-    mReadFromSourceCondition.wait(mLock);
-}
-
-void MediaSourceSplitter::waitForAllClientsLastRead_lock(int32_t clientId) {
-    if (mLastReadCompleted) {
-        return;
-    }
-    mAllReadsCompleteCondition.wait(mLock);
-    CHECK(mLastReadCompleted);
-}
-
-void MediaSourceSplitter::signalReadComplete_lock(bool readAborted) {
-    if (!readAborted) {
-        mNumberOfCurrentReads++;
-    }
-
-    if (mNumberOfCurrentReads == mNumberOfClientsStarted) {
-        mLastReadCompleted = true;
-        mNumberOfCurrentReads = 0;
-        mAllReadsCompleteCondition.broadcast();
-    }
-}
-
-status_t MediaSourceSplitter::pause(int clientId) {
-    return ERROR_UNSUPPORTED;
-}
-
-// Client
-
-MediaSourceSplitter::Client::Client(
-        sp<MediaSourceSplitter> splitter,
-        int32_t clientId) {
-    mSplitter = splitter;
-    mClientId = clientId;
-}
-
-status_t MediaSourceSplitter::Client::start(MetaData *params) {
-    return mSplitter->start(mClientId, params);
-}
-
-status_t MediaSourceSplitter::Client::stop() {
-    return mSplitter->stop(mClientId);
-}
-
-sp<MetaData> MediaSourceSplitter::Client::getFormat() {
-    return mSplitter->getFormat(mClientId);
-}
-
-status_t MediaSourceSplitter::Client::read(
-        MediaBuffer **buffer, const ReadOptions *options) {
-    return mSplitter->read(mClientId, buffer, options);
-}
-
-status_t MediaSourceSplitter::Client::pause() {
-    return mSplitter->pause(mClientId);
-}
-
-}  // namespace android
diff --git a/media/libstagefright/MetaData.cpp b/media/libstagefright/MetaData.cpp
index 884f3b4..66dec90 100644
--- a/media/libstagefright/MetaData.cpp
+++ b/media/libstagefright/MetaData.cpp
@@ -17,7 +17,7 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MetaData.h>
 
 namespace android {
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index 249c298..0957426 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -370,6 +370,7 @@
                     && (mSource->flags() & DataSource::kIsHTTPBasedSource)) {
                 ALOGV("Disconnecting at high watermark");
                 static_cast<HTTPBase *>(mSource.get())->disconnect();
+                mFinalStatus = -EAGAIN;
             }
         }
     } else {
@@ -549,7 +550,7 @@
 
     size_t delta = offset - mCacheOffset;
 
-    if (mFinalStatus != OK) {
+    if (mFinalStatus != OK && mNumRetriesLeft == 0) {
         if (delta >= mCache->totalSize()) {
             return mFinalStatus;
         }
@@ -591,7 +592,7 @@
     size_t totalSize = mCache->totalSize();
     CHECK_EQ(mCache->releaseFromStart(totalSize), totalSize);
 
-    mFinalStatus = OK;
+    mNumRetriesLeft = kMaxNumRetries;
     mFetching = true;
 
     return OK;
@@ -603,8 +604,8 @@
     restartPrefetcherIfNecessary_l(true /* ignore low water threshold */);
 }
 
-sp<DecryptHandle> NuCachedSource2::DrmInitialization() {
-    return mSource->DrmInitialization();
+sp<DecryptHandle> NuCachedSource2::DrmInitialization(const char* mime) {
+    return mSource->DrmInitialization(mime);
 }
 
 void NuCachedSource2::getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client) {
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
new file mode 100644
index 0000000..afd4763
--- /dev/null
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -0,0 +1,433 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NuMediaExtractor"
+#include <utils/Log.h>
+
+#include <media/stagefright/NuMediaExtractor.h>
+
+#include "include/ESDS.h"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+NuMediaExtractor::NuMediaExtractor() {
+}
+
+NuMediaExtractor::~NuMediaExtractor() {
+    releaseTrackSamples();
+
+    for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
+        TrackInfo *info = &mSelectedTracks.editItemAt(i);
+
+        CHECK_EQ((status_t)OK, info->mSource->stop());
+    }
+
+    mSelectedTracks.clear();
+}
+
+status_t NuMediaExtractor::setDataSource(const char *path) {
+    sp<DataSource> dataSource = DataSource::CreateFromURI(path);
+
+    if (dataSource == NULL) {
+        return -ENOENT;
+    }
+
+    mImpl = MediaExtractor::Create(dataSource);
+
+    if (mImpl == NULL) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    return OK;
+}
+
+size_t NuMediaExtractor::countTracks() const {
+    return mImpl == NULL ? 0 : mImpl->countTracks();
+}
+
+status_t NuMediaExtractor::getTrackFormat(
+        size_t index, sp<AMessage> *format) const {
+    *format = NULL;
+
+    if (mImpl == NULL) {
+        return -EINVAL;
+    }
+
+    if (index >= mImpl->countTracks()) {
+        return -ERANGE;
+    }
+
+    sp<MetaData> meta = mImpl->getTrackMetaData(index);
+
+    const char *mime;
+    CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+    sp<AMessage> msg = new AMessage;
+    msg->setString("mime", mime);
+
+    if (!strncasecmp("video/", mime, 6)) {
+        int32_t width, height;
+        CHECK(meta->findInt32(kKeyWidth, &width));
+        CHECK(meta->findInt32(kKeyHeight, &height));
+
+        msg->setInt32("width", width);
+        msg->setInt32("height", height);
+    } else {
+        CHECK(!strncasecmp("audio/", mime, 6));
+
+        int32_t numChannels, sampleRate;
+        CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+        CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+
+        msg->setInt32("channel-count", numChannels);
+        msg->setInt32("sample-rate", sampleRate);
+    }
+
+    int32_t maxInputSize;
+    if (meta->findInt32(kKeyMaxInputSize, &maxInputSize)) {
+        msg->setInt32("max-input-size", maxInputSize);
+    }
+
+    uint32_t type;
+    const void *data;
+    size_t size;
+    if (meta->findData(kKeyAVCC, &type, &data, &size)) {
+        // Parse the AVCDecoderConfigurationRecord
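+        // and convert its length-prefixed SPS/PPS parameter sets into
+        // Annex B buffers (4-byte start codes), exposed as "csd-0" and
+        // "csd-1" respectively.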
+
+        const uint8_t *ptr = (const uint8_t *)data;
+
+        CHECK(size >= 7);
+        CHECK_EQ((unsigned)ptr[0], 1u);  // configurationVersion == 1
+        uint8_t profile = ptr[1];
+        uint8_t level = ptr[3];
+
+        // There is decodable content out there that fails the following
+        // assertion; let's be lenient for now...
+        // CHECK((ptr[4] >> 2) == 0x3f);  // reserved
+
+        size_t lengthSize = 1 + (ptr[4] & 3);
+
+        // commented out check below as H264_QVGA_500_NO_AUDIO.3gp
+        // violates it...
+        // CHECK((ptr[5] >> 5) == 7);  // reserved
+
+        size_t numSeqParameterSets = ptr[5] & 31;
+
+        ptr += 6;
+        size -= 6;
+
+        sp<ABuffer> buffer = new ABuffer(1024);
+        buffer->setRange(0, 0);
+
+        for (size_t i = 0; i < numSeqParameterSets; ++i) {
+            CHECK(size >= 2);
+            size_t length = U16_AT(ptr);
+
+            ptr += 2;
+            size -= 2;
+
+            CHECK(size >= length);
+
+            memcpy(buffer->data() + buffer->size(), "\x00\x00\x00\x01", 4);
+            memcpy(buffer->data() + buffer->size() + 4, ptr, length);
+            buffer->setRange(0, buffer->size() + 4 + length);
+
+            ptr += length;
+            size -= length;
+        }
+
+        buffer->meta()->setInt32("csd", true);
+        buffer->meta()->setInt64("timeUs", 0);
+
+        msg->setBuffer("csd-0", buffer);
+
+        buffer = new ABuffer(1024);
+        buffer->setRange(0, 0);
+
+        CHECK(size >= 1);
+        size_t numPictureParameterSets = *ptr;
+        ++ptr;
+        --size;
+
+        for (size_t i = 0; i < numPictureParameterSets; ++i) {
+            CHECK(size >= 2);
+            size_t length = U16_AT(ptr);
+
+            ptr += 2;
+            size -= 2;
+
+            CHECK(size >= length);
+
+            memcpy(buffer->data() + buffer->size(), "\x00\x00\x00\x01", 4);
+            memcpy(buffer->data() + buffer->size() + 4, ptr, length);
+            buffer->setRange(0, buffer->size() + 4 + length);
+
+            ptr += length;
+            size -= length;
+        }
+
+        buffer->meta()->setInt32("csd", true);
+        buffer->meta()->setInt64("timeUs", 0);
+        msg->setBuffer("csd-1", buffer);
+    } else if (meta->findData(kKeyESDS, &type, &data, &size)) {
+        ESDS esds((const char *)data, size);
+        CHECK_EQ(esds.InitCheck(), (status_t)OK);
+
+        const void *codec_specific_data;
+        size_t codec_specific_data_size;
+        esds.getCodecSpecificInfo(
+                &codec_specific_data, &codec_specific_data_size);
+
+        sp<ABuffer> buffer = new ABuffer(codec_specific_data_size);
+
+        memcpy(buffer->data(), codec_specific_data,
+               codec_specific_data_size);
+
+        buffer->meta()->setInt32("csd", true);
+        buffer->meta()->setInt64("timeUs", 0);
+        msg->setBuffer("csd-0", buffer);
+    } else if (meta->findData(kKeyVorbisInfo, &type, &data, &size)) {
+        sp<ABuffer> buffer = new ABuffer(size);
+        memcpy(buffer->data(), data, size);
+
+        buffer->meta()->setInt32("csd", true);
+        buffer->meta()->setInt64("timeUs", 0);
+        msg->setBuffer("csd-0", buffer);
+
+        if (!meta->findData(kKeyVorbisBooks, &type, &data, &size)) {
+            return -EINVAL;
+        }
+
+        buffer = new ABuffer(size);
+        memcpy(buffer->data(), data, size);
+
+        buffer->meta()->setInt32("csd", true);
+        buffer->meta()->setInt64("timeUs", 0);
+        msg->setBuffer("csd-1", buffer);
+    }
+
+    *format = msg;
+
+    return OK;
+}
+
+status_t NuMediaExtractor::selectTrack(size_t index) {
+    if (mImpl == NULL) {
+        return -EINVAL;
+    }
+
+    if (index >= mImpl->countTracks()) {
+        return -ERANGE;
+    }
+
+    for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
+        TrackInfo *info = &mSelectedTracks.editItemAt(i);
+
+        if (info->mTrackIndex == index) {
+            // This track has already been selected.
+            return OK;
+        }
+    }
+
+    sp<MediaSource> source = mImpl->getTrack(index);
+
+    CHECK_EQ((status_t)OK, source->start());
+
+    mSelectedTracks.push();
+    TrackInfo *info = &mSelectedTracks.editItemAt(mSelectedTracks.size() - 1);
+
+    info->mSource = source;
+    info->mTrackIndex = index;
+    info->mFinalResult = OK;
+    info->mSample = NULL;
+    info->mSampleTimeUs = -1ll;
+    info->mFlags = 0;
+
+    const char *mime;
+    CHECK(source->getFormat()->findCString(kKeyMIMEType, &mime));
+
+    if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) {
+        info->mFlags |= kIsVorbis;
+    }
+
+    return OK;
+}
+
+void NuMediaExtractor::releaseTrackSamples() {
+    for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
+        TrackInfo *info = &mSelectedTracks.editItemAt(i);
+
+        if (info->mSample != NULL) {
+            info->mSample->release();
+            info->mSample = NULL;
+
+            info->mSampleTimeUs = -1ll;
+        }
+    }
+}
+
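+// Makes sure each selected track that hasn't hit an error has a buffered
+// sample (seeking all tracks first when seekTimeUs is non-negative) and
+// returns the index of the track whose sample has the smallest timestamp,
+// or -1 if no samples remain.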
+ssize_t NuMediaExtractor::fetchTrackSamples(int64_t seekTimeUs) {
+    TrackInfo *minInfo = NULL;
+    ssize_t minIndex = -1;
+
+    for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
+        TrackInfo *info = &mSelectedTracks.editItemAt(i);
+
+        if (seekTimeUs >= 0ll) {
+            info->mFinalResult = OK;
+
+            if (info->mSample != NULL) {
+                info->mSample->release();
+                info->mSample = NULL;
+                info->mSampleTimeUs = -1ll;
+            }
+        } else if (info->mFinalResult != OK) {
+            continue;
+        }
+
+        if (info->mSample == NULL) {
+            MediaSource::ReadOptions options;
+            if (seekTimeUs >= 0ll) {
+                options.setSeekTo(seekTimeUs);
+            }
+            status_t err = info->mSource->read(&info->mSample, &options);
+
+            if (err != OK) {
+                CHECK(info->mSample == NULL);
+
+                info->mFinalResult = err;
+                info->mSampleTimeUs = -1ll;
+                continue;
+            } else {
+                CHECK(info->mSample != NULL);
+                CHECK(info->mSample->meta_data()->findInt64(
+                            kKeyTime, &info->mSampleTimeUs));
+            }
+        }
+
+        if (minInfo == NULL || info->mSampleTimeUs < minInfo->mSampleTimeUs) {
+            minInfo = info;
+            minIndex = i;
+        }
+    }
+
+    return minIndex;
+}
+
+status_t NuMediaExtractor::seekTo(int64_t timeUs) {
+    return fetchTrackSamples(timeUs);
+}
+
+status_t NuMediaExtractor::advance() {
+    ssize_t minIndex = fetchTrackSamples();
+
+    if (minIndex < 0) {
+        return ERROR_END_OF_STREAM;
+    }
+
+    TrackInfo *info = &mSelectedTracks.editItemAt(minIndex);
+
+    info->mSample->release();
+    info->mSample = NULL;
+    info->mSampleTimeUs = -1ll;
+
+    return OK;
+}
+
+status_t NuMediaExtractor::readSampleData(const sp<ABuffer> &buffer) {
+    ssize_t minIndex = fetchTrackSamples();
+
+    if (minIndex < 0) {
+        return ERROR_END_OF_STREAM;
+    }
+
+    TrackInfo *info = &mSelectedTracks.editItemAt(minIndex);
+
+    size_t sampleSize = info->mSample->range_length();
+
+    if (info->mFlags & kIsVorbis) {
+        // Each sample's data is suffixed by the number of page samples
+        // or -1 if not available.
+        sampleSize += sizeof(int32_t);
+    }
+
+    if (buffer->capacity() < sampleSize) {
+        return -ENOMEM;
+    }
+
+    const uint8_t *src =
+        (const uint8_t *)info->mSample->data()
+            + info->mSample->range_offset();
+
+    memcpy((uint8_t *)buffer->data(), src, info->mSample->range_length());
+
+    if (info->mFlags & kIsVorbis) {
+        int32_t numPageSamples;
+        if (!info->mSample->meta_data()->findInt32(
+                    kKeyValidSamples, &numPageSamples)) {
+            numPageSamples = -1;
+        }
+
+        memcpy((uint8_t *)buffer->data() + info->mSample->range_length(),
+               &numPageSamples,
+               sizeof(numPageSamples));
+    }
+
+    buffer->setRange(0, sampleSize);
+
+    return OK;
+}
+
+status_t NuMediaExtractor::getSampleTrackIndex(size_t *trackIndex) {
+    ssize_t minIndex = fetchTrackSamples();
+
+    if (minIndex < 0) {
+        return ERROR_END_OF_STREAM;
+    }
+
+    TrackInfo *info = &mSelectedTracks.editItemAt(minIndex);
+    *trackIndex = info->mTrackIndex;
+
+    return OK;
+}
+
+status_t NuMediaExtractor::getSampleTime(int64_t *sampleTimeUs) {
+    ssize_t minIndex = fetchTrackSamples();
+
+    if (minIndex < 0) {
+        return ERROR_END_OF_STREAM;
+    }
+
+    TrackInfo *info = &mSelectedTracks.editItemAt(minIndex);
+    *sampleTimeUs = info->mSampleTimeUs;
+
+    return OK;
+}
+
+}  // namespace android
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index 9de873e..7cdb793 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -20,11 +20,299 @@
 
 #include <binder/IServiceManager.h>
 #include <media/IMediaPlayerService.h>
-#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/OMXClient.h>
+#include <utils/KeyedVector.h>
+
+#include "include/OMX.h"
 
 namespace android {
 
+struct MuxOMX : public IOMX {
+    MuxOMX(const sp<IOMX> &remoteOMX);
+    virtual ~MuxOMX();
+
+    virtual IBinder *onAsBinder() { return NULL; }
+
+    virtual bool livesLocally(node_id node, pid_t pid);
+
+    virtual status_t listNodes(List<ComponentInfo> *list);
+
+    virtual status_t allocateNode(
+            const char *name, const sp<IOMXObserver> &observer,
+            node_id *node);
+
+    virtual status_t freeNode(node_id node);
+
+    virtual status_t sendCommand(
+            node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param);
+
+    virtual status_t getParameter(
+            node_id node, OMX_INDEXTYPE index,
+            void *params, size_t size);
+
+    virtual status_t setParameter(
+            node_id node, OMX_INDEXTYPE index,
+            const void *params, size_t size);
+
+    virtual status_t getConfig(
+            node_id node, OMX_INDEXTYPE index,
+            void *params, size_t size);
+
+    virtual status_t setConfig(
+            node_id node, OMX_INDEXTYPE index,
+            const void *params, size_t size);
+
+    virtual status_t getState(
+            node_id node, OMX_STATETYPE* state);
+
+    virtual status_t storeMetaDataInBuffers(
+            node_id node, OMX_U32 port_index, OMX_BOOL enable);
+
+    virtual status_t enableGraphicBuffers(
+            node_id node, OMX_U32 port_index, OMX_BOOL enable);
+
+    virtual status_t getGraphicBufferUsage(
+            node_id node, OMX_U32 port_index, OMX_U32* usage);
+
+    virtual status_t useBuffer(
+            node_id node, OMX_U32 port_index, const sp<IMemory> &params,
+            buffer_id *buffer);
+
+    virtual status_t useGraphicBuffer(
+            node_id node, OMX_U32 port_index,
+            const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer);
+
+    virtual status_t allocateBuffer(
+            node_id node, OMX_U32 port_index, size_t size,
+            buffer_id *buffer, void **buffer_data);
+
+    virtual status_t allocateBufferWithBackup(
+            node_id node, OMX_U32 port_index, const sp<IMemory> &params,
+            buffer_id *buffer);
+
+    virtual status_t freeBuffer(
+            node_id node, OMX_U32 port_index, buffer_id buffer);
+
+    virtual status_t fillBuffer(node_id node, buffer_id buffer);
+
+    virtual status_t emptyBuffer(
+            node_id node,
+            buffer_id buffer,
+            OMX_U32 range_offset, OMX_U32 range_length,
+            OMX_U32 flags, OMX_TICKS timestamp);
+
+    virtual status_t getExtensionIndex(
+            node_id node,
+            const char *parameter_name,
+            OMX_INDEXTYPE *index);
+
+private:
+    mutable Mutex mLock;
+
+    sp<IOMX> mRemoteOMX;
+    sp<IOMX> mLocalOMX;
+
+    KeyedVector<node_id, bool> mIsLocalNode;
+
+    bool isLocalNode(node_id node) const;
+    bool isLocalNode_l(node_id node) const;
+    const sp<IOMX> &getOMX(node_id node) const;
+    const sp<IOMX> &getOMX_l(node_id node) const;
+
+    static bool IsSoftwareComponent(const char *name);
+
+    DISALLOW_EVIL_CONSTRUCTORS(MuxOMX);
+};
+
+MuxOMX::MuxOMX(const sp<IOMX> &remoteOMX)
+    : mRemoteOMX(remoteOMX) {
+}
+
+MuxOMX::~MuxOMX() {
+}
+
+bool MuxOMX::isLocalNode(node_id node) const {
+    Mutex::Autolock autoLock(mLock);
+
+    return isLocalNode_l(node);
+}
+
+bool MuxOMX::isLocalNode_l(node_id node) const {
+    return mIsLocalNode.indexOfKey(node) >= 0;
+}
+
+// static
+bool MuxOMX::IsSoftwareComponent(const char *name) {
+    return !strncasecmp(name, "OMX.google.", 11);
+}
+
+const sp<IOMX> &MuxOMX::getOMX(node_id node) const {
+    return isLocalNode(node) ? mLocalOMX : mRemoteOMX;
+}
+
+const sp<IOMX> &MuxOMX::getOMX_l(node_id node) const {
+    return isLocalNode_l(node) ? mLocalOMX : mRemoteOMX;
+}
+
+bool MuxOMX::livesLocally(node_id node, pid_t pid) {
+    return getOMX(node)->livesLocally(node, pid);
+}
+
+status_t MuxOMX::listNodes(List<ComponentInfo> *list) {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mLocalOMX == NULL) {
+        mLocalOMX = new OMX;
+    }
+
+    return mLocalOMX->listNodes(list);
+}
+
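+// Nodes for software components ("OMX.google.*") are allocated on a
+// lazily-created, process-local OMX instance and remembered in mIsLocalNode;
+// all other components are allocated through the media server's remote OMX.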
+status_t MuxOMX::allocateNode(
+        const char *name, const sp<IOMXObserver> &observer,
+        node_id *node) {
+    Mutex::Autolock autoLock(mLock);
+
+    sp<IOMX> omx;
+
+    if (IsSoftwareComponent(name)) {
+        if (mLocalOMX == NULL) {
+            mLocalOMX = new OMX;
+        }
+        omx = mLocalOMX;
+    } else {
+        omx = mRemoteOMX;
+    }
+
+    status_t err = omx->allocateNode(name, observer, node);
+
+    if (err != OK) {
+        return err;
+    }
+
+    if (omx == mLocalOMX) {
+        mIsLocalNode.add(*node, true);
+    }
+
+    return OK;
+}
+
+status_t MuxOMX::freeNode(node_id node) {
+    Mutex::Autolock autoLock(mLock);
+
+    status_t err = getOMX_l(node)->freeNode(node);
+
+    if (err != OK) {
+        return err;
+    }
+
+    mIsLocalNode.removeItem(node);
+
+    return OK;
+}
+
+status_t MuxOMX::sendCommand(
+        node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param) {
+    return getOMX(node)->sendCommand(node, cmd, param);
+}
+
+status_t MuxOMX::getParameter(
+        node_id node, OMX_INDEXTYPE index,
+        void *params, size_t size) {
+    return getOMX(node)->getParameter(node, index, params, size);
+}
+
+status_t MuxOMX::setParameter(
+        node_id node, OMX_INDEXTYPE index,
+        const void *params, size_t size) {
+    return getOMX(node)->setParameter(node, index, params, size);
+}
+
+status_t MuxOMX::getConfig(
+        node_id node, OMX_INDEXTYPE index,
+        void *params, size_t size) {
+    return getOMX(node)->getConfig(node, index, params, size);
+}
+
+status_t MuxOMX::setConfig(
+        node_id node, OMX_INDEXTYPE index,
+        const void *params, size_t size) {
+    return getOMX(node)->setConfig(node, index, params, size);
+}
+
+status_t MuxOMX::getState(
+        node_id node, OMX_STATETYPE* state) {
+    return getOMX(node)->getState(node, state);
+}
+
+status_t MuxOMX::storeMetaDataInBuffers(
+        node_id node, OMX_U32 port_index, OMX_BOOL enable) {
+    return getOMX(node)->storeMetaDataInBuffers(node, port_index, enable);
+}
+
+status_t MuxOMX::enableGraphicBuffers(
+        node_id node, OMX_U32 port_index, OMX_BOOL enable) {
+    return getOMX(node)->enableGraphicBuffers(node, port_index, enable);
+}
+
+status_t MuxOMX::getGraphicBufferUsage(
+        node_id node, OMX_U32 port_index, OMX_U32* usage) {
+    return getOMX(node)->getGraphicBufferUsage(node, port_index, usage);
+}
+
+status_t MuxOMX::useBuffer(
+        node_id node, OMX_U32 port_index, const sp<IMemory> &params,
+        buffer_id *buffer) {
+    return getOMX(node)->useBuffer(node, port_index, params, buffer);
+}
+
+status_t MuxOMX::useGraphicBuffer(
+        node_id node, OMX_U32 port_index,
+        const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) {
+    return getOMX(node)->useGraphicBuffer(
+            node, port_index, graphicBuffer, buffer);
+}
+
+status_t MuxOMX::allocateBuffer(
+        node_id node, OMX_U32 port_index, size_t size,
+        buffer_id *buffer, void **buffer_data) {
+    return getOMX(node)->allocateBuffer(
+            node, port_index, size, buffer, buffer_data);
+}
+
+status_t MuxOMX::allocateBufferWithBackup(
+        node_id node, OMX_U32 port_index, const sp<IMemory> &params,
+        buffer_id *buffer) {
+    return getOMX(node)->allocateBufferWithBackup(
+            node, port_index, params, buffer);
+}
+
+status_t MuxOMX::freeBuffer(
+        node_id node, OMX_U32 port_index, buffer_id buffer) {
+    return getOMX(node)->freeBuffer(node, port_index, buffer);
+}
+
+status_t MuxOMX::fillBuffer(node_id node, buffer_id buffer) {
+    return getOMX(node)->fillBuffer(node, buffer);
+}
+
+status_t MuxOMX::emptyBuffer(
+        node_id node,
+        buffer_id buffer,
+        OMX_U32 range_offset, OMX_U32 range_length,
+        OMX_U32 flags, OMX_TICKS timestamp) {
+    return getOMX(node)->emptyBuffer(
+            node, buffer, range_offset, range_length, flags, timestamp);
+}
+
+status_t MuxOMX::getExtensionIndex(
+        node_id node,
+        const char *parameter_name,
+        OMX_INDEXTYPE *index) {
+    return getOMX(node)->getExtensionIndex(node, parameter_name, index);
+}
+
 OMXClient::OMXClient() {
 }
 
@@ -38,10 +326,19 @@
     mOMX = service->getOMX();
     CHECK(mOMX.get() != NULL);
 
+    if (!mOMX->livesLocally(NULL /* node */, getpid())) {
+        ALOGI("Using client-side OMX mux.");
+        mOMX = new MuxOMX(mOMX);
+    }
+
     return OK;
 }
 
 void OMXClient::disconnect() {
+    if (mOMX.get() != NULL) {
+        mOMX.clear();
+        mOMX = NULL;
+    }
 }
 
 }  // namespace android
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 60d9bb7..d5e6bec 100755
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -19,8 +19,6 @@
 #include <utils/Log.h>
 
 #include "include/AACEncoder.h"
-#include "include/AMRNBEncoder.h"
-#include "include/AMRWBEncoder.h"
 #include "include/AVCEncoder.h"
 #include "include/M4vH263Encoder.h"
 
@@ -29,12 +27,13 @@
 #include <binder/IServiceManager.h>
 #include <binder/MemoryDealer.h>
 #include <binder/ProcessState.h>
+#include <HardwareAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/IMediaPlayerService.h>
-#include <media/stagefright/HardwareAPI.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaBufferGroup.h>
 #include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/OMXCodec.h>
@@ -59,11 +58,6 @@
 // component in question is buggy or not.
 const static uint32_t kMaxColorFormatSupported = 1000;
 
-struct CodecInfo {
-    const char *mime;
-    const char *codec;
-};
-
 #define FACTORY_CREATE_ENCODER(name) \
 static sp<MediaSource> Make##name(const sp<MediaSource> &source, const sp<MetaData> &meta) { \
     return new name(source, meta); \
@@ -71,8 +65,6 @@
 
 #define FACTORY_REF(name) { #name, Make##name },
 
-FACTORY_CREATE_ENCODER(AMRNBEncoder)
-FACTORY_CREATE_ENCODER(AMRWBEncoder)
 FACTORY_CREATE_ENCODER(AACEncoder)
 FACTORY_CREATE_ENCODER(AVCEncoder)
 FACTORY_CREATE_ENCODER(M4vH263Encoder)
@@ -86,8 +78,6 @@
     };
 
     static const FactoryInfo kFactoryInfo[] = {
-        FACTORY_REF(AMRNBEncoder)
-        FACTORY_REF(AMRWBEncoder)
         FACTORY_REF(AACEncoder)
         FACTORY_REF(AVCEncoder)
         FACTORY_REF(M4vH263Encoder)
@@ -102,82 +92,8 @@
     return NULL;
 }
 
+#undef FACTORY_CREATE_ENCODER
 #undef FACTORY_REF
-#undef FACTORY_CREATE
-
-static const CodecInfo kDecoderInfo[] = {
-    { MEDIA_MIMETYPE_IMAGE_JPEG, "OMX.TI.JPEG.decode" },
-//    { MEDIA_MIMETYPE_AUDIO_MPEG, "OMX.TI.MP3.decode" },
-    { MEDIA_MIMETYPE_AUDIO_MPEG, "OMX.google.mp3.decoder" },
-    { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II, "OMX.Nvidia.mp2.decoder" },
-//    { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.TI.AMR.decode" },
-//    { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.Nvidia.amr.decoder" },
-    { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.google.amrnb.decoder" },
-//    { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.Nvidia.amrwb.decoder" },
-    { MEDIA_MIMETYPE_AUDIO_AMR_WB, "OMX.TI.WBAMR.decode" },
-    { MEDIA_MIMETYPE_AUDIO_AMR_WB, "OMX.google.amrwb.decoder" },
-//    { MEDIA_MIMETYPE_AUDIO_AAC, "OMX.Nvidia.aac.decoder" },
-    { MEDIA_MIMETYPE_AUDIO_AAC, "OMX.TI.AAC.decode" },
-    { MEDIA_MIMETYPE_AUDIO_AAC, "OMX.google.aac.decoder" },
-    { MEDIA_MIMETYPE_AUDIO_G711_ALAW, "OMX.google.g711.alaw.decoder" },
-    { MEDIA_MIMETYPE_AUDIO_G711_MLAW, "OMX.google.g711.mlaw.decoder" },
-    { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.DUCATI1.VIDEO.DECODER" },
-    { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.Nvidia.mp4.decode" },
-    { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.7x30.video.decoder.mpeg4" },
-    { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.video.decoder.mpeg4" },
-    { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.Video.Decoder" },
-    { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.SEC.MPEG4.Decoder" },
-    { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.google.mpeg4.decoder" },
-    { MEDIA_MIMETYPE_VIDEO_H263, "OMX.TI.DUCATI1.VIDEO.DECODER" },
-    { MEDIA_MIMETYPE_VIDEO_H263, "OMX.Nvidia.h263.decode" },
-    { MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.7x30.video.decoder.h263" },
-    { MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.video.decoder.h263" },
-    { MEDIA_MIMETYPE_VIDEO_H263, "OMX.SEC.H263.Decoder" },
-    { MEDIA_MIMETYPE_VIDEO_H263, "OMX.google.h263.decoder" },
-    { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.TI.DUCATI1.VIDEO.DECODER" },
-    { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.Nvidia.h264.decode" },
-    { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.7x30.video.decoder.avc" },
-    { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.video.decoder.avc" },
-    { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.TI.Video.Decoder" },
-    { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.SEC.AVC.Decoder" },
-    { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.google.h264.decoder" },
-    { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.google.avc.decoder" },
-    { MEDIA_MIMETYPE_AUDIO_VORBIS, "OMX.google.vorbis.decoder" },
-    { MEDIA_MIMETYPE_VIDEO_VPX, "OMX.google.vpx.decoder" },
-    { MEDIA_MIMETYPE_VIDEO_MPEG2, "OMX.Nvidia.mpeg2v.decode" },
-};
-
-static const CodecInfo kEncoderInfo[] = {
-    { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.TI.AMR.encode" },
-    { MEDIA_MIMETYPE_AUDIO_AMR_NB, "AMRNBEncoder" },
-    { MEDIA_MIMETYPE_AUDIO_AMR_WB, "OMX.TI.WBAMR.encode" },
-    { MEDIA_MIMETYPE_AUDIO_AMR_WB, "AMRWBEncoder" },
-    { MEDIA_MIMETYPE_AUDIO_AAC, "OMX.TI.AAC.encode" },
-    { MEDIA_MIMETYPE_AUDIO_AAC, "AACEncoder" },
-    { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.DUCATI1.VIDEO.MPEG4E" },
-    { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.7x30.video.encoder.mpeg4" },
-    { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.video.encoder.mpeg4" },
-    { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.Video.encoder" },
-    { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.Nvidia.mp4.encoder" },
-    { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.SEC.MPEG4.Encoder" },
-    { MEDIA_MIMETYPE_VIDEO_MPEG4, "M4vH263Encoder" },
-    { MEDIA_MIMETYPE_VIDEO_H263, "OMX.TI.DUCATI1.VIDEO.MPEG4E" },
-    { MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.7x30.video.encoder.h263" },
-    { MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.video.encoder.h263" },
-    { MEDIA_MIMETYPE_VIDEO_H263, "OMX.TI.Video.encoder" },
-    { MEDIA_MIMETYPE_VIDEO_H263, "OMX.Nvidia.h263.encoder" },
-    { MEDIA_MIMETYPE_VIDEO_H263, "OMX.SEC.H263.Encoder" },
-    { MEDIA_MIMETYPE_VIDEO_H263, "M4vH263Encoder" },
-    { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.TI.DUCATI1.VIDEO.H264E" },
-    { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.7x30.video.encoder.avc" },
-    { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.video.encoder.avc" },
-    { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.TI.Video.encoder" },
-    { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.Nvidia.h264.encoder" },
-    { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.SEC.AVC.Encoder" },
-    { MEDIA_MIMETYPE_VIDEO_AVC, "AVCEncoder" },
-};
-
-#undef OPTIONAL
 
 #define CODEC_LOGI(x, ...) ALOGI("[%s] "x, mComponentName, ##__VA_ARGS__)
 #define CODEC_LOGV(x, ...) ALOGV("[%s] "x, mComponentName, ##__VA_ARGS__)
@@ -212,22 +128,6 @@
     OMXCodecObserver &operator=(const OMXCodecObserver &);
 };
 
-static const char *GetCodec(const CodecInfo *info, size_t numInfos,
-                            const char *mime, int index) {
-    CHECK(index >= 0);
-    for(size_t i = 0; i < numInfos; ++i) {
-        if (!strcasecmp(mime, info[i].mime)) {
-            if (index == 0) {
-                return info[i].codec;
-            }
-
-            --index;
-        }
-    }
-
-    return NULL;
-}
-
 template<class T>
 static void InitOMXParams(T *params) {
     params->nSize = sizeof(T);
@@ -283,119 +183,36 @@
 }
 
 // static
-uint32_t OMXCodec::getComponentQuirks(
-        const char *componentName, bool isEncoder) {
-    uint32_t quirks = 0;
-
-    if (!strcmp(componentName, "OMX.Nvidia.amr.decoder") ||
-         !strcmp(componentName, "OMX.Nvidia.amrwb.decoder") ||
-         !strcmp(componentName, "OMX.Nvidia.aac.decoder") ||
-         !strcmp(componentName, "OMX.Nvidia.mp3.decoder")) {
-        quirks |= kDecoderLiesAboutNumberOfChannels;
-    }
-
-    if (!strcmp(componentName, "OMX.TI.MP3.decode")) {
-        quirks |= kNeedsFlushBeforeDisable;
-        quirks |= kDecoderLiesAboutNumberOfChannels;
-    }
-    if (!strcmp(componentName, "OMX.TI.AAC.decode")) {
-        quirks |= kNeedsFlushBeforeDisable;
-        quirks |= kRequiresFlushCompleteEmulation;
-        quirks |= kSupportsMultipleFramesPerInputBuffer;
-    }
-    if (!strncmp(componentName, "OMX.qcom.video.encoder.", 23)) {
-        quirks |= kRequiresLoadedToIdleAfterAllocation;
-        quirks |= kRequiresAllocateBufferOnInputPorts;
-        quirks |= kRequiresAllocateBufferOnOutputPorts;
-        if (!strncmp(componentName, "OMX.qcom.video.encoder.avc", 26)) {
-
-            // The AVC encoder advertises the size of output buffers
-            // based on the input video resolution and assumes
-            // the worst/least compression ratio is 0.5. It is found that
-            // sometimes, the output buffer size is larger than
-            // size advertised by the encoder.
-            quirks |= kRequiresLargerEncoderOutputBuffer;
-        }
-    }
-    if (!strncmp(componentName, "OMX.qcom.7x30.video.encoder.", 28)) {
-    }
-    if (!strncmp(componentName, "OMX.qcom.video.decoder.", 23)) {
-        quirks |= kRequiresAllocateBufferOnOutputPorts;
-        quirks |= kDefersOutputBufferAllocation;
-    }
-    if (!strncmp(componentName, "OMX.qcom.7x30.video.decoder.", 28)) {
-        quirks |= kRequiresAllocateBufferOnInputPorts;
-        quirks |= kRequiresAllocateBufferOnOutputPorts;
-        quirks |= kDefersOutputBufferAllocation;
-    }
-
-    if (!strcmp(componentName, "OMX.TI.DUCATI1.VIDEO.DECODER")) {
-        quirks |= kRequiresAllocateBufferOnInputPorts;
-        quirks |= kRequiresAllocateBufferOnOutputPorts;
-    }
-
-    // FIXME:
-    // Remove the quirks after the work is done.
-    else if (!strcmp(componentName, "OMX.TI.DUCATI1.VIDEO.MPEG4E") ||
-             !strcmp(componentName, "OMX.TI.DUCATI1.VIDEO.H264E")) {
-
-        quirks |= kRequiresAllocateBufferOnInputPorts;
-        quirks |= kRequiresAllocateBufferOnOutputPorts;
-    }
-    else if (!strncmp(componentName, "OMX.TI.", 7)) {
-        // Apparently I must not use OMX_UseBuffer on either input or
-        // output ports on any of the TI components or quote:
-        // "(I) may have unexpected problem (sic) which can be timing related
-        //  and hard to reproduce."
-
-        quirks |= kRequiresAllocateBufferOnInputPorts;
-        quirks |= kRequiresAllocateBufferOnOutputPorts;
-        if (!strncmp(componentName, "OMX.TI.Video.encoder", 20)) {
-            quirks |= kAvoidMemcopyInputRecordingFrames;
-        }
-    }
-
-    if (!strcmp(componentName, "OMX.TI.Video.Decoder")) {
-        quirks |= kInputBufferSizesAreBogus;
-    }
-
-    if (!strncmp(componentName, "OMX.SEC.", 8) && !isEncoder) {
-        // These output buffers contain no video data, just some
-        // opaque information that allows the overlay to display their
-        // contents.
-        quirks |= kOutputBuffersAreUnreadable;
-    }
-
-    return quirks;
-}
-
-// static
 void OMXCodec::findMatchingCodecs(
         const char *mime,
         bool createEncoder, const char *matchComponentName,
         uint32_t flags,
-        Vector<String8> *matchingCodecs) {
+        Vector<String8> *matchingCodecs,
+        Vector<uint32_t> *matchingCodecQuirks) {
     matchingCodecs->clear();
 
-    for (int index = 0;; ++index) {
-        const char *componentName;
+    if (matchingCodecQuirks) {
+        matchingCodecQuirks->clear();
+    }
 
-        if (createEncoder) {
-            componentName = GetCodec(
-                    kEncoderInfo,
-                    sizeof(kEncoderInfo) / sizeof(kEncoderInfo[0]),
-                    mime, index);
-        } else {
-            componentName = GetCodec(
-                    kDecoderInfo,
-                    sizeof(kDecoderInfo) / sizeof(kDecoderInfo[0]),
-                    mime, index);
-        }
+    const MediaCodecList *list = MediaCodecList::getInstance();
+    if (list == NULL) {
+        return;
+    }
 
-        if (!componentName) {
+    size_t index = 0;
+    for (;;) {
+        ssize_t matchIndex =
+            list->findCodecByType(mime, createEncoder, index);
+
+        if (matchIndex < 0) {
             break;
         }
 
+        index = matchIndex + 1;
+
+        const char *componentName = list->getCodecName(matchIndex);
+
         // If a specific codec is requested, skip the non-matching ones.
         if (matchComponentName && strcmp(componentName, matchComponentName)) {
             continue;
@@ -410,6 +227,10 @@
             (!(flags & (kSoftwareCodecsOnly | kHardwareCodecsOnly)))) {
 
             matchingCodecs->push(String8(componentName));
+
+            if (matchingCodecQuirks) {
+                matchingCodecQuirks->push(getComponentQuirks(list, matchIndex));
+            }
         }
     }
 
@@ -419,6 +240,45 @@
 }
 
 // static
+uint32_t OMXCodec::getComponentQuirks(
+        const MediaCodecList *list, size_t index) {
+    uint32_t quirks = 0;
+    if (list->codecHasQuirk(
+                index, "requires-allocate-on-input-ports")) {
+        quirks |= kRequiresAllocateBufferOnInputPorts;
+    }
+    if (list->codecHasQuirk(
+                index, "requires-allocate-on-output-ports")) {
+        quirks |= kRequiresAllocateBufferOnOutputPorts;
+    }
+    if (list->codecHasQuirk(
+                index, "output-buffers-are-unreadable")) {
+        quirks |= kOutputBuffersAreUnreadable;
+    }
+
+    return quirks;
+}
+
+// static
+bool OMXCodec::findCodecQuirks(const char *componentName, uint32_t *quirks) {
+    const MediaCodecList *list = MediaCodecList::getInstance();
+
+    if (list == NULL) {
+        return false;
+    }
+
+    ssize_t index = list->findCodecByName(componentName);
+
+    if (index < 0) {
+        return false;
+    }
+
+    *quirks = getComponentQuirks(list, index);
+
+    return true;
+}
+
+// static
 sp<MediaSource> OMXCodec::Create(
         const sp<IOMX> &omx,
         const sp<MetaData> &meta, bool createEncoder,
@@ -440,8 +300,10 @@
     CHECK(success);
 
     Vector<String8> matchingCodecs;
+    Vector<uint32_t> matchingCodecQuirks;
     findMatchingCodecs(
-            mime, createEncoder, matchComponentName, flags, &matchingCodecs);
+            mime, createEncoder, matchComponentName, flags,
+            &matchingCodecs, &matchingCodecQuirks);
 
     if (matchingCodecs.isEmpty()) {
         return NULL;
@@ -452,6 +314,7 @@
 
     for (size_t i = 0; i < matchingCodecs.size(); ++i) {
         const char *componentNameBase = matchingCodecs[i].string();
+        uint32_t quirks = matchingCodecQuirks[i];
         const char *componentName = componentNameBase;
 
         AString tmp;
@@ -475,8 +338,6 @@
 
         ALOGV("Attempting to allocate OMX node '%s'", componentName);
 
-        uint32_t quirks = getComponentQuirks(componentNameBase, createEncoder);
-
         if (!createEncoder
                 && (quirks & kOutputBuffersAreUnreadable)
                 && (flags & kClientNeedsFramebuffer)) {
@@ -632,16 +493,6 @@
             CODEC_LOGI(
                     "AVC profile = %u (%s), level = %u",
                     profile, AVCProfileToString(profile), level);
-
-            if (!strcmp(mComponentName, "OMX.TI.Video.Decoder")
-                && (profile != kAVCProfileBaseline || level > 30)) {
-                // This stream exceeds the decoder's capabilities. The decoder
-                // does not handle this gracefully and would clobber the heap
-                // and wreak havoc instead...
-
-                ALOGE("Profile and/or level exceed the decoder's capabilities.");
-                return ERROR_UNSUPPORTED;
-            }
         } else if (meta->findData(kKeyVorbisInfo, &type, &data, &size)) {
             addCodecSpecificData(data, size);
 
@@ -697,40 +548,11 @@
         }
     }
 
-    if (!strcasecmp(mMIME, MEDIA_MIMETYPE_IMAGE_JPEG)
-        && !strcmp(mComponentName, "OMX.TI.JPEG.decode")) {
-        OMX_COLOR_FORMATTYPE format =
-            OMX_COLOR_Format32bitARGB8888;
-            // OMX_COLOR_FormatYUV420PackedPlanar;
-            // OMX_COLOR_FormatCbYCrY;
-            // OMX_COLOR_FormatYUV411Planar;
-
-        int32_t width, height;
-        bool success = meta->findInt32(kKeyWidth, &width);
-        success = success && meta->findInt32(kKeyHeight, &height);
-
-        int32_t compressedSize;
-        success = success && meta->findInt32(
-                kKeyMaxInputSize, &compressedSize);
-
-        CHECK(success);
-        CHECK(compressedSize > 0);
-
-        setImageOutputFormat(format, width, height);
-        setJPEGInputFormat(width, height, (OMX_U32)compressedSize);
-    }
-
     int32_t maxInputSize;
     if (meta->findInt32(kKeyMaxInputSize, &maxInputSize)) {
         setMinBufferSize(kPortIndexInput, (OMX_U32)maxInputSize);
     }
 
-    if (!strcmp(mComponentName, "OMX.TI.AMR.encode")
-        || !strcmp(mComponentName, "OMX.TI.WBAMR.encode")
-        || !strcmp(mComponentName, "OMX.TI.AAC.encode")) {
-        setMinBufferSize(kPortIndexOutput, 8192);  // XXX
-    }
-
     initOutputFormat(meta);
 
     if ((mFlags & kClientNeedsFramebuffer)
@@ -834,21 +656,6 @@
              index, format.eCompressionFormat, format.eColorFormat);
 #endif
 
-        if (!strcmp("OMX.TI.Video.encoder", mComponentName)) {
-            if (portIndex == kPortIndexInput
-                    && colorFormat == format.eColorFormat) {
-                // eCompressionFormat does not seem right.
-                found = true;
-                break;
-            }
-            if (portIndex == kPortIndexOutput
-                    && compressionFormat == format.eCompressionFormat) {
-                // eColorFormat does not seem right.
-                found = true;
-                break;
-            }
-        }
-
         if (format.eCompressionFormat == compressionFormat
                 && format.eColorFormat == colorFormat) {
             found = true;
@@ -911,13 +718,8 @@
     int32_t targetColorFormat;
     if (meta->findInt32(kKeyColorFormat, &targetColorFormat)) {
         *colorFormat = (OMX_COLOR_FORMATTYPE) targetColorFormat;
-    } else {
-        if (!strcasecmp("OMX.TI.Video.encoder", mComponentName)) {
-            *colorFormat = OMX_COLOR_FormatYCbYCr;
-        }
     }
 
-
     // Check whether the target color format is supported.
     return isColorFormatSupported(*colorFormat, kPortIndexInput);
 }
@@ -1482,11 +1284,12 @@
         const sp<MediaSource> &source,
         const sp<ANativeWindow> &nativeWindow)
     : mOMX(omx),
-      mOMXLivesLocally(omx->livesLocally(getpid())),
+      mOMXLivesLocally(omx->livesLocally(node, getpid())),
       mNode(node),
       mQuirks(quirks),
       mFlags(flags),
       mIsEncoder(isEncoder),
+      mIsVideo(!strncasecmp("video/", mime, 6)),
       mMIME(strdup(mime)),
       mComponentName(strdup(componentName)),
       mSource(source),
@@ -1529,8 +1332,6 @@
             "audio_decoder.mp1", "audio_encoder.mp1" },
         { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II,
             "audio_decoder.mp2", "audio_encoder.mp2" },
-        { MEDIA_MIMETYPE_AUDIO_MPEG,
-            "audio_decoder.mp3", "audio_encoder.mp3" },
         { MEDIA_MIMETYPE_AUDIO_AMR_NB,
             "audio_decoder.amrnb", "audio_encoder.amrnb" },
         { MEDIA_MIMETYPE_AUDIO_AMR_WB,
@@ -1539,12 +1340,18 @@
             "audio_decoder.aac", "audio_encoder.aac" },
         { MEDIA_MIMETYPE_AUDIO_VORBIS,
             "audio_decoder.vorbis", "audio_encoder.vorbis" },
+        { MEDIA_MIMETYPE_AUDIO_G711_MLAW,
+            "audio_decoder.g711mlaw", "audio_encoder.g711mlaw" },
+        { MEDIA_MIMETYPE_AUDIO_G711_ALAW,
+            "audio_decoder.g711alaw", "audio_encoder.g711alaw" },
         { MEDIA_MIMETYPE_VIDEO_AVC,
             "video_decoder.avc", "video_encoder.avc" },
         { MEDIA_MIMETYPE_VIDEO_MPEG4,
             "video_decoder.mpeg4", "video_encoder.mpeg4" },
         { MEDIA_MIMETYPE_VIDEO_H263,
             "video_decoder.h263", "video_encoder.h263" },
+        { MEDIA_MIMETYPE_VIDEO_VPX,
+            "video_decoder.vpx", "video_encoder.vpx" },
     };
 
     static const size_t kNumMimeToRole =
@@ -2191,8 +1998,8 @@
     }
 }
 
-int64_t OMXCodec::retrieveDecodingTimeUs(bool isCodecSpecific) {
-    CHECK(mIsEncoder);
+int64_t OMXCodec::getDecodingTimeUs() {
+    CHECK(mIsEncoder && mIsVideo);
 
     if (mDecodingTimeList.empty()) {
         CHECK(mSignalledEOS || mNoMoreOutputData);
@@ -2203,12 +2010,7 @@
 
     List<int64_t>::iterator it = mDecodingTimeList.begin();
     int64_t timeUs = *it;
-
-    // If the output buffer is codec specific configuration,
-    // do not remove the decoding time from the list.
-    if (!isCodecSpecific) {
-        mDecodingTimeList.erase(it);
-    }
+    mDecodingTimeList.erase(it);
     return timeUs;
 }
 
@@ -2387,8 +2189,8 @@
                     mNoMoreOutputData = true;
                 }
 
-                if (mIsEncoder) {
-                    int64_t decodingTimeUs = retrieveDecodingTimeUs(isCodecSpecific);
+                if (mIsEncoder && mIsVideo) {
+                    int64_t decodingTimeUs = isCodecSpecific ? 0 : getDecodingTimeUs();
                     buffer->meta_data()->setInt64(kKeyDecodingTime, decodingTimeUs);
                 }
 
@@ -3249,7 +3051,7 @@
         int64_t lastBufferTimeUs;
         CHECK(srcBuffer->meta_data()->findInt64(kKeyTime, &lastBufferTimeUs));
         CHECK(lastBufferTimeUs >= 0);
-        if (mIsEncoder) {
+        if (mIsEncoder && mIsVideo) {
             mDecodingTimeList.push_back(lastBufferTimeUs);
         }
 
@@ -3333,13 +3135,6 @@
 
     info->mStatus = OWNED_BY_COMPONENT;
 
-    // This component does not ever signal the EOS flag on output buffers,
-    // Thanks for nothing.
-    if (mSignalledEOS && !strcmp(mComponentName, "OMX.TI.Video.encoder")) {
-        mNoMoreOutputData = true;
-        mBufferFilled.signal();
-    }
-
     return true;
 }
 
@@ -3565,6 +3360,7 @@
         //////////////// output port ////////////////////
         // format
         OMX_AUDIO_PARAM_PORTFORMATTYPE format;
+        InitOMXParams(&format);
         format.nPortIndex = kPortIndexOutput;
         format.nIndex = 0;
         status_t err = OMX_ErrorNone;
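With the hard-coded kDecoderInfo/kEncoderInfo tables gone, findMatchingCodecs() walks MediaCodecList and getComponentQuirks() only translates per-codec quirk strings into bit flags. Below is a stand-alone sketch of that string-to-bitmask translation; the quirk names match the ones used in the hunk above, but the helper and the flag values are local to this sketch.

    // Map quirk names, as they would appear per-codec in media_codecs.xml,
    // onto a bitmask the codec can test at runtime.  Flag values are local
    // to this sketch.
    #include <stdint.h>
    #include <string.h>

    enum {
        kRequiresAllocateBufferOnInputPorts  = 1,
        kRequiresAllocateBufferOnOutputPorts = 2,
        kOutputBuffersAreUnreadable          = 4
    };

    static uint32_t quirksFromNames(const char *const *names, size_t count) {
        uint32_t quirks = 0;
        for (size_t i = 0; i < count; ++i) {
            if (!strcmp(names[i], "requires-allocate-on-input-ports")) {
                quirks |= kRequiresAllocateBufferOnInputPorts;
            } else if (!strcmp(names[i], "requires-allocate-on-output-ports")) {
                quirks |= kRequiresAllocateBufferOnOutputPorts;
            } else if (!strcmp(names[i], "output-buffers-are-unreadable")) {
                quirks |= kOutputBuffersAreUnreadable;
            }
        }
        return quirks;
    }
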
diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp
index 73efc27..5e79e78 100644
--- a/media/libstagefright/OggExtractor.cpp
+++ b/media/libstagefright/OggExtractor.cpp
@@ -21,10 +21,10 @@
 #include "include/OggExtractor.h"
 
 #include <cutils/properties.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
diff --git a/media/libstagefright/SampleIterator.cpp b/media/libstagefright/SampleIterator.cpp
index 81ec5c1..eae721b 100644
--- a/media/libstagefright/SampleIterator.cpp
+++ b/media/libstagefright/SampleIterator.cpp
@@ -22,8 +22,8 @@
 
 #include <arpa/inet.h>
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/Utils.h>
 
 #include "include/SampleTable.h"
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 8d80d63..d9858d7 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -618,26 +618,31 @@
     }
 
     uint32_t left = 0;
-    while (left < mNumSyncSamples) {
-        uint32_t x = mSyncSamples[left];
+    uint32_t right = mNumSyncSamples;
+    while (left < right) {
+        uint32_t center = left + (right - left) / 2;
+        uint32_t x = mSyncSamples[center];
 
-        if (x >= start_sample_index) {
+        if (start_sample_index < x) {
+            right = center;
+        } else if (start_sample_index > x) {
+            left = center + 1;
+        } else {
+            left = center;
             break;
         }
-
-        ++left;
     }
-
     if (left == mNumSyncSamples) {
         if (flags == kFlagAfter) {
             ALOGE("tried to find a sync frame after the last one: %d", left);
             return ERROR_OUT_OF_RANGE;
         }
+        left = left - 1;
     }
 
-    if (left > 0) {
-        --left;
-    }
+    // Now ssi[left] is the sync sample index just before (or at)
+    // start_sample_index.
+    // Also start_sample_index < ssi[left + 1], if left + 1 < mNumSyncSamples.
 
     uint32_t x = mSyncSamples[left];
 
@@ -682,7 +687,11 @@
 
                 x = mSyncSamples[left - 1];
 
-                CHECK(x <= start_sample_index);
+                if (x > start_sample_index) {
+                    // The table of sync sample indices was not sorted
+                    // properly.
+                    return ERROR_MALFORMED;
+                }
             }
             break;
         }
@@ -696,7 +705,11 @@
 
                 x = mSyncSamples[left + 1];
 
-                CHECK(x >= start_sample_index);
+                if (x < start_sample_index) {
+                    // The table of sync sample indices was not sorted
+                    // properly.
+                    return ERROR_MALFORMED;
+                }
             }
 
             break;
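The SampleTable change above replaces a linear scan of mSyncSamples with a binary search that leaves `left` at the sync sample just before (or at) start_sample_index. The same search over a plain sorted array, as a self-contained sketch:

    #include <stddef.h>
    #include <stdint.h>

    // Return the index of the last entry in the sorted array `sync` that is
    // <= `target`, or (size_t)-1 if every entry is greater.
    static size_t lastSyncAtOrBefore(
            const uint32_t *sync, size_t n, uint32_t target) {
        size_t left = 0;
        size_t right = n;                     // search window is [left, right)
        while (left < right) {
            size_t center = left + (right - left) / 2;
            if (target < sync[center]) {
                right = center;               // sync[center] is too large, go left
            } else if (target > sync[center]) {
                left = center + 1;            // sync[center] is too small, go right
            } else {
                return center;                // exact hit
            }
        }
        // `left` is now the first index whose entry is > target.
        return left == 0 ? (size_t)-1 : left - 1;
    }
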
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 43bfd9e..35f9c1f 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -20,10 +20,10 @@
 
 #include "include/StagefrightMetadataRetriever.h"
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/ColorConverter.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/FileSource.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/OMXCodec.h>
@@ -37,7 +37,7 @@
     ALOGV("StagefrightMetadataRetriever()");
 
     DataSource::RegisterDefaultSniffers();
-    CHECK_EQ(mClient.connect(), OK);
+    CHECK_EQ(mClient.connect(), (status_t)OK);
 }
 
 StagefrightMetadataRetriever::~StagefrightMetadataRetriever() {
@@ -169,7 +169,7 @@
              || (buffer != NULL && buffer->range_length() == 0));
 
     if (err != OK) {
-        CHECK_EQ(buffer, NULL);
+        CHECK(buffer == NULL);
 
         ALOGV("decoding frame failed.");
         decoder->stop();
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index 48df058..7481e2e 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -16,22 +16,23 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "SurfaceMediaSource"
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/SurfaceMediaSource.h>
-#include <ui/GraphicBuffer.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaDebug.h>
-#include <media/stagefright/openmax/OMX_IVCommon.h>
-#include <media/stagefright/MetadataBufferType.h>
+#include <OMX_IVCommon.h>
+#include <MetadataBufferType.h>
 
-#include <surfaceflinger/ISurfaceComposer.h>
-#include <surfaceflinger/SurfaceComposerClient.h>
-#include <surfaceflinger/IGraphicBufferAlloc.h>
+#include <ui/GraphicBuffer.h>
+#include <gui/ISurfaceComposer.h>
+#include <gui/IGraphicBufferAlloc.h>
 #include <OMX_Component.h>
 
 #include <utils/Log.h>
 #include <utils/String8.h>
 
+#include <private/gui/ComposerService.h>
+
 namespace android {
 
 SurfaceMediaSource::SurfaceMediaSource(uint32_t bufW, uint32_t bufH) :
@@ -53,12 +54,15 @@
     ALOGV("SurfaceMediaSource::SurfaceMediaSource");
     sp<ISurfaceComposer> composer(ComposerService::getComposerService());
     mGraphicBufferAlloc = composer->createGraphicBufferAlloc();
+    if (mGraphicBufferAlloc == 0) {
+        ALOGE("createGraphicBufferAlloc() failed in SurfaceMediaSource()");
+    }
 }
 
 SurfaceMediaSource::~SurfaceMediaSource() {
     ALOGV("SurfaceMediaSource::~SurfaceMediaSource");
     if (!mStopped) {
-        stop();
+        reset();
     }
 }
 
@@ -714,9 +718,9 @@
 }
 
 
-status_t SurfaceMediaSource::stop()
+status_t SurfaceMediaSource::reset()
 {
-    ALOGV("Stop");
+    ALOGV("Reset");
 
     Mutex::Autolock lock(mMutex);
     // TODO: Add waiting on mFrameCompletedCondition here?
@@ -853,7 +857,7 @@
     }
 
     if (!foundBuffer) {
-        CHECK_EQ(0, "signalBufferReturned: bogus buffer");
+        CHECK(!"signalBufferReturned: bogus buffer");
     }
 }
 
diff --git a/media/libstagefright/ThrottledSource.cpp b/media/libstagefright/ThrottledSource.cpp
index 88e07b0..b1fcafd 100644
--- a/media/libstagefright/ThrottledSource.cpp
+++ b/media/libstagefright/ThrottledSource.cpp
@@ -16,7 +16,7 @@
 
 #include "include/ThrottledSource.h"
 
-#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/foundation/ADebug.h>
 
 namespace android {
 
diff --git a/media/libstagefright/TimedEventQueue.cpp b/media/libstagefright/TimedEventQueue.cpp
index 12c9c36..6d345bb 100644
--- a/media/libstagefright/TimedEventQueue.cpp
+++ b/media/libstagefright/TimedEventQueue.cpp
@@ -26,12 +26,10 @@
 
 #include "include/TimedEventQueue.h"
 
-#include <cutils/sched_policy.h>
-
 #include <sys/prctl.h>
 #include <sys/time.h>
 
-#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/foundation/ADebug.h>
 
 #ifdef ANDROID_SIMULATOR
 #include <jni.h>
diff --git a/media/libstagefright/VBRISeeker.cpp b/media/libstagefright/VBRISeeker.cpp
index 6ac5a83..bcba874 100644
--- a/media/libstagefright/VBRISeeker.cpp
+++ b/media/libstagefright/VBRISeeker.cpp
@@ -92,7 +92,7 @@
     }
 
     sp<VBRISeeker> seeker = new VBRISeeker;
-    seeker->mBasePos = post_id3_pos;
+    seeker->mBasePos = post_id3_pos + frameSize;
     seeker->mDurationUs = durationUs;
 
     off64_t offset = post_id3_pos;
diff --git a/media/libstagefright/VideoSourceDownSampler.cpp b/media/libstagefright/VideoSourceDownSampler.cpp
index 1b66990..90a42c9 100644
--- a/media/libstagefright/VideoSourceDownSampler.cpp
+++ b/media/libstagefright/VideoSourceDownSampler.cpp
@@ -17,9 +17,9 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "VideoSourceDownSampler"
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/VideoSourceDownSampler.h>
 #include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/YUVImage.h>
 #include <media/stagefright/YUVCanvas.h>
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index 0bcaf08..c35a77a 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -20,23 +20,30 @@
 
 #include "include/WAVExtractor.h"
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
 #include <utils/String8.h>
+#include <cutils/bitops.h>
+
+#define CHANNEL_MASK_USE_CHANNEL_ORDER 0
 
 namespace android {
 
 enum {
-    WAVE_FORMAT_PCM = 1,
-    WAVE_FORMAT_ALAW = 6,
-    WAVE_FORMAT_MULAW = 7,
+    WAVE_FORMAT_PCM        = 0x0001,
+    WAVE_FORMAT_ALAW       = 0x0006,
+    WAVE_FORMAT_MULAW      = 0x0007,
+    WAVE_FORMAT_EXTENSIBLE = 0xFFFE
 };
 
+static const char* WAVEEXT_SUBFORMAT = "\x00\x00\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71";
+
+
 static uint32_t U32_LE_AT(const uint8_t *ptr) {
     return ptr[3] << 24 | ptr[2] << 16 | ptr[1] << 8 | ptr[0];
 }
@@ -84,7 +91,8 @@
 
 WAVExtractor::WAVExtractor(const sp<DataSource> &source)
     : mDataSource(source),
-      mValidFormat(false) {
+      mValidFormat(false),
+      mChannelMask(CHANNEL_MASK_USE_CHANNEL_ORDER) {
     mInitCheck = init();
 }
 
@@ -161,21 +169,37 @@
                 return NO_INIT;
             }
 
-            uint8_t formatSpec[16];
-            if (mDataSource->readAt(offset, formatSpec, 16) < 16) {
+            uint8_t formatSpec[40];
+            if (mDataSource->readAt(offset, formatSpec, 2) < 2) {
                 return NO_INIT;
             }
 
             mWaveFormat = U16_LE_AT(formatSpec);
             if (mWaveFormat != WAVE_FORMAT_PCM
                     && mWaveFormat != WAVE_FORMAT_ALAW
-                    && mWaveFormat != WAVE_FORMAT_MULAW) {
+                    && mWaveFormat != WAVE_FORMAT_MULAW
+                    && mWaveFormat != WAVE_FORMAT_EXTENSIBLE) {
                 return ERROR_UNSUPPORTED;
             }
 
+            uint8_t fmtSize = 16;
+            if (mWaveFormat == WAVE_FORMAT_EXTENSIBLE) {
+                fmtSize = 40;
+            }
+            if (mDataSource->readAt(offset, formatSpec, fmtSize) < fmtSize) {
+                return NO_INIT;
+            }
+
             mNumChannels = U16_LE_AT(&formatSpec[2]);
-            if (mNumChannels != 1 && mNumChannels != 2) {
-                return ERROR_UNSUPPORTED;
+            if (mWaveFormat != WAVE_FORMAT_EXTENSIBLE) {
+                if (mNumChannels != 1 && mNumChannels != 2) {
+                    ALOGW("More than 2 channels (%d) in non-WAVE_EXT, unknown channel mask",
+                            mNumChannels);
+                }
+            } else {
+                if (mNumChannels < 1 || mNumChannels > 8) {
+                    return ERROR_UNSUPPORTED;
+                }
             }
 
             mSampleRate = U32_LE_AT(&formatSpec[4]);
@@ -186,7 +210,8 @@
 
             mBitsPerSample = U16_LE_AT(&formatSpec[14]);
 
-            if (mWaveFormat == WAVE_FORMAT_PCM) {
+            if (mWaveFormat == WAVE_FORMAT_PCM
+                    || mWaveFormat == WAVE_FORMAT_EXTENSIBLE) {
                 if (mBitsPerSample != 8 && mBitsPerSample != 16
                     && mBitsPerSample != 24) {
                     return ERROR_UNSUPPORTED;
@@ -199,6 +224,42 @@
                 }
             }
 
+            if (mWaveFormat == WAVE_FORMAT_EXTENSIBLE) {
+                uint16_t validBitsPerSample = U16_LE_AT(&formatSpec[18]);
+                if (validBitsPerSample != mBitsPerSample) {
+                    ALOGE("validBits(%d) != bitsPerSample(%d) are not supported",
+                            validBitsPerSample, mBitsPerSample);
+                    return ERROR_UNSUPPORTED;
+                }
+
+                mChannelMask = U32_LE_AT(&formatSpec[20]);
+                ALOGV("numChannels=%d channelMask=0x%x", mNumChannels, mChannelMask);
+                if ((mChannelMask >> 18) != 0) {
+                    ALOGE("invalid channel mask 0x%x", mChannelMask);
+                    return ERROR_MALFORMED;
+                }
+
+                if ((mChannelMask != CHANNEL_MASK_USE_CHANNEL_ORDER)
+                        && (popcount(mChannelMask) != mNumChannels)) {
+                    ALOGE("invalid number of channels (%d) in channel mask (0x%x)",
+                            popcount(mChannelMask), mChannelMask);
+                    return ERROR_MALFORMED;
+                }
+
+                // In a WAVE_EXT header, the first two bytes of the GUID stored at byte 24 contain
+                // the sample format, using the same definitions as a regular WAV header
+                mWaveFormat = U16_LE_AT(&formatSpec[24]);
+                if (mWaveFormat != WAVE_FORMAT_PCM
+                        && mWaveFormat != WAVE_FORMAT_ALAW
+                        && mWaveFormat != WAVE_FORMAT_MULAW) {
+                    return ERROR_UNSUPPORTED;
+                }
+                if (memcmp(&formatSpec[26], WAVEEXT_SUBFORMAT, 14)) {
+                    ALOGE("unsupported GUID");
+                    return ERROR_UNSUPPORTED;
+                }
+            }
+
             mValidFormat = true;
         } else if (!memcmp(chunkHeader, "data", 4)) {
             if (mValidFormat) {
@@ -217,13 +278,14 @@
                                 kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_G711_ALAW);
                         break;
                     default:
-                        CHECK_EQ(mWaveFormat, WAVE_FORMAT_MULAW);
+                        CHECK_EQ(mWaveFormat, (uint16_t)WAVE_FORMAT_MULAW);
                         mTrackMeta->setCString(
                                 kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_G711_MLAW);
                         break;
                 }
 
                 mTrackMeta->setInt32(kKeyChannelCount, mNumChannels);
+                mTrackMeta->setInt32(kKeyChannelMask, mChannelMask);
                 mTrackMeta->setInt32(kKeySampleRate, mSampleRate);
 
                 size_t bytesPerSample = mBitsPerSample >> 3;
@@ -362,7 +424,7 @@
             // Convert 8-bit unsigned samples to 16-bit signed.
 
             MediaBuffer *tmp;
-            CHECK_EQ(mGroup->acquire_buffer(&tmp), OK);
+            CHECK_EQ(mGroup->acquire_buffer(&tmp), (status_t)OK);
 
             // The new buffer holds the sample number of samples, but each
             // one is 2 bytes wide.
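The WAVE_FORMAT_EXTENSIBLE path above reads a 40-byte fmt chunk, requires validBitsPerSample (offset 18) to match the container sample size, takes the channel mask from offset 20, and recovers the real sample format from the first two bytes of the subformat GUID at offset 24, whose remaining 14 bytes must equal the fixed tail. A self-contained sketch of that parse over a raw 40-byte buffer, with the error handling reduced to a bool:

    #include <stdint.h>
    #include <string.h>

    static uint16_t u16le(const uint8_t *p) { return (uint16_t)(p[0] | (p[1] << 8)); }
    static uint32_t u32le(const uint8_t *p) {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8)
                | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    struct ExtensibleFmt {
        uint16_t numChannels;
        uint32_t sampleRate;
        uint16_t bitsPerSample;
        uint32_t channelMask;
        uint16_t realFormat;        // PCM / A-law / mu-law code taken from the GUID
    };

    // Parse a 40-byte WAVE_FORMAT_EXTENSIBLE fmt payload laid out as in the
    // hunk above.
    static bool parseExtensibleFmt(const uint8_t spec[40], ExtensibleFmt *out) {
        static const uint8_t kGuidTail[14] = {
            0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x80, 0x00,
            0x00, 0xAA, 0x00, 0x38, 0x9B, 0x71
        };

        out->numChannels   = u16le(spec + 2);
        out->sampleRate    = u32le(spec + 4);
        out->bitsPerSample = u16le(spec + 14);
        out->channelMask   = u32le(spec + 20);
        out->realFormat    = u16le(spec + 24);   // first 2 bytes of the subformat GUID

        if (u16le(spec + 18) != out->bitsPerSample) {
            return false;                        // validBitsPerSample must match
        }
        return memcmp(spec + 26, kGuidTail, sizeof(kGuidTail)) == 0;
    }
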
diff --git a/media/libstagefright/WVMExtractor.cpp b/media/libstagefright/WVMExtractor.cpp
index 2092cb6..dac8106 100644
--- a/media/libstagefright/WVMExtractor.cpp
+++ b/media/libstagefright/WVMExtractor.cpp
@@ -21,6 +21,7 @@
 
 #include <arpa/inet.h>
 #include <utils/String8.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/Utils.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaSource.h>
@@ -28,7 +29,6 @@
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDebug.h>
 #include <dlfcn.h>
 
 #include <utils/Errors.h>
@@ -45,17 +45,12 @@
 static Mutex gWVMutex;
 
 WVMExtractor::WVMExtractor(const sp<DataSource> &source)
-    : mDataSource(source) {
-    {
-        Mutex::Autolock autoLock(gWVMutex);
-        if (gVendorLibHandle == NULL) {
-            gVendorLibHandle = dlopen("libwvm.so", RTLD_NOW);
-        }
+    : mDataSource(source)
+{
+    Mutex::Autolock autoLock(gWVMutex);
 
-        if (gVendorLibHandle == NULL) {
-            ALOGE("Failed to open libwvm.so");
-            return;
-        }
+    if (!getVendorLibHandle()) {
+        return;
     }
 
     typedef WVMLoadableExtractor *(*GetInstanceFunc)(sp<DataSource>);
@@ -64,13 +59,28 @@
                 "_ZN7android11GetInstanceENS_2spINS_10DataSourceEEE");
 
     if (getInstanceFunc) {
+        CHECK(source->DrmInitialization(MEDIA_MIMETYPE_CONTAINER_WVM) != NULL);
         mImpl = (*getInstanceFunc)(source);
         CHECK(mImpl != NULL);
+        setDrmFlag(true);
     } else {
         ALOGE("Failed to locate GetInstance in libwvm.so");
     }
 }
 
+bool WVMExtractor::getVendorLibHandle()
+{
+    if (gVendorLibHandle == NULL) {
+        gVendorLibHandle = dlopen("libwvm.so", RTLD_NOW);
+    }
+
+    if (gVendorLibHandle == NULL) {
+        ALOGE("Failed to open libwvm.so");
+    }
+
+    return gVendorLibHandle != NULL;
+}
+
 WVMExtractor::~WVMExtractor() {
 }
 
@@ -113,5 +123,39 @@
     }
 }
 
+void WVMExtractor::setUID(uid_t uid) {
+    if (mImpl != NULL) {
+        mImpl->setUID(uid);
+    }
+}
+
+bool SniffWVM(
+    const sp<DataSource> &source, String8 *mimeType, float *confidence,
+        sp<AMessage> *) {
+
+    Mutex::Autolock autoLock(gWVMutex);
+
+    if (!WVMExtractor::getVendorLibHandle()) {
+        return false;
+    }
+
+    typedef WVMLoadableExtractor *(*SnifferFunc)(const sp<DataSource>&);
+    SnifferFunc snifferFunc =
+        (SnifferFunc) dlsym(gVendorLibHandle,
+                            "_ZN7android15IsWidevineMediaERKNS_2spINS_10DataSourceEEE");
+
+    if (snifferFunc) {
+        if ((*snifferFunc)(source)) {
+            *mimeType = MEDIA_MIMETYPE_CONTAINER_WVM;
+            *confidence = 10.0f;
+            return true;
+        }
+    } else {
+        ALOGE("IsWidevineMedia not found in libwvm.so");
+    }
+
+    return false;
+}
+
 } //namespace android
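getVendorLibHandle() and SniffWVM() above use the usual lazy dlopen()/dlsym() pattern: open libwvm.so once, cache the handle, then resolve the (C++-mangled) entry point on each use; the real code additionally serializes this behind gWVMutex. A stand-alone sketch of the pattern with placeholder library and symbol names:

    #include <dlfcn.h>
    #include <stdio.h>

    typedef bool (*ProbeFunc)(const void *opaque);

    // Lazily load the plugin once, resolve the probe entry point, and call it.
    static bool probeWithPlugin(const void *opaque) {
        static void *sHandle = NULL;                    // cached across calls
        if (sHandle == NULL) {
            sHandle = dlopen("libexample_plugin.so", RTLD_NOW);
            if (sHandle == NULL) {
                fprintf(stderr, "dlopen failed: %s\n", dlerror());
                return false;
            }
        }
        // The real sniffer resolves a C++-mangled symbol; a plain C name here.
        ProbeFunc probe = (ProbeFunc)dlsym(sHandle, "example_probe");
        if (probe == NULL) {
            fprintf(stderr, "symbol not found: %s\n", dlerror());
            return false;
        }
        return probe(opaque);
    }
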
 
diff --git a/media/libstagefright/XINGSeeker.cpp b/media/libstagefright/XINGSeeker.cpp
index 2091381..9c91134 100644
--- a/media/libstagefright/XINGSeeker.cpp
+++ b/media/libstagefright/XINGSeeker.cpp
@@ -14,39 +14,22 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "XINGSEEKER"
+#include <utils/Log.h>
+
 #include "include/XINGSeeker.h"
+#include "include/avc_utils.h"
 
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/Utils.h>
 
 namespace android {
 
-static bool parse_xing_header(
-        const sp<DataSource> &source, off64_t first_frame_pos,
-        int32_t *frame_number = NULL, int32_t *byte_number = NULL,
-        unsigned char *table_of_contents = NULL,
-        int32_t *quality_indicator = NULL, int64_t *duration = NULL);
-
-// static
-sp<XINGSeeker> XINGSeeker::CreateFromSource(
-        const sp<DataSource> &source, off64_t first_frame_pos) {
-    sp<XINGSeeker> seeker = new XINGSeeker;
-
-    seeker->mFirstFramePos = first_frame_pos;
-
-    if (!parse_xing_header(
-                source, first_frame_pos,
-                NULL, &seeker->mSizeBytes, seeker->mTableOfContents,
-                NULL, &seeker->mDurationUs)) {
-        return NULL;
-    }
-
-    return seeker;
-}
-
 XINGSeeker::XINGSeeker()
     : mDurationUs(-1),
-      mSizeBytes(0) {
+      mSizeBytes(0),
+      mEncoderDelay(0),
+      mEncoderPadding(0) {
 }
 
 bool XINGSeeker::getDuration(int64_t *durationUs) {
@@ -60,7 +43,7 @@
 }
 
 bool XINGSeeker::getOffsetForTime(int64_t *timeUs, off64_t *pos) {
-    if (mSizeBytes == 0 || mTableOfContents[0] <= 0 || mDurationUs < 0) {
+    if (mSizeBytes == 0 || !mTOCValid || mDurationUs < 0) {
         return false;
     }
 
@@ -76,10 +59,10 @@
         if ( a == 0 ) {
             fa = 0.0f;
         } else {
-            fa = (float)mTableOfContents[a-1];
+            fa = (float)mTOC[a-1];
         }
         if ( a < 99 ) {
-            fb = (float)mTableOfContents[a];
+            fb = (float)mTOC[a];
         } else {
             fb = 256.0f;
         }
@@ -91,59 +74,50 @@
     return true;
 }
 
-static bool parse_xing_header(
-        const sp<DataSource> &source, off64_t first_frame_pos,
-        int32_t *frame_number, int32_t *byte_number,
-        unsigned char *table_of_contents, int32_t *quality_indicator,
-        int64_t *duration) {
-    if (frame_number) {
-        *frame_number = 0;
-    }
-    if (byte_number) {
-        *byte_number = 0;
-    }
-    if (table_of_contents) {
-        table_of_contents[0] = 0;
-    }
-    if (quality_indicator) {
-        *quality_indicator = 0;
-    }
-    if (duration) {
-        *duration = 0;
-    }
+// static
+sp<XINGSeeker> XINGSeeker::CreateFromSource(
+        const sp<DataSource> &source, off64_t first_frame_pos) {
+    sp<XINGSeeker> seeker = new XINGSeeker;
+
+    seeker->mFirstFramePos = first_frame_pos;
+
+    seeker->mSizeBytes = 0;
+    seeker->mTOCValid = false;
+    seeker->mDurationUs = 0;
 
     uint8_t buffer[4];
     int offset = first_frame_pos;
     if (source->readAt(offset, &buffer, 4) < 4) { // get header
-        return false;
+        return NULL;
     }
     offset += 4;
 
-    uint8_t id, layer, sr_index, mode;
-    layer = (buffer[1] >> 1) & 3;
-    id = (buffer[1] >> 3) & 3;
-    sr_index = (buffer[2] >> 2) & 3;
-    mode = (buffer[3] >> 6) & 3;
-    if (layer == 0) {
-        return false;
+    int header = U32_AT(buffer);
+    size_t xingframesize = 0;
+    int sampling_rate = 0;
+    int num_channels;
+    int samples_per_frame = 0;
+    if (!GetMPEGAudioFrameSize(header, &xingframesize, &sampling_rate, &num_channels,
+                               NULL, &samples_per_frame)) {
+        return NULL;
     }
-    if (id == 1) {
-        return false;
-    }
-    if (sr_index == 3) {
-        return false;
-    }
+    seeker->mFirstFramePos += xingframesize;
+
+    uint8_t version = (buffer[1] >> 3) & 3;
+
     // determine offset of XING header
-    if(id&1) { // mpeg1
-        if (mode != 3) offset += 32;
+    if(version & 1) { // mpeg1
+        if (num_channels != 1) offset += 32;
         else offset += 17;
-    } else { // mpeg2
-        if (mode != 3) offset += 17;
+    } else { // mpeg 2 or 2.5
+        if (num_channels != 1) offset += 17;
         else offset += 9;
     }
 
+    int xingbase = offset;
+
     if (source->readAt(offset, &buffer, 4) < 4) { // XING header ID
-        return false;
+        return NULL;
     }
     offset += 4;
     // Check XING ID
@@ -151,70 +125,71 @@
                 || (buffer[2] != 'n') || (buffer[3] != 'g')) {
         if ((buffer[0] != 'I') || (buffer[1] != 'n')
                     || (buffer[2] != 'f') || (buffer[3] != 'o')) {
-            return false;
+            return NULL;
         }
     }
 
     if (source->readAt(offset, &buffer, 4) < 4) { // flags
-        return false;
+        return NULL;
     }
     offset += 4;
     uint32_t flags = U32_AT(buffer);
 
     if (flags & 0x0001) {  // Frames field is present
         if (source->readAt(offset, buffer, 4) < 4) {
-             return false;
+             return NULL;
         }
-        if (frame_number) {
-           *frame_number = U32_AT(buffer);
-        }
-        int32_t frame = U32_AT(buffer);
-        // Samples per Frame: 1. index = MPEG Version ID, 2. index = Layer
-        const int samplesPerFrames[2][3] =
-        {
-            { 384, 1152, 576  }, // MPEG 2, 2.5: layer1, layer2, layer3
-            { 384, 1152, 1152 }, // MPEG 1: layer1, layer2, layer3
-        };
-        // sampling rates in hertz: 1. index = MPEG Version ID, 2. index = sampling rate index
-        const int samplingRates[4][3] =
-        {
-            { 11025, 12000, 8000,  },    // MPEG 2.5
-            { 0,     0,     0,     },    // reserved
-            { 22050, 24000, 16000, },    // MPEG 2
-            { 44100, 48000, 32000, }     // MPEG 1
-        };
-        if (duration) {
-            *duration = (int64_t)frame * samplesPerFrames[id&1][3-layer] * 1000000LL
-                / samplingRates[id][sr_index];
-        }
+        int32_t frames = U32_AT(buffer);
+        seeker->mDurationUs = (int64_t)frames * samples_per_frame * 1000000LL / sampling_rate;
         offset += 4;
     }
     if (flags & 0x0002) {  // Bytes field is present
-        if (byte_number) {
-            if (source->readAt(offset, buffer, 4) < 4) {
-                return false;
-            }
-            *byte_number = U32_AT(buffer);
+        if (source->readAt(offset, buffer, 4) < 4) {
+            return NULL;
         }
+        seeker->mSizeBytes = U32_AT(buffer);
         offset += 4;
     }
     if (flags & 0x0004) {  // TOC field is present
-       if (table_of_contents) {
-            if (source->readAt(offset + 1, table_of_contents, 99) < 99) {
-                return false;
-            }
+        if (source->readAt(offset + 1, seeker->mTOC, 99) < 99) {
+            return NULL;
         }
+        seeker->mTOCValid = true;
         offset += 100;
     }
+
+#if 0
     if (flags & 0x0008) {  // Quality indicator field is present
-        if (quality_indicator) {
-            if (source->readAt(offset, buffer, 4) < 4) {
-                return false;
-            }
-            *quality_indicator = U32_AT(buffer);
+        if (source->readAt(offset, buffer, 4) < 4) {
+            return NULL;
         }
+        // do something with the quality indicator
+        offset += 4;
     }
-    return true;
+
+    if (source->readAt(xingbase + 0xaf - 0x24, &buffer, 1) < 1) { // encoding flags
+        return NULL;
+    }
+
+    ALOGV("nogap preceding: %s, nogap continued in next: %s",
+              (buffer[0] & 0x80) ? "true" : "false",
+              (buffer[0] & 0x40) ? "true" : "false");
+#endif
+
+    if (source->readAt(xingbase + 0xb1 - 0x24, &buffer, 3) == 3) {
+        seeker->mEncoderDelay = (buffer[0] << 4) + (buffer[1] >> 4);
+        seeker->mEncoderPadding = ((buffer[1] & 0xf) << 8) + buffer[2];
+    }
+
+    return seeker;
+}
+
+int32_t XINGSeeker::getEncoderDelay() {
+    return mEncoderDelay;
+}
+
+int32_t XINGSeeker::getEncoderPadding() {
+    return mEncoderPadding;
 }
 
 }  // namespace android
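getOffsetForTime() above maps a seek time to a byte position through the 99-entry XING TOC: the integer percent of the duration selects two neighbouring TOC entries (each entry is a file fraction scaled by 256) and interpolates linearly between them. The same arithmetic as a self-contained function, assuming a valid TOC and a known stream size:

    #include <stdint.h>

    // Translate a seek time into a byte offset using the 99-entry XING TOC.
    static int64_t tocByteOffset(
            const uint8_t toc[99], int64_t sizeBytes,
            int64_t timeUs, int64_t durationUs) {
        float percent = (float)timeUs * 100.0f / (float)durationUs;
        if (percent < 0.0f) percent = 0.0f;
        if (percent > 100.0f) percent = 100.0f;

        int a = (int)percent;
        if (a > 99) a = 99;                       // keep toc[a - 1] in range

        float fa = (a == 0) ? 0.0f : (float)toc[a - 1];
        float fb = (a < 99) ? (float)toc[a] : 256.0f;
        float fx = fa + (fb - fa) * (percent - (float)a);

        return (int64_t)((1.0f / 256.0f) * fx * (float)sizeBytes);
    }
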
diff --git a/media/libstagefright/chromium_http/Android.mk b/media/libstagefright/chromium_http/Android.mk
index 6573e3c..e37b4a8 100644
--- a/media/libstagefright/chromium_http/Android.mk
+++ b/media/libstagefright/chromium_http/Android.mk
@@ -3,13 +3,14 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:=       \
-        ChromiumHTTPDataSource.cpp        \
-        support.cpp                     \
+        DataUriSource.cpp \
+        ChromiumHTTPDataSource.cpp \
+        support.cpp
 
 LOCAL_C_INCLUDES:= \
         $(JNI_H_INCLUDE) \
         frameworks/base/media/libstagefright \
-        $(TOP)/frameworks/base/include/media/stagefright/openmax \
+        $(TOP)/frameworks/native/include/media/openmax \
         external/chromium \
         external/chromium/android
 
diff --git a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp b/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp
index 180460b..76f7946 100644
--- a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp
+++ b/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp
@@ -259,7 +259,7 @@
     mCondition.broadcast();
 }
 
-sp<DecryptHandle> ChromiumHTTPDataSource::DrmInitialization() {
+sp<DecryptHandle> ChromiumHTTPDataSource::DrmInitialization(const char* mime) {
     Mutex::Autolock autoLock(mLock);
 
     if (mDrmManagerClient == NULL) {
@@ -275,7 +275,7 @@
          * original one
          */
         mDecryptHandle = mDrmManagerClient->openDecryptSession(
-                String8(mURI.c_str()));
+                String8(mURI.c_str()), mime);
     }
 
     if (mDecryptHandle == NULL) {
diff --git a/media/libstagefright/chromium_http/DataUriSource.cpp b/media/libstagefright/chromium_http/DataUriSource.cpp
new file mode 100644
index 0000000..ecf3fa1
--- /dev/null
+++ b/media/libstagefright/chromium_http/DataUriSource.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <include/DataUriSource.h>
+
+#include <net/base/data_url.h>
+#include <googleurl/src/gurl.h>
+
+
+namespace android {
+
+DataUriSource::DataUriSource(const char *uri) :
+    mDataUri(uri),
+    mInited(NO_INIT) {
+
+    // Copy1: const char *uri -> String8 mDataUri.
+    std::string mimeTypeStr, unusedCharsetStr, dataStr;
+    // Copy2: String8 mDataUri -> std::string
+    const bool ret = net::DataURL::Parse(
+            GURL(std::string(mDataUri.string())),
+            &mimeTypeStr, &unusedCharsetStr, &dataStr);
+    // Copy3: std::string dataStr -> AString mData
+    mData.setTo(dataStr.data(), dataStr.length());
+    mInited = ret ? OK : UNKNOWN_ERROR;
+
+    // The chromium data url implementation defaults to using "text/plain"
+    // if no mime type is specified. We prefer to leave this unspecified
+    // instead, since the mime type is sniffed in most cases.
+    if (mimeTypeStr != "text/plain") {
+        mMimeType = mimeTypeStr.c_str();
+    }
+}
+
+ssize_t DataUriSource::readAt(off64_t offset, void *out, size_t size) {
+    if (mInited != OK) {
+        return mInited;
+    }
+
+    const off64_t length = mData.size();
+    if (offset >= length) {
+        return UNKNOWN_ERROR;
+    }
+
+    const char *dataBuf = mData.c_str();
+    const size_t bytesToCopy =
+            offset + size >= length ? (length - offset) : size;
+
+    if (bytesToCopy > 0) {
+        memcpy(out, dataBuf + offset, bytesToCopy);
+    }
+
+    return bytesToCopy;
+}
+
+}  // namespace android
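DataUriSource::readAt() above clamps each read to the bytes remaining in the decoded payload and reports how many bytes were copied. The same clamping logic over a plain in-memory buffer, sketched with standard types rather than the AString/off64_t used above:

    #include <string.h>
    #include <string>
    #include <sys/types.h>

    // Copy at most `size` bytes starting at `offset`, clamped to what remains.
    static ssize_t readAtClamped(
            const std::string &data, off_t offset, void *out, size_t size) {
        if (offset < 0 || (size_t)offset >= data.size()) {
            return -1;                                 // nothing readable there
        }
        size_t remaining = data.size() - (size_t)offset;
        size_t toCopy = size < remaining ? size : remaining;
        if (toCopy > 0) {
            memcpy(out, data.data() + offset, toCopy);
        }
        return (ssize_t)toCopy;
    }
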
diff --git a/media/libstagefright/codecs/aacdec/Android.mk b/media/libstagefright/codecs/aacdec/Android.mk
index 20c7bc0..fd6de79 100644
--- a/media/libstagefright/codecs/aacdec/Android.mk
+++ b/media/libstagefright/codecs/aacdec/Android.mk
@@ -164,7 +164,7 @@
 
 LOCAL_C_INCLUDES := \
         frameworks/base/media/libstagefright/include \
-        frameworks/base/include/media/stagefright/openmax \
+        frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS := -DOSCL_IMPORT_REF=
 
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC.cpp b/media/libstagefright/codecs/aacdec/SoftAAC.cpp
index da9d280..ea6c360 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC.cpp
@@ -218,6 +218,18 @@
             return OMX_ErrorNone;
         }
 
+        case OMX_IndexParamAudioPcm:
+        {
+            const OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+                (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+            if (pcmParams->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
         default:
             return SimpleSoftOMXComponent::internalSetParameter(index, params);
     }
diff --git a/media/libstagefright/codecs/aacenc/AACEncoder.cpp b/media/libstagefright/codecs/aacenc/AACEncoder.cpp
index 2b8633d..8b5007e 100644
--- a/media/libstagefright/codecs/aacenc/AACEncoder.cpp
+++ b/media/libstagefright/codecs/aacenc/AACEncoder.cpp
@@ -22,8 +22,8 @@
 #include "voAAC.h"
 #include "cmnMemory.h"
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
@@ -114,8 +114,8 @@
     ALOGV("setAudioSpecificConfigData: %d hz, %d bps, and %d channels",
          mSampleRate, mBitRate, mChannels);
 
-    int32_t index;
-    CHECK_EQ(OK, getSampleRateTableIndex(mSampleRate, index));
+    int32_t index = 0;
+    CHECK_EQ((status_t)OK, getSampleRateTableIndex(mSampleRate, index));
     if (mChannels > 2 || mChannels <= 0) {
         ALOGE("Unsupported number of channels(%d)", mChannels);
         return UNKNOWN_ERROR;
@@ -142,7 +142,7 @@
     mBufferGroup = new MediaBufferGroup;
     mBufferGroup->add_buffer(new MediaBuffer(2048));
 
-    CHECK_EQ(OK, initCheck());
+    CHECK_EQ((status_t)OK, initCheck());
 
     mNumInputSamples = 0;
     mAnchorTimeUs = 0;
@@ -183,7 +183,7 @@
 
     mSource->stop();
     if (mEncoderHandle) {
-        CHECK_EQ(VO_ERR_NONE, mApiHandle->Uninit(mEncoderHandle));
+        CHECK_EQ((VO_U32)VO_ERR_NONE, mApiHandle->Uninit(mEncoderHandle));
         mEncoderHandle = NULL;
     }
     delete mApiHandle;
@@ -223,7 +223,7 @@
     CHECK(options == NULL || !options->getSeekTo(&seekTimeUs, &mode));
 
     MediaBuffer *buffer;
-    CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), OK);
+    CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), (status_t)OK);
     uint8_t *outPtr = (uint8_t *)buffer->data();
     bool readFromSource = false;
     int64_t wallClockTimeUs = -1;
@@ -255,7 +255,7 @@
             }
 
             size_t align = mInputBuffer->range_length() % sizeof(int16_t);
-            CHECK_EQ(align, 0);
+            CHECK_EQ(align, (size_t)0);
 
             int64_t timeUs;
             if (mInputBuffer->meta_data()->findInt64(kKeyDriftTime, &timeUs)) {
diff --git a/media/libstagefright/codecs/aacenc/Android.mk b/media/libstagefright/codecs/aacenc/Android.mk
index c2579c7..b47cb1e 100644
--- a/media/libstagefright/codecs/aacenc/Android.mk
+++ b/media/libstagefright/codecs/aacenc/Android.mk
@@ -85,3 +85,29 @@
 endif
 
 include $(BUILD_STATIC_LIBRARY)
+
+################################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+        SoftAACEncoder.cpp
+
+LOCAL_C_INCLUDES := \
+	frameworks/base/media/libstagefright/include \
+	frameworks/base/media/libstagefright/codecs/common/include \
+	frameworks/native/include/media/openmax
+
+LOCAL_CFLAGS := -DOSCL_IMPORT_REF=
+
+LOCAL_STATIC_LIBRARIES := \
+        libstagefright_aacenc
+
+LOCAL_SHARED_LIBRARIES := \
+        libstagefright_omx libstagefright_foundation libutils \
+        libstagefright_enc_common
+
+LOCAL_MODULE := libstagefright_soft_aacenc
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp b/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp
new file mode 100644
index 0000000..c6724c2
--- /dev/null
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp
@@ -0,0 +1,560 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftAACEncoder"
+#include <utils/Log.h>
+
+#include "SoftAACEncoder.h"
+
+#include "voAAC.h"
+#include "cmnMemory.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+
+namespace android {
+
+template<class T>
+static void InitOMXParams(T *params) {
+    params->nSize = sizeof(T);
+    params->nVersion.s.nVersionMajor = 1;
+    params->nVersion.s.nVersionMinor = 0;
+    params->nVersion.s.nRevision = 0;
+    params->nVersion.s.nStep = 0;
+}
+
+SoftAACEncoder::SoftAACEncoder(
+        const char *name,
+        const OMX_CALLBACKTYPE *callbacks,
+        OMX_PTR appData,
+        OMX_COMPONENTTYPE **component)
+    : SimpleSoftOMXComponent(name, callbacks, appData, component),
+      mEncoderHandle(NULL),
+      mApiHandle(NULL),
+      mMemOperator(NULL),
+      mNumChannels(1),
+      mSampleRate(44100),
+      mBitRate(0),
+      mSentCodecSpecificData(false),
+      mInputSize(0),
+      mInputFrame(NULL),
+      mInputTimeUs(-1ll),
+      mSawInputEOS(false),
+      mSignalledError(false) {
+    initPorts();
+    CHECK_EQ(initEncoder(), (status_t)OK);
+
+    setAudioParams();
+}
+
+SoftAACEncoder::~SoftAACEncoder() {
+    delete[] mInputFrame;
+    mInputFrame = NULL;
+
+    if (mEncoderHandle) {
+        CHECK_EQ(VO_ERR_NONE, mApiHandle->Uninit(mEncoderHandle));
+        mEncoderHandle = NULL;
+    }
+
+    delete mApiHandle;
+    mApiHandle = NULL;
+
+    delete mMemOperator;
+    mMemOperator = NULL;
+}
+
+void SoftAACEncoder::initPorts() {
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+
+    def.nPortIndex = 0;
+    def.eDir = OMX_DirInput;
+    def.nBufferCountMin = kNumBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferSize = kNumSamplesPerFrame * sizeof(int16_t) * 2;
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainAudio;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 1;
+
+    def.format.audio.cMIMEType = const_cast<char *>("audio/raw");
+    def.format.audio.pNativeRender = NULL;
+    def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+    def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+
+    addPort(def);
+
+    def.nPortIndex = 1;
+    def.eDir = OMX_DirOutput;
+    def.nBufferCountMin = kNumBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferSize = 8192;
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainAudio;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 2;
+
+    def.format.audio.cMIMEType = const_cast<char *>("audio/aac");
+    def.format.audio.pNativeRender = NULL;
+    def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+    def.format.audio.eEncoding = OMX_AUDIO_CodingAAC;
+
+    addPort(def);
+}
+
+status_t SoftAACEncoder::initEncoder() {
+    mApiHandle = new VO_AUDIO_CODECAPI;
+
+    if (VO_ERR_NONE != voGetAACEncAPI(mApiHandle)) {
+        ALOGE("Failed to get api handle");
+        return UNKNOWN_ERROR;
+    }
+
+    mMemOperator = new VO_MEM_OPERATOR;
+    mMemOperator->Alloc = cmnMemAlloc;
+    mMemOperator->Copy = cmnMemCopy;
+    mMemOperator->Free = cmnMemFree;
+    mMemOperator->Set = cmnMemSet;
+    mMemOperator->Check = cmnMemCheck;
+
+    VO_CODEC_INIT_USERDATA userData;
+    memset(&userData, 0, sizeof(userData));
+    userData.memflag = VO_IMF_USERMEMOPERATOR;
+    userData.memData = (VO_PTR) mMemOperator;
+    if (VO_ERR_NONE !=
+            mApiHandle->Init(&mEncoderHandle, VO_AUDIO_CodingAAC, &userData)) {
+        ALOGE("Failed to init AAC encoder");
+        return UNKNOWN_ERROR;
+    }
+
+    return OK;
+}
+
+OMX_ERRORTYPE SoftAACEncoder::internalGetParameter(
+        OMX_INDEXTYPE index, OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexParamAudioPortFormat:
+        {
+            OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
+                (OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+
+            if (formatParams->nPortIndex > 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (formatParams->nIndex > 0) {
+                return OMX_ErrorNoMore;
+            }
+
+            formatParams->eEncoding =
+                (formatParams->nPortIndex == 0)
+                    ? OMX_AUDIO_CodingPCM : OMX_AUDIO_CodingAAC;
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioAac:
+        {
+            OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
+                (OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
+
+            if (aacParams->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            aacParams->nBitRate = mBitRate;
+            aacParams->nAudioBandWidth = 0;
+            aacParams->nAACtools = 0;
+            aacParams->nAACERtools = 0;
+            aacParams->eAACProfile = OMX_AUDIO_AACObjectMain;
+            aacParams->eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4FF;
+            aacParams->eChannelMode = OMX_AUDIO_ChannelModeStereo;
+
+            aacParams->nChannels = mNumChannels;
+            aacParams->nSampleRate = mSampleRate;
+            aacParams->nFrameLength = 0;
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPcm:
+        {
+            OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+                (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+            if (pcmParams->nPortIndex != 0) {
+                return OMX_ErrorUndefined;
+            }
+
+            pcmParams->eNumData = OMX_NumericalDataSigned;
+            pcmParams->eEndian = OMX_EndianBig;
+            pcmParams->bInterleaved = OMX_TRUE;
+            pcmParams->nBitPerSample = 16;
+            pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
+            pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
+            pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
+
+            pcmParams->nChannels = mNumChannels;
+            pcmParams->nSamplingRate = mSampleRate;
+
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::internalGetParameter(index, params);
+    }
+}
+
+OMX_ERRORTYPE SoftAACEncoder::internalSetParameter(
+        OMX_INDEXTYPE index, const OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexParamStandardComponentRole:
+        {
+            const OMX_PARAM_COMPONENTROLETYPE *roleParams =
+                (const OMX_PARAM_COMPONENTROLETYPE *)params;
+
+            if (strncmp((const char *)roleParams->cRole,
+                        "audio_encoder.aac",
+                        OMX_MAX_STRINGNAME_SIZE - 1)) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPortFormat:
+        {
+            const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
+                (const OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+
+            if (formatParams->nPortIndex > 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (formatParams->nIndex > 0) {
+                return OMX_ErrorNoMore;
+            }
+
+            if ((formatParams->nPortIndex == 0
+                        && formatParams->eEncoding != OMX_AUDIO_CodingPCM)
+                || (formatParams->nPortIndex == 1
+                        && formatParams->eEncoding != OMX_AUDIO_CodingAAC)) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioAac:
+        {
+            OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
+                (OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
+
+            if (aacParams->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            mBitRate = aacParams->nBitRate;
+            mNumChannels = aacParams->nChannels;
+            mSampleRate = aacParams->nSampleRate;
+
+            if (setAudioParams() != OK) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPcm:
+        {
+            OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+                (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+            if (pcmParams->nPortIndex != 0) {
+                return OMX_ErrorUndefined;
+            }
+
+            mNumChannels = pcmParams->nChannels;
+            mSampleRate = pcmParams->nSamplingRate;
+
+            if (setAudioParams() != OK) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+
+        default:
+            return SimpleSoftOMXComponent::internalSetParameter(index, params);
+    }
+}
+
+status_t SoftAACEncoder::setAudioParams() {
+    // We call this whenever the sample rate, number of channels or bitrate
+    // changes in response to a setParameter call.
+
+    ALOGV("setAudioParams: %lu Hz, %lu channels, %lu bps",
+         mSampleRate, mNumChannels, mBitRate);
+
+    status_t err = setAudioSpecificConfigData();
+
+    if (err != OK) {
+        return err;
+    }
+
+    AACENC_PARAM params;
+    memset(&params, 0, sizeof(params));
+    params.sampleRate = mSampleRate;
+    params.bitRate = mBitRate;
+    params.nChannels = mNumChannels;
+    params.adtsUsed = 0;  // We add adts header in the file writer if needed.
+    if (VO_ERR_NONE != mApiHandle->SetParam(
+                mEncoderHandle, VO_PID_AAC_ENCPARAM,  &params)) {
+        ALOGE("Failed to set AAC encoder parameters");
+        return UNKNOWN_ERROR;
+    }
+
+    return OK;
+}
+
+static status_t getSampleRateTableIndex(int32_t sampleRate, int32_t &index) {
+    static const int32_t kSampleRateTable[] = {
+        96000, 88200, 64000, 48000, 44100, 32000,
+        24000, 22050, 16000, 12000, 11025, 8000
+    };
+    const int32_t tableSize =
+        sizeof(kSampleRateTable) / sizeof(kSampleRateTable[0]);
+
+    for (int32_t i = 0; i < tableSize; ++i) {
+        if (sampleRate == kSampleRateTable[i]) {
+            index = i;
+            return OK;
+        }
+    }
+
+    return UNKNOWN_ERROR;
+}
+
+status_t SoftAACEncoder::setAudioSpecificConfigData() {
+    // The AAC encoder's audio specific config really only encodes
+    // number of channels and the sample rate (mapped to an index into
+    // a fixed sample rate table).
+
+    int32_t index;
+    status_t err = getSampleRateTableIndex(mSampleRate, index);
+    if (err != OK) {
+        ALOGE("Unsupported sample rate (%lu Hz)", mSampleRate);
+        return err;
+    }
+
+    if (mNumChannels > 2 || mNumChannels <= 0) {
+        ALOGE("Unsupported number of channels(%lu)", mNumChannels);
+        return UNKNOWN_ERROR;
+    }
+
+    // OMX_AUDIO_AACObjectLC
+    mAudioSpecificConfigData[0] = ((0x02 << 3) | (index >> 1));
+    mAudioSpecificConfigData[1] = ((index & 0x01) << 7) | (mNumChannels << 3);
+
+    return OK;
+}
+
+void SoftAACEncoder::onQueueFilled(OMX_U32 portIndex) {
+    if (mSignalledError) {
+        return;
+    }
+
+    List<BufferInfo *> &inQueue = getPortQueue(0);
+    List<BufferInfo *> &outQueue = getPortQueue(1);
+
+    if (!mSentCodecSpecificData) {
+        // The very first thing we want to output is the codec specific
+        // data. It does not require any input data but we will need an
+        // output buffer to store it in.
+
+        if (outQueue.empty()) {
+            return;
+        }
+
+        BufferInfo *outInfo = *outQueue.begin();
+        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+        outHeader->nFilledLen = sizeof(mAudioSpecificConfigData);
+        outHeader->nFlags = OMX_BUFFERFLAG_CODECCONFIG;
+
+        uint8_t *out = outHeader->pBuffer + outHeader->nOffset;
+        memcpy(out, mAudioSpecificConfigData, sizeof(mAudioSpecificConfigData));
+
+#if 0
+        ALOGI("sending codec specific data.");
+        hexdump(out, sizeof(mAudioSpecificConfigData));
+#endif
+
+        outQueue.erase(outQueue.begin());
+        outInfo->mOwnedByUs = false;
+        notifyFillBufferDone(outHeader);
+
+        mSentCodecSpecificData = true;
+    }
+
+    size_t numBytesPerInputFrame =
+        mNumChannels * kNumSamplesPerFrame * sizeof(int16_t);
+
+    for (;;) {
+        // We do the following until we run out of buffers.
+
+        while (mInputSize < numBytesPerInputFrame) {
+            // As long as there's still input data to be read we
+            // will drain "kNumSamplesPerFrame * mNumChannels" samples
+            // into the "mInputFrame" buffer and then encode those
+            // as a unit into an output buffer.
+
+            if (mSawInputEOS || inQueue.empty()) {
+                return;
+            }
+
+            BufferInfo *inInfo = *inQueue.begin();
+            OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+            const void *inData = inHeader->pBuffer + inHeader->nOffset;
+
+            size_t copy = numBytesPerInputFrame - mInputSize;
+            if (copy > inHeader->nFilledLen) {
+                copy = inHeader->nFilledLen;
+            }
+
+            if (mInputFrame == NULL) {
+                mInputFrame = new int16_t[kNumSamplesPerFrame * mNumChannels];
+            }
+
+            if (mInputSize == 0) {
+                mInputTimeUs = inHeader->nTimeStamp;
+            }
+
+            memcpy((uint8_t *)mInputFrame + mInputSize, inData, copy);
+            mInputSize += copy;
+
+            inHeader->nOffset += copy;
+            inHeader->nFilledLen -= copy;
+
+            // "Time" on the input buffer has in effect advanced by the
+            // number of audio frames we just advanced nOffset by.
+            inHeader->nTimeStamp +=
+                (copy * 1000000ll / mSampleRate)
+                    / (mNumChannels * sizeof(int16_t));
+
+            if (inHeader->nFilledLen == 0) {
+                if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+                    ALOGV("saw input EOS");
+                    mSawInputEOS = true;
+
+                    // Pad any remaining data with zeroes.
+                    memset((uint8_t *)mInputFrame + mInputSize,
+                           0,
+                           numBytesPerInputFrame - mInputSize);
+
+                    mInputSize = numBytesPerInputFrame;
+                }
+
+                inQueue.erase(inQueue.begin());
+                inInfo->mOwnedByUs = false;
+                notifyEmptyBufferDone(inHeader);
+
+                inData = NULL;
+                inHeader = NULL;
+                inInfo = NULL;
+            }
+        }
+
+        // At this point we have all the input data necessary to encode
+        // a single frame; all we need is an output buffer to store the
+        // result in.
+
+        if (outQueue.empty()) {
+            return;
+        }
+
+        BufferInfo *outInfo = *outQueue.begin();
+        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+        VO_CODECBUFFER inputData;
+        memset(&inputData, 0, sizeof(inputData));
+        inputData.Buffer = (unsigned char *)mInputFrame;
+        inputData.Length = numBytesPerInputFrame;
+        CHECK(VO_ERR_NONE ==
+                mApiHandle->SetInputData(mEncoderHandle, &inputData));
+
+        VO_CODECBUFFER outputData;
+        memset(&outputData, 0, sizeof(outputData));
+        VO_AUDIO_OUTPUTINFO outputInfo;
+        memset(&outputInfo, 0, sizeof(outputInfo));
+
+        uint8_t *outPtr = (uint8_t *)outHeader->pBuffer + outHeader->nOffset;
+        size_t outAvailable = outHeader->nAllocLen - outHeader->nOffset;
+
+        VO_U32 ret = VO_ERR_NONE;
+        size_t nOutputBytes = 0;
+        do {
+            outputData.Buffer = outPtr;
+            outputData.Length = outAvailable - nOutputBytes;
+            ret = mApiHandle->GetOutputData(
+                    mEncoderHandle, &outputData, &outputInfo);
+            if (ret == VO_ERR_NONE) {
+                outPtr += outputData.Length;
+                nOutputBytes += outputData.Length;
+            }
+        } while (ret != VO_ERR_INPUT_BUFFER_SMALL);
+
+        outHeader->nFilledLen = nOutputBytes;
+
+        outHeader->nFlags = OMX_BUFFERFLAG_ENDOFFRAME;
+
+        if (mSawInputEOS) {
+            // We also tag this output buffer with EOS if it corresponds
+            // to the final input buffer.
+            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+        }
+
+        outHeader->nTimeStamp = mInputTimeUs;
+
+#if 0
+        ALOGI("sending %d bytes of data (time = %lld us, flags = 0x%08lx)",
+              nOutputBytes, mInputTimeUs, outHeader->nFlags);
+
+        hexdump(outHeader->pBuffer + outHeader->nOffset, outHeader->nFilledLen);
+#endif
+
+        outQueue.erase(outQueue.begin());
+        outInfo->mOwnedByUs = false;
+        notifyFillBufferDone(outHeader);
+
+        outHeader = NULL;
+        outInfo = NULL;
+
+        mInputSize = 0;
+    }
+}
+
+}  // namespace android
+
+android::SoftOMXComponent *createSoftOMXComponent(
+        const char *name, const OMX_CALLBACKTYPE *callbacks,
+        OMX_PTR appData, OMX_COMPONENTTYPE **component) {
+    return new android::SoftAACEncoder(name, callbacks, appData, component);
+}
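
setAudioSpecificConfigData() above packs the 2-byte AAC AudioSpecificConfig as 5 bits of object type (hard-coded to AAC LC, 0x02), 4 bits of sampling-frequency index and 4 bits of channel configuration. A standalone sketch of that packing for the encoder's 44.1 kHz stereo defaults; the helper name is illustrative and not part of the component:

#include <stdint.h>
#include <assert.h>

// Same table and ordering as getSampleRateTableIndex() in SoftAACEncoder.cpp.
static const int32_t kSampleRateTable[] = {
    96000, 88200, 64000, 48000, 44100, 32000,
    24000, 22050, 16000, 12000, 11025, 8000
};

// Illustrative re-statement of the packing done in setAudioSpecificConfigData().
static void makeAudioSpecificConfig(
        int32_t sampleRate, int32_t numChannels, uint8_t config[2]) {
    int32_t index = -1;
    for (size_t i = 0;
            i < sizeof(kSampleRateTable) / sizeof(kSampleRateTable[0]); ++i) {
        if (kSampleRateTable[i] == sampleRate) {
            index = i;
            break;
        }
    }
    assert(index >= 0 && numChannels >= 1 && numChannels <= 2);

    // 5 bits object type (2 = AAC LC), 4 bits frequency index, 4 bits channel
    // configuration; the trailing flag bits are left zero.
    config[0] = (0x02 << 3) | (index >> 1);
    config[1] = ((index & 0x01) << 7) | (numChannels << 3);
}

int main() {
    uint8_t config[2];
    makeAudioSpecificConfig(44100 /* table index 4 */, 2, config);
    assert(config[0] == 0x12 && config[1] == 0x10);  // 44.1 kHz stereo, AAC LC
    return 0;
}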
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder.h b/media/libstagefright/codecs/aacenc/SoftAACEncoder.h
new file mode 100644
index 0000000..d148eb7
--- /dev/null
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_AAC_ENCODER_H_
+
+#define SOFT_AAC_ENCODER_H_
+
+#include "SimpleSoftOMXComponent.h"
+
+struct VO_AUDIO_CODECAPI;
+struct VO_MEM_OPERATOR;
+
+namespace android {
+
+struct SoftAACEncoder : public SimpleSoftOMXComponent {
+    SoftAACEncoder(
+            const char *name,
+            const OMX_CALLBACKTYPE *callbacks,
+            OMX_PTR appData,
+            OMX_COMPONENTTYPE **component);
+
+protected:
+    virtual ~SoftAACEncoder();
+
+    virtual OMX_ERRORTYPE internalGetParameter(
+            OMX_INDEXTYPE index, OMX_PTR params);
+
+    virtual OMX_ERRORTYPE internalSetParameter(
+            OMX_INDEXTYPE index, const OMX_PTR params);
+
+    virtual void onQueueFilled(OMX_U32 portIndex);
+
+private:
+    enum {
+        kNumBuffers             = 4,
+        kNumSamplesPerFrame     = 1024,
+    };
+
+    void *mEncoderHandle;
+    VO_AUDIO_CODECAPI *mApiHandle;
+    VO_MEM_OPERATOR  *mMemOperator;
+
+    OMX_U32 mNumChannels;
+    OMX_U32 mSampleRate;
+    OMX_U32 mBitRate;
+
+    bool mSentCodecSpecificData;
+    size_t mInputSize;
+    int16_t *mInputFrame;
+    int64_t mInputTimeUs;
+
+    bool mSawInputEOS;
+
+    uint8_t mAudioSpecificConfigData[2];
+
+    bool mSignalledError;
+
+    void initPorts();
+    status_t initEncoder();
+
+    status_t setAudioSpecificConfigData();
+    status_t setAudioParams();
+
+    DISALLOW_EVIL_CONSTRUCTORS(SoftAACEncoder);
+};
+
+}  // namespace android
+
+#endif  // SOFT_AAC_ENCODER_H_
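
The header exposes the encoder only through the OMX parameter indices handled in internalGetParameter()/internalSetParameter(). A hedged sketch of how an IL client might configure both ports before encoding; obtaining the component handle is elided, the values are examples, and only standard OpenMAX IL calls are used:

#include <string.h>
#include <OMX_Core.h>
#include <OMX_Audio.h>

// Fill nSize/nVersion the same way the component's own InitOMXParams() does.
template<class T>
static void InitOMXParams(T *params) {
    memset(params, 0, sizeof(T));
    params->nSize = sizeof(T);
    params->nVersion.s.nVersionMajor = 1;
}

// Sketch: configure an already-obtained encoder handle for 44.1 kHz stereo
// input at a 128 kbps AAC output rate.
static OMX_ERRORTYPE configureAacEncoder(OMX_HANDLETYPE handle) {
    OMX_AUDIO_PARAM_PCMMODETYPE pcmParams;
    InitOMXParams(&pcmParams);
    pcmParams.nPortIndex = 0;            // input port carries raw PCM
    pcmParams.nChannels = 2;
    pcmParams.nSamplingRate = 44100;
    OMX_ERRORTYPE err =
            OMX_SetParameter(handle, OMX_IndexParamAudioPcm, &pcmParams);
    if (err != OMX_ErrorNone) {
        return err;
    }

    OMX_AUDIO_PARAM_AACPROFILETYPE aacParams;
    InitOMXParams(&aacParams);
    aacParams.nPortIndex = 1;            // output port carries AAC
    aacParams.nChannels = 2;
    aacParams.nSampleRate = 44100;
    aacParams.nBitRate = 128000;
    return OMX_SetParameter(handle, OMX_IndexParamAudioAac, &aacParams);
}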
diff --git a/media/libstagefright/codecs/aacenc/src/asm/ARMV7/PrePostMDCT_v7.s b/media/libstagefright/codecs/aacenc/src/asm/ARMV7/PrePostMDCT_v7.s
index b2bc9d9..7f6b881 100644
--- a/media/libstagefright/codecs/aacenc/src/asm/ARMV7/PrePostMDCT_v7.s
+++ b/media/libstagefright/codecs/aacenc/src/asm/ARMV7/PrePostMDCT_v7.s
@@ -23,9 +23,13 @@
 
 	.section .text
 	.global	PreMDCT
+	.fnstart
 
 PreMDCT:
 	stmdb     sp!, {r4 - r11, lr}
+	.save	  {r4 - r11, lr}
+	fstmfdd   sp!, {d8 - d15}
+	.vsave	  {d8 - d15}
 
 	add         r9, r0, r1, lsl #2
 	sub         r3, r9, #32
@@ -74,14 +78,20 @@
 	bne       	PreMDCT_LOOP
 
 PreMDCT_END:
+	fldmfdd   sp!, {d8 - d15}
 	ldmia     sp!, {r4 - r11, pc}
 	@ENDP  @ |PreMDCT|
+	.fnend
 
 	.section .text
 	.global	PostMDCT
+	.fnstart
 
 PostMDCT:
 	stmdb     sp!, {r4 - r11, lr}
+	.save	  {r4 - r11, lr}
+	fstmfdd   sp!, {d8 - d15}
+	.vsave	  {d8 - d15}
 
 	add         r9, r0, r1, lsl #2
 	sub         r3, r9, #32
@@ -129,7 +139,8 @@
 	bne       	PostMDCT_LOOP
 
 PostMDCT_END:
+	fldmfdd   sp!, {d8 - d15}
 	ldmia     sp!, {r4 - r11, pc}
 
 	@ENDP  		@ |PostMDCT|
-	.end
+	.fnend
diff --git a/media/libstagefright/codecs/aacenc/src/asm/ARMV7/R4R8First_v7.s b/media/libstagefright/codecs/aacenc/src/asm/ARMV7/R4R8First_v7.s
index 3033156..03fa6a9 100644
--- a/media/libstagefright/codecs/aacenc/src/asm/ARMV7/R4R8First_v7.s
+++ b/media/libstagefright/codecs/aacenc/src/asm/ARMV7/R4R8First_v7.s
@@ -23,9 +23,13 @@
 
 	.section .text
 	.global	Radix8First
+	.fnstart
 
 Radix8First:
 	stmdb     		sp!, {r4 - r11, lr}
+	.save	  		{r4 - r11, lr}
+	fstmfdd   		sp!, {d8 - d15}
+	.vsave	  		{d8 - d15}
 
 	ldr       		r3, SQRT1_2
 	cmp       		r1, #0
@@ -103,17 +107,23 @@
 	bne       			Radix8First_LOOP
 
 Radix8First_END:
+	fldmfdd   sp!, {d8 - d15}
 	ldmia     sp!, {r4 - r11, pc}
 SQRT1_2:
 	.word      0x2d413ccd
 
 	@ENDP  @ |Radix8First|
+	.fnend
 
 	.section .text
 	.global	Radix4First
+	.fnstart
 
 Radix4First:
 	stmdb     	sp!, {r4 - r11, lr}
+	.save	  	{r4 - r11, lr}
+	fstmfdd   	sp!, {d8 - d15}
+	.vsave	  	{d8 - d15}
 
 	cmp       	r1, #0
 	beq       	Radix4First_END
@@ -140,7 +150,8 @@
 	bne       		Radix4First_LOOP
 
 Radix4First_END:
+	fldmfdd   		sp!, {d8 - d15}
 	ldmia    		sp!, {r4 - r11, pc}
 
 	@ENDP  @ |Radix4First|
-	.end
+	.fnend
diff --git a/media/libstagefright/codecs/aacenc/src/asm/ARMV7/Radix4FFT_v7.s b/media/libstagefright/codecs/aacenc/src/asm/ARMV7/Radix4FFT_v7.s
index f874825..431bc30 100644
--- a/media/libstagefright/codecs/aacenc/src/asm/ARMV7/Radix4FFT_v7.s
+++ b/media/libstagefright/codecs/aacenc/src/asm/ARMV7/Radix4FFT_v7.s
@@ -23,9 +23,13 @@
 
 	.section .text
 	.global	Radix4FFT
+	.fnstart
 
 Radix4FFT:
 	stmdb    sp!, {r4 - r11, lr}
+	.save	 {r4 - r11, lr}
+	fstmfdd  sp!, {d8 - d15}
+	.vsave	 {d8 - d15}
 
 	mov			r1, r1, asr #2
 	cmp     	r1, #0
@@ -137,7 +141,8 @@
 	bne     			Radix4FFT_LOOP1
 
 Radix4FFT_END:
+	fldmfdd   			sp!, {d8 - d15}
 	ldmia   			sp!, {r4 - r11, pc}
 
 	@ENDP  @ |Radix4FFT|
-	.end
+	.fnend
diff --git a/media/libstagefright/codecs/amrnb/dec/Android.mk b/media/libstagefright/codecs/amrnb/dec/Android.mk
index 23a22ef..b81306d 100644
--- a/media/libstagefright/codecs/amrnb/dec/Android.mk
+++ b/media/libstagefright/codecs/amrnb/dec/Android.mk
@@ -61,12 +61,12 @@
 
 LOCAL_C_INCLUDES := \
         frameworks/base/media/libstagefright/include \
-        frameworks/base/include/media/stagefright/openmax \
+        frameworks/base/media/libstagefright/codecs/amrwb/src \
+        frameworks/native/include/media/openmax \
         $(LOCAL_PATH)/src \
         $(LOCAL_PATH)/include \
         $(LOCAL_PATH)/../common/include \
-        $(LOCAL_PATH)/../common \
-        frameworks/base/media/libstagefright/codecs/amrwb/src \
+        $(LOCAL_PATH)/../common
 
 LOCAL_CFLAGS := -DOSCL_IMPORT_REF=
 
diff --git a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
index 7602f2d..796caa4 100644
--- a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
@@ -236,6 +236,18 @@
             return OMX_ErrorNone;
         }
 
+        case OMX_IndexParamAudioPcm:
+        {
+            const OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+                (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+            if (pcmParams->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
         default:
             return SimpleSoftOMXComponent::internalSetParameter(index, params);
     }
diff --git a/media/libstagefright/codecs/amrnb/enc/AMRNBEncoder.cpp b/media/libstagefright/codecs/amrnb/enc/AMRNBEncoder.cpp
deleted file mode 100644
index 3afbc4f..0000000
--- a/media/libstagefright/codecs/amrnb/enc/AMRNBEncoder.cpp
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "AMRNBEncoder.h"
-
-#include "gsmamr_enc.h"
-
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDebug.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
-
-namespace android {
-
-static const int32_t kNumSamplesPerFrame = 160;
-static const int32_t kSampleRate = 8000;
-
-AMRNBEncoder::AMRNBEncoder(const sp<MediaSource> &source, const sp<MetaData> &meta)
-    : mSource(source),
-      mMeta(meta),
-      mStarted(false),
-      mBufferGroup(NULL),
-      mEncState(NULL),
-      mSidState(NULL),
-      mAnchorTimeUs(0),
-      mNumFramesOutput(0),
-      mInputBuffer(NULL),
-      mMode(MR475),
-      mNumInputSamples(0) {
-}
-
-AMRNBEncoder::~AMRNBEncoder() {
-    if (mStarted) {
-        stop();
-    }
-}
-
-static Mode PickModeFromBitrate(int32_t bps) {
-    if (bps <= 4750) {
-        return MR475;
-    } else if (bps <= 5150) {
-        return MR515;
-    } else if (bps <= 5900) {
-        return MR59;
-    } else if (bps <= 6700) {
-        return MR67;
-    } else if (bps <= 7400) {
-        return MR74;
-    } else if (bps <= 7950) {
-        return MR795;
-    } else if (bps <= 10200) {
-        return MR102;
-    } else {
-        return MR122;
-    }
-}
-
-status_t AMRNBEncoder::start(MetaData *params) {
-    if (mStarted) {
-        ALOGW("Call start() when encoder already started");
-        return OK;
-    }
-
-    mBufferGroup = new MediaBufferGroup;
-    mBufferGroup->add_buffer(new MediaBuffer(32));
-
-    CHECK_EQ(AMREncodeInit(
-                &mEncState, &mSidState, false /* dtx_enable */),
-             0);
-
-    status_t err = mSource->start(params);
-    if (err != OK) {
-        ALOGE("AudioSource is not available");
-        return err;
-    }
-
-    mAnchorTimeUs = 0;
-    mNumFramesOutput = 0;
-    mStarted = true;
-    mNumInputSamples = 0;
-
-    int32_t bitrate;
-    if (params && params->findInt32(kKeyBitRate, &bitrate)) {
-        mMode = PickModeFromBitrate(bitrate);
-    } else {
-        mMode = MR475;
-    }
-
-    return OK;
-}
-
-status_t AMRNBEncoder::stop() {
-    if (!mStarted) {
-        ALOGW("Call stop() when encoder has not started.");
-        return OK;
-    }
-
-    if (mInputBuffer) {
-        mInputBuffer->release();
-        mInputBuffer = NULL;
-    }
-
-    delete mBufferGroup;
-    mBufferGroup = NULL;
-
-    mSource->stop();
-
-    AMREncodeExit(&mEncState, &mSidState);
-    mEncState = mSidState = NULL;
-
-    mStarted = false;
-
-    return OK;
-}
-
-sp<MetaData> AMRNBEncoder::getFormat() {
-    sp<MetaData> srcFormat = mSource->getFormat();
-
-    mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AMR_NB);
-
-    int64_t durationUs;
-    if (srcFormat->findInt64(kKeyDuration, &durationUs)) {
-        mMeta->setInt64(kKeyDuration, durationUs);
-    }
-
-    mMeta->setCString(kKeyDecoderComponent, "AMRNBEncoder");
-
-    return mMeta;
-}
-
-status_t AMRNBEncoder::read(
-        MediaBuffer **out, const ReadOptions *options) {
-    status_t err;
-
-    *out = NULL;
-
-    int64_t seekTimeUs;
-    ReadOptions::SeekMode mode;
-    CHECK(options == NULL || !options->getSeekTo(&seekTimeUs, &mode));
-    bool readFromSource = false;
-    int64_t wallClockTimeUs = -1;
-
-    while (mNumInputSamples < kNumSamplesPerFrame) {
-        if (mInputBuffer == NULL) {
-            err = mSource->read(&mInputBuffer, options);
-
-            if (err != OK) {
-                if (mNumInputSamples == 0) {
-                    return ERROR_END_OF_STREAM;
-                }
-                memset(&mInputFrame[mNumInputSamples],
-                       0,
-                       sizeof(int16_t)
-                            * (kNumSamplesPerFrame - mNumInputSamples));
-                mNumInputSamples = kNumSamplesPerFrame;
-                break;
-            }
-
-            size_t align = mInputBuffer->range_length() % sizeof(int16_t);
-            CHECK_EQ(align, 0);
-            readFromSource = true;
-
-            int64_t timeUs;
-            if (mInputBuffer->meta_data()->findInt64(kKeyDriftTime, &timeUs)) {
-                wallClockTimeUs = timeUs;
-            }
-            if (mInputBuffer->meta_data()->findInt64(kKeyAnchorTime, &timeUs)) {
-                mAnchorTimeUs = timeUs;
-            }
-        } else {
-            readFromSource = false;
-        }
-
-        size_t copy =
-            (kNumSamplesPerFrame - mNumInputSamples) * sizeof(int16_t);
-
-        if (copy > mInputBuffer->range_length()) {
-            copy = mInputBuffer->range_length();
-        }
-
-        memcpy(&mInputFrame[mNumInputSamples],
-               (const uint8_t *)mInputBuffer->data()
-                    + mInputBuffer->range_offset(),
-               copy);
-
-        mNumInputSamples += copy / sizeof(int16_t);
-
-        mInputBuffer->set_range(
-                mInputBuffer->range_offset() + copy,
-                mInputBuffer->range_length() - copy);
-
-        if (mInputBuffer->range_length() == 0) {
-            mInputBuffer->release();
-            mInputBuffer = NULL;
-        }
-    }
-
-    MediaBuffer *buffer;
-    CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), OK);
-
-    uint8_t *outPtr = (uint8_t *)buffer->data();
-
-    Frame_Type_3GPP frameType;
-    int res = AMREncode(
-            mEncState, mSidState, (Mode)mMode,
-            mInputFrame, outPtr, &frameType, AMR_TX_WMF);
-
-    CHECK(res >= 0);
-    CHECK((size_t)res < buffer->size());
-
-    // Convert header byte from WMF to IETF format.
-    outPtr[0] = ((outPtr[0] << 3) | 4) & 0x7c;
-
-    buffer->set_range(0, res);
-
-    // Each frame of 160 samples is 20ms long.
-    int64_t mediaTimeUs = mNumFramesOutput * 20000LL;
-    buffer->meta_data()->setInt64(
-            kKeyTime, mAnchorTimeUs + mediaTimeUs);
-
-    if (readFromSource && wallClockTimeUs != -1) {
-        buffer->meta_data()->setInt64(kKeyDriftTime,
-            mediaTimeUs - wallClockTimeUs);
-    }
-
-    ++mNumFramesOutput;
-
-    *out = buffer;
-
-    mNumInputSamples = 0;
-
-    return OK;
-}
-
-}  // namespace android
diff --git a/media/libstagefright/codecs/amrnb/enc/Android.mk b/media/libstagefright/codecs/amrnb/enc/Android.mk
index b6aed81..28246ae 100644
--- a/media/libstagefright/codecs/amrnb/enc/Android.mk
+++ b/media/libstagefright/codecs/amrnb/enc/Android.mk
@@ -2,7 +2,6 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES := \
-        AMRNBEncoder.cpp \
 	src/amrencode.cpp \
  	src/autocorr.cpp \
  	src/c1035pf.cpp \
@@ -74,3 +73,30 @@
 LOCAL_MODULE := libstagefright_amrnbenc
 
 include $(BUILD_STATIC_LIBRARY)
+
+################################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+        SoftAMRNBEncoder.cpp
+
+LOCAL_C_INCLUDES := \
+        frameworks/base/media/libstagefright/include \
+        frameworks/native/include/media/openmax \
+        $(LOCAL_PATH)/src \
+        $(LOCAL_PATH)/include \
+        $(LOCAL_PATH)/../common/include \
+        $(LOCAL_PATH)/../common
+
+LOCAL_STATIC_LIBRARIES := \
+        libstagefright_amrnbenc
+
+LOCAL_SHARED_LIBRARIES := \
+        libstagefright_omx libstagefright_foundation libutils \
+        libstagefright_amrnb_common
+
+LOCAL_MODULE := libstagefright_soft_amrnbenc
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
new file mode 100644
index 0000000..07f8b4f
--- /dev/null
+++ b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
@@ -0,0 +1,404 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftAMRNBEncoder"
+#include <utils/Log.h>
+
+#include "SoftAMRNBEncoder.h"
+
+#include "gsmamr_enc.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+
+namespace android {
+
+static const int32_t kSampleRate = 8000;
+
+template<class T>
+static void InitOMXParams(T *params) {
+    params->nSize = sizeof(T);
+    params->nVersion.s.nVersionMajor = 1;
+    params->nVersion.s.nVersionMinor = 0;
+    params->nVersion.s.nRevision = 0;
+    params->nVersion.s.nStep = 0;
+}
+
+SoftAMRNBEncoder::SoftAMRNBEncoder(
+        const char *name,
+        const OMX_CALLBACKTYPE *callbacks,
+        OMX_PTR appData,
+        OMX_COMPONENTTYPE **component)
+    : SimpleSoftOMXComponent(name, callbacks, appData, component),
+      mEncState(NULL),
+      mSidState(NULL),
+      mBitRate(0),
+      mMode(MR475),
+      mInputSize(0),
+      mInputTimeUs(-1ll),
+      mSawInputEOS(false),
+      mSignalledError(false) {
+    initPorts();
+    CHECK_EQ(initEncoder(), (status_t)OK);
+}
+
+SoftAMRNBEncoder::~SoftAMRNBEncoder() {
+    if (mEncState != NULL) {
+        AMREncodeExit(&mEncState, &mSidState);
+        mEncState = mSidState = NULL;
+    }
+}
+
+void SoftAMRNBEncoder::initPorts() {
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+
+    def.nPortIndex = 0;
+    def.eDir = OMX_DirInput;
+    def.nBufferCountMin = kNumBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferSize = kNumSamplesPerFrame * sizeof(int16_t);
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainAudio;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 1;
+
+    def.format.audio.cMIMEType = const_cast<char *>("audio/raw");
+    def.format.audio.pNativeRender = NULL;
+    def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+    def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+
+    addPort(def);
+
+    def.nPortIndex = 1;
+    def.eDir = OMX_DirOutput;
+    def.nBufferCountMin = kNumBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferSize = 8192;
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainAudio;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 2;
+
+    def.format.audio.cMIMEType = const_cast<char *>("audio/3gpp");
+    def.format.audio.pNativeRender = NULL;
+    def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+    def.format.audio.eEncoding = OMX_AUDIO_CodingAMR;
+
+    addPort(def);
+}
+
+status_t SoftAMRNBEncoder::initEncoder() {
+    if (AMREncodeInit(&mEncState, &mSidState, false /* dtx_enable */) != 0) {
+        return UNKNOWN_ERROR;
+    }
+
+    return OK;
+}
+
+OMX_ERRORTYPE SoftAMRNBEncoder::internalGetParameter(
+        OMX_INDEXTYPE index, OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexParamAudioPortFormat:
+        {
+            OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
+                (OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+
+            if (formatParams->nPortIndex > 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (formatParams->nIndex > 0) {
+                return OMX_ErrorNoMore;
+            }
+
+            formatParams->eEncoding =
+                (formatParams->nPortIndex == 0)
+                    ? OMX_AUDIO_CodingPCM : OMX_AUDIO_CodingAMR;
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioAmr:
+        {
+            OMX_AUDIO_PARAM_AMRTYPE *amrParams =
+                (OMX_AUDIO_PARAM_AMRTYPE *)params;
+
+            if (amrParams->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            amrParams->nChannels = 1;
+            amrParams->nBitRate = mBitRate;
+            amrParams->eAMRBandMode = (OMX_AUDIO_AMRBANDMODETYPE)(mMode + 1);
+            amrParams->eAMRDTXMode = OMX_AUDIO_AMRDTXModeOff;
+            amrParams->eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPcm:
+        {
+            OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+                (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+            if (pcmParams->nPortIndex != 0) {
+                return OMX_ErrorUndefined;
+            }
+
+            pcmParams->eNumData = OMX_NumericalDataSigned;
+            pcmParams->eEndian = OMX_EndianBig;
+            pcmParams->bInterleaved = OMX_TRUE;
+            pcmParams->nBitPerSample = 16;
+            pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
+            pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelCF;
+
+            pcmParams->nChannels = 1;
+            pcmParams->nSamplingRate = kSampleRate;
+
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::internalGetParameter(index, params);
+    }
+}
+
+OMX_ERRORTYPE SoftAMRNBEncoder::internalSetParameter(
+        OMX_INDEXTYPE index, const OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexParamStandardComponentRole:
+        {
+            const OMX_PARAM_COMPONENTROLETYPE *roleParams =
+                (const OMX_PARAM_COMPONENTROLETYPE *)params;
+
+            if (strncmp((const char *)roleParams->cRole,
+                        "audio_encoder.amrnb",
+                        OMX_MAX_STRINGNAME_SIZE - 1)) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPortFormat:
+        {
+            const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
+                (const OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+
+            if (formatParams->nPortIndex > 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (formatParams->nIndex > 0) {
+                return OMX_ErrorNoMore;
+            }
+
+            if ((formatParams->nPortIndex == 0
+                        && formatParams->eEncoding != OMX_AUDIO_CodingPCM)
+                || (formatParams->nPortIndex == 1
+                        && formatParams->eEncoding != OMX_AUDIO_CodingAMR)) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioAmr:
+        {
+            OMX_AUDIO_PARAM_AMRTYPE *amrParams =
+                (OMX_AUDIO_PARAM_AMRTYPE *)params;
+
+            if (amrParams->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (amrParams->nChannels != 1
+                    || amrParams->eAMRDTXMode != OMX_AUDIO_AMRDTXModeOff
+                    || amrParams->eAMRFrameFormat
+                            != OMX_AUDIO_AMRFrameFormatFSF
+                    || amrParams->eAMRBandMode < OMX_AUDIO_AMRBandModeNB0
+                    || amrParams->eAMRBandMode > OMX_AUDIO_AMRBandModeNB7) {
+                return OMX_ErrorUndefined;
+            }
+
+            mBitRate = amrParams->nBitRate;
+            mMode = amrParams->eAMRBandMode - 1;
+
+            amrParams->eAMRDTXMode = OMX_AUDIO_AMRDTXModeOff;
+            amrParams->eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPcm:
+        {
+            OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+                (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+            if (pcmParams->nPortIndex != 0) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (pcmParams->nChannels != 1
+                    || pcmParams->nSamplingRate != kSampleRate) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+
+        default:
+            return SimpleSoftOMXComponent::internalSetParameter(index, params);
+    }
+}
+
+void SoftAMRNBEncoder::onQueueFilled(OMX_U32 portIndex) {
+    if (mSignalledError) {
+        return;
+    }
+
+    List<BufferInfo *> &inQueue = getPortQueue(0);
+    List<BufferInfo *> &outQueue = getPortQueue(1);
+
+    size_t numBytesPerInputFrame = kNumSamplesPerFrame * sizeof(int16_t);
+
+    for (;;) {
+        // We do the following until we run out of buffers.
+
+        while (mInputSize < numBytesPerInputFrame) {
+            // As long as there's still input data to be read we
+            // will drain "kNumSamplesPerFrame" samples
+            // into the "mInputFrame" buffer and then encode those
+            // as a unit into an output buffer.
+
+            if (mSawInputEOS || inQueue.empty()) {
+                return;
+            }
+
+            BufferInfo *inInfo = *inQueue.begin();
+            OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+            const void *inData = inHeader->pBuffer + inHeader->nOffset;
+
+            size_t copy = numBytesPerInputFrame - mInputSize;
+            if (copy > inHeader->nFilledLen) {
+                copy = inHeader->nFilledLen;
+            }
+
+            if (mInputSize == 0) {
+                mInputTimeUs = inHeader->nTimeStamp;
+            }
+
+            memcpy((uint8_t *)mInputFrame + mInputSize, inData, copy);
+            mInputSize += copy;
+
+            inHeader->nOffset += copy;
+            inHeader->nFilledLen -= copy;
+
+            // "Time" on the input buffer has in effect advanced by the
+            // number of audio frames we just advanced nOffset by.
+            inHeader->nTimeStamp +=
+                (copy * 1000000ll / kSampleRate) / sizeof(int16_t);
+
+            if (inHeader->nFilledLen == 0) {
+                if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+                    ALOGV("saw input EOS");
+                    mSawInputEOS = true;
+
+                    // Pad any remaining data with zeroes.
+                    memset((uint8_t *)mInputFrame + mInputSize,
+                           0,
+                           numBytesPerInputFrame - mInputSize);
+
+                    mInputSize = numBytesPerInputFrame;
+                }
+
+                inQueue.erase(inQueue.begin());
+                inInfo->mOwnedByUs = false;
+                notifyEmptyBufferDone(inHeader);
+
+                inData = NULL;
+                inHeader = NULL;
+                inInfo = NULL;
+            }
+        }
+
+        // At this point we have all the input data necessary to encode
+        // a single frame; all we need is an output buffer to store the
+        // result in.
+
+        if (outQueue.empty()) {
+            return;
+        }
+
+        BufferInfo *outInfo = *outQueue.begin();
+        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+        uint8_t *outPtr = outHeader->pBuffer + outHeader->nOffset;
+        size_t outAvailable = outHeader->nAllocLen - outHeader->nOffset;
+
+        Frame_Type_3GPP frameType;
+        int res = AMREncode(
+                mEncState, mSidState, (Mode)mMode,
+                mInputFrame, outPtr, &frameType, AMR_TX_WMF);
+
+        CHECK_GE(res, 0);
+        CHECK_LE((size_t)res, outAvailable);
+
+        // Convert header byte from WMF to IETF format.
+        outPtr[0] = ((outPtr[0] << 3) | 4) & 0x7c;
+
+        outHeader->nFilledLen = res;
+        outHeader->nFlags = OMX_BUFFERFLAG_ENDOFFRAME;
+
+        if (mSawInputEOS) {
+            // We also tag this output buffer with EOS if it corresponds
+            // to the final input buffer.
+            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+        }
+
+        outHeader->nTimeStamp = mInputTimeUs;
+
+#if 0
+        ALOGI("sending %d bytes of data (time = %lld us, flags = 0x%08lx)",
+              nOutputBytes, mInputTimeUs, outHeader->nFlags);
+
+        hexdump(outHeader->pBuffer + outHeader->nOffset, outHeader->nFilledLen);
+#endif
+
+        outQueue.erase(outQueue.begin());
+        outInfo->mOwnedByUs = false;
+        notifyFillBufferDone(outHeader);
+
+        outHeader = NULL;
+        outInfo = NULL;
+
+        mInputSize = 0;
+    }
+}
+
+}  // namespace android
+
+android::SoftOMXComponent *createSoftOMXComponent(
+        const char *name, const OMX_CALLBACKTYPE *callbacks,
+        OMX_PTR appData, OMX_COMPONENTTYPE **component) {
+    return new android::SoftAMRNBEncoder(name, callbacks, appData, component);
+}
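
onQueueFilled() above rewrites the first output byte from the encoder's WMF layout into the IETF storage-format table-of-contents byte: ((b << 3) | 4) & 0x7c moves the 4-bit frame type into bits 3..6, sets the quality bit and clears the remaining bits. A small standalone check of that transform with hypothetical inputs, assuming the WMF byte carries the 3GPP frame type in its low nibble; it does not call the encoder:

#include <stdint.h>
#include <assert.h>

// Same transform as in SoftAMRNBEncoder::onQueueFilled(): shift the frame
// type into bits 3..6 of the IETF TOC byte, set the quality bit (0x04) and
// clear the follow/padding bits.
static uint8_t wmfToIetf(uint8_t wmfHeader) {
    return ((wmfHeader << 3) | 4) & 0x7c;
}

int main() {
    assert(wmfToIetf(0) == 0x04);  // frame type 0 (MR475, 4.75 kbps)
    assert(wmfToIetf(7) == 0x3c);  // frame type 7 (MR122, 12.2 kbps)
    return 0;
}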
diff --git a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.h b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.h
new file mode 100644
index 0000000..50178c4
--- /dev/null
+++ b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_AMRNB_ENCODER_H_
+
+#define SOFT_AMRNB_ENCODER_H_
+
+#include "SimpleSoftOMXComponent.h"
+
+namespace android {
+
+struct SoftAMRNBEncoder : public SimpleSoftOMXComponent {
+    SoftAMRNBEncoder(
+            const char *name,
+            const OMX_CALLBACKTYPE *callbacks,
+            OMX_PTR appData,
+            OMX_COMPONENTTYPE **component);
+
+protected:
+    virtual ~SoftAMRNBEncoder();
+
+    virtual OMX_ERRORTYPE internalGetParameter(
+            OMX_INDEXTYPE index, OMX_PTR params);
+
+    virtual OMX_ERRORTYPE internalSetParameter(
+            OMX_INDEXTYPE index, const OMX_PTR params);
+
+    virtual void onQueueFilled(OMX_U32 portIndex);
+
+private:
+    enum {
+        kNumBuffers             = 4,
+        kNumSamplesPerFrame     = 160,
+    };
+
+    void *mEncState;
+    void *mSidState;
+
+    OMX_U32 mBitRate;
+    int mMode;
+
+    size_t mInputSize;
+    int16_t mInputFrame[kNumSamplesPerFrame];
+    int64_t mInputTimeUs;
+
+    bool mSawInputEOS;
+    bool mSignalledError;
+
+    void initPorts();
+    status_t initEncoder();
+
+    status_t setAudioParams();
+
+    DISALLOW_EVIL_CONSTRUCTORS(SoftAMRNBEncoder);
+};
+
+}  // namespace android
+
+#endif  // SOFT_AMRNB_ENCODER_H_
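
internalSetParameter() stores eAMRBandMode - 1 as the encoder Mode, so OMX_AUDIO_AMRBandModeNB0..NB7 map onto MR475..MR122. The removed AMRNBEncoder.cpp derived the same choice from a bitrate; a sketch of that mapping expressed against the OMX band modes, where the helper name is illustrative and the thresholds are the ones from the deleted PickModeFromBitrate():

#include <stdint.h>
#include <OMX_Audio.h>

// Pick the lowest AMR-NB band mode whose rate covers the requested bitrate,
// clamping to 12.2 kbps for anything higher. Thresholds match the
// PickModeFromBitrate() helper removed from AMRNBEncoder.cpp in this change.
static OMX_AUDIO_AMRBANDMODETYPE bandModeForBitrate(int32_t bps) {
    if (bps <= 4750)  return OMX_AUDIO_AMRBandModeNB0;  // MR475
    if (bps <= 5150)  return OMX_AUDIO_AMRBandModeNB1;  // MR515
    if (bps <= 5900)  return OMX_AUDIO_AMRBandModeNB2;  // MR59
    if (bps <= 6700)  return OMX_AUDIO_AMRBandModeNB3;  // MR67
    if (bps <= 7400)  return OMX_AUDIO_AMRBandModeNB4;  // MR74
    if (bps <= 7950)  return OMX_AUDIO_AMRBandModeNB5;  // MR795
    if (bps <= 10200) return OMX_AUDIO_AMRBandModeNB6;  // MR102
    return OMX_AUDIO_AMRBandModeNB7;                    // MR122
}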
diff --git a/media/libstagefright/codecs/amrwbenc/AMRWBEncoder.cpp b/media/libstagefright/codecs/amrwbenc/AMRWBEncoder.cpp
deleted file mode 100644
index 60b1163..0000000
--- a/media/libstagefright/codecs/amrwbenc/AMRWBEncoder.cpp
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "AMRWBEncoder"
-#include <utils/Log.h>
-
-#include "AMRWBEncoder.h"
-#include "voAMRWB.h"
-#include "cmnMemory.h"
-
-#include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDebug.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
-
-namespace android {
-
-static const int32_t kNumSamplesPerFrame = 320;
-static const int32_t kBitsPerSample = 16;
-static const int32_t kInputBufferSize = (kBitsPerSample / 8) * kNumSamplesPerFrame;
-static const int32_t kSampleRate = 16000;
-static const int32_t kNumChannels = 1;
-
-AMRWBEncoder::AMRWBEncoder(const sp<MediaSource> &source, const sp<MetaData> &meta)
-    : mSource(source),
-      mMeta(meta),
-      mStarted(false),
-      mBufferGroup(NULL),
-      mInputBuffer(NULL),
-      mEncoderHandle(NULL),
-      mApiHandle(NULL),
-      mMemOperator(NULL),
-      mAnchorTimeUs(0),
-      mNumFramesOutput(0),
-      mNumInputSamples(0) {
-}
-
-static VOAMRWBMODE pickModeFromBitRate(int32_t bps) {
-    CHECK(bps >= 0);
-    if (bps <= 6600) {
-        return VOAMRWB_MD66;
-    } else if (bps <= 8850) {
-        return VOAMRWB_MD885;
-    } else if (bps <= 12650) {
-        return VOAMRWB_MD1265;
-    } else if (bps <= 14250) {
-        return VOAMRWB_MD1425;
-    } else if (bps <= 15850) {
-        return VOAMRWB_MD1585;
-    } else if (bps <= 18250) {
-        return VOAMRWB_MD1825;
-    } else if (bps <= 19850) {
-        return VOAMRWB_MD1985;
-    } else if (bps <= 23050) {
-        return VOAMRWB_MD2305;
-    }
-    return VOAMRWB_MD2385;
-}
-
-status_t AMRWBEncoder::initCheck() {
-    CHECK(mApiHandle == NULL && mEncoderHandle == NULL);
-    CHECK(mMeta->findInt32(kKeyBitRate, &mBitRate));
-
-    mApiHandle = new VO_AUDIO_CODECAPI;
-    CHECK(mApiHandle);
-
-    if (VO_ERR_NONE != voGetAMRWBEncAPI(mApiHandle)) {
-        ALOGE("Failed to get api handle");
-        return UNKNOWN_ERROR;
-    }
-
-    mMemOperator = new VO_MEM_OPERATOR;
-    CHECK(mMemOperator != NULL);
-    mMemOperator->Alloc = cmnMemAlloc;
-    mMemOperator->Copy = cmnMemCopy;
-    mMemOperator->Free = cmnMemFree;
-    mMemOperator->Set = cmnMemSet;
-    mMemOperator->Check = cmnMemCheck;
-
-    VO_CODEC_INIT_USERDATA userData;
-    memset(&userData, 0, sizeof(userData));
-    userData.memflag = VO_IMF_USERMEMOPERATOR;
-    userData.memData = (VO_PTR) mMemOperator;
-    if (VO_ERR_NONE != mApiHandle->Init(&mEncoderHandle, VO_AUDIO_CodingAMRWB, &userData)) {
-        ALOGE("Failed to init AMRWB encoder");
-        return UNKNOWN_ERROR;
-    }
-
-    // Configure AMRWB encoder$
-    VOAMRWBMODE mode = pickModeFromBitRate(mBitRate);
-    if (VO_ERR_NONE != mApiHandle->SetParam(mEncoderHandle, VO_PID_AMRWB_MODE,  &mode)) {
-        ALOGE("Failed to set AMRWB encoder mode to %d", mode);
-        return UNKNOWN_ERROR;
-    }
-
-    VOAMRWBFRAMETYPE type = VOAMRWB_RFC3267;
-    if (VO_ERR_NONE != mApiHandle->SetParam(mEncoderHandle, VO_PID_AMRWB_FRAMETYPE, &type)) {
-        ALOGE("Failed to set AMRWB encoder frame type to %d", type);
-        return UNKNOWN_ERROR;
-    }
-
-    return OK;
-}
-
-AMRWBEncoder::~AMRWBEncoder() {
-    if (mStarted) {
-        stop();
-    }
-}
-
-status_t AMRWBEncoder::start(MetaData *params) {
-    if (mStarted) {
-        ALOGW("Call start() when encoder already started");
-        return OK;
-    }
-
-    mBufferGroup = new MediaBufferGroup;
-
-    // The largest buffer size is header + 477 bits
-    mBufferGroup->add_buffer(new MediaBuffer(1024));
-
-    CHECK_EQ(OK, initCheck());
-
-    mNumFramesOutput = 0;
-
-    status_t err = mSource->start(params);
-    if (err != OK) {
-        ALOGE("AudioSource is not available");
-        return err;
-    }
-    mStarted = true;
-
-    return OK;
-}
-
-status_t AMRWBEncoder::stop() {
-    if (!mStarted) {
-        ALOGW("Call stop() when encoder has not started");
-        return OK;
-    }
-
-    if (mInputBuffer) {
-        mInputBuffer->release();
-        mInputBuffer = NULL;
-    }
-
-    delete mBufferGroup;
-    mBufferGroup = NULL;
-
-
-    CHECK_EQ(VO_ERR_NONE, mApiHandle->Uninit(mEncoderHandle));
-    mEncoderHandle = NULL;
-
-    delete mApiHandle;
-    mApiHandle = NULL;
-
-    delete mMemOperator;
-    mMemOperator;
-
-    mStarted = false;
-
-    mSource->stop();
-    return OK;
-}
-
-sp<MetaData> AMRWBEncoder::getFormat() {
-    sp<MetaData> srcFormat = mSource->getFormat();
-
-    mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AMR_WB);
-
-    int64_t durationUs;
-    if (srcFormat->findInt64(kKeyDuration, &durationUs)) {
-        mMeta->setInt64(kKeyDuration, durationUs);
-    }
-
-    mMeta->setCString(kKeyDecoderComponent, "AMRWBEncoder");
-
-    return mMeta;
-}
-
-status_t AMRWBEncoder::read(
-        MediaBuffer **out, const ReadOptions *options) {
-    status_t err;
-
-    *out = NULL;
-
-    int64_t seekTimeUs;
-    ReadOptions::SeekMode mode;
-    CHECK(options == NULL || !options->getSeekTo(&seekTimeUs, &mode));
-    bool readFromSource = false;
-    int64_t wallClockTimeUs = -1;
-
-    while (mNumInputSamples < kNumSamplesPerFrame) {
-        if (mInputBuffer == NULL) {
-            err = mSource->read(&mInputBuffer, options);
-
-            if (err != OK) {
-                if (mNumInputSamples == 0) {
-                    return ERROR_END_OF_STREAM;
-                }
-                memset(&mInputFrame[mNumInputSamples],
-                       0,
-                       sizeof(int16_t)
-                            * (kNumSamplesPerFrame - mNumInputSamples));
-                mNumInputSamples = 0;
-                break;
-            }
-
-            size_t align = mInputBuffer->range_length() % sizeof(int16_t);
-            CHECK_EQ(align, 0);
-
-            int64_t timeUs;
-            if (mInputBuffer->meta_data()->findInt64(kKeyDriftTime, &timeUs)) {
-                wallClockTimeUs = timeUs;
-            }
-            if (mInputBuffer->meta_data()->findInt64(kKeyAnchorTime, &timeUs)) {
-                mAnchorTimeUs = timeUs;
-            }
-            readFromSource = true;
-        } else {
-            readFromSource = false;
-        }
-
-        size_t copy =
-            (kNumSamplesPerFrame - mNumInputSamples) * sizeof(int16_t);
-
-        if (copy > mInputBuffer->range_length()) {
-            copy = mInputBuffer->range_length();
-        }
-
-        memcpy(&mInputFrame[mNumInputSamples],
-               (const uint8_t *)mInputBuffer->data()
-                    + mInputBuffer->range_offset(),
-               copy);
-
-        mInputBuffer->set_range(
-                mInputBuffer->range_offset() + copy,
-                mInputBuffer->range_length() - copy);
-
-        if (mInputBuffer->range_length() == 0) {
-            mInputBuffer->release();
-            mInputBuffer = NULL;
-        }
-
-        mNumInputSamples += copy / sizeof(int16_t);
-        if (mNumInputSamples >= kNumSamplesPerFrame) {
-            mNumInputSamples %= kNumSamplesPerFrame;
-            break;  // Get a whole input frame 640 bytes
-        }
-    }
-
-    VO_CODECBUFFER inputData;
-    memset(&inputData, 0, sizeof(inputData));
-    inputData.Buffer = (unsigned char*) mInputFrame;
-    inputData.Length = kInputBufferSize;
-    CHECK(VO_ERR_NONE == mApiHandle->SetInputData(mEncoderHandle,&inputData));
-
-    MediaBuffer *buffer;
-    CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), OK);
-    uint8_t *outPtr = (uint8_t *)buffer->data();
-
-    VO_CODECBUFFER outputData;
-    memset(&outputData, 0, sizeof(outputData));
-    VO_AUDIO_OUTPUTINFO outputInfo;
-    memset(&outputInfo, 0, sizeof(outputInfo));
-
-    VO_U32 ret = VO_ERR_NONE;
-    outputData.Buffer = outPtr;
-    outputData.Length = buffer->size();
-    ret = mApiHandle->GetOutputData(mEncoderHandle, &outputData, &outputInfo);
-    CHECK(ret == VO_ERR_NONE || ret == VO_ERR_INPUT_BUFFER_SMALL);
-
-    buffer->set_range(0, outputData.Length);
-    ++mNumFramesOutput;
-
-    int64_t mediaTimeUs = mNumFramesOutput * 20000LL;
-    buffer->meta_data()->setInt64(kKeyTime, mAnchorTimeUs + mediaTimeUs);
-    if (readFromSource && wallClockTimeUs != -1) {
-        buffer->meta_data()->setInt64(kKeyDriftTime, mediaTimeUs - wallClockTimeUs);
-    }
-
-    *out = buffer;
-    return OK;
-}
-
-}  // namespace android
diff --git a/media/libstagefright/codecs/amrwbenc/Android.mk b/media/libstagefright/codecs/amrwbenc/Android.mk
index ae43870..d3c3041 100644
--- a/media/libstagefright/codecs/amrwbenc/Android.mk
+++ b/media/libstagefright/codecs/amrwbenc/Android.mk
@@ -5,7 +5,6 @@
 
 
 LOCAL_SRC_FILES := \
-	AMRWBEncoder.cpp \
 	src/autocorr.c \
 	src/az_isp.c \
 	src/bits.c \
@@ -117,4 +116,26 @@
 
 include $(BUILD_STATIC_LIBRARY)
 
+################################################################################
 
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+        SoftAMRWBEncoder.cpp
+
+LOCAL_C_INCLUDES := \
+	frameworks/base/media/libstagefright/include \
+	frameworks/base/media/libstagefright/codecs/common/include \
+	frameworks/native/include/media/openmax
+
+LOCAL_STATIC_LIBRARIES := \
+        libstagefright_amrwbenc
+
+LOCAL_SHARED_LIBRARIES := \
+        libstagefright_omx libstagefright_foundation libutils \
+        libstagefright_enc_common
+
+LOCAL_MODULE := libstagefright_soft_amrwbenc
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
new file mode 100644
index 0000000..9ccb49c
--- /dev/null
+++ b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
@@ -0,0 +1,459 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftAMRWBEncoder"
+#include <utils/Log.h>
+
+#include "SoftAMRWBEncoder.h"
+
+#include "cmnMemory.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+
+namespace android {
+
+static const int32_t kSampleRate = 16000;
+
+template<class T>
+static void InitOMXParams(T *params) {
+    params->nSize = sizeof(T);
+    params->nVersion.s.nVersionMajor = 1;
+    params->nVersion.s.nVersionMinor = 0;
+    params->nVersion.s.nRevision = 0;
+    params->nVersion.s.nStep = 0;
+}
+
+SoftAMRWBEncoder::SoftAMRWBEncoder(
+        const char *name,
+        const OMX_CALLBACKTYPE *callbacks,
+        OMX_PTR appData,
+        OMX_COMPONENTTYPE **component)
+    : SimpleSoftOMXComponent(name, callbacks, appData, component),
+      mEncoderHandle(NULL),
+      mApiHandle(NULL),
+      mMemOperator(NULL),
+      mBitRate(0),
+      mMode(VOAMRWB_MD66),
+      mInputSize(0),
+      mInputTimeUs(-1ll),
+      mSawInputEOS(false),
+      mSignalledError(false) {
+    initPorts();
+    CHECK_EQ(initEncoder(), (status_t)OK);
+}
+
+SoftAMRWBEncoder::~SoftAMRWBEncoder() {
+    if (mEncoderHandle != NULL) {
+        CHECK_EQ(VO_ERR_NONE, mApiHandle->Uninit(mEncoderHandle));
+        mEncoderHandle = NULL;
+    }
+
+    delete mApiHandle;
+    mApiHandle = NULL;
+
+    delete mMemOperator;
+    mMemOperator = NULL;
+}
+
+void SoftAMRWBEncoder::initPorts() {
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+
+    def.nPortIndex = 0;
+    def.eDir = OMX_DirInput;
+    def.nBufferCountMin = kNumBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferSize = kNumSamplesPerFrame * sizeof(int16_t);
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainAudio;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 1;
+
+    def.format.audio.cMIMEType = const_cast<char *>("audio/raw");
+    def.format.audio.pNativeRender = NULL;
+    def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+    def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+
+    addPort(def);
+
+    def.nPortIndex = 1;
+    def.eDir = OMX_DirOutput;
+    def.nBufferCountMin = kNumBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferSize = 8192;
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainAudio;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 2;
+
+    def.format.audio.cMIMEType = const_cast<char *>("audio/amr-wb");
+    def.format.audio.pNativeRender = NULL;
+    def.format.audio.bFlagErrorConcealment = OMX_FALSE;
+    def.format.audio.eEncoding = OMX_AUDIO_CodingAMR;
+
+    addPort(def);
+}
+
+status_t SoftAMRWBEncoder::initEncoder() {
+    mApiHandle = new VO_AUDIO_CODECAPI;
+
+    if (VO_ERR_NONE != voGetAMRWBEncAPI(mApiHandle)) {
+        ALOGE("Failed to get api handle");
+        return UNKNOWN_ERROR;
+    }
+
+    mMemOperator = new VO_MEM_OPERATOR;
+    mMemOperator->Alloc = cmnMemAlloc;
+    mMemOperator->Copy = cmnMemCopy;
+    mMemOperator->Free = cmnMemFree;
+    mMemOperator->Set = cmnMemSet;
+    mMemOperator->Check = cmnMemCheck;
+
+    VO_CODEC_INIT_USERDATA userData;
+    memset(&userData, 0, sizeof(userData));
+    userData.memflag = VO_IMF_USERMEMOPERATOR;
+    userData.memData = (VO_PTR) mMemOperator;
+
+    if (VO_ERR_NONE != mApiHandle->Init(
+                &mEncoderHandle, VO_AUDIO_CodingAMRWB, &userData)) {
+        ALOGE("Failed to init AMRWB encoder");
+        return UNKNOWN_ERROR;
+    }
+
+    VOAMRWBFRAMETYPE type = VOAMRWB_RFC3267;
+    if (VO_ERR_NONE != mApiHandle->SetParam(
+                mEncoderHandle, VO_PID_AMRWB_FRAMETYPE, &type)) {
+        ALOGE("Failed to set AMRWB encoder frame type to %d", type);
+        return UNKNOWN_ERROR;
+    }
+
+    return OK;
+}
+
+OMX_ERRORTYPE SoftAMRWBEncoder::internalGetParameter(
+        OMX_INDEXTYPE index, OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexParamAudioPortFormat:
+        {
+            OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
+                (OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+
+            if (formatParams->nPortIndex > 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (formatParams->nIndex > 0) {
+                return OMX_ErrorNoMore;
+            }
+
+            formatParams->eEncoding =
+                (formatParams->nPortIndex == 0)
+                    ? OMX_AUDIO_CodingPCM : OMX_AUDIO_CodingAMR;
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioAmr:
+        {
+            OMX_AUDIO_PARAM_AMRTYPE *amrParams =
+                (OMX_AUDIO_PARAM_AMRTYPE *)params;
+
+            if (amrParams->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            amrParams->nChannels = 1;
+            amrParams->nBitRate = mBitRate;
+
+            amrParams->eAMRBandMode =
+                (OMX_AUDIO_AMRBANDMODETYPE)(mMode + OMX_AUDIO_AMRBandModeWB0);
+
+            amrParams->eAMRDTXMode = OMX_AUDIO_AMRDTXModeOff;
+            amrParams->eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPcm:
+        {
+            OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+                (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+            if (pcmParams->nPortIndex != 0) {
+                return OMX_ErrorUndefined;
+            }
+
+            pcmParams->eNumData = OMX_NumericalDataSigned;
+            pcmParams->eEndian = OMX_EndianBig;
+            pcmParams->bInterleaved = OMX_TRUE;
+            pcmParams->nBitPerSample = 16;
+            pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
+            pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelCF;
+
+            pcmParams->nChannels = 1;
+            pcmParams->nSamplingRate = kSampleRate;
+
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::internalGetParameter(index, params);
+    }
+}
+
+OMX_ERRORTYPE SoftAMRWBEncoder::internalSetParameter(
+        OMX_INDEXTYPE index, const OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexParamStandardComponentRole:
+        {
+            const OMX_PARAM_COMPONENTROLETYPE *roleParams =
+                (const OMX_PARAM_COMPONENTROLETYPE *)params;
+
+            if (strncmp((const char *)roleParams->cRole,
+                        "audio_encoder.amrwb",
+                        OMX_MAX_STRINGNAME_SIZE - 1)) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPortFormat:
+        {
+            const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
+                (const OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+
+            if (formatParams->nPortIndex > 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (formatParams->nIndex > 0) {
+                return OMX_ErrorNoMore;
+            }
+
+            if ((formatParams->nPortIndex == 0
+                        && formatParams->eEncoding != OMX_AUDIO_CodingPCM)
+                || (formatParams->nPortIndex == 1
+                        && formatParams->eEncoding != OMX_AUDIO_CodingAMR)) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioAmr:
+        {
+            OMX_AUDIO_PARAM_AMRTYPE *amrParams =
+                (OMX_AUDIO_PARAM_AMRTYPE *)params;
+
+            if (amrParams->nPortIndex != 1) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (amrParams->nChannels != 1
+                    || amrParams->eAMRDTXMode != OMX_AUDIO_AMRDTXModeOff
+                    || amrParams->eAMRFrameFormat
+                            != OMX_AUDIO_AMRFrameFormatFSF
+                    || amrParams->eAMRBandMode < OMX_AUDIO_AMRBandModeWB0
+                    || amrParams->eAMRBandMode > OMX_AUDIO_AMRBandModeWB8) {
+                return OMX_ErrorUndefined;
+            }
+
+            mBitRate = amrParams->nBitRate;
+
+            mMode = (VOAMRWBMODE)(
+                    amrParams->eAMRBandMode - OMX_AUDIO_AMRBandModeWB0);
+
+            amrParams->eAMRDTXMode = OMX_AUDIO_AMRDTXModeOff;
+            amrParams->eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
+
+            if (VO_ERR_NONE !=
+                    mApiHandle->SetParam(
+                        mEncoderHandle, VO_PID_AMRWB_MODE,  &mMode)) {
+                ALOGE("Failed to set AMRWB encoder mode to %d", mMode);
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamAudioPcm:
+        {
+            OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
+                (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+
+            if (pcmParams->nPortIndex != 0) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (pcmParams->nChannels != 1
+                    || pcmParams->nSamplingRate != (OMX_U32)kSampleRate) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+
+        default:
+            return SimpleSoftOMXComponent::internalSetParameter(index, params);
+    }
+}
+
+void SoftAMRWBEncoder::onQueueFilled(OMX_U32 portIndex) {
+    if (mSignalledError) {
+        return;
+    }
+
+    List<BufferInfo *> &inQueue = getPortQueue(0);
+    List<BufferInfo *> &outQueue = getPortQueue(1);
+
+    size_t numBytesPerInputFrame = kNumSamplesPerFrame * sizeof(int16_t);
+
+    for (;;) {
+        // We do the following until we run out of buffers.
+
+        while (mInputSize < numBytesPerInputFrame) {
+            // As long as there's still input data to be read we
+            // will drain "kNumSamplesPerFrame" samples
+            // into the "mInputFrame" buffer and then encode those
+            // as a unit into an output buffer.
+
+            if (mSawInputEOS || inQueue.empty()) {
+                return;
+            }
+
+            BufferInfo *inInfo = *inQueue.begin();
+            OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+
+            const void *inData = inHeader->pBuffer + inHeader->nOffset;
+
+            size_t copy = numBytesPerInputFrame - mInputSize;
+            if (copy > inHeader->nFilledLen) {
+                copy = inHeader->nFilledLen;
+            }
+
+            if (mInputSize == 0) {
+                mInputTimeUs = inHeader->nTimeStamp;
+            }
+
+            memcpy((uint8_t *)mInputFrame + mInputSize, inData, copy);
+            mInputSize += copy;
+
+            inHeader->nOffset += copy;
+            inHeader->nFilledLen -= copy;
+
+            // "Time" on the input buffer has in effect advanced by the
+            // number of audio frames we just advanced nOffset by.
+            inHeader->nTimeStamp +=
+                (copy * 1000000ll / kSampleRate) / sizeof(int16_t);
+
+            if (inHeader->nFilledLen == 0) {
+                if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+                    ALOGV("saw input EOS");
+                    mSawInputEOS = true;
+
+                    // Pad any remaining data with zeroes.
+                    memset((uint8_t *)mInputFrame + mInputSize,
+                           0,
+                           numBytesPerInputFrame - mInputSize);
+
+                    mInputSize = numBytesPerInputFrame;
+                }
+
+                inQueue.erase(inQueue.begin());
+                inInfo->mOwnedByUs = false;
+                notifyEmptyBufferDone(inHeader);
+
+                inData = NULL;
+                inHeader = NULL;
+                inInfo = NULL;
+            }
+        }
+
+        // At this point we have all the input data necessary to encode
+        // a single frame; all we need is an output buffer to store the
+        // result in.
+
+        if (outQueue.empty()) {
+            return;
+        }
+
+        BufferInfo *outInfo = *outQueue.begin();
+        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+
+        uint8_t *outPtr = outHeader->pBuffer + outHeader->nOffset;
+        size_t outAvailable = outHeader->nAllocLen - outHeader->nOffset;
+
+        VO_CODECBUFFER inputData;
+        memset(&inputData, 0, sizeof(inputData));
+        inputData.Buffer = (unsigned char *) mInputFrame;
+        inputData.Length = mInputSize;
+
+        CHECK_EQ(VO_ERR_NONE,
+                 mApiHandle->SetInputData(mEncoderHandle, &inputData));
+
+        VO_CODECBUFFER outputData;
+        memset(&outputData, 0, sizeof(outputData));
+        VO_AUDIO_OUTPUTINFO outputInfo;
+        memset(&outputInfo, 0, sizeof(outputInfo));
+
+        outputData.Buffer = outPtr;
+        outputData.Length = outAvailable;
+        VO_U32 ret = mApiHandle->GetOutputData(
+                mEncoderHandle, &outputData, &outputInfo);
+        CHECK(ret == VO_ERR_NONE || ret == VO_ERR_INPUT_BUFFER_SMALL);
+
+        outHeader->nFilledLen = outputData.Length;
+        outHeader->nFlags = OMX_BUFFERFLAG_ENDOFFRAME;
+
+        if (mSawInputEOS) {
+            // We also tag this output buffer with EOS if it corresponds
+            // to the final input buffer.
+            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+        }
+
+        outHeader->nTimeStamp = mInputTimeUs;
+
+#if 0
+        ALOGI("sending %ld bytes of data (time = %lld us, flags = 0x%08lx)",
+              outHeader->nFilledLen, mInputTimeUs, outHeader->nFlags);
+
+        hexdump(outHeader->pBuffer + outHeader->nOffset, outHeader->nFilledLen);
+#endif
+
+        outQueue.erase(outQueue.begin());
+        outInfo->mOwnedByUs = false;
+        notifyFillBufferDone(outHeader);
+
+        outHeader = NULL;
+        outInfo = NULL;
+
+        mInputSize = 0;
+    }
+}
+
+}  // namespace android
+
+android::SoftOMXComponent *createSoftOMXComponent(
+        const char *name, const OMX_CALLBACKTYPE *callbacks,
+        OMX_PTR appData, OMX_COMPONENTTYPE **component) {
+    return new android::SoftAMRWBEncoder(name, callbacks, appData, component);
+}
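The core of onQueueFilled() above is the accumulation step: PCM is copied out of incoming OMX buffers until exactly kNumSamplesPerFrame * sizeof(int16_t) = 640 bytes are available, with the final partial frame zero-padded at EOS. A minimal sketch of the same pattern outside the OMX plumbing, with illustrative names and encodeFrame() standing in for the VO encoder calls:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Accumulates 16-bit PCM into fixed 640-byte frames, mirroring the input
    // handling in SoftAMRWBEncoder::onQueueFilled().
    class FrameAccumulator {
    public:
        // Feed arbitrary-sized PCM chunks; returns how many full frames were emitted.
        size_t feed(const uint8_t *data, size_t len, bool eos) {
            size_t framesOut = 0;
            while (len > 0 || (eos && mFill > 0)) {
                size_t copy = kFrameBytes - mFill;
                if (copy > len) copy = len;
                memcpy(mFrame + mFill, data, copy);
                data += copy;
                len -= copy;
                mFill += copy;

                if (mFill < kFrameBytes) {
                    if (!eos) break;                                 // wait for more input
                    memset(mFrame + mFill, 0, kFrameBytes - mFill);  // pad the final frame
                    mFill = kFrameBytes;
                }
                encodeFrame(mFrame);                                 // one 640-byte frame ready
                mFill = 0;
                ++framesOut;
                if (eos && len == 0) break;
            }
            return framesOut;
        }

    private:
        static const size_t kFrameBytes = 320 * sizeof(int16_t);    // 640 bytes, 20 ms

        uint8_t mFrame[kFrameBytes];
        size_t mFill = 0;

        void encodeFrame(const uint8_t * /*frame*/) { /* hand off to the codec here */ }
    };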
diff --git a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.h b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.h
new file mode 100644
index 0000000..d0c1dab
--- /dev/null
+++ b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_AMRWB_ENCODER_H_
+
+#define SOFT_AMRWB_ENCODER_H_
+
+#include "SimpleSoftOMXComponent.h"
+
+#include "voAMRWB.h"
+
+struct VO_AUDIO_CODECAPI;
+struct VO_MEM_OPERATOR;
+
+namespace android {
+
+struct SoftAMRWBEncoder : public SimpleSoftOMXComponent {
+    SoftAMRWBEncoder(
+            const char *name,
+            const OMX_CALLBACKTYPE *callbacks,
+            OMX_PTR appData,
+            OMX_COMPONENTTYPE **component);
+
+protected:
+    virtual ~SoftAMRWBEncoder();
+
+    virtual OMX_ERRORTYPE internalGetParameter(
+            OMX_INDEXTYPE index, OMX_PTR params);
+
+    virtual OMX_ERRORTYPE internalSetParameter(
+            OMX_INDEXTYPE index, const OMX_PTR params);
+
+    virtual void onQueueFilled(OMX_U32 portIndex);
+
+private:
+    enum {
+        kNumBuffers             = 4,
+        kNumSamplesPerFrame     = 320,
+    };
+
+    void *mEncoderHandle;
+    VO_AUDIO_CODECAPI *mApiHandle;
+    VO_MEM_OPERATOR *mMemOperator;
+
+    OMX_U32 mBitRate;
+    VOAMRWBMODE mMode;
+
+    size_t mInputSize;
+    int16_t mInputFrame[kNumSamplesPerFrame];
+    int64_t mInputTimeUs;
+
+    bool mSawInputEOS;
+    bool mSignalledError;
+
+    void initPorts();
+    status_t initEncoder();
+
+    DISALLOW_EVIL_CONSTRUCTORS(SoftAMRWBEncoder);
+};
+
+}  // namespace android
+
+#endif  // SOFT_AMRWB_ENCODER_H_
diff --git a/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp b/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp
index e202a2b..7533f07 100644
--- a/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp
+++ b/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp
@@ -24,8 +24,8 @@
 #include "avcenc_int.h"
 #include "OMX_Video.h"
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
@@ -417,7 +417,7 @@
     *out = NULL;
 
     MediaBuffer *outputBuffer;
-    CHECK_EQ(OK, mGroup->acquire_buffer(&outputBuffer));
+    CHECK_EQ((status_t)OK, mGroup->acquire_buffer(&outputBuffer));
     uint8_t *outPtr = (uint8_t *) outputBuffer->data();
     uint32_t dataLength = outputBuffer->size();
 
@@ -557,9 +557,9 @@
     encoderStatus = PVAVCEncodeNAL(mHandle, outPtr, &dataLength, &type);
     if (encoderStatus == AVCENC_SUCCESS) {
         outputBuffer->meta_data()->setInt32(kKeyIsSyncFrame, mIsIDRFrame);
-        CHECK_EQ(NULL, PVAVCEncGetOverrunBuffer(mHandle));
+        CHECK(NULL == PVAVCEncGetOverrunBuffer(mHandle));
     } else if (encoderStatus == AVCENC_PICTURE_READY) {
-        CHECK_EQ(NULL, PVAVCEncGetOverrunBuffer(mHandle));
+        CHECK(NULL == PVAVCEncGetOverrunBuffer(mHandle));
         if (mIsIDRFrame) {
             outputBuffer->meta_data()->setInt32(kKeyIsSyncFrame, mIsIDRFrame);
             mIsIDRFrame = 0;
diff --git a/media/libstagefright/codecs/avc/enc/Android.mk b/media/libstagefright/codecs/avc/enc/Android.mk
index eb2e320..54cc9b1 100644
--- a/media/libstagefright/codecs/avc/enc/Android.mk
+++ b/media/libstagefright/codecs/avc/enc/Android.mk
@@ -25,8 +25,8 @@
 LOCAL_C_INCLUDES := \
     $(LOCAL_PATH)/src \
     $(LOCAL_PATH)/../common/include \
-    $(TOP)/frameworks/base/include/media/stagefright/openmax \
-    $(TOP)/frameworks/base/media/libstagefright/include
+    $(TOP)/frameworks/base/media/libstagefright/include \
+    $(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS := \
     -D__arm__ \
diff --git a/media/libstagefright/codecs/g711/dec/Android.mk b/media/libstagefright/codecs/g711/dec/Android.mk
index 6692533..42706a5 100644
--- a/media/libstagefright/codecs/g711/dec/Android.mk
+++ b/media/libstagefright/codecs/g711/dec/Android.mk
@@ -6,7 +6,7 @@
 
 LOCAL_C_INCLUDES := \
         frameworks/base/media/libstagefright/include \
-        frameworks/base/include/media/stagefright/openmax \
+        frameworks/native/include/media/openmax
 
 LOCAL_SHARED_LIBRARIES := \
         libstagefright libstagefright_omx libstagefright_foundation libutils
diff --git a/media/libstagefright/codecs/g711/dec/SoftG711.cpp b/media/libstagefright/codecs/g711/dec/SoftG711.cpp
index 32ef003..bcdd3c7 100644
--- a/media/libstagefright/codecs/g711/dec/SoftG711.cpp
+++ b/media/libstagefright/codecs/g711/dec/SoftG711.cpp
@@ -140,7 +140,7 @@
             OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
                 (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
 
-            if (pcmParams->nPortIndex != 0) {
+            if (pcmParams->nPortIndex != 0 && pcmParams->nPortIndex != 1) {
                 return OMX_ErrorUndefined;
             }
 
@@ -148,7 +148,9 @@
                 return OMX_ErrorUndefined;
             }
 
-            mNumChannels = pcmParams->nChannels;
+            if(pcmParams->nPortIndex == 0) {
+                mNumChannels = pcmParams->nChannels;
+            }
 
             return OMX_ErrorNone;
         }
diff --git a/media/libstagefright/codecs/m4v_h263/dec/Android.mk b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
index 2ffa5f2..8c245d1 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
@@ -42,7 +42,7 @@
 	$(LOCAL_PATH)/src \
 	$(LOCAL_PATH)/include \
 	$(TOP)/frameworks/base/media/libstagefright/include \
-	$(TOP)/frameworks/base/include/media/stagefright/openmax
+	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS := -DOSCL_EXPORT_REF= -DOSCL_IMPORT_REF=
 
@@ -59,7 +59,7 @@
 	$(LOCAL_PATH)/src \
 	$(LOCAL_PATH)/include \
         frameworks/base/media/libstagefright/include \
-        frameworks/base/include/media/stagefright/openmax \
+        frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS := -DOSCL_EXPORT_REF= -DOSCL_IMPORT_REF=
 
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.mk b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
index 43318e9..2b7c938 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
@@ -31,7 +31,7 @@
 LOCAL_C_INCLUDES := \
     $(LOCAL_PATH)/src \
     $(LOCAL_PATH)/include \
-    $(TOP)/frameworks/base/include/media/stagefright/openmax \
-    $(TOP)/frameworks/base/media/libstagefright/include
+    $(TOP)/frameworks/base/media/libstagefright/include \
+    $(TOP)/frameworks/native/include/media/openmax
 
 include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp
index d538603..20b0f8d 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp
@@ -23,8 +23,8 @@
 #include "mp4enc_api.h"
 #include "OMX_Video.h"
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaBufferGroup.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
@@ -379,7 +379,7 @@
     *out = NULL;
 
     MediaBuffer *outputBuffer;
-    CHECK_EQ(OK, mGroup->acquire_buffer(&outputBuffer));
+    CHECK_EQ((status_t)OK, mGroup->acquire_buffer(&outputBuffer));
     uint8_t *outPtr = (uint8_t *) outputBuffer->data();
     int32_t dataLength = outputBuffer->size();
 
@@ -467,7 +467,7 @@
         mInputBuffer = NULL;
         return UNKNOWN_ERROR;
     }
-    CHECK_EQ(NULL, PVGetOverrunBuffer(mHandle));
+    CHECK(NULL == PVGetOverrunBuffer(mHandle));
     if (hintTrack.CodeType == 0) {  // I-frame serves as sync frame
         outputBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
     }
diff --git a/media/libstagefright/codecs/mp3dec/Android.mk b/media/libstagefright/codecs/mp3dec/Android.mk
index a08c9f0..ed51aa5 100644
--- a/media/libstagefright/codecs/mp3dec/Android.mk
+++ b/media/libstagefright/codecs/mp3dec/Android.mk
@@ -65,7 +65,7 @@
 
 LOCAL_C_INCLUDES := \
         frameworks/base/media/libstagefright/include \
-        frameworks/base/include/media/stagefright/openmax \
+        frameworks/native/include/media/openmax \
         $(LOCAL_PATH)/src \
         $(LOCAL_PATH)/include
 
diff --git a/media/libstagefright/codecs/on2/dec/Android.mk b/media/libstagefright/codecs/on2/dec/Android.mk
index 32bbd6b..2997228 100644
--- a/media/libstagefright/codecs/on2/dec/Android.mk
+++ b/media/libstagefright/codecs/on2/dec/Android.mk
@@ -9,7 +9,7 @@
         $(TOP)/external/libvpx/vpx_codec \
         $(TOP)/external/libvpx/vpx_ports \
         frameworks/base/media/libstagefright/include \
-        frameworks/base/include/media/stagefright/openmax \
+        frameworks/native/include/media/openmax \
 
 LOCAL_STATIC_LIBRARIES := \
         libvpx
diff --git a/media/libstagefright/codecs/on2/h264dec/Android.mk b/media/libstagefright/codecs/on2/h264dec/Android.mk
index 5b3c876..43c20f0 100644
--- a/media/libstagefright/codecs/on2/h264dec/Android.mk
+++ b/media/libstagefright/codecs/on2/h264dec/Android.mk
@@ -35,7 +35,7 @@
 
 LOCAL_C_INCLUDES := $(LOCAL_PATH)/./inc \
 	frameworks/base/media/libstagefright/include \
-	frameworks/base/include/media/stagefright/openmax \
+	frameworks/native/include/media/openmax \
 
 MY_ASM := \
 	./source/arm_neon_asm_gcc/h264bsdWriteMacroblock.S \
diff --git a/media/libstagefright/codecs/vorbis/dec/Android.mk b/media/libstagefright/codecs/vorbis/dec/Android.mk
index f33f3ac..fca70b7 100644
--- a/media/libstagefright/codecs/vorbis/dec/Android.mk
+++ b/media/libstagefright/codecs/vorbis/dec/Android.mk
@@ -7,7 +7,7 @@
 LOCAL_C_INCLUDES := \
         external/tremolo \
         frameworks/base/media/libstagefright/include \
-        frameworks/base/include/media/stagefright/openmax \
+        frameworks/native/include/media/openmax \
 
 LOCAL_SHARED_LIBRARIES := \
         libvorbisidec libstagefright libstagefright_omx \
diff --git a/media/libstagefright/colorconversion/Android.mk b/media/libstagefright/colorconversion/Android.mk
index 62ba40f..59a64ba 100644
--- a/media/libstagefright/colorconversion/Android.mk
+++ b/media/libstagefright/colorconversion/Android.mk
@@ -6,7 +6,7 @@
         SoftwareRenderer.cpp
 
 LOCAL_C_INCLUDES := \
-        $(TOP)/frameworks/base/include/media/stagefright/openmax \
+        $(TOP)/frameworks/native/include/media/openmax \
         $(TOP)/hardware/msm7k
 
 LOCAL_MODULE:= libstagefright_color_conversion
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index 5cc3f78..597167f 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -18,8 +18,8 @@
 #define LOG_TAG "ColorConverter"
 #include <utils/Log.h>
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/ColorConverter.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaErrors.h>
 
 namespace android {
@@ -144,8 +144,8 @@
         return ERROR_UNSUPPORTED;
     }
 
-    uint32_t *dst_ptr = (uint32_t *)dst.mBits
-        + (dst.mCropTop * dst.mWidth + dst.mCropLeft) / 2;
+    uint16_t *dst_ptr = (uint16_t *)dst.mBits
+        + dst.mCropTop * dst.mWidth + dst.mCropLeft;
 
     const uint8_t *src_ptr = (const uint8_t *)src.mBits
         + (src.mCropTop * dst.mWidth + src.mCropLeft) * 2;
@@ -182,11 +182,15 @@
                 | ((kAdjustedClip[g2] >> 2) << 5)
                 | (kAdjustedClip[b2] >> 3);
 
-            dst_ptr[x / 2] = (rgb2 << 16) | rgb1;
+            if (x + 1 < src.cropWidth()) {
+                *(uint32_t *)(&dst_ptr[x]) = (rgb2 << 16) | rgb1;
+            } else {
+                dst_ptr[x] = rgb1;
+            }
         }
 
         src_ptr += src.mWidth * 2;
-        dst_ptr += dst.mWidth / 2;
+        dst_ptr += dst.mWidth;
     }
 
     return OK;
@@ -290,15 +294,14 @@
         const BitmapParams &src, const BitmapParams &dst) {
     uint8_t *kAdjustedClip = initClip();
 
-    if (!((dst.mWidth & 3) == 0
-            && (src.mCropLeft & 1) == 0
+    if (!((src.mCropLeft & 1) == 0
             && src.cropWidth() == dst.cropWidth()
             && src.cropHeight() == dst.cropHeight())) {
         return ERROR_UNSUPPORTED;
     }
 
-    uint32_t *dst_ptr = (uint32_t *)dst.mBits
-        + (dst.mCropTop * dst.mWidth + dst.mCropLeft) / 2;
+    uint16_t *dst_ptr = (uint16_t *)dst.mBits
+        + dst.mCropTop * dst.mWidth + dst.mCropLeft;
 
     const uint8_t *src_y =
         (const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft;
@@ -340,7 +343,11 @@
                 | ((kAdjustedClip[g2] >> 2) << 5)
                 | (kAdjustedClip[r2] >> 3);
 
-            dst_ptr[x / 2] = (rgb2 << 16) | rgb1;
+            if (x + 1 < src.cropWidth()) {
+                *(uint32_t *)(&dst_ptr[x]) = (rgb2 << 16) | rgb1;
+            } else {
+                dst_ptr[x] = rgb1;
+            }
         }
 
         src_y += src.mWidth;
@@ -349,7 +356,7 @@
             src_u += src.mWidth;
         }
 
-        dst_ptr += dst.mWidth / 2;
+        dst_ptr += dst.mWidth;
     }
 
     return OK;
@@ -361,15 +368,14 @@
 
     uint8_t *kAdjustedClip = initClip();
 
-    if (!((dst.mWidth & 3) == 0
-            && (src.mCropLeft & 1) == 0
+    if (!((src.mCropLeft & 1) == 0
             && src.cropWidth() == dst.cropWidth()
             && src.cropHeight() == dst.cropHeight())) {
         return ERROR_UNSUPPORTED;
     }
 
-    uint32_t *dst_ptr = (uint32_t *)dst.mBits
-        + (dst.mCropTop * dst.mWidth + dst.mCropLeft) / 2;
+    uint16_t *dst_ptr = (uint16_t *)dst.mBits
+        + dst.mCropTop * dst.mWidth + dst.mCropLeft;
 
     const uint8_t *src_y =
         (const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft;
@@ -411,7 +417,11 @@
                 | ((kAdjustedClip[g2] >> 2) << 5)
                 | (kAdjustedClip[r2] >> 3);
 
-            dst_ptr[x / 2] = (rgb2 << 16) | rgb1;
+            if (x + 1 < src.cropWidth()) {
+                *(uint32_t *)(&dst_ptr[x]) = (rgb2 << 16) | rgb1;
+            } else {
+                dst_ptr[x] = rgb1;
+            }
         }
 
         src_y += src.mWidth;
@@ -420,7 +430,7 @@
             src_u += src.mWidth;
         }
 
-        dst_ptr += dst.mWidth / 2;
+        dst_ptr += dst.mWidth;
     }
 
     return OK;
@@ -430,15 +440,14 @@
         const BitmapParams &src, const BitmapParams &dst) {
     uint8_t *kAdjustedClip = initClip();
 
-    if (!((dst.mWidth & 3) == 0
-            && (src.mCropLeft & 1) == 0
+    if (!((src.mCropLeft & 1) == 0
             && src.cropWidth() == dst.cropWidth()
             && src.cropHeight() == dst.cropHeight())) {
         return ERROR_UNSUPPORTED;
     }
 
-    uint32_t *dst_ptr = (uint32_t *)dst.mBits
-        + (dst.mCropTop * dst.mWidth + dst.mCropLeft) / 2;
+    uint16_t *dst_ptr = (uint16_t *)dst.mBits
+        + dst.mCropTop * dst.mWidth + dst.mCropLeft;
 
     const uint8_t *src_y = (const uint8_t *)src.mBits;
 
@@ -478,7 +487,11 @@
                 | ((kAdjustedClip[g2] >> 2) << 5)
                 | (kAdjustedClip[b2] >> 3);
 
-            dst_ptr[x / 2] = (rgb2 << 16) | rgb1;
+            if (x + 1 < src.cropWidth()) {
+                *(uint32_t *)(&dst_ptr[x]) = (rgb2 << 16) | rgb1;
+            } else {
+                dst_ptr[x] = rgb1;
+            }
         }
 
         src_y += src.mWidth;
@@ -487,7 +500,7 @@
             src_u += src.mWidth;
         }
 
-        dst_ptr += dst.mWidth / 2;
+        dst_ptr += dst.mWidth;
     }
 
     return OK;
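The repeated hunk above swaps the uint32_t destination pointer for a uint16_t one: two adjacent RGB565 pixels are still packed into a single 32-bit store on the fast path, but the last pixel of an odd crop width is now written on its own, which is why the (dst.mWidth & 3) == 0 restriction could be dropped. A minimal sketch of that store pattern with illustrative names:

    #include <cstddef>
    #include <cstdint>

    // Writes one row of RGB565 pixels, pairing adjacent pixels into 32-bit
    // stores where possible and falling back to a 16-bit store for an odd tail.
    // Assumes dst[x] is 32-bit aligned for the paired store, as the converter does.
    static void storeRow565(uint16_t *dst, const uint16_t *pixels, size_t width) {
        for (size_t x = 0; x < width; x += 2) {
            uint32_t rgb1 = pixels[x];
            if (x + 1 < width) {
                uint32_t rgb2 = pixels[x + 1];
                // Little-endian packing, matching (rgb2 << 16) | rgb1 in the diff.
                *(uint32_t *)(&dst[x]) = (rgb2 << 16) | rgb1;
            } else {
                dst[x] = (uint16_t)rgb1;   // odd width: write the last pixel alone
            }
        }
    }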
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index e892f92..059d6b9 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -19,12 +19,9 @@
 
 #include "../include/SoftwareRenderer.h"
 
-#include <binder/MemoryHeapBase.h>
-#include <binder/MemoryHeapPmem.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MetaData.h>
-#include <surfaceflinger/Surface.h>
-#include <ui/android_native_buffer.h>
+#include <system/window.h>
 #include <ui/GraphicBufferMapper.h>
 #include <gui/ISurfaceTexture.h>
 
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index 0a6776e..8b01ac6 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -19,6 +19,7 @@
 #include <ctype.h>
 
 #include "AAtomizer.h"
+#include "ABuffer.h"
 #include "ADebug.h"
 #include "ALooperRoster.h"
 #include "AString.h"
@@ -73,6 +74,7 @@
 
         case kTypeObject:
         case kTypeMessage:
+        case kTypeBuffer:
         {
             if (item->u.refValue != NULL) {
                 item->u.refValue->decStrong(this);
@@ -157,14 +159,23 @@
     item->u.stringValue = new AString(s, len < 0 ? strlen(s) : len);
 }
 
-void AMessage::setObject(const char *name, const sp<RefBase> &obj) {
+void AMessage::setObjectInternal(
+        const char *name, const sp<RefBase> &obj, Type type) {
     Item *item = allocateItem(name);
-    item->mType = kTypeObject;
+    item->mType = type;
 
     if (obj != NULL) { obj->incStrong(this); }
     item->u.refValue = obj.get();
 }
 
+void AMessage::setObject(const char *name, const sp<RefBase> &obj) {
+    setObjectInternal(name, obj, kTypeObject);
+}
+
+void AMessage::setBuffer(const char *name, const sp<ABuffer> &buffer) {
+    setObjectInternal(name, sp<RefBase>(buffer), kTypeBuffer);
+}
+
 void AMessage::setMessage(const char *name, const sp<AMessage> &obj) {
     Item *item = allocateItem(name);
     item->mType = kTypeMessage;
@@ -203,6 +214,15 @@
     return false;
 }
 
+bool AMessage::findBuffer(const char *name, sp<ABuffer> *buf) const {
+    const Item *item = findItem(name, kTypeBuffer);
+    if (item) {
+        *buf = (ABuffer *)(item->u.refValue);
+        return true;
+    }
+    return false;
+}
+
 bool AMessage::findMessage(const char *name, sp<AMessage> *obj) const {
     const Item *item = findItem(name, kTypeMessage);
     if (item) {
@@ -273,6 +293,7 @@
             }
 
             case kTypeObject:
+            case kTypeBuffer:
             {
                 to->u.refValue = from->u.refValue;
                 to->u.refValue->incStrong(msg.get());
@@ -377,6 +398,10 @@
                 tmp = StringPrintf(
                         "RefBase *%s = %p", item.mName, item.u.refValue);
                 break;
+            case kTypeBuffer:
+                tmp = StringPrintf(
+                        "ABuffer *%s = %p", item.mName, item.u.refValue);
+                break;
             case kTypeMessage:
                 tmp = StringPrintf(
                         "AMessage %s = %s",
@@ -542,4 +567,20 @@
     }
 }
 
+size_t AMessage::countEntries() const {
+    return mNumItems;
+}
+
+const char *AMessage::getEntryNameAt(size_t index, Type *type) const {
+    if (index >= mNumItems) {
+        *type = kTypeInt32;
+
+        return NULL;
+    }
+
+    *type = mItems[index].mType;
+
+    return mItems[index].mName;
+}
+
 }  // namespace android
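The new kTypeBuffer entry type gives AMessage first-class ABuffer storage alongside the existing setObject()/findObject() path, and countEntries()/getEntryNameAt() allow enumerating a message's contents. A short usage sketch of the added calls, assuming the stagefright foundation headers are on the include path and that AMessage::Type is publicly visible, as the new getEntryNameAt() signature implies:

    #include <media/stagefright/foundation/ABuffer.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    static void demo() {
        sp<AMessage> msg = new AMessage;
        sp<ABuffer> payload = new ABuffer(1024);
        msg->setBuffer("csd-0", payload);      // stored as kTypeBuffer, refcounted

        sp<ABuffer> out;
        if (msg->findBuffer("csd-0", &out)) {
            // "out" now refers to the same ABuffer that was stored above.
        }

        // Enumerate all entries by name and type.
        for (size_t i = 0; i < msg->countEntries(); ++i) {
            AMessage::Type type;
            const char *name = msg->getEntryNameAt(i, &type);
            (void)name; (void)type;
        }
    }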
diff --git a/media/libstagefright/httplive/Android.mk b/media/libstagefright/httplive/Android.mk
index 9225e41..a5990c3 100644
--- a/media/libstagefright/httplive/Android.mk
+++ b/media/libstagefright/httplive/Android.mk
@@ -9,9 +9,9 @@
 
 LOCAL_C_INCLUDES:= \
 	$(JNI_H_INCLUDE) \
-	$(TOP)/frameworks/base/include/media/stagefright/openmax \
-        $(TOP)/frameworks/base/media/libstagefright \
-        $(TOP)/external/openssl/include
+	$(TOP)/frameworks/base/media/libstagefright \
+	$(TOP)/frameworks/native/include/media/openmax \
+	$(TOP)/external/openssl/include
 
 LOCAL_MODULE:= libstagefright_httplive
 
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 0df66f1..0cddd2e 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -215,7 +215,9 @@
     mDisconnectPending = false;
 }
 
-status_t LiveSession::fetchFile(const char *url, sp<ABuffer> *out) {
+status_t LiveSession::fetchFile(
+        const char *url, sp<ABuffer> *out,
+        int64_t range_offset, int64_t range_length) {
     *out = NULL;
 
     sp<DataSource> source;
@@ -234,8 +236,18 @@
             }
         }
 
-        status_t err = mHTTPDataSource->connect(
-                url, mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
+        KeyedVector<String8, String8> headers = mExtraHeaders;
+        if (range_offset > 0 || range_length >= 0) {
+            headers.add(
+                    String8("Range"),
+                    String8(
+                        StringPrintf(
+                            "bytes=%lld-%s",
+                            range_offset,
+                            range_length < 0
+                                ? "" : StringPrintf("%lld", range_offset + range_length - 1).c_str()).c_str()));
+        }
+        status_t err = mHTTPDataSource->connect(url, &headers);
 
         if (err != OK) {
             return err;
@@ -270,9 +282,21 @@
             buffer = copy;
         }
 
+        size_t maxBytesToRead = bufferRemaining;
+        if (range_length >= 0) {
+            int64_t bytesLeftInRange = range_length - buffer->size();
+            if (bytesLeftInRange < maxBytesToRead) {
+                maxBytesToRead = bytesLeftInRange;
+
+                if (bytesLeftInRange == 0) {
+                    break;
+                }
+            }
+        }
+
         ssize_t n = source->readAt(
                 buffer->size(), buffer->data() + buffer->size(),
-                bufferRemaining);
+                maxBytesToRead);
 
         if (n < 0) {
             return n;
@@ -659,8 +683,15 @@
         explicitDiscontinuity = true;
     }
 
+    int64_t range_offset, range_length;
+    if (!itemMeta->findInt64("range-offset", &range_offset)
+            || !itemMeta->findInt64("range-length", &range_length)) {
+        range_offset = 0;
+        range_length = -1;
+    }
+
     sp<ABuffer> buffer;
-    status_t err = fetchFile(uri.c_str(), &buffer);
+    status_t err = fetchFile(uri.c_str(), &buffer, range_offset, range_length);
     if (err != OK) {
         ALOGE("failed to fetch .ts segment at url '%s'", uri.c_str());
         mDataSource->queueEOS(err);
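fetchFile() now translates an optional (range_offset, range_length) pair into a standard HTTP Range header: an open-ended "bytes=<offset>-" when the length is unknown, otherwise "bytes=<offset>-<offset+length-1>", since the Range end position is inclusive. A minimal sketch of that formatting, independent of the stagefright HTTP stack and using illustrative names:

    #include <cinttypes>
    #include <cstdio>
    #include <string>

    // Builds the value of an HTTP "Range" header for a byte range starting at
    // `offset`; length < 0 means "to end of file" (open-ended range).
    static std::string makeRangeHeaderValue(int64_t offset, int64_t length) {
        char buf[64];
        if (length < 0) {
            snprintf(buf, sizeof(buf), "bytes=%" PRId64 "-", offset);
        } else {
            // HTTP ranges are inclusive, hence the -1 on the end position.
            snprintf(buf, sizeof(buf), "bytes=%" PRId64 "-%" PRId64,
                     offset, offset + length - 1);
        }
        return std::string(buf);
    }

    // e.g. makeRangeHeaderValue(100, 50) -> "bytes=100-149"
    //      makeRangeHeaderValue(100, -1) -> "bytes=100-"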
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 5e30488..7d3cf05 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -152,6 +152,7 @@
 
     const char *data = (const char *)_data;
     size_t offset = 0;
+    uint64_t segmentRangeOffset = 0;
     while (offset < size) {
         size_t offsetLF = offset;
         while (offsetLF < size && data[offsetLF] != '\n') {
@@ -218,6 +219,24 @@
                 }
                 mIsVariantPlaylist = true;
                 err = parseStreamInf(line, &itemMeta);
+            } else if (line.startsWith("#EXT-X-BYTERANGE")) {
+                if (mIsVariantPlaylist) {
+                    return ERROR_MALFORMED;
+                }
+
+                uint64_t length, offset;
+                err = parseByteRange(line, segmentRangeOffset, &length, &offset);
+
+                if (err == OK) {
+                    if (itemMeta == NULL) {
+                        itemMeta = new AMessage;
+                    }
+
+                    itemMeta->setInt64("range-offset", offset);
+                    itemMeta->setInt64("range-length", length);
+
+                    segmentRangeOffset = offset + length;
+                }
             }
 
             if (err != OK) {
@@ -447,6 +466,52 @@
 }
 
 // static
+status_t M3UParser::parseByteRange(
+        const AString &line, uint64_t curOffset,
+        uint64_t *length, uint64_t *offset) {
+    ssize_t colonPos = line.find(":");
+
+    if (colonPos < 0) {
+        return ERROR_MALFORMED;
+    }
+
+    ssize_t atPos = line.find("@", colonPos + 1);
+
+    AString lenStr;
+    if (atPos < 0) {
+        lenStr = AString(line, colonPos + 1, line.size() - colonPos - 1);
+    } else {
+        lenStr = AString(line, colonPos + 1, atPos - colonPos - 1);
+    }
+
+    lenStr.trim();
+
+    const char *s = lenStr.c_str();
+    char *end;
+    *length = strtoull(s, &end, 10);
+
+    if (s == end || *end != '\0') {
+        return ERROR_MALFORMED;
+    }
+
+    if (atPos >= 0) {
+        AString offStr = AString(line, atPos + 1, line.size() - atPos - 1);
+        offStr.trim();
+
+        const char *s = offStr.c_str();
+        *offset = strtoull(s, &end, 10);
+
+        if (s == end || *end != '\0') {
+            return ERROR_MALFORMED;
+        }
+    } else {
+        *offset = curOffset;
+    }
+
+    return OK;
+}
+
+// static
 status_t M3UParser::ParseInt32(const char *s, int32_t *x) {
     char *end;
     long lval = strtol(s, &end, 10);
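parseByteRange() handles the HLS tag #EXT-X-BYTERANGE:<length>[@<offset>]: the length is mandatory, and when the @<offset> part is absent the segment is assumed to start where the previous one ended (the running segmentRangeOffset). A standalone sketch of the same parse, with illustrative names and the whitespace trimming of the original omitted for brevity:

    #include <cstdint>
    #include <cstdlib>
    #include <string>

    // Parses "#EXT-X-BYTERANGE:<length>[@<offset>]". Returns false on malformed
    // input. If no "@<offset>" is present, *offset is set to curOffset, i.e. the
    // byte just past the previous segment.
    static bool parseByteRangeTag(
            const std::string &line, uint64_t curOffset,
            uint64_t *length, uint64_t *offset) {
        size_t colon = line.find(':');
        if (colon == std::string::npos) return false;

        size_t at = line.find('@', colon + 1);
        std::string lenStr = (at == std::string::npos)
                ? line.substr(colon + 1)
                : line.substr(colon + 1, at - colon - 1);

        char *end;
        *length = strtoull(lenStr.c_str(), &end, 10);
        if (end == lenStr.c_str() || *end != '\0') return false;

        if (at != std::string::npos) {
            std::string offStr = line.substr(at + 1);
            *offset = strtoull(offStr.c_str(), &end, 10);
            if (end == offStr.c_str() || *end != '\0') return false;
        } else {
            *offset = curOffset;
        }
        return true;
    }

    // e.g. "#EXT-X-BYTERANGE:75232@0" -> length 75232, offset 0
    //      "#EXT-X-BYTERANGE:82112"   -> length 82112, offset = curOffset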
diff --git a/media/libstagefright/id3/Android.mk b/media/libstagefright/id3/Android.mk
index 23c8e44..ff35d4a 100644
--- a/media/libstagefright/id3/Android.mk
+++ b/media/libstagefright/id3/Android.mk
@@ -16,7 +16,7 @@
 	testid3.cpp
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright libutils libbinder
+	libstagefright libutils libbinder libstagefright_foundation
 
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_id3
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index 6dde9d8..ca14054 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -20,8 +20,8 @@
 
 #include "../include/ID3.h"
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/Utils.h>
 #include <utils/String8.h>
 #include <byteswap.h>
@@ -463,40 +463,65 @@
     tmp = NULL;
 }
 
-void ID3::Iterator::getString(String8 *id) const {
+// the 2nd argument is used to get the data following the \0 in a comment field
+void ID3::Iterator::getString(String8 *id, String8 *comment) const {
+    getstring(id, false);
+    if (comment != NULL) {
+        getstring(comment, true);
+    }
+}
+
+// comment fields (COM/COMM) contain an initial short descriptor, followed by \0,
+// followed by more data. The data following the \0 can be retrieved by setting
+// "otherdata" to true.
+void ID3::Iterator::getstring(String8 *id, bool otherdata) const {
     id->setTo("");
 
-    if (mFrameData == NULL) {
+    const uint8_t *frameData = mFrameData;
+    if (frameData == NULL) {
         return;
     }
 
+    uint8_t encoding = *frameData;
+
     if (mParent.mVersion == ID3_V1 || mParent.mVersion == ID3_V1_1) {
         if (mOffset == 126 || mOffset == 127) {
             // Special treatment for the track number and genre.
             char tmp[16];
-            sprintf(tmp, "%d", (int)*mFrameData);
+            sprintf(tmp, "%d", (int)*frameData);
 
             id->setTo(tmp);
             return;
         }
 
-        convertISO8859ToString8(mFrameData, mFrameSize, id);
+        convertISO8859ToString8(frameData, mFrameSize, id);
         return;
     }
 
     size_t n = mFrameSize - getHeaderLength() - 1;
+    if (otherdata) {
+        // skip past the encoding, language, and the 0 separator
+        frameData += 4;
+        int32_t i = n - 4;
+        while(--i >= 0 && *++frameData != 0) ;
+        int skipped = (frameData - mFrameData);
+        if (skipped >= n) {
+            return;
+        }
+        n -= skipped;
+    }
 
-    if (*mFrameData == 0x00) {
+    if (encoding == 0x00) {
         // ISO 8859-1
-        convertISO8859ToString8(mFrameData + 1, n, id);
-    } else if (*mFrameData == 0x03) {
+        convertISO8859ToString8(frameData + 1, n, id);
+    } else if (encoding == 0x03) {
         // UTF-8
-        id->setTo((const char *)(mFrameData + 1), n);
-    } else if (*mFrameData == 0x02) {
+        id->setTo((const char *)(frameData + 1), n);
+    } else if (encoding == 0x02) {
         // UTF-16 BE, no byte order mark.
         // API wants number of characters, not number of bytes...
         int len = n / 2;
-        const char16_t *framedata = (const char16_t *) (mFrameData + 1);
+        const char16_t *framedata = (const char16_t *) (frameData + 1);
         char16_t *framedatacopy = NULL;
 #if BYTE_ORDER == LITTLE_ENDIAN
         framedatacopy = new char16_t[len];
@@ -513,7 +538,7 @@
         // UCS-2
         // API wants number of characters, not number of bytes...
         int len = n / 2;
-        const char16_t *framedata = (const char16_t *) (mFrameData + 1);
+        const char16_t *framedata = (const char16_t *) (frameData + 1);
         char16_t *framedatacopy = NULL;
         if (*framedata == 0xfffe) {
             // endianness marker doesn't match host endianness, convert
diff --git a/media/libstagefright/id3/testid3.cpp b/media/libstagefright/id3/testid3.cpp
index 0741045..bc4572c 100644
--- a/media/libstagefright/id3/testid3.cpp
+++ b/media/libstagefright/id3/testid3.cpp
@@ -23,7 +23,7 @@
 
 #include <binder/ProcessState.h>
 #include <media/stagefright/FileSource.h>
-#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/foundation/ADebug.h>
 
 #define MAXPATHLEN 256
 
@@ -70,7 +70,7 @@
 
 void scanFile(const char *path) {
     sp<FileSource> file = new FileSource(path);
-    CHECK_EQ(file->initCheck(), OK);
+    CHECK_EQ(file->initCheck(), (status_t)OK);
 
     ID3 tag(file);
     if (!tag.isValid()) {
diff --git a/media/libstagefright/include/AACDecoder.h b/media/libstagefright/include/AACDecoder.h
deleted file mode 100644
index 886a3b7..0000000
--- a/media/libstagefright/include/AACDecoder.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AAC_DECODER_H_
-
-#define AAC_DECODER_H_
-
-#include <media/stagefright/MediaSource.h>
-
-struct tPVMP4AudioDecoderExternal;
-
-namespace android {
-
-struct MediaBufferGroup;
-struct MetaData;
-
-struct AACDecoder : public MediaSource {
-    AACDecoder(const sp<MediaSource> &source);
-
-    virtual status_t start(MetaData *params);
-    virtual status_t stop();
-
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options);
-
-protected:
-    virtual ~AACDecoder();
-
-private:
-    sp<MetaData>    mMeta;
-    sp<MediaSource> mSource;
-    bool mStarted;
-
-    MediaBufferGroup *mBufferGroup;
-
-    tPVMP4AudioDecoderExternal *mConfig;
-    void *mDecoderBuf;
-    int64_t mAnchorTimeUs;
-    int64_t mNumSamplesOutput;
-    status_t mInitCheck;
-    int64_t  mNumDecodedBuffers;
-    int32_t  mUpsamplingFactor;
-
-    MediaBuffer *mInputBuffer;
-
-    status_t initCheck();
-    AACDecoder(const AACDecoder &);
-    AACDecoder &operator=(const AACDecoder &);
-};
-
-}  // namespace android
-
-#endif  // AAC_DECODER_H_
diff --git a/media/libstagefright/include/AACExtractor.h b/media/libstagefright/include/AACExtractor.h
index 8e5657b..e98ca82 100644
--- a/media/libstagefright/include/AACExtractor.h
+++ b/media/libstagefright/include/AACExtractor.h
@@ -29,7 +29,7 @@
 
 class AACExtractor : public MediaExtractor {
 public:
-    AACExtractor(const sp<DataSource> &source);
+    AACExtractor(const sp<DataSource> &source, const sp<AMessage> &meta);
 
     virtual size_t countTracks();
     virtual sp<MediaSource> getTrack(size_t index);
diff --git a/media/libstagefright/include/AMRNBDecoder.h b/media/libstagefright/include/AMRNBDecoder.h
deleted file mode 100644
index cf24eda..0000000
--- a/media/libstagefright/include/AMRNBDecoder.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AMR_NB_DECODER_H_
-
-#define AMR_NB_DECODER_H_
-
-#include <media/stagefright/MediaSource.h>
-
-namespace android {
-
-struct MediaBufferGroup;
-
-struct AMRNBDecoder : public MediaSource {
-    AMRNBDecoder(const sp<MediaSource> &source);
-
-    virtual status_t start(MetaData *params);
-    virtual status_t stop();
-
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options);
-
-protected:
-    virtual ~AMRNBDecoder();
-
-private:
-    sp<MediaSource> mSource;
-    bool mStarted;
-
-    MediaBufferGroup *mBufferGroup;
-
-    void *mState;
-    int64_t mAnchorTimeUs;
-    int64_t mNumSamplesOutput;
-
-    MediaBuffer *mInputBuffer;
-
-    AMRNBDecoder(const AMRNBDecoder &);
-    AMRNBDecoder &operator=(const AMRNBDecoder &);
-};
-
-}  // namespace android
-
-#endif  // AMR_NB_DECODER_H_
diff --git a/media/libstagefright/include/AMRNBEncoder.h b/media/libstagefright/include/AMRNBEncoder.h
deleted file mode 100644
index 71160e6..0000000
--- a/media/libstagefright/include/AMRNBEncoder.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AMR_NB_ENCODER_H_
-
-#define AMR_NB_ENCODER_H_
-
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-
-namespace android {
-
-struct MediaBufferGroup;
-
-struct AMRNBEncoder : public MediaSource {
-    AMRNBEncoder(const sp<MediaSource> &source, const sp<MetaData> &meta);
-
-    virtual status_t start(MetaData *params);
-    virtual status_t stop();
-
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options);
-
-protected:
-    virtual ~AMRNBEncoder();
-
-private:
-    sp<MediaSource> mSource;
-    sp<MetaData>    mMeta;
-    bool mStarted;
-
-    MediaBufferGroup *mBufferGroup;
-
-    void *mEncState;
-    void *mSidState;
-    int64_t mAnchorTimeUs;
-    int64_t mNumFramesOutput;
-
-    MediaBuffer *mInputBuffer;
-    int mMode;
-
-    int16_t mInputFrame[160];
-    int32_t mNumInputSamples;
-
-    AMRNBEncoder(const AMRNBEncoder &);
-    AMRNBEncoder &operator=(const AMRNBEncoder &);
-};
-
-}  // namespace android
-
-#endif  // AMR_NB_ENCODER_H_
diff --git a/media/libstagefright/include/AMRWBDecoder.h b/media/libstagefright/include/AMRWBDecoder.h
deleted file mode 100644
index 927c51c..0000000
--- a/media/libstagefright/include/AMRWBDecoder.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AMR_WB_DECODER_H_
-
-#define AMR_WB_DECODER_H_
-
-#include <media/stagefright/MediaSource.h>
-
-namespace android {
-
-struct MediaBufferGroup;
-
-struct AMRWBDecoder : public MediaSource {
-    AMRWBDecoder(const sp<MediaSource> &source);
-
-    virtual status_t start(MetaData *params);
-    virtual status_t stop();
-
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options);
-
-protected:
-    virtual ~AMRWBDecoder();
-
-private:
-    sp<MediaSource> mSource;
-    bool mStarted;
-
-    MediaBufferGroup *mBufferGroup;
-
-    void *mState;
-    void *mDecoderBuf;
-    int16_t *mDecoderCookie;
-    int64_t mAnchorTimeUs;
-    int64_t mNumSamplesOutput;
-    int16_t mInputSampleBuffer[477];
-
-    MediaBuffer *mInputBuffer;
-
-    AMRWBDecoder(const AMRWBDecoder &);
-    AMRWBDecoder &operator=(const AMRWBDecoder &);
-};
-
-}  // namespace android
-
-#endif  // AMR_WB_DECODER_H_
diff --git a/media/libstagefright/include/AMRWBEncoder.h b/media/libstagefright/include/AMRWBEncoder.h
deleted file mode 100644
index f2d155f..0000000
--- a/media/libstagefright/include/AMRWBEncoder.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AMR_WB_ENCODER_H
-#define AMR_WB_ENCODER_H
-
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-
-struct VO_AUDIO_CODECAPI;
-struct VO_MEM_OPERATOR;
-
-namespace android {
-
-struct MediaBufferGroup;
-
-class AMRWBEncoder: public MediaSource {
-    public:
-        AMRWBEncoder(const sp<MediaSource> &source, const sp<MetaData> &meta);
-
-        virtual status_t start(MetaData *params);
-        virtual status_t stop();
-        virtual sp<MetaData> getFormat();
-        virtual status_t read(
-                MediaBuffer **buffer, const ReadOptions *options);
-
-
-    protected:
-        virtual ~AMRWBEncoder();
-
-    private:
-        sp<MediaSource>   mSource;
-        sp<MetaData>      mMeta;
-        bool              mStarted;
-        MediaBufferGroup *mBufferGroup;
-        MediaBuffer      *mInputBuffer;
-        status_t          mInitCheck;
-        int32_t           mBitRate;
-        void             *mEncoderHandle;
-        VO_AUDIO_CODECAPI *mApiHandle;
-        VO_MEM_OPERATOR  *mMemOperator;
-
-        int64_t mAnchorTimeUs;
-        int64_t mNumFramesOutput;
-
-        int16_t mInputFrame[320];
-        int32_t mNumInputSamples;
-
-        status_t initCheck();
-
-        AMRWBEncoder& operator=(const AMRWBEncoder &rhs);
-        AMRWBEncoder(const AMRWBEncoder& copy);
-
-};
-
-}
-
-#endif  //#ifndef AMR_WB_ENCODER_H
-
diff --git a/media/libstagefright/include/AVCDecoder.h b/media/libstagefright/include/AVCDecoder.h
deleted file mode 100644
index eb3b142..0000000
--- a/media/libstagefright/include/AVCDecoder.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef AVC_DECODER_H_
-
-#define AVC_DECODER_H_
-
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaSource.h>
-#include <utils/Vector.h>
-
-struct tagAVCHandle;
-
-namespace android {
-
-struct AVCDecoder : public MediaSource,
-                    public MediaBufferObserver {
-    AVCDecoder(const sp<MediaSource> &source);
-
-    virtual status_t start(MetaData *params);
-    virtual status_t stop();
-
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options);
-
-    virtual void signalBufferReturned(MediaBuffer *buffer);
-
-protected:
-    virtual ~AVCDecoder();
-
-private:
-    sp<MediaSource> mSource;
-    bool mStarted;
-
-    sp<MetaData> mFormat;
-
-    Vector<MediaBuffer *> mCodecSpecificData;
-
-    tagAVCHandle *mHandle;
-    Vector<MediaBuffer *> mFrames;
-    MediaBuffer *mInputBuffer;
-
-    int64_t mAnchorTimeUs;
-    int64_t mNumSamplesOutput;
-    int64_t mPendingSeekTimeUs;
-    MediaSource::ReadOptions::SeekMode mPendingSeekMode;
-
-    int64_t mTargetTimeUs;
-
-    bool mSPSSeen;
-    bool mPPSSeen;
-
-    void addCodecSpecificData(const uint8_t *data, size_t size);
-
-    static int32_t ActivateSPSWrapper(
-            void *userData, unsigned int sizeInMbs, unsigned int numBuffers);
-
-    static int32_t BindFrameWrapper(
-            void *userData, int32_t index, uint8_t **yuv);
-
-    static void UnbindFrame(void *userData, int32_t index);
-
-    int32_t activateSPS(
-            unsigned int sizeInMbs, unsigned int numBuffers);
-
-    int32_t bindFrame(int32_t index, uint8_t **yuv);
-
-    void releaseFrames();
-
-    MediaBuffer *drainOutputBuffer();
-
-    AVCDecoder(const AVCDecoder &);
-    AVCDecoder &operator=(const AVCDecoder &);
-};
-
-}  // namespace android
-
-#endif  // AVC_DECODER_H_
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 0985f47..06e9468 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -41,7 +41,7 @@
 class DrmManagerClinet;
 class DecryptHandle;
 
-class TimedTextPlayer;
+class TimedTextDriver;
 struct WVMExtractor;
 
 struct AwesomeRenderer : public RefBase {
@@ -90,6 +90,7 @@
 
     status_t setParameter(int key, const Parcel &request);
     status_t getParameter(int key, Parcel *reply);
+    status_t invoke(const Parcel &request, Parcel *reply);
     status_t setCacheStatCollectFreq(const Parcel &request);
 
     status_t seekTo(int64_t timeUs);
@@ -100,8 +101,6 @@
     void postAudioEOS(int64_t delayUs = 0ll);
     void postAudioSeekComplete();
 
-    status_t setTimedTextTrackIndex(int32_t index);
-
     status_t dump(int fd, const Vector<String16> &args) const;
 
 private:
@@ -136,7 +135,7 @@
         INCOGNITO           = 0x8000,
 
         TEXT_RUNNING        = 0x10000,
-        TEXTPLAYER_STARTED  = 0x20000,
+        TEXTPLAYER_INITIALIZED  = 0x20000,
 
         SLOW_DECODER_HACK   = 0x40000,
     };
@@ -232,7 +231,7 @@
     sp<DecryptHandle> mDecryptHandle;
 
     int64_t mLastVideoTimeUs;
-    TimedTextPlayer *mTextPlayer;
+    TimedTextDriver *mTextDriver;
     mutable Mutex mTimedTextLock;
 
     sp<WVMExtractor> mWVMExtractor;
@@ -258,7 +257,7 @@
     void setVideoSource(sp<MediaSource> source);
     status_t initVideoDecoder(uint32_t flags = 0);
 
-    void addTextSource(sp<MediaSource> source);
+    void addTextSource(const sp<MediaSource>& source);
 
     void onStreamDone();
 
@@ -290,6 +289,7 @@
 
     bool isStreamingHTTP() const;
     void sendCacheStats();
+    void checkDrmStatus(const sp<DataSource>& dataSource);
 
     enum FlagMode {
         SET,
@@ -325,4 +325,3 @@
 }  // namespace android
 
 #endif  // AWESOME_PLAYER_H_
-
diff --git a/media/libstagefright/include/ChromiumHTTPDataSource.h b/media/libstagefright/include/ChromiumHTTPDataSource.h
index 18f8913..82e08fd 100644
--- a/media/libstagefright/include/ChromiumHTTPDataSource.h
+++ b/media/libstagefright/include/ChromiumHTTPDataSource.h
@@ -43,7 +43,7 @@
     virtual status_t getSize(off64_t *size);
     virtual uint32_t flags();
 
-    virtual sp<DecryptHandle> DrmInitialization();
+    virtual sp<DecryptHandle> DrmInitialization(const char *mime);
 
     virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client);
 
diff --git a/media/libstagefright/include/DataUriSource.h b/media/libstagefright/include/DataUriSource.h
new file mode 100644
index 0000000..d223c06
--- /dev/null
+++ b/media/libstagefright/include/DataUriSource.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DATA_URI_SOURCE_H_
+
+#define DATA_URI_SOURCE_H_
+
+#include <stdio.h>
+
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/foundation/AString.h>
+
+namespace android {
+
+class DataUriSource : public DataSource {
+public:
+    DataUriSource(const char *uri);
+
+    virtual status_t initCheck() const {
+        return mInited;
+    }
+
+    virtual ssize_t readAt(off64_t offset, void *data, size_t size);
+
+    virtual status_t getSize(off64_t *size) {
+        if (mInited != OK) {
+            return mInited;
+        }
+
+        *size = mData.size();
+        return OK;
+    }
+
+    virtual String8 getUri() {
+        return mDataUri;
+    }
+
+    virtual String8 getMIMEType() const {
+        return mMimeType;
+    }
+
+protected:
+    virtual ~DataUriSource() {
+        // Nothing to delete.
+    }
+
+private:
+    const String8 mDataUri;
+
+    String8 mMimeType;
+    // Use AString because individual bytes may not be valid UTF8 chars.
+    AString mData;
+    status_t mInited;
+
+    // Disallow copy and assign.
+    DataUriSource(const DataUriSource &);
+    DataUriSource &operator=(const DataUriSource &);
+};
+
+}  // namespace android
+
+#endif  // DATA_URI_SOURCE_H_
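For context on the new DataUriSource header above, a minimal usage sketch follows. It relies only on the declarations shown in this hunk; the base64 payload is a placeholder, and the exact set of accepted data: URI forms is defined by DataUriSource.cpp, which is not part of this diff.

    // Sketch only: assumes it is compiled inside libstagefright, where
    // status_t/OK and the sp<> smart pointer are already available.
    #include "include/DataUriSource.h"

    using namespace android;

    static void readFromDataUri() {
        // Placeholder payload; any RFC 2397 style data: URI would do here.
        sp<DataUriSource> source =
                new DataUriSource("data:audio/mp4;base64,AAAAGGZ0eXA=");

        if (source->initCheck() != OK) {
            return;  // the URI could not be parsed
        }

        off64_t totalSize = 0;
        source->getSize(&totalSize);   // size of the decoded payload

        char buffer[64];
        ssize_t numRead = source->readAt(0 /* offset */, buffer, sizeof(buffer));
        (void)numRead;                 // getMIMEType() would report "audio/mp4"
    }
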
diff --git a/media/libstagefright/include/G711Decoder.h b/media/libstagefright/include/G711Decoder.h
deleted file mode 100644
index 8b5143a..0000000
--- a/media/libstagefright/include/G711Decoder.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef G711_DECODER_H_
-
-#define G711_DECODER_H_
-
-#include <media/stagefright/MediaSource.h>
-
-namespace android {
-
-struct MediaBufferGroup;
-
-struct G711Decoder : public MediaSource {
-    G711Decoder(const sp<MediaSource> &source);
-
-    virtual status_t start(MetaData *params);
-    virtual status_t stop();
-
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options);
-
-protected:
-    virtual ~G711Decoder();
-
-private:
-    sp<MediaSource> mSource;
-    bool mStarted;
-    bool mIsMLaw;
-
-    MediaBufferGroup *mBufferGroup;
-
-    static void DecodeALaw(int16_t *out, const uint8_t *in, size_t inSize);
-    static void DecodeMLaw(int16_t *out, const uint8_t *in, size_t inSize);
-
-    G711Decoder(const G711Decoder &);
-    G711Decoder &operator=(const G711Decoder &);
-};
-
-}  // namespace android
-
-#endif  // G711_DECODER_H_
diff --git a/media/libstagefright/include/ID3.h b/media/libstagefright/include/ID3.h
index 98c82a4..8714008 100644
--- a/media/libstagefright/include/ID3.h
+++ b/media/libstagefright/include/ID3.h
@@ -50,7 +50,7 @@
 
         bool done() const;
         void getID(String8 *id) const;
-        void getString(String8 *s) const;
+        void getString(String8 *s, String8 *ss = NULL) const;
         const uint8_t *getData(size_t *length) const;
         void next();
 
@@ -65,6 +65,7 @@
         void findFrame();
 
         size_t getHeaderLength() const;
+        void getstring(String8 *s, bool secondhalf) const;
 
         Iterator(const Iterator &);
         Iterator &operator=(const Iterator &);
diff --git a/media/libstagefright/include/LiveSession.h b/media/libstagefright/include/LiveSession.h
index 116ed0e..3a11612 100644
--- a/media/libstagefright/include/LiveSession.h
+++ b/media/libstagefright/include/LiveSession.h
@@ -120,7 +120,10 @@
     void onMonitorQueue();
     void onSeek(const sp<AMessage> &msg);
 
-    status_t fetchFile(const char *url, sp<ABuffer> *out);
+    status_t fetchFile(
+            const char *url, sp<ABuffer> *out,
+            int64_t range_offset = 0, int64_t range_length = -1);
+
     sp<M3UParser> fetchPlaylist(const char *url, bool *unchanged);
     size_t getBandwidthIndex();
 
diff --git a/media/libstagefright/include/M3UParser.h b/media/libstagefright/include/M3UParser.h
index 478582d..e30d6fd 100644
--- a/media/libstagefright/include/M3UParser.h
+++ b/media/libstagefright/include/M3UParser.h
@@ -72,6 +72,10 @@
     static status_t parseCipherInfo(
             const AString &line, sp<AMessage> *meta, const AString &baseURI);
 
+    static status_t parseByteRange(
+            const AString &line, uint64_t curOffset,
+            uint64_t *length, uint64_t *offset);
+
     static status_t ParseInt32(const char *s, int32_t *x);
     static status_t ParseDouble(const char *s, double *x);
 
diff --git a/media/libstagefright/include/M4vH263Decoder.h b/media/libstagefright/include/M4vH263Decoder.h
deleted file mode 100644
index 7d73e30..0000000
--- a/media/libstagefright/include/M4vH263Decoder.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef M4V_H263_DECODER_H_
-
-#define M4V_H263_DECODER_H_
-
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaSource.h>
-
-struct tagvideoDecControls;
-
-namespace android {
-
-struct M4vH263Decoder : public MediaSource,
-                        public MediaBufferObserver {
-    M4vH263Decoder(const sp<MediaSource> &source);
-
-    virtual status_t start(MetaData *params);
-    virtual status_t stop();
-
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options);
-
-    virtual void signalBufferReturned(MediaBuffer *buffer);
-
-protected:
-    virtual ~M4vH263Decoder();
-
-private:
-    sp<MediaSource> mSource;
-    bool mStarted;
-    int32_t mWidth, mHeight;
-
-    sp<MetaData> mFormat;
-
-    tagvideoDecControls *mHandle;
-    MediaBuffer *mFrames[2];
-    MediaBuffer *mInputBuffer;
-
-    int64_t mNumSamplesOutput;
-    int64_t mTargetTimeUs;
-
-    void allocateFrames(int32_t width, int32_t height);
-    void releaseFrames();
-
-    M4vH263Decoder(const M4vH263Decoder &);
-    M4vH263Decoder &operator=(const M4vH263Decoder &);
-};
-
-}  // namespace android
-
-#endif  // M4V_H263_DECODER_H_
diff --git a/media/libstagefright/include/MP3Decoder.h b/media/libstagefright/include/MP3Decoder.h
deleted file mode 100644
index 4086fb6..0000000
--- a/media/libstagefright/include/MP3Decoder.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MP3_DECODER_H_
-
-#define MP3_DECODER_H_
-
-#include <media/stagefright/MediaSource.h>
-
-struct tPVMP3DecoderExternal;
-
-namespace android {
-
-struct MediaBufferGroup;
-
-struct MP3Decoder : public MediaSource {
-    MP3Decoder(const sp<MediaSource> &source);
-
-    virtual status_t start(MetaData *params);
-    virtual status_t stop();
-
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options);
-
-protected:
-    virtual ~MP3Decoder();
-
-private:
-    sp<MediaSource> mSource;
-    sp<MetaData> mMeta;
-    int32_t mNumChannels;
-
-    bool mStarted;
-
-    MediaBufferGroup *mBufferGroup;
-
-    tPVMP3DecoderExternal *mConfig;
-    void *mDecoderBuf;
-    int64_t mAnchorTimeUs;
-    int64_t mNumFramesOutput;
-
-    MediaBuffer *mInputBuffer;
-
-    void init();
-
-    MP3Decoder(const MP3Decoder &);
-    MP3Decoder &operator=(const MP3Decoder &);
-};
-
-}  // namespace android
-
-#endif  // MP3_DECODER_H_
diff --git a/media/libstagefright/include/NuCachedSource2.h b/media/libstagefright/include/NuCachedSource2.h
index 7a03e7e..c27a29b 100644
--- a/media/libstagefright/include/NuCachedSource2.h
+++ b/media/libstagefright/include/NuCachedSource2.h
@@ -40,7 +40,7 @@
     virtual status_t getSize(off64_t *size);
     virtual uint32_t flags();
 
-    virtual sp<DecryptHandle> DrmInitialization();
+    virtual sp<DecryptHandle> DrmInitialization(const char* mime);
     virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client);
     virtual String8 getUri();
 
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index 53e764f..2c87b34 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -31,7 +31,7 @@
 public:
     OMX();
 
-    virtual bool livesLocally(pid_t pid);
+    virtual bool livesLocally(node_id node, pid_t pid);
 
     virtual status_t listNodes(List<ComponentInfo> *list);
 
diff --git a/media/libstagefright/include/SoftwareRenderer.h b/media/libstagefright/include/SoftwareRenderer.h
index 8f2ea95..7ab0042 100644
--- a/media/libstagefright/include/SoftwareRenderer.h
+++ b/media/libstagefright/include/SoftwareRenderer.h
@@ -20,7 +20,7 @@
 
 #include <media/stagefright/ColorConverter.h>
 #include <utils/RefBase.h>
-#include <ui/android_native_buffer.h>
+#include <system/window.h>
 
 namespace android {
 
diff --git a/media/libstagefright/include/ThrottledSource.h b/media/libstagefright/include/ThrottledSource.h
index 8928a4a..7fe7c06 100644
--- a/media/libstagefright/include/ThrottledSource.h
+++ b/media/libstagefright/include/ThrottledSource.h
@@ -35,6 +35,11 @@
     virtual status_t getSize(off64_t *size);
     virtual uint32_t flags();
 
+    virtual String8 getMIMEType() const {
+        return mSource->getMIMEType();
+    }
+
+
 private:
     Mutex mLock;
 
diff --git a/media/libstagefright/include/VPXDecoder.h b/media/libstagefright/include/VPXDecoder.h
deleted file mode 100644
index 3b8362d..0000000
--- a/media/libstagefright/include/VPXDecoder.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef VPX_DECODER_H_
-
-#define VPX_DECODER_H_
-
-#include <media/stagefright/MediaSource.h>
-#include <utils/Vector.h>
-
-namespace android {
-
-struct MediaBufferGroup;
-
-struct VPXDecoder : public MediaSource {
-    VPXDecoder(const sp<MediaSource> &source);
-
-    virtual status_t start(MetaData *params);
-    virtual status_t stop();
-
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options);
-
-protected:
-    virtual ~VPXDecoder();
-
-private:
-    sp<MediaSource> mSource;
-    bool mStarted;
-    int32_t mWidth, mHeight;
-    size_t mBufferSize;
-
-    void *mCtx;
-    MediaBufferGroup *mBufferGroup;
-
-    int64_t mTargetTimeUs;
-
-    sp<MetaData> mFormat;
-
-    VPXDecoder(const VPXDecoder &);
-    VPXDecoder &operator=(const VPXDecoder &);
-};
-
-}  // namespace android
-
-#endif  // VPX_DECODER_H_
-
diff --git a/media/libstagefright/include/VorbisDecoder.h b/media/libstagefright/include/VorbisDecoder.h
deleted file mode 100644
index 13e8b77..0000000
--- a/media/libstagefright/include/VorbisDecoder.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef VORBIS_DECODER_H_
-
-#define VORBIS_DECODER_H_
-
-#include <media/stagefright/MediaSource.h>
-
-struct vorbis_dsp_state;
-struct vorbis_info;
-
-namespace android {
-
-struct MediaBufferGroup;
-
-struct VorbisDecoder : public MediaSource {
-    VorbisDecoder(const sp<MediaSource> &source);
-
-    virtual status_t start(MetaData *params);
-    virtual status_t stop();
-
-    virtual sp<MetaData> getFormat();
-
-    virtual status_t read(
-            MediaBuffer **buffer, const ReadOptions *options);
-
-protected:
-    virtual ~VorbisDecoder();
-
-private:
-    enum {
-        kMaxNumSamplesPerBuffer = 8192 * 2
-    };
-
-    sp<MediaSource> mSource;
-    bool mStarted;
-
-    MediaBufferGroup *mBufferGroup;
-
-    int32_t mNumChannels;
-    int32_t mSampleRate;
-    int64_t mAnchorTimeUs;
-    int64_t mNumFramesOutput;
-    int32_t mNumFramesLeftOnPage;
-
-    vorbis_dsp_state *mState;
-    vorbis_info *mVi;
-
-    int decodePacket(MediaBuffer *packet, MediaBuffer *out);
-
-    VorbisDecoder(const VorbisDecoder &);
-    VorbisDecoder &operator=(const VorbisDecoder &);
-};
-
-}  // namespace android
-
-#endif  // VORBIS_DECODER_H_
-
diff --git a/media/libstagefright/include/WAVExtractor.h b/media/libstagefright/include/WAVExtractor.h
index ce1f33a..c567ccd 100644
--- a/media/libstagefright/include/WAVExtractor.h
+++ b/media/libstagefright/include/WAVExtractor.h
@@ -47,6 +47,7 @@
     bool mValidFormat;
     uint16_t mWaveFormat;
     uint16_t mNumChannels;
+    uint32_t mChannelMask;
     uint32_t mSampleRate;
     uint16_t mBitsPerSample;
     off64_t mDataOffset;
diff --git a/media/libstagefright/include/WVMExtractor.h b/media/libstagefright/include/WVMExtractor.h
index deecd25..3c3ca89 100644
--- a/media/libstagefright/include/WVMExtractor.h
+++ b/media/libstagefright/include/WVMExtractor.h
@@ -23,6 +23,8 @@
 
 namespace android {
 
+struct AMessage;
+class String8;
 class DataSource;
 
 class WVMLoadableExtractor : public MediaExtractor {
@@ -32,6 +34,7 @@
 
     virtual int64_t getCachedDurationUs(status_t *finalStatus) = 0;
     virtual void setAdaptiveStreamingMode(bool adaptive) = 0;
+    virtual void setUID(uid_t uid) = 0;
 };
 
 class WVMExtractor : public MediaExtractor {
@@ -58,6 +61,10 @@
     // is used.
     void setAdaptiveStreamingMode(bool adaptive);
 
+    void setUID(uid_t uid);
+
+    static bool getVendorLibHandle();
+
 protected:
     virtual ~WVMExtractor();
 
@@ -69,6 +76,10 @@
     WVMExtractor &operator=(const WVMExtractor &);
 };
 
+bool SniffWVM(
+        const sp<DataSource> &source, String8 *mimeType, float *confidence,
+        sp<AMessage> *);
+
 }  // namespace android
 
 #endif  // DRM_EXTRACTOR_H_
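The WVMExtractor.h change above exposes SniffWVM() with the usual stagefright sniffer signature. A hedged sketch of calling it directly; the dataSource argument and the presence of the Widevine vendor library are assumptions, not part of this patch.

    // Sketch only: returns true if the source is recognized as a Widevine stream.
    static bool looksLikeWVM(const sp<DataSource> &dataSource) {
        String8 mimeType;
        float confidence = 0.0f;
        sp<AMessage> meta;
        return SniffWVM(dataSource, &mimeType, &confidence, &meta);
    }
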
diff --git a/media/libstagefright/include/XINGSeeker.h b/media/libstagefright/include/XINGSeeker.h
index ec5bd9b..c408576 100644
--- a/media/libstagefright/include/XINGSeeker.h
+++ b/media/libstagefright/include/XINGSeeker.h
@@ -31,13 +31,19 @@
     virtual bool getDuration(int64_t *durationUs);
     virtual bool getOffsetForTime(int64_t *timeUs, off64_t *pos);
 
+    virtual int32_t getEncoderDelay();
+    virtual int32_t getEncoderPadding();
+
 private:
     int64_t mFirstFramePos;
     int64_t mDurationUs;
     int32_t mSizeBytes;
+    int32_t mEncoderDelay;
+    int32_t mEncoderPadding;
 
     // TOC entries in XING header. Skip the first one since it's always 0.
-    unsigned char mTableOfContents[99];
+    unsigned char mTOC[99];
+    bool mTOCValid;
 
     XINGSeeker();
 
diff --git a/media/libstagefright/matroska/Android.mk b/media/libstagefright/matroska/Android.mk
index 1f1c68b..e67da4c 100644
--- a/media/libstagefright/matroska/Android.mk
+++ b/media/libstagefright/matroska/Android.mk
@@ -7,7 +7,7 @@
 LOCAL_C_INCLUDES:= \
         $(JNI_H_INCLUDE) \
         $(TOP)/external/libvpx/mkvparser \
-        $(TOP)/frameworks/base/include/media/stagefright/openmax \
+        $(TOP)/frameworks/native/include/media/openmax \
 
 LOCAL_CFLAGS += -Wno-multichar
 
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index 4fbf47e..a0db719 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -93,7 +93,10 @@
 
     void advance();
     void reset();
-    void seek(int64_t seekTimeUs, bool seekToKeyFrame);
+
+    void seek(
+            int64_t seekTimeUs, bool seekToKeyFrame,
+            int64_t *actualFrameTimeUs);
 
     const mkvparser::Block *block() const;
     int64_t blockTimeUs() const;
@@ -303,22 +306,52 @@
     } while (!eos() && block()->GetTrackNumber() != mTrackNum);
 }
 
-void BlockIterator::seek(int64_t seekTimeUs, bool seekToKeyFrame) {
+void BlockIterator::seek(
+        int64_t seekTimeUs, bool seekToKeyFrame,
+        int64_t *actualFrameTimeUs) {
     Mutex::Autolock autoLock(mExtractor->mLock);
 
-    mCluster = mExtractor->mSegment->FindCluster(seekTimeUs * 1000ll);
+    *actualFrameTimeUs = -1ll;
+
+    int64_t seekTimeNs = seekTimeUs * 1000ll;
+
+    mCluster = mExtractor->mSegment->FindCluster(seekTimeNs);
     mBlockEntry = NULL;
     mBlockEntryIndex = 0;
 
-    do {
-        advance_l();
-    }
-    while (!eos() && block()->GetTrackNumber() != mTrackNum);
+    long prevKeyFrameBlockEntryIndex = -1;
 
-    if (seekToKeyFrame) {
-        while (!eos() && !mBlockEntry->GetBlock()->IsKey()) {
-            advance_l();
+    for (;;) {
+        advance_l();
+
+        if (eos()) {
+            break;
         }
+
+        if (block()->GetTrackNumber() != mTrackNum) {
+            continue;
+        }
+
+        if (block()->IsKey()) {
+            prevKeyFrameBlockEntryIndex = mBlockEntryIndex - 1;
+        }
+
+        int64_t timeNs = block()->GetTime(mCluster);
+
+        if (timeNs >= seekTimeNs) {
+            *actualFrameTimeUs = (timeNs + 500ll) / 1000ll;
+            break;
+        }
+    }
+
+    if (eos()) {
+        return;
+    }
+
+    if (seekToKeyFrame && !block()->IsKey()) {
+        CHECK_GE(prevKeyFrameBlockEntryIndex, 0);
+        mBlockEntryIndex = prevKeyFrameBlockEntryIndex;
+        advance_l();
     }
 }
 
@@ -397,6 +430,8 @@
         MediaBuffer **out, const ReadOptions *options) {
     *out = NULL;
 
+    int64_t targetSampleTimeUs = -1ll;
+
     int64_t seekTimeUs;
     ReadOptions::SeekMode mode;
     if (options && options->getSeekTo(&seekTimeUs, &mode)
@@ -406,10 +441,14 @@
         // Apparently keyframe indication in audio tracks is unreliable,
         // fortunately in all our currently supported audio encodings every
         // frame is effectively a keyframe.
-        mBlockIter.seek(seekTimeUs, !mIsAudio);
+        int64_t actualFrameTimeUs;
+        mBlockIter.seek(seekTimeUs, !mIsAudio, &actualFrameTimeUs);
+
+        if (mode == ReadOptions::SEEK_CLOSEST) {
+            targetSampleTimeUs = actualFrameTimeUs;
+        }
     }
 
-again:
     while (mPendingFrames.empty()) {
         status_t err = readBlock();
 
@@ -424,6 +463,11 @@
     mPendingFrames.erase(mPendingFrames.begin());
 
     if (mType != AVC) {
+        if (targetSampleTimeUs >= 0ll) {
+            frame->meta_data()->setInt64(
+                    kKeyTargetTime, targetSampleTimeUs);
+        }
+
         *out = frame;
 
         return OK;
@@ -506,6 +550,11 @@
     frame->release();
     frame = NULL;
 
+    if (targetSampleTimeUs >= 0ll) {
+        buffer->meta_data()->setInt64(
+                kKeyTargetTime, targetSampleTimeUs);
+    }
+
     *out = buffer;
 
     return OK;
@@ -610,36 +659,41 @@
     return mIsLiveStreaming;
 }
 
-static void addESDSFromAudioSpecificInfo(
-        const sp<MetaData> &meta, const void *asi, size_t asiSize) {
+static void addESDSFromCodecPrivate(
+        const sp<MetaData> &meta,
+        bool isAudio, const void *priv, size_t privSize) {
     static const uint8_t kStaticESDS[] = {
         0x03, 22,
         0x00, 0x00,     // ES_ID
         0x00,           // streamDependenceFlag, URL_Flag, OCRstreamFlag
 
         0x04, 17,
-        0x40,                       // Audio ISO/IEC 14496-3
+        0x40,           // ObjectTypeIndication
         0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00,
 
         0x05,
-        // AudioSpecificInfo (with size prefix) follows
+        // CodecSpecificInfo (with size prefix) follows
     };
 
     // Make sure all sizes can be coded in a single byte.
-    CHECK(asiSize + 22 - 2 < 128);
-    size_t esdsSize = sizeof(kStaticESDS) + asiSize + 1;
+    CHECK(privSize + 22 - 2 < 128);
+    size_t esdsSize = sizeof(kStaticESDS) + privSize + 1;
     uint8_t *esds = new uint8_t[esdsSize];
     memcpy(esds, kStaticESDS, sizeof(kStaticESDS));
     uint8_t *ptr = esds + sizeof(kStaticESDS);
-    *ptr++ = asiSize;
-    memcpy(ptr, asi, asiSize);
+    *ptr++ = privSize;
+    memcpy(ptr, priv, privSize);
 
     // Increment by codecPrivateSize less 2 bytes that are accounted for
     // already in lengths of 22/17
-    esds[1] += asiSize - 2;
-    esds[6] += asiSize - 2;
+    esds[1] += privSize - 2;
+    esds[6] += privSize - 2;
+
+    // Set ObjectTypeIndication.
+    esds[7] = isAudio ? 0x40   // Audio ISO/IEC 14496-3
+                      : 0x20;  // Visual ISO/IEC 14496-2
 
     meta->setData(kKeyESDS, 0, esds, esdsSize);
 
@@ -707,9 +761,21 @@
                 if (!strcmp("V_MPEG4/ISO/AVC", codecID)) {
                     meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
                     meta->setData(kKeyAVCC, 0, codecPrivate, codecPrivateSize);
+                } else if (!strcmp("V_MPEG4/ISO/ASP", codecID)) {
+                    if (codecPrivateSize > 0) {
+                        meta->setCString(
+                                kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
+                        addESDSFromCodecPrivate(
+                                meta, false, codecPrivate, codecPrivateSize);
+                    } else {
+                        ALOGW("%s is detected, but does not have configuration.",
+                                codecID);
+                        continue;
+                    }
                 } else if (!strcmp("V_VP8", codecID)) {
                     meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_VPX);
                 } else {
+                    ALOGW("%s is not supported.", codecID);
                     continue;
                 }
 
@@ -727,13 +793,16 @@
                     meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC);
                     CHECK(codecPrivateSize >= 2);
 
-                    addESDSFromAudioSpecificInfo(
-                            meta, codecPrivate, codecPrivateSize);
+                    addESDSFromCodecPrivate(
+                            meta, true, codecPrivate, codecPrivateSize);
                 } else if (!strcmp("A_VORBIS", codecID)) {
                     meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_VORBIS);
 
                     addVorbisCodecInfo(meta, codecPrivate, codecPrivateSize);
+                } else if (!strcmp("A_MPEG/L3", codecID)) {
+                    meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
                 } else {
+                    ALOGW("%s is not supported.", codecID);
                     continue;
                 }
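One practical consequence of the seek rework above: for a SEEK_CLOSEST request the extractor now positions itself on the preceding keyframe and stamps the first frame returned after the seek with kKeyTargetTime, the timestamp of the frame closest to the requested position. A simplified, hedged sketch of how a downstream consumer would typically honor that hint; the consumer itself is outside this patch.

    // Sketch only: drop-before-target handling on the decoder/output side.
    int64_t targetTimeUs;
    if (buffer->meta_data()->findInt64(kKeyTargetTime, &targetTimeUs)) {
        int64_t timeUs;
        CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
        if (timeUs < targetTimeUs) {
            // Decode this frame so later frames can be reconstructed,
            // but do not render it; keep going until timeUs >= targetTimeUs.
        }
    }
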
 
diff --git a/media/libstagefright/mpeg2ts/Android.mk b/media/libstagefright/mpeg2ts/Android.mk
index 578c669..ac4c2a1 100644
--- a/media/libstagefright/mpeg2ts/Android.mk
+++ b/media/libstagefright/mpeg2ts/Android.mk
@@ -11,8 +11,8 @@
 
 LOCAL_C_INCLUDES:= \
 	$(JNI_H_INCLUDE) \
-	$(TOP)/frameworks/base/include/media/stagefright/openmax \
-        $(TOP)/frameworks/base/media/libstagefright
+	$(TOP)/frameworks/base/media/libstagefright \
+	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_MODULE:= libstagefright_mpeg2ts
 
diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
index 03033f5..e1589b4 100644
--- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
@@ -22,8 +22,8 @@
 #include "include/LiveSession.h"
 #include "include/NuCachedSource2.h"
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/DataSource.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
diff --git a/media/libstagefright/omx/Android.mk b/media/libstagefright/omx/Android.mk
index d844f3d..083c7ef 100644
--- a/media/libstagefright/omx/Android.mk
+++ b/media/libstagefright/omx/Android.mk
@@ -5,7 +5,6 @@
 
 LOCAL_SRC_FILES:=                     \
         OMX.cpp                       \
-        OMXComponentBase.cpp          \
         OMXMaster.cpp                 \
         OMXNodeInstance.cpp           \
         SimpleSoftOMXComponent.cpp    \
@@ -14,7 +13,8 @@
 
 LOCAL_C_INCLUDES += \
         frameworks/base/media/libstagefright \
-        $(TOP)/frameworks/base/include/media/stagefright/openmax
+        $(TOP)/frameworks/native/include/media/hardware \
+        $(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_SHARED_LIBRARIES :=               \
         libbinder                       \
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 694b12d..f11fcd2 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -25,7 +25,7 @@
 #include "../include/OMXNodeInstance.h"
 
 #include <binder/IMemory.h>
-#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <utils/threads.h>
 
 #include "OMXMaster.h"
@@ -102,7 +102,7 @@
     if (status != WOULD_BLOCK) {
         // Other than join to self, the only other error return codes are
         // whatever readyToRun() returns, and we don't override that
-        CHECK_EQ(status, NO_ERROR);
+        CHECK_EQ(status, (status_t)NO_ERROR);
     }
 }
 
@@ -185,7 +185,7 @@
     instance->onObserverDied(mMaster);
 }
 
-bool OMX::livesLocally(pid_t pid) {
+bool OMX::livesLocally(node_id node, pid_t pid) {
     return pid == getpid();
 }
 
diff --git a/media/libstagefright/omx/OMXComponentBase.cpp b/media/libstagefright/omx/OMXComponentBase.cpp
deleted file mode 100644
index 35227a0..0000000
--- a/media/libstagefright/omx/OMXComponentBase.cpp
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "OMXComponentBase.h"
-
-#include <stdlib.h>
-
-#include <media/stagefright/MediaDebug.h>
-
-namespace android {
-
-OMXComponentBase::OMXComponentBase(
-        const OMX_CALLBACKTYPE *callbacks,
-        OMX_PTR appData)
-    : mCallbacks(callbacks),
-      mAppData(appData),
-      mComponentHandle(NULL) {
-}
-
-OMXComponentBase::~OMXComponentBase() {}
-
-void OMXComponentBase::setComponentHandle(OMX_COMPONENTTYPE *handle) {
-    CHECK_EQ(mComponentHandle, NULL);
-    mComponentHandle = handle;
-}
-
-void OMXComponentBase::postEvent(
-        OMX_EVENTTYPE event, OMX_U32 param1, OMX_U32 param2) {
-    (*mCallbacks->EventHandler)(
-            mComponentHandle, mAppData, event, param1, param2, NULL);
-}
-
-void OMXComponentBase::postFillBufferDone(OMX_BUFFERHEADERTYPE *bufHdr) {
-    (*mCallbacks->FillBufferDone)(mComponentHandle, mAppData, bufHdr);
-}
-
-void OMXComponentBase::postEmptyBufferDone(OMX_BUFFERHEADERTYPE *bufHdr) {
-    (*mCallbacks->EmptyBufferDone)(mComponentHandle, mAppData, bufHdr);
-}
-
-static OMXComponentBase *getBase(OMX_HANDLETYPE hComponent) {
-    return (OMXComponentBase *)
-        ((OMX_COMPONENTTYPE *)hComponent)->pComponentPrivate;
-}
-
-static OMX_ERRORTYPE SendCommandWrapper(
-        OMX_IN  OMX_HANDLETYPE hComponent,
-        OMX_IN  OMX_COMMANDTYPE Cmd,
-        OMX_IN  OMX_U32 nParam1,
-        OMX_IN  OMX_PTR pCmdData) {
-    return getBase(hComponent)->sendCommand(Cmd, nParam1, pCmdData);
-}
-
-static OMX_ERRORTYPE GetParameterWrapper(
-        OMX_IN  OMX_HANDLETYPE hComponent, 
-        OMX_IN  OMX_INDEXTYPE nParamIndex,  
-        OMX_INOUT OMX_PTR pComponentParameterStructure) {
-    return getBase(hComponent)->getParameter(
-            nParamIndex, pComponentParameterStructure);
-}
-
-static OMX_ERRORTYPE SetParameterWrapper(
-        OMX_IN  OMX_HANDLETYPE hComponent, 
-        OMX_IN  OMX_INDEXTYPE nIndex,
-        OMX_IN  OMX_PTR pComponentParameterStructure) {
-    return getBase(hComponent)->getParameter(
-            nIndex, pComponentParameterStructure);
-}
-
-static OMX_ERRORTYPE GetConfigWrapper(
-        OMX_IN  OMX_HANDLETYPE hComponent,
-        OMX_IN  OMX_INDEXTYPE nIndex, 
-        OMX_INOUT OMX_PTR pComponentConfigStructure) {
-    return getBase(hComponent)->getConfig(nIndex, pComponentConfigStructure);
-}
-
-static OMX_ERRORTYPE SetConfigWrapper(
-        OMX_IN  OMX_HANDLETYPE hComponent,
-        OMX_IN  OMX_INDEXTYPE nIndex, 
-        OMX_IN  OMX_PTR pComponentConfigStructure) {
-    return getBase(hComponent)->setConfig(nIndex, pComponentConfigStructure);
-}
-
-static OMX_ERRORTYPE GetExtensionIndexWrapper(
-        OMX_IN  OMX_HANDLETYPE hComponent,
-        OMX_IN  OMX_STRING cParameterName,
-        OMX_OUT OMX_INDEXTYPE* pIndexType) {
-    return getBase(hComponent)->getExtensionIndex(cParameterName, pIndexType);
-}
-
-static OMX_ERRORTYPE GetStateWrapper(
-        OMX_IN  OMX_HANDLETYPE hComponent,
-        OMX_OUT OMX_STATETYPE* pState) {
-    return getBase(hComponent)->getState(pState);
-}
-
-static OMX_ERRORTYPE UseBufferWrapper(
-        OMX_IN OMX_HANDLETYPE hComponent,
-        OMX_INOUT OMX_BUFFERHEADERTYPE** ppBufferHdr,
-        OMX_IN OMX_U32 nPortIndex,
-        OMX_IN OMX_PTR pAppPrivate,
-        OMX_IN OMX_U32 nSizeBytes,
-        OMX_IN OMX_U8* pBuffer) {
-    return getBase(hComponent)->useBuffer(
-            ppBufferHdr, nPortIndex, pAppPrivate, nSizeBytes, pBuffer);
-}
-
-static OMX_ERRORTYPE AllocateBufferWrapper(
-        OMX_IN OMX_HANDLETYPE hComponent,
-        OMX_INOUT OMX_BUFFERHEADERTYPE** ppBuffer,
-        OMX_IN OMX_U32 nPortIndex,
-        OMX_IN OMX_PTR pAppPrivate,
-        OMX_IN OMX_U32 nSizeBytes) {
-    return getBase(hComponent)->allocateBuffer(
-            ppBuffer, nPortIndex, pAppPrivate, nSizeBytes);
-}
-
-static OMX_ERRORTYPE FreeBufferWrapper(
-        OMX_IN  OMX_HANDLETYPE hComponent,
-        OMX_IN  OMX_U32 nPortIndex,
-        OMX_IN  OMX_BUFFERHEADERTYPE* pBuffer) {
-    return getBase(hComponent)->freeBuffer(nPortIndex, pBuffer);
-}
-
-static OMX_ERRORTYPE EmptyThisBufferWrapper(
-        OMX_IN  OMX_HANDLETYPE hComponent,
-        OMX_IN  OMX_BUFFERHEADERTYPE* pBuffer) {
-    return getBase(hComponent)->emptyThisBuffer(pBuffer);
-}
-
-static OMX_ERRORTYPE FillThisBufferWrapper(
-        OMX_IN  OMX_HANDLETYPE hComponent,
-        OMX_IN  OMX_BUFFERHEADERTYPE* pBuffer) {
-    return getBase(hComponent)->fillThisBuffer(pBuffer);
-}
-
-static OMX_ERRORTYPE ComponentDeInitWrapper(
-        OMX_IN  OMX_HANDLETYPE hComponent) {
-    delete getBase(hComponent);
-    delete (OMX_COMPONENTTYPE *)hComponent;
-
-    return OMX_ErrorNone;
-}
-
-static OMX_ERRORTYPE ComponentRoleEnumWrapper(
-        OMX_IN OMX_HANDLETYPE hComponent,
-        OMX_OUT OMX_U8 *cRole,
-        OMX_IN OMX_U32 nIndex) {
-    return getBase(hComponent)->enumerateRoles(cRole, nIndex);
-}
-
-// static
-OMX_COMPONENTTYPE *OMXComponentBase::MakeComponent(OMXComponentBase *base) {
-    OMX_COMPONENTTYPE *result = new OMX_COMPONENTTYPE;
-
-    result->nSize = sizeof(OMX_COMPONENTTYPE);
-    result->nVersion.s.nVersionMajor = 1;
-    result->nVersion.s.nVersionMinor = 0;
-    result->nVersion.s.nRevision = 0;
-    result->nVersion.s.nStep = 0;
-    result->pComponentPrivate = base;
-    result->pApplicationPrivate = NULL;
-
-    result->GetComponentVersion = NULL;
-    result->SendCommand = SendCommandWrapper;
-    result->GetParameter = GetParameterWrapper;
-    result->SetParameter = SetParameterWrapper;
-    result->GetConfig = GetConfigWrapper;
-    result->SetConfig = SetConfigWrapper;
-    result->GetExtensionIndex = GetExtensionIndexWrapper;
-    result->GetState = GetStateWrapper;
-    result->ComponentTunnelRequest = NULL;
-    result->UseBuffer = UseBufferWrapper;
-    result->AllocateBuffer = AllocateBufferWrapper;
-    result->FreeBuffer = FreeBufferWrapper;
-    result->EmptyThisBuffer = EmptyThisBufferWrapper;
-    result->FillThisBuffer = FillThisBufferWrapper;
-    result->SetCallbacks = NULL;
-    result->ComponentDeInit = ComponentDeInitWrapper;
-    result->UseEGLImage = NULL;
-    result->ComponentRoleEnum = ComponentRoleEnumWrapper;
-
-    base->setComponentHandle(result);
-
-    return result;
-}
-
-}  // namespace android
diff --git a/media/libstagefright/omx/OMXComponentBase.h b/media/libstagefright/omx/OMXComponentBase.h
deleted file mode 100644
index fd0df0b..0000000
--- a/media/libstagefright/omx/OMXComponentBase.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef OMX_COMPONENT_BASE_H_
-
-#define OMX_COMPONENT_BASE_H_
-
-#include <OMX_Component.h>
-
-namespace android {
-
-struct OMXComponentBase {
-    OMXComponentBase(
-            const OMX_CALLBACKTYPE *callbacks,
-            OMX_PTR appData);
-
-    virtual ~OMXComponentBase();
-
-    virtual OMX_ERRORTYPE sendCommand(
-            OMX_COMMANDTYPE cmd, OMX_U32 param, OMX_PTR cmdData) = 0;
-
-    virtual OMX_ERRORTYPE getParameter(
-            OMX_INDEXTYPE index, OMX_PTR params) = 0;
-
-    virtual OMX_ERRORTYPE setParameter(
-            OMX_INDEXTYPE index, const OMX_PTR params) = 0;
-
-    virtual OMX_ERRORTYPE getConfig(
-            OMX_INDEXTYPE index, OMX_PTR config) = 0;
-
-    virtual OMX_ERRORTYPE setConfig(
-            OMX_INDEXTYPE index, const OMX_PTR config) = 0;
-
-    virtual OMX_ERRORTYPE getExtensionIndex(
-            const OMX_STRING name, OMX_INDEXTYPE *index) = 0;
-
-    virtual OMX_ERRORTYPE useBuffer(
-            OMX_BUFFERHEADERTYPE **bufHdr,
-            OMX_U32 portIndex,
-            OMX_PTR appPrivate,
-            OMX_U32 size,
-            OMX_U8 *buffer) = 0;
-
-    virtual OMX_ERRORTYPE allocateBuffer(
-            OMX_BUFFERHEADERTYPE **bufHdr,
-            OMX_U32 portIndex,
-            OMX_PTR appPrivate,
-            OMX_U32 size) = 0;
-
-    virtual OMX_ERRORTYPE freeBuffer(
-            OMX_U32 portIndex,
-            OMX_BUFFERHEADERTYPE *buffer) = 0;
-
-    virtual OMX_ERRORTYPE emptyThisBuffer(OMX_BUFFERHEADERTYPE *buffer) = 0;
-    virtual OMX_ERRORTYPE fillThisBuffer(OMX_BUFFERHEADERTYPE *buffer) = 0;
-
-    virtual OMX_ERRORTYPE enumerateRoles(OMX_U8 *role, OMX_U32 index) = 0;
-
-    virtual OMX_ERRORTYPE getState(OMX_STATETYPE *state) = 0;
-
-    // Wraps a given OMXComponentBase instance into an OMX_COMPONENTTYPE
-    // as required by OpenMAX APIs.
-    static OMX_COMPONENTTYPE *MakeComponent(OMXComponentBase *base);
-
-protected:
-    void postEvent(OMX_EVENTTYPE event, OMX_U32 param1, OMX_U32 param2);
-    void postFillBufferDone(OMX_BUFFERHEADERTYPE *bufHdr);
-    void postEmptyBufferDone(OMX_BUFFERHEADERTYPE *bufHdr);
-
-private:
-    void setComponentHandle(OMX_COMPONENTTYPE *handle);
-
-    const OMX_CALLBACKTYPE *mCallbacks;
-    OMX_PTR mAppData;
-    OMX_COMPONENTTYPE *mComponentHandle;
-
-    OMXComponentBase(const OMXComponentBase &);
-    OMXComponentBase &operator=(const OMXComponentBase &);
-};
-
-}  // namespace android
-
-#endif  // OMX_COMPONENT_BASE_H_
diff --git a/media/libstagefright/omx/OMXMaster.cpp b/media/libstagefright/omx/OMXMaster.cpp
index d698939..6b6d0ab 100644
--- a/media/libstagefright/omx/OMXMaster.cpp
+++ b/media/libstagefright/omx/OMXMaster.cpp
@@ -24,7 +24,7 @@
 
 #include <dlfcn.h>
 
-#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/foundation/ADebug.h>
 
 namespace android {
 
diff --git a/media/libstagefright/omx/OMXMaster.h b/media/libstagefright/omx/OMXMaster.h
index feee1f9..6069741 100644
--- a/media/libstagefright/omx/OMXMaster.h
+++ b/media/libstagefright/omx/OMXMaster.h
@@ -18,7 +18,7 @@
 
 #define OMX_MASTER_H_
 
-#include <media/stagefright/OMXPluginBase.h>
+#include <OMXPluginBase.h>
 
 #include <utils/threads.h>
 #include <utils/KeyedVector.h>
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 8938e33..bff3def 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -24,8 +24,8 @@
 #include <OMX_Component.h>
 
 #include <binder/IMemory.h>
-#include <media/stagefright/HardwareAPI.h>
-#include <media/stagefright/MediaDebug.h>
+#include <HardwareAPI.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaErrors.h>
 
 namespace android {
@@ -91,11 +91,11 @@
 }
 
 OMXNodeInstance::~OMXNodeInstance() {
-    CHECK_EQ(mHandle, NULL);
+    CHECK(mHandle == NULL);
 }
 
 void OMXNodeInstance::setHandle(OMX::node_id node_id, OMX_HANDLETYPE handle) {
-    CHECK_EQ(mHandle, NULL);
+    CHECK(mHandle == NULL);
     mNodeID = node_id;
     mHandle = handle;
 }
diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
index 0914f32..c79e01f 100644
--- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
@@ -333,8 +333,9 @@
 
 void SimpleSoftOMXComponent::onMessageReceived(const sp<AMessage> &msg) {
     Mutex::Autolock autoLock(mLock);
-
-    switch (msg->what()) {
+    uint32_t msgType = msg->what();
+    ALOGV("msgType = %d", msgType);
+    switch (msgType) {
         case kWhatSendCommand:
         {
             int32_t cmd, param;
@@ -354,27 +355,27 @@
             CHECK(mState == OMX_StateExecuting && mTargetState == mState);
 
             bool found = false;
-            for (size_t i = 0; i < mPorts.size(); ++i) {
-                PortInfo *port = &mPorts.editItemAt(i);
+            size_t portIndex = (kWhatEmptyThisBuffer == msgType)?
+                    header->nInputPortIndex: header->nOutputPortIndex;
+            PortInfo *port = &mPorts.editItemAt(portIndex);
 
-                for (size_t j = 0; j < port->mBuffers.size(); ++j) {
-                    BufferInfo *buffer = &port->mBuffers.editItemAt(j);
+            for (size_t j = 0; j < port->mBuffers.size(); ++j) {
+                BufferInfo *buffer = &port->mBuffers.editItemAt(j);
 
-                    if (buffer->mHeader == header) {
-                        CHECK(!buffer->mOwnedByUs);
+                if (buffer->mHeader == header) {
+                    CHECK(!buffer->mOwnedByUs);
 
-                        buffer->mOwnedByUs = true;
+                    buffer->mOwnedByUs = true;
 
-                        CHECK((msg->what() == kWhatEmptyThisBuffer
-                                    && port->mDef.eDir == OMX_DirInput)
-                                || (port->mDef.eDir == OMX_DirOutput));
+                    CHECK((msgType == kWhatEmptyThisBuffer
+                            && port->mDef.eDir == OMX_DirInput)
+                            || (port->mDef.eDir == OMX_DirOutput));
 
-                        port->mQueue.push_back(buffer);
-                        onQueueFilled(i);
+                    port->mQueue.push_back(buffer);
+                    onQueueFilled(portIndex);
 
-                        found = true;
-                        break;
-                    }
+                    found = true;
+                    break;
                 }
             }
 
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index da3ae42..99ffe7d 100644
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -35,8 +35,11 @@
 
 } kComponents[] = {
     { "OMX.google.aac.decoder", "aacdec", "audio_decoder.aac" },
+    { "OMX.google.aac.encoder", "aacenc", "audio_encoder.aac" },
     { "OMX.google.amrnb.decoder", "amrdec", "audio_decoder.amrnb" },
+    { "OMX.google.amrnb.encoder", "amrnbenc", "audio_encoder.amrnb" },
     { "OMX.google.amrwb.decoder", "amrdec", "audio_decoder.amrwb" },
+    { "OMX.google.amrwb.encoder", "amrwbenc", "audio_encoder.amrwb" },
     { "OMX.google.h264.decoder", "h264dec", "video_decoder.avc" },
     { "OMX.google.g711.alaw.decoder", "g711dec", "audio_decoder.g711alaw" },
     { "OMX.google.g711.mlaw.decoder", "g711dec", "audio_decoder.g711mlaw" },
diff --git a/media/libstagefright/omx/SoftOMXPlugin.h b/media/libstagefright/omx/SoftOMXPlugin.h
index f93c323..c89cd87 100644
--- a/media/libstagefright/omx/SoftOMXPlugin.h
+++ b/media/libstagefright/omx/SoftOMXPlugin.h
@@ -19,7 +19,7 @@
 #define SOFT_OMX_PLUGIN_H_
 
 #include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/OMXPluginBase.h>
+#include <OMXPluginBase.h>
 
 namespace android {
 
diff --git a/media/libstagefright/omx/tests/Android.mk b/media/libstagefright/omx/tests/Android.mk
index bf69428..07d47a8 100644
--- a/media/libstagefright/omx/tests/Android.mk
+++ b/media/libstagefright/omx/tests/Android.mk
@@ -5,13 +5,15 @@
 	OMXHarness.cpp  \
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright libbinder libmedia libutils
+	libstagefright libbinder libmedia libutils libstagefright_foundation
 
-LOCAL_C_INCLUDES:= \
+LOCAL_C_INCLUDES := \
 	$(JNI_H_INCLUDE) \
 	frameworks/base/media/libstagefright \
-	$(TOP)/frameworks/base/include/media/stagefright/openmax
+	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_MODULE:= omx_tests
+LOCAL_MODULE := omx_tests
+
+LOCAL_MODULE_TAGS := tests
 
 include $(BUILD_EXECUTABLE)
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 8faf544..fab1771 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -26,9 +26,9 @@
 #include <binder/IServiceManager.h>
 #include <binder/MemoryDealer.h>
 #include <media/IMediaPlayerService.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDebug.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaExtractor.h>
@@ -155,7 +155,7 @@
         if (err == TIMED_OUT) {
             return err;
         }
-        CHECK_EQ(err, OK);
+        CHECK_EQ(err, (status_t)OK);
     }
 }
 
@@ -317,7 +317,7 @@
     EXPECT_SUCCESS(err, "allocatePortBuffers(input)");
 
     err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
-    CHECK_EQ(err, TIMED_OUT);
+    CHECK_EQ(err, (status_t)TIMED_OUT);
 
     Vector<Buffer> outputBuffers;
     err = allocatePortBuffers(dealer, node, 1, &outputBuffers);
@@ -412,7 +412,7 @@
     // Make sure node doesn't just transition to loaded before we are done
     // freeing all input and output buffers.
     err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
-    CHECK_EQ(err, TIMED_OUT);
+    CHECK_EQ(err, (status_t)TIMED_OUT);
 
     for (size_t i = 0; i < inputBuffers.size(); ++i) {
         err = mOMX->freeBuffer(node, 0, inputBuffers[i].mID);
@@ -420,7 +420,7 @@
     }
 
     err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT);
-    CHECK_EQ(err, TIMED_OUT);
+    CHECK_EQ(err, (status_t)TIMED_OUT);
 
     for (size_t i = 0; i < outputBuffers.size(); ++i) {
         err = mOMX->freeBuffer(node, 1, outputBuffers[i].mID);
@@ -584,7 +584,7 @@
         return UNKNOWN_ERROR;
     }
 
-    CHECK_EQ(seekSource->start(), OK);
+    CHECK_EQ(seekSource->start(), (status_t)OK);
 
     sp<MediaSource> codec = OMXCodec::Create(
             mOMX, source->getFormat(), false /* createEncoder */,
@@ -592,7 +592,7 @@
 
     CHECK(codec != NULL);
 
-    CHECK_EQ(codec->start(), OK);
+    CHECK_EQ(codec->start(), (status_t)OK);
 
     int64_t durationUs;
     CHECK(source->getFormat()->findInt64(kKeyDuration, &durationUs));
@@ -638,7 +638,7 @@
                     requestedSeekTimeUs, MediaSource::ReadOptions::SEEK_NEXT_SYNC);
 
             if (seekSource->read(&buffer, &options) != OK) {
-                CHECK_EQ(buffer, NULL);
+                CHECK(buffer == NULL);
                 actualSeekTimeUs = -1;
             } else {
                 CHECK(buffer != NULL);
@@ -659,7 +659,7 @@
             err = codec->read(&buffer, &options);
             options.clearSeekTo();
             if (err == INFO_FORMAT_CHANGED) {
-                CHECK_EQ(buffer, NULL);
+                CHECK(buffer == NULL);
                 continue;
             }
             if (err == OK) {
@@ -670,7 +670,7 @@
                     continue;
                 }
             } else {
-                CHECK_EQ(buffer, NULL);
+                CHECK(buffer == NULL);
             }
 
             break;
@@ -679,7 +679,7 @@
         if (requestedSeekTimeUs < 0) {
             // Linear read.
             if (err != OK) {
-                CHECK_EQ(buffer, NULL);
+                CHECK(buffer == NULL);
             } else {
                 CHECK(buffer != NULL);
                 buffer->release();
@@ -694,8 +694,8 @@
                    "We attempted to seek beyond EOS and expected "
                    "ERROR_END_OF_STREAM to be returned, but instead "
                    "we found some other error.");
-            CHECK_EQ(err, ERROR_END_OF_STREAM);
-            CHECK_EQ(buffer, NULL);
+            CHECK_EQ(err, (status_t)ERROR_END_OF_STREAM);
+            CHECK(buffer == NULL);
         } else {
             EXPECT(err == OK,
                    "Expected a valid buffer to be returned from "
@@ -715,7 +715,7 @@
                 buffer->release();
                 buffer = NULL;
 
-                CHECK_EQ(codec->stop(), OK);
+                CHECK_EQ(codec->stop(), (status_t)OK);
 
                 return UNKNOWN_ERROR;
             }
@@ -725,7 +725,7 @@
         }
     }
 
-    CHECK_EQ(codec->stop(), OK);
+    CHECK_EQ(codec->stop(), (status_t)OK);
 
     return OK;
 }
@@ -841,7 +841,7 @@
     srand(seed);
 
     sp<Harness> h = new Harness;
-    CHECK_EQ(h->initCheck(), OK);
+    CHECK_EQ(h->initCheck(), (status_t)OK);
 
     if (argc == 0) {
         h->testAll();
diff --git a/media/libstagefright/rtsp/AAMRAssembler.cpp b/media/libstagefright/rtsp/AAMRAssembler.cpp
index 9d72b1f..fb8abc5 100644
--- a/media/libstagefright/rtsp/AAMRAssembler.cpp
+++ b/media/libstagefright/rtsp/AAMRAssembler.cpp
@@ -211,7 +211,7 @@
     }
 
     sp<AMessage> msg = mNotifyMsg->dup();
-    msg->setObject("access-unit", accessUnit);
+    msg->setBuffer("access-unit", accessUnit);
     msg->post();
 
     queue->erase(queue->begin());
diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp b/media/libstagefright/rtsp/AAVCAssembler.cpp
index ed8b1df..7ea132e 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AAVCAssembler.cpp
@@ -345,7 +345,7 @@
     mAccessUnitDamaged = false;
 
     sp<AMessage> msg = mNotifyMsg->dup();
-    msg->setObject("access-unit", accessUnit);
+    msg->setBuffer("access-unit", accessUnit);
     msg->post();
 }
 
diff --git a/media/libstagefright/rtsp/AH263Assembler.cpp b/media/libstagefright/rtsp/AH263Assembler.cpp
index 498295c..ded70fa 100644
--- a/media/libstagefright/rtsp/AH263Assembler.cpp
+++ b/media/libstagefright/rtsp/AH263Assembler.cpp
@@ -166,7 +166,7 @@
     mAccessUnitDamaged = false;
 
     sp<AMessage> msg = mNotifyMsg->dup();
-    msg->setObject("access-unit", accessUnit);
+    msg->setBuffer("access-unit", accessUnit);
     msg->post();
 }
 
diff --git a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
index b0c7007..24c2f30 100644
--- a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
@@ -571,7 +571,7 @@
     mAccessUnitDamaged = false;
 
     sp<AMessage> msg = mNotifyMsg->dup();
-    msg->setObject("access-unit", accessUnit);
+    msg->setBuffer("access-unit", accessUnit);
     msg->post();
 }
 
diff --git a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
index 2f2e2c2..687d72b 100644
--- a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
@@ -368,7 +368,7 @@
     mAccessUnitDamaged = false;
 
     sp<AMessage> msg = mNotifyMsg->dup();
-    msg->setObject("access-unit", accessUnit);
+    msg->setBuffer("access-unit", accessUnit);
     msg->post();
 }
 
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index 8c9dd8d..44988a3 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -639,7 +639,7 @@
 void ARTPConnection::injectPacket(int index, const sp<ABuffer> &buffer) {
     sp<AMessage> msg = new AMessage(kWhatInjectPacket, id());
     msg->setInt32("index", index);
-    msg->setObject("buffer", buffer);
+    msg->setBuffer("buffer", buffer);
     msg->post();
 }
 
@@ -647,10 +647,8 @@
     int32_t index;
     CHECK(msg->findInt32("index", &index));
 
-    sp<RefBase> obj;
-    CHECK(msg->findObject("buffer", &obj));
-
-    sp<ABuffer> buffer = static_cast<ABuffer *>(obj.get());
+    sp<ABuffer> buffer;
+    CHECK(msg->findBuffer("buffer", &buffer));
 
     List<StreamInfo>::iterator it = mStreams.begin();
     while (it != mStreams.end()
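Across the rtsp assemblers and connections above, the patch replaces the generic setObject()/findObject()-plus-static_cast pattern with AMessage's typed ABuffer accessors. A minimal sketch of the pattern the code converges on; kWhatAccessUnit and the surrounding handler context are illustrative assumptions.

    // Sketch only: sender side, inside an AHandler whose looper is running.
    sp<ABuffer> accessUnit = new ABuffer(1024);
    sp<AMessage> notify = new AMessage(kWhatAccessUnit, id());
    notify->setBuffer("access-unit", accessUnit);
    notify->post();

    // Receiver side, in onMessageReceived(const sp<AMessage> &msg):
    sp<ABuffer> receivedUnit;
    CHECK(msg->findBuffer("access-unit", &receivedUnit));
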
diff --git a/media/libstagefright/rtsp/ARTPSession.cpp b/media/libstagefright/rtsp/ARTPSession.cpp
index 7a05b88..ba4e33c 100644
--- a/media/libstagefright/rtsp/ARTPSession.cpp
+++ b/media/libstagefright/rtsp/ARTPSession.cpp
@@ -145,10 +145,8 @@
                 break;
             }
 
-            sp<RefBase> obj;
-            CHECK(msg->findObject("access-unit", &obj));
-
-            sp<ABuffer> accessUnit = static_cast<ABuffer *>(obj.get());
+            sp<ABuffer> accessUnit;
+            CHECK(msg->findBuffer("access-unit", &accessUnit));
 
             uint64_t ntpTime;
             CHECK(accessUnit->meta()->findInt64(
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index 80a010e..539a888 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -612,7 +612,7 @@
 
         if (mObserveBinaryMessage != NULL) {
             sp<AMessage> notify = mObserveBinaryMessage->dup();
-            notify->setObject("buffer", buffer);
+            notify->setBuffer("buffer", buffer);
             notify->post();
         } else {
             ALOGW("received binary data, but no one cares.");
diff --git a/media/libstagefright/rtsp/ARawAudioAssembler.cpp b/media/libstagefright/rtsp/ARawAudioAssembler.cpp
index 98bee82..0da5dd2 100644
--- a/media/libstagefright/rtsp/ARawAudioAssembler.cpp
+++ b/media/libstagefright/rtsp/ARawAudioAssembler.cpp
@@ -94,7 +94,7 @@
     }
 
     sp<AMessage> msg = mNotifyMsg->dup();
-    msg->setObject("access-unit", buffer);
+    msg->setBuffer("access-unit", buffer);
     msg->post();
 
     queue->erase(queue->begin());
diff --git a/media/libstagefright/rtsp/Android.mk b/media/libstagefright/rtsp/Android.mk
index 8230347..b3bc37c 100644
--- a/media/libstagefright/rtsp/Android.mk
+++ b/media/libstagefright/rtsp/Android.mk
@@ -19,9 +19,9 @@
 
 LOCAL_C_INCLUDES:= \
 	$(JNI_H_INCLUDE) \
-	$(TOP)/frameworks/base/include/media/stagefright/openmax \
-        $(TOP)/frameworks/base/media/libstagefright/include \
-        $(TOP)/external/openssl/include
+	$(TOP)/frameworks/base/media/libstagefright/include \
+	$(TOP)/frameworks/native/include/media/openmax \
+	$(TOP)/external/openssl/include
 
 LOCAL_MODULE:= libstagefright_rtsp
 
@@ -47,7 +47,7 @@
 LOCAL_C_INCLUDES:= \
 	$(JNI_H_INCLUDE) \
 	frameworks/base/media/libstagefright \
-	$(TOP)/frameworks/base/include/media/stagefright/openmax
+	$(TOP)/frameworks/native/include/media/openmax
 
 LOCAL_CFLAGS += -Wno-multichar
 
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 2391c5c..deee30f 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -122,6 +122,7 @@
           mSetupTracksSuccessful(false),
           mSeekPending(false),
           mFirstAccessUnit(true),
+          mAllTracksHaveTime(false),
           mNTPAnchorUs(-1),
           mMediaAnchorUs(-1),
           mLastMediaTimeUs(0),
@@ -723,6 +724,7 @@
                 mSetupTracksSuccessful = false;
                 mSeekPending = false;
                 mFirstAccessUnit = true;
+                mAllTracksHaveTime = false;
                 mNTPAnchorUs = -1;
                 mMediaAnchorUs = -1;
                 mNumAccessUnitsReceived = 0;
@@ -855,10 +857,8 @@
                     return;
                 }
 
-                sp<RefBase> obj;
-                CHECK(msg->findObject("access-unit", &obj));
-
-                sp<ABuffer> accessUnit = static_cast<ABuffer *>(obj.get());
+                sp<ABuffer> accessUnit;
+                CHECK(msg->findBuffer("access-unit", &accessUnit));
 
                 uint32_t seqNum = (uint32_t)accessUnit->int32Data();
 
@@ -930,6 +930,7 @@
                     info->mNTPAnchorUs = -1;
                 }
 
+                mAllTracksHaveTime = false;
                 mNTPAnchorUs = -1;
 
                 int64_t timeUs;
@@ -1002,9 +1003,8 @@
 
             case 'biny':
             {
-                sp<RefBase> obj;
-                CHECK(msg->findObject("buffer", &obj));
-                sp<ABuffer> buffer = static_cast<ABuffer *>(obj.get());
+                sp<ABuffer> buffer;
+                CHECK(msg->findBuffer("buffer", &buffer));
 
                 int32_t index;
                 CHECK(buffer->meta()->findInt32("index", &index));
@@ -1037,6 +1037,14 @@
                         ALOGW("Never received any data, disconnecting.");
                         (new AMessage('abor', id()))->post();
                     }
+                } else {
+                    if (!mAllTracksHaveTime) {
+                        ALOGW("We received some RTCP packets, but time "
+                              "could not be established on all tracks, now "
+                              "using fake timestamps");
+
+                        fakeTimestamps();
+                    }
                 }
                 break;
             }
@@ -1211,6 +1219,7 @@
     bool mSeekPending;
     bool mFirstAccessUnit;
 
+    bool mAllTracksHaveTime;
     int64_t mNTPAnchorUs;
     int64_t mMediaAnchorUs;
     int64_t mLastMediaTimeUs;
@@ -1357,6 +1366,7 @@
     }
 
     void fakeTimestamps() {
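+        // Clear the global NTP anchor so it is re-derived from the fake times below.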
+        mNTPAnchorUs = -1ll;
         for (size_t i = 0; i < mTracks.size(); ++i) {
             onTimeUpdate(i, 0, 0ll);
         }
@@ -1377,6 +1387,21 @@
             mNTPAnchorUs = ntpTimeUs;
             mMediaAnchorUs = mLastMediaTimeUs;
         }
+
+        if (!mAllTracksHaveTime) {
+            bool allTracksHaveTime = true;
+            for (size_t i = 0; i < mTracks.size(); ++i) {
+                TrackInfo *track = &mTracks.editItemAt(i);
+                if (track->mNTPAnchorUs < 0) {
+                    allTracksHaveTime = false;
+                    break;
+                }
+            }
+            if (allTracksHaveTime) {
+                mAllTracksHaveTime = true;
+                ALOGI("Time now established for all tracks.");
+            }
+        }
     }
 
     void onAccessUnitComplete(
@@ -1403,7 +1428,7 @@
 
         TrackInfo *track = &mTracks.editItemAt(trackIndex);
 
-        if (mNTPAnchorUs < 0 || mMediaAnchorUs < 0 || track->mNTPAnchorUs < 0) {
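+        // Buffer access units until a time anchor (real or faked) exists for every track.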
+        if (!mAllTracksHaveTime) {
             ALOGV("storing accessUnit, no time established yet");
             track->mPackets.push_back(accessUnit);
             return;
@@ -1460,7 +1485,7 @@
         sp<AMessage> msg = mNotify->dup();
         msg->setInt32("what", kWhatAccessUnit);
         msg->setSize("trackIndex", trackIndex);
-        msg->setObject("accessUnit", accessUnit);
+        msg->setBuffer("accessUnit", accessUnit);
         msg->post();
     }
 
diff --git a/media/libstagefright/tests/Android.mk b/media/libstagefright/tests/Android.mk
index 357feb1..656a630 100644
--- a/media/libstagefright/tests/Android.mk
+++ b/media/libstagefright/tests/Android.mk
@@ -38,7 +38,7 @@
     external/stlport/stlport \
 	frameworks/base/media/libstagefright \
 	frameworks/base/media/libstagefright/include \
-	$(TOP)/frameworks/base/include/media/stagefright/openmax \
+	$(TOP)/frameworks/native/include/media/openmax \
 
 include $(BUILD_EXECUTABLE)
 
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
index 76b507f..fe77cf7 100644
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -26,16 +26,16 @@
 #include <media/stagefright/SurfaceMediaSource.h>
 #include <media/mediarecorder.h>
 
-#include <gui/SurfaceTextureClient.h>
 #include <ui/GraphicBuffer.h>
-#include <surfaceflinger/ISurfaceComposer.h>
-#include <surfaceflinger/Surface.h>
-#include <surfaceflinger/SurfaceComposerClient.h>
+#include <gui/SurfaceTextureClient.h>
+#include <gui/ISurfaceComposer.h>
+#include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
 
 #include <binder/ProcessState.h>
 #include <ui/FramebufferNativeWindow.h>
 
-#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaBufferGroup.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
@@ -107,7 +107,7 @@
                     window.get(), NULL);
         } else {
             ALOGV("No actual display. Choosing EGLSurface based on SurfaceMediaSource");
-            sp<SurfaceMediaSource> sms = new SurfaceMediaSource(
+            sp<ISurfaceTexture> sms = new SurfaceMediaSource(
                     getSurfaceWidth(), getSurfaceHeight());
             sp<SurfaceTextureClient> stc = new SurfaceTextureClient(sms);
             sp<ANativeWindow> window = stc;
@@ -360,7 +360,8 @@
         android::ProcessState::self()->startThreadPool();
         mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
         mSMS->setSynchronousMode(true);
-        mSTC = new SurfaceTextureClient(mSMS);
+        // Manual cast is required to avoid constructor ambiguity
+        mSTC = new SurfaceTextureClient(static_cast<sp<ISurfaceTexture> >(mSMS));
         mANW = mSTC;
     }
 
@@ -395,7 +396,7 @@
         ALOGV("SMS-GLTest::SetUp()");
         android::ProcessState::self()->startThreadPool();
         mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
-        mSTC = new SurfaceTextureClient(mSMS);
+        mSTC = new SurfaceTextureClient(static_cast<sp<ISurfaceTexture> >(mSMS));
         mANW = mSTC;
 
         // Doing the setup related to the GL Side
@@ -475,7 +476,7 @@
     mr->setVideoFrameRate(fps);
     mr->prepare();
     ALOGV("Starting MediaRecorder...");
-    CHECK_EQ(OK, mr->start());
+    CHECK_EQ((status_t)OK, mr->start());
     return mr;
 }
 
@@ -757,7 +758,7 @@
 
     ASSERT_EQ(NO_ERROR, native_window_api_disconnect(mANW.get(), NATIVE_WINDOW_API_CPU));
     ALOGV("Stopping MediaRecorder...");
-    CHECK_EQ(OK, mr->stop());
+    CHECK_EQ((status_t)OK, mr->stop());
     mr.clear();
     close(fd);
 }
@@ -773,7 +774,7 @@
     ALOGV("Verify creating a surface w/ right config + dummy writer*********");
 
     mSMS = new SurfaceMediaSource(mYuvTexWidth, mYuvTexHeight);
-    mSTC = new SurfaceTextureClient(mSMS);
+    mSTC = new SurfaceTextureClient(static_cast<sp<ISurfaceTexture> >(mSMS));
     mANW = mSTC;
 
     DummyRecorder writer(mSMS);
@@ -886,7 +887,7 @@
     mEglSurface = EGL_NO_SURFACE;
 
     ALOGV("Stopping MediaRecorder...");
-    CHECK_EQ(OK, mr->stop());
+    CHECK_EQ((status_t)OK, mr->stop());
     mr.clear();
     close(fd);
 }
@@ -929,7 +930,7 @@
     mEglSurface = EGL_NO_SURFACE;
 
     ALOGV("Stopping MediaRecorder...");
-    CHECK_EQ(OK, mr->stop());
+    CHECK_EQ((status_t)OK, mr->stop());
     mr.clear();
     close(fd);
 }
diff --git a/media/libstagefright/timedtext/Android.mk b/media/libstagefright/timedtext/Android.mk
index 59d0e15..d2d5f7b 100644
--- a/media/libstagefright/timedtext/Android.mk
+++ b/media/libstagefright/timedtext/Android.mk
@@ -3,14 +3,17 @@
 
 LOCAL_SRC_FILES:=                 \
         TextDescriptions.cpp      \
-        TimedTextParser.cpp       \
+        TimedTextDriver.cpp       \
+        TimedText3GPPSource.cpp   \
+        TimedTextSource.cpp       \
+        TimedTextSRTSource.cpp    \
         TimedTextPlayer.cpp
 
 LOCAL_CFLAGS += -Wno-multichar
 LOCAL_C_INCLUDES:= \
         $(JNI_H_INCLUDE) \
-        $(TOP)/frameworks/base/media/libstagefright \
-        $(TOP)/frameworks/base/include/media/stagefright/openmax
+        $(TOP)/frameworks/base/include/media/stagefright/timedtext \
+        $(TOP)/frameworks/base/media/libstagefright
 
 LOCAL_MODULE:= libstagefright_timedtext
 
diff --git a/media/libstagefright/timedtext/TimedText3GPPSource.cpp b/media/libstagefright/timedtext/TimedText3GPPSource.cpp
new file mode 100644
index 0000000..4854121
--- /dev/null
+++ b/media/libstagefright/timedtext/TimedText3GPPSource.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TimedText3GPPSource"
+#include <utils/Log.h>
+
+#include <binder/Parcel.h>
+#include <media/stagefright/foundation/ADebug.h>  // CHECK_XX macro
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>  // for MEDIA_MIMETYPE_xxx
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+
+#include "TimedText3GPPSource.h"
+#include "TextDescriptions.h"
+
+namespace android {
+
+TimedText3GPPSource::TimedText3GPPSource(const sp<MediaSource>& mediaSource)
+    : mSource(mediaSource) {
+}
+
+TimedText3GPPSource::~TimedText3GPPSource() {
+}
+
+status_t TimedText3GPPSource::read(
+        int64_t *startTimeUs, int64_t *endTimeUs, Parcel *parcel,
+        const MediaSource::ReadOptions *options) {
+    MediaBuffer *textBuffer = NULL;
+    status_t err = mSource->read(&textBuffer, options);
+    if (err != OK) {
+        return err;
+    }
+    CHECK(textBuffer != NULL);
+    textBuffer->meta_data()->findInt64(kKeyTime, startTimeUs);
+    CHECK_GE(*startTimeUs, 0);
+    extractAndAppendLocalDescriptions(*startTimeUs, textBuffer, parcel);
+    textBuffer->release();
+    // endTimeUs is a dummy parameter for the 3GPP timed text format.
+    // Set it to a negative value to mark it as unavailable.
+    *endTimeUs = -1;
+    return OK;
+}
+
+// Each text sample consists of a string of text, optionally with a sample
+// modifier description. The modifier description can specify a new text
+// style for the string of text. These descriptions are present only if
+// they are needed. This method extracts the modifier description and
+// appends it to the end of the text.
+status_t TimedText3GPPSource::extractAndAppendLocalDescriptions(
+        int64_t timeUs, const MediaBuffer *textBuffer, Parcel *parcel) {
+    const void *data;
+    size_t size = 0;
+    int32_t flag = TextDescriptions::LOCAL_DESCRIPTIONS;
+
+    const char *mime;
+    CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime));
+    CHECK(strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) == 0);
+
+    data = textBuffer->data();
+    size = textBuffer->size();
+
+    if (size > 0) {
+        parcel->freeData();
+        flag |= TextDescriptions::IN_BAND_TEXT_3GPP;
+        return TextDescriptions::getParcelOfDescriptions(
+                (const uint8_t *)data, size, flag, timeUs / 1000, parcel);
+    }
+    return OK;
+}
+
+// Extracts and sends the global text descriptions for all the text samples
+// in the text track or text file.
+// TODO: Send an error message to the application via notifyListener()?
+status_t TimedText3GPPSource::extractGlobalDescriptions(Parcel *parcel) {
+    const void *data;
+    size_t size = 0;
+    int32_t flag = TextDescriptions::GLOBAL_DESCRIPTIONS;
+
+    const char *mime;
+    CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime));
+    CHECK(strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) == 0);
+
+    uint32_t type;
+    // get the 'tx3g' box content. This box contains the text descriptions
+    // used to render the text track
+    if (!mSource->getFormat()->findData(
+            kKeyTextFormatData, &type, &data, &size)) {
+        return ERROR_MALFORMED;
+    }
+
+    if (size > 0) {
+        flag |= TextDescriptions::IN_BAND_TEXT_3GPP;
+        return TextDescriptions::getParcelOfDescriptions(
+                (const uint8_t *)data, size, flag, 0, parcel);
+    }
+    return OK;
+}
+
+sp<MetaData> TimedText3GPPSource::getFormat() {
+    return mSource->getFormat();
+}
+
+}  // namespace android
diff --git a/media/libstagefright/timedtext/TimedText3GPPSource.h b/media/libstagefright/timedtext/TimedText3GPPSource.h
new file mode 100644
index 0000000..4170940
--- /dev/null
+++ b/media/libstagefright/timedtext/TimedText3GPPSource.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TIMED_TEXT_3GPP_SOURCE_H_
+#define TIMED_TEXT_3GPP_SOURCE_H_
+
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+
+#include "TimedTextSource.h"
+
+namespace android {
+
+class MediaBuffer;
+class Parcel;
+
+class TimedText3GPPSource : public TimedTextSource {
+public:
+    TimedText3GPPSource(const sp<MediaSource>& mediaSource);
+    virtual status_t start() { return mSource->start(); }
+    virtual status_t stop() { return mSource->stop(); }
+    virtual status_t read(
+            int64_t *startTimeUs,
+            int64_t *endTimeUs,
+            Parcel *parcel,
+            const MediaSource::ReadOptions *options = NULL);
+    virtual status_t extractGlobalDescriptions(Parcel *parcel);
+    virtual sp<MetaData> getFormat();
+
+protected:
+    virtual ~TimedText3GPPSource();
+
+private:
+    sp<MediaSource> mSource;
+
+    status_t extractAndAppendLocalDescriptions(
+            int64_t timeUs, const MediaBuffer *textBuffer, Parcel *parcel);
+
+    DISALLOW_EVIL_CONSTRUCTORS(TimedText3GPPSource);
+};
+
+}  // namespace android
+
+#endif  // TIMED_TEXT_3GPP_SOURCE_H_
diff --git a/media/libstagefright/timedtext/TimedTextDriver.cpp b/media/libstagefright/timedtext/TimedTextDriver.cpp
new file mode 100644
index 0000000..8ee15f8
--- /dev/null
+++ b/media/libstagefright/timedtext/TimedTextDriver.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TimedTextDriver"
+#include <utils/Log.h>
+
+#include <binder/IPCThreadState.h>
+
+#include <media/mediaplayer.h>
+#include <media/MediaPlayerInterface.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/timedtext/TimedTextDriver.h>
+
+#include "TextDescriptions.h"
+#include "TimedTextPlayer.h"
+#include "TimedTextSource.h"
+
+namespace android {
+
+TimedTextDriver::TimedTextDriver(
+        const wp<MediaPlayerBase> &listener)
+    : mLooper(new ALooper),
+      mListener(listener),
+      mState(UNINITIALIZED) {
+    mLooper->setName("TimedTextDriver");
+    mLooper->start();
+    mPlayer = new TimedTextPlayer(listener);
+    mLooper->registerHandler(mPlayer);
+}
+
+TimedTextDriver::~TimedTextDriver() {
+    mTextSourceVector.clear();
+    mLooper->stop();
+}
+
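+// Must be called with mLock held, per the "_l" suffix convention.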
+status_t TimedTextDriver::selectTrack_l(int32_t index) {
+    if (index >= (int)(mTextSourceVector.size())) {
+        return BAD_VALUE;
+    }
+
+    sp<TimedTextSource> source;
+    source = mTextSourceVector.itemAt(index);
+    mPlayer->setDataSource(source);
+    if (mState == UNINITIALIZED) {
+        mState = PAUSED;
+    }
+    mCurrentTrackIndex = index;
+    return OK;
+}
+
+status_t TimedTextDriver::start() {
+    Mutex::Autolock autoLock(mLock);
+    switch (mState) {
+        case UNINITIALIZED:
+            return INVALID_OPERATION;
+        case PLAYING:
+            return OK;
+        case PAUSED:
+            mPlayer->start();
+            break;
+        default:
+            TRESPASS();
+    }
+    mState = PLAYING;
+    return OK;
+}
+
+// TODO: Test if pause() works properly.
+// Scenario 1: start - pause - resume
+// Scenario 2: start - seek
+// Scenario 3: start - pause - seek - resume
+status_t TimedTextDriver::pause() {
+    Mutex::Autolock autoLock(mLock);
+    switch (mState) {
+        case UNINITIALIZED:
+            return INVALID_OPERATION;
+        case PLAYING:
+            mPlayer->pause();
+            break;
+        case PAUSED:
+            return OK;
+        default:
+            TRESPASS();
+    }
+    mState = PAUSED;
+    return OK;
+}
+
+status_t TimedTextDriver::selectTrack(int32_t index) {
+    status_t ret = OK;
+    Mutex::Autolock autoLock(mLock);
+    switch (mState) {
+        case UNINITIALIZED:
+        case PAUSED:
+            ret = selectTrack_l(index);
+            break;
+        case PLAYING:
+            mPlayer->pause();
+            ret = selectTrack_l(index);
+            if (ret != OK) {
+                break;
+            }
+            mPlayer->start();
+            break;
+        default:
+            TRESPASS();
+    }
+    return ret;
+}
+
+status_t TimedTextDriver::unselectTrack(int32_t index) {
+    if (mCurrentTrackIndex != index) {
+        return INVALID_OPERATION;
+    }
+    status_t err = pause();
+    if (err != OK) {
+        return err;
+    }
+    Mutex::Autolock autoLock(mLock);
+    mState = UNINITIALIZED;
+    return OK;
+}
+
+status_t TimedTextDriver::seekToAsync(int64_t timeUs) {
+    mPlayer->seekToAsync(timeUs);
+    return OK;
+}
+
+status_t TimedTextDriver::addInBandTextSource(
+        const sp<MediaSource>& mediaSource) {
+    sp<TimedTextSource> source =
+            TimedTextSource::CreateTimedTextSource(mediaSource);
+    if (source == NULL) {
+        return ERROR_UNSUPPORTED;
+    }
+    Mutex::Autolock autoLock(mLock);
+    mTextSourceVector.add(source);
+    return OK;
+}
+
+status_t TimedTextDriver::addOutOfBandTextSource(
+        const char *uri, const char *mimeType) {
+    // TODO: Define "TimedTextSource::CreateFromURI(uri)"
+    // and move the lines below there?
+
+    // To support local subtitle file only for now
+    if (strncasecmp("file://", uri, 7)) {
+        return ERROR_UNSUPPORTED;
+    }
+    sp<DataSource> dataSource =
+            DataSource::CreateFromURI(uri);
+    if (dataSource == NULL) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    sp<TimedTextSource> source;
+    if (strcasecmp(mimeType, MEDIA_MIMETYPE_TEXT_SUBRIP) == 0) {
+        source = TimedTextSource::CreateTimedTextSource(
+                dataSource, TimedTextSource::OUT_OF_BAND_FILE_SRT);
+    }
+
+    if (source == NULL) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    Mutex::Autolock autoLock(mLock);
+    mTextSourceVector.add(source);
+    return OK;
+}
+
+status_t TimedTextDriver::addOutOfBandTextSource(
+        int fd, off64_t offset, size_t length, const char *mimeType) {
+    // Not supported yet. This requires DataSource::sniff to detect various text
+    // formats such as srt/smi/ttml.
+    return ERROR_UNSUPPORTED;
+}
+
+void TimedTextDriver::getTrackInfo(Parcel *parcel) {
+    Mutex::Autolock autoLock(mLock);
+    Vector<sp<TimedTextSource> >::const_iterator iter;
+    parcel->writeInt32(mTextSourceVector.size());
+    for (iter = mTextSourceVector.begin();
+         iter != mTextSourceVector.end(); ++iter) {
+        sp<MetaData> meta = (*iter)->getFormat();
+        if (meta != NULL) {
+            // There are two fields.
+            parcel->writeInt32(2);
+
+            // track type.
+            parcel->writeInt32(MEDIA_TRACK_TYPE_TIMEDTEXT);
+
+            const char *lang = "und";
+            meta->findCString(kKeyMediaLanguage, &lang);
+            parcel->writeString16(String16(lang));
+        } else {
+            parcel->writeInt32(0);
+        }
+    }
+}
+
+}  // namespace android
diff --git a/media/libstagefright/timedtext/TimedTextParser.cpp b/media/libstagefright/timedtext/TimedTextParser.cpp
deleted file mode 100644
index 0bada16..0000000
--- a/media/libstagefright/timedtext/TimedTextParser.cpp
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "TimedTextParser.h"
-#include <media/stagefright/DataSource.h>
-
-namespace android {
-
-TimedTextParser::TimedTextParser()
-    : mDataSource(NULL),
-      mOffset(0),
-      mIndex(0) {
-}
-
-TimedTextParser::~TimedTextParser() {
-    reset();
-}
-
-status_t TimedTextParser::init(
-        const sp<DataSource> &dataSource, FileType fileType) {
-    mDataSource = dataSource;
-    mFileType = fileType;
-
-    status_t err;
-    if ((err = scanFile()) != OK) {
-        reset();
-        return err;
-    }
-
-    return OK;
-}
-
-void TimedTextParser::reset() {
-    mDataSource.clear();
-    mTextVector.clear();
-    mOffset = 0;
-    mIndex = 0;
-}
-
-// scan the text file to get start/stop time and the
-// offset of each piece of text content
-status_t TimedTextParser::scanFile() {
-    if (mFileType != OUT_OF_BAND_FILE_SRT) {
-        return ERROR_UNSUPPORTED;
-    }
-
-    off64_t offset = 0;
-    int64_t startTimeUs;
-    bool endOfFile = false;
-
-    while (!endOfFile) {
-        TextInfo info;
-        status_t err = getNextInSrtFileFormat(&offset, &startTimeUs, &info);
-
-        if (err != OK) {
-            if (err == ERROR_END_OF_STREAM) {
-                endOfFile = true;
-            } else {
-                return err;
-            }
-        } else {
-            mTextVector.add(startTimeUs, info);
-        }
-    }
-
-    if (mTextVector.isEmpty()) {
-        return ERROR_MALFORMED;
-    }
-    return OK;
-}
-
-// read one line started from *offset and store it into data.
-status_t TimedTextParser::readNextLine(off64_t *offset, AString *data) {
-    char character;
-
-    data->clear();
-
-    while (true) {
-        ssize_t err;
-        if ((err = mDataSource->readAt(*offset, &character, 1)) < 1) {
-            if (err == 0) {
-                return ERROR_END_OF_STREAM;
-            }
-            return ERROR_IO;
-        }
-
-        (*offset) ++;
-
-        // a line could end with CR, LF or CR + LF
-        if (character == 10) {
-            break;
-        } else if (character == 13) {
-            if ((err = mDataSource->readAt(*offset, &character, 1)) < 1) {
-                if (err == 0) { // end of the stream
-                    return OK;
-                }
-                return ERROR_IO;
-            }
-
-            (*offset) ++;
-
-            if (character != 10) {
-                (*offset) --;
-            }
-            break;
-        }
-
-        data->append(character);
-    }
-
-    return OK;
-}
-
-/* SRT format:
- *  Subtitle number
- *  Start time --> End time
- *  Text of subtitle (one or more lines)
- *  Blank line
- *
- * .srt file example:
- *  1
- *  00:00:20,000 --> 00:00:24,400
- *  Altocumulus clouds occur between six thousand
- *
- *  2
- *  00:00:24,600 --> 00:00:27,800
- *  and twenty thousand feet above ground level.
- */
-status_t TimedTextParser::getNextInSrtFileFormat(
-        off64_t *offset, int64_t *startTimeUs, TextInfo *info) {
-    AString data;
-    status_t err;
-    if ((err = readNextLine(offset, &data)) != OK) {
-        return err;
-    }
-
-    // to skip the first line
-    if ((err = readNextLine(offset, &data)) != OK) {
-        return err;
-    }
-
-    int hour1, hour2, min1, min2, sec1, sec2, msec1, msec2;
-    // the start time format is: hours:minutes:seconds,milliseconds
-    // 00:00:24,600 --> 00:00:27,800
-    if (sscanf(data.c_str(), "%02d:%02d:%02d,%03d --> %02d:%02d:%02d,%03d",
-                &hour1, &min1, &sec1, &msec1, &hour2, &min2, &sec2, &msec2) != 8) {
-        return ERROR_MALFORMED;
-    }
-
-    *startTimeUs = ((hour1 * 3600 + min1 * 60 + sec1) * 1000 + msec1) * 1000ll;
-    info->endTimeUs = ((hour2 * 3600 + min2 * 60 + sec2) * 1000 + msec2) * 1000ll;
-    if (info->endTimeUs <= *startTimeUs) {
-        return ERROR_MALFORMED;
-    }
-
-    info->offset = *offset;
-
-    bool needMoreData = true;
-    while (needMoreData) {
-        if ((err = readNextLine(offset, &data)) != OK) {
-            if (err == ERROR_END_OF_STREAM) {
-                needMoreData = false;
-            } else {
-                return err;
-            }
-        }
-
-        if (needMoreData) {
-            data.trim();
-            if (data.empty()) {
-                // it's an empty line used to separate two subtitles
-                needMoreData = false;
-            }
-        }
-    }
-
-    info->textLen = *offset - info->offset;
-
-    return OK;
-}
-
-status_t TimedTextParser::getText(
-        AString *text, int64_t *startTimeUs, int64_t *endTimeUs,
-        const MediaSource::ReadOptions *options) {
-    Mutex::Autolock autoLock(mLock);
-
-    text->clear();
-
-    int64_t seekTimeUs;
-    MediaSource::ReadOptions::SeekMode mode;
-    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
-        int64_t lastEndTimeUs = mTextVector.valueAt(mTextVector.size() - 1).endTimeUs;
-        int64_t firstStartTimeUs = mTextVector.keyAt(0);
-
-        if (seekTimeUs < 0 || seekTimeUs > lastEndTimeUs) {
-            return ERROR_OUT_OF_RANGE;
-        } else if (seekTimeUs < firstStartTimeUs) {
-            mIndex = 0;
-        } else {
-            // binary search
-            ssize_t low = 0;
-            ssize_t high = mTextVector.size() - 1;
-            ssize_t mid = 0;
-            int64_t currTimeUs;
-
-            while (low <= high) {
-                mid = low + (high - low)/2;
-                currTimeUs = mTextVector.keyAt(mid);
-                const int diff = currTimeUs - seekTimeUs;
-
-                if (diff == 0) {
-                    break;
-                } else if (diff < 0) {
-                    low = mid + 1;
-                } else {
-                    if ((high == mid + 1)
-                            && (seekTimeUs < mTextVector.keyAt(high))) {
-                        break;
-                    }
-                    high = mid - 1;
-                }
-            }
-
-            mIndex = mid;
-        }
-    }
-
-    TextInfo info = mTextVector.valueAt(mIndex);
-    *startTimeUs = mTextVector.keyAt(mIndex);
-    *endTimeUs = info.endTimeUs;
-    mIndex ++;
-
-    char *str = new char[info.textLen];
-    if (mDataSource->readAt(info.offset, str, info.textLen) < info.textLen) {
-        delete[] str;
-        return ERROR_IO;
-    }
-
-    text->append(str, info.textLen);
-    delete[] str;
-    return OK;
-}
-
-}  // namespace android
diff --git a/media/libstagefright/timedtext/TimedTextParser.h b/media/libstagefright/timedtext/TimedTextParser.h
deleted file mode 100644
index 44774c2..0000000
--- a/media/libstagefright/timedtext/TimedTextParser.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef TIMED_TEXT_PARSER_H_
-
-#define TIMED_TEXT_PARSER_H_
-
-#include <media/MediaPlayerInterface.h>
-#include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/MediaSource.h>
-
-namespace android {
-
-class DataSource;
-
-class TimedTextParser : public RefBase {
-public:
-    TimedTextParser();
-    virtual ~TimedTextParser();
-
-    enum FileType {
-        OUT_OF_BAND_FILE_SRT = 1,
-    };
-
-    status_t getText(AString *text, int64_t *startTimeUs, int64_t *endTimeUs,
-                     const MediaSource::ReadOptions *options = NULL);
-    status_t init(const sp<DataSource> &dataSource, FileType fileType);
-    void reset();
-
-private:
-    Mutex mLock;
-
-    sp<DataSource> mDataSource;
-    off64_t mOffset;
-
-    struct TextInfo {
-        int64_t endTimeUs;
-        // the offset of the text in the original file
-        off64_t offset;
-        int textLen;
-    };
-
-    int mIndex;
-    FileType mFileType;
-
-    // the key indicated the start time of the text
-    KeyedVector<int64_t, TextInfo> mTextVector;
-
-    status_t getNextInSrtFileFormat(
-            off64_t *offset, int64_t *startTimeUs, TextInfo *info);
-    status_t readNextLine(off64_t *offset, AString *data);
-
-    status_t scanFile();
-
-    DISALLOW_EVIL_CONSTRUCTORS(TimedTextParser);
-};
-
-}  // namespace android
-
-#endif  // TIMED_TEXT_PARSER_H_
-
diff --git a/media/libstagefright/timedtext/TimedTextPlayer.cpp b/media/libstagefright/timedtext/TimedTextPlayer.cpp
index 3014b0b..917c62a 100644
--- a/media/libstagefright/timedtext/TimedTextPlayer.cpp
+++ b/media/libstagefright/timedtext/TimedTextPlayer.cpp
@@ -1,5 +1,5 @@
  /*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2012 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,399 +18,195 @@
 #define LOG_TAG "TimedTextPlayer"
 #include <utils/Log.h>
 
-#include <binder/IPCThreadState.h>
-
-#include <media/stagefright/MediaDebug.h>
-#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/timedtext/TimedTextDriver.h>
 #include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/FileSource.h>
-#include <media/stagefright/Utils.h>
+#include <media/MediaPlayerInterface.h>
 
-#include "include/AwesomePlayer.h"
 #include "TimedTextPlayer.h"
-#include "TimedTextParser.h"
-#include "TextDescriptions.h"
+
+#include "TimedTextSource.h"
 
 namespace android {
 
-struct TimedTextEvent : public TimedEventQueue::Event {
-    TimedTextEvent(
-            TimedTextPlayer *player,
-            void (TimedTextPlayer::*method)())
-        : mPlayer(player),
-          mMethod(method) {
-    }
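+// Subtitles are posted kAdjustmentProcessingTimeUs early to absorb processing delay;
+// reads that return WOULD_BLOCK are retried after kWaitTimeUsToRetryRead.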
+static const int64_t kAdjustmentProcessingTimeUs = 100000ll;
+static const int64_t kWaitTimeUsToRetryRead = 100000ll;
 
-protected:
-    virtual ~TimedTextEvent() {}
-
-    virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) {
-        (mPlayer->*mMethod)();
-    }
-
-private:
-    TimedTextPlayer *mPlayer;
-    void (TimedTextPlayer::*mMethod)();
-
-    TimedTextEvent(const TimedTextEvent &);
-    TimedTextEvent &operator=(const TimedTextEvent &);
-};
-
-TimedTextPlayer::TimedTextPlayer(
-        AwesomePlayer *observer,
-        const wp<MediaPlayerBase> &listener,
-        TimedEventQueue *queue)
-    : mSource(NULL),
-      mOutOfBandSource(NULL),
-      mSeekTimeUs(0),
-      mStarted(false),
-      mTextEventPending(false),
-      mQueue(queue),
-      mListener(listener),
-      mObserver(observer),
-      mTextBuffer(NULL),
-      mTextParser(NULL),
-      mTextType(kNoText) {
-    mTextEvent = new TimedTextEvent(this, &TimedTextPlayer::onTextEvent);
+TimedTextPlayer::TimedTextPlayer(const wp<MediaPlayerBase> &listener)
+    : mListener(listener),
+      mSource(NULL),
+      mSendSubtitleGeneration(0) {
 }
 
 TimedTextPlayer::~TimedTextPlayer() {
-    if (mStarted) {
-        reset();
+    if (mSource != NULL) {
+        mSource->stop();
+        mSource.clear();
+        mSource = NULL;
     }
-
-    mTextTrackVector.clear();
-    mTextOutOfBandVector.clear();
 }
 
-status_t TimedTextPlayer::start(uint8_t index) {
-    CHECK(!mStarted);
-
-    if (index >=
-            mTextTrackVector.size() + mTextOutOfBandVector.size()) {
-        ALOGE("Incorrect text track index: %d", index);
-        return BAD_VALUE;
-    }
-
-    status_t err;
-    if (index < mTextTrackVector.size()) { // start an in-band text
-        mSource = mTextTrackVector.itemAt(index);
-
-        err = mSource->start();
-
-        if (err != OK) {
-            return err;
-        }
-        mTextType = kInBandText;
-    } else { // start an out-of-band text
-        OutOfBandText text =
-            mTextOutOfBandVector.itemAt(index - mTextTrackVector.size());
-
-        mOutOfBandSource = text.source;
-        TimedTextParser::FileType fileType = text.type;
-
-        if (mTextParser == NULL) {
-            mTextParser = new TimedTextParser();
-        }
-
-        if ((err = mTextParser->init(mOutOfBandSource, fileType)) != OK) {
-            return err;
-        }
-        mTextType = kOutOfBandText;
-    }
-
-    // send sample description format
-    if ((err = extractAndSendGlobalDescriptions()) != OK) {
-        return err;
-    }
-
-    int64_t positionUs;
-    mObserver->getPosition(&positionUs);
-    seekTo(positionUs);
-
-    postTextEvent();
-
-    mStarted = true;
-
-    return OK;
+void TimedTextPlayer::start() {
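+    // A negative seek time makes the handler read from the current playback position.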
+    sp<AMessage> msg = new AMessage(kWhatSeek, id());
+    msg->setInt64("seekTimeUs", -1);
+    msg->post();
 }
 
 void TimedTextPlayer::pause() {
-    CHECK(mStarted);
-
-    cancelTextEvent();
+    (new AMessage(kWhatPause, id()))->post();
 }
 
-void TimedTextPlayer::resume() {
-    CHECK(mStarted);
-
-    postTextEvent();
+void TimedTextPlayer::seekToAsync(int64_t timeUs) {
+    sp<AMessage> msg = new AMessage(kWhatSeek, id());
+    msg->setInt64("seekTimeUs", timeUs);
+    msg->post();
 }
 
-void TimedTextPlayer::reset() {
-    CHECK(mStarted);
+void TimedTextPlayer::setDataSource(sp<TimedTextSource> source) {
+    sp<AMessage> msg = new AMessage(kWhatSetSource, id());
+    msg->setObject("source", source);
+    msg->post();
+}
 
-    // send an empty text to clear the screen
-    notifyListener(MEDIA_TIMED_TEXT);
-
-    cancelTextEvent();
-
-    mSeeking = false;
-    mStarted = false;
-
-    if (mTextType == kInBandText) {
-        if (mTextBuffer != NULL) {
-            mTextBuffer->release();
-            mTextBuffer = NULL;
+void TimedTextPlayer::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatPause: {
+            mSendSubtitleGeneration++;
+            break;
         }
-
-        if (mSource != NULL) {
-            mSource->stop();
-            mSource.clear();
-            mSource = NULL;
+        case kWhatSeek: {
+            int64_t seekTimeUs = 0;
+            msg->findInt64("seekTimeUs", &seekTimeUs);
+            if (seekTimeUs < 0) {
+                sp<MediaPlayerBase> listener = mListener.promote();
+                if (listener != NULL) {
+                    int32_t positionMs = 0;
+                    listener->getCurrentPosition(&positionMs);
+                    seekTimeUs = positionMs * 1000ll;
+                }
+            }
+            doSeekAndRead(seekTimeUs);
+            break;
         }
-    } else {
-        if (mTextParser != NULL) {
-            mTextParser.clear();
-            mTextParser = NULL;
+        case kWhatSendSubtitle: {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+            if (generation != mSendSubtitleGeneration) {
+                // Drop obsolete msg.
+                break;
+            }
+            sp<RefBase> obj;
+            msg->findObject("subtitle", &obj);
+            if (obj != NULL) {
+                sp<ParcelEvent> parcelEvent;
+                parcelEvent = static_cast<ParcelEvent*>(obj.get());
+                notifyListener(&(parcelEvent->parcel));
+            } else {
+                notifyListener();
+            }
+            doRead();
+            break;
         }
-        if (mOutOfBandSource != NULL) {
-            mOutOfBandSource.clear();
-            mOutOfBandSource = NULL;
+        case kWhatSetSource: {
+            sp<RefBase> obj;
+            msg->findObject("source", &obj);
+            if (obj == NULL) break;
+            if (mSource != NULL) {
+                mSource->stop();
+            }
+            mSource = static_cast<TimedTextSource*>(obj.get());
+            status_t err = mSource->start();
+            if (err != OK) {
+                notifyError(err);
+                break;
+            }
+            Parcel parcel;
+            err = mSource->extractGlobalDescriptions(&parcel);
+            if (err != OK) {
+                notifyError(err);
+                break;
+            }
+            notifyListener(&parcel);
+            break;
         }
     }
 }
 
-status_t TimedTextPlayer::seekTo(int64_t time_us) {
-    Mutex::Autolock autoLock(mLock);
-
-    mSeeking = true;
-    mSeekTimeUs = time_us;
-
-    postTextEvent();
-
-    return OK;
-}
-
-status_t TimedTextPlayer::setTimedTextTrackIndex(int32_t index) {
-    if (index >=
-            (int)(mTextTrackVector.size() + mTextOutOfBandVector.size())) {
-        return BAD_VALUE;
-    }
-
-    if (mStarted) {
-        reset();
-    }
-
-    if (index >= 0) {
-        return start(index);
-    }
-    return OK;
-}
-
-void TimedTextPlayer::onTextEvent() {
-    Mutex::Autolock autoLock(mLock);
-
-    if (!mTextEventPending) {
-        return;
-    }
-    mTextEventPending = false;
-
-    if (mData.dataSize() > 0) {
-        notifyListener(MEDIA_TIMED_TEXT, &mData);
-        mData.freeData();
-    }
-
+void TimedTextPlayer::doSeekAndRead(int64_t seekTimeUs) {
     MediaSource::ReadOptions options;
-    if (mSeeking) {
-        options.setSeekTo(mSeekTimeUs,
-                MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
-        mSeeking = false;
-
-        notifyListener(MEDIA_TIMED_TEXT); //empty text to clear the screen
-    }
-
-    int64_t positionUs, timeUs;
-    mObserver->getPosition(&positionUs);
-
-    if (mTextType == kInBandText) {
-        if (mSource->read(&mTextBuffer, &options) != OK) {
-            return;
-        }
-
-        mTextBuffer->meta_data()->findInt64(kKeyTime, &timeUs);
-    } else {
-        int64_t endTimeUs;
-        if (mTextParser->getText(
-                    &mText, &timeUs, &endTimeUs, &options) != OK) {
-            return;
-        }
-    }
-
-    if (timeUs > 0) {
-        extractAndAppendLocalDescriptions(timeUs);
-    }
-
-    if (mTextType == kInBandText) {
-        if (mTextBuffer != NULL) {
-            mTextBuffer->release();
-            mTextBuffer = NULL;
-        }
-    } else {
-        mText.clear();
-    }
-
-    //send the text now
-    if (timeUs <= positionUs + 100000ll) {
-        postTextEvent();
-    } else {
-        postTextEvent(timeUs - positionUs - 100000ll);
-    }
+    options.setSeekTo(seekTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
+    doRead(&options);
 }
 
-void TimedTextPlayer::postTextEvent(int64_t delayUs) {
-    if (mTextEventPending) {
+void TimedTextPlayer::doRead(MediaSource::ReadOptions* options) {
+    int64_t startTimeUs = 0;
+    int64_t endTimeUs = 0;
+    sp<ParcelEvent> parcelEvent = new ParcelEvent();
+    status_t err = mSource->read(&startTimeUs, &endTimeUs,
+                                 &(parcelEvent->parcel), options);
+    if (err == WOULD_BLOCK) {
+        postTextEventDelayUs(NULL, kWaitTimeUsToRetryRead);
+        return;
+    } else if (err != OK) {
+        notifyError(err);
         return;
     }
 
-    mTextEventPending = true;
-    mQueue->postEventWithDelay(mTextEvent, delayUs < 0 ? 10000 : delayUs);
+    postTextEvent(parcelEvent, startTimeUs);
+    if (endTimeUs > 0) {
+        CHECK_GE(endTimeUs, startTimeUs);
+        // Send an empty timed text to clear the subtitle when playback reaches
+        // the end time.
+        postTextEvent(NULL, endTimeUs);
+    }
 }
 
-void TimedTextPlayer::cancelTextEvent() {
-    mQueue->cancelEvent(mTextEvent->eventID());
-    mTextEventPending = false;
-}
+void TimedTextPlayer::postTextEvent(const sp<ParcelEvent>& parcel, int64_t timeUs) {
+    sp<MediaPlayerBase> listener = mListener.promote();
+    if (listener != NULL) {
+        int64_t positionUs, delayUs;
+        int32_t positionMs = 0;
+        listener->getCurrentPosition(&positionMs);
+        positionUs = positionMs * 1000ll;
 
-void TimedTextPlayer::addTextSource(sp<MediaSource> source) {
-    Mutex::Autolock autoLock(mLock);
-    mTextTrackVector.add(source);
-}
-
-status_t TimedTextPlayer::setParameter(int key, const Parcel &request) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (key == KEY_PARAMETER_TIMED_TEXT_ADD_OUT_OF_BAND_SOURCE) {
-        const String16 uri16 = request.readString16();
-        String8 uri = String8(uri16);
-        KeyedVector<String8, String8> headers;
-
-        // To support local subtitle file only for now
-        if (strncasecmp("file://", uri.string(), 7)) {
-            return INVALID_OPERATION;
-        }
-        sp<DataSource> dataSource =
-            DataSource::CreateFromURI(uri, &headers);
-        status_t err = dataSource->initCheck();
-
-        if (err != OK) {
-            return err;
-        }
-
-        OutOfBandText text;
-        text.source = dataSource;
-        if (uri.getPathExtension() == String8(".srt")) {
-            text.type = TimedTextParser::OUT_OF_BAND_FILE_SRT;
+        if (timeUs <= positionUs + kAdjustmentProcessingTimeUs) {
+            delayUs = 0;
         } else {
-            return ERROR_UNSUPPORTED;
+            delayUs = timeUs - positionUs - kAdjustmentProcessingTimeUs;
         }
-
-        mTextOutOfBandVector.add(text);
-
-        return OK;
+        postTextEventDelayUs(parcel, delayUs);
     }
-    return INVALID_OPERATION;
 }
 
-void TimedTextPlayer::notifyListener(int msg, const Parcel *parcel) {
-    if (mListener != NULL) {
-        sp<MediaPlayerBase> listener = mListener.promote();
+void TimedTextPlayer::postTextEventDelayUs(const sp<ParcelEvent>& parcel, int64_t delayUs) {
+    sp<MediaPlayerBase> listener = mListener.promote();
+    if (listener != NULL) {
+        sp<AMessage> msg = new AMessage(kWhatSendSubtitle, id());
+        msg->setInt32("generation", mSendSubtitleGeneration);
+        if (parcel != NULL) {
+            msg->setObject("subtitle", parcel);
+        }
+        msg->post(delayUs);
+    }
+}
 
-        if (listener != NULL) {
-            if (parcel && (parcel->dataSize() > 0)) {
-                listener->sendEvent(msg, 0, 0, parcel);
-            } else { // send an empty timed text to clear the screen
-                listener->sendEvent(msg);
-            }
+void TimedTextPlayer::notifyError(int error) {
+    sp<MediaPlayerBase> listener = mListener.promote();
+    if (listener != NULL) {
+        listener->sendEvent(MEDIA_INFO, MEDIA_INFO_TIMED_TEXT_ERROR, error);
+    }
+}
+
+void TimedTextPlayer::notifyListener(const Parcel *parcel) {
+    sp<MediaPlayerBase> listener = mListener.promote();
+    if (listener != NULL) {
+        if (parcel != NULL && (parcel->dataSize() > 0)) {
+            listener->sendEvent(MEDIA_TIMED_TEXT, 0, 0, parcel);
+        } else {  // send an empty timed text to clear the screen
+            listener->sendEvent(MEDIA_TIMED_TEXT);
         }
     }
 }
 
-// Each text sample consists of a string of text, optionally with sample
-// modifier description. The modifier description could specify a new
-// text style for the string of text. These descriptions are present only
-// if they are needed. This method is used to extract the modifier
-// description and append it at the end of the text.
-status_t TimedTextPlayer::extractAndAppendLocalDescriptions(int64_t timeUs) {
-    const void *data;
-    size_t size = 0;
-    int32_t flag = TextDescriptions::LOCAL_DESCRIPTIONS;
-
-    if (mTextType == kInBandText) {
-        const char *mime;
-        CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime));
-
-        if (!strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP)) {
-            flag |= TextDescriptions::IN_BAND_TEXT_3GPP;
-            data = mTextBuffer->data();
-            size = mTextBuffer->size();
-        } else {
-            // support 3GPP only for now
-            return ERROR_UNSUPPORTED;
-        }
-    } else {
-        data = mText.c_str();
-        size = mText.size();
-        flag |= TextDescriptions::OUT_OF_BAND_TEXT_SRT;
-    }
-
-    if ((size > 0) && (flag != TextDescriptions::LOCAL_DESCRIPTIONS)) {
-        mData.freeData();
-        return TextDescriptions::getParcelOfDescriptions(
-                (const uint8_t *)data, size, flag, timeUs / 1000, &mData);
-    }
-
-    return OK;
-}
-
-// To extract and send the global text descriptions for all the text samples
-// in the text track or text file.
-status_t TimedTextPlayer::extractAndSendGlobalDescriptions() {
-    const void *data;
-    size_t size = 0;
-    int32_t flag = TextDescriptions::GLOBAL_DESCRIPTIONS;
-
-    if (mTextType == kInBandText) {
-        const char *mime;
-        CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime));
-
-        // support 3GPP only for now
-        if (!strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP)) {
-            uint32_t type;
-            // get the 'tx3g' box content. This box contains the text descriptions
-            // used to render the text track
-            if (!mSource->getFormat()->findData(
-                        kKeyTextFormatData, &type, &data, &size)) {
-                return ERROR_MALFORMED;
-            }
-
-            flag |= TextDescriptions::IN_BAND_TEXT_3GPP;
-        }
-    }
-
-    if ((size > 0) && (flag != TextDescriptions::GLOBAL_DESCRIPTIONS)) {
-        Parcel parcel;
-        if (TextDescriptions::getParcelOfDescriptions(
-                (const uint8_t *)data, size, flag, 0, &parcel) == OK) {
-            if (parcel.dataSize() > 0) {
-                notifyListener(MEDIA_TIMED_TEXT, &parcel);
-            }
-        }
-    }
-
-    return OK;
-}
-}
+}  // namespace android
diff --git a/media/libstagefright/timedtext/TimedTextPlayer.h b/media/libstagefright/timedtext/TimedTextPlayer.h
index a744db5..47aff03 100644
--- a/media/libstagefright/timedtext/TimedTextPlayer.h
+++ b/media/libstagefright/timedtext/TimedTextPlayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2012 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,98 +15,61 @@
  */
 
 #ifndef TIMEDTEXT_PLAYER_H_
-
 #define TIMEDTEXT_PLAYER_H_
 
-#include <media/MediaPlayerInterface.h>
+#include <binder/Parcel.h>
 #include <media/stagefright/foundation/ABase.h>
-#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/MediaSource.h>
+#include <utils/RefBase.h>
 
-#include "include/TimedEventQueue.h"
-#include "TimedTextParser.h"
+#include "TimedTextSource.h"
 
 namespace android {
 
-class MediaSource;
-class AwesomePlayer;
-class MediaBuffer;
+class AMessage;
+class MediaPlayerBase;
+class TimedTextDriver;
+class TimedTextSource;
 
-class TimedTextPlayer {
+class TimedTextPlayer : public AHandler {
 public:
-    TimedTextPlayer(AwesomePlayer *observer,
-                    const wp<MediaPlayerBase> &listener,
-                    TimedEventQueue *queue);
+    TimedTextPlayer(const wp<MediaPlayerBase> &listener);
 
     virtual ~TimedTextPlayer();
 
-    // index: the index of the text track which will
-    // be turned on
-    status_t start(uint8_t index);
-
+    void start();
     void pause();
+    void seekToAsync(int64_t timeUs);
+    void setDataSource(sp<TimedTextSource> source);
 
-    void resume();
-
-    status_t seekTo(int64_t time_us);
-
-    void addTextSource(sp<MediaSource> source);
-
-    status_t setTimedTextTrackIndex(int32_t index);
-    status_t setParameter(int key, const Parcel &request);
+protected:
+    virtual void onMessageReceived(const sp<AMessage> &msg);
 
 private:
-    enum TextType {
-        kNoText        = 0,
-        kInBandText    = 1,
-        kOutOfBandText = 2,
+    enum {
+        kWhatPause = 'paus',
+        kWhatSeek = 'seek',
+        kWhatSendSubtitle = 'send',
+        kWhatSetSource = 'ssrc',
     };
 
-    Mutex mLock;
-
-    sp<MediaSource> mSource;
-    sp<DataSource> mOutOfBandSource;
-
-    bool mSeeking;
-    int64_t mSeekTimeUs;
-
-    bool mStarted;
-
-    sp<TimedEventQueue::Event> mTextEvent;
-    bool mTextEventPending;
-
-    TimedEventQueue *mQueue;
+    // To add a Parcel to an AMessage as an object, it must be a 'RefBase'.
+    struct ParcelEvent : public RefBase {
+        Parcel parcel;
+    };
 
     wp<MediaPlayerBase> mListener;
-    AwesomePlayer *mObserver;
+    sp<TimedTextSource> mSource;
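+    // Bumped on pause so that pending kWhatSendSubtitle messages are dropped as stale.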
+    int32_t mSendSubtitleGeneration;
 
-    MediaBuffer *mTextBuffer;
-    Parcel mData;
-
-    // for in-band timed text
-    Vector<sp<MediaSource> > mTextTrackVector;
-
-    // for out-of-band timed text
-    struct OutOfBandText {
-        TimedTextParser::FileType type;
-        sp<DataSource> source;
-    };
-    Vector<OutOfBandText > mTextOutOfBandVector;
-
-    sp<TimedTextParser> mTextParser;
-    AString mText;
-
-    TextType mTextType;
-
-    void reset();
-
+    void doSeekAndRead(int64_t seekTimeUs);
+    void doRead(MediaSource::ReadOptions* options = NULL);
     void onTextEvent();
-    void postTextEvent(int64_t delayUs = -1);
-    void cancelTextEvent();
-
-    void notifyListener(int msg, const Parcel *parcel = NULL);
-
-    status_t extractAndAppendLocalDescriptions(int64_t timeUs);
-    status_t extractAndSendGlobalDescriptions();
+    void postTextEvent(const sp<ParcelEvent>& parcel = NULL, int64_t timeUs = -1);
+    void postTextEventDelayUs(const sp<ParcelEvent>& parcel = NULL, int64_t delayUs = -1);
+    void notifyError(int error = 0);
+    void notifyListener(const Parcel *parcel = NULL);
 
     DISALLOW_EVIL_CONSTRUCTORS(TimedTextPlayer);
 };
diff --git a/media/libstagefright/timedtext/TimedTextSRTSource.cpp b/media/libstagefright/timedtext/TimedTextSRTSource.cpp
new file mode 100644
index 0000000..7b1f7f6
--- /dev/null
+++ b/media/libstagefright/timedtext/TimedTextSRTSource.cpp
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TimedTextSRTSource"
+#include <utils/Log.h>
+
+#include <binder/Parcel.h>
+#include <media/stagefright/foundation/ADebug.h>  // for CHECK_xx
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaDefs.h>  // for MEDIA_MIMETYPE_xxx
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+
+#include "TimedTextSRTSource.h"
+#include "TextDescriptions.h"
+
+namespace android {
+
+TimedTextSRTSource::TimedTextSRTSource(const sp<DataSource>& dataSource)
+        : mSource(dataSource),
+          mMetaData(new MetaData),
+          mIndex(0) {
+}
+
+TimedTextSRTSource::~TimedTextSRTSource() {
+}
+
+status_t TimedTextSRTSource::start() {
+    status_t err = scanFile();
+    if (err != OK) {
+        reset();
+    }
+    // TODO: Need to detect the language, because SRT doesn't give language
+    // information explicitly.
+    mMetaData->setCString(kKeyMediaLanguage, "");
+    return err;
+}
+
+void TimedTextSRTSource::reset() {
+    mMetaData->clear();
+    mTextVector.clear();
+    mIndex = 0;
+}
+
+status_t TimedTextSRTSource::stop() {
+    reset();
+    return OK;
+}
+
+status_t TimedTextSRTSource::read(
+        int64_t *startTimeUs,
+        int64_t *endTimeUs,
+        Parcel *parcel,
+        const MediaSource::ReadOptions *options) {
+    AString text;
+    status_t err = getText(options, &text, startTimeUs, endTimeUs);
+    if (err != OK) {
+        return err;
+    }
+
+    CHECK_GE(*startTimeUs, 0);
+    extractAndAppendLocalDescriptions(*startTimeUs, text, parcel);
+    return OK;
+}
+
+status_t TimedTextSRTSource::scanFile() {
+    off64_t offset = 0;
+    int64_t startTimeUs;
+    bool endOfFile = false;
+
+    while (!endOfFile) {
+        TextInfo info;
+        status_t err = getNextSubtitleInfo(&offset, &startTimeUs, &info);
+        switch (err) {
+            case OK:
+                mTextVector.add(startTimeUs, info);
+                break;
+            case ERROR_END_OF_STREAM:
+                endOfFile = true;
+                break;
+            default:
+                return err;
+        }
+    }
+    if (mTextVector.isEmpty()) {
+        return ERROR_MALFORMED;
+    }
+    return OK;
+}
+
+/* SRT format:
+ *   Subtitle number
+ *   Start time --> End time
+ *   Text of subtitle (one or more lines)
+ *   Blank lines
+ *
+ * .srt file example:
+ * 1
+ * 00:00:20,000 --> 00:00:24,400
+ * Altocumulus clouds occur between six thousand
+ *
+ * 2
+ * 00:00:24,600 --> 00:00:27,800
+ * and twenty thousand feet above ground level.
+ */
+status_t TimedTextSRTSource::getNextSubtitleInfo(
+          off64_t *offset, int64_t *startTimeUs, TextInfo *info) {
+    AString data;
+    status_t err;
+
+    // Skip any blank lines before the subtitle number.
+    do {
+        if ((err = readNextLine(offset, &data)) != OK) {
+            return err;
+        }
+        data.trim();
+    } while (data.empty());
+
+    // Ignore the first non-blank line, which is the subtitle sequence number.
+    if ((err = readNextLine(offset, &data)) != OK) {
+        return err;
+    }
+    int hour1, hour2, min1, min2, sec1, sec2, msec1, msec2;
+    // The timing line format is hours:minutes:seconds,milliseconds for both times:
+    // 00:00:24,600 --> 00:00:27,800
+    if (sscanf(data.c_str(), "%02d:%02d:%02d,%03d --> %02d:%02d:%02d,%03d",
+               &hour1, &min1, &sec1, &msec1, &hour2, &min2, &sec2, &msec2) != 8) {
+        return ERROR_MALFORMED;
+    }
+
+    *startTimeUs = ((hour1 * 3600 + min1 * 60 + sec1) * 1000 + msec1) * 1000ll;
+    info->endTimeUs = ((hour2 * 3600 + min2 * 60 + sec2) * 1000 + msec2) * 1000ll;
+    if (info->endTimeUs <= *startTimeUs) {
+        return ERROR_MALFORMED;
+    }
+
+    info->offset = *offset;
+    bool needMoreData = true;
+    while (needMoreData) {
+        if ((err = readNextLine(offset, &data)) != OK) {
+            if (err == ERROR_END_OF_STREAM) {
+                needMoreData = false;
+            } else {
+                return err;
+            }
+        }
+
+        if (needMoreData) {
+            data.trim();
+            if (data.empty()) {
+                // it's an empty line used to separate two subtitles
+                needMoreData = false;
+            }
+        }
+    }
+    info->textLen = *offset - info->offset;
+    return OK;
+}
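
As a self-contained restatement of the timing-line arithmetic above, the conversion to microseconds can be isolated as follows (parseSRTTiming is an illustrative helper, not part of this patch):

    #include <stdint.h>
    #include <stdio.h>

    // Converts one SRT timing line ("HH:MM:SS,mmm --> HH:MM:SS,mmm") to microseconds.
    static bool parseSRTTiming(const char *line, int64_t *startUs, int64_t *endUs) {
        int h1, m1, s1, ms1, h2, m2, s2, ms2;
        if (sscanf(line, "%02d:%02d:%02d,%03d --> %02d:%02d:%02d,%03d",
                   &h1, &m1, &s1, &ms1, &h2, &m2, &s2, &ms2) != 8) {
            return false;
        }
        *startUs = ((h1 * 3600LL + m1 * 60 + s1) * 1000 + ms1) * 1000;
        *endUs   = ((h2 * 3600LL + m2 * 60 + s2) * 1000 + ms2) * 1000;
        return *endUs > *startUs;   // same sanity check as the code above
    }
    // Example: "00:00:24,600 --> 00:00:27,800" yields 24600000 us and 27800000 us.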
+
+status_t TimedTextSRTSource::readNextLine(off64_t *offset, AString *data) {
+    data->clear();
+    while (true) {
+        ssize_t readSize;
+        char character;
+        if ((readSize = mSource->readAt(*offset, &character, 1)) < 1) {
+            if (readSize == 0) {
+                return ERROR_END_OF_STREAM;
+            }
+            return ERROR_IO;
+        }
+
+        (*offset)++;
+
+        // a line could end with CR, LF or CR + LF
+        if (character == 10) {
+            break;
+        } else if (character == 13) {
+            if ((readSize = mSource->readAt(*offset, &character, 1)) < 1) {
+                if (readSize == 0) {  // end of the stream
+                    return OK;
+                }
+                return ERROR_IO;
+            }
+
+            (*offset)++;
+            if (character != 10) {
+                (*offset)--;
+            }
+            break;
+        }
+        data->append(character);
+    }
+    return OK;
+}
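
The loop above accepts LF, CR, and CR+LF line endings. The same logic over an in-memory buffer, for illustration only (the real code reads byte by byte through DataSource::readAt):

    #include <string>

    // Reads one line from buf starting at *pos; *pos ends up at the first byte after the line.
    static std::string readLine(const char *buf, size_t len, size_t *pos) {
        std::string line;
        while (*pos < len) {
            char c = buf[(*pos)++];
            if (c == '\n') {                       // LF terminates the line
                break;
            }
            if (c == '\r') {                       // CR, optionally followed by LF
                if (*pos < len && buf[*pos] == '\n') {
                    (*pos)++;                      // swallow the LF of a CR+LF pair
                }
                break;
            }
            line.push_back(c);
        }
        return line;
    }
    // "a\n", "a\r\n" and "a\r" all produce the line "a".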
+
+status_t TimedTextSRTSource::getText(
+        const MediaSource::ReadOptions *options,
+        AString *text, int64_t *startTimeUs, int64_t *endTimeUs) {
+    text->clear();
+    int64_t seekTimeUs;
+    MediaSource::ReadOptions::SeekMode mode;
+    if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) {
+        int64_t lastEndTimeUs =
+                mTextVector.valueAt(mTextVector.size() - 1).endTimeUs;
+        int64_t firstStartTimeUs = mTextVector.keyAt(0);
+        if (seekTimeUs < 0 || seekTimeUs > lastEndTimeUs) {
+            return ERROR_OUT_OF_RANGE;
+        } else if (seekTimeUs < firstStartTimeUs) {
+            mIndex = 0;
+        } else {
+            // binary search
+            ssize_t low = 0;
+            ssize_t high = mTextVector.size() - 1;
+            ssize_t mid = 0;
+            int64_t currTimeUs;
+
+            while (low <= high) {
+                mid = low + (high - low)/2;
+                currTimeUs = mTextVector.keyAt(mid);
+                const int64_t diff = currTimeUs - seekTimeUs;
+
+                if (diff == 0) {
+                    break;
+                } else if (diff < 0) {
+                    low = mid + 1;
+                } else {
+                    if ((high == mid + 1)
+                        && (seekTimeUs < mTextVector.keyAt(high))) {
+                        break;
+                    }
+                    high = mid - 1;
+                }
+            }
+            mIndex = mid;
+        }
+    }
+    if (mIndex >= (int)mTextVector.size()) {
+        // mIndex is advanced past the last cue below, so guard against reading past the end.
+        return ERROR_END_OF_STREAM;
+    }
+    const TextInfo &info = mTextVector.valueAt(mIndex);
+    *startTimeUs = mTextVector.keyAt(mIndex);
+    *endTimeUs = info.endTimeUs;
+    mIndex++;
+
+    char *str = new char[info.textLen];
+    if (mSource->readAt(info.offset, str, info.textLen) < info.textLen) {
+        delete[] str;
+        return ERROR_IO;
+    }
+    text->append(str, info.textLen);
+    delete[] str;
+    return OK;
+}
+
+status_t TimedTextSRTSource::extractAndAppendLocalDescriptions(
+        int64_t timeUs, const AString &text, Parcel *parcel) {
+    const void *data = text.c_str();
+    size_t size = text.size();
+    int32_t flag = TextDescriptions::LOCAL_DESCRIPTIONS |
+                   TextDescriptions::OUT_OF_BAND_TEXT_SRT;
+
+    if (size > 0) {
+        return TextDescriptions::getParcelOfDescriptions(
+                (const uint8_t *)data, size, flag, timeUs / 1000, parcel);
+    }
+    return OK;
+}
+
+sp<MetaData> TimedTextSRTSource::getFormat() {
+    return mMetaData;
+}
+
+}  // namespace android
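
A sketch of driving this source end to end, assuming a stock stagefright FileSource over an .srt file (the path is hypothetical and error handling is trimmed):

    #include <media/stagefright/FileSource.h>

    // Sketch: walk every cue of an .srt file through TimedTextSRTSource.
    sp<DataSource> file = new FileSource("/sdcard/movie.srt");   // hypothetical path
    sp<TimedTextSRTSource> source = new TimedTextSRTSource(file);
    if (source->start() == OK) {
        for (;;) {
            int64_t startUs, endUs;
            Parcel parcel;                       // filled with the cue's text descriptions
            if (source->read(&startUs, &endUs, &parcel) != OK) {
                break;                           // no more cues (or a read error)
            }
            // forward startUs/endUs/parcel to the listener here
        }
        source->stop();
    }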
diff --git a/media/libstagefright/timedtext/TimedTextSRTSource.h b/media/libstagefright/timedtext/TimedTextSRTSource.h
new file mode 100644
index 0000000..e1371b8
--- /dev/null
+++ b/media/libstagefright/timedtext/TimedTextSRTSource.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TIMED_TEXT_SRT_SOURCE_H_
+#define TIMED_TEXT_SRT_SOURCE_H_
+
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <utils/Compat.h>  // off64_t
+
+#include "TimedTextSource.h"
+
+namespace android {
+
+class AString;
+class DataSource;
+class MediaBuffer;
+class Parcel;
+
+class TimedTextSRTSource : public TimedTextSource {
+public:
+    TimedTextSRTSource(const sp<DataSource>& dataSource);
+    virtual status_t start();
+    virtual status_t stop();
+    virtual status_t read(
+            int64_t *startTimeUs,
+            int64_t *endTimeUs,
+            Parcel *parcel,
+            const MediaSource::ReadOptions *options = NULL);
+    virtual sp<MetaData> getFormat();
+
+protected:
+    virtual ~TimedTextSRTSource();
+
+private:
+    sp<DataSource> mSource;
+    sp<MetaData> mMetaData;
+
+    struct TextInfo {
+        int64_t endTimeUs;
+        // The offset of the text in the original file.
+        off64_t offset;
+        int textLen;
+    };
+
+    int mIndex;
+    KeyedVector<int64_t, TextInfo> mTextVector;
+
+    void reset();
+    status_t scanFile();
+    status_t getNextSubtitleInfo(
+            off64_t *offset, int64_t *startTimeUs, TextInfo *info);
+    status_t readNextLine(off64_t *offset, AString *data);
+    status_t getText(
+            const MediaSource::ReadOptions *options,
+            AString *text, int64_t *startTimeUs, int64_t *endTimeUs);
+    status_t extractAndAppendLocalDescriptions(
+            int64_t timeUs, const AString &text, Parcel *parcel);
+
+    DISALLOW_EVIL_CONSTRUCTORS(TimedTextSRTSource);
+};
+
+}  // namespace android
+
+#endif  // TIMED_TEXT_SRT_SOURCE_H_
diff --git a/media/libstagefright/timedtext/TimedTextSource.cpp b/media/libstagefright/timedtext/TimedTextSource.cpp
new file mode 100644
index 0000000..953f7b5
--- /dev/null
+++ b/media/libstagefright/timedtext/TimedTextSource.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TimedTextSource"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ADebug.h>  // CHECK_XX macro
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaDefs.h>  // for MEDIA_MIMETYPE_xxx
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+
+#include "TimedTextSource.h"
+
+#include "TimedText3GPPSource.h"
+#include "TimedTextSRTSource.h"
+
+namespace android {
+
+// static
+sp<TimedTextSource> TimedTextSource::CreateTimedTextSource(
+        const sp<MediaSource>& mediaSource) {
+    const char *mime;
+    CHECK(mediaSource->getFormat()->findCString(kKeyMIMEType, &mime));
+    if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) == 0) {
+        return new TimedText3GPPSource(mediaSource);
+    }
+    ALOGE("Unsupported mime type for subtitle. : %s", mime);
+    return NULL;
+}
+
+// static
+sp<TimedTextSource> TimedTextSource::CreateTimedTextSource(
+        const sp<DataSource>& dataSource, FileType filetype) {
+    switch (filetype) {
+        case OUT_OF_BAND_FILE_SRT:
+            return new TimedTextSRTSource(dataSource);
+        case OUT_OF_BAND_FILE_SMI:
+            // TODO: Implement for SMI.
+            ALOGE("SMI subtitle support is not implemented yet");
+            break;
+        default:
+            ALOGE("Undefined subtitle format: %d", filetype);
+    }
+    return NULL;
+}
+
+sp<MetaData> TimedTextSource::getFormat() {
+    return NULL;
+}
+
+}  // namespace android
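
For reference, a short sketch of the two factory paths above; the mediaSource and dataSource objects are assumed to exist already and are illustrative only:

    // In-band text track from an extractor: dispatch on the track's MIME type.
    sp<TimedTextSource> inBand =
            TimedTextSource::CreateTimedTextSource(mediaSource);

    // Out-of-band subtitle file: the caller names the file type explicitly.
    sp<TimedTextSource> outOfBand =
            TimedTextSource::CreateTimedTextSource(
                    dataSource, TimedTextSource::OUT_OF_BAND_FILE_SRT);

    // Either factory returns NULL for unsupported input, so callers must check.
    if (outOfBand != NULL && outOfBand->start() == OK) {
        // ready to read() subtitle parcels
    }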
diff --git a/media/libstagefright/timedtext/TimedTextSource.h b/media/libstagefright/timedtext/TimedTextSource.h
new file mode 100644
index 0000000..756cc31
--- /dev/null
+++ b/media/libstagefright/timedtext/TimedTextSource.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TIMED_TEXT_SOURCE_H_
+#define TIMED_TEXT_SOURCE_H_
+
+#include <media/stagefright/foundation/ABase.h>  // for DISALLOW_XXX macro.
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>  // for MediaSource::ReadOptions
+#include <utils/RefBase.h>
+
+namespace android {
+
+class DataSource;
+class MetaData;
+class Parcel;
+
+class TimedTextSource : public RefBase {
+ public:
+  enum FileType {
+      OUT_OF_BAND_FILE_SRT = 1,
+      OUT_OF_BAND_FILE_SMI = 2,
+  };
+  static sp<TimedTextSource> CreateTimedTextSource(
+      const sp<MediaSource>& source);
+  static sp<TimedTextSource> CreateTimedTextSource(
+      const sp<DataSource>& source, FileType filetype);
+  TimedTextSource() {}
+  virtual status_t start() = 0;
+  virtual status_t stop() = 0;
+  // Fills the parcel with the next subtitle and reports its start and end times.
+  virtual status_t read(
+          int64_t *startTimeUs,
+          int64_t *endTimeUs,
+          Parcel *parcel,
+          const MediaSource::ReadOptions *options = NULL) = 0;
+  virtual status_t extractGlobalDescriptions(Parcel *parcel) {
+      return INVALID_OPERATION;
+  }
+  virtual sp<MetaData> getFormat();
+
+ protected:
+  virtual ~TimedTextSource() { }
+
+ private:
+  DISALLOW_EVIL_CONSTRUCTORS(TimedTextSource);
+};
+
+}  // namespace android
+
+#endif  // TIMED_TEXT_SOURCE_H_
diff --git a/media/libstagefright/yuv/YUVCanvas.cpp b/media/libstagefright/yuv/YUVCanvas.cpp
index 38aa779..4c9fee8 100644
--- a/media/libstagefright/yuv/YUVCanvas.cpp
+++ b/media/libstagefright/yuv/YUVCanvas.cpp
@@ -17,7 +17,7 @@
 #define LOG_NDEBUG 0
 #define LOG_TAG "YUVCanvas"
 
-#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/YUVCanvas.h>
 #include <media/stagefright/YUVImage.h>
 #include <ui/Rect.h>
diff --git a/media/libstagefright/yuv/YUVImage.cpp b/media/libstagefright/yuv/YUVImage.cpp
index 0d67c96..7b9000b 100644
--- a/media/libstagefright/yuv/YUVImage.cpp
+++ b/media/libstagefright/yuv/YUVImage.cpp
@@ -17,9 +17,9 @@
 #define LOG_NDEBUG 0
 #define LOG_TAG "YUVImage"
 
+#include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/YUVImage.h>
 #include <ui/Rect.h>
-#include <media/stagefright/MediaDebug.h>
 
 namespace android {
 
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index 0559812..4e9b4cf 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -11,12 +11,12 @@
 	libutils \
 	libbinder
 
-base := $(LOCAL_PATH)/../..
-
+# FIXME The duplicate audioflinger is temporary
 LOCAL_C_INCLUDES := \
-    $(base)/services/audioflinger \
-    $(base)/services/camera/libcameraservice \
-    $(base)/media/libmediaplayerservice
+    frameworks/native/services/audioflinger \
+    frameworks/base/services/audioflinger \
+    frameworks/base/services/camera/libcameraservice \
+    frameworks/base/media/libmediaplayerservice
 
 LOCAL_MODULE:= mediaserver
 
diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp
index f078192..6b1abb1 100644
--- a/media/mediaserver/main_mediaserver.cpp
+++ b/media/mediaserver/main_mediaserver.cpp
@@ -15,21 +15,19 @@
 ** limitations under the License.
 */
 
-// System headers required for setgroups, etc.
-#include <sys/types.h>
-#include <unistd.h>
-#include <grp.h>
+#define LOG_TAG "mediaserver"
+//#define LOG_NDEBUG 0
 
 #include <binder/IPCThreadState.h>
 #include <binder/ProcessState.h>
 #include <binder/IServiceManager.h>
 #include <utils/Log.h>
 
-#include <AudioFlinger.h>
-#include <CameraService.h>
-#include <MediaPlayerService.h>
-#include <AudioPolicyService.h>
-#include <private/android_filesystem_config.h>
+// from LOCAL_C_INCLUDES
+#include "AudioFlinger.h"
+#include "CameraService.h"
+#include "MediaPlayerService.h"
+#include "AudioPolicyService.h"
 
 using namespace android;
 
diff --git a/media/mtp/Android.mk b/media/mtp/Android.mk
index e590bab..fc7fc4f 100644
--- a/media/mtp/Android.mk
+++ b/media/mtp/Android.mk
@@ -39,6 +39,9 @@
 
 LOCAL_CFLAGS := -DMTP_DEVICE -DMTP_HOST
 
+# Needed for <bionic_time.h>
+LOCAL_C_INCLUDES := bionic/libc/private
+
 LOCAL_SHARED_LIBRARIES := libutils libcutils libusbhost libbinder
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index fa49592..257f62c 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -6,18 +6,24 @@
     AudioFlinger.cpp            \
     AudioMixer.cpp.arm          \
     AudioResampler.cpp.arm      \
-    AudioResamplerSinc.cpp.arm  \
-    AudioResamplerCubic.cpp.arm \
-    AudioPolicyService.cpp
+    AudioPolicyService.cpp      \
+    ServiceUtilities.cpp
+#   AudioResamplerSinc.cpp.arm
+#   AudioResamplerCubic.cpp.arm
 
 LOCAL_C_INCLUDES := \
-    system/media/audio_effects/include
+    $(call include-path-for, audio-effects) \
+    $(call include-path-for, audio-utils)
 
+# FIXME keep libmedia_native but remove libmedia after split
 LOCAL_SHARED_LIBRARIES := \
+    libaudioutils \
+    libcommon_time_client \
     libcutils \
     libutils \
     libbinder \
     libmedia \
+    libmedia_native \
     libhardware \
     libhardware_legacy \
     libeffects \
diff --git a/services/audioflinger/AudioBufferProvider.h b/services/audioflinger/AudioBufferProvider.h
index 81c5c39..43e4de7 100644
--- a/services/audioflinger/AudioBufferProvider.h
+++ b/services/audioflinger/AudioBufferProvider.h
@@ -17,8 +17,6 @@
 #ifndef ANDROID_AUDIO_BUFFER_PROVIDER_H
 #define ANDROID_AUDIO_BUFFER_PROVIDER_H
 
-#include <stdint.h>
-#include <sys/types.h>
 #include <utils/Errors.h>
 
 namespace android {
@@ -29,6 +27,7 @@
 public:
 
     struct Buffer {
+        Buffer() : raw(NULL), frameCount(0) { }
         union {
             void*       raw;
             short*      i16;
@@ -38,8 +37,15 @@
     };
 
     virtual ~AudioBufferProvider() {}
-    
-    virtual status_t getNextBuffer(Buffer* buffer) = 0;
+
+    // value representing an invalid presentation timestamp
+    static const int64_t kInvalidPTS = 0x7FFFFFFFFFFFFFFFLL;    // <stdint.h> is too painful
+
+    // pts is the local time when the next sample yielded by getNextBuffer
+    // will be rendered.
+    // Pass kInvalidPTS if the PTS is unknown or not applicable.
+    virtual status_t getNextBuffer(Buffer* buffer, int64_t pts = kInvalidPTS) = 0;
+
     virtual void releaseBuffer(Buffer* buffer) = 0;
 };
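
The behavioural change here is the extra pts argument, with kInvalidPTS as the documented "don't care" value. A minimal provider satisfying the revised interface might look like this (illustrative sketch, not part of the patch; it serves interleaved 16-bit PCM from a fixed array and has no presentation timing, so it ignores pts):

    #include <utils/Errors.h>
    #include "AudioBufferProvider.h"

    namespace android {

    // Sketch: hands out interleaved 16-bit PCM frames from a caller-owned array.
    class StaticBufferProvider : public AudioBufferProvider {
    public:
        StaticBufferProvider(short *pcm, size_t frames, size_t channels)
            : mPcm(pcm), mFrames(frames), mChannels(channels), mPos(0) { }

        virtual status_t getNextBuffer(Buffer *buffer, int64_t pts = kInvalidPTS) {
            (void) pts;                         // no timing to honour in this sketch
            size_t avail = mFrames - mPos;
            if (avail == 0) {
                buffer->raw = NULL;
                buffer->frameCount = 0;
                return NOT_ENOUGH_DATA;
            }
            if (buffer->frameCount == 0 || buffer->frameCount > avail) {
                buffer->frameCount = avail;     // caller may have requested fewer frames
            }
            buffer->i16 = mPcm + mPos * mChannels;
            return NO_ERROR;
        }

        virtual void releaseBuffer(Buffer *buffer) {
            mPos += buffer->frameCount;         // advance by what was actually consumed
            buffer->raw = NULL;
            buffer->frameCount = 0;
        }

    private:
        short *mPcm;
        size_t mFrames;
        size_t mChannels;
        size_t mPos;
    };

    }  // namespace android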
 
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index dab76a3..c0b18fa 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1,4 +1,4 @@
-/* //device/include/server/AudioFlinger/AudioFlinger.cpp
+/*
 **
 ** Copyright 2007, The Android Open Source Project
 **
@@ -35,10 +35,14 @@
 
 #include <cutils/bitops.h>
 #include <cutils/properties.h>
+#include <cutils/compiler.h>
 
-#include <media/AudioTrack.h>
-#include <media/AudioRecord.h>
+#undef ADD_BATTERY_DATA
+
+#ifdef ADD_BATTERY_DATA
 #include <media/IMediaPlayerService.h>
+#include <media/IMediaDeathNotifier.h>
+#endif
 
 #include <private/media/AudioTrackShared.h>
 #include <private/media/AudioEffectShared.h>
@@ -48,27 +52,36 @@
 
 #include "AudioMixer.h"
 #include "AudioFlinger.h"
+#include "ServiceUtilities.h"
 
 #include <media/EffectsFactoryApi.h>
 #include <audio_effects/effect_visualizer.h>
 #include <audio_effects/effect_ns.h>
 #include <audio_effects/effect_aec.h>
 
-#include <cpustats/ThreadCpuUsage.h>
+#include <audio_utils/primitives.h>
+
 #include <powermanager/PowerManager.h>
+
 // #define DEBUG_CPU_USAGE 10  // log statistics every n wall clock seconds
+#ifdef DEBUG_CPU_USAGE
+#include <cpustats/CentralTendencyStatistics.h>
+#include <cpustats/ThreadCpuUsage.h>
+#endif
+
+#include <common_time/cc_helper.h>
+#include <common_time/local_clock.h>
 
 // ----------------------------------------------------------------------------
 
 
 namespace android {
 
-static const char* kDeadlockedString = "AudioFlinger may be deadlocked\n";
-static const char* kHardwareLockedString = "Hardware lock is taken\n";
+static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
+static const char kHardwareLockedString[] = "Hardware lock is taken\n";
 
-//static const nsecs_t kStandbyTimeInNsecs = seconds(3);
 static const float MAX_GAIN = 4096.0f;
-static const float MAX_GAIN_INT = 0x1000;
+static const uint32_t MAX_GAIN_INT = 0x1000;
 
 // retry counts for buffer fill timeout
 // 50 * ~20msecs = 1 second
@@ -80,49 +93,38 @@
 static const int8_t kMaxTrackRetriesDirect = 2;
 
 static const int kDumpLockRetries = 50;
-static const int kDumpLockSleep = 20000;
+static const int kDumpLockSleepUs = 20000;
 
-static const nsecs_t kWarningThrottle = seconds(5);
+// don't warn about blocked writes or record buffer overflows more often than this
+static const nsecs_t kWarningThrottleNs = seconds(5);
 
 // RecordThread loop sleep time upon application overrun or audio HAL read error
 static const int kRecordThreadSleepUs = 5000;
 
-static const nsecs_t kSetParametersTimeout = seconds(2);
+// maximum time to wait for setParameters to complete
+static const nsecs_t kSetParametersTimeoutNs = seconds(2);
 
 // minimum sleep time for the mixer thread loop when tracks are active but in underrun
 static const uint32_t kMinThreadSleepTimeUs = 5000;
 // maximum divider applied to the active sleep time in the mixer thread loop
 static const uint32_t kMaxThreadSleepTimeShift = 2;
 
+nsecs_t AudioFlinger::mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs;
 
 // ----------------------------------------------------------------------------
 
-static bool recordingAllowed() {
-    if (getpid() == IPCThreadState::self()->getCallingPid()) return true;
-    bool ok = checkCallingPermission(String16("android.permission.RECORD_AUDIO"));
-    if (!ok) ALOGE("Request requires android.permission.RECORD_AUDIO");
-    return ok;
-}
-
-static bool settingsAllowed() {
-    if (getpid() == IPCThreadState::self()->getCallingPid()) return true;
-    bool ok = checkCallingPermission(String16("android.permission.MODIFY_AUDIO_SETTINGS"));
-    if (!ok) ALOGE("Request requires android.permission.MODIFY_AUDIO_SETTINGS");
-    return ok;
-}
-
+#ifdef ADD_BATTERY_DATA
 // To collect the amplifier usage
 static void addBatteryData(uint32_t params) {
-    sp<IBinder> binder =
-        defaultServiceManager()->getService(String16("media.player"));
-    sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder);
-    if (service.get() == NULL) {
-        ALOGW("Cannot connect to the MediaPlayerService for battery tracking");
+    sp<IMediaPlayerService> service = IMediaDeathNotifier::getMediaPlayerService();
+    if (service == NULL) {
+        // the service lookup above already logged the failure
         return;
     }
 
     service->addBatteryData(params);
 }
+#endif
 
 static int load_audio_interface(const char *if_name, const hw_module_t **mod,
                                 audio_hw_device_t **dev)
@@ -147,7 +149,7 @@
     return rc;
 }
 
-static const char *audio_interfaces[] = {
+static const char * const audio_interfaces[] = {
     "primary",
     "a2dp",
     "usb",
@@ -158,8 +160,14 @@
 
 AudioFlinger::AudioFlinger()
     : BnAudioFlinger(),
-        mPrimaryHardwareDev(0), mMasterVolume(1.0f), mMasterMute(false), mNextUniqueId(1),
-        mBtNrecIsOff(false)
+      mPrimaryHardwareDev(NULL),
+      mHardwareStatus(AUDIO_HW_IDLE), // see also onFirstRef()
+      mMasterVolume(1.0f),
+      mMasterVolumeSupportLvl(MVS_NONE),
+      mMasterMute(false),
+      mNextUniqueId(1),
+      mMode(AUDIO_MODE_INVALID),
+      mBtNrecIsOff(false)
 {
 }
 
@@ -170,7 +178,18 @@
     Mutex::Autolock _l(mLock);
 
     /* TODO: move all this work into an Init() function */
-    mHardwareStatus = AUDIO_HW_IDLE;
+    char val_str[PROPERTY_VALUE_MAX] = { 0 };
+    if (property_get("ro.audio.flinger_standbytime_ms", val_str, NULL) >= 0) {
+        uint32_t int_val;
+        if (1 == sscanf(val_str, "%u", &int_val)) {
+            mStandbyTimeInNsecs = milliseconds(int_val);
+            ALOGI("Using %u mSec as standby time.", int_val);
+        } else {
+            mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs;
+            ALOGI("Using default %u mSec as standby time.",
+                    (uint32_t)(mStandbyTimeInNsecs / 1000000));
+        }
+    }
 
     for (size_t i = 0; i < ARRAY_SIZE(audio_interfaces); i++) {
         const hw_module_t *mod;
@@ -181,52 +200,83 @@
             continue;
 
         ALOGI("Loaded %s audio interface from %s (%s)", audio_interfaces[i],
-             mod->name, mod->id);
+            mod->name, mod->id);
         mAudioHwDevs.push(dev);
 
-        if (!mPrimaryHardwareDev) {
+        if (mPrimaryHardwareDev == NULL) {
             mPrimaryHardwareDev = dev;
             ALOGI("Using '%s' (%s.%s) as the primary audio interface",
-                 mod->name, mod->id, audio_interfaces[i]);
+                mod->name, mod->id, audio_interfaces[i]);
         }
     }
 
-    mHardwareStatus = AUDIO_HW_INIT;
-
-    if (!mPrimaryHardwareDev || mAudioHwDevs.size() == 0) {
+    if (mPrimaryHardwareDev == NULL) {
         ALOGE("Primary audio interface not found");
-        return;
+        // proceed, all later accesses to mPrimaryHardwareDev verify it's safe with initCheck()
     }
 
+    // Currently (mPrimaryHardwareDev == NULL) == (mAudioHwDevs.size() == 0), but the way the
+    // primary HW dev is selected can change so these conditions might not always be equivalent.
+    // When that happens, re-visit all the code that assumes this.
+
+    AutoMutex lock(mHardwareLock);
+
+    // Determine the level of master volume support the primary audio HAL has,
+    // and set the initial master volume at the same time.
+    float initialVolume = 1.0;
+    mMasterVolumeSupportLvl = MVS_NONE;
+    if (0 == mPrimaryHardwareDev->init_check(mPrimaryHardwareDev)) {
+        audio_hw_device_t *dev = mPrimaryHardwareDev;
+
+        mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
+        if ((NULL != dev->get_master_volume) &&
+            (NO_ERROR == dev->get_master_volume(dev, &initialVolume))) {
+            mMasterVolumeSupportLvl = MVS_FULL;
+        } else {
+            mMasterVolumeSupportLvl = MVS_SETONLY;
+            initialVolume = 1.0;
+        }
+
+        mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
+        if ((NULL == dev->set_master_volume) ||
+            (NO_ERROR != dev->set_master_volume(dev, initialVolume))) {
+            mMasterVolumeSupportLvl = MVS_NONE;
+        }
+        mHardwareStatus = AUDIO_HW_IDLE;
+    }
+
+    // Set the mode for each audio HAL, and try to set the initial volume (if
+    // supported) for all of the non-primary audio HALs.
     for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
         audio_hw_device_t *dev = mAudioHwDevs[i];
 
         mHardwareStatus = AUDIO_HW_INIT;
         rc = dev->init_check(dev);
+        mHardwareStatus = AUDIO_HW_IDLE;
         if (rc == 0) {
-            AutoMutex lock(mHardwareLock);
-
-            mMode = AUDIO_MODE_NORMAL;
+            mMode = AUDIO_MODE_NORMAL;  // assigned multiple times with same value
             mHardwareStatus = AUDIO_HW_SET_MODE;
             dev->set_mode(dev, mMode);
-            mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
-            dev->set_master_volume(dev, 1.0f);
+
+            if ((dev != mPrimaryHardwareDev) &&
+                (NULL != dev->set_master_volume)) {
+                mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
+                dev->set_master_volume(dev, initialVolume);
+            }
+
             mHardwareStatus = AUDIO_HW_IDLE;
         }
     }
-}
 
-status_t AudioFlinger::initCheck() const
-{
-    Mutex::Autolock _l(mLock);
-    if (mPrimaryHardwareDev == NULL || mAudioHwDevs.size() == 0)
-        return NO_INIT;
-    return NO_ERROR;
+    mMasterVolumeSW = (MVS_NONE == mMasterVolumeSupportLvl)
+                    ? initialVolume
+                    : 1.0;
+    mMasterVolume   = initialVolume;
+    mHardwareStatus = AUDIO_HW_IDLE;
 }
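
The support-level probe above boils down to a four-way decision: FULL when both the get and set hooks work, SETONLY when only set works, NONE otherwise (in which case the software mixer scales instead). A detached sketch of the same logic, with a local enum standing in for the support-level type used by AudioFlinger:

    #include <hardware/audio.h>
    #include <utils/Errors.h>

    using android::NO_ERROR;

    enum MasterVolumeSupport { MVS_NONE, MVS_SETONLY, MVS_FULL };  // illustrative stand-in

    static MasterVolumeSupport probeMasterVolumeSupport(audio_hw_device_t *dev,
                                                        float *initialVolume) {
        *initialVolume = 1.0f;
        bool canGet = (dev->get_master_volume != NULL) &&
                      (dev->get_master_volume(dev, initialVolume) == NO_ERROR);
        if (!canGet) {
            *initialVolume = 1.0f;              // nothing readable; start from unity gain
        }
        bool canSet = (dev->set_master_volume != NULL) &&
                      (dev->set_master_volume(dev, *initialVolume) == NO_ERROR);
        if (!canSet) {
            return MVS_NONE;                    // without set, HW master volume is unusable
        }
        return canGet ? MVS_FULL : MVS_SETONLY;
    }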
 
 AudioFlinger::~AudioFlinger()
 {
-    int num_devs = mAudioHwDevs.size();
 
     while (!mRecordThreads.isEmpty()) {
         // closeInput() will remove first entry from mRecordThreads
@@ -237,11 +287,10 @@
         closeOutput(mPlaybackThreads.keyAt(0));
     }
 
-    for (int i = 0; i < num_devs; i++) {
-        audio_hw_device_t *dev = mAudioHwDevs[i];
-        audio_hw_device_close(dev);
+    for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+        // no mHardwareLock needed, as there are no other references to this
+        audio_hw_device_close(mAudioHwDevs[i]);
     }
-    mAudioHwDevs.clear();
 }
 
 audio_hw_device_t* AudioFlinger::findSuitableHwDev_l(uint32_t devices)
@@ -263,21 +312,18 @@
 
     result.append("Clients:\n");
     for (size_t i = 0; i < mClients.size(); ++i) {
-        wp<Client> wClient = mClients.valueAt(i);
-        if (wClient != 0) {
-            sp<Client> client = wClient.promote();
-            if (client != 0) {
-                snprintf(buffer, SIZE, "  pid: %d\n", client->pid());
-                result.append(buffer);
-            }
+        sp<Client> client = mClients.valueAt(i).promote();
+        if (client != 0) {
+            snprintf(buffer, SIZE, "  pid: %d\n", client->pid());
+            result.append(buffer);
         }
     }
 
     result.append("Global session refs:\n");
-    result.append(" session pid cnt\n");
+    result.append(" session pid count\n");
     for (size_t i = 0; i < mAudioSessionRefs.size(); i++) {
         AudioSessionRef *r = mAudioSessionRefs[i];
-        snprintf(buffer, SIZE, " %7d %3d %3d\n", r->sessionid, r->pid, r->cnt);
+        snprintf(buffer, SIZE, " %7d %3d %3d\n", r->mSessionid, r->mPid, r->mCnt);
         result.append(buffer);
     }
     write(fd, result.string(), result.size());
@@ -290,9 +336,12 @@
     const size_t SIZE = 256;
     char buffer[SIZE];
     String8 result;
-    int hardwareStatus = mHardwareStatus;
+    hardware_call_state hardwareStatus = mHardwareStatus;
 
-    snprintf(buffer, SIZE, "Hardware status: %d\n", hardwareStatus);
+    snprintf(buffer, SIZE, "Hardware status: %d\n"
+                           "Standby Time mSec: %u\n",
+                            hardwareStatus,
+                            (uint32_t)(mStandbyTimeInNsecs / 1000000));
     result.append(buffer);
     write(fd, result.string(), result.size());
     return NO_ERROR;
@@ -320,14 +369,14 @@
             locked = true;
             break;
         }
-        usleep(kDumpLockSleep);
+        usleep(kDumpLockSleepUs);
     }
     return locked;
 }
 
 status_t AudioFlinger::dump(int fd, const Vector<String16>& args)
 {
-    if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
+    if (!dumpAllowed()) {
         dumpPermissionDenial(fd, args);
     } else {
         // get state of hardware lock
@@ -370,32 +419,47 @@
     return NO_ERROR;
 }
 
+sp<AudioFlinger::Client> AudioFlinger::registerPid_l(pid_t pid)
+{
+    // If pid is already in the mClients wp<> map, then use that entry
+    // (for which promote() is always != 0), otherwise create a new entry and Client.
+    sp<Client> client = mClients.valueFor(pid).promote();
+    if (client == 0) {
+        client = new Client(this, pid);
+        mClients.add(pid, client);
+    }
+
+    return client;
+}
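
registerPid_l() is the usual promote-or-create idiom for a weak-pointer cache: valueFor() on a missing key yields a NULL wp<>, and promote() fails for both missing and already-destroyed entries. The same pattern restated generically, for illustration only (the OBJ(key) constructor is an assumption of the sketch):

    #include <utils/KeyedVector.h>
    #include <utils/RefBase.h>

    using namespace android;

    // Promote-or-create over a wp<> cache: entries never keep their objects alive,
    // and lookups transparently recreate entries whose objects have already died.
    template <typename KEY, typename OBJ>
    sp<OBJ> lookupOrCreate(DefaultKeyedVector<KEY, wp<OBJ> > *cache, const KEY &key) {
        sp<OBJ> obj = cache->valueFor(key).promote();  // NULL if absent or expired
        if (obj == 0) {
            obj = new OBJ(key);                        // assumes OBJ has a (key) constructor
            cache->replaceValueFor(key, obj);          // overwrite any stale entry
        }
        return obj;
    }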
 
 // IAudioFlinger interface
 
 
 sp<IAudioTrack> AudioFlinger::createTrack(
         pid_t pid,
-        int streamType,
+        audio_stream_type_t streamType,
         uint32_t sampleRate,
-        uint32_t format,
+        audio_format_t format,
         uint32_t channelMask,
         int frameCount,
+        // FIXME dead, remove from IAudioFlinger
         uint32_t flags,
         const sp<IMemory>& sharedBuffer,
-        int output,
+        audio_io_handle_t output,
+        bool isTimed,
         int *sessionId,
         status_t *status)
 {
     sp<PlaybackThread::Track> track;
     sp<TrackHandle> trackHandle;
     sp<Client> client;
-    wp<Client> wclient;
     status_t lStatus;
     int lSessionId;
 
-    if (streamType >= AUDIO_STREAM_CNT) {
-        ALOGE("invalid stream type");
+    // client AudioTrack::set already implements AUDIO_STREAM_DEFAULT => AUDIO_STREAM_MUSIC,
+    // but if someone uses binder directly they could bypass that and cause us to crash
+    if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
+        ALOGE("createTrack() invalid stream type %d", streamType);
         lStatus = BAD_VALUE;
         goto Exit;
     }
@@ -410,14 +474,7 @@
             goto Exit;
         }
 
-        wclient = mClients.valueFor(pid);
-
-        if (wclient != NULL) {
-            client = wclient.promote();
-        } else {
-            client = new Client(this, pid);
-            mClients.add(pid, client);
-        }
+        client = registerPid_l(pid);
 
         ALOGV("createTrack() sessionId: %d", (sessionId == NULL) ? -2 : *sessionId);
         if (sessionId != NULL && *sessionId != AUDIO_SESSION_OUTPUT_MIX) {
@@ -427,6 +484,7 @@
                     // prevent same audio session on different output threads
                     uint32_t sessions = t->hasAudioSession(*sessionId);
                     if (sessions & PlaybackThread::TRACK_SESSION) {
+                        ALOGE("createTrack() session ID %d already in use", *sessionId);
                         lStatus = BAD_VALUE;
                         goto Exit;
                     }
@@ -447,7 +505,7 @@
         ALOGV("createTrack() lSessionId: %d", lSessionId);
 
         track = thread->createTrack_l(client, streamType, sampleRate, format,
-                channelMask, frameCount, sharedBuffer, lSessionId, &lStatus);
+                channelMask, frameCount, sharedBuffer, lSessionId, isTimed, &lStatus);
 
         // move effect chain to this output thread if an effect on same session was waiting
         // for a track to be created
@@ -467,13 +525,13 @@
     }
 
 Exit:
-    if(status) {
+    if (status != NULL) {
         *status = lStatus;
     }
     return trackHandle;
 }
 
-uint32_t AudioFlinger::sampleRate(int output) const
+uint32_t AudioFlinger::sampleRate(audio_io_handle_t output) const
 {
     Mutex::Autolock _l(mLock);
     PlaybackThread *thread = checkPlaybackThread_l(output);
@@ -484,7 +542,7 @@
     return thread->sampleRate();
 }
 
-int AudioFlinger::channelCount(int output) const
+int AudioFlinger::channelCount(audio_io_handle_t output) const
 {
     Mutex::Autolock _l(mLock);
     PlaybackThread *thread = checkPlaybackThread_l(output);
@@ -495,18 +553,18 @@
     return thread->channelCount();
 }
 
-uint32_t AudioFlinger::format(int output) const
+audio_format_t AudioFlinger::format(audio_io_handle_t output) const
 {
     Mutex::Autolock _l(mLock);
     PlaybackThread *thread = checkPlaybackThread_l(output);
     if (thread == NULL) {
         ALOGW("format() unknown thread %d", output);
-        return 0;
+        return AUDIO_FORMAT_INVALID;
     }
     return thread->format();
 }
 
-size_t AudioFlinger::frameCount(int output) const
+size_t AudioFlinger::frameCount(audio_io_handle_t output) const
 {
     Mutex::Autolock _l(mLock);
     PlaybackThread *thread = checkPlaybackThread_l(output);
@@ -517,7 +575,7 @@
     return thread->frameCount();
 }
 
-uint32_t AudioFlinger::latency(int output) const
+uint32_t AudioFlinger::latency(audio_io_handle_t output) const
 {
     Mutex::Autolock _l(mLock);
     PlaybackThread *thread = checkPlaybackThread_l(output);
@@ -540,25 +598,34 @@
         return PERMISSION_DENIED;
     }
 
+    float swmv = value;
+
     // when hw supports master volume, don't scale in sw mixer
-    { // scope for the lock
-        AutoMutex lock(mHardwareLock);
-        mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
-        if (mPrimaryHardwareDev->set_master_volume(mPrimaryHardwareDev, value) == NO_ERROR) {
-            value = 1.0f;
+    if (MVS_NONE != mMasterVolumeSupportLvl) {
+        for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+            AutoMutex lock(mHardwareLock);
+            audio_hw_device_t *dev = mAudioHwDevs[i];
+
+            mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
+            if (NULL != dev->set_master_volume) {
+                dev->set_master_volume(dev, value);
+            }
+            mHardwareStatus = AUDIO_HW_IDLE;
         }
-        mHardwareStatus = AUDIO_HW_IDLE;
+
+        swmv = 1.0;
     }
 
     Mutex::Autolock _l(mLock);
-    mMasterVolume = value;
-    for (uint32_t i = 0; i < mPlaybackThreads.size(); i++)
-       mPlaybackThreads.valueAt(i)->setMasterVolume(value);
+    mMasterVolume   = value;
+    mMasterVolumeSW = swmv;
+    for (size_t i = 0; i < mPlaybackThreads.size(); i++)
+        mPlaybackThreads.valueAt(i)->setMasterVolume(swmv);
 
     return NO_ERROR;
 }
 
-status_t AudioFlinger::setMode(int mode)
+status_t AudioFlinger::setMode(audio_mode_t mode)
 {
     status_t ret = initCheck();
     if (ret != NO_ERROR) {
@@ -569,7 +636,7 @@
     if (!settingsAllowed()) {
         return PERMISSION_DENIED;
     }
-    if ((mode < 0) || (mode >= AUDIO_MODE_CNT)) {
+    if (uint32_t(mode) >= AUDIO_MODE_CNT) {
         ALOGW("Illegal value: setMode(%d)", mode);
         return BAD_VALUE;
     }
@@ -584,8 +651,8 @@
     if (NO_ERROR == ret) {
         Mutex::Autolock _l(mLock);
         mMode = mode;
-        for (uint32_t i = 0; i < mPlaybackThreads.size(); i++)
-           mPlaybackThreads.valueAt(i)->setMode(mode);
+        for (size_t i = 0; i < mPlaybackThreads.size(); i++)
+            mPlaybackThreads.valueAt(i)->setMode(mode);
     }
 
     return ret;
@@ -618,6 +685,7 @@
     }
 
     bool state = AUDIO_MODE_INVALID;
+    AutoMutex lock(mHardwareLock);
     mHardwareStatus = AUDIO_HW_GET_MIC_MUTE;
     mPrimaryHardwareDev->get_mic_mute(mPrimaryHardwareDev, &state);
     mHardwareStatus = AUDIO_HW_IDLE;
@@ -632,31 +700,61 @@
     }
 
     Mutex::Autolock _l(mLock);
+    // This is an optimization, so PlaybackThread doesn't have to look at the one from AudioFlinger
     mMasterMute = muted;
-    for (uint32_t i = 0; i < mPlaybackThreads.size(); i++)
-       mPlaybackThreads.valueAt(i)->setMasterMute(muted);
+    for (size_t i = 0; i < mPlaybackThreads.size(); i++)
+        mPlaybackThreads.valueAt(i)->setMasterMute(muted);
 
     return NO_ERROR;
 }
 
 float AudioFlinger::masterVolume() const
 {
-    return mMasterVolume;
+    Mutex::Autolock _l(mLock);
+    return masterVolume_l();
+}
+
+float AudioFlinger::masterVolumeSW() const
+{
+    Mutex::Autolock _l(mLock);
+    return masterVolumeSW_l();
 }
 
 bool AudioFlinger::masterMute() const
 {
-    return mMasterMute;
+    Mutex::Autolock _l(mLock);
+    return masterMute_l();
 }
 
-status_t AudioFlinger::setStreamVolume(int stream, float value, int output)
+float AudioFlinger::masterVolume_l() const
+{
+    if (MVS_FULL == mMasterVolumeSupportLvl) {
+        float ret_val;
+        AutoMutex lock(mHardwareLock);
+
+        mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
+        ALOG_ASSERT((NULL != mPrimaryHardwareDev) &&
+                    (NULL != mPrimaryHardwareDev->get_master_volume),
+                "can't get master volume");
+
+        mPrimaryHardwareDev->get_master_volume(mPrimaryHardwareDev, &ret_val);
+        mHardwareStatus = AUDIO_HW_IDLE;
+        return ret_val;
+    }
+
+    return mMasterVolume;
+}
+
+status_t AudioFlinger::setStreamVolume(audio_stream_type_t stream, float value,
+        audio_io_handle_t output)
 {
     // check calling permissions
     if (!settingsAllowed()) {
         return PERMISSION_DENIED;
     }
 
-    if (stream < 0 || uint32_t(stream) >= AUDIO_STREAM_CNT) {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        ALOGE("setStreamVolume() invalid stream %d", stream);
         return BAD_VALUE;
     }
 
@@ -672,8 +770,8 @@
     mStreamTypes[stream].volume = value;
 
     if (thread == NULL) {
-        for (uint32_t i = 0; i < mPlaybackThreads.size(); i++) {
-           mPlaybackThreads.valueAt(i)->setStreamVolume(stream, value);
+        for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+            mPlaybackThreads.valueAt(i)->setStreamVolume(stream, value);
         }
     } else {
         thread->setStreamVolume(stream, value);
@@ -682,29 +780,30 @@
     return NO_ERROR;
 }
 
-status_t AudioFlinger::setStreamMute(int stream, bool muted)
+status_t AudioFlinger::setStreamMute(audio_stream_type_t stream, bool muted)
 {
     // check calling permissions
     if (!settingsAllowed()) {
         return PERMISSION_DENIED;
     }
 
-    if (stream < 0 || uint32_t(stream) >= AUDIO_STREAM_CNT ||
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT ||
         uint32_t(stream) == AUDIO_STREAM_ENFORCED_AUDIBLE) {
+        ALOGE("setStreamMute() invalid stream %d", stream);
         return BAD_VALUE;
     }
 
     AutoMutex lock(mLock);
     mStreamTypes[stream].mute = muted;
     for (uint32_t i = 0; i < mPlaybackThreads.size(); i++)
-       mPlaybackThreads.valueAt(i)->setStreamMute(stream, muted);
+        mPlaybackThreads.valueAt(i)->setStreamMute(stream, muted);
 
     return NO_ERROR;
 }
 
-float AudioFlinger::streamVolume(int stream, int output) const
+float AudioFlinger::streamVolume(audio_stream_type_t stream, audio_io_handle_t output) const
 {
-    if (stream < 0 || uint32_t(stream) >= AUDIO_STREAM_CNT) {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
         return 0.0f;
     }
 
@@ -717,26 +816,25 @@
         }
         volume = thread->streamVolume(stream);
     } else {
-        volume = mStreamTypes[stream].volume;
+        volume = streamVolume_l(stream);
     }
 
     return volume;
 }
 
-bool AudioFlinger::streamMute(int stream) const
+bool AudioFlinger::streamMute(audio_stream_type_t stream) const
 {
-    if (stream < 0 || stream >= (int)AUDIO_STREAM_CNT) {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
         return true;
     }
 
-    return mStreamTypes[stream].mute;
+    AutoMutex lock(mLock);
+    return streamMute_l(stream);
 }
 
-status_t AudioFlinger::setParameters(int ioHandle, const String8& keyValuePairs)
+status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
 {
-    status_t result;
-
-    ALOGV("setParameters(): io %d, keyvalue %s, tid %d, calling tid %d",
+    ALOGV("setParameters(): io %d, keyvalue %s, tid %d, calling pid %d",
             ioHandle, keyValuePairs.string(), gettid(), IPCThreadState::self()->getCallingPid());
     // check calling permissions
     if (!settingsAllowed()) {
@@ -745,15 +843,17 @@
 
     // ioHandle == 0 means the parameters are global to the audio hardware interface
     if (ioHandle == 0) {
-        AutoMutex lock(mHardwareLock);
-        mHardwareStatus = AUDIO_SET_PARAMETER;
         status_t final_result = NO_ERROR;
+        {
+        AutoMutex lock(mHardwareLock);
+        mHardwareStatus = AUDIO_HW_SET_PARAMETER;
         for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
             audio_hw_device_t *dev = mAudioHwDevs[i];
-            result = dev->set_parameters(dev, keyValuePairs.string());
+            status_t result = dev->set_parameters(dev, keyValuePairs.string());
             final_result = result ?: final_result;
         }
         mHardwareStatus = AUDIO_HW_IDLE;
+        }
         // disable AEC and NS if the device is a BT SCO headset supporting those pre processings
         AudioParameter param = AudioParameter(keyValuePairs);
         String8 value;
@@ -790,7 +890,7 @@
         thread = checkPlaybackThread_l(ioHandle);
         if (thread == NULL) {
             thread = checkRecordThread_l(ioHandle);
-        } else if (thread.get() == primaryPlaybackThread_l()) {
+        } else if (thread == primaryPlaybackThread_l()) {
             // indicate output device change to all input threads for pre processing
             AudioParameter param = AudioParameter(keyValuePairs);
             int value;
@@ -802,25 +902,30 @@
             }
         }
     }
-    if (thread != NULL) {
-        result = thread->setParameters(keyValuePairs);
-        return result;
+    if (thread != 0) {
+        return thread->setParameters(keyValuePairs);
     }
     return BAD_VALUE;
 }
 
-String8 AudioFlinger::getParameters(int ioHandle, const String8& keys)
+String8 AudioFlinger::getParameters(audio_io_handle_t ioHandle, const String8& keys) const
 {
-//    ALOGV("getParameters() io %d, keys %s, tid %d, calling tid %d",
+//    ALOGV("getParameters() io %d, keys %s, tid %d, calling pid %d",
 //            ioHandle, keys.string(), gettid(), IPCThreadState::self()->getCallingPid());
 
     if (ioHandle == 0) {
         String8 out_s8;
 
         for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+            char *s;
+            {
+            AutoMutex lock(mHardwareLock);
+            mHardwareStatus = AUDIO_HW_GET_PARAMETER;
             audio_hw_device_t *dev = mAudioHwDevs[i];
-            char *s = dev->get_parameters(dev, keys.string());
-            out_s8 += String8(s);
+            s = dev->get_parameters(dev, keys.string());
+            mHardwareStatus = AUDIO_HW_IDLE;
+            }
+            out_s8 += String8(s ? s : "");
             free(s);
         }
         return out_s8;
@@ -839,17 +944,21 @@
     return String8("");
 }
 
-size_t AudioFlinger::getInputBufferSize(uint32_t sampleRate, int format, int channelCount)
+size_t AudioFlinger::getInputBufferSize(uint32_t sampleRate, audio_format_t format, int channelCount) const
 {
     status_t ret = initCheck();
     if (ret != NO_ERROR) {
         return 0;
     }
 
-    return mPrimaryHardwareDev->get_input_buffer_size(mPrimaryHardwareDev, sampleRate, format, channelCount);
+    AutoMutex lock(mHardwareLock);
+    mHardwareStatus = AUDIO_HW_GET_INPUT_BUFFER_SIZE;
+    size_t size = mPrimaryHardwareDev->get_input_buffer_size(mPrimaryHardwareDev, sampleRate, format, channelCount);
+    mHardwareStatus = AUDIO_HW_IDLE;
+    return size;
 }
 
-unsigned int AudioFlinger::getInputFramesLost(int ioHandle)
+unsigned int AudioFlinger::getInputFramesLost(audio_io_handle_t ioHandle) const
 {
     if (ioHandle == 0) {
         return 0;
@@ -877,14 +986,15 @@
     }
 
     AutoMutex lock(mHardwareLock);
-    mHardwareStatus = AUDIO_SET_VOICE_VOLUME;
+    mHardwareStatus = AUDIO_HW_SET_VOICE_VOLUME;
     ret = mPrimaryHardwareDev->set_voice_volume(mPrimaryHardwareDev, value);
     mHardwareStatus = AUDIO_HW_IDLE;
 
     return ret;
 }
 
-status_t AudioFlinger::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, int output)
+status_t AudioFlinger::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
+        audio_io_handle_t output) const
 {
     status_t status;
 
@@ -903,7 +1013,7 @@
 
     Mutex::Autolock _l(mLock);
 
-    int pid = IPCThreadState::self()->getCallingPid();
+    pid_t pid = IPCThreadState::self()->getCallingPid();
     if (mNotificationClients.indexOfKey(pid) < 0) {
         sp<NotificationClient> notificationClient = new NotificationClient(this,
                                                                             client,
@@ -931,26 +1041,22 @@
 {
     Mutex::Autolock _l(mLock);
 
-    int index = mNotificationClients.indexOfKey(pid);
-    if (index >= 0) {
-        sp <NotificationClient> client = mNotificationClients.valueFor(pid);
-        ALOGV("removeNotificationClient() %p, pid %d", client.get(), pid);
-        mNotificationClients.removeItem(pid);
-    }
+    mNotificationClients.removeItem(pid);
 
     ALOGV("%d died, releasing its sessions", pid);
-    int num = mAudioSessionRefs.size();
+    size_t num = mAudioSessionRefs.size();
     bool removed = false;
-    for (int i = 0; i< num; i++) {
+    for (size_t i = 0; i < num; ) {
         AudioSessionRef *ref = mAudioSessionRefs.itemAt(i);
-        ALOGV(" pid %d @ %d", ref->pid, i);
-        if (ref->pid == pid) {
-            ALOGV(" removing entry for pid %d session %d", pid, ref->sessionid);
+        ALOGV(" pid %d @ %d", ref->mPid, i);
+        if (ref->mPid == pid) {
+            ALOGV(" removing entry for pid %d session %d", pid, ref->mSessionid);
             mAudioSessionRefs.removeAt(i);
             delete ref;
             removed = true;
-            i--;
             num--;
+        } else {
+            i++;
         }
     }
     if (removed) {
@@ -959,11 +1065,12 @@
 }
 
 // audioConfigChanged_l() must be called with AudioFlinger::mLock held
-void AudioFlinger::audioConfigChanged_l(int event, int ioHandle, void *param2)
+void AudioFlinger::audioConfigChanged_l(int event, audio_io_handle_t ioHandle, const void *param2)
 {
     size_t size = mNotificationClients.size();
     for (size_t i = 0; i < size; i++) {
-        mNotificationClients.valueAt(i)->client()->ioConfigChanged(event, ioHandle, param2);
+        mNotificationClients.valueAt(i)->audioFlingerClient()->ioConfigChanged(event, ioHandle,
+                                                                               param2);
     }
 }
 
@@ -977,19 +1084,24 @@
 
 // ----------------------------------------------------------------------------
 
-AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, int id, uint32_t device)
+AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
+        uint32_t device, type_t type)
     :   Thread(false),
-        mAudioFlinger(audioFlinger), mSampleRate(0), mFrameCount(0), mChannelCount(0),
-        mFrameSize(1), mFormat(0), mStandby(false), mId(id), mExiting(false),
-        mDevice(device)
+        mType(type),
+        mAudioFlinger(audioFlinger), mSampleRate(0), mFrameCount(0),
+        // mChannelMask
+        mChannelCount(0),
+        mFrameSize(1), mFormat(AUDIO_FORMAT_INVALID),
+        mParamStatus(NO_ERROR),
+        mStandby(false), mId(id),
+        mDevice(device),
+        mDeathRecipient(new PMDeathRecipient(this))
 {
-    mDeathRecipient = new PMDeathRecipient(this);
 }
 
 AudioFlinger::ThreadBase::~ThreadBase()
 {
     mParamCond.broadcast();
-    mNewParameters.clear();
     // do not lock the mutex in destructor
     releaseWakeLock_l();
     if (mPowerManager != 0) {
@@ -1000,40 +1112,26 @@
 
 void AudioFlinger::ThreadBase::exit()
 {
-    // keep a strong ref on ourself so that we wont get
-    // destroyed in the middle of requestExitAndWait()
-    sp <ThreadBase> strongMe = this;
-
     ALOGV("ThreadBase::exit");
     {
-        AutoMutex lock(&mLock);
-        mExiting = true;
+        // This lock prevents the following race in thread (uniprocessor for illustration):
+        //  if (!exitPending()) {
+        //      // context switch from here to exit()
+        //      // exit() calls requestExit(), what exitPending() observes
+        //      // exit() calls signal(), which is dropped since no waiters
+        //      // context switch back from exit() to here
+        //      mWaitWorkCV.wait(...);
+        //      // now thread is hung
+        //  }
+        AutoMutex lock(mLock);
         requestExit();
         mWaitWorkCV.signal();
     }
+    // When Thread::requestExitAndWait is made virtual and this method is renamed to
+    // "virtual status_t requestExitAndWait()", replace by "return Thread::requestExitAndWait();"
     requestExitAndWait();
 }
 
-uint32_t AudioFlinger::ThreadBase::sampleRate() const
-{
-    return mSampleRate;
-}
-
-int AudioFlinger::ThreadBase::channelCount() const
-{
-    return (int)mChannelCount;
-}
-
-uint32_t AudioFlinger::ThreadBase::format() const
-{
-    return mFormat;
-}
-
-size_t AudioFlinger::ThreadBase::frameCount() const
-{
-    return mFrameCount;
-}
-
 status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs)
 {
     status_t status;
@@ -1045,7 +1143,7 @@
     mWaitWorkCV.signal();
     // wait condition with timeout in case the thread loop has exited
     // before the request could be processed
-    if (mParamCond.waitRelative(mLock, kSetParametersTimeout) == NO_ERROR) {
+    if (mParamCond.waitRelative(mLock, kSetParametersTimeoutNs) == NO_ERROR) {
         status = mParamStatus;
         mWaitWorkCV.signal();
     } else {
@@ -1063,9 +1161,9 @@
 // sendConfigEvent_l() must be called with ThreadBase::mLock held
 void AudioFlinger::ThreadBase::sendConfigEvent_l(int event, int param)
 {
-    ConfigEvent *configEvent = new ConfigEvent();
-    configEvent->mEvent = event;
-    configEvent->mParam = param;
+    ConfigEvent configEvent;
+    configEvent.mEvent = event;
+    configEvent.mParam = param;
     mConfigEvents.add(configEvent);
     ALOGV("sendConfigEvent() num events %d event %d, param %d", mConfigEvents.size(), event, param);
     mWaitWorkCV.signal();
@@ -1074,17 +1172,16 @@
 void AudioFlinger::ThreadBase::processConfigEvents()
 {
     mLock.lock();
-    while(!mConfigEvents.isEmpty()) {
+    while (!mConfigEvents.isEmpty()) {
         ALOGV("processConfigEvents() remaining events %d", mConfigEvents.size());
-        ConfigEvent *configEvent = mConfigEvents[0];
+        ConfigEvent configEvent = mConfigEvents[0];
         mConfigEvents.removeAt(0);
         // release mLock before locking AudioFlinger mLock: lock order is always
         // AudioFlinger then ThreadBase to avoid cross deadlock
         mLock.unlock();
         mAudioFlinger->mLock.lock();
-        audioConfigChanged_l(configEvent->mEvent, configEvent->mParam);
+        audioConfigChanged_l(configEvent.mEvent, configEvent.mParam);
         mAudioFlinger->mLock.unlock();
-        delete configEvent;
         mLock.lock();
     }
     mLock.unlock();
@@ -1102,6 +1199,10 @@
         write(fd, buffer, strlen(buffer));
     }
 
+    snprintf(buffer, SIZE, "io handle: %d\n", mId);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "TID: %d\n", getTid());
+    result.append(buffer);
     snprintf(buffer, SIZE, "standby: %d\n", mStandby);
     result.append(buffer);
     snprintf(buffer, SIZE, "Sample rate: %d\n", mSampleRate);
@@ -1114,7 +1215,7 @@
     result.append(buffer);
     snprintf(buffer, SIZE, "Format: %d\n", mFormat);
     result.append(buffer);
-    snprintf(buffer, SIZE, "Frame size: %d\n", mFrameSize);
+    snprintf(buffer, SIZE, "Frame size: %u\n", mFrameSize);
     result.append(buffer);
 
     snprintf(buffer, SIZE, "\nPending setParameters commands: \n");
@@ -1131,7 +1232,7 @@
     snprintf(buffer, SIZE, " Index event param\n");
     result.append(buffer);
     for (size_t i = 0; i < mConfigEvents.size(); i++) {
-        snprintf(buffer, SIZE, " %02d    %02d    %d\n", i, mConfigEvents[i]->mEvent, mConfigEvents[i]->mParam);
+        snprintf(buffer, SIZE, " %02d    %02d    %d\n", i, mConfigEvents[i].mEvent, mConfigEvents[i].mParam);
         result.append(buffer);
     }
     result.append("\n");
@@ -1236,8 +1337,7 @@
 void AudioFlinger::ThreadBase::setEffectSuspended_l(
         const effect_uuid_t *type, bool suspend, int sessionId)
 {
-    sp<EffectChain> chain;
-    chain = getEffectChain_l(sessionId);
+    sp<EffectChain> chain = getEffectChain_l(sessionId);
     if (chain != 0) {
         if (type != NULL) {
             chain->setEffectSuspended_l(type, suspend);
@@ -1251,7 +1351,7 @@
 
 void AudioFlinger::ThreadBase::checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain)
 {
-    int index = mSuspendedSessions.indexOfKey(chain->sessionId());
+    ssize_t index = mSuspendedSessions.indexOfKey(chain->sessionId());
     if (index < 0) {
         return;
     }
@@ -1260,13 +1360,13 @@
             mSuspendedSessions.editValueAt(index);
 
     for (size_t i = 0; i < sessionEffects.size(); i++) {
-        sp <SuspendedSessionDesc> desc = sessionEffects.valueAt(i);
+        sp<SuspendedSessionDesc> desc = sessionEffects.valueAt(i);
         for (int j = 0; j < desc->mRefCount; j++) {
             if (sessionEffects.keyAt(i) == EffectChain::kKeyForSuspendAll) {
                 chain->setEffectSuspendedAll_l(true);
             } else {
                 ALOGV("checkSuspendOnAddEffectChain_l() suspending effects %08x",
-                     desc->mType.timeLow);
+                    desc->mType.timeLow);
                 chain->setEffectSuspended_l(&desc->mType, true);
             }
         }
@@ -1277,7 +1377,7 @@
                                                          bool suspend,
                                                          int sessionId)
 {
-    int index = mSuspendedSessions.indexOfKey(sessionId);
+    ssize_t index = mSuspendedSessions.indexOfKey(sessionId);
 
     KeyedVector <int, sp<SuspendedSessionDesc> > sessionEffects;
 
@@ -1301,7 +1401,7 @@
     }
     index = sessionEffects.indexOfKey(key);
 
-    sp <SuspendedSessionDesc> desc;
+    sp<SuspendedSessionDesc> desc;
     if (suspend) {
         if (index >= 0) {
             desc = sessionEffects.valueAt(index);
@@ -1365,24 +1465,39 @@
 
 AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinger,
                                              AudioStreamOut* output,
-                                             int id,
-                                             uint32_t device)
-    :   ThreadBase(audioFlinger, id, device),
-        mMixBuffer(0), mSuspended(0), mBytesWritten(0), mOutput(output),
-        mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false)
+                                             audio_io_handle_t id,
+                                             uint32_t device,
+                                             type_t type)
+    :   ThreadBase(audioFlinger, id, device, type),
+        mMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
+        // Assumes constructor is called by AudioFlinger with its mLock held,
+        // but it would be safer to explicitly pass initial masterMute as parameter
+        mMasterMute(audioFlinger->masterMute_l()),
+        // mStreamTypes[] initialized in constructor body
+        mOutput(output),
+        // Assumes constructor is called by AudioFlinger with its mLock held,
+        // but it would be safer to explicitly pass initial masterVolume as parameter
+        mMasterVolume(audioFlinger->masterVolumeSW_l()),
+        mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
+        mMixerStatus(MIXER_IDLE),
+        mPrevMixerStatus(MIXER_IDLE),
+        standbyDelay(AudioFlinger::mStandbyTimeInNsecs)
 {
-    snprintf(mName, kNameLength, "AudioOut_%d", id);
+    snprintf(mName, kNameLength, "AudioOut_%X", id);
 
     readOutputParameters();
 
-    mMasterVolume = mAudioFlinger->masterVolume();
-    mMasterMute = mAudioFlinger->masterMute();
-
-    for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
-        mStreamTypes[stream].volume = mAudioFlinger->streamVolumeInternal(stream);
-        mStreamTypes[stream].mute = mAudioFlinger->streamMute(stream);
-        mStreamTypes[stream].valid = true;
+    // mStreamTypes[AUDIO_STREAM_CNT] is initialized by stream_type_t default constructor
+    // There is no AUDIO_STREAM_MIN, and the ++ operator does not compile for the enum
+    for (audio_stream_type_t stream = (audio_stream_type_t) 0; stream < AUDIO_STREAM_CNT;
+            stream = (audio_stream_type_t) (stream + 1)) {
+        mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream);
+        mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
+        // initialized by stream_type_t default constructor
+        // mStreamTypes[stream].valid = true;
     }
+    // mStreamTypes[AUDIO_STREAM_CNT] exists but isn't explicitly initialized here,
+    // because mAudioFlinger doesn't have one to copy from
 }
 
 AudioFlinger::PlaybackThread::~PlaybackThread()
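
The cast-based loop in the PlaybackThread constructor above is the usual workaround for stepping through a plain C enum such as audio_stream_type_t, which has no AUDIO_STREAM_MIN and no ++ operator. A minimal standalone sketch of the same idiom (the color_t enum is made up for illustration and is not part of this patch):

    #include <stdio.h>

    enum color_t { COLOR_RED = 0, COLOR_GREEN, COLOR_BLUE, COLOR_CNT };

    int main() {
        // The casts stand in for the ++ operator, which is not defined for the enum type.
        for (color_t c = (color_t) 0; c < COLOR_CNT; c = (color_t) (c + 1)) {
            printf("enumerator %d\n", (int) c);
        }
        return 0;
    }
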
@@ -1419,13 +1534,10 @@
     result.append(buffer);
     result.append("   Name  Clien Typ Fmt Chn mask   Session Buf  S M F SRate LeftV RighV  Serv       User       Main buf   Aux Buf\n");
     for (size_t i = 0; i < mActiveTracks.size(); ++i) {
-        wp<Track> wTrack = mActiveTracks[i];
-        if (wTrack != 0) {
-            sp<Track> track = wTrack.promote();
-            if (track != 0) {
-                track->dump(buffer, SIZE);
-                result.append(buffer);
-            }
+        sp<Track> track = mActiveTracks[i].promote();
+        if (track != 0) {
+            track->dump(buffer, SIZE);
+            result.append(buffer);
         }
     }
     write(fd, result.string(), result.size());
@@ -1479,13 +1591,14 @@
 // PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held
 sp<AudioFlinger::PlaybackThread::Track>  AudioFlinger::PlaybackThread::createTrack_l(
         const sp<AudioFlinger::Client>& client,
-        int streamType,
+        audio_stream_type_t streamType,
         uint32_t sampleRate,
-        uint32_t format,
+        audio_format_t format,
         uint32_t channelMask,
         int frameCount,
         const sp<IMemory>& sharedBuffer,
         int sessionId,
+        bool isTimed,
         status_t *status)
 {
     sp<Track> track;
@@ -1522,22 +1635,28 @@
         // all tracks in same audio session must share the same routing strategy otherwise
         // conflicts will happen when tracks are moved from one output to another by audio policy
         // manager
-        uint32_t strategy =
-                AudioSystem::getStrategyForStream((audio_stream_type_t)streamType);
+        uint32_t strategy = AudioSystem::getStrategyForStream(streamType);
         for (size_t i = 0; i < mTracks.size(); ++i) {
             sp<Track> t = mTracks[i];
-            if (t != 0) {
-                if (sessionId == t->sessionId() &&
-                        strategy != AudioSystem::getStrategyForStream((audio_stream_type_t)t->type())) {
+            if (t != 0 && !t->isOutputTrack()) {
+                uint32_t actual = AudioSystem::getStrategyForStream(t->streamType());
+                if (sessionId == t->sessionId() && strategy != actual) {
+                    ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
+                            strategy, actual);
                     lStatus = BAD_VALUE;
                     goto Exit;
                 }
             }
         }
 
-        track = new Track(this, client, streamType, sampleRate, format,
-                channelMask, frameCount, sharedBuffer, sessionId);
-        if (track->getCblk() == NULL || track->name() < 0) {
+        if (!isTimed) {
+            track = new Track(this, client, streamType, sampleRate, format,
+                    channelMask, frameCount, sharedBuffer, sessionId);
+        } else {
+            track = TimedTrack::create(this, client, streamType, sampleRate, format,
+                    channelMask, frameCount, sharedBuffer, sessionId);
+        }
+        if (track == NULL || track->getCblk() == NULL || track->name() < 0) {
             lStatus = NO_MEMORY;
             goto Exit;
         }
@@ -1547,7 +1666,7 @@
         if (chain != 0) {
             ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
             track->setMainBuffer(chain->inBuffer());
-            chain->setStrategy(AudioSystem::getStrategyForStream((audio_stream_type_t)track->type()));
+            chain->setStrategy(AudioSystem::getStrategyForStream(track->streamType()));
             chain->incTrackCnt();
         }
 
@@ -1555,14 +1674,14 @@
         // createTrack() was called by the client process.
         if (!mStreamTypes[streamType].valid) {
             ALOGW("createTrack_l() on thread %p: invalidating track on stream %d",
-                 this, streamType);
+                this, streamType);
             android_atomic_or(CBLK_INVALID_ON, &track->mCblk->flags);
         }
     }
     lStatus = NO_ERROR;
 
 Exit:
-    if(status) {
+    if (status) {
         *status = lStatus;
     }
     return track;
@@ -1578,50 +1697,36 @@
     }
 }
 
-status_t AudioFlinger::PlaybackThread::setMasterVolume(float value)
+void AudioFlinger::PlaybackThread::setMasterVolume(float value)
 {
+    Mutex::Autolock _l(mLock);
     mMasterVolume = value;
-    return NO_ERROR;
 }
 
-status_t AudioFlinger::PlaybackThread::setMasterMute(bool muted)
+void AudioFlinger::PlaybackThread::setMasterMute(bool muted)
 {
-    mMasterMute = muted;
-    return NO_ERROR;
+    Mutex::Autolock _l(mLock);
+    setMasterMute_l(muted);
 }
 
-float AudioFlinger::PlaybackThread::masterVolume() const
+void AudioFlinger::PlaybackThread::setStreamVolume(audio_stream_type_t stream, float value)
 {
-    return mMasterVolume;
-}
-
-bool AudioFlinger::PlaybackThread::masterMute() const
-{
-    return mMasterMute;
-}
-
-status_t AudioFlinger::PlaybackThread::setStreamVolume(int stream, float value)
-{
+    Mutex::Autolock _l(mLock);
     mStreamTypes[stream].volume = value;
-    return NO_ERROR;
 }
 
-status_t AudioFlinger::PlaybackThread::setStreamMute(int stream, bool muted)
+void AudioFlinger::PlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
 {
+    Mutex::Autolock _l(mLock);
     mStreamTypes[stream].mute = muted;
-    return NO_ERROR;
 }
 
-float AudioFlinger::PlaybackThread::streamVolume(int stream) const
+float AudioFlinger::PlaybackThread::streamVolume(audio_stream_type_t stream) const
 {
+    Mutex::Autolock _l(mLock);
     return mStreamTypes[stream].volume;
 }
 
-bool AudioFlinger::PlaybackThread::streamMute(int stream) const
-{
-    return mStreamTypes[stream].mute;
-}
-
 // addTrack_l() must be called with ThreadBase::mLock held
 status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track)
 {
@@ -1691,7 +1796,7 @@
 // audioConfigChanged_l() must be called with AudioFlinger::mLock held
 void AudioFlinger::PlaybackThread::audioConfigChanged_l(int event, int param) {
     AudioSystem::OutputDescriptor desc;
-    void *param2 = 0;
+    void *param2 = NULL;
 
     ALOGV("PlaybackThread::audioConfigChanged_l, thread %p, event %d, param %d", this, event, param);
 
@@ -1721,12 +1826,12 @@
     mChannelMask = mOutput->stream->common.get_channels(&mOutput->stream->common);
     mChannelCount = (uint16_t)popcount(mChannelMask);
     mFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
-    mFrameSize = (uint16_t)audio_stream_frame_size(&mOutput->stream->common);
+    mFrameSize = audio_stream_frame_size(&mOutput->stream->common);
     mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize;
 
     // FIXME - Current mixer implementation only supports stereo output: Always
     // Allocate a stereo buffer even if HW output is mono.
-    if (mMixBuffer != NULL) delete[] mMixBuffer;
+    delete[] mMixBuffer;
     mMixBuffer = new int16_t[mFrameCount * 2];
     memset(mMixBuffer, 0, mFrameCount * 2 * sizeof(int16_t));
 
@@ -1744,7 +1849,7 @@
 
 status_t AudioFlinger::PlaybackThread::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames)
 {
-    if (halFrames == 0 || dspFrames == 0) {
+    if (halFrames == NULL || dspFrames == NULL) {
         return BAD_VALUE;
     }
     Mutex::Autolock _l(mLock);
@@ -1787,14 +1892,14 @@
         sp<Track> track = mTracks[i];
         if (sessionId == track->sessionId() &&
                 !(track->mCblk->flags & CBLK_INVALID_MSK)) {
-            return AudioSystem::getStrategyForStream((audio_stream_type_t) track->type());
+            return AudioSystem::getStrategyForStream(track->streamType());
         }
     }
     return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
 }
 
 
-AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::getOutput()
+AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::getOutput() const
 {
     Mutex::Autolock _l(mLock);
     return mOutput;
@@ -1831,13 +1936,11 @@
 
 // ----------------------------------------------------------------------------
 
-AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, int id, uint32_t device)
-    :   PlaybackThread(audioFlinger, output, id, device),
-        mAudioMixer(0), mPrevMixerStatus(MIXER_IDLE)
+AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
+        audio_io_handle_t id, uint32_t device, type_t type)
+    :   PlaybackThread(audioFlinger, output, id, device, type)
 {
-    mType = ThreadBase::MIXER;
     mAudioMixer = new AudioMixer(mFrameCount, mSampleRate);
-
     // FIXME - Current mixer implementation only supports stereo output
     if (mChannelCount == 1) {
         ALOGE("Invalid audio hardware channel count");
@@ -1849,234 +1952,388 @@
     delete mAudioMixer;
 }
 
-bool AudioFlinger::MixerThread::threadLoop()
+class CpuStats {
+public:
+    CpuStats();
+    void sample(const String8 &title);
+#ifdef DEBUG_CPU_USAGE
+private:
+    ThreadCpuUsage mCpuUsage;           // instantaneous thread CPU usage in wall clock ns
+    CentralTendencyStatistics mWcStats; // statistics on thread CPU usage in wall clock ns
+
+    CentralTendencyStatistics mHzStats; // statistics on thread CPU usage in cycles
+
+    int mCpuNum;                        // thread's current CPU number
+    int mCpukHz;                        // frequency of thread's current CPU in kHz
+#endif
+};
+
+CpuStats::CpuStats()
+#ifdef DEBUG_CPU_USAGE
+    : mCpuNum(-1), mCpukHz(-1)
+#endif
+{
+}
+
+void CpuStats::sample(const String8 &title) {
+#ifdef DEBUG_CPU_USAGE
+    // get current thread's delta CPU time in wall clock ns
+    double wcNs;
+    bool valid = mCpuUsage.sampleAndEnable(wcNs);
+
+    // record sample for wall clock statistics
+    if (valid) {
+        mWcStats.sample(wcNs);
+    }
+
+    // get the current CPU number
+    int cpuNum = sched_getcpu();
+
+    // get the current CPU frequency in kHz
+    int cpukHz = mCpuUsage.getCpukHz(cpuNum);
+
+    // check if either CPU number or frequency changed
+    if (cpuNum != mCpuNum || cpukHz != mCpukHz) {
+        mCpuNum = cpuNum;
+        mCpukHz = cpukHz;
+        // ignore sample for purposes of cycles
+        valid = false;
+    }
+
+    // if no change in CPU number or frequency, then record sample for cycle statistics
+    if (valid && mCpukHz > 0) {
+        double cycles = wcNs * cpukHz * 0.000001;
+        mHzStats.sample(cycles);
+    }
+
+    unsigned n = mWcStats.n();
+    // mCpuUsage.elapsed() is expensive, so don't call it every loop
+    if ((n & 127) == 1) {
+        long long elapsed = mCpuUsage.elapsed();
+        if (elapsed >= DEBUG_CPU_USAGE * 1000000000LL) {
+            double perLoop = elapsed / (double) n;
+            double perLoop100 = perLoop * 0.01;
+            double perLoop1k = perLoop * 0.001;
+            double mean = mWcStats.mean();
+            double stddev = mWcStats.stddev();
+            double minimum = mWcStats.minimum();
+            double maximum = mWcStats.maximum();
+            double meanCycles = mHzStats.mean();
+            double stddevCycles = mHzStats.stddev();
+            double minCycles = mHzStats.minimum();
+            double maxCycles = mHzStats.maximum();
+            mCpuUsage.resetElapsed();
+            mWcStats.reset();
+            mHzStats.reset();
+            ALOGD("CPU usage for %s over past %.1f secs\n"
+                "  (%u mixer loops at %.1f mean ms per loop):\n"
+                "  us per mix loop: mean=%.0f stddev=%.0f min=%.0f max=%.0f\n"
+                "  %% of wall: mean=%.1f stddev=%.1f min=%.1f max=%.1f\n"
+                "  MHz: mean=%.1f, stddev=%.1f, min=%.1f max=%.1f",
+                    title.string(),
+                    elapsed * .000000001, n, perLoop * .000001,
+                    mean * .001,
+                    stddev * .001,
+                    minimum * .001,
+                    maximum * .001,
+                    mean / perLoop100,
+                    stddev / perLoop100,
+                    minimum / perLoop100,
+                    maximum / perLoop100,
+                    meanCycles / perLoop1k,
+                    stddevCycles / perLoop1k,
+                    minCycles / perLoop1k,
+                    maxCycles / perLoop1k);
+
+        }
+    }
+#endif
+}
+
+void AudioFlinger::PlaybackThread::checkSilentMode_l()
+{
+    if (!mMasterMute) {
+        char value[PROPERTY_VALUE_MAX];
+        if (property_get("ro.audio.silent", value, "0") > 0) {
+            char *endptr;
+            unsigned long ul = strtoul(value, &endptr, 0);
+            if (*endptr == '\0' && ul != 0) {
+                ALOGD("Silence is golden");
+                // The setprop command will not allow a property to be changed after
+                // the first time it is set, so we don't have to worry about un-muting.
+                setMasterMute_l(true);
+            }
+        }
+    }
+}
+
+bool AudioFlinger::PlaybackThread::threadLoop()
 {
     Vector< sp<Track> > tracksToRemove;
-    uint32_t mixerStatus = MIXER_IDLE;
-    nsecs_t standbyTime = systemTime();
-    size_t mixBufferSize = mFrameCount * mFrameSize;
-    // FIXME: Relaxed timing because of a certain device that can't meet latency
-    // Should be reduced to 2x after the vendor fixes the driver issue
-    // increase threshold again due to low power audio mode. The way this warning threshold is
-    // calculated and its usefulness should be reconsidered anyway.
-    nsecs_t maxPeriod = seconds(mFrameCount) / mSampleRate * 15;
+
+    standbyTime = systemTime();
+
+    // MIXER
     nsecs_t lastWarning = 0;
-    bool longStandbyExit = false;
-    uint32_t activeSleepTime = activeSleepTimeUs();
-    uint32_t idleSleepTime = idleSleepTimeUs();
-    uint32_t sleepTime = idleSleepTime;
-    uint32_t sleepTimeShift = 0;
-    Vector< sp<EffectChain> > effectChains;
-#ifdef DEBUG_CPU_USAGE
-    ThreadCpuUsage cpu;
-    const CentralTendencyStatistics& stats = cpu.statistics();
-#endif
+if (mType == MIXER) {
+    longStandbyExit = false;
+}
+
+    // DUPLICATING
+    // FIXME could this be made local to while loop?
+    writeFrames = 0;
+
+    cacheParameters_l();
+    sleepTime = idleSleepTime;
+
+if (mType == MIXER) {
+    sleepTimeShift = 0;
+}
+
+    CpuStats cpuStats;
+    const String8 myName(String8::format("thread %p type %d TID %d", this, mType, gettid()));
 
     acquireWakeLock();
 
     while (!exitPending())
     {
-#ifdef DEBUG_CPU_USAGE
-        cpu.sampleAndEnable();
-        unsigned n = stats.n();
-        // cpu.elapsed() is expensive, so don't call it every loop
-        if ((n & 127) == 1) {
-            long long elapsed = cpu.elapsed();
-            if (elapsed >= DEBUG_CPU_USAGE * 1000000000LL) {
-                double perLoop = elapsed / (double) n;
-                double perLoop100 = perLoop * 0.01;
-                double mean = stats.mean();
-                double stddev = stats.stddev();
-                double minimum = stats.minimum();
-                double maximum = stats.maximum();
-                cpu.resetStatistics();
-                ALOGI("CPU usage over past %.1f secs (%u mixer loops at %.1f mean ms per loop):\n  us per mix loop: mean=%.0f stddev=%.0f min=%.0f max=%.0f\n  %% of wall: mean=%.1f stddev=%.1f min=%.1f max=%.1f",
-                        elapsed * .000000001, n, perLoop * .000001,
-                        mean * .001,
-                        stddev * .001,
-                        minimum * .001,
-                        maximum * .001,
-                        mean / perLoop100,
-                        stddev / perLoop100,
-                        minimum / perLoop100,
-                        maximum / perLoop100);
-            }
-        }
-#endif
+        cpuStats.sample(myName);
+
+        Vector< sp<EffectChain> > effectChains;
+
         processConfigEvents();
 
-        mixerStatus = MIXER_IDLE;
         { // scope for mLock
 
             Mutex::Autolock _l(mLock);
 
             if (checkForNewParameters_l()) {
-                mixBufferSize = mFrameCount * mFrameSize;
-                // FIXME: Relaxed timing because of a certain device that can't meet latency
-                // Should be reduced to 2x after the vendor fixes the driver issue
-                // increase threshold again due to low power audio mode. The way this warning
-                // threshold is calculated and its usefulness should be reconsidered anyway.
-                maxPeriod = seconds(mFrameCount) / mSampleRate * 15;
-                activeSleepTime = activeSleepTimeUs();
-                idleSleepTime = idleSleepTimeUs();
+                cacheParameters_l();
             }
 
-            const SortedVector< wp<Track> >& activeTracks = mActiveTracks;
+            saveOutputTracks();
 
             // put audio hardware into standby after short delay
-            if UNLIKELY((!activeTracks.size() && systemTime() > standbyTime) ||
-                        mSuspended) {
+            if (CC_UNLIKELY((!mActiveTracks.size() && systemTime() > standbyTime) ||
+                        mSuspended > 0)) {
                 if (!mStandby) {
-                    ALOGV("Audio hardware entering standby, mixer %p, mSuspended %d\n", this, mSuspended);
-                    mOutput->stream->common.standby(&mOutput->stream->common);
+
+                    threadLoop_standby();
+
                     mStandby = true;
                     mBytesWritten = 0;
                 }
 
-                if (!activeTracks.size() && mConfigEvents.isEmpty()) {
+                if (!mActiveTracks.size() && mConfigEvents.isEmpty()) {
                     // we're about to wait, flush the binder command buffer
                     IPCThreadState::self()->flushCommands();
 
+                    clearOutputTracks();
+
                     if (exitPending()) break;
 
                     releaseWakeLock_l();
                     // wait until we have something to do...
-                    ALOGV("MixerThread %p TID %d going to sleep\n", this, gettid());
+                    ALOGV("%s going to sleep", myName.string());
                     mWaitWorkCV.wait(mLock);
-                    ALOGV("MixerThread %p TID %d waking up\n", this, gettid());
+                    ALOGV("%s waking up", myName.string());
                     acquireWakeLock_l();
 
                     mPrevMixerStatus = MIXER_IDLE;
-                    if (mMasterMute == false) {
-                        char value[PROPERTY_VALUE_MAX];
-                        property_get("ro.audio.silent", value, "0");
-                        if (atoi(value)) {
-                            ALOGD("Silence is golden");
-                            setMasterMute(true);
-                        }
+
+                    checkSilentMode_l();
+
+                    standbyTime = systemTime() + standbyDelay;
+                    sleepTime = idleSleepTime;
+                    if (mType == MIXER) {
+                        sleepTimeShift = 0;
                     }
 
-                    standbyTime = systemTime() + kStandbyTimeInNsecs;
-                    sleepTime = idleSleepTime;
-                    sleepTimeShift = 0;
                     continue;
                 }
             }
 
-            mixerStatus = prepareTracks_l(activeTracks, &tracksToRemove);
+            mixer_state newMixerStatus = prepareTracks_l(&tracksToRemove);
+            // Shift in the new status; this could be a queue if it's
+            // useful to filter the mixer status over several cycles.
+            mPrevMixerStatus = mMixerStatus;
+            mMixerStatus = newMixerStatus;
 
             // prevent any changes in effect chain list and in each effect chain
             // during mixing and effect process as the audio buffers could be deleted
             // or modified if an effect is created or deleted
             lockEffectChains_l(effectChains);
-       }
-
-        if (LIKELY(mixerStatus == MIXER_TRACKS_READY)) {
-            // mix buffers...
-            mAudioMixer->process();
-            // increase sleep time progressively when application underrun condition clears.
-            // Only increase sleep time if the mixer is ready for two consecutive times to avoid
-            // that a steady state of alternating ready/not ready conditions keeps the sleep time
-            // such that we would underrun the audio HAL.
-            if ((sleepTime == 0) && (sleepTimeShift > 0)) {
-                sleepTimeShift--;
-            }
-            sleepTime = 0;
-            standbyTime = systemTime() + kStandbyTimeInNsecs;
-            //TODO: delay standby when effects have a tail
-        } else {
-            // If no tracks are ready, sleep once for the duration of an output
-            // buffer size, then write 0s to the output
-            if (sleepTime == 0) {
-                if (mixerStatus == MIXER_TRACKS_ENABLED) {
-                    sleepTime = activeSleepTime >> sleepTimeShift;
-                    if (sleepTime < kMinThreadSleepTimeUs) {
-                        sleepTime = kMinThreadSleepTimeUs;
-                    }
-                    // reduce sleep time in case of consecutive application underruns to avoid
-                    // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
-                    // duration we would end up writing less data than needed by the audio HAL if
-                    // the condition persists.
-                    if (sleepTimeShift < kMaxThreadSleepTimeShift) {
-                        sleepTimeShift++;
-                    }
-                } else {
-                    sleepTime = idleSleepTime;
-                }
-            } else if (mBytesWritten != 0 ||
-                       (mixerStatus == MIXER_TRACKS_ENABLED && longStandbyExit)) {
-                memset (mMixBuffer, 0, mixBufferSize);
-                sleepTime = 0;
-                ALOGV_IF((mBytesWritten == 0 && (mixerStatus == MIXER_TRACKS_ENABLED && longStandbyExit)), "anticipated start");
-            }
-            // TODO add standby time extension fct of effect tail
         }
 
-        if (mSuspended) {
+        if (CC_LIKELY(mMixerStatus == MIXER_TRACKS_READY)) {
+            threadLoop_mix();
+        } else {
+            threadLoop_sleepTime();
+        }
+
+        if (mSuspended > 0) {
             sleepTime = suspendSleepTimeUs();
         }
+
+        // only process effects if we're going to write
+        if (sleepTime == 0) {
+            for (size_t i = 0; i < effectChains.size(); i ++) {
+                effectChains[i]->process_l();
+            }
+        }
+
+        // enable changes in effect chain
+        unlockEffectChains(effectChains);
+
         // sleepTime == 0 means we must write to audio hardware
         if (sleepTime == 0) {
-             for (size_t i = 0; i < effectChains.size(); i ++) {
-                 effectChains[i]->process_l();
-             }
-             // enable changes in effect chain
-             unlockEffectChains(effectChains);
-            mLastWriteTime = systemTime();
-            mInWrite = true;
-            mBytesWritten += mixBufferSize;
 
-            int bytesWritten = (int)mOutput->stream->write(mOutput->stream, mMixBuffer, mixBufferSize);
-            if (bytesWritten < 0) mBytesWritten -= mixBufferSize;
-            mNumWrites++;
-            mInWrite = false;
+            threadLoop_write();
+
+if (mType == MIXER) {
+            // write blocked detection
             nsecs_t now = systemTime();
             nsecs_t delta = now - mLastWriteTime;
             if (!mStandby && delta > maxPeriod) {
                 mNumDelayedWrites++;
-                if ((now - lastWarning) > kWarningThrottle) {
+                if ((now - lastWarning) > kWarningThrottleNs) {
                     ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
                             ns2ms(delta), mNumDelayedWrites, this);
                     lastWarning = now;
                 }
+                // FIXME this is broken: longStandbyExit should be handled out of the if() and with
+                // a different threshold. Or completely removed for what it is worth anyway...
                 if (mStandby) {
                     longStandbyExit = true;
                 }
             }
+}
+
             mStandby = false;
         } else {
-            // enable changes in effect chain
-            unlockEffectChains(effectChains);
             usleep(sleepTime);
         }
 
-        // finally let go of all our tracks, without the lock held
+        // finally let go of removed track(s), without the lock held
         // since we can't guarantee the destructors won't acquire that
         // same lock.
         tracksToRemove.clear();
 
+        // FIXME I don't understand the need for this here;
+        //       it was in the original code but maybe the
+        //       assignment in saveOutputTracks() makes this unnecessary?
+        clearOutputTracks();
+
         // Effect chains will be actually deleted here if they were removed from
         // mEffectChains list during mixing or effects processing
         effectChains.clear();
+
+        // FIXME Note that the above .clear() is no longer necessary since effectChains
+        // is now local to this block, but will keep it for now (at least until merge done).
     }
 
+if (mType == MIXER || mType == DIRECT) {
+    // put output stream into standby mode
     if (!mStandby) {
         mOutput->stream->common.standby(&mOutput->stream->common);
     }
+}
+if (mType == DUPLICATING) {
+    // for DuplicatingThread, standby mode is handled by the outputTracks
+}
 
     releaseWakeLock();
 
-    ALOGV("MixerThread %p exiting", this);
+    ALOGV("Thread %p type %d exiting", this, mType);
     return false;
 }
 
+// shared by MIXER and DIRECT, overridden by DUPLICATING
+void AudioFlinger::PlaybackThread::threadLoop_write()
+{
+    // FIXME rewrite to reduce number of system calls
+    mLastWriteTime = systemTime();
+    mInWrite = true;
+    mBytesWritten += mixBufferSize;
+    int bytesWritten = (int)mOutput->stream->write(mOutput->stream, mMixBuffer, mixBufferSize);
+    if (bytesWritten < 0) mBytesWritten -= mixBufferSize;
+    mNumWrites++;
+    mInWrite = false;
+}
+
+// shared by MIXER and DIRECT, overridden by DUPLICATING
+void AudioFlinger::PlaybackThread::threadLoop_standby()
+{
+    ALOGV("Audio hardware entering standby, mixer %p, suspend count %u", this, mSuspended);
+    mOutput->stream->common.standby(&mOutput->stream->common);
+}
+
+void AudioFlinger::MixerThread::threadLoop_mix()
+{
+    // obtain the presentation timestamp of the next output buffer
+    int64_t pts;
+    status_t status = INVALID_OPERATION;
+
+    if (NULL != mOutput->stream->get_next_write_timestamp) {
+        status = mOutput->stream->get_next_write_timestamp(
+                mOutput->stream, &pts);
+    }
+
+    if (status != NO_ERROR) {
+        pts = AudioBufferProvider::kInvalidPTS;
+    }
+
+    // mix buffers...
+    mAudioMixer->process(pts);
+    // increase sleep time progressively when application underrun condition clears.
+    // Only increase sleep time if the mixer is ready for two consecutive times to avoid
+    // that a steady state of alternating ready/not ready conditions keeps the sleep time
+    // such that we would underrun the audio HAL.
+    if ((sleepTime == 0) && (sleepTimeShift > 0)) {
+        sleepTimeShift--;
+    }
+    sleepTime = 0;
+    standbyTime = systemTime() + standbyDelay;
+    //TODO: delay standby when effects have a tail
+}
+
+void AudioFlinger::MixerThread::threadLoop_sleepTime()
+{
+    // If no tracks are ready, sleep once for the duration of an output
+    // buffer size, then write 0s to the output
+    if (sleepTime == 0) {
+        if (mMixerStatus == MIXER_TRACKS_ENABLED) {
+            sleepTime = activeSleepTime >> sleepTimeShift;
+            if (sleepTime < kMinThreadSleepTimeUs) {
+                sleepTime = kMinThreadSleepTimeUs;
+            }
+            // reduce sleep time in case of consecutive application underruns to avoid
+            // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
+            // duration we would end up writing less data than needed by the audio HAL if
+            // the condition persists.
+            if (sleepTimeShift < kMaxThreadSleepTimeShift) {
+                sleepTimeShift++;
+            }
+        } else {
+            sleepTime = idleSleepTime;
+        }
+    } else if (mBytesWritten != 0 ||
+               (mMixerStatus == MIXER_TRACKS_ENABLED && longStandbyExit)) {
+        memset (mMixBuffer, 0, mixBufferSize);
+        sleepTime = 0;
+        ALOGV_IF((mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED && longStandbyExit)), "anticipated start");
+    }
+    // TODO add standby time extension fct of effect tail
+}
+
 // prepareTracks_l() must be called with ThreadBase::mLock held
-uint32_t AudioFlinger::MixerThread::prepareTracks_l(const SortedVector< wp<Track> >& activeTracks, Vector< sp<Track> > *tracksToRemove)
+AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTracks_l(
+        Vector< sp<Track> > *tracksToRemove)
 {
 
-    uint32_t mixerStatus = MIXER_IDLE;
+    mixer_state mixerStatus = MIXER_IDLE;
     // find out which tracks need to be processed
-    size_t count = activeTracks.size();
+    size_t count = mActiveTracks.size();
     size_t mixedTracks = 0;
     size_t tracksWithEffect = 0;
 
@@ -2096,15 +2353,16 @@
     }
 
     for (size_t i=0 ; i<count ; i++) {
-        sp<Track> t = activeTracks[i].promote();
+        sp<Track> t = mActiveTracks[i].promote();
         if (t == 0) continue;
 
+        // this const just means the local variable doesn't change
         Track* const track = t.get();
         audio_track_cblk_t* cblk = track->cblk();
 
         // The first time a track is added we wait
         // for all its buffers to be filled before processing it
-        mAudioMixer->setActiveTrack(track->name());
+        int name = track->name();
         // make sure that we have enough frames to mix one full buffer.
         // enforce this condition only once to enable draining the buffer in case the client
         // app does not call stop() and relies on underrun to stop:
@@ -2124,13 +2382,13 @@
                 // the minimum track buffer size is normally twice the number of frames necessary
                 // to fill one buffer and the resampler should not leave more than one buffer worth
                 // of unreleased frames after each pass, but just in case...
-                LOG_ASSERT(minFrames <= cblk->frameCount);
+                ALOG_ASSERT(minFrames <= cblk->frameCount);
             }
         }
-        if ((cblk->framesReady() >= minFrames) && track->isReady() &&
+        if ((track->framesReady() >= minFrames) && track->isReady() &&
                 !track->isPaused() && !track->isTerminated())
         {
-            //ALOGV("track %d u=%08x, s=%08x [OK] on thread %p", track->name(), cblk->user, cblk->server, this);
+            //ALOGV("track %d u=%08x, s=%08x [OK] on thread %p", name, cblk->user, cblk->server, this);
 
             mixedTracks++;
 
@@ -2143,8 +2401,8 @@
                 if (chain != 0) {
                     tracksWithEffect++;
                 } else {
-                    ALOGW("prepareTracks_l(): track %08x attached to effect but no chain found on session %d",
-                            track->name(), track->sessionId());
+                    ALOGW("prepareTracks_l(): track %d attached to effect but no chain found on session %d",
+                            name, track->sessionId());
                 }
             }
 
@@ -2157,7 +2415,7 @@
                     track->mState = TrackBase::ACTIVE;
                     param = AudioMixer::RAMP_VOLUME;
                 }
-                mAudioMixer->setParameter(AudioMixer::RESAMPLE, AudioMixer::RESET, NULL);
+                mAudioMixer->setParameter(name, AudioMixer::RESAMPLE, AudioMixer::RESET, NULL);
             } else if (cblk->server != 0) {
                 // If the track is stopped before the first frame was mixed,
                 // do not apply ramp
@@ -2167,7 +2425,7 @@
             // compute volume for this track
             uint32_t vl, vr, va;
             if (track->isMuted() || track->isPausing() ||
-                mStreamTypes[track->type()].mute) {
+                mStreamTypes[track->streamType()].mute) {
                 vl = vr = va = 0;
                 if (track->isPausing()) {
                     track->setPaused();
@@ -2175,12 +2433,33 @@
             } else {
 
                 // read original volumes with volume control
-                float typeVolume = mStreamTypes[track->type()].volume;
+                float typeVolume = mStreamTypes[track->streamType()].volume;
                 float v = masterVolume * typeVolume;
-                vl = (uint32_t)(v * cblk->volume[0]) << 12;
-                vr = (uint32_t)(v * cblk->volume[1]) << 12;
+                uint32_t vlr = cblk->getVolumeLR();
+                vl = vlr & 0xFFFF;
+                vr = vlr >> 16;
+                // track volumes come from shared memory, so can't be trusted and must be clamped
+                if (vl > MAX_GAIN_INT) {
+                    ALOGV("Track left volume out of range: %04X", vl);
+                    vl = MAX_GAIN_INT;
+                }
+                if (vr > MAX_GAIN_INT) {
+                    ALOGV("Track right volume out of range: %04X", vr);
+                    vr = MAX_GAIN_INT;
+                }
+                // now apply the master volume and stream type volume
+                vl = (uint32_t)(v * vl) << 12;
+                vr = (uint32_t)(v * vr) << 12;
+                // assuming master volume and stream type volume each go up to 1.0,
+                // vl and vr are now in 8.24 format
 
-                va = (uint32_t)(v * cblk->sendLevel);
+                uint16_t sendLevel = cblk->getSendLevel_U4_12();
+                // send level comes from shared memory and so may be corrupt
+                if (sendLevel > MAX_GAIN_INT) {
+                    ALOGV("Track send level out of range: %04X", sendLevel);
+                    sendLevel = MAX_GAIN_INT;
+                }
+                va = (uint32_t)(v * sendLevel);
             }
             // Delegate volume control to effect in track effect chain if needed
             if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
@@ -2197,38 +2476,40 @@
             }
 
             // Convert volumes from 8.24 to 4.12 format
-            int16_t left, right, aux;
-            uint32_t v_clamped = (vl + (1 << 11)) >> 12;
-            if (v_clamped > MAX_GAIN_INT) v_clamped = MAX_GAIN_INT;
-            left = int16_t(v_clamped);
-            v_clamped = (vr + (1 << 11)) >> 12;
-            if (v_clamped > MAX_GAIN_INT) v_clamped = MAX_GAIN_INT;
-            right = int16_t(v_clamped);
+            // This additional clamping is needed in case chain->setVolume_l() overshot
+            vl = (vl + (1 << 11)) >> 12;
+            if (vl > MAX_GAIN_INT) vl = MAX_GAIN_INT;
+            vr = (vr + (1 << 11)) >> 12;
+            if (vr > MAX_GAIN_INT) vr = MAX_GAIN_INT;
 
-            if (va > MAX_GAIN_INT) va = MAX_GAIN_INT;
-            aux = int16_t(va);
+            if (va > MAX_GAIN_INT) va = MAX_GAIN_INT;   // va is uint32_t, so no need to check for negative values
 
             // XXX: these things DON'T need to be done each time
-            mAudioMixer->setBufferProvider(track);
-            mAudioMixer->enable(AudioMixer::MIXING);
+            mAudioMixer->setBufferProvider(name, track);
+            mAudioMixer->enable(name);
 
-            mAudioMixer->setParameter(param, AudioMixer::VOLUME0, (void *)left);
-            mAudioMixer->setParameter(param, AudioMixer::VOLUME1, (void *)right);
-            mAudioMixer->setParameter(param, AudioMixer::AUXLEVEL, (void *)aux);
+            mAudioMixer->setParameter(name, param, AudioMixer::VOLUME0, (void *)vl);
+            mAudioMixer->setParameter(name, param, AudioMixer::VOLUME1, (void *)vr);
+            mAudioMixer->setParameter(name, param, AudioMixer::AUXLEVEL, (void *)va);
             mAudioMixer->setParameter(
+                name,
                 AudioMixer::TRACK,
                 AudioMixer::FORMAT, (void *)track->format());
             mAudioMixer->setParameter(
+                name,
                 AudioMixer::TRACK,
                 AudioMixer::CHANNEL_MASK, (void *)track->channelMask());
             mAudioMixer->setParameter(
+                name,
                 AudioMixer::RESAMPLE,
                 AudioMixer::SAMPLE_RATE,
                 (void *)(cblk->sampleRate));
             mAudioMixer->setParameter(
+                name,
                 AudioMixer::TRACK,
                 AudioMixer::MAIN_BUFFER, (void *)track->mainBuffer());
             mAudioMixer->setParameter(
+                name,
                 AudioMixer::TRACK,
                 AudioMixer::AUX_BUFFER, (void *)track->auxBuffer());
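
To make the fixed-point bookkeeping above concrete: getVolumeLR() packs the two client gains as U4.12 values, left in the low 16 bits and right in the high 16 bits, with unity at 0x1000 (the MAX_GAIN_INT clamp used above). Multiplying by the float master/stream volume and shifting left by 12 gives 8.24, and the rounded shift right by 12 brings it back to 4.12 for the mixer. A standalone rerun of that arithmetic with made-up volumes:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
        const uint32_t MAX_GAIN_INT = 0x1000;     // unity gain in U4.12
        uint32_t vlr = 0x10000800;                // hypothetical packed gains: right = 1.0, left = 0.5
        uint32_t vl  = vlr & 0xFFFF;              // 0x0800  (0.5 in U4.12)
        uint32_t vr  = vlr >> 16;                 // 0x1000  (1.0 in U4.12)
        float v = 1.0f * 0.75f;                   // master volume * stream type volume, both <= 1.0
        vl = (uint32_t)(v * vl) << 12;            // 0x600000  (0.375 in 8.24)
        vr = (uint32_t)(v * vr) << 12;            // 0xC00000  (0.75  in 8.24)
        vl = (vl + (1 << 11)) >> 12;              // 0x600  (0.375 in 4.12, rounded)
        vr = (vr + (1 << 11)) >> 12;              // 0xC00  (0.75  in 4.12, rounded)
        if (vl > MAX_GAIN_INT) vl = MAX_GAIN_INT; // same clamps as above, in case of overshoot
        if (vr > MAX_GAIN_INT) vr = MAX_GAIN_INT;
        printf("left = 0x%X, right = 0x%X\n", vl, vr);
        return 0;
    }
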
 
@@ -2242,7 +2523,7 @@
                 mixerStatus = MIXER_TRACKS_READY;
             }
         } else {
-            //ALOGV("track %d u=%08x, s=%08x [NOT READY] on thread %p", track->name(), cblk->user, cblk->server, this);
+            //ALOGV("track %d u=%08x, s=%08x [NOT READY] on thread %p", name, cblk->user, cblk->server, this);
             if (track->isStopped()) {
                 track->reset();
             }
@@ -2254,7 +2535,7 @@
                 // No buffers for this track. Give it a few chances to
                 // fill a buffer, then remove it from active list.
                 if (--(track->mRetryCount) <= 0) {
-                    ALOGV("BUFFER TIMEOUT: remove(%d) from active list on thread %p", track->name(), this);
+                    ALOGV("BUFFER TIMEOUT: remove(%d) from active list on thread %p", name, this);
                     tracksToRemove->add(track);
                     // indicate to client process that the track was disabled because of underrun
                     android_atomic_or(CBLK_DISABLED_ON, &cblk->flags);
@@ -2266,13 +2547,13 @@
                     mixerStatus = MIXER_TRACKS_ENABLED;
                 }
             }
-            mAudioMixer->disable(AudioMixer::MIXING);
+            mAudioMixer->disable(name);
         }
     }
 
     // remove all the tracks that need to be...
     count = tracksToRemove->size();
-    if (UNLIKELY(count)) {
+    if (CC_UNLIKELY(count)) {
         for (size_t i=0 ; i<count ; i++) {
             const sp<Track>& track = tracksToRemove->itemAt(i);
             mActiveTracks.remove(track);
@@ -2296,11 +2577,36 @@
         memset(mMixBuffer, 0, mFrameCount * mChannelCount * sizeof(int16_t));
     }
 
-    mPrevMixerStatus = mixerStatus;
     return mixerStatus;
 }
 
-void AudioFlinger::MixerThread::invalidateTracks(int streamType)
+/*
+The derived values that are cached:
+ - mixBufferSize from frame count * frame size
+ - activeSleepTime from activeSleepTimeUs()
+ - idleSleepTime from idleSleepTimeUs()
+ - standbyDelay from mActiveSleepTimeUs (DIRECT only)
+ - maxPeriod from frame count and sample rate (MIXER only)
+
+The parameters that affect these derived values are:
+ - frame count
+ - frame size
+ - sample rate
+ - device type: A2DP or not
+ - device latency
+ - format: PCM or not
+ - active sleep time
+ - idle sleep time
+*/
+
+void AudioFlinger::PlaybackThread::cacheParameters_l()
+{
+    mixBufferSize = mFrameCount * mFrameSize;
+    activeSleepTime = activeSleepTimeUs();
+    idleSleepTime = idleSleepTimeUs();
+}
+
+void AudioFlinger::MixerThread::invalidateTracks(audio_stream_type_t streamType)
 {
     ALOGV ("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d",
             this,  streamType, mTracks.size());
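
The cached-parameters comment above can be made concrete with some hypothetical output parameters (48 kHz, stereo 16-bit PCM, a 1024-frame HAL buffer; none of these figures come from the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main() {
        const uint32_t sampleRate = 48000;                  // assumed HAL output rate
        const uint32_t frameCount = 1024;                   // assumed HAL buffer size in frames
        const uint32_t frameSize  = 2 * sizeof(int16_t);    // stereo 16-bit PCM: 4 bytes per frame
        const uint32_t mixBufferSize = frameCount * frameSize;           // 4096 bytes per mix pass
        // MIXER only: warn if a HAL write blocks for more than 15x the nominal buffer duration
        const int64_t maxPeriodNs = (int64_t)frameCount * 1000000000LL / sampleRate * 15;
        printf("mixBufferSize = %u bytes, maxPeriod ~ %lld ms\n",
                mixBufferSize, (long long)(maxPeriodNs / 1000000));      // ~320 ms
        return 0;
    }
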
@@ -2309,14 +2615,14 @@
     size_t size = mTracks.size();
     for (size_t i = 0; i < size; i++) {
         sp<Track> t = mTracks[i];
-        if (t->type() == streamType) {
+        if (t->streamType() == streamType) {
             android_atomic_or(CBLK_INVALID_ON, &t->mCblk->flags);
             t->mCblk->cv.signal();
         }
     }
 }
 
-void AudioFlinger::PlaybackThread::setStreamValid(int streamType, bool valid)
+void AudioFlinger::PlaybackThread::setStreamValid(audio_stream_type_t streamType, bool valid)
 {
     ALOGV ("PlaybackThread::setStreamValid() thread %p, streamType %d, valid %d",
             this,  streamType, valid);
@@ -2353,7 +2659,7 @@
             reconfig = true;
         }
         if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
-            if (value != AUDIO_FORMAT_PCM_16_BIT) {
+            if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) {
                 status = BAD_VALUE;
             } else {
                 reconfig = true;
@@ -2368,7 +2674,7 @@
         }
         if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
             // do not accept frame count changes if tracks are open as the track buffer
-            // size depends on frame count and correct behavior would not be garantied
+            // size depends on frame count and correct behavior would not be guaranteed
             // if frame count is changed after track creation
             if (!mTracks.isEmpty()) {
                 status = INVALID_OPERATION;
@@ -2377,6 +2683,7 @@
             }
         }
         if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
+#ifdef ADD_BATTERY_DATA
             // when changing the audio output device, call addBatteryData to notify
             // the change
             if ((int)mDevice != value) {
@@ -2397,6 +2704,7 @@
                     addBatteryData(params);
                 }
             }
+#endif
 
             // forward device change to effects that have requested to be
             // aware of attached audio device.
@@ -2410,14 +2718,16 @@
             status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
                                                     keyValuePair.string());
             if (!mStandby && status == INVALID_OPERATION) {
-               mOutput->stream->common.standby(&mOutput->stream->common);
-               mStandby = true;
-               mBytesWritten = 0;
-               status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+                mOutput->stream->common.standby(&mOutput->stream->common);
+                mStandby = true;
+                mBytesWritten = 0;
+                status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
                                                        keyValuePair.string());
             }
             if (status == NO_ERROR && reconfig) {
                 delete mAudioMixer;
+                // for safety in case readOutputParameters() accesses mAudioMixer (it doesn't)
+                mAudioMixer = NULL;
                 readOutputParameters();
                 mAudioMixer = new AudioMixer(mFrameCount, mSampleRate);
                 for (size_t i = 0; i < mTracks.size() ; i++) {
@@ -2439,7 +2749,7 @@
         mParamCond.signal();
         // wait for condition with time out in case the thread calling ThreadBase::setParameters()
         // already timed out waiting for the status and will never signal the condition.
-        mWaitWorkCV.waitRelative(mLock, kSetParametersTimeout);
+        mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs);
     }
     return reconfig;
 }
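
checkForNewParameters_l(), which ends just above, consumes AudioParameter key/value strings queued by setParameters(). A minimal sketch of both ends of that exchange, assuming the libmedia AudioParameter and String8 declarations are in scope (the device value 2 is made up for illustration):

    // sender side: build the key/value string handed to setParameters()
    AudioParameter param;
    param.addInt(String8(AudioParameter::keyRouting), 2 /* hypothetical output device bitmask */);
    String8 keyValuePair = param.toString();               // e.g. "routing=2"

    // receiver side: what checkForNewParameters_l() does with it
    int device;
    if (param.getInt(String8(AudioParameter::keyRouting), device) == NO_ERROR) {
        // device == 2 here; the thread forwards the change to the effect chains and the HAL
    }
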
@@ -2468,42 +2778,193 @@
     return (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000);
 }
 
-// ----------------------------------------------------------------------------
-AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, int id, uint32_t device)
-    :   PlaybackThread(audioFlinger, output, id, device)
+void AudioFlinger::MixerThread::cacheParameters_l()
 {
-    mType = ThreadBase::DIRECT;
+    PlaybackThread::cacheParameters_l();
+
+    // FIXME: Relaxed timing because of a certain device that can't meet latency
+    // Should be reduced to 2x after the vendor fixes the driver issue
+    // increase threshold again due to low power audio mode. The way this warning
+    // threshold is calculated and its usefulness should be reconsidered anyway.
+    maxPeriod = seconds(mFrameCount) / mSampleRate * 15;
+}
+
+// ----------------------------------------------------------------------------
+AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
+        AudioStreamOut* output, audio_io_handle_t id, uint32_t device)
+    :   PlaybackThread(audioFlinger, output, id, device, DIRECT)
+        // mLeftVolFloat, mRightVolFloat
+        // mLeftVolShort, mRightVolShort
+{
 }
 
 AudioFlinger::DirectOutputThread::~DirectOutputThread()
 {
 }
 
-
-static inline int16_t clamp16(int32_t sample)
+AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prepareTracks_l(
+    Vector< sp<Track> > *tracksToRemove
+)
 {
-    if ((sample>>15) ^ (sample>>31))
-        sample = 0x7FFF ^ (sample>>31);
-    return sample;
+    sp<Track> trackToRemove;
+
+    mixer_state mixerStatus = MIXER_IDLE;
+
+    // find out which tracks need to be processed
+    if (mActiveTracks.size() != 0) {
+        sp<Track> t = mActiveTracks[0].promote();
+        // The track died recently
+        if (t == 0) return MIXER_IDLE;
+
+        Track* const track = t.get();
+        audio_track_cblk_t* cblk = track->cblk();
+
+        // The first time a track is added we wait
+        // for all its buffers to be filled before processing it
+        if (cblk->framesReady() && track->isReady() &&
+                !track->isPaused() && !track->isTerminated())
+        {
+            //ALOGV("track %d u=%08x, s=%08x [OK]", track->name(), cblk->user, cblk->server);
+
+            if (track->mFillingUpStatus == Track::FS_FILLED) {
+                track->mFillingUpStatus = Track::FS_ACTIVE;
+                mLeftVolFloat = mRightVolFloat = 0;
+                mLeftVolShort = mRightVolShort = 0;
+                if (track->mState == TrackBase::RESUMING) {
+                    track->mState = TrackBase::ACTIVE;
+                    rampVolume = true;
+                }
+            } else if (cblk->server != 0) {
+                // If the track is stopped before the first frame was mixed,
+                // do not apply ramp
+                rampVolume = true;
+            }
+            // compute volume for this track
+            float left, right;
+            if (track->isMuted() || mMasterMute || track->isPausing() ||
+                mStreamTypes[track->streamType()].mute) {
+                left = right = 0;
+                if (track->isPausing()) {
+                    track->setPaused();
+                }
+            } else {
+                float typeVolume = mStreamTypes[track->streamType()].volume;
+                float v = mMasterVolume * typeVolume;
+                uint32_t vlr = cblk->getVolumeLR();
+                float v_clamped = v * (vlr & 0xFFFF);
+                if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
+                left = v_clamped/MAX_GAIN;
+                v_clamped = v * (vlr >> 16);
+                if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
+                right = v_clamped/MAX_GAIN;
+            }
+
+            if (left != mLeftVolFloat || right != mRightVolFloat) {
+                mLeftVolFloat = left;
+                mRightVolFloat = right;
+
+                // If audio HAL implements volume control,
+                // force software volume to nominal value
+                if (mOutput->stream->set_volume(mOutput->stream, left, right) == NO_ERROR) {
+                    left = 1.0f;
+                    right = 1.0f;
+                }
+
+                // Convert volumes from float to 8.24
+                uint32_t vl = (uint32_t)(left * (1 << 24));
+                uint32_t vr = (uint32_t)(right * (1 << 24));
+
+                // Delegate volume control to effect in track effect chain if needed
+                // only one effect chain can be present on DirectOutputThread, so if
+                // there is one, the track is connected to it
+                if (!mEffectChains.isEmpty()) {
+                    // Do not ramp volume if volume is controlled by effect
+                    if (mEffectChains[0]->setVolume_l(&vl, &vr)) {
+                        rampVolume = false;
+                    }
+                }
+
+                // Convert volumes from 8.24 to 4.12 format
+                uint32_t v_clamped = (vl + (1 << 11)) >> 12;
+                if (v_clamped > MAX_GAIN_INT) v_clamped = MAX_GAIN_INT;
+                leftVol = (uint16_t)v_clamped;
+                v_clamped = (vr + (1 << 11)) >> 12;
+                if (v_clamped > MAX_GAIN_INT) v_clamped = MAX_GAIN_INT;
+                rightVol = (uint16_t)v_clamped;
+            } else {
+                leftVol = mLeftVolShort;
+                rightVol = mRightVolShort;
+                rampVolume = false;
+            }
+
+            // reset retry count
+            track->mRetryCount = kMaxTrackRetriesDirect;
+            mActiveTrack = t;
+            mixerStatus = MIXER_TRACKS_READY;
+        } else {
+            //ALOGV("track %d u=%08x, s=%08x [NOT READY]", track->name(), cblk->user, cblk->server);
+            if (track->isStopped()) {
+                track->reset();
+            }
+            if (track->isTerminated() || track->isStopped() || track->isPaused()) {
+                // We have consumed all the buffers of this track.
+                // Remove it from the list of active tracks.
+                trackToRemove = track;
+            } else {
+                // No buffers for this track. Give it a few chances to
+                // fill a buffer, then remove it from active list.
+                if (--(track->mRetryCount) <= 0) {
+                    ALOGV("BUFFER TIMEOUT: remove(%d) from active list", track->name());
+                    trackToRemove = track;
+                } else {
+                    mixerStatus = MIXER_TRACKS_ENABLED;
+                }
+            }
+        }
+    }
+
+    // FIXME merge this with similar code for removing multiple tracks
+    // remove all the tracks that need to be...
+    if (CC_UNLIKELY(trackToRemove != 0)) {
+        tracksToRemove->add(trackToRemove);
+        mActiveTracks.remove(trackToRemove);
+        if (!mEffectChains.isEmpty()) {
+            ALOGV("stopping track on chain %p for session Id: %d", mEffectChains[0].get(),
+                    trackToRemove->sessionId());
+            mEffectChains[0]->decActiveTrackCnt();
+        }
+        if (trackToRemove->isTerminated()) {
+            removeTrack_l(trackToRemove);
+        }
+    }
+
+    return mixerStatus;
 }
 
-static inline
-int32_t mul(int16_t in, int16_t v)
+void AudioFlinger::DirectOutputThread::threadLoop_mix()
 {
-#if defined(__arm__) && !defined(__thumb__)
-    int32_t out;
-    asm( "smulbb %[out], %[in], %[v] \n"
-         : [out]"=r"(out)
-         : [in]"%r"(in), [v]"r"(v)
-         : );
-    return out;
-#else
-    return in * int32_t(v);
-#endif
-}
+    AudioBufferProvider::Buffer buffer;
+    size_t frameCount = mFrameCount;
+    int8_t *curBuf = (int8_t *)mMixBuffer;
+    // output audio to hardware
+    while (frameCount) {
+        buffer.frameCount = frameCount;
+        mActiveTrack->getNextBuffer(&buffer);
+        if (CC_UNLIKELY(buffer.raw == NULL)) {
+            memset(curBuf, 0, frameCount * mFrameSize);
+            break;
+        }
+        memcpy(curBuf, buffer.raw, buffer.frameCount * mFrameSize);
+        frameCount -= buffer.frameCount;
+        curBuf += buffer.frameCount * mFrameSize;
+        mActiveTrack->releaseBuffer(&buffer);
+    }
+    sleepTime = 0;
+    standbyTime = systemTime() + standbyDelay;
+    mActiveTrack.clear();
 
-void AudioFlinger::DirectOutputThread::applyVolume(uint16_t leftVol, uint16_t rightVol, bool ramp)
-{
+    // apply volume
+
     // Do not apply volume on compressed audio
     if (!audio_is_linear_pcm(mFormat)) {
         return;
@@ -2514,14 +2975,14 @@
         size_t count = mFrameCount * mChannelCount;
         uint8_t *src = (uint8_t *)mMixBuffer + count-1;
         int16_t *dst = mMixBuffer + count-1;
-        while(count--) {
+        while (count--) {
             *dst-- = (int16_t)(*src--^0x80) << 8;
         }
     }
 
-    size_t frameCount = mFrameCount;
+    frameCount = mFrameCount;
     int16_t *out = mMixBuffer;
-    if (ramp) {
+    if (rampVolume) {
         if (mChannelCount == 1) {
             int32_t d = ((int32_t)leftVol - (int32_t)mLeftVolShort) << 16;
             int32_t vlInc = d / (int32_t)frameCount;
@@ -2567,7 +3028,7 @@
         size_t count = mFrameCount * mChannelCount;
         int16_t *src = mMixBuffer;
         uint8_t *dst = (uint8_t *)mMixBuffer;
-        while(count--) {
+        while (count--) {
             *dst++ = (uint8_t)(((int32_t)*src++ + (1<<7)) >> 8)^0x80;
         }
     }
@@ -2576,289 +3037,18 @@
     mRightVolShort = rightVol;
 }
 
-bool AudioFlinger::DirectOutputThread::threadLoop()
+void AudioFlinger::DirectOutputThread::threadLoop_sleepTime()
 {
-    uint32_t mixerStatus = MIXER_IDLE;
-    sp<Track> trackToRemove;
-    sp<Track> activeTrack;
-    nsecs_t standbyTime = systemTime();
-    int8_t *curBuf;
-    size_t mixBufferSize = mFrameCount*mFrameSize;
-    uint32_t activeSleepTime = activeSleepTimeUs();
-    uint32_t idleSleepTime = idleSleepTimeUs();
-    uint32_t sleepTime = idleSleepTime;
-    // use shorter standby delay as on normal output to release
-    // hardware resources as soon as possible
-    nsecs_t standbyDelay = microseconds(activeSleepTime*2);
-
-    acquireWakeLock();
-
-    while (!exitPending())
-    {
-        bool rampVolume;
-        uint16_t leftVol;
-        uint16_t rightVol;
-        Vector< sp<EffectChain> > effectChains;
-
-        processConfigEvents();
-
-        mixerStatus = MIXER_IDLE;
-
-        { // scope for the mLock
-
-            Mutex::Autolock _l(mLock);
-
-            if (checkForNewParameters_l()) {
-                mixBufferSize = mFrameCount*mFrameSize;
-                activeSleepTime = activeSleepTimeUs();
-                idleSleepTime = idleSleepTimeUs();
-                standbyDelay = microseconds(activeSleepTime*2);
-            }
-
-            // put audio hardware into standby after short delay
-            if UNLIKELY((!mActiveTracks.size() && systemTime() > standbyTime) ||
-                        mSuspended) {
-                // wait until we have something to do...
-                if (!mStandby) {
-                    ALOGV("Audio hardware entering standby, mixer %p\n", this);
-                    mOutput->stream->common.standby(&mOutput->stream->common);
-                    mStandby = true;
-                    mBytesWritten = 0;
-                }
-
-                if (!mActiveTracks.size() && mConfigEvents.isEmpty()) {
-                    // we're about to wait, flush the binder command buffer
-                    IPCThreadState::self()->flushCommands();
-
-                    if (exitPending()) break;
-
-                    releaseWakeLock_l();
-                    ALOGV("DirectOutputThread %p TID %d going to sleep\n", this, gettid());
-                    mWaitWorkCV.wait(mLock);
-                    ALOGV("DirectOutputThread %p TID %d waking up in active mode\n", this, gettid());
-                    acquireWakeLock_l();
-
-                    if (mMasterMute == false) {
-                        char value[PROPERTY_VALUE_MAX];
-                        property_get("ro.audio.silent", value, "0");
-                        if (atoi(value)) {
-                            ALOGD("Silence is golden");
-                            setMasterMute(true);
-                        }
-                    }
-
-                    standbyTime = systemTime() + standbyDelay;
-                    sleepTime = idleSleepTime;
-                    continue;
-                }
-            }
-
-            effectChains = mEffectChains;
-
-            // find out which tracks need to be processed
-            if (mActiveTracks.size() != 0) {
-                sp<Track> t = mActiveTracks[0].promote();
-                if (t == 0) continue;
-
-                Track* const track = t.get();
-                audio_track_cblk_t* cblk = track->cblk();
-
-                // The first time a track is added we wait
-                // for all its buffers to be filled before processing it
-                if (cblk->framesReady() && track->isReady() &&
-                        !track->isPaused() && !track->isTerminated())
-                {
-                    //ALOGV("track %d u=%08x, s=%08x [OK]", track->name(), cblk->user, cblk->server);
-
-                    if (track->mFillingUpStatus == Track::FS_FILLED) {
-                        track->mFillingUpStatus = Track::FS_ACTIVE;
-                        mLeftVolFloat = mRightVolFloat = 0;
-                        mLeftVolShort = mRightVolShort = 0;
-                        if (track->mState == TrackBase::RESUMING) {
-                            track->mState = TrackBase::ACTIVE;
-                            rampVolume = true;
-                        }
-                    } else if (cblk->server != 0) {
-                        // If the track is stopped before the first frame was mixed,
-                        // do not apply ramp
-                        rampVolume = true;
-                    }
-                    // compute volume for this track
-                    float left, right;
-                    if (track->isMuted() || mMasterMute || track->isPausing() ||
-                        mStreamTypes[track->type()].mute) {
-                        left = right = 0;
-                        if (track->isPausing()) {
-                            track->setPaused();
-                        }
-                    } else {
-                        float typeVolume = mStreamTypes[track->type()].volume;
-                        float v = mMasterVolume * typeVolume;
-                        float v_clamped = v * cblk->volume[0];
-                        if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
-                        left = v_clamped/MAX_GAIN;
-                        v_clamped = v * cblk->volume[1];
-                        if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
-                        right = v_clamped/MAX_GAIN;
-                    }
-
-                    if (left != mLeftVolFloat || right != mRightVolFloat) {
-                        mLeftVolFloat = left;
-                        mRightVolFloat = right;
-
-                        // If audio HAL implements volume control,
-                        // force software volume to nominal value
-                        if (mOutput->stream->set_volume(mOutput->stream, left, right) == NO_ERROR) {
-                            left = 1.0f;
-                            right = 1.0f;
-                        }
-
-                        // Convert volumes from float to 8.24
-                        uint32_t vl = (uint32_t)(left * (1 << 24));
-                        uint32_t vr = (uint32_t)(right * (1 << 24));
-
-                        // Delegate volume control to effect in track effect chain if needed
-                        // only one effect chain can be present on DirectOutputThread, so if
-                        // there is one, the track is connected to it
-                        if (!effectChains.isEmpty()) {
-                            // Do not ramp volume if volume is controlled by effect
-                            if(effectChains[0]->setVolume_l(&vl, &vr)) {
-                                rampVolume = false;
-                            }
-                        }
-
-                        // Convert volumes from 8.24 to 4.12 format
-                        uint32_t v_clamped = (vl + (1 << 11)) >> 12;
-                        if (v_clamped > MAX_GAIN_INT) v_clamped = MAX_GAIN_INT;
-                        leftVol = (uint16_t)v_clamped;
-                        v_clamped = (vr + (1 << 11)) >> 12;
-                        if (v_clamped > MAX_GAIN_INT) v_clamped = MAX_GAIN_INT;
-                        rightVol = (uint16_t)v_clamped;
-                    } else {
-                        leftVol = mLeftVolShort;
-                        rightVol = mRightVolShort;
-                        rampVolume = false;
-                    }
-
-                    // reset retry count
-                    track->mRetryCount = kMaxTrackRetriesDirect;
-                    activeTrack = t;
-                    mixerStatus = MIXER_TRACKS_READY;
-                } else {
-                    //ALOGV("track %d u=%08x, s=%08x [NOT READY]", track->name(), cblk->user, cblk->server);
-                    if (track->isStopped()) {
-                        track->reset();
-                    }
-                    if (track->isTerminated() || track->isStopped() || track->isPaused()) {
-                        // We have consumed all the buffers of this track.
-                        // Remove it from the list of active tracks.
-                        trackToRemove = track;
-                    } else {
-                        // No buffers for this track. Give it a few chances to
-                        // fill a buffer, then remove it from active list.
-                        if (--(track->mRetryCount) <= 0) {
-                            ALOGV("BUFFER TIMEOUT: remove(%d) from active list", track->name());
-                            trackToRemove = track;
-                        } else {
-                            mixerStatus = MIXER_TRACKS_ENABLED;
-                        }
-                    }
-                }
-            }
-
-            // remove all the tracks that need to be...
-            if (UNLIKELY(trackToRemove != 0)) {
-                mActiveTracks.remove(trackToRemove);
-                if (!effectChains.isEmpty()) {
-                    ALOGV("stopping track on chain %p for session Id: %d", effectChains[0].get(),
-                            trackToRemove->sessionId());
-                    effectChains[0]->decActiveTrackCnt();
-                }
-                if (trackToRemove->isTerminated()) {
-                    removeTrack_l(trackToRemove);
-                }
-            }
-
-            lockEffectChains_l(effectChains);
-       }
-
-        if (LIKELY(mixerStatus == MIXER_TRACKS_READY)) {
-            AudioBufferProvider::Buffer buffer;
-            size_t frameCount = mFrameCount;
-            curBuf = (int8_t *)mMixBuffer;
-            // output audio to hardware
-            while (frameCount) {
-                buffer.frameCount = frameCount;
-                activeTrack->getNextBuffer(&buffer);
-                if (UNLIKELY(buffer.raw == 0)) {
-                    memset(curBuf, 0, frameCount * mFrameSize);
-                    break;
-                }
-                memcpy(curBuf, buffer.raw, buffer.frameCount * mFrameSize);
-                frameCount -= buffer.frameCount;
-                curBuf += buffer.frameCount * mFrameSize;
-                activeTrack->releaseBuffer(&buffer);
-            }
-            sleepTime = 0;
-            standbyTime = systemTime() + standbyDelay;
+    if (sleepTime == 0) {
+        if (mMixerStatus == MIXER_TRACKS_ENABLED) {
+            sleepTime = activeSleepTime;
         } else {
-            if (sleepTime == 0) {
-                if (mixerStatus == MIXER_TRACKS_ENABLED) {
-                    sleepTime = activeSleepTime;
-                } else {
-                    sleepTime = idleSleepTime;
-                }
-            } else if (mBytesWritten != 0 && audio_is_linear_pcm(mFormat)) {
-                memset (mMixBuffer, 0, mFrameCount * mFrameSize);
-                sleepTime = 0;
-            }
+            sleepTime = idleSleepTime;
         }
-
-        if (mSuspended) {
-            sleepTime = suspendSleepTimeUs();
-        }
-        // sleepTime == 0 means we must write to audio hardware
-        if (sleepTime == 0) {
-            if (mixerStatus == MIXER_TRACKS_READY) {
-                applyVolume(leftVol, rightVol, rampVolume);
-            }
-            for (size_t i = 0; i < effectChains.size(); i ++) {
-                effectChains[i]->process_l();
-            }
-            unlockEffectChains(effectChains);
-
-            mLastWriteTime = systemTime();
-            mInWrite = true;
-            mBytesWritten += mixBufferSize;
-            int bytesWritten = (int)mOutput->stream->write(mOutput->stream, mMixBuffer, mixBufferSize);
-            if (bytesWritten < 0) mBytesWritten -= mixBufferSize;
-            mNumWrites++;
-            mInWrite = false;
-            mStandby = false;
-        } else {
-            unlockEffectChains(effectChains);
-            usleep(sleepTime);
-        }
-
-        // finally let go of removed track, without the lock held
-        // since we can't guarantee the destructors won't acquire that
-        // same lock.
-        trackToRemove.clear();
-        activeTrack.clear();
-
-        // Effect chains will be actually deleted here if they were removed from
-        // mEffectChains list during mixing or effects processing
-        effectChains.clear();
+    } else if (mBytesWritten != 0 && audio_is_linear_pcm(mFormat)) {
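+        // once something has been written, keep feeding silence to the HAL for linear PCM
+        // (presumably to avoid an audible underrun) instead of sleeping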
+        memset (mMixBuffer, 0, mFrameCount * mFrameSize);
+        sleepTime = 0;
     }
-
-    if (!mStandby) {
-        mOutput->stream->common.standby(&mOutput->stream->common);
-    }
-
-    releaseWakeLock();
-
-    ALOGV("DirectOutputThread %p exiting", this);
-    return false;
 }
 
 // getTrackName_l() must be called with ThreadBase::mLock held
@@ -2897,10 +3087,10 @@
             status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
                                                     keyValuePair.string());
             if (!mStandby && status == INVALID_OPERATION) {
-               mOutput->stream->common.standby(&mOutput->stream->common);
-               mStandby = true;
-               mBytesWritten = 0;
-               status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+                mOutput->stream->common.standby(&mOutput->stream->common);
+                mStandby = true;
+                mBytesWritten = 0;
+                status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
                                                        keyValuePair.string());
             }
             if (status == NO_ERROR && reconfig) {
@@ -2915,7 +3105,7 @@
         mParamCond.signal();
         // wait for condition with time out in case the thread calling ThreadBase::setParameters()
         // already timed out waiting for the status and will never signal the condition.
-        mWaitWorkCV.waitRelative(mLock, kSetParametersTimeout);
+        mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs);
     }
     return reconfig;
 }
@@ -2953,13 +3143,22 @@
     return time;
 }
 
+void AudioFlinger::DirectOutputThread::cacheParameters_l()
+{
+    PlaybackThread::cacheParameters_l();
+
+    // use shorter standby delay as on normal output to release
+    // hardware resources as soon as possible
+    standbyDelay = microseconds(activeSleepTime*2);
+}
 
 // ----------------------------------------------------------------------------
 
-AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger, AudioFlinger::MixerThread* mainThread, int id)
-    :   MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->device()), mWaitTimeMs(UINT_MAX)
+AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
+        AudioFlinger::MixerThread* mainThread, audio_io_handle_t id)
+    :   MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->device(), DUPLICATING),
+        mWaitTimeMs(UINT_MAX)
 {
-    mType = ThreadBase::DUPLICATING;
     addOutputTrack(mainThread);
 }
 
@@ -2968,166 +3167,74 @@
     for (size_t i = 0; i < mOutputTracks.size(); i++) {
         mOutputTracks[i]->destroy();
     }
-    mOutputTracks.clear();
 }
 
-bool AudioFlinger::DuplicatingThread::threadLoop()
+void AudioFlinger::DuplicatingThread::threadLoop_mix()
 {
-    Vector< sp<Track> > tracksToRemove;
-    uint32_t mixerStatus = MIXER_IDLE;
-    nsecs_t standbyTime = systemTime();
-    size_t mixBufferSize = mFrameCount*mFrameSize;
-    SortedVector< sp<OutputTrack> > outputTracks;
-    uint32_t writeFrames = 0;
-    uint32_t activeSleepTime = activeSleepTimeUs();
-    uint32_t idleSleepTime = idleSleepTimeUs();
-    uint32_t sleepTime = idleSleepTime;
-    Vector< sp<EffectChain> > effectChains;
-
-    acquireWakeLock();
-
-    while (!exitPending())
-    {
-        processConfigEvents();
-
-        mixerStatus = MIXER_IDLE;
-        { // scope for the mLock
-
-            Mutex::Autolock _l(mLock);
-
-            if (checkForNewParameters_l()) {
-                mixBufferSize = mFrameCount*mFrameSize;
-                updateWaitTime();
-                activeSleepTime = activeSleepTimeUs();
-                idleSleepTime = idleSleepTimeUs();
-            }
-
-            const SortedVector< wp<Track> >& activeTracks = mActiveTracks;
-
-            for (size_t i = 0; i < mOutputTracks.size(); i++) {
-                outputTracks.add(mOutputTracks[i]);
-            }
-
-            // put audio hardware into standby after short delay
-            if UNLIKELY((!activeTracks.size() && systemTime() > standbyTime) ||
-                         mSuspended) {
-                if (!mStandby) {
-                    for (size_t i = 0; i < outputTracks.size(); i++) {
-                        outputTracks[i]->stop();
-                    }
-                    mStandby = true;
-                    mBytesWritten = 0;
-                }
-
-                if (!activeTracks.size() && mConfigEvents.isEmpty()) {
-                    // we're about to wait, flush the binder command buffer
-                    IPCThreadState::self()->flushCommands();
-                    outputTracks.clear();
-
-                    if (exitPending()) break;
-
-                    releaseWakeLock_l();
-                    ALOGV("DuplicatingThread %p TID %d going to sleep\n", this, gettid());
-                    mWaitWorkCV.wait(mLock);
-                    ALOGV("DuplicatingThread %p TID %d waking up\n", this, gettid());
-                    acquireWakeLock_l();
-
-                    mPrevMixerStatus = MIXER_IDLE;
-                    if (mMasterMute == false) {
-                        char value[PROPERTY_VALUE_MAX];
-                        property_get("ro.audio.silent", value, "0");
-                        if (atoi(value)) {
-                            ALOGD("Silence is golden");
-                            setMasterMute(true);
-                        }
-                    }
-
-                    standbyTime = systemTime() + kStandbyTimeInNsecs;
-                    sleepTime = idleSleepTime;
-                    continue;
-                }
-            }
-
-            mixerStatus = prepareTracks_l(activeTracks, &tracksToRemove);
-
-            // prevent any changes in effect chain list and in each effect chain
-            // during mixing and effect process as the audio buffers could be deleted
-            // or modified if an effect is created or deleted
-            lockEffectChains_l(effectChains);
-        }
-
-        if (LIKELY(mixerStatus == MIXER_TRACKS_READY)) {
-            // mix buffers...
-            if (outputsReady(outputTracks)) {
-                mAudioMixer->process();
-            } else {
-                memset(mMixBuffer, 0, mixBufferSize);
-            }
-            sleepTime = 0;
-            writeFrames = mFrameCount;
-        } else {
-            if (sleepTime == 0) {
-                if (mixerStatus == MIXER_TRACKS_ENABLED) {
-                    sleepTime = activeSleepTime;
-                } else {
-                    sleepTime = idleSleepTime;
-                }
-            } else if (mBytesWritten != 0) {
-                // flush remaining overflow buffers in output tracks
-                for (size_t i = 0; i < outputTracks.size(); i++) {
-                    if (outputTracks[i]->isActive()) {
-                        sleepTime = 0;
-                        writeFrames = 0;
-                        memset(mMixBuffer, 0, mixBufferSize);
-                        break;
-                    }
-                }
-            }
-        }
-
-        if (mSuspended) {
-            sleepTime = suspendSleepTimeUs();
-        }
-        // sleepTime == 0 means we must write to audio hardware
-        if (sleepTime == 0) {
-            for (size_t i = 0; i < effectChains.size(); i ++) {
-                effectChains[i]->process_l();
-            }
-            // enable changes in effect chain
-            unlockEffectChains(effectChains);
-
-            standbyTime = systemTime() + kStandbyTimeInNsecs;
-            for (size_t i = 0; i < outputTracks.size(); i++) {
-                outputTracks[i]->write(mMixBuffer, writeFrames);
-            }
-            mStandby = false;
-            mBytesWritten += mixBufferSize;
-        } else {
-            // enable changes in effect chain
-            unlockEffectChains(effectChains);
-            usleep(sleepTime);
-        }
-
-        // finally let go of all our tracks, without the lock held
-        // since we can't guarantee the destructors won't acquire that
-        // same lock.
-        tracksToRemove.clear();
-        outputTracks.clear();
-
-        // Effect chains will be actually deleted here if they were removed from
-        // mEffectChains list during mixing or effects processing
-        effectChains.clear();
+    // mix buffers...
+    if (outputsReady(outputTracks)) {
+        mAudioMixer->process(AudioBufferProvider::kInvalidPTS);
+    } else {
+        memset(mMixBuffer, 0, mixBufferSize);
     }
+    sleepTime = 0;
+    writeFrames = mFrameCount;
+}
 
-    releaseWakeLock();
+void AudioFlinger::DuplicatingThread::threadLoop_sleepTime()
+{
+    if (sleepTime == 0) {
+        if (mMixerStatus == MIXER_TRACKS_ENABLED) {
+            sleepTime = activeSleepTime;
+        } else {
+            sleepTime = idleSleepTime;
+        }
+    } else if (mBytesWritten != 0) {
+        // flush remaining overflow buffers in output tracks
+        for (size_t i = 0; i < outputTracks.size(); i++) {
+            if (outputTracks[i]->isActive()) {
+                sleepTime = 0;
+                writeFrames = 0;
+                memset(mMixBuffer, 0, mixBufferSize);
+                break;
+            }
+        }
+    }
+}
 
-    return false;
+void AudioFlinger::DuplicatingThread::threadLoop_write()
+{
+    standbyTime = systemTime() + standbyDelay;
+    for (size_t i = 0; i < outputTracks.size(); i++) {
+        outputTracks[i]->write(mMixBuffer, writeFrames);
+    }
+    mBytesWritten += mixBufferSize;
+}
+
+void AudioFlinger::DuplicatingThread::threadLoop_standby()
+{
+    // DuplicatingThread implements standby by stopping all tracks
+    for (size_t i = 0; i < outputTracks.size(); i++) {
+        outputTracks[i]->stop();
+    }
+}
+
+void AudioFlinger::DuplicatingThread::saveOutputTracks()
+{
+    outputTracks = mOutputTracks;
+}
+
+void AudioFlinger::DuplicatingThread::clearOutputTracks()
+{
+    outputTracks.clear();
 }
 
 void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread)
 {
+    Mutex::Autolock _l(mLock);
+    // FIXME explain this formula
     int frameCount = (3 * mFrameCount * mSampleRate) / thread->sampleRate();
-    OutputTrack *outputTrack = new OutputTrack((ThreadBase *)thread,
+    OutputTrack *outputTrack = new OutputTrack(thread,
                                             this,
                                             mSampleRate,
                                             mFormat,
@@ -3137,7 +3244,7 @@
         thread->setStreamVolume(AUDIO_STREAM_CNT, 1.0f);
         mOutputTracks.add(outputTrack);
         ALOGV("addOutputTrack() track %p, on thread %p", outputTrack, thread);
-        updateWaitTime();
+        updateWaitTime_l();
     }
 }
 
@@ -3145,22 +3252,23 @@
 {
     Mutex::Autolock _l(mLock);
     for (size_t i = 0; i < mOutputTracks.size(); i++) {
-        if (mOutputTracks[i]->thread() == (ThreadBase *)thread) {
+        if (mOutputTracks[i]->thread() == thread) {
             mOutputTracks[i]->destroy();
             mOutputTracks.removeAt(i);
-            updateWaitTime();
+            updateWaitTime_l();
             return;
         }
     }
     ALOGV("removeOutputTrack(): unkonwn thread: %p", thread);
 }
 
-void AudioFlinger::DuplicatingThread::updateWaitTime()
+// caller must hold mLock
+void AudioFlinger::DuplicatingThread::updateWaitTime_l()
 {
     mWaitTimeMs = UINT_MAX;
     for (size_t i = 0; i < mOutputTracks.size(); i++) {
         sp<ThreadBase> strong = mOutputTracks[i]->thread().promote();
-        if (strong != NULL) {
+        if (strong != 0) {
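             // duration of two of that thread's output buffers, converted from frames to milliseconds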
             uint32_t waitTimeMs = (strong->frameCount() * 2 * 1000) / strong->sampleRate();
             if (waitTimeMs < mWaitTimeMs) {
                 mWaitTimeMs = waitTimeMs;
@@ -3170,10 +3278,10 @@
 }
 
 
-bool AudioFlinger::DuplicatingThread::outputsReady(SortedVector< sp<OutputTrack> > &outputTracks)
+bool AudioFlinger::DuplicatingThread::outputsReady(const SortedVector< sp<OutputTrack> > &outputTracks)
 {
     for (size_t i = 0; i < outputTracks.size(); i++) {
-        sp <ThreadBase> thread = outputTracks[i]->thread().promote();
+        sp<ThreadBase> thread = outputTracks[i]->thread().promote();
         if (thread == 0) {
             ALOGW("DuplicatingThread::outputsReady() could not promote thread on output track %p", outputTracks[i].get());
             return false;
@@ -3192,45 +3300,55 @@
     return (mWaitTimeMs * 1000) / 2;
 }
 
+void AudioFlinger::DuplicatingThread::cacheParameters_l()
+{
+    // updateWaitTime_l() sets mWaitTimeMs, which affects activeSleepTimeUs(), so call it first
+    updateWaitTime_l();
+
+    MixerThread::cacheParameters_l();
+}
+
 // ----------------------------------------------------------------------------
 
 // TrackBase constructor must be called with AudioFlinger::mLock held
 AudioFlinger::ThreadBase::TrackBase::TrackBase(
-            const wp<ThreadBase>& thread,
+            ThreadBase *thread,
             const sp<Client>& client,
             uint32_t sampleRate,
-            uint32_t format,
+            audio_format_t format,
             uint32_t channelMask,
             int frameCount,
-            uint32_t flags,
             const sp<IMemory>& sharedBuffer,
             int sessionId)
     :   RefBase(),
         mThread(thread),
         mClient(client),
-        mCblk(0),
+        mCblk(NULL),
+        // mBuffer
+        // mBufferEnd
         mFrameCount(0),
         mState(IDLE),
-        mClientTid(-1),
         mFormat(format),
-        mFlags(flags & ~SYSTEM_FLAGS_MASK),
+        mStepServerFailed(false),
         mSessionId(sessionId)
+        // mChannelCount
+        // mChannelMask
 {
     ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size());
 
     // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
-   size_t size = sizeof(audio_track_cblk_t);
-   uint8_t channelCount = popcount(channelMask);
-   size_t bufferSize = frameCount*channelCount*sizeof(int16_t);
-   if (sharedBuffer == 0) {
-       size += bufferSize;
-   }
+    size_t size = sizeof(audio_track_cblk_t);
+    uint8_t channelCount = popcount(channelMask);
+    size_t bufferSize = frameCount*channelCount*sizeof(int16_t);
+    if (sharedBuffer == 0) {
+        size += bufferSize;
+    }
 
-   if (client != NULL) {
+    if (client != NULL) {
         mCblkMemory = client->heap()->allocate(size);
         if (mCblkMemory != 0) {
             mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer());
-            if (mCblk) { // construct the shared structure in-place.
+            if (mCblk != NULL) { // construct the shared structure in-place.
                 new(mCblk) audio_track_cblk_t();
                 // clear all buffers
                 mCblk->frameCount = frameCount;
@@ -3253,45 +3371,52 @@
             client->heap()->dump("AudioTrack");
             return;
         }
-   } else {
-       mCblk = (audio_track_cblk_t *)(new uint8_t[size]);
-       if (mCblk) { // construct the shared structure in-place.
-           new(mCblk) audio_track_cblk_t();
-           // clear all buffers
-           mCblk->frameCount = frameCount;
-           mCblk->sampleRate = sampleRate;
-           mChannelCount = channelCount;
-           mChannelMask = channelMask;
-           mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
-           memset(mBuffer, 0, frameCount*channelCount*sizeof(int16_t));
-           // Force underrun condition to avoid false underrun callback until first data is
-           // written to buffer (other flags are cleared)
-           mCblk->flags = CBLK_UNDERRUN_ON;
-           mBufferEnd = (uint8_t *)mBuffer + bufferSize;
-       }
-   }
+    } else {
+        mCblk = (audio_track_cblk_t *)(new uint8_t[size]);
+        // construct the shared structure in-place.
+        new(mCblk) audio_track_cblk_t();
+        // clear all buffers
+        mCblk->frameCount = frameCount;
+        mCblk->sampleRate = sampleRate;
+        mChannelCount = channelCount;
+        mChannelMask = channelMask;
+        mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
+        memset(mBuffer, 0, frameCount*channelCount*sizeof(int16_t));
+        // Force underrun condition to avoid false underrun callback until first data is
+        // written to buffer (other flags are cleared)
+        mCblk->flags = CBLK_UNDERRUN_ON;
+        mBufferEnd = (uint8_t *)mBuffer + bufferSize;
+    }
 }
 
 AudioFlinger::ThreadBase::TrackBase::~TrackBase()
 {
-    if (mCblk) {
-        mCblk->~audio_track_cblk_t();   // destroy our shared-structure.
-        if (mClient == NULL) {
+    if (mCblk != NULL) {
+        if (mClient == 0) {
             delete mCblk;
+        } else {
+            mCblk->~audio_track_cblk_t();   // destroy our shared-structure.
         }
     }
-    mCblkMemory.clear();            // and free the shared memory
-    if (mClient != NULL) {
+    mCblkMemory.clear();    // free the shared memory before releasing the heap it belongs to
+    if (mClient != 0) {
+        // Client destructor must run with AudioFlinger mutex locked
         Mutex::Autolock _l(mClient->audioFlinger()->mLock);
+        // If the client's reference count drops to zero, the associated destructor
+        // must run with AudioFlinger lock held. Thus the explicit clear() rather than
+        // relying on the automatic clear() at end of scope.
         mClient.clear();
     }
 }
 
+// AudioBufferProvider interface
+// getNextBuffer() = 0;
+// This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack
 void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
 {
-    buffer->raw = 0;
+    buffer->raw = NULL;
     mFrameCount = buffer->frameCount;
-    step();
+    (void) step();      // ignore return value of step()
     buffer->frameCount = 0;
 }
 
@@ -3302,7 +3427,7 @@
     result = cblk->stepServer(mFrameCount);
     if (!result) {
         ALOGV("stepServer failed acquiring cblk mutex");
-        mFlags |= STEPSERVER_FAILED;
+        mStepServerFailed = true;
     }
     return result;
 }
@@ -3314,40 +3439,28 @@
     cblk->server = 0;
     cblk->userBase = 0;
     cblk->serverBase = 0;
-    mFlags &= (uint32_t)(~SYSTEM_FLAGS_MASK);
+    mStepServerFailed = false;
     ALOGV("TrackBase::reset");
 }
 
-sp<IMemory> AudioFlinger::ThreadBase::TrackBase::getCblk() const
-{
-    return mCblkMemory;
-}
-
 int AudioFlinger::ThreadBase::TrackBase::sampleRate() const {
     return (int)mCblk->sampleRate;
 }
 
-int AudioFlinger::ThreadBase::TrackBase::channelCount() const {
-    return (const int)mChannelCount;
-}
-
-uint32_t AudioFlinger::ThreadBase::TrackBase::channelMask() const {
-    return mChannelMask;
-}
-
 void* AudioFlinger::ThreadBase::TrackBase::getBuffer(uint32_t offset, uint32_t frames) const {
     audio_track_cblk_t* cblk = this->cblk();
-    int8_t *bufferStart = (int8_t *)mBuffer + (offset-cblk->serverBase)*cblk->frameSize;
-    int8_t *bufferEnd = bufferStart + frames * cblk->frameSize;
+    size_t frameSize = cblk->frameSize;
+    int8_t *bufferStart = (int8_t *)mBuffer + (offset-cblk->serverBase)*frameSize;
+    int8_t *bufferEnd = bufferStart + frames * frameSize;
 
     // Check validity of returned pointer in case the track control block would have been corrupted.
     if (bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd ||
-        ((unsigned long)bufferStart & (unsigned long)(cblk->frameSize - 1))) {
+        ((unsigned long)bufferStart & (unsigned long)(frameSize - 1))) {
         ALOGE("TrackBase::getBuffer buffer out of range:\n    start: %p, end %p , mBuffer %p mBufferEnd %p\n    \
                 server %d, serverBase %d, user %d, userBase %d",
                 bufferStart, bufferEnd, mBuffer, mBufferEnd,
                 cblk->server, cblk->serverBase, cblk->user, cblk->userBase);
-        return 0;
+        return NULL;
     }
 
     return bufferStart;
@@ -3357,32 +3470,28 @@
 
 // Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
 AudioFlinger::PlaybackThread::Track::Track(
-            const wp<ThreadBase>& thread,
+            PlaybackThread *thread,
             const sp<Client>& client,
-            int streamType,
+            audio_stream_type_t streamType,
             uint32_t sampleRate,
-            uint32_t format,
+            audio_format_t format,
             uint32_t channelMask,
             int frameCount,
             const sp<IMemory>& sharedBuffer,
             int sessionId)
-    :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount, 0, sharedBuffer, sessionId),
+    :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer, sessionId),
     mMute(false), mSharedBuffer(sharedBuffer), mName(-1), mMainBuffer(NULL), mAuxBuffer(NULL),
     mAuxEffectId(0), mHasVolumeController(false)
 {
     if (mCblk != NULL) {
-        sp<ThreadBase> baseThread = thread.promote();
-        if (baseThread != 0) {
-            PlaybackThread *playbackThread = (PlaybackThread *)baseThread.get();
-            mName = playbackThread->getTrackName_l();
-            mMainBuffer = playbackThread->mixBuffer();
+        if (thread != NULL) {
+            mName = thread->getTrackName_l();
+            mMainBuffer = thread->mixBuffer();
         }
-        ALOGV("Track constructor name %d, calling thread %d", mName, IPCThreadState::self()->getCallingPid());
+        ALOGV("Track constructor name %d, calling pid %d", mName, IPCThreadState::self()->getCallingPid());
         if (mName < 0) {
             ALOGE("no more track names available");
         }
-        mVolume[0] = 1.0f;
-        mVolume[1] = 1.0f;
         mStreamType = streamType;
         // NOTE: audio_track_cblk_t::frameSize for 8 bit PCM data is based on a sample size of
         // 16 bit because data is converted to 16 bit before being stored in buffer by AudioTrack
@@ -3404,7 +3513,7 @@
 {
     // NOTE: destroyTrack_l() can remove a strong reference to this Track
     // by removing it from mTracks vector, so there is a risk that this Tracks's
-    // desctructor is called. As the destructor needs to lock mLock,
+    // destructor is called. As the destructor needs to lock mLock,
     // we must acquire a strong reference on this Track before locking mLock
     // here so that the destructor is called only when exiting this function.
     // On the other hand, as long as Track::destroy() is only called by
@@ -3416,12 +3525,12 @@
         if (thread != 0) {
             if (!isOutputTrack()) {
                 if (mState == ACTIVE || mState == RESUMING) {
-                    AudioSystem::stopOutput(thread->id(),
-                                            (audio_stream_type_t)mStreamType,
-                                            mSessionId);
+                    AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
 
+#ifdef ADD_BATTERY_DATA
                     // to track the speaker usage
                     addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
+#endif
                 }
                 AudioSystem::releaseOutput(thread->id());
             }
@@ -3434,9 +3543,10 @@
 
 void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
 {
+    uint32_t vlr = mCblk->getVolumeLR();
     snprintf(buffer, size, "   %05d %05d %03u %03u 0x%08x %05u   %04u %1d %1d %1d %05u %05u %05u  0x%08x 0x%08x 0x%08x 0x%08x\n",
             mName - AudioMixer::TRACK0,
-            (mClient == NULL) ? getpid() : mClient->pid(),
+            (mClient == 0) ? getpid_cached : mClient->pid(),
             mStreamType,
             mFormat,
             mChannelMask,
@@ -3446,30 +3556,32 @@
             mMute,
             mFillingUpStatus,
             mCblk->sampleRate,
-            mCblk->volume[0],
-            mCblk->volume[1],
+            vlr & 0xFFFF,
+            vlr >> 16,
             mCblk->server,
             mCblk->user,
             (int)mMainBuffer,
             (int)mAuxBuffer);
 }
 
-status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+// AudioBufferProvider interface
+status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
+        AudioBufferProvider::Buffer* buffer, int64_t pts)
 {
-     audio_track_cblk_t* cblk = this->cblk();
-     uint32_t framesReady;
-     uint32_t framesReq = buffer->frameCount;
+    audio_track_cblk_t* cblk = this->cblk();
+    uint32_t framesReady;
+    uint32_t framesReq = buffer->frameCount;
 
-     // Check if last stepServer failed, try to step now
-     if (mFlags & TrackBase::STEPSERVER_FAILED) {
-         if (!step())  goto getNextBuffer_exit;
-         ALOGV("stepServer recovered");
-         mFlags &= ~TrackBase::STEPSERVER_FAILED;
-     }
+    // Check if last stepServer failed, try to step now
+    if (mStepServerFailed) {
+        if (!step())  goto getNextBuffer_exit;
+        ALOGV("stepServer recovered");
+        mStepServerFailed = false;
+    }
 
-     framesReady = cblk->framesReady();
+    framesReady = cblk->framesReady();
 
-     if (LIKELY(framesReady)) {
+    if (CC_LIKELY(framesReady)) {
         uint32_t s = cblk->server;
         uint32_t bufferEnd = cblk->serverBase + cblk->frameCount;
 
@@ -3481,24 +3593,28 @@
             framesReq = bufferEnd - s;
         }
 
-         buffer->raw = getBuffer(s, framesReq);
-         if (buffer->raw == 0) goto getNextBuffer_exit;
+        buffer->raw = getBuffer(s, framesReq);
+        if (buffer->raw == NULL) goto getNextBuffer_exit;
 
-         buffer->frameCount = framesReq;
+        buffer->frameCount = framesReq;
         return NO_ERROR;
-     }
+    }
 
 getNextBuffer_exit:
-     buffer->raw = 0;
-     buffer->frameCount = 0;
-     ALOGV("getNextBuffer() no more data for track %d on thread %p", mName, mThread.unsafe_get());
-     return NOT_ENOUGH_DATA;
+    buffer->raw = NULL;
+    buffer->frameCount = 0;
+    ALOGV("getNextBuffer() no more data for track %d on thread %p", mName, mThread.unsafe_get());
+    return NOT_ENOUGH_DATA;
+}
+
+uint32_t AudioFlinger::PlaybackThread::Track::framesReady() const {
+    return mCblk->framesReady();
 }
 
 bool AudioFlinger::PlaybackThread::Track::isReady() const {
     if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) return true;
 
-    if (mCblk->framesReady() >= mCblk->frameCount ||
+    if (framesReady() >= mCblk->frameCount ||
             (mCblk->flags & CBLK_FORCEREADY_MSK)) {
         mFillingUpStatus = FS_FILLED;
         android_atomic_and(~CBLK_FORCEREADY_MSK, &mCblk->flags);
@@ -3507,15 +3623,15 @@
     return false;
 }
 
-status_t AudioFlinger::PlaybackThread::Track::start()
+status_t AudioFlinger::PlaybackThread::Track::start(pid_t tid)
 {
     status_t status = NO_ERROR;
-    ALOGV("start(%d), calling thread %d session %d",
-            mName, IPCThreadState::self()->getCallingPid(), mSessionId);
+    ALOGV("start(%d), calling pid %d session %d tid %d",
+            mName, IPCThreadState::self()->getCallingPid(), mSessionId, tid);
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         Mutex::Autolock _l(thread->mLock);
-        int state = mState;
+        track_state state = mState;
         // here the track could be either new, or restarted
         // in both cases "unstop" the track
         if (mState == PAUSED) {
@@ -3528,15 +3644,15 @@
 
         if (!isOutputTrack() && state != ACTIVE && state != RESUMING) {
             thread->mLock.unlock();
-            status = AudioSystem::startOutput(thread->id(),
-                                              (audio_stream_type_t)mStreamType,
-                                              mSessionId);
+            status = AudioSystem::startOutput(thread->id(), mStreamType, mSessionId);
             thread->mLock.lock();
 
+#ifdef ADD_BATTERY_DATA
             // to track the speaker usage
             if (status == NO_ERROR) {
                 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
             }
+#endif
         }
         if (status == NO_ERROR) {
             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
@@ -3552,11 +3668,11 @@
 
 void AudioFlinger::PlaybackThread::Track::stop()
 {
-    ALOGV("stop(%d), calling thread %d", mName, IPCThreadState::self()->getCallingPid());
+    ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         Mutex::Autolock _l(thread->mLock);
-        int state = mState;
+        track_state state = mState;
         if (mState > STOPPED) {
             mState = STOPPED;
             // If the track is not active (PAUSED and buffers full), flush buffers
@@ -3568,20 +3684,20 @@
         }
         if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) {
             thread->mLock.unlock();
-            AudioSystem::stopOutput(thread->id(),
-                                    (audio_stream_type_t)mStreamType,
-                                    mSessionId);
+            AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
             thread->mLock.lock();
 
+#ifdef ADD_BATTERY_DATA
             // to track the speaker usage
             addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
+#endif
         }
     }
 }
 
 void AudioFlinger::PlaybackThread::Track::pause()
 {
-    ALOGV("pause(%d), calling thread %d", mName, IPCThreadState::self()->getCallingPid());
+    ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         Mutex::Autolock _l(thread->mLock);
@@ -3590,13 +3706,13 @@
             ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
             if (!isOutputTrack()) {
                 thread->mLock.unlock();
-                AudioSystem::stopOutput(thread->id(),
-                                        (audio_stream_type_t)mStreamType,
-                                        mSessionId);
+                AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
                 thread->mLock.lock();
 
+#ifdef ADD_BATTERY_DATA
                 // to track the speaker usage
                 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
+#endif
             }
         }
     }
@@ -3644,19 +3760,13 @@
     mMute = muted;
 }
 
-void AudioFlinger::PlaybackThread::Track::setVolume(float left, float right)
-{
-    mVolume[0] = left;
-    mVolume[1] = right;
-}
-
 status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
 {
     status_t status = DEAD_OBJECT;
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
-       PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
-       status = playbackThread->attachAuxEffect(this, EffectId);
+        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+        status = playbackThread->attachAuxEffect(this, EffectId);
     }
     return status;
 }
@@ -3667,31 +3777,422 @@
     mAuxBuffer = buffer;
 }
 
+// timed audio tracks
+
+sp<AudioFlinger::PlaybackThread::TimedTrack>
+AudioFlinger::PlaybackThread::TimedTrack::create(
+            PlaybackThread *thread,
+            const sp<Client>& client,
+            audio_stream_type_t streamType,
+            uint32_t sampleRate,
+            audio_format_t format,
+            uint32_t channelMask,
+            int frameCount,
+            const sp<IMemory>& sharedBuffer,
+            int sessionId) {
+    if (!client->reserveTimedTrack())
+        return NULL;
+
+    sp<TimedTrack> track = new TimedTrack(
+        thread, client, streamType, sampleRate, format, channelMask, frameCount,
+        sharedBuffer, sessionId);
+
+    if (track == NULL) {
+        client->releaseTimedTrack();
+        return NULL;
+    }
+
+    return track;
+}
+
+AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
+            PlaybackThread *thread,
+            const sp<Client>& client,
+            audio_stream_type_t streamType,
+            uint32_t sampleRate,
+            audio_format_t format,
+            uint32_t channelMask,
+            int frameCount,
+            const sp<IMemory>& sharedBuffer,
+            int sessionId)
+    : Track(thread, client, streamType, sampleRate, format, channelMask,
+            frameCount, sharedBuffer, sessionId),
+      mTimedSilenceBuffer(NULL),
+      mTimedSilenceBufferSize(0),
+      mTimedAudioOutputOnTime(false),
+      mMediaTimeTransformValid(false)
+{
+    LocalClock lc;
+    mLocalTimeFreq = lc.getLocalFreq();
+
+    mLocalTimeToSampleTransform.a_zero = 0;
+    mLocalTimeToSampleTransform.b_zero = 0;
+    mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
+    mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
+    LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
+                            &mLocalTimeToSampleTransform.a_to_b_denom);
+}
+
+AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
+    mClient->releaseTimedTrack();
+    delete [] mTimedSilenceBuffer;
+}
+
+status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
+    size_t size, sp<IMemory>* buffer) {
+
+    Mutex::Autolock _l(mTimedBufferQueueLock);
+
+    trimTimedBufferQueue_l();
+
+    // lazily initialize the shared memory heap for timed buffers
+    if (mTimedMemoryDealer == NULL) {
+        const int kTimedBufferHeapSize = 512 << 10;
+
+        mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
+                                              "AudioFlingerTimed");
+        if (mTimedMemoryDealer == NULL)
+            return NO_MEMORY;
+    }
+
+    sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
+    if (newBuffer == NULL) {
+        newBuffer = mTimedMemoryDealer->allocate(size);
+        if (newBuffer == NULL)
+            return NO_MEMORY;
+    }
+
+    *buffer = newBuffer;
+    return NO_ERROR;
+}
+
+// caller must hold mTimedBufferQueueLock
+void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
+    int64_t mediaTimeNow;
+    {
+        Mutex::Autolock mttLock(mMediaTimeTransformLock);
+        if (!mMediaTimeTransformValid)
+            return;
+
+        int64_t targetTimeNow;
+        status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
+            ? mCCHelper.getCommonTime(&targetTimeNow)
+            : mCCHelper.getLocalTime(&targetTimeNow);
+
+        if (OK != res)
+            return;
+
+        if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
+                                                    &mediaTimeNow)) {
+            return;
+        }
+    }
+
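+    // discard every queued buffer whose PTS is at or before the current media time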
+    size_t trimIndex;
+    for (trimIndex = 0; trimIndex < mTimedBufferQueue.size(); trimIndex++) {
+        if (mTimedBufferQueue[trimIndex].pts() > mediaTimeNow)
+            break;
+    }
+
+    if (trimIndex) {
+        mTimedBufferQueue.removeItemsAt(0, trimIndex);
+    }
+}
+
+status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
+    const sp<IMemory>& buffer, int64_t pts) {
+
+    {
+        Mutex::Autolock mttLock(mMediaTimeTransformLock);
+        if (!mMediaTimeTransformValid)
+            return INVALID_OPERATION;
+    }
+
+    Mutex::Autolock _l(mTimedBufferQueueLock);
+
+    mTimedBufferQueue.add(TimedBuffer(buffer, pts));
+
+    return NO_ERROR;
+}
+
+status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
+    const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {
+
+    ALOGV("%s az=%lld bz=%lld n=%d d=%u tgt=%d", __PRETTY_FUNCTION__,
+         xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
+         target);
+
+    if (!(target == TimedAudioTrack::LOCAL_TIME ||
+          target == TimedAudioTrack::COMMON_TIME)) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mMediaTimeTransformLock);
+    mMediaTimeTransform = xform;
+    mMediaTimeTransformTarget = target;
+    mMediaTimeTransformValid = true;
+
+    return NO_ERROR;
+}
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+// implementation of getNextBuffer for tracks whose buffers have timestamps
+status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
+    AudioBufferProvider::Buffer* buffer, int64_t pts)
+{
+    if (pts == AudioBufferProvider::kInvalidPTS) {
+        buffer->raw = 0;
+        buffer->frameCount = 0;
+        return INVALID_OPERATION;
+    }
+
+    Mutex::Autolock _l(mTimedBufferQueueLock);
+
+    while (true) {
+
+        // if we have no timed buffers, then fail
+        if (mTimedBufferQueue.isEmpty()) {
+            buffer->raw = 0;
+            buffer->frameCount = 0;
+            return NOT_ENOUGH_DATA;
+        }
+
+        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
+
+        // calculate the PTS of the head of the timed buffer queue expressed in
+        // local time
+        int64_t headLocalPTS;
+        {
+            Mutex::Autolock mttLock(mMediaTimeTransformLock);
+
+            ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid");
+
+            if (mMediaTimeTransform.a_to_b_denom == 0) {
+                // the transform represents a pause, so yield silence
+                timedYieldSilence(buffer->frameCount, buffer);
+                return NO_ERROR;
+            }
+
+            int64_t transformedPTS;
+            if (!mMediaTimeTransform.doForwardTransform(head.pts(),
+                                                        &transformedPTS)) {
+                // the transform failed.  this shouldn't happen, but if it does
+                // then just drop this buffer
+                ALOGW("timedGetNextBuffer transform failed");
+                buffer->raw = 0;
+                buffer->frameCount = 0;
+                mTimedBufferQueue.removeAt(0);
+                return NO_ERROR;
+            }
+
+            if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
+                if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
+                                                          &headLocalPTS)) {
+                    buffer->raw = 0;
+                    buffer->frameCount = 0;
+                    return INVALID_OPERATION;
+                }
+            } else {
+                headLocalPTS = transformedPTS;
+            }
+        }
+
+        // adjust the head buffer's PTS to reflect the portion of the head buffer
+        // that has already been consumed
+        int64_t effectivePTS = headLocalPTS +
+                ((head.position() / mCblk->frameSize) * mLocalTimeFreq / sampleRate());
+
+        // Calculate the delta in samples between the head of the input buffer
+        // queue and the start of the next output buffer that will be written.
+        // If the transformation fails because of over or underflow, it means
+        // that the sample's position in the output stream is so far out of
+        // whack that it should just be dropped.
+        int64_t sampleDelta;
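+        // a delta of 2^31 local-time ticks or more would overflow the << 32 below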
+        if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
+            ALOGV("*** head buffer is too far from PTS: dropped buffer");
+            mTimedBufferQueue.removeAt(0);
+            continue;
+        }
+        if (!mLocalTimeToSampleTransform.doForwardTransform(
+                (effectivePTS - pts) << 32, &sampleDelta)) {
+            ALOGV("*** too late during sample rate transform: dropped buffer");
+            mTimedBufferQueue.removeAt(0);
+            continue;
+        }
+
+        ALOGV("*** %s head.pts=%lld head.pos=%d pts=%lld sampleDelta=[%d.%08x]",
+             __PRETTY_FUNCTION__, head.pts(), head.position(), pts,
+             static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1) + (sampleDelta >> 32)),
+             static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));
+
+        // if the delta between the ideal placement for the next input sample and
+        // the current output position is within this threshold, then we will
+        // concatenate the next input samples to the previous output
+        const int64_t kSampleContinuityThreshold =
+                (static_cast<int64_t>(sampleRate()) << 32) / 10;
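+        // (sampleRate()/10 frames in 32.32 fixed point, i.e. 100 ms of audio)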
+
+        // if this is the first buffer of audio that we're emitting from this track
+        // then it should be almost exactly on time.
+        const int64_t kSampleStartupThreshold = 1LL << 32;
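+        // (one frame, in the same 32.32 fixed-point representation)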
+
+        if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
+            (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
+            // the next input is close enough to being on time, so concatenate it
+            // with the last output
+            timedYieldSamples(buffer);
+
+            ALOGV("*** on time: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
+            return NO_ERROR;
+        } else if (sampleDelta > 0) {
+            // the gap between the current output position and the proper start of
+            // the next input sample is too big, so fill it with silence
+            uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
+
+            timedYieldSilence(framesUntilNextInput, buffer);
+            ALOGV("*** silence: frameCount=%u", buffer->frameCount);
+            return NO_ERROR;
+        } else {
+            // the next input sample is late
+            uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
+            size_t onTimeSamplePosition =
+                    head.position() + lateFrames * mCblk->frameSize;
+
+            if (onTimeSamplePosition > head.buffer()->size()) {
+                // all the remaining samples in the head are too late, so
+                // drop it and move on
+                ALOGV("*** too late: dropped buffer");
+                mTimedBufferQueue.removeAt(0);
+                continue;
+            } else {
+                // skip over the late samples
+                head.setPosition(onTimeSamplePosition);
+
+                // yield the available samples
+                timedYieldSamples(buffer);
+
+                ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
+                return NO_ERROR;
+            }
+        }
+    }
+}
+
+// Yield samples from the timed buffer queue head up to the given output
+// buffer's capacity.
+//
+// Caller must hold mTimedBufferQueueLock
+void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples(
+    AudioBufferProvider::Buffer* buffer) {
+
+    const TimedBuffer& head = mTimedBufferQueue[0];
+
+    buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
+                   head.position());
+
+    uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
+                                 mCblk->frameSize);
+    size_t framesRequested = buffer->frameCount;
+    buffer->frameCount = min(framesLeftInHead, framesRequested);
+
+    mTimedAudioOutputOnTime = true;
+}
+
+// Yield samples of silence up to the given output buffer's capacity
+//
+// Caller must hold mTimedBufferQueueLock
+void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence(
+    uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
+
+    // lazily allocate a buffer filled with silence
+    if (mTimedSilenceBufferSize < numFrames * mCblk->frameSize) {
+        delete [] mTimedSilenceBuffer;
+        mTimedSilenceBufferSize = numFrames * mCblk->frameSize;
+        mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
+        memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
+    }
+
+    buffer->raw = mTimedSilenceBuffer;
+    size_t framesRequested = buffer->frameCount;
+    buffer->frameCount = min(numFrames, framesRequested);
+
+    mTimedAudioOutputOnTime = false;
+}
+
+// AudioBufferProvider interface
+void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
+    AudioBufferProvider::Buffer* buffer) {
+
+    Mutex::Autolock _l(mTimedBufferQueueLock);
+
+    // If the buffer which was just released is part of the buffer at the head
+    // of the queue, be sure to update the amount of the buffer which has been
+    // consumed.  If the buffer being returned is not part of the head of the
+    // queue, it's either because the buffer is part of the silence buffer, or
+    // because the head of the timed queue was trimmed after the mixer called
+    // getNextBuffer but before the mixer called releaseBuffer.
+    if ((buffer->raw != mTimedSilenceBuffer) && mTimedBufferQueue.size()) {
+        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
+
+        void* start = head.buffer()->pointer();
+        void* end   = (char *) head.buffer()->pointer() + head.buffer()->size();
+
+        if ((buffer->raw >= start) && (buffer->raw <= end)) {
+            head.setPosition(head.position() +
+                    (buffer->frameCount * mCblk->frameSize));
+            if (static_cast<size_t>(head.position()) >= head.buffer()->size()) {
+                mTimedBufferQueue.removeAt(0);
+            }
+        }
+    }
+
+    buffer->raw = 0;
+    buffer->frameCount = 0;
+}
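
Editor's sketch (not part of the patch): releaseBuffer() above only advances the head buffer's consumed position when the returned pointer actually falls inside the head buffer, and pops the head once it is fully consumed. A standalone model of that bookkeeping with hypothetical types:

    // Illustrative model of the releaseBuffer() accounting above.
    #include <cstdint>
    #include <deque>
    #include <vector>

    struct PendingBuffer {
        std::vector<uint8_t> data;
        size_t position = 0;       // bytes already consumed
    };

    void releaseChunk(std::deque<PendingBuffer>& queue,
                      const uint8_t* releasedPtr, size_t releasedBytes) {
        if (queue.empty()) return;
        PendingBuffer& head = queue.front();
        const uint8_t* start = head.data.data();
        const uint8_t* end   = start + head.data.size();
        if (releasedPtr >= start && releasedPtr <= end) {
            head.position += releasedBytes;
            if (head.position >= head.data.size()) {
                queue.pop_front();   // head fully consumed
            }
        }
        // else: the chunk came from the silence buffer, or the head was trimmed
        // between getNextBuffer() and releaseBuffer(); nothing to account for.
    }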
+
+uint32_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
+    Mutex::Autolock _l(mTimedBufferQueueLock);
+
+    uint32_t frames = 0;
+    for (size_t i = 0; i < mTimedBufferQueue.size(); i++) {
+        const TimedBuffer& tb = mTimedBufferQueue[i];
+        frames += (tb.buffer()->size() - tb.position())  / mCblk->frameSize;
+    }
+
+    return frames;
+}
+
+AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
+        : mPTS(0), mPosition(0) {}
+
+AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
+    const sp<IMemory>& buffer, int64_t pts)
+        : mBuffer(buffer), mPTS(pts), mPosition(0) {}
+
 // ----------------------------------------------------------------------------
 
 // RecordTrack constructor must be called with AudioFlinger::mLock held
 AudioFlinger::RecordThread::RecordTrack::RecordTrack(
-            const wp<ThreadBase>& thread,
+            RecordThread *thread,
             const sp<Client>& client,
             uint32_t sampleRate,
-            uint32_t format,
+            audio_format_t format,
             uint32_t channelMask,
             int frameCount,
-            uint32_t flags,
             int sessionId)
     :   TrackBase(thread, client, sampleRate, format,
-                  channelMask, frameCount, flags, 0, sessionId),
+                  channelMask, frameCount, 0 /*sharedBuffer*/, sessionId),
         mOverflow(false)
 {
     if (mCblk != NULL) {
-       ALOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer);
-       if (format == AUDIO_FORMAT_PCM_16_BIT) {
-           mCblk->frameSize = mChannelCount * sizeof(int16_t);
-       } else if (format == AUDIO_FORMAT_PCM_8_BIT) {
-           mCblk->frameSize = mChannelCount * sizeof(int8_t);
-       } else {
-           mCblk->frameSize = sizeof(int8_t);
-       }
+        ALOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer);
+        if (format == AUDIO_FORMAT_PCM_16_BIT) {
+            mCblk->frameSize = mChannelCount * sizeof(int16_t);
+        } else if (format == AUDIO_FORMAT_PCM_8_BIT) {
+            mCblk->frameSize = mChannelCount * sizeof(int8_t);
+        } else {
+            mCblk->frameSize = sizeof(int8_t);
+        }
     }
 }
 
@@ -3703,22 +4204,23 @@
     }
 }
 
-status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+// AudioBufferProvider interface
+status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts)
 {
     audio_track_cblk_t* cblk = this->cblk();
     uint32_t framesAvail;
     uint32_t framesReq = buffer->frameCount;
 
-     // Check if last stepServer failed, try to step now
-    if (mFlags & TrackBase::STEPSERVER_FAILED) {
+    // Check if last stepServer failed, try to step now
+    if (mStepServerFailed) {
         if (!step()) goto getNextBuffer_exit;
         ALOGV("stepServer recovered");
-        mFlags &= ~TrackBase::STEPSERVER_FAILED;
+        mStepServerFailed = false;
     }
 
     framesAvail = cblk->framesAvailable_l();
 
-    if (LIKELY(framesAvail)) {
+    if (CC_LIKELY(framesAvail)) {
         uint32_t s = cblk->server;
         uint32_t bufferEnd = cblk->serverBase + cblk->frameCount;
 
@@ -3730,24 +4232,24 @@
         }
 
         buffer->raw = getBuffer(s, framesReq);
-        if (buffer->raw == 0) goto getNextBuffer_exit;
+        if (buffer->raw == NULL) goto getNextBuffer_exit;
 
         buffer->frameCount = framesReq;
         return NO_ERROR;
     }
 
 getNextBuffer_exit:
-    buffer->raw = 0;
+    buffer->raw = NULL;
     buffer->frameCount = 0;
     return NOT_ENOUGH_DATA;
 }
 
-status_t AudioFlinger::RecordThread::RecordTrack::start()
+status_t AudioFlinger::RecordThread::RecordTrack::start(pid_t tid)
 {
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         RecordThread *recordThread = (RecordThread *)thread.get();
-        return recordThread->start(this);
+        return recordThread->start(this, tid);
     } else {
         return BAD_VALUE;
     }
@@ -3769,7 +4271,7 @@
 void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size)
 {
     snprintf(buffer, size, "   %05d %03u 0x%08x %05d   %04u %01d %05u  %08x %08x\n",
-            (mClient == NULL) ? getpid() : mClient->pid(),
+            (mClient == 0) ? getpid_cached : mClient->pid(),
             mFormat,
             mChannelMask,
             mSessionId,
@@ -3784,21 +4286,19 @@
 // ----------------------------------------------------------------------------
 
 AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
-            const wp<ThreadBase>& thread,
+            PlaybackThread *playbackThread,
             DuplicatingThread *sourceThread,
             uint32_t sampleRate,
-            uint32_t format,
+            audio_format_t format,
             uint32_t channelMask,
             int frameCount)
-    :   Track(thread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount, NULL, 0),
+    :   Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount, NULL, 0),
     mActive(false), mSourceThread(sourceThread)
 {
 
-    PlaybackThread *playbackThread = (PlaybackThread *)thread.unsafe_get();
     if (mCblk != NULL) {
         mCblk->flags |= CBLK_DIRECTION_OUT;
         mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
-        mCblk->volume[0] = mCblk->volume[1] = 0x1000;
         mOutBuffer.frameCount = 0;
         playbackThread->mTracks.add(this);
         ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, mCblk->buffers %p, " \
@@ -3815,9 +4315,9 @@
     clearBufferQueue();
 }
 
-status_t AudioFlinger::PlaybackThread::OutputTrack::start()
+status_t AudioFlinger::PlaybackThread::OutputTrack::start(pid_t tid)
 {
-    status_t status = Track::start();
+    status_t status = Track::start(tid);
     if (status != NO_ERROR) {
         return status;
     }
@@ -3847,7 +4347,7 @@
     uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
 
     if (!mActive && frames != 0) {
-        start();
+        start(0);
         sp<ThreadBase> thread = mThread.promote();
         if (thread != 0) {
             MixerThread *mixerThread = (MixerThread *)thread.get();
@@ -3882,7 +4382,7 @@
         if (mOutBuffer.frameCount == 0) {
             mOutBuffer.frameCount = pInBuffer->frameCount;
             nsecs_t startTime = systemTime();
-            if (obtainBuffer(&mOutBuffer, waitTimeLeftMs) == (status_t)AudioTrack::NO_MORE_BUFFERS) {
+            if (obtainBuffer(&mOutBuffer, waitTimeLeftMs) == (status_t)NO_MORE_BUFFERS) {
                 ALOGV ("OutputTrack::write() %p thread %p no more output buffers", this, mThread.unsafe_get());
                 outputBufferFull = true;
                 break;
@@ -3971,13 +4471,13 @@
         goto start_loop_here;
         while (framesAvail == 0) {
             active = mActive;
-            if (UNLIKELY(!active)) {
+            if (CC_UNLIKELY(!active)) {
                 ALOGV("Not active and NO_MORE_BUFFERS");
-                return AudioTrack::NO_MORE_BUFFERS;
+                return NO_MORE_BUFFERS;
             }
             result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
             if (result != NO_ERROR) {
-                return AudioTrack::NO_MORE_BUFFERS;
+                return NO_MORE_BUFFERS;
             }
             // read the server count again
         start_loop_here:
@@ -3986,7 +4486,7 @@
     }
 
 //    if (framesAvail < framesReq) {
-//        return AudioTrack::NO_MORE_BUFFERS;
+//        return NO_MORE_BUFFERS;
 //    }
 
     if (framesReq > framesAvail) {
@@ -4009,10 +4509,9 @@
 void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
 {
     size_t size = mBufferQueue.size();
-    Buffer *pBuffer;
 
     for (size_t i = 0; i < size; i++) {
-        pBuffer = mBufferQueue.itemAt(i);
+        Buffer *pBuffer = mBufferQueue.itemAt(i);
         delete [] pBuffer->mBuffer;
         delete pBuffer;
     }
@@ -4024,8 +4523,10 @@
 AudioFlinger::Client::Client(const sp<AudioFlinger>& audioFlinger, pid_t pid)
     :   RefBase(),
         mAudioFlinger(audioFlinger),
+        // FIXME should be a "k" constant not hard-coded, in .h or ro. property, see 4 lines below
         mMemoryDealer(new MemoryDealer(1024*1024, "AudioFlinger::Client")),
-        mPid(pid)
+        mPid(pid),
+        mTimedTrackCount(0)
 {
     // 1 MB of address space is good for 32 tracks, 8 buffers each, 4 KB/buffer
 }
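
Editor's note (not part of the patch): the sizing comment above checks out arithmetically, as a quick compile-time assertion shows:

    // 32 tracks * 8 buffers * 4 KB = 1,048,576 bytes = 1 MB.
    static_assert(32 * 8 * 4096 == 1024 * 1024, "1 MB covers 32 tracks x 8 x 4 KB");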
@@ -4036,31 +4537,53 @@
     mAudioFlinger->removeClient_l(mPid);
 }
 
-const sp<MemoryDealer>& AudioFlinger::Client::heap() const
+sp<MemoryDealer> AudioFlinger::Client::heap() const
 {
     return mMemoryDealer;
 }
 
+// Reserve one of the limited slots for a timed audio track associated
+// with this client
+bool AudioFlinger::Client::reserveTimedTrack()
+{
+    const int kMaxTimedTracksPerClient = 4;
+
+    Mutex::Autolock _l(mTimedTrackLock);
+
+    if (mTimedTrackCount >= kMaxTimedTracksPerClient) {
+        ALOGW("can not create timed track - pid %d has exceeded the limit",
+             mPid);
+        return false;
+    }
+
+    mTimedTrackCount++;
+    return true;
+}
+
+// Release a slot for a timed audio track
+void AudioFlinger::Client::releaseTimedTrack()
+{
+    Mutex::Autolock _l(mTimedTrackLock);
+    mTimedTrackCount--;
+}
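
Editor's sketch (not part of the patch): reserveTimedTrack()/releaseTimedTrack() above implement a small mutex-guarded per-client quota. The same pattern with the standard library instead of Android's Mutex, names hypothetical (the limit of 4 mirrors the patch):

    #include <mutex>

    class TimedTrackQuota {
    public:
        bool reserve() {
            std::lock_guard<std::mutex> lock(mLock);
            if (mCount >= kMaxTimedTracksPerClient) {
                return false;          // quota exhausted for this client
            }
            ++mCount;
            return true;
        }
        void release() {
            std::lock_guard<std::mutex> lock(mLock);
            --mCount;
        }
    private:
        static const int kMaxTimedTracksPerClient = 4;  // same limit as the patch
        std::mutex mLock;
        int mCount = 0;
    };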
+
 // ----------------------------------------------------------------------------
 
 AudioFlinger::NotificationClient::NotificationClient(const sp<AudioFlinger>& audioFlinger,
                                                      const sp<IAudioFlingerClient>& client,
                                                      pid_t pid)
-    : mAudioFlinger(audioFlinger), mPid(pid), mClient(client)
+    : mAudioFlinger(audioFlinger), mPid(pid), mAudioFlingerClient(client)
 {
 }
 
 AudioFlinger::NotificationClient::~NotificationClient()
 {
-    mClient.clear();
 }
 
 void AudioFlinger::NotificationClient::binderDied(const wp<IBinder>& who)
 {
     sp<NotificationClient> keep(this);
-    {
-        mAudioFlinger->removeNotificationClient(mPid);
-    }
+    mAudioFlinger->removeNotificationClient(mPid);
 }
 
 // ----------------------------------------------------------------------------
@@ -4079,8 +4602,12 @@
     mTrack->destroy();
 }
 
-status_t AudioFlinger::TrackHandle::start() {
-    return mTrack->start();
+sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
+    return mTrack->getCblk();
+}
+
+status_t AudioFlinger::TrackHandle::start(pid_t tid) {
+    return mTrack->start(tid);
 }
 
 void AudioFlinger::TrackHandle::stop() {
@@ -4099,19 +4626,43 @@
     mTrack->pause();
 }
 
-void AudioFlinger::TrackHandle::setVolume(float left, float right) {
-    mTrack->setVolume(left, right);
-}
-
-sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
-    return mTrack->getCblk();
-}
-
 status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId)
 {
     return mTrack->attachAuxEffect(EffectId);
 }
 
+status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
+                                                         sp<IMemory>* buffer) {
+    if (!mTrack->isTimedTrack())
+        return INVALID_OPERATION;
+
+    PlaybackThread::TimedTrack* tt =
+            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
+    return tt->allocateTimedBuffer(size, buffer);
+}
+
+status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
+                                                     int64_t pts) {
+    if (!mTrack->isTimedTrack())
+        return INVALID_OPERATION;
+
+    PlaybackThread::TimedTrack* tt =
+            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
+    return tt->queueTimedBuffer(buffer, pts);
+}
+
+status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
+    const LinearTransform& xform, int target) {
+
+    if (!mTrack->isTimedTrack())
+        return INVALID_OPERATION;
+
+    PlaybackThread::TimedTrack* tt =
+            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
+    return tt->setMediaTimeTransform(
+        xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
+}
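
Editor's sketch (not part of the patch): the three TrackHandle methods above follow one pattern: refuse with INVALID_OPERATION unless isTimedTrack(), then downcast and forward. A condensed illustration with hypothetical types; given the isTimedTrack() guard, a static_cast is the equivalent (and arguably safer) downcast:

    #include <cstdint>
    #include <memory>

    struct Track {
        virtual ~Track() {}
        virtual bool isTimedTrack() const { return false; }
    };

    struct TimedTrack : Track {
        bool isTimedTrack() const override { return true; }
        int queueTimedBuffer(const void* /*buffer*/, int64_t /*pts*/) { return 0; }
    };

    // Negative return stands in for INVALID_OPERATION.
    int queueTimedBuffer(const std::shared_ptr<Track>& track,
                         const void* buffer, int64_t pts) {
        if (!track->isTimedTrack()) {
            return -1;
        }
        auto* tt = static_cast<TimedTrack*>(track.get());
        return tt->queueTimedBuffer(buffer, pts);
    }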
+
 status_t AudioFlinger::TrackHandle::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
@@ -4122,11 +4673,12 @@
 
 sp<IAudioRecord> AudioFlinger::openRecord(
         pid_t pid,
-        int input,
+        audio_io_handle_t input,
         uint32_t sampleRate,
-        uint32_t format,
+        audio_format_t format,
         uint32_t channelMask,
         int frameCount,
+        // FIXME dead, remove from IAudioFlinger
         uint32_t flags,
         int *sessionId,
         status_t *status)
@@ -4134,7 +4686,6 @@
     sp<RecordThread::RecordTrack> recordTrack;
     sp<RecordHandle> recordHandle;
     sp<Client> client;
-    wp<Client> wclient;
     status_t lStatus;
     RecordThread *thread;
     size_t inFrameCount;
@@ -4155,13 +4706,7 @@
             goto Exit;
         }
 
-        wclient = mClients.valueFor(pid);
-        if (wclient != NULL) {
-            client = wclient.promote();
-        } else {
-            client = new Client(this, pid);
-            mClients.add(pid, client);
-        }
+        client = registerPid_l(pid);
 
         // If no audio session id is provided, create one here
         if (sessionId != NULL && *sessionId != AUDIO_SESSION_OUTPUT_MIX) {
@@ -4178,7 +4723,6 @@
                                                 format,
                                                 channelMask,
                                                 frameCount,
-                                                flags,
                                                 lSessionId,
                                                 &lStatus);
     }
@@ -4213,9 +4757,13 @@
     stop();
 }
 
-status_t AudioFlinger::RecordHandle::start() {
+sp<IMemory> AudioFlinger::RecordHandle::getCblk() const {
+    return mRecordTrack->getCblk();
+}
+
+status_t AudioFlinger::RecordHandle::start(pid_t tid) {
     ALOGV("RecordHandle::start()");
-    return mRecordTrack->start();
+    return mRecordTrack->start(tid);
 }
 
 void AudioFlinger::RecordHandle::stop() {
@@ -4223,10 +4771,6 @@
     mRecordTrack->stop();
 }
 
-sp<IMemory> AudioFlinger::RecordHandle::getCblk() const {
-    return mRecordTrack->getCblk();
-}
-
 status_t AudioFlinger::RecordHandle::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
@@ -4239,17 +4783,18 @@
                                          AudioStreamIn *input,
                                          uint32_t sampleRate,
                                          uint32_t channels,
-                                         int id,
+                                         audio_io_handle_t id,
                                          uint32_t device) :
-    ThreadBase(audioFlinger, id, device),
-    mInput(input), mTrack(NULL), mResampler(0), mRsmpOutBuffer(0), mRsmpInBuffer(0)
+    ThreadBase(audioFlinger, id, device, RECORD),
+    mInput(input), mTrack(NULL), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
+    // mRsmpInIndex and mInputBytes set by readInputParameters()
+    mReqChannelCount(popcount(channels)),
+    mReqSampleRate(sampleRate)
+    // mBytesRead is only meaningful while active, and so is cleared in start()
+    // (but might be better to also clear here for dump?)
 {
-    mType = ThreadBase::RECORD;
+    snprintf(mName, kNameLength, "AudioIn_%X", id);
 
-    snprintf(mName, kNameLength, "AudioIn_%d", id);
-
-    mReqChannelCount = popcount(channels);
-    mReqSampleRate = sampleRate;
     readInputParameters();
 }
 
@@ -4257,10 +4802,8 @@
 AudioFlinger::RecordThread::~RecordThread()
 {
     delete[] mRsmpInBuffer;
-    if (mResampler != 0) {
-        delete mResampler;
-        delete[] mRsmpOutBuffer;
-    }
+    delete mResampler;
+    delete[] mRsmpOutBuffer;
 }
 
 void AudioFlinger::RecordThread::onFirstRef()
@@ -4349,9 +4892,9 @@
             }
 
             buffer.frameCount = mFrameCount;
-            if (LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) {
+            if (CC_LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) {
                 size_t framesOut = buffer.frameCount;
-                if (mResampler == 0) {
+                if (mResampler == NULL) {
                     // no resampling
                     while (framesOut) {
                         size_t framesIn = mFrameCount - mRsmpInIndex;
@@ -4416,7 +4959,7 @@
                     // ditherAndClamp() works as long as all buffers returned by mActiveTrack->getNextBuffer()
                     // are 32 bit aligned which should be always true.
                     if (mChannelCount == 2 && mReqChannelCount == 1) {
-                        AudioMixer::ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut);
+                        ditherAndClamp(mRsmpOutBuffer, mRsmpOutBuffer, framesOut);
                         // the resampler always outputs stereo samples: do post stereo to mono conversion
                         int16_t *src = (int16_t *)mRsmpOutBuffer;
                         int16_t *dst = buffer.i16;
@@ -4425,7 +4968,7 @@
                             src += 2;
                         }
                     } else {
-                        AudioMixer::ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut);
+                        ditherAndClamp((int32_t *)buffer.raw, mRsmpOutBuffer, framesOut);
                     }
 
                 }
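
Editor's sketch (not part of the patch): the branch above folds the resampler's stereo output down to mono when one channel was requested; the per-sample conversion line itself sits in the context elided between the hunks. A generic interleaved stereo-to-mono fold-down by averaging, which is one common way to do it, not necessarily byte-for-byte what the elided line does:

    #include <cstddef>
    #include <cstdint>

    void stereoToMono(const int16_t* src, int16_t* dst, size_t frames) {
        for (size_t i = 0; i < frames; i++) {
            // Average L and R in 32-bit to avoid overflow, then narrow back.
            dst[i] = static_cast<int16_t>(
                    ((int32_t)src[2 * i] + (int32_t)src[2 * i + 1]) >> 1);
        }
    }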
@@ -4436,7 +4979,7 @@
             else {
                 if (!mActiveTrack->setOverflow()) {
                     nsecs_t now = systemTime();
-                    if ((now - lastWarning) > kWarningThrottle) {
+                    if ((now - lastWarning) > kWarningThrottleNs) {
                         ALOGW("RecordThread: buffer overflow");
                         lastWarning = now;
                     }
@@ -4469,10 +5012,9 @@
 sp<AudioFlinger::RecordThread::RecordTrack>  AudioFlinger::RecordThread::createRecordTrack_l(
         const sp<AudioFlinger::Client>& client,
         uint32_t sampleRate,
-        int format,
+        audio_format_t format,
         int channelMask,
         int frameCount,
-        uint32_t flags,
         int sessionId,
         status_t *status)
 {
@@ -4489,9 +5031,9 @@
         Mutex::Autolock _l(mLock);
 
         track = new RecordTrack(this, client, sampleRate,
-                      format, channelMask, frameCount, flags, sessionId);
+                      format, channelMask, frameCount, sessionId);
 
-        if (track->getCblk() == NULL) {
+        if (track->getCblk() == 0) {
             lStatus = NO_MEMORY;
             goto Exit;
         }
@@ -4512,13 +5054,13 @@
     return track;
 }
 
-status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack)
+status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack, pid_t tid)
 {
-    ALOGV("RecordThread::start");
-    sp <ThreadBase> strongMe = this;
+    ALOGV("RecordThread::start tid=%d", tid);
+    sp<ThreadBase> strongMe = this;
     status_t status = NO_ERROR;
     {
-        AutoMutex lock(&mLock);
+        AutoMutex lock(mLock);
         if (mActiveTrack != 0) {
             if (recordTrack != mActiveTrack.get()) {
                 status = -EBUSY;
@@ -4547,7 +5089,7 @@
         ALOGV("Signal record thread");
         mWaitWorkCV.signal();
         // do not wait for mStartStopCond if exiting
-        if (mExiting) {
+        if (exitPending()) {
             mActiveTrack.clear();
             status = INVALID_OPERATION;
             goto startError;
@@ -4568,13 +5110,13 @@
 
 void AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) {
     ALOGV("RecordThread::stop");
-    sp <ThreadBase> strongMe = this;
+    sp<ThreadBase> strongMe = this;
     {
-        AutoMutex lock(&mLock);
+        AutoMutex lock(mLock);
         if (mActiveTrack != 0 && recordTrack == mActiveTrack.get()) {
             mActiveTrack->mState = TrackBase::PAUSING;
             // do not wait for mStartStopCond if exiting
-            if (mExiting) {
+            if (exitPending()) {
                 return;
             }
             mStartStopCond.wait(mLock);
@@ -4594,7 +5136,6 @@
     const size_t SIZE = 256;
     char buffer[SIZE];
     String8 result;
-    pid_t pid = 0;
 
     snprintf(buffer, SIZE, "\nInput thread %p internals\n", this);
     result.append(buffer);
@@ -4609,7 +5150,7 @@
         result.append(buffer);
         snprintf(buffer, SIZE, "In size: %d\n", mInputBytes);
         result.append(buffer);
-        snprintf(buffer, SIZE, "Resampling: %d\n", (mResampler != 0));
+        snprintf(buffer, SIZE, "Resampling: %d\n", (mResampler != NULL));
         result.append(buffer);
         snprintf(buffer, SIZE, "Out channel count: %d\n", mReqChannelCount);
         result.append(buffer);
@@ -4628,7 +5169,8 @@
     return NO_ERROR;
 }
 
-status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+// AudioBufferProvider interface
+status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts)
 {
     size_t framesReq = buffer->frameCount;
     size_t framesReady = mFrameCount - mRsmpInIndex;
@@ -4644,7 +5186,7 @@
                 mInput->stream->common.standby(&mInput->stream->common);
                 usleep(kRecordThreadSleepUs);
             }
-            buffer->raw = 0;
+            buffer->raw = NULL;
             buffer->frameCount = 0;
             return NOT_ENOUGH_DATA;
         }
@@ -4666,6 +5208,7 @@
     return NO_ERROR;
 }
 
+// AudioBufferProvider interface
 void AudioFlinger::RecordThread::releaseBuffer(AudioBufferProvider::Buffer* buffer)
 {
     mRsmpInIndex += buffer->frameCount;
@@ -4681,7 +5224,7 @@
         String8 keyValuePair = mNewParameters[0];
         AudioParameter param = AudioParameter(keyValuePair);
         int value;
-        int reqFormat = mFormat;
+        audio_format_t reqFormat = mFormat;
         int reqSamplingRate = mReqSampleRate;
         int reqChannelCount = mReqChannelCount;
 
@@ -4690,7 +5233,7 @@
             reconfig = true;
         }
         if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
-            reqFormat = value;
+            reqFormat = (audio_format_t) value;
             reconfig = true;
         }
         if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
@@ -4699,7 +5242,7 @@
         }
         if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
             // do not accept frame count changes if tracks are open as the track buffer
-            // size depends on frame count and correct behavior would not be garantied
+            // size depends on frame count and correct behavior would not be guaranteed
             // if frame count is changed after track creation
             if (mActiveTrack != 0) {
                 status = INVALID_OPERATION;
@@ -4734,16 +5277,17 @@
         if (status == NO_ERROR) {
             status = mInput->stream->common.set_parameters(&mInput->stream->common, keyValuePair.string());
             if (status == INVALID_OPERATION) {
-               mInput->stream->common.standby(&mInput->stream->common);
-               status = mInput->stream->common.set_parameters(&mInput->stream->common, keyValuePair.string());
+                mInput->stream->common.standby(&mInput->stream->common);
+                status = mInput->stream->common.set_parameters(&mInput->stream->common,
+                        keyValuePair.string());
             }
             if (reconfig) {
                 if (status == BAD_VALUE &&
                     reqFormat == mInput->stream->common.get_format(&mInput->stream->common) &&
                     reqFormat == AUDIO_FORMAT_PCM_16_BIT &&
                     ((int)mInput->stream->common.get_sample_rate(&mInput->stream->common) <= (2 * reqSamplingRate)) &&
-                    (popcount(mInput->stream->common.get_channels(&mInput->stream->common)) < 3) &&
-                    (reqChannelCount < 3)) {
+                    popcount(mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_2 &&
+                    (reqChannelCount <= FCC_2)) {
                     status = NO_ERROR;
                 }
                 if (status == NO_ERROR) {
@@ -4759,7 +5303,7 @@
         mParamCond.signal();
         // wait for condition with time out in case the thread calling ThreadBase::setParameters()
         // already timed out waiting for the status and will never signal the condition.
-        mWaitWorkCV.waitRelative(mLock, kSetParametersTimeout);
+        mWaitWorkCV.waitRelative(mLock, kSetParametersTimeoutNs);
     }
     return reconfig;
 }
@@ -4782,7 +5326,7 @@
 
 void AudioFlinger::RecordThread::audioConfigChanged_l(int event, int param) {
     AudioSystem::OutputDescriptor desc;
-    void *param2 = 0;
+    void *param2 = NULL;
 
     switch (event) {
     case AudioSystem::INPUT_OPENED:
@@ -4804,25 +5348,27 @@
 
 void AudioFlinger::RecordThread::readInputParameters()
 {
-    if (mRsmpInBuffer) delete mRsmpInBuffer;
-    if (mRsmpOutBuffer) delete mRsmpOutBuffer;
-    if (mResampler) delete mResampler;
-    mResampler = 0;
+    delete mRsmpInBuffer;
+    // mRsmpInBuffer is always assigned a new[] below
+    delete mRsmpOutBuffer;
+    mRsmpOutBuffer = NULL;
+    delete mResampler;
+    mResampler = NULL;
 
     mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
     mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common);
     mChannelCount = (uint16_t)popcount(mChannelMask);
     mFormat = mInput->stream->common.get_format(&mInput->stream->common);
-    mFrameSize = (uint16_t)audio_stream_frame_size(&mInput->stream->common);
+    mFrameSize = audio_stream_frame_size(&mInput->stream->common);
     mInputBytes = mInput->stream->common.get_buffer_size(&mInput->stream->common);
     mFrameCount = mInputBytes / mFrameSize;
     mRsmpInBuffer = new int16_t[mFrameCount * mChannelCount];
 
-    if (mSampleRate != mReqSampleRate && mChannelCount < 3 && mReqChannelCount < 3)
+    if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2)
     {
         int channelCount;
-         // optmization: if mono to mono, use the resampler in stereo to stereo mode to avoid
-         // stereo to mono post process as the resampler always outputs stereo.
+        // optimization: if mono to mono, use the resampler in stereo to stereo mode to avoid
+        // stereo to mono post process as the resampler always outputs stereo.
         if (mChannelCount == 1 && mReqChannelCount == 2) {
             channelCount = 1;
         } else {
@@ -4873,7 +5419,7 @@
     return mTrack;
 }
 
-AudioFlinger::AudioStreamIn* AudioFlinger::RecordThread::getInput()
+AudioFlinger::AudioStreamIn* AudioFlinger::RecordThread::getInput() const
 {
     Mutex::Autolock _l(mLock);
     return mInput;
@@ -4899,18 +5445,17 @@
 
 // ----------------------------------------------------------------------------
 
-int AudioFlinger::openOutput(uint32_t *pDevices,
+audio_io_handle_t AudioFlinger::openOutput(uint32_t *pDevices,
                                 uint32_t *pSamplingRate,
-                                uint32_t *pFormat,
+                                audio_format_t *pFormat,
                                 uint32_t *pChannels,
                                 uint32_t *pLatencyMs,
-                                uint32_t flags)
+                                audio_policy_output_flags_t flags)
 {
     status_t status;
     PlaybackThread *thread = NULL;
-    mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
     uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
-    uint32_t format = pFormat ? *pFormat : 0;
+    audio_format_t format = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
     uint32_t channels = pChannels ? *pChannels : 0;
     uint32_t latency = pLatencyMs ? *pLatencyMs : 0;
     audio_stream_out_t *outStream;
@@ -4933,8 +5478,10 @@
     if (outHwDev == NULL)
         return 0;
 
-    status = outHwDev->open_output_stream(outHwDev, *pDevices, (int *)&format,
+    mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
+    status = outHwDev->open_output_stream(outHwDev, *pDevices, &format,
                                           &channels, &samplingRate, &outStream);
+    mHardwareStatus = AUDIO_HW_IDLE;
     ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, Channels %x, status %d",
             outStream,
             samplingRate,
@@ -4942,10 +5489,9 @@
             channels,
             status);
 
-    mHardwareStatus = AUDIO_HW_IDLE;
     if (outStream != NULL) {
         AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream);
-        int id = nextUniqueId();
+        audio_io_handle_t id = nextUniqueId();
 
         if ((flags & AUDIO_POLICY_OUTPUT_FLAG_DIRECT) ||
             (format != AUDIO_FORMAT_PCM_16_BIT) ||
@@ -4958,10 +5504,10 @@
         }
         mPlaybackThreads.add(id, thread);
 
-        if (pSamplingRate) *pSamplingRate = samplingRate;
-        if (pFormat) *pFormat = format;
-        if (pChannels) *pChannels = channels;
-        if (pLatencyMs) *pLatencyMs = thread->latency();
+        if (pSamplingRate != NULL) *pSamplingRate = samplingRate;
+        if (pFormat != NULL) *pFormat = format;
+        if (pChannels != NULL) *pChannels = channels;
+        if (pLatencyMs != NULL) *pLatencyMs = thread->latency();
 
         // notify client processes of the new output creation
         thread->audioConfigChanged_l(AudioSystem::OUTPUT_OPENED);
@@ -4971,7 +5517,8 @@
     return 0;
 }
 
-int AudioFlinger::openDuplicateOutput(int output1, int output2)
+audio_io_handle_t AudioFlinger::openDuplicateOutput(audio_io_handle_t output1,
+        audio_io_handle_t output2)
 {
     Mutex::Autolock _l(mLock);
     MixerThread *thread1 = checkMixerThread_l(output1);
@@ -4982,7 +5529,7 @@
         return 0;
     }
 
-    int id = nextUniqueId();
+    audio_io_handle_t id = nextUniqueId();
     DuplicatingThread *thread = new DuplicatingThread(this, thread1, id);
     thread->addOutputTrack(thread2);
     mPlaybackThreads.add(id, thread);
@@ -4991,11 +5538,11 @@
     return id;
 }
 
-status_t AudioFlinger::closeOutput(int output)
+status_t AudioFlinger::closeOutput(audio_io_handle_t output)
 {
     // keep strong reference on the playback thread so that
     // it is not destroyed while exit() is executed
-    sp <PlaybackThread> thread;
+    sp<PlaybackThread> thread;
     {
         Mutex::Autolock _l(mLock);
         thread = checkPlaybackThread_l(output);
@@ -5013,14 +5560,16 @@
                 }
             }
         }
-        void *param2 = 0;
-        audioConfigChanged_l(AudioSystem::OUTPUT_CLOSED, output, param2);
+        audioConfigChanged_l(AudioSystem::OUTPUT_CLOSED, output, NULL);
         mPlaybackThreads.removeItem(output);
     }
     thread->exit();
+    // The thread entity (active unit of execution) is no longer running here,
+    // but the ThreadBase container still exists.
 
     if (thread->type() != ThreadBase::DUPLICATING) {
         AudioStreamOut *out = thread->clearOutput();
+        ALOG_ASSERT(out != NULL, "out shouldn't be NULL");
         // from now on thread->mOutput is NULL
         out->hwDev->close_output_stream(out->hwDev, out->stream);
         delete out;
@@ -5028,7 +5577,7 @@
     return NO_ERROR;
 }
 
-status_t AudioFlinger::suspendOutput(int output)
+status_t AudioFlinger::suspendOutput(audio_io_handle_t output)
 {
     Mutex::Autolock _l(mLock);
     PlaybackThread *thread = checkPlaybackThread_l(output);
@@ -5043,7 +5592,7 @@
     return NO_ERROR;
 }
 
-status_t AudioFlinger::restoreOutput(int output)
+status_t AudioFlinger::restoreOutput(audio_io_handle_t output)
 {
     Mutex::Autolock _l(mLock);
     PlaybackThread *thread = checkPlaybackThread_l(output);
@@ -5059,19 +5608,19 @@
     return NO_ERROR;
 }
 
-int AudioFlinger::openInput(uint32_t *pDevices,
+audio_io_handle_t AudioFlinger::openInput(uint32_t *pDevices,
                                 uint32_t *pSamplingRate,
-                                uint32_t *pFormat,
+                                audio_format_t *pFormat,
                                 uint32_t *pChannels,
-                                uint32_t acoustics)
+                                audio_in_acoustics_t acoustics)
 {
     status_t status;
     RecordThread *thread = NULL;
     uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
-    uint32_t format = pFormat ? *pFormat : 0;
+    audio_format_t format = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
     uint32_t channels = pChannels ? *pChannels : 0;
     uint32_t reqSamplingRate = samplingRate;
-    uint32_t reqFormat = format;
+    audio_format_t reqFormat = format;
     uint32_t reqChannels = channels;
     audio_stream_in_t *inStream;
     audio_hw_device_t *inHwDev;
@@ -5086,9 +5635,9 @@
     if (inHwDev == NULL)
         return 0;
 
-    status = inHwDev->open_input_stream(inHwDev, *pDevices, (int *)&format,
+    status = inHwDev->open_input_stream(inHwDev, *pDevices, &format,
                                         &channels, &samplingRate,
-                                        (audio_in_acoustics_t)acoustics,
+                                        acoustics,
                                         &inStream);
     ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, acoustics %x, status %d",
             inStream,
@@ -5104,18 +5653,18 @@
     if (inStream == NULL && status == BAD_VALUE &&
         reqFormat == format && format == AUDIO_FORMAT_PCM_16_BIT &&
         (samplingRate <= 2 * reqSamplingRate) &&
-        (popcount(channels) < 3) && (popcount(reqChannels) < 3)) {
+        (popcount(channels) <= FCC_2) && (popcount(reqChannels) <= FCC_2)) {
         ALOGV("openInput() reopening with proposed sampling rate and channels");
-        status = inHwDev->open_input_stream(inHwDev, *pDevices, (int *)&format,
+        status = inHwDev->open_input_stream(inHwDev, *pDevices, &format,
                                             &channels, &samplingRate,
-                                            (audio_in_acoustics_t)acoustics,
+                                            acoustics,
                                             &inStream);
     }
 
     if (inStream != NULL) {
         AudioStreamIn *input = new AudioStreamIn(inHwDev, inStream);
 
-        int id = nextUniqueId();
+        audio_io_handle_t id = nextUniqueId();
         // Start record thread
         // RecordThread requires both input and output device indication to forward to audio
         // pre processing modules
@@ -5128,9 +5677,9 @@
                                   device);
         mRecordThreads.add(id, thread);
         ALOGV("openInput() created record thread: ID %d thread %p", id, thread);
-        if (pSamplingRate) *pSamplingRate = reqSamplingRate;
-        if (pFormat) *pFormat = format;
-        if (pChannels) *pChannels = reqChannels;
+        if (pSamplingRate != NULL) *pSamplingRate = reqSamplingRate;
+        if (pFormat != NULL) *pFormat = format;
+        if (pChannels != NULL) *pChannels = reqChannels;
 
         input->stream->common.standby(&input->stream->common);
 
@@ -5142,11 +5691,11 @@
     return 0;
 }
 
-status_t AudioFlinger::closeInput(int input)
+status_t AudioFlinger::closeInput(audio_io_handle_t input)
 {
     // keep strong reference on the record thread so that
     // it is not destroyed while exit() is executed
-    sp <RecordThread> thread;
+    sp<RecordThread> thread;
     {
         Mutex::Autolock _l(mLock);
         thread = checkRecordThread_l(input);
@@ -5155,13 +5704,15 @@
         }
 
         ALOGV("closeInput() %d", input);
-        void *param2 = 0;
-        audioConfigChanged_l(AudioSystem::INPUT_CLOSED, input, param2);
+        audioConfigChanged_l(AudioSystem::INPUT_CLOSED, input, NULL);
         mRecordThreads.removeItem(input);
     }
     thread->exit();
+    // The thread entity (active unit of execution) is no longer running here,
+    // but the ThreadBase container still exists.
 
     AudioStreamIn *in = thread->clearInput();
+    ALOG_ASSERT(in != NULL, "in shouldn't be NULL");
     // from now on thread->mInput is NULL
     in->hwDev->close_input_stream(in->hwDev, in->stream);
     delete in;
@@ -5169,7 +5720,7 @@
     return NO_ERROR;
 }
 
-status_t AudioFlinger::setStreamOutput(uint32_t stream, int output)
+status_t AudioFlinger::setStreamOutput(audio_stream_type_t stream, audio_io_handle_t output)
 {
     Mutex::Autolock _l(mLock);
     MixerThread *dstThread = checkMixerThread_l(output);
@@ -5185,8 +5736,7 @@
 
     for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
         PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
-        if (thread != dstThread &&
-            thread->type() != ThreadBase::DIRECT) {
+        if (thread != dstThread && thread->type() != ThreadBase::DIRECT) {
             MixerThread *srcThread = (MixerThread *)thread;
             srcThread->setStreamValid(stream, false);
             srcThread->invalidateTracks(stream);
@@ -5205,37 +5755,33 @@
 void AudioFlinger::acquireAudioSessionId(int audioSession)
 {
     Mutex::Autolock _l(mLock);
-    int caller = IPCThreadState::self()->getCallingPid();
+    pid_t caller = IPCThreadState::self()->getCallingPid();
     ALOGV("acquiring %d from %d", audioSession, caller);
-    int num = mAudioSessionRefs.size();
-    for (int i = 0; i< num; i++) {
+    size_t num = mAudioSessionRefs.size();
+    for (size_t i = 0; i< num; i++) {
         AudioSessionRef *ref = mAudioSessionRefs.editItemAt(i);
-        if (ref->sessionid == audioSession && ref->pid == caller) {
-            ref->cnt++;
-            ALOGV(" incremented refcount to %d", ref->cnt);
+        if (ref->mSessionid == audioSession && ref->mPid == caller) {
+            ref->mCnt++;
+            ALOGV(" incremented refcount to %d", ref->mCnt);
             return;
         }
     }
-    AudioSessionRef *ref = new AudioSessionRef();
-    ref->sessionid = audioSession;
-    ref->pid = caller;
-    ref->cnt = 1;
-    mAudioSessionRefs.push(ref);
-    ALOGV(" added new entry for %d", ref->sessionid);
+    mAudioSessionRefs.push(new AudioSessionRef(audioSession, caller));
+    ALOGV(" added new entry for %d", audioSession);
 }
 
 void AudioFlinger::releaseAudioSessionId(int audioSession)
 {
     Mutex::Autolock _l(mLock);
-    int caller = IPCThreadState::self()->getCallingPid();
+    pid_t caller = IPCThreadState::self()->getCallingPid();
     ALOGV("releasing %d from %d", audioSession, caller);
-    int num = mAudioSessionRefs.size();
-    for (int i = 0; i< num; i++) {
+    size_t num = mAudioSessionRefs.size();
+    for (size_t i = 0; i< num; i++) {
         AudioSessionRef *ref = mAudioSessionRefs.itemAt(i);
-        if (ref->sessionid == audioSession && ref->pid == caller) {
-            ref->cnt--;
-            ALOGV(" decremented refcount to %d", ref->cnt);
-            if (ref->cnt == 0) {
+        if (ref->mSessionid == audioSession && ref->mPid == caller) {
+            ref->mCnt--;
+            ALOGV(" decremented refcount to %d", ref->mCnt);
+            if (ref->mCnt == 0) {
                 mAudioSessionRefs.removeAt(i);
                 delete ref;
                 purgeStaleEffects_l();
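
Editor's sketch (not part of the patch): acquireAudioSessionId()/releaseAudioSessionId() above keep one refcounted entry per (session, pid) pair and purge stale effects when the last reference drops. A compact standalone model with hypothetical names:

    #include <map>
    #include <utility>
    #include <sys/types.h>

    class SessionRefs {
    public:
        void acquire(int session, pid_t pid) {
            ++mRefs[{session, pid}];               // creates the entry at count 1
        }
        // Returns true when the last reference for this (session, pid) is
        // dropped, the point at which the patch purges stale effects.
        bool release(int session, pid_t pid) {
            auto it = mRefs.find({session, pid});
            if (it == mRefs.end()) return false;
            if (--it->second == 0) {
                mRefs.erase(it);
                return true;
            }
            return false;
        }
    private:
        std::map<std::pair<int, pid_t>, int> mRefs;
    };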
@@ -5280,9 +5826,9 @@
         bool found = false;
         for (size_t k = 0; k < numsessionrefs; k++) {
             AudioSessionRef *ref = mAudioSessionRefs.itemAt(k);
-            if (ref->sessionid == sessionid) {
+            if (ref->mSessionid == sessionid) {
                 ALOGV(" session %d still exists for %d with %d refs",
-                     sessionid, ref->pid, ref->cnt);
+                    sessionid, ref->mPid, ref->mCnt);
                 found = true;
                 break;
             }
@@ -5311,35 +5857,22 @@
 }
 
 // checkPlaybackThread_l() must be called with AudioFlinger::mLock held
-AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(int output) const
+AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(audio_io_handle_t output) const
 {
-    PlaybackThread *thread = NULL;
-    if (mPlaybackThreads.indexOfKey(output) >= 0) {
-        thread = (PlaybackThread *)mPlaybackThreads.valueFor(output).get();
-    }
-    return thread;
+    return mPlaybackThreads.valueFor(output).get();
 }
 
 // checkMixerThread_l() must be called with AudioFlinger::mLock held
-AudioFlinger::MixerThread *AudioFlinger::checkMixerThread_l(int output) const
+AudioFlinger::MixerThread *AudioFlinger::checkMixerThread_l(audio_io_handle_t output) const
 {
     PlaybackThread *thread = checkPlaybackThread_l(output);
-    if (thread != NULL) {
-        if (thread->type() == ThreadBase::DIRECT) {
-            thread = NULL;
-        }
-    }
-    return (MixerThread *)thread;
+    return thread != NULL && thread->type() != ThreadBase::DIRECT ? (MixerThread *) thread : NULL;
 }
 
 // checkRecordThread_l() must be called with AudioFlinger::mLock held
-AudioFlinger::RecordThread *AudioFlinger::checkRecordThread_l(int input) const
+AudioFlinger::RecordThread *AudioFlinger::checkRecordThread_l(audio_io_handle_t input) const
 {
-    RecordThread *thread = NULL;
-    if (mRecordThreads.indexOfKey(input) >= 0) {
-        thread = (RecordThread *)mRecordThreads.valueFor(input).get();
-    }
-    return thread;
+    return mRecordThreads.valueFor(input).get();
 }
 
 uint32_t AudioFlinger::nextUniqueId()
@@ -5347,7 +5880,7 @@
     return android_atomic_inc(&mNextUniqueId);
 }
 
-AudioFlinger::PlaybackThread *AudioFlinger::primaryPlaybackThread_l()
+AudioFlinger::PlaybackThread *AudioFlinger::primaryPlaybackThread_l() const
 {
     for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
         PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
@@ -5359,7 +5892,7 @@
     return NULL;
 }
 
-uint32_t AudioFlinger::primaryOutputDevice_l()
+uint32_t AudioFlinger::primaryOutputDevice_l() const
 {
     PlaybackThread *thread = primaryPlaybackThread_l();
 
@@ -5376,19 +5909,20 @@
 // ----------------------------------------------------------------------------
 
 
-status_t AudioFlinger::queryNumberEffects(uint32_t *numEffects)
+status_t AudioFlinger::queryNumberEffects(uint32_t *numEffects) const
 {
     Mutex::Autolock _l(mLock);
     return EffectQueryNumberEffects(numEffects);
 }
 
-status_t AudioFlinger::queryEffect(uint32_t index, effect_descriptor_t *descriptor)
+status_t AudioFlinger::queryEffect(uint32_t index, effect_descriptor_t *descriptor) const
 {
     Mutex::Autolock _l(mLock);
     return EffectQueryEffect(index, descriptor);
 }
 
-status_t AudioFlinger::getEffectDescriptor(effect_uuid_t *pUuid, effect_descriptor_t *descriptor)
+status_t AudioFlinger::getEffectDescriptor(const effect_uuid_t *pUuid,
+        effect_descriptor_t *descriptor) const
 {
     Mutex::Autolock _l(mLock);
     return EffectGetDescriptor(pUuid, descriptor);
@@ -5399,7 +5933,7 @@
         effect_descriptor_t *pDesc,
         const sp<IEffectClient>& effectClient,
         int32_t priority,
-        int io,
+        audio_io_handle_t io,
         int sessionId,
         status_t *status,
         int *id,
@@ -5408,10 +5942,8 @@
     status_t lStatus = NO_ERROR;
     sp<EffectHandle> handle;
     effect_descriptor_t desc;
-    sp<Client> client;
-    wp<Client> wclient;
 
-    ALOGV("createEffect pid %d, client %p, priority %d, sessionId %d, io %d",
+    ALOGV("createEffect pid %d, effectClient %p, priority %d, sessionId %d, io %d",
             pid, effectClient.get(), priority, sessionId, io);
 
     if (pDesc == NULL) {
@@ -5427,7 +5959,7 @@
 
     // Session AUDIO_SESSION_OUTPUT_STAGE is reserved for output stage effects
     // that can only be created by audio policy manager (running in same process)
-    if (sessionId == AUDIO_SESSION_OUTPUT_STAGE && getpid() != pid) {
+    if (sessionId == AUDIO_SESSION_OUTPUT_STAGE && getpid_cached != pid) {
         lStatus = PERMISSION_DENIED;
         goto Exit;
     }
@@ -5529,7 +6061,7 @@
         // because of code checking output when entering the function.
         // Note: io is never 0 when creating an effect on an input
         if (io == 0) {
-             // look for the thread where the specified audio session is present
+            // look for the thread where the specified audio session is present
             for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
                 if (mPlaybackThreads.valueAt(i)->hasAudioSession(sessionId) != 0) {
                     io = mPlaybackThreads.keyAt(i);
@@ -5537,12 +6069,12 @@
                 }
             }
             if (io == 0) {
-               for (size_t i = 0; i < mRecordThreads.size(); i++) {
-                   if (mRecordThreads.valueAt(i)->hasAudioSession(sessionId) != 0) {
-                       io = mRecordThreads.keyAt(i);
-                       break;
-                   }
-               }
+                for (size_t i = 0; i < mRecordThreads.size(); i++) {
+                    if (mRecordThreads.valueAt(i)->hasAudioSession(sessionId) != 0) {
+                        io = mRecordThreads.keyAt(i);
+                        break;
+                    }
+                }
             }
             // If no output thread contains the requested session ID, default to
             // first output. The effect chain will be moved to the correct output
@@ -5562,14 +6094,7 @@
             }
         }
 
-        wclient = mClients.valueFor(pid);
-
-        if (wclient != NULL) {
-            client = wclient.promote();
-        } else {
-            client = new Client(this, pid);
-            mClients.add(pid, client);
-        }
+        sp<Client> client = registerPid_l(pid);
 
         // create effect on selected output thread
         handle = thread->createEffect_l(client, effectClient, priority, sessionId,
@@ -5580,13 +6105,14 @@
     }
 
 Exit:
-    if(status) {
+    if (status != NULL) {
         *status = lStatus;
     }
     return handle;
 }
 
-status_t AudioFlinger::moveEffects(int sessionId, int srcOutput, int dstOutput)
+status_t AudioFlinger::moveEffects(int sessionId, audio_io_handle_t srcOutput,
+        audio_io_handle_t dstOutput)
 {
     ALOGV("moveEffects() session %d, srcOutput %d, dstOutput %d",
             sessionId, srcOutput, dstOutput);
@@ -5637,7 +6163,7 @@
 
     // transfer all effects one by one so that new effect chain is created on new thread with
     // correct buffer sizes and audio parameters and effect engines reconfigured accordingly
-    int dstOutput = dstThread->id();
+    audio_io_handle_t dstOutput = dstThread->id();
     sp<EffectChain> dstChain;
     uint32_t strategy = 0; // prevent compiler warning
     sp<EffectModule> effect = chain->getEffectFromId_l(0);
@@ -5709,10 +6235,7 @@
         goto Exit;
     }
     // Only Pre processor effects are allowed on input threads and only on input threads
-    if ((mType == RECORD &&
-            (desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC) ||
-            (mType != RECORD &&
-                    (desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
+    if ((mType == RECORD) != ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
         ALOGW("createEffect_l() effect %s (flags %08x) created on wrong thread type %d",
                 desc->name, desc->flags, mType);
         lStatus = BAD_VALUE;
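
Editor's note (not part of the patch): the rewritten condition above collapses the old two-clause check into a single inequality: the effect is rejected exactly when "thread is a record thread" and "effect is a pre-processor" disagree. A tiny sketch confirming the equivalence over all four combinations:

    #include <cassert>

    int main() {
        for (int isRecord = 0; isRecord <= 1; ++isRecord) {
            for (int isPreProc = 0; isPreProc <= 1; ++isPreProc) {
                bool oldReject = (isRecord && !isPreProc) || (!isRecord && isPreProc);
                bool newReject = (bool)isRecord != (bool)isPreProc;
                assert(oldReject == newReject);
            }
        }
        return 0;
    }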
@@ -5737,7 +6260,7 @@
             effect = chain->getEffectFromDesc_l(desc);
         }
 
-        ALOGV("createEffect_l() got effect %p on chain %p", effect == 0 ? 0 : effect.get(), chain.get());
+        ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());
 
         if (effect == 0) {
             int id = mAudioFlinger->nextUniqueId();
@@ -5765,7 +6288,7 @@
         // create effect handle and connect it to effect module
         handle = new EffectHandle(effect, client, effectClient, priority);
         lStatus = effect->addHandle(handle);
-        if (enabled) {
+        if (enabled != NULL) {
             *enabled = (int)effect->isEnabled();
         }
     }
@@ -5785,7 +6308,7 @@
         handle.clear();
     }
 
-    if(status) {
+    if (status != NULL) {
         *status = lStatus;
     }
     return handle;
@@ -5793,13 +6316,8 @@
 
 sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect_l(int sessionId, int effectId)
 {
-    sp<EffectModule> effect;
-
     sp<EffectChain> chain = getEffectChain_l(sessionId);
-    if (chain != 0) {
-        effect = chain->getEffectFromId_l(effectId);
-    }
-    return effect;
+    return chain != 0 ? chain->getEffectFromId_l(effectId) : 0;
 }
 
 // PlaybackThread::addEffect_l() must be called with AudioFlinger::mLock and
@@ -5860,7 +6378,7 @@
 }
 
 void AudioFlinger::ThreadBase::lockEffectChains_l(
-        Vector<sp <AudioFlinger::EffectChain> >& effectChains)
+        Vector< sp<AudioFlinger::EffectChain> >& effectChains)
 {
     effectChains = mEffectChains;
     for (size_t i = 0; i < mEffectChains.size(); i++) {
@@ -5869,7 +6387,7 @@
 }
 
 void AudioFlinger::ThreadBase::unlockEffectChains(
-        Vector<sp <AudioFlinger::EffectChain> >& effectChains)
+        const Vector< sp<AudioFlinger::EffectChain> >& effectChains)
 {
     for (size_t i = 0; i < effectChains.size(); i++) {
         effectChains[i]->unlock();
@@ -5884,19 +6402,16 @@
 
 sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain_l(int sessionId)
 {
-    sp<EffectChain> chain;
-
     size_t size = mEffectChains.size();
     for (size_t i = 0; i < size; i++) {
         if (mEffectChains[i]->sessionId() == sessionId) {
-            chain = mEffectChains[i];
-            break;
+            return mEffectChains[i];
         }
     }
-    return chain;
+    return 0;
 }
 
-void AudioFlinger::ThreadBase::setMode(uint32_t mode)
+void AudioFlinger::ThreadBase::setMode(audio_mode_t mode)
 {
     Mutex::Autolock _l(mLock);
     size_t size = mEffectChains.size();
@@ -5907,13 +6422,13 @@
 
 void AudioFlinger::ThreadBase::disconnectEffect(const sp<EffectModule>& effect,
                                                     const wp<EffectHandle>& handle,
-                                                    bool unpiniflast) {
+                                                    bool unpinIfLast) {
 
     Mutex::Autolock _l(mLock);
     ALOGV("disconnectEffect() %p effect %p", this, effect.get());
     // delete the effect module if removing last handle on it
     if (effect->removeHandle(handle) == 0) {
-        if (!effect->isPinned() || unpiniflast) {
+        if (!effect->isPinned() || unpinIfLast) {
             removeEffect_l(effect);
             AudioSystem::unregisterEffect(effect->id());
         }
@@ -6048,7 +6563,7 @@
 
 void AudioFlinger::PlaybackThread::detachAuxEffect_l(int effectId)
 {
-     for (size_t i = 0; i < mTracks.size(); ++i) {
+    for (size_t i = 0; i < mTracks.size(); ++i) {
         sp<Track> track = mTracks[i];
         if (track->auxEffectId() == effectId) {
             attachAuxEffect_l(track, 0);
@@ -6093,18 +6608,17 @@
 #undef LOG_TAG
 #define LOG_TAG "AudioFlinger::EffectModule"
 
-AudioFlinger::EffectModule::EffectModule(const wp<ThreadBase>& wThread,
+AudioFlinger::EffectModule::EffectModule(ThreadBase *thread,
                                         const wp<AudioFlinger::EffectChain>& chain,
                                         effect_descriptor_t *desc,
                                         int id,
                                         int sessionId)
-    : mThread(wThread), mChain(chain), mId(id), mSessionId(sessionId), mEffectInterface(NULL),
+    : mThread(thread), mChain(chain), mId(id), mSessionId(sessionId), mEffectInterface(NULL),
       mStatus(NO_INIT), mState(IDLE), mSuspended(false)
 {
     ALOGV("Constructor %p", this);
     int lStatus;
-    sp<ThreadBase> thread = mThread.promote();
-    if (thread == 0) {
+    if (thread == NULL) {
         return;
     }
 
@@ -6152,12 +6666,11 @@
     }
 }
 
-status_t AudioFlinger::EffectModule::addHandle(sp<EffectHandle>& handle)
+status_t AudioFlinger::EffectModule::addHandle(const sp<EffectHandle>& handle)
 {
     status_t status;
 
     Mutex::Autolock _l(mLock);
-    // First handle in mHandles has highest priority and controls the effect module
     int priority = handle->priority();
     size_t size = mHandles.size();
     sp<EffectHandle> h;
@@ -6199,7 +6712,7 @@
 
     bool enabled = false;
     EffectHandle *hdl = handle.unsafe_get();
-    if (hdl) {
+    if (hdl != NULL) {
         ALOGV("removeHandle() unsafe_get OK");
         enabled = hdl->enabled();
     }
@@ -6226,23 +6739,19 @@
 sp<AudioFlinger::EffectHandle> AudioFlinger::EffectModule::controlHandle()
 {
     Mutex::Autolock _l(mLock);
-    sp<EffectHandle> handle;
-    if (mHandles.size() != 0) {
-        handle = mHandles[0].promote();
-    }
-    return handle;
+    return mHandles.size() != 0 ? mHandles[0].promote() : 0;
 }
 
-void AudioFlinger::EffectModule::disconnect(const wp<EffectHandle>& handle, bool unpiniflast)
+void AudioFlinger::EffectModule::disconnect(const wp<EffectHandle>& handle, bool unpinIfLast)
 {
-    ALOGV("disconnect() %p handle %p ", this, handle.unsafe_get());
+    ALOGV("disconnect() %p handle %p", this, handle.unsafe_get());
     // keep a strong reference on this EffectModule to avoid calling the
     // destructor before we exit
     sp<EffectModule> keep(this);
     {
         sp<ThreadBase> thread = mThread.promote();
         if (thread != 0) {
-            thread->disconnectEffect(keep, handle, unpiniflast);
+            thread->disconnectEffect(keep, handle, unpinIfLast);
         }
     }
 }
@@ -6296,7 +6805,7 @@
     if (isProcessEnabled()) {
         // do 32 bit to 16 bit conversion for auxiliary effect input buffer
         if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
-            AudioMixer::ditherAndClamp(mConfig.inputCfg.buffer.s32,
+            ditherAndClamp(mConfig.inputCfg.buffer.s32,
                                         mConfig.inputCfg.buffer.s32,
                                         mConfig.inputCfg.buffer.frameCount/2);
         }
@@ -6401,7 +6910,7 @@
     status_t cmdStatus;
     uint32_t size = sizeof(int);
     status_t status = (*mEffectInterface)->command(mEffectInterface,
-                                                   EFFECT_CMD_CONFIGURE,
+                                                   EFFECT_CMD_SET_CONFIG,
                                                    sizeof(effect_config_t),
                                                    &mConfig,
                                                    &size,
@@ -6585,7 +7094,7 @@
     return NO_ERROR;
 }
 
-bool AudioFlinger::EffectModule::isEnabled()
+bool AudioFlinger::EffectModule::isEnabled() const
 {
     switch (mState) {
     case RESTART:
@@ -6601,7 +7110,7 @@
     }
 }
 
-bool AudioFlinger::EffectModule::isProcessEnabled()
+bool AudioFlinger::EffectModule::isProcessEnabled() const
 {
     switch (mState) {
     case RESTART:
@@ -6694,7 +7203,7 @@
     return status;
 }
 
-status_t AudioFlinger::EffectModule::setMode(uint32_t mode)
+status_t AudioFlinger::EffectModule::setMode(audio_mode_t mode)
 {
     Mutex::Autolock _l(mLock);
     status_t status = NO_ERROR;
@@ -6703,7 +7212,7 @@
         uint32_t size = sizeof(status_t);
         status = (*mEffectInterface)->command(mEffectInterface,
                                               EFFECT_CMD_SET_AUDIO_MODE,
-                                              sizeof(int),
+                                              sizeof(audio_mode_t),
                                               &mode,
                                               &size,
                                               &cmdStatus);
@@ -6719,7 +7228,8 @@
     Mutex::Autolock _l(mLock);
     mSuspended = suspended;
 }
-bool AudioFlinger::EffectModule::suspended()
+
+bool AudioFlinger::EffectModule::suspended() const
 {
     Mutex::Autolock _l(mLock);
     return mSuspended;
@@ -6834,10 +7344,10 @@
     if (mCblkMemory != 0) {
         mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer());
 
-        if (mCblk) {
+        if (mCblk != NULL) {
             new(mCblk) effect_param_cblk_t();
             mBuffer = (uint8_t *)mCblk + bufOffset;
-         }
+        }
     } else {
         ALOGE("not enough memory for Effect size=%u", EFFECT_PARAM_BUFFER_SIZE + sizeof(effect_param_cblk_t));
         return;
@@ -6913,13 +7423,13 @@
     disconnect(true);
 }
 
-void AudioFlinger::EffectHandle::disconnect(bool unpiniflast)
+void AudioFlinger::EffectHandle::disconnect(bool unpinIfLast)
 {
-    ALOGV("disconnect(%s)", unpiniflast ? "true" : "false");
+    ALOGV("disconnect(%s)", unpinIfLast ? "true" : "false");
     if (mEffect == 0) {
         return;
     }
-    mEffect->disconnect(this, unpiniflast);
+    mEffect->disconnect(this, unpinIfLast);
 
     if (mHasControl && mEnabled) {
         sp<ThreadBase> thread = mEffect->thread().promote();
@@ -6931,10 +7441,12 @@
     // release sp on module => module destructor can be called now
     mEffect.clear();
     if (mClient != 0) {
-        if (mCblk) {
+        if (mCblk != NULL) {
+            // unlike ~TrackBase(), mCblk is never a local new, so don't delete
             mCblk->~effect_param_cblk_t();   // destroy our shared-structure.
         }
-        mCblkMemory.clear();            // and free the shared memory
+        mCblkMemory.clear();    // free the shared memory before releasing the heap it belongs to
+        // Client destructor must run with AudioFlinger mutex locked
         Mutex::Autolock _l(mClient->audioFlinger()->mLock);
         mClient.clear();
     }
@@ -7016,10 +7528,6 @@
     return mEffect->command(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
 }
 
-sp<IMemory> AudioFlinger::EffectHandle::getCblk() const {
-    return mCblkMemory;
-}
-
 void AudioFlinger::EffectHandle::setControl(bool hasControl, bool signal, bool enabled)
 {
     ALOGV("setControl %p control %d", this, hasControl);
@@ -7061,10 +7569,10 @@
 
 void AudioFlinger::EffectHandle::dump(char* buffer, size_t size)
 {
-    bool locked = mCblk ? tryLock(mCblk->lock) : false;
+    bool locked = mCblk != NULL && tryLock(mCblk->lock);
 
     snprintf(buffer, size, "\t\t\t%05d %05d    %01u    %01u      %05u  %05u\n",
-            (mClient == NULL) ? getpid() : mClient->pid(),
+            (mClient == 0) ? getpid_cached : mClient->pid(),
             mPriority,
             mHasControl,
             !locked,
@@ -7080,15 +7588,14 @@
 #undef LOG_TAG
 #define LOG_TAG "AudioFlinger::EffectChain"
 
-AudioFlinger::EffectChain::EffectChain(const wp<ThreadBase>& wThread,
+AudioFlinger::EffectChain::EffectChain(ThreadBase *thread,
                                         int sessionId)
-    : mThread(wThread), mSessionId(sessionId), mActiveTrackCnt(0), mTrackCnt(0), mTailBufferCount(0),
+    : mThread(thread), mSessionId(sessionId), mActiveTrackCnt(0), mTrackCnt(0), mTailBufferCount(0),
       mOwnInBuffer(false), mVolumeCtrlIdx(-1), mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX),
       mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX)
 {
     mStrategy = AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
-    sp<ThreadBase> thread = mThread.promote();
-    if (thread == 0) {
+    if (thread == NULL) {
         return;
     }
     mMaxTailBuffers = ((kProcessTailDurationMs * thread->sampleRate()) / 1000) /
@@ -7106,48 +7613,42 @@
 // getEffectFromDesc_l() must be called with ThreadBase::mLock held
 sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromDesc_l(effect_descriptor_t *descriptor)
 {
-    sp<EffectModule> effect;
     size_t size = mEffects.size();
 
     for (size_t i = 0; i < size; i++) {
         if (memcmp(&mEffects[i]->desc().uuid, &descriptor->uuid, sizeof(effect_uuid_t)) == 0) {
-            effect = mEffects[i];
-            break;
+            return mEffects[i];
         }
     }
-    return effect;
+    return 0;
 }
 
 // getEffectFromId_l() must be called with ThreadBase::mLock held
 sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromId_l(int id)
 {
-    sp<EffectModule> effect;
     size_t size = mEffects.size();
 
     for (size_t i = 0; i < size; i++) {
         // by convention, return first effect if id provided is 0 (0 is never a valid id)
         if (id == 0 || mEffects[i]->id() == id) {
-            effect = mEffects[i];
-            break;
+            return mEffects[i];
         }
     }
-    return effect;
+    return 0;
 }
 
 // getEffectFromType_l() must be called with ThreadBase::mLock held
 sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromType_l(
         const effect_uuid_t *type)
 {
-    sp<EffectModule> effect;
     size_t size = mEffects.size();
 
     for (size_t i = 0; i < size; i++) {
         if (memcmp(&mEffects[i]->desc().type, type, sizeof(effect_uuid_t)) == 0) {
-            effect = mEffects[i];
-            break;
+            return mEffects[i];
         }
     }
-    return effect;
+    return 0;
 }
 
 // Must be called with EffectChain::mLock locked
@@ -7238,12 +7739,12 @@
         // Reject insertion if an effect with EFFECT_FLAG_INSERT_EXCLUSIVE is
         // already present
 
-        int size = (int)mEffects.size();
-        int idx_insert = size;
-        int idx_insert_first = -1;
-        int idx_insert_last = -1;
+        size_t size = mEffects.size();
+        size_t idx_insert = size;
+        ssize_t idx_insert_first = -1;
+        ssize_t idx_insert_last = -1;
 
-        for (int i = 0; i < size; i++) {
+        for (size_t i = 0; i < size; i++) {
             effect_descriptor_t d = mEffects[i]->desc();
             uint32_t iMode = d.flags & EFFECT_FLAG_TYPE_MASK;
             uint32_t iPref = d.flags & EFFECT_FLAG_INSERT_MASK;
@@ -7312,11 +7813,10 @@
 size_t AudioFlinger::EffectChain::removeEffect_l(const sp<EffectModule>& effect)
 {
     Mutex::Autolock _l(mLock);
-    int size = (int)mEffects.size();
-    int i;
+    size_t size = mEffects.size();
     uint32_t type = effect->desc().flags & EFFECT_FLAG_TYPE_MASK;
 
-    for (i = 0; i < size; i++) {
+    for (size_t i = 0; i < size; i++) {
         if (effect == mEffects[i]) {
             // calling stop here will remove pre-processing effect from the audio HAL.
             // This is safe as we hold the EffectChain mutex which guarantees that we are not in
@@ -7352,7 +7852,7 @@
 }
 
 // setMode_l() must be called with PlaybackThread::mLock held
-void AudioFlinger::EffectChain::setMode_l(uint32_t mode)
+void AudioFlinger::EffectChain::setMode_l(audio_mode_t mode)
 {
     size_t size = mEffects.size();
     for (size_t i = 0; i < size; i++) {
@@ -7463,7 +7963,7 @@
     sp<SuspendedEffectDesc> desc;
     // use effect type UUID timeLow as key as there is no real risk of identical
     // timeLow fields among effect type UUIDs.
-    int index = mSuspendedEffects.indexOfKey(type->timeLow);
+    ssize_t index = mSuspendedEffects.indexOfKey(type->timeLow);
     if (suspend) {
         if (index >= 0) {
             desc = mSuspendedEffects.valueAt(index);
@@ -7513,7 +8013,7 @@
 {
     sp<SuspendedEffectDesc> desc;
 
-    int index = mSuspendedEffects.indexOfKey((int)kKeyForSuspendAll);
+    ssize_t index = mSuspendedEffects.indexOfKey((int)kKeyForSuspendAll);
     if (suspend) {
         if (index >= 0) {
             desc = mSuspendedEffects.valueAt(index);
@@ -7523,7 +8023,8 @@
             ALOGV("setEffectSuspendedAll_l() add entry for 0");
         }
         if (desc->mRefCount++ == 0) {
-            Vector< sp<EffectModule> > effects = getSuspendEligibleEffects();
+            Vector< sp<EffectModule> > effects;
+            getSuspendEligibleEffects(effects);
             for (size_t i = 0; i < effects.size(); i++) {
                 setEffectSuspended_l(&effects[i]->desc().type, true);
             }
@@ -7574,33 +8075,27 @@
     return true;
 }
 
-Vector< sp<AudioFlinger::EffectModule> > AudioFlinger::EffectChain::getSuspendEligibleEffects()
+void AudioFlinger::EffectChain::getSuspendEligibleEffects(Vector< sp<AudioFlinger::EffectModule> > &effects)
 {
-    Vector< sp<EffectModule> > effects;
+    effects.clear();
     for (size_t i = 0; i < mEffects.size(); i++) {
-        if (!isEffectEligibleForSuspend(mEffects[i]->desc())) {
-            continue;
+        if (isEffectEligibleForSuspend(mEffects[i]->desc())) {
+            effects.add(mEffects[i]);
         }
-        effects.add(mEffects[i]);
     }
-    return effects;
 }
 
 sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectIfEnabled(
                                                             const effect_uuid_t *type)
 {
-    sp<EffectModule> effect;
-    effect = getEffectFromType_l(type);
-    if (effect != 0 && !effect->isEnabled()) {
-        effect.clear();
-    }
-    return effect;
+    sp<EffectModule> effect = getEffectFromType_l(type);
+    return effect != 0 && effect->isEnabled() ? effect : 0;
 }
 
 void AudioFlinger::EffectChain::checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
                                                             bool enabled)
 {
-    int index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow);
+    ssize_t index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow);
     if (enabled) {
         if (index < 0) {
             // if the effect is not suspended, check if all effects are suspended
@@ -7619,7 +8114,7 @@
             }
         }
         ALOGV("checkSuspendOnEffectEnabled() enable suspending fx %08x",
-             effect->desc().type.timeLow);
+            effect->desc().type.timeLow);
         sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index);
         // if the effect is requested to be suspended but was not yet enabled, suspend it now.
         if (desc->mEffect == 0) {
@@ -7632,7 +8127,7 @@
             return;
         }
         ALOGV("checkSuspendOnEffectEnabled() disable restoring fx %08x",
-             effect->desc().type.timeLow);
+            effect->desc().type.timeLow);
         sp<SuspendedEffectDesc> desc = mSuspendedEffects.valueAt(index);
         desc->mEffect.clear();
         effect->setSuspended(false);
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 9bd2c7f..0e4b24a 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -1,4 +1,4 @@
-/* //device/include/server/AudioFlinger/AudioFlinger.h
+/*
 **
 ** Copyright 2007, The Android Open Source Project
 **
@@ -22,10 +22,13 @@
 #include <sys/types.h>
 #include <limits.h>
 
+#include <common_time/cc_helper.h>
+
 #include <media/IAudioFlinger.h>
 #include <media/IAudioFlingerClient.h>
 #include <media/IAudioTrack.h>
 #include <media/IAudioRecord.h>
+#include <media/AudioSystem.h>
 #include <media/AudioTrack.h>
 
 #include <utils/Atomic.h>
@@ -55,97 +58,118 @@
 
 // ----------------------------------------------------------------------------
 
-#define LIKELY( exp )       (__builtin_expect( (exp) != 0, true  ))
-#define UNLIKELY( exp )     (__builtin_expect( (exp) != 0, false ))
+// AudioFlinger has a hard-coded upper limit of 2 channels for capture and playback.
+// There is support for > 2 channel tracks down-mixed to 2 channel output via a down-mix effect.
+// Adding full support for > 2 channel capture or playback would require more than simply changing
+// this #define.  There is an independent hard-coded upper limit in AudioMixer;
+// removing that AudioMixer limit would be necessary but insufficient to support > 2 channels.
+// The macro FCC_2 highlights some (but not all) places where there are 2-channel assumptions.
+// Search also for "2", "left", "right", "[0]", "[1]", ">> 16", "<< 16", etc.
+#define FCC_2 2     // FCC_2 = Fixed Channel Count 2
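To make the warning above concrete, here is a minimal, hypothetical sketch (not part of this patch) of the kind of stereo-only assumption FCC_2 is meant to flag: a left/right volume pair packed into one 32-bit word and split with shifts, which silently breaks for more than two channels.

    #include <stdint.h>

    // Hypothetical helper, for illustration only; FCC_2 is the macro defined above.
    static inline void unpackStereoVolume(uint32_t packed, int16_t volume[FCC_2]) {
        volume[0] = (int16_t)(packed & 0xFFFF);  // left
        volume[1] = (int16_t)(packed >> 16);     // right
    }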
 
-
-// ----------------------------------------------------------------------------
-
-static const nsecs_t kStandbyTimeInNsecs = seconds(3);
+static const nsecs_t kDefaultStandbyTimeInNsecs = seconds(3);
 
 class AudioFlinger :
     public BinderService<AudioFlinger>,
     public BnAudioFlinger
 {
-    friend class BinderService<AudioFlinger>;
+    friend class BinderService<AudioFlinger>;   // for AudioFlinger()
 public:
-    static char const* getServiceName() { return "media.audio_flinger"; }
+    static const char* getServiceName() { return "media.audio_flinger"; }
 
     virtual     status_t    dump(int fd, const Vector<String16>& args);
 
-    // IAudioFlinger interface
+    // IAudioFlinger interface, in binder opcode order
     virtual sp<IAudioTrack> createTrack(
                                 pid_t pid,
-                                int streamType,
+                                audio_stream_type_t streamType,
                                 uint32_t sampleRate,
-                                uint32_t format,
+                                audio_format_t format,
                                 uint32_t channelMask,
                                 int frameCount,
                                 uint32_t flags,
                                 const sp<IMemory>& sharedBuffer,
-                                int output,
+                                audio_io_handle_t output,
+                                bool isTimed,
                                 int *sessionId,
                                 status_t *status);
 
-    virtual     uint32_t    sampleRate(int output) const;
-    virtual     int         channelCount(int output) const;
-    virtual     uint32_t    format(int output) const;
-    virtual     size_t      frameCount(int output) const;
-    virtual     uint32_t    latency(int output) const;
+    virtual sp<IAudioRecord> openRecord(
+                                pid_t pid,
+                                audio_io_handle_t input,
+                                uint32_t sampleRate,
+                                audio_format_t format,
+                                uint32_t channelMask,
+                                int frameCount,
+                                uint32_t flags,
+                                int *sessionId,
+                                status_t *status);
+
+    virtual     uint32_t    sampleRate(audio_io_handle_t output) const;
+    virtual     int         channelCount(audio_io_handle_t output) const;
+    virtual     audio_format_t format(audio_io_handle_t output) const;
+    virtual     size_t      frameCount(audio_io_handle_t output) const;
+    virtual     uint32_t    latency(audio_io_handle_t output) const;
 
     virtual     status_t    setMasterVolume(float value);
     virtual     status_t    setMasterMute(bool muted);
 
     virtual     float       masterVolume() const;
+    virtual     float       masterVolumeSW() const;
     virtual     bool        masterMute() const;
 
-    virtual     status_t    setStreamVolume(int stream, float value, int output);
-    virtual     status_t    setStreamMute(int stream, bool muted);
+    virtual     status_t    setStreamVolume(audio_stream_type_t stream, float value,
+                                            audio_io_handle_t output);
+    virtual     status_t    setStreamMute(audio_stream_type_t stream, bool muted);
 
-    virtual     float       streamVolume(int stream, int output) const;
-    virtual     bool        streamMute(int stream) const;
+    virtual     float       streamVolume(audio_stream_type_t stream,
+                                         audio_io_handle_t output) const;
+    virtual     bool        streamMute(audio_stream_type_t stream) const;
 
-    virtual     status_t    setMode(int mode);
+    virtual     status_t    setMode(audio_mode_t mode);
 
     virtual     status_t    setMicMute(bool state);
     virtual     bool        getMicMute() const;
 
-    virtual     status_t    setParameters(int ioHandle, const String8& keyValuePairs);
-    virtual     String8     getParameters(int ioHandle, const String8& keys);
+    virtual     status_t    setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs);
+    virtual     String8     getParameters(audio_io_handle_t ioHandle, const String8& keys) const;
 
     virtual     void        registerClient(const sp<IAudioFlingerClient>& client);
 
-    virtual     size_t      getInputBufferSize(uint32_t sampleRate, int format, int channelCount);
-    virtual     unsigned int  getInputFramesLost(int ioHandle);
+    virtual     size_t      getInputBufferSize(uint32_t sampleRate, audio_format_t format, int channelCount) const;
 
-    virtual int openOutput(uint32_t *pDevices,
+    virtual audio_io_handle_t openOutput(uint32_t *pDevices,
                                     uint32_t *pSamplingRate,
-                                    uint32_t *pFormat,
+                                    audio_format_t *pFormat,
                                     uint32_t *pChannels,
                                     uint32_t *pLatencyMs,
-                                    uint32_t flags);
+                                    audio_policy_output_flags_t flags);
 
-    virtual int openDuplicateOutput(int output1, int output2);
+    virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
+                                                  audio_io_handle_t output2);
 
-    virtual status_t closeOutput(int output);
+    virtual status_t closeOutput(audio_io_handle_t output);
 
-    virtual status_t suspendOutput(int output);
+    virtual status_t suspendOutput(audio_io_handle_t output);
 
-    virtual status_t restoreOutput(int output);
+    virtual status_t restoreOutput(audio_io_handle_t output);
 
-    virtual int openInput(uint32_t *pDevices,
+    virtual audio_io_handle_t openInput(uint32_t *pDevices,
                             uint32_t *pSamplingRate,
-                            uint32_t *pFormat,
+                            audio_format_t *pFormat,
                             uint32_t *pChannels,
-                            uint32_t acoustics);
+                            audio_in_acoustics_t acoustics);
 
-    virtual status_t closeInput(int input);
+    virtual status_t closeInput(audio_io_handle_t input);
 
-    virtual status_t setStreamOutput(uint32_t stream, int output);
+    virtual status_t setStreamOutput(audio_stream_type_t stream, audio_io_handle_t output);
 
     virtual status_t setVoiceVolume(float volume);
 
-    virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, int output);
+    virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
+                                       audio_io_handle_t output) const;
+
+    virtual     unsigned int  getInputFramesLost(audio_io_handle_t ioHandle) const;
 
     virtual int newAudioSessionId();
 
@@ -153,54 +177,25 @@
 
     virtual void releaseAudioSessionId(int audioSession);
 
-    virtual status_t queryNumberEffects(uint32_t *numEffects);
+    virtual status_t queryNumberEffects(uint32_t *numEffects) const;
 
-    virtual status_t queryEffect(uint32_t index, effect_descriptor_t *descriptor);
+    virtual status_t queryEffect(uint32_t index, effect_descriptor_t *descriptor) const;
 
-    virtual status_t getEffectDescriptor(effect_uuid_t *pUuid, effect_descriptor_t *descriptor);
+    virtual status_t getEffectDescriptor(const effect_uuid_t *pUuid,
+                                         effect_descriptor_t *descriptor) const;
 
     virtual sp<IEffect> createEffect(pid_t pid,
                         effect_descriptor_t *pDesc,
                         const sp<IEffectClient>& effectClient,
                         int32_t priority,
-                        int io,
+                        audio_io_handle_t io,
                         int sessionId,
                         status_t *status,
                         int *id,
                         int *enabled);
 
-    virtual status_t moveEffects(int sessionId, int srcOutput, int dstOutput);
-
-    enum hardware_call_state {
-        AUDIO_HW_IDLE = 0,
-        AUDIO_HW_INIT,
-        AUDIO_HW_OUTPUT_OPEN,
-        AUDIO_HW_OUTPUT_CLOSE,
-        AUDIO_HW_INPUT_OPEN,
-        AUDIO_HW_INPUT_CLOSE,
-        AUDIO_HW_STANDBY,
-        AUDIO_HW_SET_MASTER_VOLUME,
-        AUDIO_HW_GET_ROUTING,
-        AUDIO_HW_SET_ROUTING,
-        AUDIO_HW_GET_MODE,
-        AUDIO_HW_SET_MODE,
-        AUDIO_HW_GET_MIC_MUTE,
-        AUDIO_HW_SET_MIC_MUTE,
-        AUDIO_SET_VOICE_VOLUME,
-        AUDIO_SET_PARAMETER,
-    };
-
-    // record interface
-    virtual sp<IAudioRecord> openRecord(
-                                pid_t pid,
-                                int input,
-                                uint32_t sampleRate,
-                                uint32_t format,
-                                uint32_t channelMask,
-                                int frameCount,
-                                uint32_t flags,
-                                int *sessionId,
-                                status_t *status);
+    virtual status_t moveEffects(int sessionId, audio_io_handle_t srcOutput,
+                        audio_io_handle_t dstOutput);
 
     virtual     status_t    onTransact(
                                 uint32_t code,
@@ -208,19 +203,29 @@
                                 Parcel* reply,
                                 uint32_t flags);
 
-                uint32_t    getMode() { return mMode; }
-
-                bool        btNrecIsOff() { return mBtNrecIsOff; }
+    // end of IAudioFlinger interface
 
 private:
+                audio_mode_t getMode() const { return mMode; }
+
+                bool        btNrecIsOff() const { return mBtNrecIsOff; }
+
                             AudioFlinger();
     virtual                 ~AudioFlinger();
 
-    status_t                initCheck() const;
+    // call in any IAudioFlinger method that accesses mPrimaryHardwareDev
+    status_t                initCheck() const { return mPrimaryHardwareDev == NULL ? NO_INIT : NO_ERROR; }
+
+    // RefBase
     virtual     void        onFirstRef();
+
     audio_hw_device_t*      findSuitableHwDev_l(uint32_t devices);
     void                    purgeStaleEffects_l();
 
+    // standby delay for MIXER and DUPLICATING playback threads is read from property
+    // ro.audio.flinger_standbytime_ms or defaults to kDefaultStandbyTimeInNsecs
+    static nsecs_t          mStandbyTimeInNsecs;
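A rough sketch of how that initialization might look, assuming the cutils property API and the utils/Timers.h helpers (the actual code is not shown in this hunk):

    #include <stdlib.h>
    #include <cutils/properties.h>   // property_get(), PROPERTY_VALUE_MAX
    #include <utils/Timers.h>        // nsecs_t, milliseconds()

    // Illustrative only: read ro.audio.flinger_standbytime_ms, else fall back to the default.
    static nsecs_t initStandbyTime() {
        char value[PROPERTY_VALUE_MAX];
        if (property_get("ro.audio.flinger_standbytime_ms", value, NULL) > 0) {
            return milliseconds(atoi(value));
        }
        return kDefaultStandbyTimeInNsecs;
    }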
+
     // Internal dump utilities.
     status_t dumpPermissionDenial(int fd, const Vector<String16>& args);
     status_t dumpClients(int fd, const Vector<String16>& args);
@@ -231,16 +236,22 @@
     public:
                             Client(const sp<AudioFlinger>& audioFlinger, pid_t pid);
         virtual             ~Client();
-        const sp<MemoryDealer>&     heap() const;
+        sp<MemoryDealer>    heap() const;
         pid_t               pid() const { return mPid; }
-        sp<AudioFlinger>    audioFlinger() { return mAudioFlinger; }
+        sp<AudioFlinger>    audioFlinger() const { return mAudioFlinger; }
+
+        bool reserveTimedTrack();
+        void releaseTimedTrack();
 
     private:
                             Client(const Client&);
                             Client& operator = (const Client&);
-        sp<AudioFlinger>    mAudioFlinger;
-        sp<MemoryDealer>    mMemoryDealer;
-        pid_t               mPid;
+        const sp<AudioFlinger> mAudioFlinger;
+        const sp<MemoryDealer> mMemoryDealer;
+        const pid_t         mPid;
+
+        Mutex               mTimedTrackLock;
+        int                 mTimedTrackCount;
     };
 
     // --- Notification Client ---
@@ -251,7 +262,7 @@
                                                 pid_t pid);
         virtual             ~NotificationClient();
 
-                sp<IAudioFlingerClient>    client() { return mClient; }
+                sp<IAudioFlingerClient> audioFlingerClient() const { return mAudioFlingerClient; }
 
                 // IBinder::DeathRecipient
                 virtual     void        binderDied(const wp<IBinder>& who);
@@ -260,9 +271,9 @@
                             NotificationClient(const NotificationClient&);
                             NotificationClient& operator = (const NotificationClient&);
 
-        sp<AudioFlinger>        mAudioFlinger;
-        pid_t                   mPid;
-        sp<IAudioFlingerClient> mClient;
+        const sp<AudioFlinger>  mAudioFlinger;
+        const pid_t             mPid;
+        const sp<IAudioFlingerClient> mAudioFlingerClient;
     };
 
     class TrackHandle;
@@ -282,17 +293,17 @@
 
     class ThreadBase : public Thread {
     public:
-        ThreadBase (const sp<AudioFlinger>& audioFlinger, int id, uint32_t device);
-        virtual             ~ThreadBase();
 
-
-        enum type {
+        enum type_t {
             MIXER,              // Thread class is MixerThread
             DIRECT,             // Thread class is DirectOutputThread
             DUPLICATING,        // Thread class is DuplicatingThread
             RECORD              // Thread class is RecordThread
         };
 
+        ThreadBase (const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id, uint32_t device, type_t type);
+        virtual             ~ThreadBase();
+
         status_t dumpBase(int fd, const Vector<String16>& args);
         status_t dumpEffectChains(int fd, const Vector<String16>& args);
 
@@ -305,6 +316,8 @@
             enum track_state {
                 IDLE,
                 TERMINATED,
+                // These are order-sensitive; do not change order without reviewing the impact.
+                // In particular there are assumptions about > STOPPED.
                 STOPPED,
                 RESUMING,
                 ACTIVE,
@@ -312,52 +325,39 @@
                 PAUSED
             };
 
-            enum track_flags {
-                STEPSERVER_FAILED = 0x01, //  StepServer could not acquire cblk->lock mutex
-                SYSTEM_FLAGS_MASK = 0x0000ffffUL,
-                // The upper 16 bits are used for track-specific flags.
-            };
-
-                                TrackBase(const wp<ThreadBase>& thread,
+                                TrackBase(ThreadBase *thread,
                                         const sp<Client>& client,
                                         uint32_t sampleRate,
-                                        uint32_t format,
+                                        audio_format_t format,
                                         uint32_t channelMask,
                                         int frameCount,
-                                        uint32_t flags,
                                         const sp<IMemory>& sharedBuffer,
                                         int sessionId);
-                                ~TrackBase();
+            virtual             ~TrackBase();
 
-            virtual status_t    start() = 0;
+            virtual status_t    start(pid_t tid) = 0;
             virtual void        stop() = 0;
-                    sp<IMemory> getCblk() const;
+                    sp<IMemory> getCblk() const { return mCblkMemory; }
                     audio_track_cblk_t* cblk() const { return mCblk; }
-                    int         sessionId() { return mSessionId; }
+                    int         sessionId() const { return mSessionId; }
 
         protected:
-            friend class ThreadBase;
-            friend class RecordHandle;
-            friend class PlaybackThread;
-            friend class RecordThread;
-            friend class MixerThread;
-            friend class DirectOutputThread;
-
                                 TrackBase(const TrackBase&);
                                 TrackBase& operator = (const TrackBase&);
 
-            virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer) = 0;
+            // AudioBufferProvider interface
+            virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts) = 0;
             virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
 
-            uint32_t format() const {
+            audio_format_t format() const {
                 return mFormat;
             }
 
-            int channelCount() const ;
+            int channelCount() const { return mChannelCount; }
 
-            uint32_t channelMask() const;
+            uint32_t channelMask() const { return mChannelMask; }
 
-            int sampleRate() const;
+            int sampleRate() const; // FIXME inline after cblk sr moved
 
             void* getBuffer(uint32_t offset, uint32_t frames) const;
 
@@ -372,19 +372,18 @@
             bool step();
             void reset();
 
-            wp<ThreadBase>      mThread;
-            sp<Client>          mClient;
+            const wp<ThreadBase> mThread;
+            /*const*/ sp<Client> mClient;   // see explanation at ~TrackBase() why not const
             sp<IMemory>         mCblkMemory;
             audio_track_cblk_t* mCblk;
             void*               mBuffer;
             void*               mBufferEnd;
             uint32_t            mFrameCount;
             // we don't really need a lock for these
-            int                 mState;
-            int                 mClientTid;
-            uint32_t            mFormat;
-            uint32_t            mFlags;
-            int                 mSessionId;
+            track_state         mState;
+            const audio_format_t mFormat;
+            bool                mStepServerFailed;
+            const int           mSessionId;
             uint8_t             mChannelCount;
             uint32_t            mChannelMask;
         };
@@ -413,12 +412,14 @@
         };
 
         virtual     status_t    initCheck() const = 0;
-                    int         type() const { return mType; }
-                    uint32_t    sampleRate() const;
-                    int         channelCount() const;
-                    uint32_t    format() const;
-                    size_t      frameCount() const;
+                    type_t      type() const { return mType; }
+                    uint32_t    sampleRate() const { return mSampleRate; }
+                    int         channelCount() const { return mChannelCount; }
+                    audio_format_t format() const { return mFormat; }
+                    size_t      frameCount() const { return mFrameCount; }
                     void        wakeUp()    { mWaitWorkCV.broadcast(); }
+        // Should be "virtual status_t requestExitAndWait()" and override the same
+        // method in Thread, but Thread::requestExitAndWait() is not yet virtual.
                     void        exit();
         virtual     bool        checkForNewParameters_l() = 0;
         virtual     status_t    setParameters(const String8& keyValuePairs);
@@ -427,9 +428,9 @@
                     void        sendConfigEvent(int event, int param = 0);
                     void        sendConfigEvent_l(int event, int param = 0);
                     void        processConfigEvents();
-                    int         id() const { return mId;}
-                    bool        standby() { return mStandby; }
-                    uint32_t    device() { return mDevice; }
+                    audio_io_handle_t id() const { return mId;}
+                    bool        standby() const { return mStandby; }
+                    uint32_t    device() const { return mDevice; }
         virtual     audio_stream_t* stream() = 0;
 
                     sp<EffectHandle> createEffect_l(
@@ -442,7 +443,7 @@
                                         status_t *status);
                     void disconnectEffect(const sp< EffectModule>& effect,
                                           const wp<EffectHandle>& handle,
-                                          bool unpiniflast);
+                                          bool unpinIfLast);
 
                     // return values for hasAudioSession (bit field)
                     enum effect_state {
@@ -460,14 +461,15 @@
         virtual     status_t addEffectChain_l(const sp<EffectChain>& chain) = 0;
                     // remove an effect chain from the chain list (mEffectChains)
         virtual     size_t removeEffectChain_l(const sp<EffectChain>& chain) = 0;
-                    // lock mall effect chains Mutexes. Must be called before releasing the
+                    // lock all effect chains Mutexes. Must be called before releasing the
                     // ThreadBase mutex before processing the mixer and effects. This guarantees the
                     // integrity of the chains during the process.
-                    void lockEffectChains_l(Vector<sp <EffectChain> >& effectChains);
+                    // Also sets the parameter 'effectChains' to the current value of mEffectChains.
+                    void lockEffectChains_l(Vector< sp<EffectChain> >& effectChains);
                     // unlock effect chains after process
-                    void unlockEffectChains(Vector<sp <EffectChain> >& effectChains);
+                    void unlockEffectChains(const Vector< sp<EffectChain> >& effectChains);
                     // set audio mode to all effect chains
-                    void setMode(uint32_t mode);
+                    void setMode(audio_mode_t mode);
                     // get effect module with corresponding ID on specified audio session
                     sp<AudioFlinger::EffectModule> getEffect_l(int sessionId, int effectId);
                     // add an effect module. Also creates the effect chain if none exists for
@@ -525,96 +527,122 @@
                     // check if some effects must be suspended when an effect chain is added
                     void checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain);
 
-        friend class AudioFlinger;
-        friend class Track;
-        friend class TrackBase;
-        friend class PlaybackThread;
-        friend class MixerThread;
-        friend class DirectOutputThread;
-        friend class DuplicatingThread;
-        friend class RecordThread;
-        friend class RecordTrack;
+        friend class AudioFlinger;      // for mEffectChains
 
-                    int                     mType;
+                    const type_t            mType;
+
+                    // Used by parameters, config events, addTrack_l, exit
                     Condition               mWaitWorkCV;
-                    sp<AudioFlinger>        mAudioFlinger;
+
+                    const sp<AudioFlinger>  mAudioFlinger;
                     uint32_t                mSampleRate;
                     size_t                  mFrameCount;
                     uint32_t                mChannelMask;
                     uint16_t                mChannelCount;
-                    uint16_t                mFrameSize;
-                    uint32_t                mFormat;
+                    size_t                  mFrameSize;
+                    audio_format_t          mFormat;
+
+                    // Parameter sequence by client: binder thread calling setParameters():
+                    //  1. Lock mLock
+                    //  2. Append to mNewParameters
+                    //  3. mWaitWorkCV.signal
+                    //  4. mParamCond.waitRelative with timeout
+                    //  5. read mParamStatus
+                    //  6. mWaitWorkCV.signal
+                    //  7. Unlock
+                    //
+                    // Parameter sequence by server: threadLoop calling checkForNewParameters_l():
+                    // 1. Lock mLock
+                    // 2. If there is an entry in mNewParameters proceed ...
+                    // 3. Read first entry in mNewParameters
+                    // 4. Process
+                    // 5. Remove first entry from mNewParameters
+                    // 6. Set mParamStatus
+                    // 7. mParamCond.signal
+                    // 8. mWaitWorkCV.wait with timeout (this is to avoid overwriting mParamStatus)
+                    // 9. Unlock
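As a sketch of the client-side half of the sequence documented above (illustrative only; the helper name and the timeout value are assumptions, not taken from this patch):

    // Hypothetical client-side helper following steps 1-7 above.
    status_t requestSetParameters(const String8& keyValuePairs) {
        Mutex::Autolock _l(mLock);                      // 1. lock
        mNewParameters.add(keyValuePairs);              // 2. append request
        mWaitWorkCV.signal();                           // 3. wake the thread loop
        status_t status = TIMED_OUT;
        if (mParamCond.waitRelative(mLock, seconds(2)) == NO_ERROR) {  // 4. wait
            status = mParamStatus;                      // 5. read result
            mWaitWorkCV.signal();                       // 6. let the server continue
        }
        return status;                                  // 7. mLock released on return
    }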
                     Condition               mParamCond;
                     Vector<String8>         mNewParameters;
                     status_t                mParamStatus;
-                    Vector<ConfigEvent *>   mConfigEvents;
+
+                    Vector<ConfigEvent>     mConfigEvents;
                     bool                    mStandby;
-                    int                     mId;
-                    bool                    mExiting;
+                    const audio_io_handle_t mId;
                     Vector< sp<EffectChain> > mEffectChains;
                     uint32_t                mDevice;    // output device for PlaybackThread
                                                         // input + output devices for RecordThread
-                    static const int        kNameLength = 32;
+                    static const int        kNameLength = 16;   // prctl(PR_SET_NAME) limit
                     char                    mName[kNameLength];
                     sp<IPowerManager>       mPowerManager;
                     sp<IBinder>             mWakeLockToken;
-                    sp<PMDeathRecipient>    mDeathRecipient;
+                    const sp<PMDeathRecipient> mDeathRecipient;
                     // list of suspended effects per session and per type. The first vector is
                     // keyed by session ID, the second by type UUID timeLow field
                     KeyedVector< int, KeyedVector< int, sp<SuspendedSessionDesc> > >  mSuspendedSessions;
     };
 
+    struct  stream_type_t {
+        stream_type_t()
+            :   volume(1.0f),
+                mute(false),
+                valid(true)
+        {
+        }
+        float       volume;
+        bool        mute;
+        bool        valid;
+    };
+
     // --- PlaybackThread ---
     class PlaybackThread : public ThreadBase {
     public:
 
         enum mixer_state {
-            MIXER_IDLE,
-            MIXER_TRACKS_ENABLED,
-            MIXER_TRACKS_READY
+            MIXER_IDLE,             // no active tracks
+            MIXER_TRACKS_ENABLED,   // at least one active track, but no track has any data ready
+            MIXER_TRACKS_READY      // at least one active track, and at least one track has data
+            // standby mode does not have an enum value
+            // suspend by audio policy manager is orthogonal to mixer state
         };
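For orientation, a sketch of how a playback thread loop might act on this state (illustrative only; the real threadLoop() control flow is more involved):

    // Dispatch on the mixer_state returned by prepareTracks_l().
    switch (mMixerStatus) {
    case MIXER_TRACKS_READY:
        threadLoop_mix();        // at least one track has data: fill mMixBuffer
        break;
    case MIXER_TRACKS_ENABLED:
        threadLoop_sleepTime();  // tracks are active but have no data: compute a sleep time
        break;
    case MIXER_IDLE:
    default:
        // no active tracks: candidate for standby once standbyDelay elapses
        break;
    }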
 
         // playback track
         class Track : public TrackBase {
         public:
-                                Track(  const wp<ThreadBase>& thread,
+                                Track(  PlaybackThread *thread,
                                         const sp<Client>& client,
-                                        int streamType,
+                                        audio_stream_type_t streamType,
                                         uint32_t sampleRate,
-                                        uint32_t format,
+                                        audio_format_t format,
                                         uint32_t channelMask,
                                         int frameCount,
                                         const sp<IMemory>& sharedBuffer,
                                         int sessionId);
-                                ~Track();
+            virtual             ~Track();
 
                     void        dump(char* buffer, size_t size);
-            virtual status_t    start();
+            virtual status_t    start(pid_t tid);
             virtual void        stop();
                     void        pause();
 
                     void        flush();
                     void        destroy();
                     void        mute(bool);
-                    void        setVolume(float left, float right);
                     int name() const {
                         return mName;
                     }
 
-                    int type() const {
+                    audio_stream_type_t streamType() const {
                         return mStreamType;
                     }
                     status_t    attachAuxEffect(int EffectId);
                     void        setAuxBuffer(int EffectId, int32_t *buffer);
-                    int32_t     *auxBuffer() { return mAuxBuffer; }
+                    int32_t     *auxBuffer() const { return mAuxBuffer; }
                     void        setMainBuffer(int16_t *buffer) { mMainBuffer = buffer; }
-                    int16_t     *mainBuffer() { return mMainBuffer; }
-                    int         auxEffectId() { return mAuxEffectId; }
-
+                    int16_t     *mainBuffer() const { return mMainBuffer; }
+                    int         auxEffectId() const { return mAuxEffectId; }
 
         protected:
-            friend class ThreadBase;
-            friend class TrackHandle;
+            // for numerous fields and methods
             friend class PlaybackThread;
             friend class MixerThread;
             friend class DirectOutputThread;
@@ -622,8 +650,13 @@
                                 Track(const Track&);
                                 Track& operator = (const Track&);
 
-            virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
-            bool isMuted() { return mMute; }
+            // AudioBufferProvider interface
+            virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts = kInvalidPTS);
+            // releaseBuffer() not overridden
+
+            virtual uint32_t framesReady() const;
+
+            bool isMuted() const { return mMute; }
             bool isPausing() const {
                 return mState == PAUSING;
             }
@@ -638,8 +671,11 @@
                 return (mStreamType == AUDIO_STREAM_CNT);
             }
 
+        public:
+            virtual bool isTimedTrack() const { return false; }
+        protected:
+
             // we don't really need a lock for these
-            float               mVolume[2];
             volatile bool       mMute;
             // FILLED state is used for suppressing volume ramp at begin of playing
             enum {FS_FILLING, FS_FILLED, FS_ACTIVE};
@@ -647,7 +683,7 @@
             int8_t              mRetryCount;
             sp<IMemory>         mSharedBuffer;
             bool                mResetDone;
-            int                 mStreamType;
+            audio_stream_type_t mStreamType;
             int                 mName;
             int16_t             *mMainBuffer;
             int32_t             *mAuxBuffer;
@@ -655,6 +691,80 @@
             bool                mHasVolumeController;
         };  // end of Track
 
+        class TimedTrack : public Track {
+          public:
+            static sp<TimedTrack> create(PlaybackThread *thread,
+                                         const sp<Client>& client,
+                                         audio_stream_type_t streamType,
+                                         uint32_t sampleRate,
+                                         audio_format_t format,
+                                         uint32_t channelMask,
+                                         int frameCount,
+                                         const sp<IMemory>& sharedBuffer,
+                                         int sessionId);
+            ~TimedTrack();
+
+            class TimedBuffer {
+              public:
+                TimedBuffer();
+                TimedBuffer(const sp<IMemory>& buffer, int64_t pts);
+                const sp<IMemory>& buffer() const { return mBuffer; }
+                int64_t pts() const { return mPTS; }
+                int position() const { return mPosition; }
+                void setPosition(int pos) { mPosition = pos; }
+              private:
+                sp<IMemory> mBuffer;
+                int64_t mPTS;
+                int mPosition;
+            };
+
+            virtual bool isTimedTrack() const { return true; }
+
+            virtual uint32_t framesReady() const;
+
+            // AudioBufferProvider interface
+            virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts);
+            virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+
+            void timedYieldSamples(AudioBufferProvider::Buffer* buffer);
+            void timedYieldSilence(uint32_t numFrames,
+                                   AudioBufferProvider::Buffer* buffer);
+
+            status_t    allocateTimedBuffer(size_t size,
+                                            sp<IMemory>* buffer);
+            status_t    queueTimedBuffer(const sp<IMemory>& buffer,
+                                         int64_t pts);
+            status_t    setMediaTimeTransform(const LinearTransform& xform,
+                                              TimedAudioTrack::TargetTimeline target);
+            void        trimTimedBufferQueue_l();
+
+          private:
+            TimedTrack(PlaybackThread *thread,
+                       const sp<Client>& client,
+                       audio_stream_type_t streamType,
+                       uint32_t sampleRate,
+                       audio_format_t format,
+                       uint32_t channelMask,
+                       int frameCount,
+                       const sp<IMemory>& sharedBuffer,
+                       int sessionId);
+
+            uint64_t            mLocalTimeFreq;
+            LinearTransform     mLocalTimeToSampleTransform;
+            sp<MemoryDealer>    mTimedMemoryDealer;
+            Vector<TimedBuffer> mTimedBufferQueue;
+            uint8_t*            mTimedSilenceBuffer;
+            uint32_t            mTimedSilenceBufferSize;
+            mutable Mutex       mTimedBufferQueueLock;
+            bool                mTimedAudioOutputOnTime;
+            CCHelper            mCCHelper;
+
+            Mutex               mMediaTimeTransformLock;
+            LinearTransform     mMediaTimeTransform;
+            bool                mMediaTimeTransformValid;
+            TimedAudioTrack::TargetTimeline mMediaTimeTransformTarget;
+        };
+
 
         // playback track
         class OutputTrack : public Track {
@@ -665,23 +775,27 @@
                 int16_t *mBuffer;
             };
 
-                                OutputTrack(  const wp<ThreadBase>& thread,
+                                OutputTrack(PlaybackThread *thread,
                                         DuplicatingThread *sourceThread,
                                         uint32_t sampleRate,
-                                        uint32_t format,
+                                        audio_format_t format,
                                         uint32_t channelMask,
                                         int frameCount);
-                                ~OutputTrack();
+            virtual             ~OutputTrack();
 
-            virtual status_t    start();
+            virtual status_t    start(pid_t tid);
             virtual void        stop();
                     bool        write(int16_t* data, uint32_t frames);
-                    bool        bufferQueueEmpty() { return (mBufferQueue.size() == 0) ? true : false; }
-                    bool        isActive() { return mActive; }
-            wp<ThreadBase>&     thread()  { return mThread; }
+                    bool        bufferQueueEmpty() const { return mBufferQueue.size() == 0; }
+                    bool        isActive() const { return mActive; }
+            const wp<ThreadBase>& thread() const { return mThread; }
 
         private:
 
+            enum {
+                NO_MORE_BUFFERS = 0x80000001,   // same name as in AudioTrack.h; the value need not match
+            };
+
             status_t            obtainBuffer(AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs);
             void                clearBufferQueue();
 
@@ -691,56 +805,73 @@
             Vector < Buffer* >          mBufferQueue;
             AudioBufferProvider::Buffer mOutBuffer;
             bool                        mActive;
-            DuplicatingThread*          mSourceThread;
+            DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
         };  // end of OutputTrack
 
-        PlaybackThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, int id, uint32_t device);
+        PlaybackThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
+                        audio_io_handle_t id, uint32_t device, type_t type);
         virtual             ~PlaybackThread();
 
-        virtual     status_t    dump(int fd, const Vector<String16>& args);
+                    status_t    dump(int fd, const Vector<String16>& args);
 
         // Thread virtuals
         virtual     status_t    readyToRun();
+        virtual     bool        threadLoop();
+
+        // RefBase
         virtual     void        onFirstRef();
 
-        virtual     status_t    initCheck() const { return (mOutput == 0) ? NO_INIT : NO_ERROR; }
+protected:
+        // Code snippets that were lifted up out of threadLoop()
+        virtual     void        threadLoop_mix() = 0;
+        virtual     void        threadLoop_sleepTime() = 0;
+        virtual     void        threadLoop_write();
+        virtual     void        threadLoop_standby();
 
-        virtual     uint32_t    latency() const;
+                    // prepareTracks_l reads and writes mActiveTracks, and also returns the
+                    // pending set of tracks to remove via Vector 'tracksToRemove'.  The caller is
+                    // responsible for clearing or destroying this Vector later on, when it
+                    // is safe to do so. That will drop the final ref count and destroy the tracks.
+        virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
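A condensed sketch of the caller-side contract described in the comment above (not the actual threadLoop() body):

    // Illustrative only: the caller owns tracksToRemove and drops the final
    // references once it is safe for the track destructors to run.
    Vector< sp<Track> > tracksToRemove;
    {
        Mutex::Autolock _l(mLock);
        mMixerStatus = prepareTracks_l(&tracksToRemove);
    }
    // ... mix and write outside the critical section ...
    tracksToRemove.clear();   // last references dropped here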
 
-        virtual     status_t    setMasterVolume(float value);
-        virtual     status_t    setMasterMute(bool muted);
+public:
 
-        virtual     float       masterVolume() const;
-        virtual     bool        masterMute() const;
+        virtual     status_t    initCheck() const { return (mOutput == NULL) ? NO_INIT : NO_ERROR; }
 
-        virtual     status_t    setStreamVolume(int stream, float value);
-        virtual     status_t    setStreamMute(int stream, bool muted);
+                    // return estimated latency in milliseconds, as reported by HAL
+                    uint32_t    latency() const;
 
-        virtual     float       streamVolume(int stream) const;
-        virtual     bool        streamMute(int stream) const;
+                    void        setMasterVolume(float value);
+                    void        setMasterMute(bool muted);
+
+                    void        setStreamVolume(audio_stream_type_t stream, float value);
+                    void        setStreamMute(audio_stream_type_t stream, bool muted);
+
+                    float       streamVolume(audio_stream_type_t stream) const;
 
                     sp<Track>   createTrack_l(
                                     const sp<AudioFlinger::Client>& client,
-                                    int streamType,
+                                    audio_stream_type_t streamType,
                                     uint32_t sampleRate,
-                                    uint32_t format,
+                                    audio_format_t format,
                                     uint32_t channelMask,
                                     int frameCount,
                                     const sp<IMemory>& sharedBuffer,
                                     int sessionId,
+                                    bool isTimed,
                                     status_t *status);
 
-                    AudioStreamOut* getOutput();
+                    AudioStreamOut* getOutput() const;
                     AudioStreamOut* clearOutput();
                     virtual audio_stream_t* stream();
 
                     void        suspend() { mSuspended++; }
-                    void        restore() { if (mSuspended) mSuspended--; }
-                    bool        isSuspended() { return (mSuspended != 0); }
+                    void        restore() { if (mSuspended > 0) mSuspended--; }
+                    bool        isSuspended() const { return (mSuspended > 0); }
         virtual     String8     getParameters(const String8& keys);
         virtual     void        audioConfigChanged_l(int event, int param = 0);
-        virtual     status_t    getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames);
-                    int16_t     *mixBuffer() { return mMixBuffer; };
+                    status_t    getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames);
+                    int16_t     *mixBuffer() const { return mMixBuffer; };
 
         virtual     void detachAuxEffect_l(int effectId);
                     status_t attachAuxEffect(const sp<AudioFlinger::PlaybackThread::Track> track,
@@ -753,25 +884,19 @@
                     virtual uint32_t hasAudioSession(int sessionId);
                     virtual uint32_t getStrategyForSession_l(int sessionId);
 
-                            void setStreamValid(int streamType, bool valid);
-
-        struct  stream_type_t {
-            stream_type_t()
-                :   volume(1.0f),
-                    mute(false),
-                    valid(true)
-            {
-            }
-            float       volume;
-            bool        mute;
-            bool        valid;
-        };
+                            void setStreamValid(audio_stream_type_t streamType, bool valid);
 
     protected:
         int16_t*                        mMixBuffer;
-        int                             mSuspended;
+        uint32_t                        mSuspended;     // suspend count, > 0 means suspended
         int                             mBytesWritten;
+    private:
+        // mMasterMute exists in both PlaybackThread and AudioFlinger.  When a
+        // PlaybackThread needs to find out if master-muted, it checks its local
+        // copy rather than the one in AudioFlinger.  This optimization saves a lock.
         bool                            mMasterMute;
+                    void        setMasterMute_l(bool muted) { mMasterMute = muted; }
+    protected:
         SortedVector< wp<Track> >       mActiveTracks;
 
         virtual int             getTrackName_l() = 0;
@@ -780,15 +905,19 @@
         virtual uint32_t        idleSleepTimeUs() = 0;
         virtual uint32_t        suspendSleepTimeUs() = 0;
 
+        // Code snippets that are temporarily lifted up out of threadLoop() until the merge
+                    void        checkSilentMode_l();
+
+        // Non-trivial for DUPLICATING only
+        virtual     void        saveOutputTracks() { }
+        virtual     void        clearOutputTracks() { }
+
+        // Cache various calculated values, at threadLoop() entry and after a parameter change
+        virtual     void        cacheParameters_l();
+
     private:
 
-        friend class AudioFlinger;
-        friend class OutputTrack;
-        friend class Track;
-        friend class TrackBase;
-        friend class MixerThread;
-        friend class DirectOutputThread;
-        friend class DuplicatingThread;
+        friend class AudioFlinger;      // for numerous fields and methods
 
         PlaybackThread(const Client&);
         PlaybackThread& operator = (const PlaybackThread&);
@@ -803,52 +932,82 @@
         status_t    dumpTracks(int fd, const Vector<String16>& args);
 
         SortedVector< sp<Track> >       mTracks;
-        // mStreamTypes[] uses 1 additionnal stream type internally for the OutputTrack used by DuplicatingThread
+        // mStreamTypes[] uses 1 additional stream type internally for the OutputTrack used by DuplicatingThread
         stream_type_t                   mStreamTypes[AUDIO_STREAM_CNT + 1];
-        AudioStreamOut*                 mOutput;
+        AudioStreamOut                  *mOutput;
         float                           mMasterVolume;
         nsecs_t                         mLastWriteTime;
         int                             mNumWrites;
         int                             mNumDelayedWrites;
         bool                            mInWrite;
+
+        // FIXME rename these former local variables of threadLoop to standard "m" names
+        nsecs_t                         standbyTime;
+        size_t                          mixBufferSize;
+
+        // cached copies of activeSleepTimeUs() and idleSleepTimeUs() made by cacheParameters_l()
+        uint32_t                        activeSleepTime;
+        uint32_t                        idleSleepTime;
+
+        uint32_t                        sleepTime;
+
+        // mixer status returned by prepareTracks_l()
+        mixer_state                     mMixerStatus;       // current cycle
+        mixer_state                     mPrevMixerStatus;   // previous cycle
+
+        // FIXME move these declarations into the specific sub-class that needs them
+        // MIXER only
+        bool                            longStandbyExit;
+        uint32_t                        sleepTimeShift;
+
+        // same as AudioFlinger::mStandbyTimeInNsecs except for DIRECT which uses a shorter value
+        nsecs_t                         standbyDelay;
+
+        // MIXER only
+        nsecs_t                         maxPeriod;
+
+        // DUPLICATING only
+        uint32_t                        writeFrames;
     };
 
     class MixerThread : public PlaybackThread {
     public:
         MixerThread (const sp<AudioFlinger>& audioFlinger,
                      AudioStreamOut* output,
-                     int id,
-                     uint32_t device);
+                     audio_io_handle_t id,
+                     uint32_t device,
+                     type_t type = MIXER);
         virtual             ~MixerThread();
 
         // Thread virtuals
-        virtual     bool        threadLoop();
 
-                    void        invalidateTracks(int streamType);
+                    void        invalidateTracks(audio_stream_type_t streamType);
         virtual     bool        checkForNewParameters_l();
         virtual     status_t    dumpInternals(int fd, const Vector<String16>& args);
 
     protected:
-                    uint32_t    prepareTracks_l(const SortedVector< wp<Track> >& activeTracks,
-                                                Vector< sp<Track> > *tracksToRemove);
+        virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
         virtual     int         getTrackName_l();
         virtual     void        deleteTrackName_l(int name);
         virtual     uint32_t    idleSleepTimeUs();
         virtual     uint32_t    suspendSleepTimeUs();
+        virtual     void        cacheParameters_l();
+
+        // threadLoop snippets
+        virtual     void        threadLoop_mix();
+        virtual     void        threadLoop_sleepTime();
 
                     AudioMixer* mAudioMixer;
-                    uint32_t    mPrevMixerStatus; // previous status (mixer_state) returned by
-                                                  // prepareTracks_l()
     };
 
     class DirectOutputThread : public PlaybackThread {
     public:
 
-        DirectOutputThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, int id, uint32_t device);
-        ~DirectOutputThread();
+        DirectOutputThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
+                            audio_io_handle_t id, uint32_t device);
+        virtual                 ~DirectOutputThread();
 
         // Thread virtuals
-        virtual     bool        threadLoop();
 
         virtual     bool        checkForNewParameters_l();
 
@@ -858,23 +1017,38 @@
         virtual     uint32_t    activeSleepTimeUs();
         virtual     uint32_t    idleSleepTimeUs();
         virtual     uint32_t    suspendSleepTimeUs();
+        virtual     void        cacheParameters_l();
 
-    private:
-        void applyVolume(uint16_t leftVol, uint16_t rightVol, bool ramp);
+        // threadLoop snippets
+        virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
+        virtual     void        threadLoop_mix();
+        virtual     void        threadLoop_sleepTime();
 
+        // volumes last sent to audio HAL with stream->set_volume()
+        // FIXME use standard representation and names
         float mLeftVolFloat;
         float mRightVolFloat;
         uint16_t mLeftVolShort;
         uint16_t mRightVolShort;
+
+        // FIXME rename these former local variables of threadLoop to standard names
+        // next 3 were local to the while !exitPending() loop
+        bool rampVolume;
+        uint16_t leftVol;
+        uint16_t rightVol;
+
+    private:
+        // prepareTracks_l() tells threadLoop_mix() the name of the single active track
+        sp<Track>               mActiveTrack;
     };
 
     class DuplicatingThread : public MixerThread {
     public:
-        DuplicatingThread (const sp<AudioFlinger>& audioFlinger, MixerThread* mainThread, int id);
-        ~DuplicatingThread();
+        DuplicatingThread (const sp<AudioFlinger>& audioFlinger, MixerThread* mainThread,
+                           audio_io_handle_t id);
+        virtual                 ~DuplicatingThread();
 
         // Thread virtuals
-        virtual     bool        threadLoop();
                     void        addOutputTrack(MixerThread* thread);
                     void        removeOutputTrack(MixerThread* thread);
                     uint32_t    waitTimeMs() { return mWaitTimeMs; }
@@ -882,51 +1056,74 @@
         virtual     uint32_t    activeSleepTimeUs();
 
     private:
-                    bool        outputsReady(SortedVector< sp<OutputTrack> > &outputTracks);
-                    void        updateWaitTime();
+                    bool        outputsReady(const SortedVector< sp<OutputTrack> > &outputTracks);
+    protected:
+        // threadLoop snippets
+        virtual     void        threadLoop_mix();
+        virtual     void        threadLoop_sleepTime();
+        virtual     void        threadLoop_write();
+        virtual     void        threadLoop_standby();
+        virtual     void        cacheParameters_l();
 
-        SortedVector < sp<OutputTrack> >  mOutputTracks;
+    private:
+        // called from threadLoop, addOutputTrack, removeOutputTrack
+        virtual     void        updateWaitTime_l();
+    protected:
+        virtual     void        saveOutputTracks();
+        virtual     void        clearOutputTracks();
+    private:
+
                     uint32_t    mWaitTimeMs;
+        // outputTracks is the local copy of mOutputTracks made by saveOutputTracks()
+        // for use during the mix cycle, and released by clearOutputTracks()
+        SortedVector < sp<OutputTrack> >  outputTracks;
+        SortedVector < sp<OutputTrack> >  mOutputTracks;
     };
 
-              PlaybackThread *checkPlaybackThread_l(int output) const;
-              MixerThread *checkMixerThread_l(int output) const;
-              RecordThread *checkRecordThread_l(int input) const;
-              float streamVolumeInternal(int stream) const { return mStreamTypes[stream].volume; }
-              void audioConfigChanged_l(int event, int ioHandle, void *param2);
+              PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const;
+              MixerThread *checkMixerThread_l(audio_io_handle_t output) const;
+              RecordThread *checkRecordThread_l(audio_io_handle_t input) const;
+              // no range check, AudioFlinger::mLock held
+              bool streamMute_l(audio_stream_type_t stream) const
+                                { return mStreamTypes[stream].mute; }
+              // no range check, doesn't check per-thread stream volume, AudioFlinger::mLock held
+              float streamVolume_l(audio_stream_type_t stream) const
+                                { return mStreamTypes[stream].volume; }
+              void audioConfigChanged_l(int event, audio_io_handle_t ioHandle, const void *param2);
 
+              // allocate an audio_io_handle_t, session ID, or effect ID
               uint32_t nextUniqueId();
+
               status_t moveEffectChain_l(int sessionId,
-                                     AudioFlinger::PlaybackThread *srcThread,
-                                     AudioFlinger::PlaybackThread *dstThread,
+                                     PlaybackThread *srcThread,
+                                     PlaybackThread *dstThread,
                                      bool reRegister);
-              PlaybackThread *primaryPlaybackThread_l();
-              uint32_t primaryOutputDevice_l();
+              // return thread associated with primary hardware device, or NULL
+              PlaybackThread *primaryPlaybackThread_l() const;
+              uint32_t primaryOutputDevice_l() const;
 
-    friend class AudioBuffer;
-
+    // server side of the client's IAudioTrack
     class TrackHandle : public android::BnAudioTrack {
     public:
                             TrackHandle(const sp<PlaybackThread::Track>& track);
         virtual             ~TrackHandle();
-        virtual status_t    start();
+        virtual sp<IMemory> getCblk() const;
+        virtual status_t    start(pid_t tid);
         virtual void        stop();
         virtual void        flush();
         virtual void        mute(bool);
         virtual void        pause();
-        virtual void        setVolume(float left, float right);
-        virtual sp<IMemory> getCblk() const;
         virtual status_t    attachAuxEffect(int effectId);
+        virtual status_t    allocateTimedBuffer(size_t size,
+                                                sp<IMemory>* buffer);
+        virtual status_t    queueTimedBuffer(const sp<IMemory>& buffer,
+                                             int64_t pts);
+        virtual status_t    setMediaTimeTransform(const LinearTransform& xform,
+                                                  int target);
         virtual status_t onTransact(
             uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
     private:
-        sp<PlaybackThread::Track> mTrack;
+        const sp<PlaybackThread::Track> mTrack;
     };
 
-    friend class Client;
-    friend class PlaybackThread::Track;
-
-
                 void        removeClient_l(pid_t pid);
                 void        removeNotificationClient(pid_t pid);
 
@@ -939,17 +1136,16 @@
         // record track
         class RecordTrack : public TrackBase {
         public:
-                                RecordTrack(const wp<ThreadBase>& thread,
+                                RecordTrack(RecordThread *thread,
                                         const sp<Client>& client,
                                         uint32_t sampleRate,
-                                        uint32_t format,
+                                        audio_format_t format,
                                         uint32_t channelMask,
                                         int frameCount,
-                                        uint32_t flags,
                                         int sessionId);
-                                ~RecordTrack();
+            virtual             ~RecordTrack();
 
-            virtual status_t    start();
+            virtual status_t    start(pid_t tid);
             virtual void        stop();
 
                     bool        overflow() { bool tmp = mOverflow; mOverflow = false; return tmp; }
@@ -958,13 +1154,14 @@
                     void        dump(char* buffer, size_t size);
 
         private:
-            friend class AudioFlinger;
-            friend class RecordThread;
+            friend class AudioFlinger;  // for mState
 
                                 RecordTrack(const RecordTrack&);
                                 RecordTrack& operator = (const RecordTrack&);
 
-            virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
+            // AudioBufferProvider interface
+            virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts = kInvalidPTS);
+            // releaseBuffer() not overridden
 
             bool                mOverflow;
         };
@@ -974,34 +1171,39 @@
                         AudioStreamIn *input,
                         uint32_t sampleRate,
                         uint32_t channels,
-                        int id,
+                        audio_io_handle_t id,
                         uint32_t device);
-                ~RecordThread();
+                virtual     ~RecordThread();
 
+        // Thread
         virtual bool        threadLoop();
         virtual status_t    readyToRun();
+
+        // RefBase
         virtual void        onFirstRef();
 
-        virtual status_t    initCheck() const { return (mInput == 0) ? NO_INIT : NO_ERROR; }
+        virtual status_t    initCheck() const { return (mInput == NULL) ? NO_INIT : NO_ERROR; }
                 sp<AudioFlinger::RecordThread::RecordTrack>  createRecordTrack_l(
                         const sp<AudioFlinger::Client>& client,
                         uint32_t sampleRate,
-                        int format,
+                        audio_format_t format,
                         int channelMask,
                         int frameCount,
-                        uint32_t flags,
                         int sessionId,
                         status_t *status);
 
                 status_t    start(RecordTrack* recordTrack);
+                status_t    start(RecordTrack* recordTrack, pid_t tid);
                 void        stop(RecordTrack* recordTrack);
                 status_t    dump(int fd, const Vector<String16>& args);
-                AudioStreamIn* getInput();
+                AudioStreamIn* getInput() const;
                 AudioStreamIn* clearInput();
                 virtual audio_stream_t* stream();
 
-        virtual status_t    getNextBuffer(AudioBufferProvider::Buffer* buffer);
+        // AudioBufferProvider interface
+        virtual status_t    getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts);
         virtual void        releaseBuffer(AudioBufferProvider::Buffer* buffer);
+
         virtual bool        checkForNewParameters_l();
         virtual String8     getParameters(const String8& keys);
         virtual void        audioConfigChanged_l(int event, int param = 0);
@@ -1024,22 +1226,23 @@
                 int16_t                             *mRsmpInBuffer;
                 size_t                              mRsmpInIndex;
                 size_t                              mInputBytes;
-                int                                 mReqChannelCount;
-                uint32_t                            mReqSampleRate;
+                const int                           mReqChannelCount;
+                const uint32_t                      mReqSampleRate;
                 ssize_t                             mBytesRead;
     };
 
+    // server side of the client's IAudioRecord
     class RecordHandle : public android::BnAudioRecord {
     public:
         RecordHandle(const sp<RecordThread::RecordTrack>& recordTrack);
         virtual             ~RecordHandle();
-        virtual status_t    start();
-        virtual void        stop();
         virtual sp<IMemory> getCblk() const;
+        virtual status_t    start(pid_t tid);
+        virtual void        stop();
         virtual status_t onTransact(
             uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
     private:
-        sp<RecordThread::RecordTrack> mRecordTrack;
+        const sp<RecordThread::RecordTrack> mRecordTrack;
     };
 
     //--- Audio Effect Management
@@ -1060,12 +1263,12 @@
     // the attached track(s) to accumulate their auxiliary channel.
     class EffectModule: public RefBase {
     public:
-        EffectModule(const wp<ThreadBase>& wThread,
+        EffectModule(ThreadBase *thread,
                         const wp<AudioFlinger::EffectChain>& chain,
                         effect_descriptor_t *desc,
                         int id,
                         int sessionId);
-        ~EffectModule();
+        virtual ~EffectModule();
 
         enum effect_state {
             IDLE,
@@ -1077,7 +1280,7 @@
             DESTROYED
         };
 
-        int         id() { return mId; }
+        int         id() const { return mId; }
         void process();
         void updateState();
         status_t command(uint32_t cmdCode,
@@ -1089,18 +1292,18 @@
         void reset_l();
         status_t configure();
         status_t init();
-        uint32_t state() {
+        effect_state state() const {
             return mState;
         }
         uint32_t status() {
             return mStatus;
         }
-        int sessionId() {
+        int sessionId() const {
             return mSessionId;
         }
         status_t    setEnabled(bool enabled);
-        bool isEnabled();
-        bool isProcessEnabled();
+        bool isEnabled() const;
+        bool isProcessEnabled() const;
 
         void        setInBuffer(int16_t *buffer) { mConfig.inputCfg.buffer.s16 = buffer; }
         int16_t     *inBuffer() { return mConfig.inputCfg.buffer.s16; }
@@ -1108,10 +1311,10 @@
         int16_t     *outBuffer() { return mConfig.outputCfg.buffer.s16; }
         void        setChain(const wp<EffectChain>& chain) { mChain = chain; }
         void        setThread(const wp<ThreadBase>& thread) { mThread = thread; }
-        wp<ThreadBase>& thread() { return mThread; }
+        const wp<ThreadBase>& thread() { return mThread; }
 
-        status_t addHandle(sp<EffectHandle>& handle);
-        void disconnect(const wp<EffectHandle>& handle, bool unpiniflast);
+        status_t addHandle(const sp<EffectHandle>& handle);
+        void disconnect(const wp<EffectHandle>& handle, bool unpinIfLast);
         size_t removeHandle (const wp<EffectHandle>& handle);
 
         effect_descriptor_t& desc() { return mDescriptor; }
@@ -1119,22 +1322,21 @@
 
         status_t         setDevice(uint32_t device);
         status_t         setVolume(uint32_t *left, uint32_t *right, bool controller);
-        status_t         setMode(uint32_t mode);
+        status_t         setMode(audio_mode_t mode);
         status_t         start();
         status_t         stop();
         void             setSuspended(bool suspended);
-        bool             suspended();
+        bool             suspended() const;
 
         sp<EffectHandle> controlHandle();
 
-        bool             isPinned() { return mPinned; }
+        bool             isPinned() const { return mPinned; }
         void             unPin() { mPinned = false; }
 
         status_t         dump(int fd, const Vector<String16>& args);
 
     protected:
-        friend class EffectHandle;
-        friend class AudioFlinger;
+        friend class AudioFlinger;      // for mHandles
         bool                mPinned;
 
         // Maximum time allocated to effect engines to complete the turn off sequence
@@ -1146,7 +1348,7 @@
         status_t start_l();
         status_t stop_l();
 
-        Mutex               mLock;      // mutex for process, commands and handles list protection
+        mutable Mutex       mLock;      // mutex for process, commands and handles list protection
         wp<ThreadBase>      mThread;    // parent thread
         wp<EffectChain>     mChain;     // parent effect chain
         int                 mId;        // this instance unique ID
@@ -1154,9 +1356,10 @@
         effect_descriptor_t mDescriptor;// effect descriptor received from effect engine
         effect_config_t     mConfig;    // input and output audio configuration
         effect_handle_t  mEffectInterface; // Effect module C API
-        status_t mStatus;               // initialization status
-        uint32_t mState;                // current activation state (effect_state)
+        status_t            mStatus;    // initialization status
+        effect_state        mState;     // current activation state
         Vector< wp<EffectHandle> > mHandles;    // list of client handles
+                    // First handle in mHandles has highest priority and controls the effect module
         uint32_t mMaxDisableWaitCnt;    // maximum grace period before forcing an effect off after
                                         // sending disable command.
         uint32_t mDisableWaitCnt;       // current process() calls count during disable period.
@@ -1187,8 +1390,10 @@
                                  uint32_t *replySize,
                                  void *pReplyData);
         virtual void disconnect();
-        virtual void disconnect(bool unpiniflast);
-        virtual sp<IMemory> getCblk() const;
+    private:
+                void disconnect(bool unpinIfLast);
+    public:
+        virtual sp<IMemory> getCblk() const { return mCblkMemory; }
         virtual status_t onTransact(uint32_t code, const Parcel& data,
                 Parcel* reply, uint32_t flags);
 
@@ -1204,25 +1409,24 @@
                              uint32_t replySize,
                              void *pReplyData);
         void setEnabled(bool enabled);
-        bool enabled() { return mEnabled; }
+        bool enabled() const { return mEnabled; }
 
         // Getters
-        int id() { return mEffect->id(); }
-        int priority() { return mPriority; }
-        bool hasControl() { return mHasControl; }
-        sp<EffectModule> effect() { return mEffect; }
+        int id() const { return mEffect->id(); }
+        int priority() const { return mPriority; }
+        bool hasControl() const { return mHasControl; }
+        sp<EffectModule> effect() const { return mEffect; }
 
         void dump(char* buffer, size_t size);
 
     protected:
-        friend class AudioFlinger;
-        friend class EffectModule;
+        friend class AudioFlinger;          // for mEffect, mHasControl, mEnabled
         EffectHandle(const EffectHandle&);
         EffectHandle& operator =(const EffectHandle&);
 
         sp<EffectModule> mEffect;           // pointer to controlled EffectModule
         sp<IEffectClient> mEffectClient;    // callback interface for client notifications
-        sp<Client>          mClient;        // client for shared memory allocation
+        /*const*/ sp<Client> mClient;       // client for shared memory allocation, see disconnect()
         sp<IMemory>         mCblkMemory;    // shared memory for control block
         effect_param_cblk_t* mCblk;         // control block for deferred parameter setting via shared memory
         uint8_t*            mBuffer;        // pointer to parameter area in shared memory
@@ -1242,7 +1446,8 @@
     class EffectChain: public RefBase {
     public:
         EffectChain(const wp<ThreadBase>& wThread, int sessionId);
-        ~EffectChain();
+        EffectChain(ThreadBase *thread, int sessionId);
+        virtual ~EffectChain();
 
         // special key used for an entry in mSuspendedEffects keyed vector
         // corresponding to a suspend all request.
@@ -1264,7 +1469,7 @@
         status_t addEffect_l(const sp<EffectModule>& handle);
         size_t removeEffect_l(const sp<EffectModule>& handle);
 
-        int sessionId() { return mSessionId; }
+        int sessionId() const { return mSessionId; }
         void setSessionId(int sessionId) { mSessionId = sessionId; }
 
         sp<EffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor);
@@ -1272,34 +1477,34 @@
         sp<EffectModule> getEffectFromType_l(const effect_uuid_t *type);
         bool setVolume_l(uint32_t *left, uint32_t *right);
         void setDevice_l(uint32_t device);
-        void setMode_l(uint32_t mode);
+        void setMode_l(audio_mode_t mode);
 
         void setInBuffer(int16_t *buffer, bool ownsBuffer = false) {
             mInBuffer = buffer;
             mOwnInBuffer = ownsBuffer;
         }
-        int16_t *inBuffer() {
+        int16_t *inBuffer() const {
             return mInBuffer;
         }
         void setOutBuffer(int16_t *buffer) {
             mOutBuffer = buffer;
         }
-        int16_t *outBuffer() {
+        int16_t *outBuffer() const {
             return mOutBuffer;
         }
 
         void incTrackCnt() { android_atomic_inc(&mTrackCnt); }
         void decTrackCnt() { android_atomic_dec(&mTrackCnt); }
-        int32_t trackCnt() { return mTrackCnt;}
+        int32_t trackCnt() const { return mTrackCnt;}
 
         void incActiveTrackCnt() { android_atomic_inc(&mActiveTrackCnt);
                                    mTailBufferCount = mMaxTailBuffers; }
         void decActiveTrackCnt() { android_atomic_dec(&mActiveTrackCnt); }
-        int32_t activeTrackCnt() { return mActiveTrackCnt;}
+        int32_t activeTrackCnt() const { return mActiveTrackCnt;}
 
-        uint32_t strategy() { return mStrategy; }
+        uint32_t strategy() const { return mStrategy; }
         void setStrategy(uint32_t strategy)
-                 { mStrategy = strategy; }
+                { mStrategy = strategy; }
 
         // suspend effect of the given type
         void setEffectSuspended_l(const effect_uuid_t *type,
@@ -1313,7 +1518,7 @@
         status_t dump(int fd, const Vector<String16>& args);
 
     protected:
-        friend class AudioFlinger;
+        friend class AudioFlinger;  // for mThread, mEffects
         EffectChain(const EffectChain&);
         EffectChain& operator =(const EffectChain&);
 
@@ -1328,7 +1533,8 @@
 
         // get a list of effect modules to suspend when an effect of the type
         // passed is enabled.
-        Vector< sp<EffectModule> > getSuspendEligibleEffects();
+        void                       getSuspendEligibleEffects(Vector< sp<EffectModule> > &effects);
+
         // get an effect module if it is currently enable
         sp<EffectModule> getEffectIfEnabled(const effect_uuid_t *type);
         // true if the effect whose descriptor is passed can be suspended
@@ -1338,7 +1544,7 @@
 
         wp<ThreadBase> mThread;     // parent mixer thread
         Mutex mLock;                // mutex protecting effect list
-        Vector<sp<EffectModule> > mEffects; // list of effect modules
+        Vector< sp<EffectModule> > mEffects; // list of effect modules
         int mSessionId;             // audio session ID
         int16_t *mInBuffer;         // chain input buffer
         int16_t *mOutBuffer;        // chain output buffer
@@ -1359,54 +1565,119 @@
         KeyedVector< int, sp<SuspendedEffectDesc> > mSuspendedEffects;
     };
 
+    // AudioStreamOut and AudioStreamIn are immutable, so their fields are const.
+    // For emphasis, we could also make all pointers to them be "const *",
+    // but that would clutter the code unnecessarily.
+
     struct AudioStreamOut {
-        audio_hw_device_t   *hwDev;
-        audio_stream_out_t  *stream;
+        audio_hw_device_t*  const hwDev;
+        audio_stream_out_t* const stream;
 
         AudioStreamOut(audio_hw_device_t *dev, audio_stream_out_t *out) :
             hwDev(dev), stream(out) {}
     };
 
     struct AudioStreamIn {
-        audio_hw_device_t   *hwDev;
-        audio_stream_in_t   *stream;
+        audio_hw_device_t* const hwDev;
+        audio_stream_in_t* const stream;
 
         AudioStreamIn(audio_hw_device_t *dev, audio_stream_in_t *in) :
             hwDev(dev), stream(in) {}
     };
 
+    // for mAudioSessionRefs only
     struct AudioSessionRef {
-        int sessionid;
-        pid_t pid;
-        int cnt;
+        AudioSessionRef(int sessionid, pid_t pid) :
+            mSessionid(sessionid), mPid(pid), mCnt(1) {}
+        const int   mSessionid;
+        const pid_t mPid;
+        int         mCnt;
     };
 
-    friend class RecordThread;
-    friend class PlaybackThread;
+    enum master_volume_support {
+        // MVS_NONE:
+        // Audio HAL has no support for master volume, either setting or
+        // getting.  All master volume control must be implemented in SW by the
+        // AudioFlinger mixing core.
+        MVS_NONE,
+
+        // MVS_SETONLY:
+        // Audio HAL has support for setting master volume, but not for getting
+        // master volume (original HAL design did not include a getter).
+        // AudioFlinger needs to keep track of the last set master volume in
+        // addition to needing to set an initial, default, master volume at HAL
+        // load time.
+        MVS_SETONLY,
+
+        // MVS_FULL:
+        // Audio HAL has support both for setting and getting master volume.
+        // AudioFlinger should send all set and get master volume requests
+        // directly to the HAL.
+        MVS_FULL,
+    };
 
     mutable     Mutex                               mLock;
 
-                DefaultKeyedVector< pid_t, wp<Client> >     mClients;
+                DefaultKeyedVector< pid_t, wp<Client> >     mClients;   // see ~Client()
 
                 mutable     Mutex                   mHardwareLock;
-                audio_hw_device_t*                  mPrimaryHardwareDev;
+
+                // These two fields are immutable after onFirstRef(), so no lock needed to access
+                audio_hw_device_t*                  mPrimaryHardwareDev; // mAudioHwDevs[0] or NULL
                 Vector<audio_hw_device_t*>          mAudioHwDevs;
-    mutable     int                                 mHardwareStatus;
+
+    // for dump, indicates which hardware operation is currently in progress (but not stream ops)
+    enum hardware_call_state {
+        AUDIO_HW_IDLE = 0,              // no operation in progress
+        AUDIO_HW_INIT,                  // init_check
+        AUDIO_HW_OUTPUT_OPEN,           // open_output_stream
+        AUDIO_HW_OUTPUT_CLOSE,          // unused
+        AUDIO_HW_INPUT_OPEN,            // unused
+        AUDIO_HW_INPUT_CLOSE,           // unused
+        AUDIO_HW_STANDBY,               // unused
+        AUDIO_HW_SET_MASTER_VOLUME,     // set_master_volume
+        AUDIO_HW_GET_ROUTING,           // unused
+        AUDIO_HW_SET_ROUTING,           // unused
+        AUDIO_HW_GET_MODE,              // unused
+        AUDIO_HW_SET_MODE,              // set_mode
+        AUDIO_HW_GET_MIC_MUTE,          // get_mic_mute
+        AUDIO_HW_SET_MIC_MUTE,          // set_mic_mute
+        AUDIO_HW_SET_VOICE_VOLUME,      // set_voice_volume
+        AUDIO_HW_SET_PARAMETER,         // set_parameters
+        AUDIO_HW_GET_INPUT_BUFFER_SIZE, // get_input_buffer_size
+        AUDIO_HW_GET_MASTER_VOLUME,     // get_master_volume
+        AUDIO_HW_GET_PARAMETER,         // get_parameters
+    };
+
+    mutable     hardware_call_state                 mHardwareStatus;    // for dump only
 
 
-                DefaultKeyedVector< int, sp<PlaybackThread> >  mPlaybackThreads;
-                PlaybackThread::stream_type_t       mStreamTypes[AUDIO_STREAM_CNT];
+                DefaultKeyedVector< audio_io_handle_t, sp<PlaybackThread> >  mPlaybackThreads;
+                stream_type_t                       mStreamTypes[AUDIO_STREAM_CNT];
+
+                // these are protected by mLock
                 float                               mMasterVolume;
+                float                               mMasterVolumeSW;
+                master_volume_support               mMasterVolumeSupportLvl;
                 bool                                mMasterMute;
 
-                DefaultKeyedVector< int, sp<RecordThread> >    mRecordThreads;
+                DefaultKeyedVector< audio_io_handle_t, sp<RecordThread> >    mRecordThreads;
 
                 DefaultKeyedVector< pid_t, sp<NotificationClient> >    mNotificationClients;
-                volatile int32_t                    mNextUniqueId;
-                uint32_t                            mMode;
+                volatile int32_t                    mNextUniqueId;  // updated by android_atomic_inc
+                audio_mode_t                        mMode;
                 bool                                mBtNrecIsOff;
 
+                // protected by mLock
                 Vector<AudioSessionRef*> mAudioSessionRefs;
+
+                float       masterVolume_l() const;
+                float       masterVolumeSW_l() const  { return mMasterVolumeSW; }
+                bool        masterMute_l() const    { return mMasterMute; }
+
+private:
+    sp<Client>  registerPid_l(pid_t pid);    // always returns non-0
+
 };
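
The master_volume_support comments above spell out three HAL capability levels; the sketch below shows the dispatch they imply. It is not the setMasterVolume() body in this patch: the locking is simplified, a single primary HAL device is assumed, and set_master_volume() is the standard audio_hw_device_t entry point.

// Sketch only: the dispatch implied by MVS_NONE / MVS_SETONLY / MVS_FULL.
status_t AudioFlinger::setMasterVolume(float value)
{
    Mutex::Autolock _l(mLock);
    if (mMasterVolumeSupportLvl != MVS_NONE && mPrimaryHardwareDev != NULL) {
        // HAL applies the gain; keep the software gain at unity
        mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
        mPrimaryHardwareDev->set_master_volume(mPrimaryHardwareDev, value);
        mHardwareStatus = AUDIO_HW_IDLE;
        mMasterVolumeSW = 1.0f;
    } else {
        // MVS_NONE: the mixing core applies the gain in software
        mMasterVolumeSW = value;
    }
    // cached so masterVolume_l() can answer even for MVS_SETONLY,
    // where the HAL has no getter
    mMasterVolume = value;
    return NO_ERROR;
}
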
 
 
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 9dda256..1ec238b 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -1,4 +1,4 @@
-/* //device/include/server/AudioFlinger/AudioMixer.cpp
+/*
 **
 ** Copyright 2007, The Android Open Source Project
 **
@@ -27,240 +27,236 @@
 #include <utils/Log.h>
 
 #include <cutils/bitops.h>
+#include <cutils/compiler.h>
+#include <utils/Debug.h>
 
 #include <system/audio.h>
 
+#include <audio_utils/primitives.h>
+#include <common_time/local_clock.h>
+#include <common_time/cc_helper.h>
+
 #include "AudioMixer.h"
 
 namespace android {
-// ----------------------------------------------------------------------------
-
-static inline int16_t clamp16(int32_t sample)
-{
-    if ((sample>>15) ^ (sample>>31))
-        sample = 0x7FFF ^ (sample>>31);
-    return sample;
-}
 
 // ----------------------------------------------------------------------------
 
 AudioMixer::AudioMixer(size_t frameCount, uint32_t sampleRate)
-    :   mActiveTrack(0), mTrackNames(0), mSampleRate(sampleRate)
+    :   mTrackNames(0), mSampleRate(sampleRate)
 {
+    // AudioMixer is not yet capable of multi-channel beyond stereo
+    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(2 == MAX_NUM_CHANNELS);
+
+    LocalClock lc;
+
     mState.enabledTracks= 0;
     mState.needsChanged = 0;
     mState.frameCount   = frameCount;
-    mState.outputTemp   = 0;
-    mState.resampleTemp = 0;
     mState.hook         = process__nop;
+    mState.outputTemp   = NULL;
+    mState.resampleTemp = NULL;
+    // mState.reserved
     track_t* t = mState.tracks;
-    for (int i=0 ; i<32 ; i++) {
+    for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
         t->needs = 0;
         t->volume[0] = UNITY_GAIN;
         t->volume[1] = UNITY_GAIN;
+        // no initialization needed
+        // t->prevVolume[0]
+        // t->prevVolume[1]
         t->volumeInc[0] = 0;
         t->volumeInc[1] = 0;
         t->auxLevel = 0;
         t->auxInc = 0;
+        // no initialization needed
+        // t->prevAuxLevel
+        // t->frameCount
         t->channelCount = 2;
-        t->enabled = 0;
+        t->enabled = false;
         t->format = 16;
         t->channelMask = AUDIO_CHANNEL_OUT_STEREO;
-        t->buffer.raw = 0;
-        t->bufferProvider = 0;
-        t->hook = 0;
-        t->resampler = 0;
+        t->bufferProvider = NULL;
+        t->buffer.raw = NULL;
+        // t->buffer.frameCount
+        t->hook = NULL;
+        t->in = NULL;
+        t->resampler = NULL;
         t->sampleRate = mSampleRate;
-        t->in = 0;
         t->mainBuffer = NULL;
         t->auxBuffer = NULL;
+        t->localTimeFreq = lc.getLocalFreq();
         t++;
     }
 }
 
- AudioMixer::~AudioMixer()
- {
-     track_t* t = mState.tracks;
-     for (int i=0 ; i<32 ; i++) {
-         delete t->resampler;
-         t++;
-     }
-     delete [] mState.outputTemp;
-     delete [] mState.resampleTemp;
- }
-
- int AudioMixer::getTrackName()
- {
-    uint32_t names = mTrackNames;
-    uint32_t mask = 1;
-    int n = 0;
-    while (names & mask) {
-        mask <<= 1;
-        n++;
+AudioMixer::~AudioMixer()
+{
+    track_t* t = mState.tracks;
+    for (unsigned i=0 ; i < MAX_NUM_TRACKS ; i++) {
+        delete t->resampler;
+        t++;
     }
-    if (mask) {
+    delete [] mState.outputTemp;
+    delete [] mState.resampleTemp;
+}
+
+int AudioMixer::getTrackName()
+{
+    uint32_t names = ~mTrackNames;
+    if (names != 0) {
+        int n = __builtin_ctz(names);
         ALOGV("add track (%d)", n);
-        mTrackNames |= mask;
+        mTrackNames |= 1 << n;
         return TRACK0 + n;
     }
     return -1;
- }
+}
 
- void AudioMixer::invalidateState(uint32_t mask)
- {
+void AudioMixer::invalidateState(uint32_t mask)
+{
     if (mask) {
         mState.needsChanged |= mask;
         mState.hook = process__validate;
     }
  }
 
- void AudioMixer::deleteTrackName(int name)
- {
+void AudioMixer::deleteTrackName(int name)
+{
     name -= TRACK0;
-    if (uint32_t(name) < MAX_NUM_TRACKS) {
-        ALOGV("deleteTrackName(%d)", name);
-        track_t& track(mState.tracks[ name ]);
-        if (track.enabled != 0) {
-            track.enabled = 0;
-            invalidateState(1<<name);
-        }
-        if (track.resampler) {
-            // delete  the resampler
-            delete track.resampler;
-            track.resampler = 0;
-            track.sampleRate = mSampleRate;
-            invalidateState(1<<name);
-        }
-        track.volumeInc[0] = 0;
-        track.volumeInc[1] = 0;
-        mTrackNames &= ~(1<<name);
+    ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
+    ALOGV("deleteTrackName(%d)", name);
+    track_t& track(mState.tracks[ name ]);
+    if (track.enabled) {
+        track.enabled = false;
+        invalidateState(1<<name);
     }
- }
-
-status_t AudioMixer::enable(int name)
-{
-    switch (name) {
-        case MIXING: {
-            if (mState.tracks[ mActiveTrack ].enabled != 1) {
-                mState.tracks[ mActiveTrack ].enabled = 1;
-                ALOGV("enable(%d)", mActiveTrack);
-                invalidateState(1<<mActiveTrack);
-            }
-        } break;
-        default:
-            return NAME_NOT_FOUND;
+    if (track.resampler != NULL) {
+        // delete the resampler
+        delete track.resampler;
+        track.resampler = NULL;
+        track.sampleRate = mSampleRate;
+        invalidateState(1<<name);
     }
-    return NO_ERROR;
+    track.volumeInc[0] = 0;
+    track.volumeInc[1] = 0;
+    mTrackNames &= ~(1<<name);
 }
 
-status_t AudioMixer::disable(int name)
+void AudioMixer::enable(int name)
 {
-    switch (name) {
-        case MIXING: {
-            if (mState.tracks[ mActiveTrack ].enabled != 0) {
-                mState.tracks[ mActiveTrack ].enabled = 0;
-                ALOGV("disable(%d)", mActiveTrack);
-                invalidateState(1<<mActiveTrack);
-            }
-        } break;
-        default:
-            return NAME_NOT_FOUND;
+    name -= TRACK0;
+    ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
+    track_t& track = mState.tracks[name];
+
+    if (!track.enabled) {
+        track.enabled = true;
+        ALOGV("enable(%d)", name);
+        invalidateState(1 << name);
     }
-    return NO_ERROR;
 }
 
-status_t AudioMixer::setActiveTrack(int track)
+void AudioMixer::disable(int name)
 {
-    if (uint32_t(track-TRACK0) >= MAX_NUM_TRACKS) {
-        return BAD_VALUE;
+    name -= TRACK0;
+    ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
+    track_t& track = mState.tracks[name];
+
+    if (track.enabled) {
+        track.enabled = false;
+        ALOGV("disable(%d)", name);
+        invalidateState(1 << name);
     }
-    mActiveTrack = track - TRACK0;
-    return NO_ERROR;
 }
 
-status_t AudioMixer::setParameter(int target, int name, void *value)
+void AudioMixer::setParameter(int name, int target, int param, void *value)
 {
+    name -= TRACK0;
+    ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
+    track_t& track = mState.tracks[name];
+
     int valueInt = (int)value;
     int32_t *valueBuf = (int32_t *)value;
 
     switch (target) {
-    case TRACK:
-        if (name == CHANNEL_MASK) {
-            uint32_t mask = (uint32_t)value;
-            if (mState.tracks[ mActiveTrack ].channelMask != mask) {
-                uint8_t channelCount = popcount(mask);
-                if ((channelCount <= MAX_NUM_CHANNELS) && (channelCount)) {
-                    mState.tracks[ mActiveTrack ].channelMask = mask;
-                    mState.tracks[ mActiveTrack ].channelCount = channelCount;
-                    ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", mask);
-                    invalidateState(1<<mActiveTrack);
-                    return NO_ERROR;
-                }
-            } else {
-                return NO_ERROR;
-            }
-        }
-        if (name == MAIN_BUFFER) {
-            if (mState.tracks[ mActiveTrack ].mainBuffer != valueBuf) {
-                mState.tracks[ mActiveTrack ].mainBuffer = valueBuf;
-                ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
-                invalidateState(1<<mActiveTrack);
-            }
-            return NO_ERROR;
-        }
-        if (name == AUX_BUFFER) {
-            if (mState.tracks[ mActiveTrack ].auxBuffer != valueBuf) {
-                mState.tracks[ mActiveTrack ].auxBuffer = valueBuf;
-                ALOGV("setParameter(TRACK, AUX_BUFFER, %p)", valueBuf);
-                invalidateState(1<<mActiveTrack);
-            }
-            return NO_ERROR;
-        }
 
-        break;
-    case RESAMPLE:
-        if (name == SAMPLE_RATE) {
-            if (valueInt > 0) {
-                track_t& track = mState.tracks[ mActiveTrack ];
-                if (track.setResampler(uint32_t(valueInt), mSampleRate)) {
-                    ALOGV("setParameter(RESAMPLE, SAMPLE_RATE, %u)",
-                            uint32_t(valueInt));
-                    invalidateState(1<<mActiveTrack);
-                }
-                return NO_ERROR;
+    case TRACK:
+        switch (param) {
+        case CHANNEL_MASK: {
+            uint32_t mask = (uint32_t)value;
+            if (track.channelMask != mask) {
+                uint32_t channelCount = popcount(mask);
+                ALOG_ASSERT((channelCount <= MAX_NUM_CHANNELS) && (channelCount),
+                        "bad channel count %u", channelCount);
+                track.channelMask = mask;
+                track.channelCount = channelCount;
+                ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", mask);
+                invalidateState(1 << name);
             }
-        }
-        if (name == RESET) {
-            track_t& track = mState.tracks[ mActiveTrack ];
-            track.resetResampler();
-            invalidateState(1<<mActiveTrack);
-            return NO_ERROR;
+            } break;
+        case MAIN_BUFFER:
+            if (track.mainBuffer != valueBuf) {
+                track.mainBuffer = valueBuf;
+                ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
+                invalidateState(1 << name);
+            }
+            break;
+        case AUX_BUFFER:
+            if (track.auxBuffer != valueBuf) {
+                track.auxBuffer = valueBuf;
+                ALOGV("setParameter(TRACK, AUX_BUFFER, %p)", valueBuf);
+                invalidateState(1 << name);
+            }
+            break;
+        default:
+            LOG_FATAL("bad param");
         }
         break;
+
+    case RESAMPLE:
+        switch (param) {
+        case SAMPLE_RATE:
+            ALOG_ASSERT(valueInt > 0, "bad sample rate %d", valueInt);
+            if (track.setResampler(uint32_t(valueInt), mSampleRate)) {
+                ALOGV("setParameter(RESAMPLE, SAMPLE_RATE, %u)",
+                        uint32_t(valueInt));
+                invalidateState(1 << name);
+            }
+            break;
+        case RESET:
+            track.resetResampler();
+            invalidateState(1 << name);
+            break;
+        default:
+            LOG_FATAL("bad param");
+        }
+        break;
+
     case RAMP_VOLUME:
     case VOLUME:
-        if ((uint32_t(name-VOLUME0) < MAX_NUM_CHANNELS)) {
-            track_t& track = mState.tracks[ mActiveTrack ];
-            if (track.volume[name-VOLUME0] != valueInt) {
+        switch (param) {
+        case VOLUME0:
+        case VOLUME1:
+            if (track.volume[param-VOLUME0] != valueInt) {
                 ALOGV("setParameter(VOLUME, VOLUME0/1: %04x)", valueInt);
-                track.prevVolume[name-VOLUME0] = track.volume[name-VOLUME0] << 16;
-                track.volume[name-VOLUME0] = valueInt;
+                track.prevVolume[param-VOLUME0] = track.volume[param-VOLUME0] << 16;
+                track.volume[param-VOLUME0] = valueInt;
                 if (target == VOLUME) {
-                    track.prevVolume[name-VOLUME0] = valueInt << 16;
-                    track.volumeInc[name-VOLUME0] = 0;
+                    track.prevVolume[param-VOLUME0] = valueInt << 16;
+                    track.volumeInc[param-VOLUME0] = 0;
                 } else {
-                    int32_t d = (valueInt<<16) - track.prevVolume[name-VOLUME0];
+                    int32_t d = (valueInt<<16) - track.prevVolume[param-VOLUME0];
                     int32_t volInc = d / int32_t(mState.frameCount);
-                    track.volumeInc[name-VOLUME0] = volInc;
+                    track.volumeInc[param-VOLUME0] = volInc;
                     if (volInc == 0) {
-                        track.prevVolume[name-VOLUME0] = valueInt << 16;
+                        track.prevVolume[param-VOLUME0] = valueInt << 16;
                     }
                 }
-                invalidateState(1<<mActiveTrack);
+                invalidateState(1 << name);
             }
-            return NO_ERROR;
-        } else if (name == AUXLEVEL) {
-            track_t& track = mState.tracks[ mActiveTrack ];
+            break;
+        case AUXLEVEL:
+            //ALOG_ASSERT(0 <= valueInt && valueInt <= MAX_GAIN_INT, "bad aux level %d", valueInt);
             if (track.auxLevel != valueInt) {
                 ALOGV("setParameter(VOLUME, AUXLEVEL: %04x)", valueInt);
                 track.prevAuxLevel = track.auxLevel << 16;
@@ -276,13 +272,17 @@
                         track.prevAuxLevel = valueInt << 16;
                     }
                 }
-                invalidateState(1<<mActiveTrack);
+                invalidateState(1 << name);
             }
-            return NO_ERROR;
+            break;
+        default:
+            LOG_FATAL("bad param");
         }
         break;
+
+    default:
+        LOG_FATAL("bad target");
     }
-    return BAD_VALUE;
 }
 
 bool AudioMixer::track_t::setResampler(uint32_t value, uint32_t devSampleRate)
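
With setParameter() now taking the track name (and enable(), disable(), setBufferProvider() likewise), the setActiveTrack()/MIXING indirection disappears: every call names the track it operates on. A caller-side sketch follows; mixer, provider, buf and pts are illustrative placeholders, not names from this patch, and UNITY_GAIN is the mixer constant seen in the constructor above.

// Sketch of caller-side usage with the name-based API.
int name = mixer->getTrackName();                   // allocates TRACK0 + n, or -1 if full
mixer->setBufferProvider(name, provider);
mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER, buf);
mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
        (void *)AUDIO_CHANNEL_OUT_STEREO);
mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0,
        (void *)(int)AudioMixer::UNITY_GAIN);
mixer->enable(name);                                // was setActiveTrack() + enable(MIXING)
mixer->process(pts);                                // process() now takes a PTS (new in this patch)
mixer->deleteTrackName(name);
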
@@ -290,9 +290,10 @@
     if (value!=devSampleRate || resampler) {
         if (sampleRate != value) {
             sampleRate = value;
-            if (resampler == 0) {
+            if (resampler == NULL) {
                 resampler = AudioResampler::create(
                         format, channelCount, devSampleRate);
+                resampler->setLocalTimeFreq(localTimeFreq);
             }
             return true;
         }
@@ -300,22 +301,10 @@
     return false;
 }
 
-bool AudioMixer::track_t::doesResample() const
-{
-    return resampler != 0;
-}
-
-void AudioMixer::track_t::resetResampler()
-{
-    if (resampler != 0) {
-        resampler->reset();
-    }
-}
-
 inline
 void AudioMixer::track_t::adjustVolumeRamp(bool aux)
 {
-    for (int i=0 ; i<2 ; i++) {
+    for (uint32_t i=0 ; i<MAX_NUM_CHANNELS ; i++) {
         if (((volumeInc[i]>0) && (((prevVolume[i]+volumeInc[i])>>16) >= volume[i])) ||
             ((volumeInc[i]<0) && (((prevVolume[i]+volumeInc[i])>>16) <= volume[i]))) {
             volumeInc[i] = 0;
@@ -331,39 +320,31 @@
     }
 }
 
-size_t AudioMixer::track_t::getUnreleasedFrames()
-{
-    if (resampler != NULL) {
-        return resampler->getUnreleasedFrames();
-    }
-    return 0;
-}
-
-size_t AudioMixer::getUnreleasedFrames(int name)
+size_t AudioMixer::getUnreleasedFrames(int name) const
 {
     name -= TRACK0;
     if (uint32_t(name) < MAX_NUM_TRACKS) {
-        track_t& track(mState.tracks[name]);
-        return track.getUnreleasedFrames();
+        return mState.tracks[name].getUnreleasedFrames();
     }
     return 0;
 }
 
-status_t AudioMixer::setBufferProvider(AudioBufferProvider* buffer)
+void AudioMixer::setBufferProvider(int name, AudioBufferProvider* bufferProvider)
 {
-    mState.tracks[ mActiveTrack ].bufferProvider = buffer;
-    return NO_ERROR;
+    name -= TRACK0;
+    ALOG_ASSERT(uint32_t(name) < MAX_NUM_TRACKS, "bad track name %d", name);
+    mState.tracks[name].bufferProvider = bufferProvider;
 }
 
 
 
-void AudioMixer::process()
+void AudioMixer::process(int64_t pts)
 {
-    mState.hook(&mState);
+    mState.hook(&mState, pts);
 }
 
 
-void AudioMixer::process__validate(state_t* state)
+void AudioMixer::process__validate(state_t* state, int64_t pts)
 {
     ALOGW_IF(!state->needsChanged,
         "in process__validate() but nothing's invalid");
@@ -386,9 +367,9 @@
 
     // compute everything we need...
     int countActiveTracks = 0;
-    int all16BitsStereoNoResample = 1;
-    int resampling = 0;
-    int volumeRamp = 0;
+    bool all16BitsStereoNoResample = true;
+    bool resampling = false;
+    bool volumeRamp = false;
     uint32_t en = state->enabledTracks;
     while (en) {
         const int i = 31 - __builtin_clz(en);
@@ -405,7 +386,7 @@
         }
 
         if (t.volumeInc[0]|t.volumeInc[1]) {
-            volumeRamp = 1;
+            volumeRamp = true;
         } else if (!t.doesResample() && t.volumeRL == 0) {
             n |= NEEDS_MUTE_ENABLED;
         }
@@ -415,16 +396,16 @@
             t.hook = track__nop;
         } else {
             if ((n & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED) {
-                all16BitsStereoNoResample = 0;
+                all16BitsStereoNoResample = false;
             }
             if ((n & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) {
-                all16BitsStereoNoResample = 0;
-                resampling = 1;
+                all16BitsStereoNoResample = false;
+                resampling = true;
                 t.hook = track__genericResample;
             } else {
                 if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
                     t.hook = track__16BitsMono;
-                    all16BitsStereoNoResample = 0;
+                    all16BitsStereoNoResample = false;
                 }
                 if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_2){
                     t.hook = track__16BitsStereo;
@@ -447,11 +428,11 @@
         } else {
             if (state->outputTemp) {
                 delete [] state->outputTemp;
-                state->outputTemp = 0;
+                state->outputTemp = NULL;
             }
             if (state->resampleTemp) {
                 delete [] state->resampleTemp;
-                state->resampleTemp = 0;
+                state->resampleTemp = NULL;
             }
             state->hook = process__genericNoResampling;
             if (all16BitsStereoNoResample && !volumeRamp) {
@@ -467,115 +448,33 @@
         countActiveTracks, state->enabledTracks,
         all16BitsStereoNoResample, resampling, volumeRamp);
 
-   state->hook(state);
+    state->hook(state, pts);
 
-   // Now that the volume ramp has been done, set optimal state and
-   // track hooks for subsequent mixer process
-   if (countActiveTracks) {
-       int allMuted = 1;
-       uint32_t en = state->enabledTracks;
-       while (en) {
-           const int i = 31 - __builtin_clz(en);
-           en &= ~(1<<i);
-           track_t& t = state->tracks[i];
-           if (!t.doesResample() && t.volumeRL == 0)
-           {
-               t.needs |= NEEDS_MUTE_ENABLED;
-               t.hook = track__nop;
-           } else {
-               allMuted = 0;
-           }
-       }
-       if (allMuted) {
-           state->hook = process__nop;
-       } else if (all16BitsStereoNoResample) {
-           if (countActiveTracks == 1) {
-              state->hook = process__OneTrack16BitsStereoNoResampling;
-           }
-       }
-   }
-}
-
-static inline
-int32_t mulAdd(int16_t in, int16_t v, int32_t a)
-{
-#if defined(__arm__) && !defined(__thumb__)
-    int32_t out;
-    asm( "smlabb %[out], %[in], %[v], %[a] \n"
-         : [out]"=r"(out)
-         : [in]"%r"(in), [v]"r"(v), [a]"r"(a)
-         : );
-    return out;
-#else
-    return a + in * int32_t(v);
-#endif
-}
-
-static inline
-int32_t mul(int16_t in, int16_t v)
-{
-#if defined(__arm__) && !defined(__thumb__)
-    int32_t out;
-    asm( "smulbb %[out], %[in], %[v] \n"
-         : [out]"=r"(out)
-         : [in]"%r"(in), [v]"r"(v)
-         : );
-    return out;
-#else
-    return in * int32_t(v);
-#endif
-}
-
-static inline
-int32_t mulAddRL(int left, uint32_t inRL, uint32_t vRL, int32_t a)
-{
-#if defined(__arm__) && !defined(__thumb__)
-    int32_t out;
-    if (left) {
-        asm( "smlabb %[out], %[inRL], %[vRL], %[a] \n"
-             : [out]"=r"(out)
-             : [inRL]"%r"(inRL), [vRL]"r"(vRL), [a]"r"(a)
-             : );
-    } else {
-        asm( "smlatt %[out], %[inRL], %[vRL], %[a] \n"
-             : [out]"=r"(out)
-             : [inRL]"%r"(inRL), [vRL]"r"(vRL), [a]"r"(a)
-             : );
+    // Now that the volume ramp has been done, set optimal state and
+    // track hooks for subsequent mixer process
+    if (countActiveTracks) {
+        bool allMuted = true;
+        uint32_t en = state->enabledTracks;
+        while (en) {
+            const int i = 31 - __builtin_clz(en);
+            en &= ~(1<<i);
+            track_t& t = state->tracks[i];
+            if (!t.doesResample() && t.volumeRL == 0)
+            {
+                t.needs |= NEEDS_MUTE_ENABLED;
+                t.hook = track__nop;
+            } else {
+                allMuted = false;
+            }
+        }
+        if (allMuted) {
+            state->hook = process__nop;
+        } else if (all16BitsStereoNoResample) {
+            if (countActiveTracks == 1) {
+                state->hook = process__OneTrack16BitsStereoNoResampling;
+            }
+        }
     }
-    return out;
-#else
-    if (left) {
-        return a + int16_t(inRL&0xFFFF) * int16_t(vRL&0xFFFF);
-    } else {
-        return a + int16_t(inRL>>16) * int16_t(vRL>>16);
-    }
-#endif
-}
-
-static inline
-int32_t mulRL(int left, uint32_t inRL, uint32_t vRL)
-{
-#if defined(__arm__) && !defined(__thumb__)
-    int32_t out;
-    if (left) {
-        asm( "smulbb %[out], %[inRL], %[vRL] \n"
-             : [out]"=r"(out)
-             : [inRL]"%r"(inRL), [vRL]"r"(vRL)
-             : );
-    } else {
-        asm( "smultt %[out], %[inRL], %[vRL] \n"
-             : [out]"=r"(out)
-             : [inRL]"%r"(inRL), [vRL]"r"(vRL)
-             : );
-    }
-    return out;
-#else
-    if (left) {
-        return int16_t(inRL&0xFFFF) * int16_t(vRL&0xFFFF);
-    } else {
-        return int16_t(inRL>>16) * int16_t(vRL>>16);
-    }
-#endif
 }
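
The local clamp16(), ditherAndClamp() and the ARM multiply helpers are gone from this file; clamp16() and ditherAndClamp() now come from <audio_utils/primitives.h>, which this patch includes at the top, and the multiply helpers are presumably picked up from a header since the unchanged code below still calls mulAdd()/mulAddRL(). For reference, the clamp the mixer relies on is the one removed above, reproduced here with comments added:

// Saturate a 32-bit mixed sample to the int16_t range.
static inline int16_t clamp16(int32_t sample)
{
    // if the bits above bit 15 are not just sign extension, the value
    // overflows int16_t; replace it with 0x7FFF (positive) or 0x8000 (negative)
    if ((sample >> 15) ^ (sample >> 31))
        sample = 0x7FFF ^ (sample >> 31);
    return sample;
}
// e.g. clamp16(40000) == 32767, clamp16(-40000) == -32768, clamp16(1234) == 1234
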
 
 
@@ -591,13 +490,13 @@
         t->resampler->setVolume(UNITY_GAIN, UNITY_GAIN);
         memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t));
         t->resampler->resample(temp, outFrameCount, t->bufferProvider);
-        if UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc) {
+        if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) {
             volumeRampStereo(t, out, outFrameCount, temp, aux);
         } else {
             volumeStereo(t, out, outFrameCount, temp, aux);
         }
     } else {
-        if UNLIKELY(t->volumeInc[0]|t->volumeInc[1]) {
+        if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) {
             t->resampler->setVolume(UNITY_GAIN, UNITY_GAIN);
             memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t));
             t->resampler->resample(temp, outFrameCount, t->bufferProvider);
@@ -628,7 +527,7 @@
     //       (vl + vlInc*frameCount)/65536.0f, frameCount);
 
     // ramp volume
-    if UNLIKELY(aux != NULL) {
+    if (CC_UNLIKELY(aux != NULL)) {
         int32_t va = t->prevAuxLevel;
         const int32_t vaInc = t->auxInc;
         int32_t l;
@@ -655,7 +554,7 @@
     }
     t->prevVolume[0] = vl;
     t->prevVolume[1] = vr;
-    t->adjustVolumeRamp((aux != NULL));
+    t->adjustVolumeRamp(aux != NULL);
 }
 
 void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
@@ -663,8 +562,8 @@
     const int16_t vl = t->volume[0];
     const int16_t vr = t->volume[1];
 
-    if UNLIKELY(aux != NULL) {
-        const int16_t va = (int16_t)t->auxLevel;
+    if (CC_UNLIKELY(aux != NULL)) {
+        const int16_t va = t->auxLevel;
         do {
             int16_t l = (int16_t)(*temp++ >> 12);
             int16_t r = (int16_t)(*temp++ >> 12);
@@ -688,13 +587,13 @@
 
 void AudioMixer::track__16BitsStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
 {
-    int16_t const *in = static_cast<int16_t const *>(t->in);
+    const int16_t *in = static_cast<const int16_t *>(t->in);
 
-    if UNLIKELY(aux != NULL) {
+    if (CC_UNLIKELY(aux != NULL)) {
         int32_t l;
         int32_t r;
         // ramp gain
-        if UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc) {
+        if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) {
             int32_t vl = t->prevVolume[0];
             int32_t vr = t->prevVolume[1];
             int32_t va = t->prevAuxLevel;
@@ -727,7 +626,7 @@
             const uint32_t vrl = t->volumeRL;
             const int16_t va = (int16_t)t->auxLevel;
             do {
-                uint32_t rl = *reinterpret_cast<uint32_t const *>(in);
+                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
                 int16_t a = (int16_t)(((int32_t)in[0] + in[1]) >> 1);
                 in += 2;
                 out[0] = mulAddRL(1, rl, vrl, out[0]);
@@ -739,7 +638,7 @@
         }
     } else {
         // ramp gain
-        if UNLIKELY(t->volumeInc[0]|t->volumeInc[1]) {
+        if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) {
             int32_t vl = t->prevVolume[0];
             int32_t vr = t->prevVolume[1];
             const int32_t vlInc = t->volumeInc[0];
@@ -765,7 +664,7 @@
         else {
             const uint32_t vrl = t->volumeRL;
             do {
-                uint32_t rl = *reinterpret_cast<uint32_t const *>(in);
+                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
                 in += 2;
                 out[0] = mulAddRL(1, rl, vrl, out[0]);
                 out[1] = mulAddRL(0, rl, vrl, out[1]);
@@ -778,11 +677,11 @@
 
 void AudioMixer::track__16BitsMono(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
 {
-    int16_t const *in = static_cast<int16_t const *>(t->in);
+    const int16_t *in = static_cast<const int16_t *>(t->in);
 
-    if UNLIKELY(aux != NULL) {
+    if (CC_UNLIKELY(aux != NULL)) {
         // ramp gain
-        if UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc) {
+        if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1]|t->auxInc)) {
             int32_t vl = t->prevVolume[0];
             int32_t vr = t->prevVolume[1];
             int32_t va = t->prevAuxLevel;
@@ -825,7 +724,7 @@
         }
     } else {
         // ramp gain
-        if UNLIKELY(t->volumeInc[0]|t->volumeInc[1]) {
+        if (CC_UNLIKELY(t->volumeInc[0]|t->volumeInc[1])) {
             int32_t vl = t->prevVolume[0];
             int32_t vr = t->prevVolume[1];
             const int32_t vlInc = t->volumeInc[0];
@@ -862,21 +761,8 @@
     t->in = in;
 }
 
-void AudioMixer::ditherAndClamp(int32_t* out, int32_t const *sums, size_t c)
-{
-    for (size_t i=0 ; i<c ; i++) {
-        int32_t l = *sums++;
-        int32_t r = *sums++;
-        int32_t nl = l >> 12;
-        int32_t nr = r >> 12;
-        l = clamp16(nl);
-        r = clamp16(nr);
-        *out++ = (r<<16) | (l & 0xFFFF);
-    }
-}
-
 // no-op case
-void AudioMixer::process__nop(state_t* state)
+void AudioMixer::process__nop(state_t* state, int64_t pts)
 {
     uint32_t e0 = state->enabledTracks;
     size_t bufSize = state->frameCount * sizeof(int16_t) * MAX_NUM_CHANNELS;
@@ -891,7 +777,7 @@
             i = 31 - __builtin_clz(e2);
             e2 &= ~(1<<i);
             track_t& t2 = state->tracks[i];
-            if UNLIKELY(t2.mainBuffer != t1.mainBuffer) {
+            if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) {
                 e1 &= ~(1<<i);
             }
         }
@@ -906,8 +792,10 @@
             size_t outFrames = state->frameCount;
             while (outFrames) {
                 t1.buffer.frameCount = outFrames;
-                t1.bufferProvider->getNextBuffer(&t1.buffer);
-                if (!t1.buffer.raw) break;
+                int64_t outputPTS = calculateOutputPTS(
+                    t1, pts, state->frameCount - outFrames);
+                t1.bufferProvider->getNextBuffer(&t1.buffer, outputPTS);
+                if (t1.buffer.raw == NULL) break;
                 outFrames -= t1.buffer.frameCount;
                 t1.bufferProvider->releaseBuffer(&t1.buffer);
             }
@@ -916,7 +804,7 @@
 }
 
 // generic code without resampling
-void AudioMixer::process__genericNoResampling(state_t* state)
+void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts)
 {
     int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));
 
@@ -928,7 +816,7 @@
         e0 &= ~(1<<i);
         track_t& t = state->tracks[i];
         t.buffer.frameCount = state->frameCount;
-        t.bufferProvider->getNextBuffer(&t.buffer);
+        t.bufferProvider->getNextBuffer(&t.buffer, pts);
         t.frameCount = t.buffer.frameCount;
         t.in = t.buffer.raw;
         // t.in == NULL can happen if the track was flushed just after having
@@ -949,7 +837,7 @@
             j = 31 - __builtin_clz(e2);
             e2 &= ~(1<<j);
             track_t& t2 = state->tracks[j];
-            if UNLIKELY(t2.mainBuffer != t1.mainBuffer) {
+            if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) {
                 e1 &= ~(1<<j);
             }
         }
@@ -966,23 +854,25 @@
                 track_t& t = state->tracks[i];
                 size_t outFrames = BLOCKSIZE;
                 int32_t *aux = NULL;
-                if UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED) {
+                if (CC_UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED)) {
                     aux = t.auxBuffer + numFrames;
                 }
                 while (outFrames) {
                     size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount;
                     if (inFrames) {
-                        (t.hook)(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames, state->resampleTemp, aux);
+                        t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames, state->resampleTemp, aux);
                         t.frameCount -= inFrames;
                         outFrames -= inFrames;
-                        if UNLIKELY(aux != NULL) {
+                        if (CC_UNLIKELY(aux != NULL)) {
                             aux += inFrames;
                         }
                     }
                     if (t.frameCount == 0 && outFrames) {
                         t.bufferProvider->releaseBuffer(&t.buffer);
                         t.buffer.frameCount = (state->frameCount - numFrames) - (BLOCKSIZE - outFrames);
-                        t.bufferProvider->getNextBuffer(&t.buffer);
+                        int64_t outputPTS = calculateOutputPTS(
+                            t, pts, numFrames + (BLOCKSIZE - outFrames));
+                        t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
                         t.in = t.buffer.raw;
                         if (t.in == NULL) {
                             enabledTracks &= ~(1<<i);
@@ -1010,9 +900,10 @@
 }
 
 
-  // generic code with resampling
-void AudioMixer::process__genericResampling(state_t* state)
+// generic code with resampling
+void AudioMixer::process__genericResampling(state_t* state, int64_t pts)
 {
+    // this const just means that local variable outTemp doesn't change
     int32_t* const outTemp = state->outputTemp;
     const size_t size = sizeof(int32_t) * MAX_NUM_CHANNELS * state->frameCount;
 
@@ -1030,7 +921,7 @@
             j = 31 - __builtin_clz(e2);
             e2 &= ~(1<<j);
             track_t& t2 = state->tracks[j];
-            if UNLIKELY(t2.mainBuffer != t1.mainBuffer) {
+            if (CC_UNLIKELY(t2.mainBuffer != t1.mainBuffer)) {
                 e1 &= ~(1<<j);
             }
         }
@@ -1042,7 +933,7 @@
             e1 &= ~(1<<i);
             track_t& t = state->tracks[i];
             int32_t *aux = NULL;
-            if UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED) {
+            if (CC_UNLIKELY((t.needs & NEEDS_AUX__MASK) == NEEDS_AUX_ENABLED)) {
                 aux = t.auxBuffer;
             }
 
@@ -1050,23 +941,25 @@
             // acquire/release the buffers because it's done by
             // the resampler.
             if ((t.needs & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) {
-                (t.hook)(&t, outTemp, numFrames, state->resampleTemp, aux);
+                t.resampler->setPTS(pts);
+                t.hook(&t, outTemp, numFrames, state->resampleTemp, aux);
             } else {
 
                 size_t outFrames = 0;
 
                 while (outFrames < numFrames) {
                     t.buffer.frameCount = numFrames - outFrames;
-                    t.bufferProvider->getNextBuffer(&t.buffer);
+                    int64_t outputPTS = calculateOutputPTS(t, pts, outFrames);
+                    t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
                     t.in = t.buffer.raw;
                     // t.in == NULL can happen if the track was flushed just after having
                     // been enabled for mixing.
                     if (t.in == NULL) break;
 
-                    if UNLIKELY(aux != NULL) {
+                    if (CC_UNLIKELY(aux != NULL)) {
                         aux += outFrames;
                     }
-                    (t.hook)(&t, outTemp + outFrames*MAX_NUM_CHANNELS, t.buffer.frameCount, state->resampleTemp, aux);
+                    t.hook(&t, outTemp + outFrames*MAX_NUM_CHANNELS, t.buffer.frameCount, state->resampleTemp, aux);
                     outFrames += t.buffer.frameCount;
                     t.bufferProvider->releaseBuffer(&t.buffer);
                 }
@@ -1077,9 +970,15 @@
 }
 
 // one track, 16 bits stereo without resampling is the most common case
-void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state)
+void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state,
+                                                           int64_t pts)
 {
+    // This method is only called when state->enabledTracks has exactly
+    // one bit set.  The asserts below would verify this, but are commented out
+    // since the whole point of this method is to optimize performance.
+    //ALOG_ASSERT(0 != state->enabledTracks, "no tracks enabled");
     const int i = 31 - __builtin_clz(state->enabledTracks);
+    //ALOG_ASSERT((1 << i) == state->enabledTracks, "more than 1 track enabled");
     const track_t& t = state->tracks[i];
 
     AudioBufferProvider::Buffer& b(t.buffer);
@@ -1092,8 +991,9 @@
     const uint32_t vrl = t.volumeRL;
     while (numFrames) {
         b.frameCount = numFrames;
-        t.bufferProvider->getNextBuffer(&b);
-        int16_t const *in = b.i16;
+        int64_t outputPTS = calculateOutputPTS(t, pts, out - t.mainBuffer);
+        t.bufferProvider->getNextBuffer(&b, outputPTS);
+        const int16_t *in = b.i16;
 
         // in == NULL can happen if the track was flushed just after having
         // been enabled for mixing.
@@ -1105,11 +1005,11 @@
         }
         size_t outFrames = b.frameCount;
 
-        if (UNLIKELY(uint32_t(vl) > UNITY_GAIN || uint32_t(vr) > UNITY_GAIN)) {
+        if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN || uint32_t(vr) > UNITY_GAIN)) {
             // volume is boosted, so we might need to clamp even though
             // we process only one track.
             do {
-                uint32_t rl = *reinterpret_cast<uint32_t const *>(in);
+                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
                 in += 2;
                 int32_t l = mulRL(1, rl, vrl) >> 12;
                 int32_t r = mulRL(0, rl, vrl) >> 12;
@@ -1120,7 +1020,7 @@
             } while (--outFrames);
         } else {
             do {
-                uint32_t rl = *reinterpret_cast<uint32_t const *>(in);
+                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
                 in += 2;
                 int32_t l = mulRL(1, rl, vrl) >> 12;
                 int32_t r = mulRL(0, rl, vrl) >> 12;
@@ -1132,10 +1032,12 @@
     }
 }
 
+#if 0
 // 2 tracks is also a common case
 // NEVER used in current implementation of process__validate()
 // only use if the 2 tracks have the same output buffer
-void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state)
+void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state,
+                                                            int64_t pts)
 {
     int i;
     uint32_t en = state->enabledTracks;
@@ -1149,12 +1051,12 @@
     const track_t& t1 = state->tracks[i];
     AudioBufferProvider::Buffer& b1(t1.buffer);
 
-    int16_t const *in0;
+    const int16_t *in0;
     const int16_t vl0 = t0.volume[0];
     const int16_t vr0 = t0.volume[1];
     size_t frameCount0 = 0;
 
-    int16_t const *in1;
+    const int16_t *in1;
     const int16_t vl1 = t1.volume[0];
     const int16_t vr1 = t1.volume[1];
     size_t frameCount1 = 0;
@@ -1162,14 +1064,16 @@
     //FIXME: only works if two tracks use same buffer
     int32_t* out = t0.mainBuffer;
     size_t numFrames = state->frameCount;
-    int16_t const *buff = NULL;
+    const int16_t *buff = NULL;
 
 
     while (numFrames) {
 
         if (frameCount0 == 0) {
             b0.frameCount = numFrames;
-            t0.bufferProvider->getNextBuffer(&b0);
+            int64_t outputPTS = calculateOutputPTS(t0, pts,
+                                                   out - t0.mainBuffer);
+            t0.bufferProvider->getNextBuffer(&b0, outputPTS);
             if (b0.i16 == NULL) {
                 if (buff == NULL) {
                     buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount];
@@ -1183,14 +1087,16 @@
         }
         if (frameCount1 == 0) {
             b1.frameCount = numFrames;
-            t1.bufferProvider->getNextBuffer(&b1);
+            int64_t outputPTS = calculateOutputPTS(t1, pts,
+                                                   out - t0.mainBuffer);
+            t1.bufferProvider->getNextBuffer(&b1, outputPTS);
             if (b1.i16 == NULL) {
                 if (buff == NULL) {
                     buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount];
                 }
                 in1 = buff;
                 b1.frameCount = numFrames;
-               } else {
+            } else {
                 in1 = b1.i16;
             }
             frameCount1 = b1.frameCount;
@@ -1225,11 +1131,18 @@
         }
     }
 
-    if (buff != NULL) {
-        delete [] buff;
-    }
+    delete [] buff;
+}
+#endif
+
+int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS,
+                                       int outputFrameIndex)
+{
+    if (AudioBufferProvider::kInvalidPTS == basePTS)
+        return AudioBufferProvider::kInvalidPTS;
+
+    return basePTS + ((outputFrameIndex * t.localTimeFreq) / t.sampleRate);
 }
 
 // ----------------------------------------------------------------------------
 }; // namespace android
-
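
For orientation, AudioMixer::calculateOutputPTS() added above converts an output frame offset into the units of the local media clock: basePTS is in local-time ticks, t.localTimeFreq appears to be ticks per second, and t.sampleRate is output frames per second, so the offset in ticks is outputFrameIndex * localTimeFreq / sampleRate. A hedged numeric sketch (the tick rate below is an assumption for illustration, not taken from this patch):

    #include <cstdint>
    #include <cstdio>

    // Mirror of the PTS interpolation above, with hypothetical values:
    // a 1 MHz local clock and a 48 kHz mix, so frame 480 lands 10000 ticks
    // (10 ms) after basePTS.
    int main()
    {
        const int64_t  basePTS       = 1000000;   // ticks (assumed)
        const uint64_t localTimeFreq = 1000000;   // ticks per second (assumed)
        const uint32_t sampleRate    = 48000;     // output frames per second
        const int      frameIndex    = 480;

        int64_t pts = basePTS + ((frameIndex * (int64_t)localTimeFreq) / sampleRate);
        printf("outputPTS = %lld\n", (long long)pts);   // prints 1010000
        return 0;
    }
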
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index 0137185..b210212 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -1,4 +1,4 @@
-/* //device/include/server/AudioFlinger/AudioMixer.h
+/*
 **
 ** Copyright 2007, The Android Open Source Project
 **
@@ -28,17 +28,12 @@
 
 // ----------------------------------------------------------------------------
 
-#define LIKELY( exp )       (__builtin_expect( (exp) != 0, true  ))
-#define UNLIKELY( exp )     (__builtin_expect( (exp) != 0, false ))
-
-// ----------------------------------------------------------------------------
-
 class AudioMixer
 {
 public:
                             AudioMixer(size_t frameCount, uint32_t sampleRate);
 
-                            ~AudioMixer();
+    /*virtual*/             ~AudioMixer();  // non-virtual saves a v-table, restore if sub-classed
 
     static const uint32_t MAX_NUM_TRACKS = 32;
     static const uint32_t MAX_NUM_CHANNELS = 2;
@@ -47,11 +42,10 @@
 
     enum { // names
 
-        // track units (32 units)
+        // track names (MAX_NUM_TRACKS units)
         TRACK0          = 0x1000,
 
-        // enable/disable
-        MIXING          = 0x2000,
+        // 0x2000 is unused
 
         // setParameter targets
         TRACK           = 0x3000,
@@ -65,33 +59,31 @@
         FORMAT          = 0x4001,
         MAIN_BUFFER     = 0x4002,
         AUX_BUFFER      = 0x4003,
-        // for TARGET RESAMPLE
+        // for target RESAMPLE
         SAMPLE_RATE     = 0x4100,
         RESET           = 0x4101,
-        // for TARGET VOLUME (8 channels max)
+        // for target RAMP_VOLUME and VOLUME (8 channels max)
         VOLUME0         = 0x4200,
         VOLUME1         = 0x4201,
         AUXLEVEL        = 0x4210,
     };
 
 
+    // For all APIs with "name": TRACK0 <= name < TRACK0 + MAX_NUM_TRACKS
     int         getTrackName();
     void        deleteTrackName(int name);
 
-    status_t    enable(int name);
-    status_t    disable(int name);
+    void        enable(int name);
+    void        disable(int name);
 
-    status_t    setActiveTrack(int track);
-    status_t    setParameter(int target, int name, void *value);
+    void        setParameter(int name, int target, int param, void *value);
 
-    status_t    setBufferProvider(AudioBufferProvider* bufferProvider);
-    void        process();
+    void        setBufferProvider(int name, AudioBufferProvider* bufferProvider);
+    void        process(int64_t pts);
 
     uint32_t    trackNames() const { return mTrackNames; }
 
-    static void ditherAndClamp(int32_t* out, int32_t const *sums, size_t c);
-
-    size_t      getUnreleasedFrames(int name);
+    size_t      getUnreleasedFrames(int name) const;
 
 private:
 
@@ -119,15 +111,9 @@
         NEEDS_AUX_ENABLED      = 0x00010000,
     };
 
-    static inline int32_t applyVolume(int32_t in, int32_t v) {
-        return in * v;
-    }
-
-
     struct state_t;
     struct track_t;
 
-    typedef void (*mix_t)(state_t* state);
     typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux);
     static const int BLOCKSIZE = 16; // 4 cache lines
 
@@ -135,41 +121,58 @@
         uint32_t    needs;
 
         union {
-        int16_t     volume[2];      // [0]3.12 fixed point
+        int16_t     volume[MAX_NUM_CHANNELS]; // [0]3.12 fixed point
         int32_t     volumeRL;
         };
 
-        int32_t     prevVolume[2];
+        int32_t     prevVolume[MAX_NUM_CHANNELS];
 
-        int32_t     volumeInc[2];
-        int32_t     auxLevel;
+        // 16-byte boundary
+
+        int32_t     volumeInc[MAX_NUM_CHANNELS];
         int32_t     auxInc;
         int32_t     prevAuxLevel;
 
+        // 16-byte boundary
+
+        int16_t     auxLevel;       // 0 <= auxLevel <= MAX_GAIN_INT, but signed for mul performance
         uint16_t    frameCount;
 
-        uint8_t     channelCount : 4;
-        uint8_t     enabled      : 1;
-        uint8_t     reserved0    : 3;
-        uint8_t     format;
-        uint32_t    channelMask;
+        uint8_t     channelCount;   // 1 or 2, redundant with (needs & NEEDS_CHANNEL_COUNT__MASK)
+        uint8_t     format;         // always 16
+        uint16_t    enabled;        // actually bool
+        uint32_t    channelMask;    // currently under-used
 
         AudioBufferProvider*                bufferProvider;
-        mutable AudioBufferProvider::Buffer buffer;
+
+        // 16-byte boundary
+
+        mutable AudioBufferProvider::Buffer buffer; // 8 bytes
 
         hook_t      hook;
-        void const* in;             // current location in buffer
+        const void* in;             // current location in buffer
+
+        // 16-byte boundary
 
         AudioResampler*     resampler;
         uint32_t            sampleRate;
         int32_t*           mainBuffer;
         int32_t*           auxBuffer;
 
+        // 16-byte boundary
+
+        uint64_t    localTimeFreq;
+
+        int64_t     padding;
+
+        // 16-byte boundary
+
         bool        setResampler(uint32_t sampleRate, uint32_t devSampleRate);
-        bool        doesResample() const;
-        void        resetResampler();
+        bool        doesResample() const { return resampler != NULL; }
+        void        resetResampler() { if (resampler != NULL) resampler->reset(); }
         void        adjustVolumeRamp(bool aux);
-        size_t      getUnreleasedFrames();
+        size_t      getUnreleasedFrames() const { return resampler != NULL ?
+                                                    resampler->getUnreleasedFrames() : 0; };
     };
 
     // pad to 32-bytes to fill cache line
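
The "16-byte boundary" markers added to track_t above document the intended field layout, and the explicit padding member plus the existing "pad to 32-bytes" comment aim at keeping the per-track state cache-line friendly. A C++11-style compile-time check one could keep next to such a struct (the field set below is illustrative, not the real track_t):

    #include <cstdint>

    // Generic illustration only: keep a hot per-track struct a multiple of a
    // fixed pad unit so an array of them stays aligned as intended.
    struct ExampleTrack {                 // hypothetical, not AudioMixer::track_t
        int32_t volume[2];
        int32_t prevVolume[2];
        int32_t volumeInc[2];
        int64_t padding;                  // explicit pad, like track_t::padding
    };

    static_assert(sizeof(ExampleTrack) % 32 == 0,
                  "per-track state should be a multiple of the 32-byte pad unit");
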
@@ -177,14 +180,14 @@
         uint32_t        enabledTracks;
         uint32_t        needsChanged;
         size_t          frameCount;
-        mix_t           hook;
+        void            (*hook)(state_t* state, int64_t pts);   // one of process__*, never NULL
         int32_t         *outputTemp;
         int32_t         *resampleTemp;
         int32_t         reserved[2];
-        track_t         tracks[32]; __attribute__((aligned(32)));
+        track_t         tracks[MAX_NUM_TRACKS]; __attribute__((aligned(32)));
     };
 
-    int             mActiveTrack;
+    // bitmask of allocated track names, where bit 0 corresponds to TRACK0 etc.
     uint32_t        mTrackNames;
     const uint32_t  mSampleRate;
 
@@ -199,12 +202,19 @@
     static void volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
     static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
 
-    static void process__validate(state_t* state);
-    static void process__nop(state_t* state);
-    static void process__genericNoResampling(state_t* state);
-    static void process__genericResampling(state_t* state);
-    static void process__OneTrack16BitsStereoNoResampling(state_t* state);
-    static void process__TwoTracks16BitsStereoNoResampling(state_t* state);
+    static void process__validate(state_t* state, int64_t pts);
+    static void process__nop(state_t* state, int64_t pts);
+    static void process__genericNoResampling(state_t* state, int64_t pts);
+    static void process__genericResampling(state_t* state, int64_t pts);
+    static void process__OneTrack16BitsStereoNoResampling(state_t* state,
+                                                          int64_t pts);
+#if 0
+    static void process__TwoTracks16BitsStereoNoResampling(state_t* state,
+                                                           int64_t pts);
+#endif
+
+    static int64_t calculateOutputPTS(const track_t& t, int64_t basePTS,
+                                      int outputFrameIndex);
 };
 
 // ----------------------------------------------------------------------------
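
To summarize the header change just above: state_t::hook is now a plain function pointer of type void (*)(state_t*, int64_t), selected by process__validate() from the process__* variants (the added comment notes it is never NULL once validated), and AudioMixer::process(pts) forwards the timestamp to it. A minimal sketch of that dispatch shape, with simplified stand-in names rather than the real class:

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-in for the hook dispatch; the real state_t also carries
    // the track array, temp buffers, etc.
    struct MixState {
        void (*hook)(MixState* state, int64_t pts);   // never NULL once validated
    };

    static void processNop(MixState*, int64_t pts)
    {
        printf("nop hook, pts=%lld\n", (long long)pts);
    }

    int main()
    {
        MixState state = { processNop };   // process__validate() would pick this
        state.hook(&state, 123456);        // what AudioMixer::process(pts) does
        return 0;
    }
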
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index 6be669b..c23eb04 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -30,8 +30,8 @@
 #include <utils/String16.h>
 #include <utils/threads.h>
 #include "AudioPolicyService.h"
+#include "ServiceUtilities.h"
 #include <cutils/properties.h>
-#include <dlfcn.h>
 #include <hardware_legacy/power.h>
 #include <media/AudioEffect.h>
 #include <media/EffectsFactoryApi.h>
@@ -44,18 +44,11 @@
 
 namespace android {
 
-static const char *kDeadlockedString = "AudioPolicyService may be deadlocked\n";
-static const char *kCmdDeadlockedString = "AudioPolicyService command thread may be deadlocked\n";
+static const char kDeadlockedString[] = "AudioPolicyService may be deadlocked\n";
+static const char kCmdDeadlockedString[] = "AudioPolicyService command thread may be deadlocked\n";
 
 static const int kDumpLockRetries = 50;
-static const int kDumpLockSleep = 20000;
-
-static bool checkPermission() {
-    if (getpid() == IPCThreadState::self()->getCallingPid()) return true;
-    bool ok = checkCallingPermission(String16("android.permission.MODIFY_AUDIO_SETTINGS"));
-    if (!ok) ALOGE("Request requires android.permission.MODIFY_AUDIO_SETTINGS");
-    return ok;
-}
+static const int kDumpLockSleepUs = 20000;
 
 namespace {
     extern struct audio_policy_service_ops aps_ops;
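
The local checkPermission() deleted above is replaced by settingsAllowed() from the new ServiceUtilities.h include; judging from the removed body, the check itself is unchanged: the caller is either in-process or must hold MODIFY_AUDIO_SETTINGS. A sketch based only on the deleted code (the actual contents of ServiceUtilities are not part of this patch):

    // Sketch only -- mirrors the deleted checkPermission(); the real
    // settingsAllowed() lives in ServiceUtilities, which this patch does not show.
    #include <unistd.h>
    #include <binder/IPCThreadState.h>
    #include <binder/IServiceManager.h>   // checkCallingPermission()
    #include <utils/String16.h>
    #include <cutils/log.h>

    namespace android {

    static bool settingsAllowedSketch() {
        if (getpid() == IPCThreadState::self()->getCallingPid()) return true;
        bool ok = checkCallingPermission(
                String16("android.permission.MODIFY_AUDIO_SETTINGS"));
        if (!ok) ALOGE("Request requires android.permission.MODIFY_AUDIO_SETTINGS");
        return ok;
    }

    }  // namespace android
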
@@ -76,7 +69,7 @@
     // start tone playback thread
     mTonePlaybackThread = new AudioCommandThread(String8(""));
     // start audio commands thread
-    mAudioCommandThread = new AudioCommandThread(String8("ApmCommandThread"));
+    mAudioCommandThread = new AudioCommandThread(String8("ApmCommand"));
 
     /* instantiate the audio policy manager */
     rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
@@ -123,19 +116,7 @@
 
     // release audio pre processing resources
     for (size_t i = 0; i < mInputSources.size(); i++) {
-        InputSourceDesc *source = mInputSources.valueAt(i);
-        Vector <EffectDesc *> effects = source->mEffects;
-        for (size_t j = 0; j < effects.size(); j++) {
-            delete effects[j]->mName;
-            Vector <effect_param_t *> params = effects[j]->mParams;
-            for (size_t k = 0; k < params.size(); k++) {
-                delete params[k];
-            }
-            params.clear();
-            delete effects[j];
-        }
-        effects.clear();
-        delete source;
+        delete mInputSources.valueAt(i);
     }
     mInputSources.clear();
 
@@ -145,9 +126,9 @@
     }
     mInputs.clear();
 
-    if (mpAudioPolicy && mpAudioPolicyDev)
+    if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL)
         mpAudioPolicyDev->destroy_audio_policy(mpAudioPolicyDev, mpAudioPolicy);
-    if (mpAudioPolicyDev)
+    if (mpAudioPolicyDev != NULL)
         audio_policy_dev_close(mpAudioPolicyDev);
 }
 
@@ -158,7 +139,7 @@
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
     }
-    if (!checkPermission()) {
+    if (!settingsAllowed()) {
         return PERMISSION_DENIED;
     }
     if (!audio_is_output_device(device) && !audio_is_input_device(device)) {
@@ -186,15 +167,15 @@
                                                       device_address);
 }
 
-status_t AudioPolicyService::setPhoneState(int state)
+status_t AudioPolicyService::setPhoneState(audio_mode_t state)
 {
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
     }
-    if (!checkPermission()) {
+    if (!settingsAllowed()) {
         return PERMISSION_DENIED;
     }
-    if (state < 0 || state >= AUDIO_MODE_CNT) {
+    if (uint32_t(state) >= AUDIO_MODE_CNT) {
         return BAD_VALUE;
     }
 
@@ -208,26 +189,13 @@
     return NO_ERROR;
 }
 
-status_t AudioPolicyService::setRingerMode(uint32_t mode, uint32_t mask)
-{
-    if (mpAudioPolicy == NULL) {
-        return NO_INIT;
-    }
-    if (!checkPermission()) {
-        return PERMISSION_DENIED;
-    }
-
-    mpAudioPolicy->set_ringer_mode(mpAudioPolicy, mode, mask);
-    return NO_ERROR;
-}
-
 status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage,
                                          audio_policy_forced_cfg_t config)
 {
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
     }
-    if (!checkPermission()) {
+    if (!settingsAllowed()) {
         return PERMISSION_DENIED;
     }
     if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
@@ -255,7 +223,7 @@
 
 audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream,
                                     uint32_t samplingRate,
-                                    uint32_t format,
+                                    audio_format_t format,
                                     uint32_t channels,
                                     audio_policy_output_flags_t flags)
 {
@@ -301,9 +269,9 @@
     mpAudioPolicy->release_output(mpAudioPolicy, output);
 }
 
-audio_io_handle_t AudioPolicyService::getInput(int inputSource,
+audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource,
                                     uint32_t samplingRate,
-                                    uint32_t format,
+                                    audio_format_t format,
                                     uint32_t channels,
                                     audio_in_acoustics_t acoustics,
                                     int audioSession)
@@ -311,6 +279,10 @@
     if (mpAudioPolicy == NULL) {
         return 0;
     }
+    // already checked by client, but double-check in case the client wrapper is bypassed
+    if (uint32_t(inputSource) >= AUDIO_SOURCE_CNT) {
+        return 0;
+    }
     Mutex::Autolock _l(mLock);
     audio_io_handle_t input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate,
                                                        format, channels, acoustics);
@@ -319,15 +291,14 @@
         return input;
     }
     // create audio pre processors according to input source
-    ssize_t index = mInputSources.indexOfKey((audio_source_t)inputSource);
+    ssize_t index = mInputSources.indexOfKey(inputSource);
     if (index < 0) {
         return input;
     }
     ssize_t idx = mInputs.indexOfKey(input);
     InputDesc *inputDesc;
     if (idx < 0) {
-        inputDesc = new InputDesc();
-        inputDesc->mSessionId = audioSession;
+        inputDesc = new InputDesc(audioSession);
         mInputs.add(input, inputDesc);
     } else {
         inputDesc = mInputs.valueAt(idx);
@@ -386,7 +357,6 @@
     }
     InputDesc *inputDesc = mInputs.valueAt(index);
     setPreProcessorEnabled(inputDesc, false);
-    inputDesc->mEffects.clear();
     delete inputDesc;
     mInputs.removeItemsAt(index);
 }
@@ -398,40 +368,58 @@
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
     }
-    if (!checkPermission()) {
+    if (!settingsAllowed()) {
         return PERMISSION_DENIED;
     }
-    if (stream < 0 || stream >= AUDIO_STREAM_CNT) {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
         return BAD_VALUE;
     }
     mpAudioPolicy->init_stream_volume(mpAudioPolicy, stream, indexMin, indexMax);
     return NO_ERROR;
 }
 
-status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream, int index)
+status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream,
+                                                  int index,
+                                                  audio_devices_t device)
 {
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
     }
-    if (!checkPermission()) {
+    if (!settingsAllowed()) {
         return PERMISSION_DENIED;
     }
-    if (stream < 0 || stream >= AUDIO_STREAM_CNT) {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
         return BAD_VALUE;
     }
 
-    return mpAudioPolicy->set_stream_volume_index(mpAudioPolicy, stream, index);
+    if (mpAudioPolicy->set_stream_volume_index_for_device) {
+        return mpAudioPolicy->set_stream_volume_index_for_device(mpAudioPolicy,
+                                                                stream,
+                                                                index,
+                                                                device);
+    } else {
+        return mpAudioPolicy->set_stream_volume_index(mpAudioPolicy, stream, index);
+    }
 }
 
-status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream, int *index)
+status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream,
+                                                  int *index,
+                                                  audio_devices_t device)
 {
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
     }
-    if (stream < 0 || stream >= AUDIO_STREAM_CNT) {
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
         return BAD_VALUE;
     }
-    return mpAudioPolicy->get_stream_volume_index(mpAudioPolicy, stream, index);
+    if (mpAudioPolicy->get_stream_volume_index_for_device) {
+        return mpAudioPolicy->get_stream_volume_index_for_device(mpAudioPolicy,
+                                                                stream,
+                                                                index,
+                                                                device);
+    } else {
+        return mpAudioPolicy->get_stream_volume_index(mpAudioPolicy, stream, index);
+    }
 }
 
 uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
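
The pattern introduced in set/getStreamVolumeIndex above is worth calling out: the per-device entry points are optional members of the audio_policy HAL struct, so the service probes the function pointer and falls back to the legacy call when a vendor module does not provide it. A generic sketch of that probe-and-fallback shape (struct and names below are illustrative, not the real HAL):

    #include <cstddef>

    // Illustrative HAL-style vtable: newer entry points may be left NULL by
    // older modules, so callers must probe before dispatching.
    struct policy_ops {
        int (*set_volume)(int stream, int index);                      // legacy
        int (*set_volume_for_device)(int stream, int index, int dev);  // optional
    };

    static int setVolume(const policy_ops* ops, int stream, int index, int dev)
    {
        if (ops->set_volume_for_device != NULL) {
            return ops->set_volume_for_device(stream, index, dev);
        }
        return ops->set_volume(stream, index);   // 'dev' is ignored on old modules
    }
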
@@ -442,10 +430,12 @@
     return mpAudioPolicy->get_strategy_for_stream(mpAudioPolicy, stream);
 }
 
-uint32_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
+//audio policy: use audio_device_t appropriately
+
+audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
 {
     if (mpAudioPolicy == NULL) {
-        return 0;
+        return (audio_devices_t)0;
     }
     return mpAudioPolicy->get_devices_for_stream(mpAudioPolicy, stream);
 }
@@ -487,7 +477,7 @@
     return mpAudioPolicy->set_effect_enabled(mpAudioPolicy, id, enabled);
 }
 
-bool AudioPolicyService::isStreamActive(int stream, uint32_t inPastMs) const
+bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
 {
     if (mpAudioPolicy == NULL) {
         return 0;
@@ -534,7 +524,7 @@
 }
 
 void AudioPolicyService::binderDied(const wp<IBinder>& who) {
-    ALOGW("binderDied() %p, tid %d, calling tid %d", who.unsafe_get(), gettid(),
+    ALOGW("binderDied() %p, tid %d, calling pid %d", who.unsafe_get(), gettid(),
             IPCThreadState::self()->getCallingPid());
 }
 
@@ -546,7 +536,7 @@
             locked = true;
             break;
         }
-        usleep(kDumpLockSleep);
+        usleep(kDumpLockSleepUs);
     }
     return locked;
 }
@@ -570,7 +560,7 @@
 
 status_t AudioPolicyService::dump(int fd, const Vector<String16>& args)
 {
-    if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
+    if (!dumpAllowed()) {
         dumpPermissionDenial(fd);
     } else {
         bool locked = tryLock(mLock);
@@ -580,10 +570,10 @@
         }
 
         dumpInternals(fd);
-        if (mAudioCommandThread != NULL) {
+        if (mAudioCommandThread != 0) {
             mAudioCommandThread->dump(fd);
         }
-        if (mTonePlaybackThread != NULL) {
+        if (mTonePlaybackThread != 0) {
             mTonePlaybackThread->dump(fd);
         }
 
@@ -610,12 +600,11 @@
     return NO_ERROR;
 }
 
-void AudioPolicyService::setPreProcessorEnabled(InputDesc *inputDesc, bool enabled)
+void AudioPolicyService::setPreProcessorEnabled(const InputDesc *inputDesc, bool enabled)
 {
-    Vector<sp<AudioEffect> > fxVector = inputDesc->mEffects;
+    const Vector<sp<AudioEffect> > &fxVector = inputDesc->mEffects;
     for (size_t i = 0; i < fxVector.size(); i++) {
-        sp<AudioEffect> fx = fxVector.itemAt(i);
-        fx->setEnabled(enabled);
+        fxVector.itemAt(i)->setEnabled(enabled);
     }
 }
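
The const-reference change above avoids copying the Vector of strong pointers (and bumping every element's refcount) on each call to setPreProcessorEnabled(); the loop only needs to iterate in place. The same idea in a self-contained form, using standard containers rather than the Android ones:

    #include <string>
    #include <vector>

    // Taking the container by const reference iterates in place instead of
    // copying the whole vector just to walk it.
    static size_t totalLength(const std::vector<std::string>& items)
    {
        size_t n = 0;
        for (size_t i = 0; i < items.size(); i++) {
            n += items[i].size();
        }
        return n;
    }
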
 
@@ -641,7 +630,7 @@
         release_wake_lock(mName.string());
     }
     mAudioCommands.clear();
-    if (mpToneGenerator != NULL) delete mpToneGenerator;
+    delete mpToneGenerator;
 }
 
 void AudioPolicyService::AudioCommandThread::onFirstRef()
@@ -649,7 +638,7 @@
     if (mName != "") {
         run(mName.string(), ANDROID_PRIORITY_AUDIO);
     } else {
-        run("AudioCommandThread", ANDROID_PRIORITY_AUDIO);
+        run("AudioCommand", ANDROID_PRIORITY_AUDIO);
     }
 }
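
The thread-name shortening here (and "ApmCommand" earlier in the constructor) is likely about the kernel's 16-byte limit on task names, which includes the terminating NUL: "AudioCommandThread" and "ApmCommandThread" exceed it, while the shorter names fit. A standalone illustration of the limit, assuming a Linux/bionic target:

    #include <sys/prctl.h>
    #include <cstdio>
    #include <cstring>

    // PR_SET_NAME silently truncates to 15 characters plus the NUL terminator,
    // so "AudioCommandThread" is stored as "AudioCommandThr".
    int main()
    {
        char name[16] = {0};
        prctl(PR_SET_NAME, "AudioCommandThread", 0, 0, 0);
        prctl(PR_GET_NAME, name, 0, 0, 0);
        printf("stored name: '%s' (%zu chars)\n", name, strlen(name));
        return 0;
    }
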
 
@@ -660,7 +649,7 @@
     mLock.lock();
     while (!exitPending())
     {
-        while(!mAudioCommands.isEmpty()) {
+        while (!mAudioCommands.isEmpty()) {
             nsecs_t curTime = systemTime();
             // commands are sorted by increasing time stamp: execute them from index 0 and up
             if (mAudioCommands[0]->mTime <= curTime) {
@@ -674,8 +663,7 @@
                     ToneData *data = (ToneData *)command->mParam;
                     ALOGV("AudioCommandThread() processing start tone %d on stream %d",
                             data->mType, data->mStream);
-                    if (mpToneGenerator != NULL)
-                        delete mpToneGenerator;
+                    delete mpToneGenerator;
                     mpToneGenerator = new ToneGenerator(data->mStream, 1.0);
                     mpToneGenerator->startTone(data->mType);
                     delete data;
@@ -705,16 +693,16 @@
                     delete data;
                     }break;
                 case SET_PARAMETERS: {
-                     ParametersData *data = (ParametersData *)command->mParam;
-                     ALOGV("AudioCommandThread() processing set parameters string %s, io %d",
-                             data->mKeyValuePairs.string(), data->mIO);
-                     command->mStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs);
-                     if (command->mWaitStatus) {
-                         command->mCond.signal();
-                         mWaitWorkCV.wait(mLock);
-                     }
-                     delete data;
-                     }break;
+                    ParametersData *data = (ParametersData *)command->mParam;
+                    ALOGV("AudioCommandThread() processing set parameters string %s, io %d",
+                            data->mKeyValuePairs.string(), data->mIO);
+                    command->mStatus = AudioSystem::setParameters(data->mIO, data->mKeyValuePairs);
+                    if (command->mWaitStatus) {
+                        command->mCond.signal();
+                        mWaitWorkCV.wait(mLock);
+                    }
+                    delete data;
+                    }break;
                 case SET_VOICE_VOLUME: {
                     VoiceVolumeData *data = (VoiceVolumeData *)command->mParam;
                     ALOGV("AudioCommandThread() processing set voice volume volume %f",
@@ -767,7 +755,7 @@
     snprintf(buffer, SIZE, "- Commands:\n");
     result = String8(buffer);
     result.append("   Command Time        Wait pParam\n");
-    for (int i = 0; i < (int)mAudioCommands.size(); i++) {
+    for (size_t i = 0; i < mAudioCommands.size(); i++) {
         mAudioCommands[i]->dump(buffer, SIZE);
         result.append(buffer);
     }
@@ -782,7 +770,8 @@
     return NO_ERROR;
 }
 
-void AudioPolicyService::AudioCommandThread::startToneCommand(int type, int stream)
+void AudioPolicyService::AudioCommandThread::startToneCommand(ToneGenerator::tone_type type,
+        audio_stream_type_t stream)
 {
     AudioCommand *command = new AudioCommand();
     command->mCommand = START_TONE;
@@ -809,9 +798,9 @@
     mWaitWorkCV.signal();
 }
 
-status_t AudioPolicyService::AudioCommandThread::volumeCommand(int stream,
+status_t AudioPolicyService::AudioCommandThread::volumeCommand(audio_stream_type_t stream,
                                                                float volume,
-                                                               int output,
+                                                               audio_io_handle_t output,
                                                                int delayMs)
 {
     status_t status = NO_ERROR;
@@ -841,7 +830,7 @@
     return status;
 }
 
-status_t AudioPolicyService::AudioCommandThread::parametersCommand(int ioHandle,
+status_t AudioPolicyService::AudioCommandThread::parametersCommand(audio_io_handle_t ioHandle,
                                                                    const char *keyValuePairs,
                                                                    int delayMs)
 {
@@ -900,7 +889,7 @@
 // insertCommand_l() must be called with mLock held
 void AudioPolicyService::AudioCommandThread::insertCommand_l(AudioCommand *command, int delayMs)
 {
-    ssize_t i;
+    ssize_t i;  // not size_t because i will count down to -1
     Vector <AudioCommand *> removedCommands;
 
     command->mTime = systemTime() + milliseconds(delayMs);
@@ -927,19 +916,19 @@
             AudioParameter param = AudioParameter(data->mKeyValuePairs);
             AudioParameter param2 = AudioParameter(data2->mKeyValuePairs);
             for (size_t j = 0; j < param.size(); j++) {
-               String8 key;
-               String8 value;
-               param.getAt(j, key, value);
-               for (size_t k = 0; k < param2.size(); k++) {
-                  String8 key2;
-                  String8 value2;
-                  param2.getAt(k, key2, value2);
-                  if (key2 == key) {
-                      param2.remove(key2);
-                      ALOGV("Filtering out parameter %s", key2.string());
-                      break;
-                  }
-               }
+                String8 key;
+                String8 value;
+                param.getAt(j, key, value);
+                for (size_t k = 0; k < param2.size(); k++) {
+                    String8 key2;
+                    String8 value2;
+                    param2.getAt(k, key2, value2);
+                    if (key2 == key) {
+                        param2.remove(key2);
+                        ALOGV("Filtering out parameter %s", key2.string());
+                        break;
+                    }
+                }
             }
             // if all keys have been filtered out, remove the command.
             // otherwise, update the key value pairs
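
The re-indented loop above implements key-level replacement for queued SET_PARAMETERS commands: every key carried by the new command removes the matching key from an already-queued command for the same output, so only the newest value survives (and a fully filtered command is dropped). A compact sketch of the same idea over plain key/value maps:

    #include <map>
    #include <string>

    // Remove from 'queued' every key that also appears in 'incoming', so the
    // incoming command's value wins; if 'queued' ends up empty the caller can
    // drop that command entirely, as insertCommand_l() does.
    static void filterSupersededKeys(std::map<std::string, std::string>& queued,
                                     const std::map<std::string, std::string>& incoming)
    {
        for (std::map<std::string, std::string>::const_iterator it = incoming.begin();
             it != incoming.end(); ++it) {
            queued.erase(it->first);
        }
    }
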
@@ -1011,7 +1000,7 @@
                                        const char *keyValuePairs,
                                        int delayMs)
 {
-    mAudioCommandThread->parametersCommand((int)ioHandle, keyValuePairs,
+    mAudioCommandThread->parametersCommand(ioHandle, keyValuePairs,
                                            delayMs);
 }
 
@@ -1020,8 +1009,8 @@
                                         audio_io_handle_t output,
                                         int delayMs)
 {
-    return (int)mAudioCommandThread->volumeCommand((int)stream, volume,
-                                                   (int)output, delayMs);
+    return (int)mAudioCommandThread->volumeCommand(stream, volume,
+                                                   output, delayMs);
 }
 
 int AudioPolicyService::startTone(audio_policy_tone_t tone,
@@ -1031,7 +1020,7 @@
         ALOGE("startTone: illegal tone requested (%d)", tone);
     if (stream != AUDIO_STREAM_VOICE_CALL)
         ALOGE("startTone: illegal stream (%d) requested for tone %d", stream,
-             tone);
+            tone);
     mTonePlaybackThread->startToneCommand(ToneGenerator::TONE_SUP_CALL_WAITING,
                                           AUDIO_STREAM_VOICE_CALL);
     return 0;
@@ -1052,7 +1041,7 @@
 // Audio pre-processing configuration
 // ----------------------------------------------------------------------------
 
-const char *AudioPolicyService::kInputSourceNames[AUDIO_SOURCE_CNT -1] = {
+/*static*/ const char * const AudioPolicyService::kInputSourceNames[AUDIO_SOURCE_CNT -1] = {
     MIC_SRC_TAG,
     VOICE_UL_SRC_TAG,
     VOICE_DL_SRC_TAG,
@@ -1152,7 +1141,7 @@
     if (param == NULL && value == NULL) {
         // try to parse simple parameter form {int int}
         param = root->first_child;
-        if (param) {
+        if (param != NULL) {
             // Note: that a pair of random strings is read as 0 0
             int *ptr = (int *)fx_param->data;
             int *ptr2 = (int *)((char *)param + sizeof(effect_param_t));
@@ -1241,7 +1230,7 @@
             node = node->next;
             continue;
         }
-        EffectDesc *effect = new EffectDesc(*effects[i]);
+        EffectDesc *effect = new EffectDesc(*effects[i]);   // deep copy
         loadEffectParameters(node, effect->mParams);
         ALOGV("loadInputSource() adding effect %s uuid %08x", effect->mName, effect->mUuid.timeLow);
         source->mEffects.add(effect);
@@ -1292,11 +1281,7 @@
         ALOGW("loadEffect() invalid uuid %s", node->value);
         return NULL;
     }
-    EffectDesc *effect = new EffectDesc();
-    effect->mName = strdup(root->name);
-    memcpy(&effect->mUuid, &uuid, sizeof(effect_uuid_t));
-
-    return effect;
+    return new EffectDesc(root->name, uuid);
 }
 
 status_t AudioPolicyService::loadEffects(cnode *root, Vector <EffectDesc *>& effects)
@@ -1348,13 +1333,13 @@
 static audio_io_handle_t aps_open_output(void *service,
                                              uint32_t *pDevices,
                                              uint32_t *pSamplingRate,
-                                             uint32_t *pFormat,
+                                             audio_format_t *pFormat,
                                              uint32_t *pChannels,
                                              uint32_t *pLatencyMs,
                                              audio_policy_output_flags_t flags)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == NULL) {
+    if (af == 0) {
         ALOGW("%s: could not get AudioFlinger", __func__);
         return 0;
     }
@@ -1368,7 +1353,7 @@
                                                  audio_io_handle_t output2)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == NULL) {
+    if (af == 0) {
         ALOGW("%s: could not get AudioFlinger", __func__);
         return 0;
     }
@@ -1378,7 +1363,7 @@
 static int aps_close_output(void *service, audio_io_handle_t output)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == NULL)
+    if (af == 0)
         return PERMISSION_DENIED;
 
     return af->closeOutput(output);
@@ -1387,7 +1372,7 @@
 static int aps_suspend_output(void *service, audio_io_handle_t output)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == NULL) {
+    if (af == 0) {
         ALOGW("%s: could not get AudioFlinger", __func__);
         return PERMISSION_DENIED;
     }
@@ -1398,7 +1383,7 @@
 static int aps_restore_output(void *service, audio_io_handle_t output)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == NULL) {
+    if (af == 0) {
         ALOGW("%s: could not get AudioFlinger", __func__);
         return PERMISSION_DENIED;
     }
@@ -1409,12 +1394,12 @@
 static audio_io_handle_t aps_open_input(void *service,
                                             uint32_t *pDevices,
                                             uint32_t *pSamplingRate,
-                                            uint32_t *pFormat,
+                                            audio_format_t *pFormat,
                                             uint32_t *pChannels,
-                                            uint32_t acoustics)
+                                            audio_in_acoustics_t acoustics)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == NULL) {
+    if (af == 0) {
         ALOGW("%s: could not get AudioFlinger", __func__);
         return 0;
     }
@@ -1426,7 +1411,7 @@
 static int aps_close_input(void *service, audio_io_handle_t input)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == NULL)
+    if (af == 0)
         return PERMISSION_DENIED;
 
     return af->closeInput(input);
@@ -1436,7 +1421,7 @@
                                      audio_io_handle_t output)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == NULL)
+    if (af == 0)
         return PERMISSION_DENIED;
 
     return af->setStreamOutput(stream, output);
@@ -1447,10 +1432,10 @@
                                 audio_io_handle_t dst_output)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == NULL)
+    if (af == 0)
         return PERMISSION_DENIED;
 
-    return af->moveEffects(session, (int)src_output, (int)dst_output);
+    return af->moveEffects(session, src_output, dst_output);
 }
 
 static char * aps_get_parameters(void *service, audio_io_handle_t io_handle,
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
index d898a53..9ed905d 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audioflinger/AudioPolicyService.h
@@ -19,6 +19,7 @@
 
 #include <cutils/misc.h>
 #include <cutils/config_utils.h>
+#include <utils/String8.h>
 #include <utils/Vector.h>
 #include <utils/SortedVector.h>
 #include <binder/BinderService.h>
@@ -31,8 +32,6 @@
 
 namespace android {
 
-class String8;
-
 // ----------------------------------------------------------------------------
 
 class AudioPolicyService :
@@ -59,16 +58,15 @@
     virtual audio_policy_dev_state_t getDeviceConnectionState(
                                                                 audio_devices_t device,
                                                                 const char *device_address);
-    virtual status_t setPhoneState(int state);
-    virtual status_t setRingerMode(uint32_t mode, uint32_t mask);
+    virtual status_t setPhoneState(audio_mode_t state);
     virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
     virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
     virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
                                         uint32_t samplingRate = 0,
-                                        uint32_t format = AUDIO_FORMAT_DEFAULT,
+                                        audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                         uint32_t channels = 0,
                                         audio_policy_output_flags_t flags =
-                                            AUDIO_POLICY_OUTPUT_FLAG_INDIRECT);
+                                                AUDIO_POLICY_OUTPUT_FLAG_NONE);
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
                                  int session = 0);
@@ -76,12 +74,12 @@
                                 audio_stream_type_t stream,
                                 int session = 0);
     virtual void releaseOutput(audio_io_handle_t output);
-    virtual audio_io_handle_t getInput(int inputSource,
+    virtual audio_io_handle_t getInput(audio_source_t inputSource,
                                     uint32_t samplingRate = 0,
-                                    uint32_t format = AUDIO_FORMAT_DEFAULT,
+                                    audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                     uint32_t channels = 0,
                                     audio_in_acoustics_t acoustics =
-                                            (audio_in_acoustics_t)0,
+                                            (audio_in_acoustics_t)0 /*AUDIO_IN_ACOUSTICS_NONE*/,
                                     int audioSession = 0);
     virtual status_t startInput(audio_io_handle_t input);
     virtual status_t stopInput(audio_io_handle_t input);
@@ -89,11 +87,15 @@
     virtual status_t initStreamVolume(audio_stream_type_t stream,
                                       int indexMin,
                                       int indexMax);
-    virtual status_t setStreamVolumeIndex(audio_stream_type_t stream, int index);
-    virtual status_t getStreamVolumeIndex(audio_stream_type_t stream, int *index);
+    virtual status_t setStreamVolumeIndex(audio_stream_type_t stream,
+                                          int index,
+                                          audio_devices_t device);
+    virtual status_t getStreamVolumeIndex(audio_stream_type_t stream,
+                                          int *index,
+                                          audio_devices_t device);
 
     virtual uint32_t getStrategyForStream(audio_stream_type_t stream);
-    virtual uint32_t getDevicesForStream(audio_stream_type_t stream);
+    virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream);
 
     virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc);
     virtual status_t registerEffect(effect_descriptor_t *desc,
@@ -103,7 +105,7 @@
                                     int id);
     virtual status_t unregisterEffect(int id);
     virtual status_t setEffectEnabled(int id, bool enabled);
-    virtual bool isStreamActive(int stream, uint32_t inPastMs = 0) const;
+    virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
 
     virtual status_t queryDefaultPreProcessing(int audioSession,
                                               effect_descriptor_t *descriptors,
@@ -169,10 +171,13 @@
         virtual     bool        threadLoop();
 
                     void        exit();
-                    void        startToneCommand(int type = 0, int stream = 0);
+                    void        startToneCommand(ToneGenerator::tone_type type,
+                                                 audio_stream_type_t stream);
                     void        stopToneCommand();
-                    status_t    volumeCommand(int stream, float volume, int output, int delayMs = 0);
-                    status_t    parametersCommand(int ioHandle, const char *keyValuePairs, int delayMs = 0);
+                    status_t    volumeCommand(audio_stream_type_t stream, float volume,
+                                            audio_io_handle_t output, int delayMs = 0);
+                    status_t    parametersCommand(audio_io_handle_t ioHandle,
+                                            const char *keyValuePairs, int delayMs = 0);
                     status_t    voiceVolumeCommand(float volume, int delayMs = 0);
                     void        insertCommand_l(AudioCommand *command, int delayMs = 0);
 
@@ -196,20 +201,20 @@
 
         class ToneData {
         public:
-            int mType;      // tone type (START_TONE only)
-            int mStream;    // stream type (START_TONE only)
+            ToneGenerator::tone_type mType; // tone type (START_TONE only)
+            audio_stream_type_t mStream;    // stream type (START_TONE only)
         };
 
         class VolumeData {
         public:
-            int mStream;
+            audio_stream_type_t mStream;
             float mVolume;
-            int mIO;
+            audio_io_handle_t mIO;
         };
 
         class ParametersData {
         public:
-            int mIO;
+            audio_io_handle_t mIO;
             String8 mKeyValuePairs;
         };
 
@@ -228,8 +233,33 @@
 
     class EffectDesc {
     public:
-        EffectDesc() {}
-        virtual ~EffectDesc() {}
+        EffectDesc(const char *name, const effect_uuid_t& uuid) :
+                        mName(strdup(name)),
+                        mUuid(uuid) { }
+        EffectDesc(const EffectDesc& orig) :
+                        mName(strdup(orig.mName)),
+                        mUuid(orig.mUuid) {
+                            // deep copy mParams
+                            for (size_t k = 0; k < orig.mParams.size(); k++) {
+                                effect_param_t *origParam = orig.mParams[k];
+                                // psize and vsize are rounded up to an int boundary for allocation
+                                size_t origSize = sizeof(effect_param_t) +
+                                                  ((origParam->psize + 3) & ~3) +
+                                                  ((origParam->vsize + 3) & ~3);
+                                effect_param_t *dupParam = (effect_param_t *) malloc(origSize);
+                                memcpy(dupParam, origParam, origSize);
+                                // This works because the param buffer allocation is also done by
+                                // multiples of 4 bytes originally. In theory we should memcpy only
+                                // the actual param size, that is without rounding vsize.
+                                mParams.add(dupParam);
+                            }
+                        }
+        /*virtual*/ ~EffectDesc() {
+            free(mName);
+            for (size_t k = 0; k < mParams.size(); k++) {
+                free(mParams[k]);
+            }
+        }
         char *mName;
         effect_uuid_t mUuid;
         Vector <effect_param_t *> mParams;
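
The deep-copy constructor added above duplicates each effect_param_t with its size recomputed as sizeof(effect_param_t) plus psize and vsize each rounded up to a 4-byte boundary, matching how the blobs were allocated when the config was parsed (as the in-code comments note). For example, psize = 5 and vsize = 6 both round to 8, so origSize = sizeof(effect_param_t) + 16. The rounding idiom in isolation:

    #include <cstddef>

    // Round n up to the next multiple of 4, as in ((origParam->psize + 3) & ~3).
    static inline size_t roundUp4(size_t n)
    {
        return (n + 3) & ~(size_t)3;
    }
    // roundUp4(5) == 8, roundUp4(6) == 8, roundUp4(8) == 8
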
@@ -238,22 +268,26 @@
     class InputSourceDesc {
     public:
         InputSourceDesc() {}
-        virtual ~InputSourceDesc() {}
+        /*virtual*/ ~InputSourceDesc() {
+            for (size_t j = 0; j < mEffects.size(); j++) {
+                delete mEffects[j];
+            }
+        }
         Vector <EffectDesc *> mEffects;
     };
 
 
     class InputDesc {
     public:
-        InputDesc() {}
-        virtual ~InputDesc() {}
-        int mSessionId;
+        InputDesc(int session) : mSessionId(session) {}
+        /*virtual*/ ~InputDesc() {}
+        const int mSessionId;
         Vector< sp<AudioEffect> >mEffects;
     };
 
-    static const char *kInputSourceNames[AUDIO_SOURCE_CNT -1];
+    static const char * const kInputSourceNames[AUDIO_SOURCE_CNT -1];
 
-    void setPreProcessorEnabled(InputDesc *inputDesc, bool enabled);
+    void setPreProcessorEnabled(const InputDesc *inputDesc, bool enabled);
     status_t loadPreProcessorConfig(const char *path);
     status_t loadEffects(cnode *root, Vector <EffectDesc *>& effects);
     EffectDesc *loadEffect(cnode *root);
@@ -277,8 +311,8 @@
 
     mutable Mutex mLock;    // prevents concurrent access to AudioPolicy manager functions changing
                             // device connection state  or routing
-    sp <AudioCommandThread> mAudioCommandThread;    // audio commands thread
-    sp <AudioCommandThread> mTonePlaybackThread;     // tone playback thread
+    sp<AudioCommandThread> mAudioCommandThread;     // audio commands thread
+    sp<AudioCommandThread> mTonePlaybackThread;     // tone playback thread
     struct audio_policy_device *mpAudioPolicyDev;
     struct audio_policy *mpAudioPolicy;
     KeyedVector< audio_source_t, InputSourceDesc* > mInputSources;
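
The EffectDesc copy constructor added above duplicates each effect_param_t blob using a size of sizeof(effect_param_t) plus psize and vsize each rounded up to a 4-byte boundary. A minimal standalone sketch of that size computation, with a made-up FakeParam struct standing in for the real effect_param_t header:

    #include <stddef.h>
    #include <stdint.h>

    // Sketch only: FakeParam mimics the layout relevant here (a fixed header
    // followed by psize bytes of parameter data and vsize bytes of value data).
    struct FakeParam {
        uint32_t status;
        uint32_t psize;
        uint32_t vsize;
        // char data[] follows in the real struct
    };

    // Round n up to the next multiple of 4, as the copy constructor does.
    static inline size_t roundUp4(size_t n) { return (n + 3) & ~(size_t)3; }

    // Allocation size of one parameter blob: header + rounded psize + rounded vsize.
    static inline size_t paramAllocSize(uint32_t psize, uint32_t vsize) {
        return sizeof(FakeParam) + roundUp4(psize) + roundUp4(vsize);
    }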
diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp
index 4586b54..fbb54cf 100644
--- a/services/audioflinger/AudioResampler.cpp
+++ b/services/audioflinger/AudioResampler.cpp
@@ -23,8 +23,10 @@
 #include <cutils/log.h>
 #include <cutils/properties.h>
 #include "AudioResampler.h"
+#if 0
 #include "AudioResamplerSinc.h"
 #include "AudioResamplerCubic.h"
+#endif
 
 #ifdef __arm__
 #include <machine/cpu-features.h>
@@ -99,6 +101,7 @@
         ALOGV("Create linear Resampler");
         resampler = new AudioResamplerOrder1(bitDepth, inChannelCount, sampleRate);
         break;
+#if 0
     case MED_QUALITY:
         ALOGV("Create cubic Resampler");
         resampler = new AudioResamplerCubic(bitDepth, inChannelCount, sampleRate);
@@ -107,6 +110,7 @@
         ALOGV("Create sinc Resampler");
         resampler = new AudioResamplerSinc(bitDepth, inChannelCount, sampleRate);
         break;
+#endif
     }
 
     // initialize resampler
@@ -118,7 +122,8 @@
         int32_t sampleRate) :
     mBitDepth(bitDepth), mChannelCount(inChannelCount),
             mSampleRate(sampleRate), mInSampleRate(sampleRate), mInputIndex(0),
-            mPhaseFraction(0) {
+            mPhaseFraction(0), mLocalTimeFreq(0),
+            mPTS(AudioBufferProvider::kInvalidPTS) {
     // sanity check on format
     if ((bitDepth != 16) ||(inChannelCount < 1) || (inChannelCount > 2)) {
         ALOGE("Unsupported sample format, %d bits, %d channels", bitDepth,
@@ -130,12 +135,6 @@
     mVolume[0] = mVolume[1] = 0;
     mBuffer.frameCount = 0;
 
-    // save format for quick lookup
-    if (inChannelCount == 1) {
-        mFormat = MONO_16_BIT;
-    } else {
-        mFormat = STEREO_16_BIT;
-    }
 }
 
 AudioResampler::~AudioResampler() {
@@ -152,6 +151,23 @@
     mVolume[1] = right;
 }
 
+void AudioResampler::setLocalTimeFreq(uint64_t freq) {
+    mLocalTimeFreq = freq;
+}
+
+void AudioResampler::setPTS(int64_t pts) {
+    mPTS = pts;
+}
+
+int64_t AudioResampler::calculateOutputPTS(int outputFrameIndex) {
+
+    if (mPTS == AudioBufferProvider::kInvalidPTS) {
+        return AudioBufferProvider::kInvalidPTS;
+    } else {
+        return mPTS + ((outputFrameIndex * mLocalTimeFreq) / mSampleRate);
+    }
+}
+
 void AudioResampler::reset() {
     mInputIndex = 0;
     mPhaseFraction = 0;
@@ -190,7 +206,7 @@
     size_t outputSampleCount = outFrameCount * 2;
     size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate;
 
-    // ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d\n",
+    // ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d",
     //      outFrameCount, inputIndex, phaseFraction, phaseIncrement);
 
     while (outputIndex < outputSampleCount) {
@@ -198,26 +214,27 @@
         // buffer is empty, fetch a new one
         while (mBuffer.frameCount == 0) {
             mBuffer.frameCount = inFrameCount;
-            provider->getNextBuffer(&mBuffer);
+            provider->getNextBuffer(&mBuffer,
+                                    calculateOutputPTS(outputIndex / 2));
             if (mBuffer.raw == NULL) {
                 goto resampleStereo16_exit;
             }
 
-            // ALOGE("New buffer fetched: %d frames\n", mBuffer.frameCount);
+            // ALOGE("New buffer fetched: %d frames", mBuffer.frameCount);
             if (mBuffer.frameCount > inputIndex) break;
 
             inputIndex -= mBuffer.frameCount;
             mX0L = mBuffer.i16[mBuffer.frameCount*2-2];
             mX0R = mBuffer.i16[mBuffer.frameCount*2-1];
             provider->releaseBuffer(&mBuffer);
-             // mBuffer.frameCount == 0 now so we reload a new buffer
+            // mBuffer.frameCount == 0 now so we reload a new buffer
         }
 
         int16_t *in = mBuffer.i16;
 
         // handle boundary case
         while (inputIndex == 0) {
-            // ALOGE("boundary case\n");
+            // ALOGE("boundary case");
             out[outputIndex++] += vl * Interp(mX0L, in[0], phaseFraction);
             out[outputIndex++] += vr * Interp(mX0R, in[1], phaseFraction);
             Advance(&inputIndex, &phaseFraction, phaseIncrement);
@@ -226,7 +243,7 @@
         }
 
         // process input samples
-        // ALOGE("general case\n");
+        // ALOGE("general case");
 
 #ifdef ASM_ARM_RESAMP1  // asm optimisation for ResamplerOrder1
         if (inputIndex + 2 < mBuffer.frameCount) {
@@ -248,7 +265,7 @@
             Advance(&inputIndex, &phaseFraction, phaseIncrement);
         }
 
-        // ALOGE("loop done - outputIndex=%d, inputIndex=%d\n", outputIndex, inputIndex);
+        // ALOGE("loop done - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
 
         // if done with buffer, save samples
         if (inputIndex >= mBuffer.frameCount) {
@@ -265,7 +282,7 @@
         }
     }
 
-    // ALOGE("output buffer full - outputIndex=%d, inputIndex=%d\n", outputIndex, inputIndex);
+    // ALOGE("output buffer full - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
 
 resampleStereo16_exit:
     // save state
@@ -286,19 +303,20 @@
     size_t outputSampleCount = outFrameCount * 2;
     size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate;
 
-    // ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d\n",
+    // ALOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d",
     //      outFrameCount, inputIndex, phaseFraction, phaseIncrement);
     while (outputIndex < outputSampleCount) {
         // buffer is empty, fetch a new one
         while (mBuffer.frameCount == 0) {
             mBuffer.frameCount = inFrameCount;
-            provider->getNextBuffer(&mBuffer);
+            provider->getNextBuffer(&mBuffer,
+                                    calculateOutputPTS(outputIndex / 2));
             if (mBuffer.raw == NULL) {
                 mInputIndex = inputIndex;
                 mPhaseFraction = phaseFraction;
                 goto resampleMono16_exit;
             }
-            // ALOGE("New buffer fetched: %d frames\n", mBuffer.frameCount);
+            // ALOGE("New buffer fetched: %d frames", mBuffer.frameCount);
             if (mBuffer.frameCount >  inputIndex) break;
 
             inputIndex -= mBuffer.frameCount;
@@ -310,7 +328,7 @@
 
         // handle boundary case
         while (inputIndex == 0) {
-            // ALOGE("boundary case\n");
+            // ALOGE("boundary case");
             int32_t sample = Interp(mX0L, in[0], phaseFraction);
             out[outputIndex++] += vl * sample;
             out[outputIndex++] += vr * sample;
@@ -320,7 +338,7 @@
         }
 
         // process input samples
-        // ALOGE("general case\n");
+        // ALOGE("general case");
 
 #ifdef ASM_ARM_RESAMP1  // asm optimisation for ResamplerOrder1
         if (inputIndex + 2 < mBuffer.frameCount) {
@@ -343,7 +361,7 @@
         }
 
 
-        // ALOGE("loop done - outputIndex=%d, inputIndex=%d\n", outputIndex, inputIndex);
+        // ALOGE("loop done - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
 
         // if done with buffer, save samples
         if (inputIndex >= mBuffer.frameCount) {
@@ -359,7 +377,7 @@
         }
     }
 
-    // ALOGE("output buffer full - outputIndex=%d, inputIndex=%d\n", outputIndex, inputIndex);
+    // ALOGE("output buffer full - outputIndex=%d, inputIndex=%d", outputIndex, inputIndex);
 
 resampleMono16_exit:
     // save state
@@ -390,6 +408,7 @@
 *       phaseFraction : phase fraction for next interpolation
 *
 *******************************************************************/
+__attribute__((noinline))
 void AudioResamplerOrder1::AsmMono16Loop(int16_t *in, int32_t* maxOutPt, int32_t maxInIdx,
             size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr,
             uint32_t &phaseFraction, uint32_t phaseIncrement)
@@ -500,6 +519,7 @@
 *       phaseFraction : phase fraction for next interpolation
 *
 *******************************************************************/
+__attribute__((noinline))
 void AudioResamplerOrder1::AsmStereo16Loop(int16_t *in, int32_t* maxOutPt, int32_t maxInIdx,
             size_t &outputIndex, int32_t* out, size_t &inputIndex, int32_t vl, int32_t vr,
             uint32_t &phaseFraction, uint32_t phaseIncrement)
@@ -600,6 +620,5 @@
 
 
 // ----------------------------------------------------------------------------
-}
-; // namespace android
 
+} // namespace android
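
The new PTS plumbing above boils down to one formula: outputPTS = mPTS + outputFrameIndex * mLocalTimeFreq / mSampleRate, with kInvalidPTS passed through untouched. A minimal sketch of the same arithmetic, assuming (purely for illustration) a 1 MHz local time base and a stand-in invalid-PTS value; the real constant lives in AudioBufferProvider.h:

    #include <stdint.h>
    #include <stdio.h>

    static const int64_t kInvalidPTS = INT64_MIN;  // stand-in, for illustration only

    // Same formula as AudioResampler::calculateOutputPTS().
    static int64_t outputPTS(int64_t basePTS, uint64_t localTimeFreq,
                             int32_t sampleRate, int outputFrameIndex) {
        if (basePTS == kInvalidPTS) return kInvalidPTS;
        return basePTS + (int64_t)((outputFrameIndex * localTimeFreq) / sampleRate);
    }

    int main() {
        // 441 output frames at 44100 Hz is 10 ms, i.e. 10000 ticks at 1 MHz.
        printf("%lld\n", (long long)outputPTS(500000, 1000000, 44100, 441));  // 510000
        return 0;
    }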
diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h
index ffa690a..1610e00 100644
--- a/services/audioflinger/AudioResampler.h
+++ b/services/audioflinger/AudioResampler.h
@@ -33,7 +33,7 @@
     //  HIGH_QUALITY: fixed multi-tap FIR (e.g. 48KHz->44.1KHz)
     // NOTE: high quality SRC will only be supported for
     // certain fixed rate conversions. Sample rate cannot be
-    // changed dynamically. 
+    // changed dynamically.
     enum src_quality {
         DEFAULT=0,
         LOW_QUALITY=1,
@@ -49,12 +49,16 @@
     virtual void init() = 0;
     virtual void setSampleRate(int32_t inSampleRate);
     virtual void setVolume(int16_t left, int16_t right);
+    virtual void setLocalTimeFreq(uint64_t freq);
+
+    // set the PTS of the next buffer output by the resampler
+    virtual void setPTS(int64_t pts);
 
     virtual void resample(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider) = 0;
 
     virtual void reset();
-    virtual size_t getUnreleasedFrames() { return mInputIndex; }
+    virtual size_t getUnreleasedFrames() const { return mInputIndex; }
 
 protected:
     // number of bits for phase fraction - 30 bits allows nearly 2x downsampling
@@ -66,16 +70,17 @@
     // multiplier to calculate fixed point phase increment
     static const double kPhaseMultiplier = 1L << kNumPhaseBits;
 
-    enum format {MONO_16_BIT, STEREO_16_BIT};
     AudioResampler(int bitDepth, int inChannelCount, int32_t sampleRate);
 
     // prevent copying
     AudioResampler(const AudioResampler&);
     AudioResampler& operator=(const AudioResampler&);
 
-    int32_t mBitDepth;
-    int32_t mChannelCount;
-    int32_t mSampleRate;
+    int64_t calculateOutputPTS(int outputFrameIndex);
+
+    const int32_t mBitDepth;
+    const int32_t mChannelCount;
+    const int32_t mSampleRate;
     int32_t mInSampleRate;
     AudioBufferProvider::Buffer mBuffer;
     union {
@@ -83,10 +88,11 @@
         uint32_t mVolumeRL;
     };
     int16_t mTargetVolume[2];
-    format mFormat;
     size_t mInputIndex;
     int32_t mPhaseIncrement;
     uint32_t mPhaseFraction;
+    uint64_t mLocalTimeFreq;
+    int64_t mPTS;
 };
 
 // ----------------------------------------------------------------------------
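
The header above exposes setLocalTimeFreq() and setPTS() so the caller can tag each resample pass with a presentation time before calling resample(). A hedged sketch of the expected caller-side flow; everything except the three AudioResampler methods is invented for illustration:

    #include "AudioResampler.h"   // for AudioResampler / AudioBufferProvider

    using namespace android;

    // Assumed caller-side flow; the function and parameter names are made up.
    static void resampleOnePass(AudioResampler* resampler, AudioBufferProvider* provider,
                                int32_t* mixBuffer, size_t outFrames,
                                int64_t bufferPTS, uint64_t localTimeFreq) {
        resampler->setLocalTimeFreq(localTimeFreq);  // ticks of the local clock per second
        resampler->setPTS(bufferPTS);                // PTS of the first output frame below
        resampler->resample(mixBuffer, outFrames, provider);
    }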
diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp
index 47205ba..18e59e9 100644
--- a/services/audioflinger/AudioResamplerCubic.cpp
+++ b/services/audioflinger/AudioResamplerCubic.cpp
@@ -65,7 +65,7 @@
     // fetch first buffer
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = inFrameCount;
-        provider->getNextBuffer(&mBuffer);
+        provider->getNextBuffer(&mBuffer, mPTS);
         if (mBuffer.raw == NULL)
             return;
         // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
@@ -95,11 +95,12 @@
                 inputIndex = 0;
                 provider->releaseBuffer(&mBuffer);
                 mBuffer.frameCount = inFrameCount;
-                provider->getNextBuffer(&mBuffer);
+                provider->getNextBuffer(&mBuffer,
+                                        calculateOutputPTS(outputIndex / 2));
                 if (mBuffer.raw == NULL)
                     goto save_state;  // ugly, but efficient
                 in = mBuffer.i16;
-                // ALOGW("New buffer: offset=%p, frames=%d\n", mBuffer.raw, mBuffer.frameCount);
+                // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
             }
 
             // advance sample state
@@ -130,10 +131,10 @@
     // fetch first buffer
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = inFrameCount;
-        provider->getNextBuffer(&mBuffer);
+        provider->getNextBuffer(&mBuffer, mPTS);
         if (mBuffer.raw == NULL)
             return;
-        // ALOGW("New buffer: offset=%p, frames=%d\n", mBuffer.raw, mBuffer.frameCount);
+        // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
     }
     int16_t *in = mBuffer.i16;
 
@@ -160,7 +161,8 @@
                 inputIndex = 0;
                 provider->releaseBuffer(&mBuffer);
                 mBuffer.frameCount = inFrameCount;
-                provider->getNextBuffer(&mBuffer);
+                provider->getNextBuffer(&mBuffer,
+                                        calculateOutputPTS(outputIndex / 2));
                 if (mBuffer.raw == NULL)
                     goto save_state;  // ugly, but efficient
                 // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
@@ -181,4 +183,3 @@
 // ----------------------------------------------------------------------------
 }
 ; // namespace android
-
diff --git a/services/audioflinger/AudioResamplerCubic.h b/services/audioflinger/AudioResamplerCubic.h
index b72b62a..892785a 100644
--- a/services/audioflinger/AudioResamplerCubic.h
+++ b/services/audioflinger/AudioResamplerCubic.h
@@ -55,7 +55,7 @@
         p->y1 = p->y2;
         p->y2 = p->y3;
         p->y3 = in;
-        p->a = (3 * (p->y1 - p->y2) - p->y0 + p->y3) >> 1;            
+        p->a = (3 * (p->y1 - p->y2) - p->y0 + p->y3) >> 1;
         p->b = (p->y2 << 1) + p->y0 - (((5 * p->y1 + p->y3)) >> 1);
         p->c = (p->y2 - p->y0) >> 1;
     }
diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp
index 9e5e254..76662d8 100644
--- a/services/audioflinger/AudioResamplerSinc.cpp
+++ b/services/audioflinger/AudioResamplerSinc.cpp
@@ -199,33 +199,33 @@
     size_t outputSampleCount = outFrameCount * 2;
     size_t inFrameCount = (outFrameCount*mInSampleRate)/mSampleRate;
 
-    AudioBufferProvider::Buffer& buffer(mBuffer);
     while (outputIndex < outputSampleCount) {
         // buffer is empty, fetch a new one
-        while (buffer.frameCount == 0) {
-            buffer.frameCount = inFrameCount;
-            provider->getNextBuffer(&buffer);
-            if (buffer.raw == NULL) {
+        while (mBuffer.frameCount == 0) {
+            mBuffer.frameCount = inFrameCount;
+            provider->getNextBuffer(&mBuffer,
+                                    calculateOutputPTS(outputIndex / 2));
+            if (mBuffer.raw == NULL) {
                 goto resample_exit;
             }
             const uint32_t phaseIndex = phaseFraction >> kNumPhaseBits;
             if (phaseIndex == 1) {
                 // read one frame
-                read<CHANNELS>(impulse, phaseFraction, buffer.i16, inputIndex);
+                read<CHANNELS>(impulse, phaseFraction, mBuffer.i16, inputIndex);
             } else if (phaseIndex == 2) {
                 // read 2 frames
-                read<CHANNELS>(impulse, phaseFraction, buffer.i16, inputIndex);
+                read<CHANNELS>(impulse, phaseFraction, mBuffer.i16, inputIndex);
                 inputIndex++;
                 if (inputIndex >= mBuffer.frameCount) {
                     inputIndex -= mBuffer.frameCount;
-                    provider->releaseBuffer(&buffer);
+                    provider->releaseBuffer(&mBuffer);
                 } else {
-                    read<CHANNELS>(impulse, phaseFraction, buffer.i16, inputIndex);
+                    read<CHANNELS>(impulse, phaseFraction, mBuffer.i16, inputIndex);
                 }
-           }
+            }
         }
-        int16_t *in = buffer.i16;
-        const size_t frameCount = buffer.frameCount;
+        int16_t *in = mBuffer.i16;
+        const size_t frameCount = mBuffer.frameCount;
 
         // Always read-in the first samples from the input buffer
         int16_t* head = impulse + halfNumCoefs*CHANNELS;
@@ -247,7 +247,7 @@
                 if (inputIndex >= frameCount)
                     break;  // need a new buffer
                 read<CHANNELS>(impulse, phaseFraction, in, inputIndex);
-            } else if(phaseIndex == 2) {    // maximum value
+            } else if (phaseIndex == 2) {    // maximum value
                 inputIndex++;
                 if (inputIndex >= frameCount)
                     break;  // 0 frame available, 2 frames needed
@@ -264,7 +264,7 @@
         // if done with buffer, save samples
         if (inputIndex >= frameCount) {
             inputIndex -= frameCount;
-            provider->releaseBuffer(&buffer);
+            provider->releaseBuffer(&mBuffer);
         }
     }
 
@@ -284,7 +284,7 @@
 **/
 void AudioResamplerSinc::read(
         int16_t*& impulse, uint32_t& phaseFraction,
-        int16_t const* in, size_t inputIndex)
+        const int16_t* in, size_t inputIndex)
 {
     const uint32_t phaseIndex = phaseFraction >> kNumPhaseBits;
     impulse += CHANNELS;
@@ -302,7 +302,7 @@
 
 template<int CHANNELS>
 void AudioResamplerSinc::filterCoefficient(
-        int32_t& l, int32_t& r, uint32_t phase, int16_t const *samples)
+        int32_t& l, int32_t& r, uint32_t phase, const int16_t *samples)
 {
     // compute the index of the coefficient on the positive side and
     // negative side
@@ -317,9 +317,9 @@
 
     l = 0;
     r = 0;
-    int32_t const* coefs = mFirCoefs;
-    int16_t const *sP = samples;
-    int16_t const *sN = samples+CHANNELS;
+    const int32_t* coefs = mFirCoefs;
+    const int16_t *sP = samples;
+    const int16_t *sN = samples+CHANNELS;
     for (unsigned int i=0 ; i<halfNumCoefs/4 ; i++) {
         interpolate<CHANNELS>(l, r, coefs+indexP, lerpP, sP);
         interpolate<CHANNELS>(l, r, coefs+indexN, lerpN, sN);
@@ -339,13 +339,13 @@
 template<int CHANNELS>
 void AudioResamplerSinc::interpolate(
         int32_t& l, int32_t& r,
-        int32_t const* coefs, int16_t lerp, int16_t const* samples)
+        const int32_t* coefs, int16_t lerp, const int16_t* samples)
 {
     int32_t c0 = coefs[0];
     int32_t c1 = coefs[1];
     int32_t sinc = mulAdd(lerp, (c1-c0)<<1, c0);
     if (CHANNELS == 2) {
-        uint32_t rl = *reinterpret_cast<uint32_t const*>(samples);
+        uint32_t rl = *reinterpret_cast<const uint32_t*>(samples);
         l = mulAddRL(1, rl, sinc, l);
         r = mulAddRL(0, rl, sinc, r);
     } else {
@@ -355,4 +355,3 @@
 
 // ----------------------------------------------------------------------------
 }; // namespace android
-
diff --git a/services/audioflinger/AudioResamplerSinc.h b/services/audioflinger/AudioResamplerSinc.h
index e6cb90b..f0a07b8 100644
--- a/services/audioflinger/AudioResamplerSinc.h
+++ b/services/audioflinger/AudioResamplerSinc.h
@@ -31,7 +31,7 @@
 public:
     AudioResamplerSinc(int bitDepth, int inChannelCount, int32_t sampleRate);
 
-    ~AudioResamplerSinc();
+    virtual ~AudioResamplerSinc();
 
     virtual void resample(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider);
@@ -44,22 +44,22 @@
 
     template<int CHANNELS>
     inline void filterCoefficient(
-            int32_t& l, int32_t& r, uint32_t phase, int16_t const *samples);
+            int32_t& l, int32_t& r, uint32_t phase, const int16_t *samples);
 
     template<int CHANNELS>
     inline void interpolate(
             int32_t& l, int32_t& r,
-            int32_t const* coefs, int16_t lerp, int16_t const* samples);
+            const int32_t* coefs, int16_t lerp, const int16_t* samples);
 
     template<int CHANNELS>
     inline void read(int16_t*& impulse, uint32_t& phaseFraction,
-            int16_t const* in, size_t inputIndex);
+            const int16_t* in, size_t inputIndex);
 
     int16_t *mState;
     int16_t *mImpulse;
     int16_t *mRingFull;
 
-    int32_t const * mFirCoefs;
+    const int32_t * mFirCoefs;
     static const int32_t mFirCoefsDown[];
     static const int32_t mFirCoefsUp[];
 
diff --git a/services/audioflinger/ServiceUtilities.cpp b/services/audioflinger/ServiceUtilities.cpp
new file mode 100644
index 0000000..6a58852
--- /dev/null
+++ b/services/audioflinger/ServiceUtilities.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+#include <binder/PermissionCache.h>
+#include "ServiceUtilities.h"
+
+namespace android {
+
+// This optimization assumes the mediaserver process doesn't fork, which it doesn't.
+const pid_t getpid_cached = getpid();
+
+bool recordingAllowed() {
+    if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
+    static const String16 sRecordAudio("android.permission.RECORD_AUDIO");
+    // don't use PermissionCache; this is not a system permission
+    bool ok = checkCallingPermission(sRecordAudio);
+    if (!ok) ALOGE("Request requires android.permission.RECORD_AUDIO");
+    return ok;
+}
+
+bool settingsAllowed() {
+    if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
+    static const String16 sAudioSettings("android.permission.MODIFY_AUDIO_SETTINGS");
+    // don't use PermissionCache; this is not a system permission
+    bool ok = checkCallingPermission(sAudioSettings);
+    if (!ok) ALOGE("Request requires android.permission.MODIFY_AUDIO_SETTINGS");
+    return ok;
+}
+
+bool dumpAllowed() {
+    // don't optimize for same pid, since mediaserver never dumps itself
+    static const String16 sDump("android.permission.DUMP");
+    // OK to use PermissionCache; this is a system permission
+    bool ok = PermissionCache::checkCallingPermission(sDump);
+    // convention is for caller to dump an error message to fd instead of logging here
+    //if (!ok) ALOGE("Request requires android.permission.DUMP");
+    return ok;
+}
+
+} // namespace android
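
ServiceUtilities.cpp above centralizes the permission checks (with the getpid_cached fast path for in-process callers). A sketch of how a binder entry point would typically use these helpers; the method shown is hypothetical, not a real AudioFlinger signature:

    #include <utils/Errors.h>
    #include <utils/String8.h>
    #include "ServiceUtilities.h"

    namespace android {

    // Hypothetical service method: reject callers without MODIFY_AUDIO_SETTINGS.
    status_t setSomeAudioParameter(const String8& keyValuePairs) {
        if (!settingsAllowed()) {
            return PERMISSION_DENIED;
        }
        // ... apply keyValuePairs ...
        return NO_ERROR;
    }

    } // namespace android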
diff --git a/include/media/thread_init.h b/services/audioflinger/ServiceUtilities.h
similarity index 73%
rename from include/media/thread_init.h
rename to services/audioflinger/ServiceUtilities.h
index 2feac86..f77ec5b 100644
--- a/include/media/thread_init.h
+++ b/services/audioflinger/ServiceUtilities.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (C) 2012 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,11 +14,14 @@
  * limitations under the License.
  */
 
-#ifndef THREAD_INIT_H
-#define THREAD_INIT_H
+#include <unistd.h>
 
-bool InitializeForThread();
-void UninitializeForThread();
+namespace android {
 
-#endif /* THREAD_INIT_H*/
-	
+extern const pid_t getpid_cached;
+
+bool recordingAllowed();
+bool settingsAllowed();
+bool dumpAllowed();
+
+}
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index e35435e..3cae1f5 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -15,6 +15,7 @@
     libbinder \
     libcutils \
     libmedia \
+    libmedia_native \
     libcamera_client \
     libgui \
     libhardware
diff --git a/services/camera/libcameraservice/CameraHardwareInterface.h b/services/camera/libcameraservice/CameraHardwareInterface.h
index 34087b5..87a0802 100644
--- a/services/camera/libcameraservice/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/CameraHardwareInterface.h
@@ -21,8 +21,6 @@
 #include <binder/MemoryBase.h>
 #include <binder/MemoryHeapBase.h>
 #include <utils/RefBase.h>
-#include <surfaceflinger/ISurface.h>
-#include <ui/android_native_buffer.h>
 #include <ui/GraphicBuffer.h>
 #include <camera/Camera.h>
 #include <camera/CameraParameters.h>
@@ -635,6 +633,12 @@
         return native_window_set_crop(a, &crop);
     }
 
+    static int __set_timestamp(struct preview_stream_ops *w,
+                               int64_t timestamp) {
+        ANativeWindow *a = anw(w);
+        return native_window_set_buffers_timestamp(a, timestamp);
+    }
+
     static int __set_usage(struct preview_stream_ops* w, int usage)
     {
         ANativeWindow *a = anw(w);
@@ -664,6 +668,7 @@
         mHalPreviewWindow.nw.set_buffer_count = __set_buffer_count;
         mHalPreviewWindow.nw.set_buffers_geometry = __set_buffers_geometry;
         mHalPreviewWindow.nw.set_crop = __set_crop;
+        mHalPreviewWindow.nw.set_timestamp = __set_timestamp;
         mHalPreviewWindow.nw.set_usage = __set_usage;
         mHalPreviewWindow.nw.set_swap_interval = __set_swap_interval;
 
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 918f31e..22836e3 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -21,6 +21,7 @@
 #include <stdio.h>
 #include <sys/types.h>
 #include <pthread.h>
+#include <time.h>
 
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
@@ -29,10 +30,11 @@
 #include <cutils/atomic.h>
 #include <cutils/properties.h>
 #include <gui/SurfaceTextureClient.h>
+#include <gui/Surface.h>
 #include <hardware/hardware.h>
 #include <media/AudioSystem.h>
 #include <media/mediaplayer.h>
-#include <surfaceflinger/ISurface.h>
+#include <utils/Condition.h>
 #include <utils/Errors.h>
 #include <utils/Log.h>
 #include <utils/String16.h>
@@ -42,6 +44,8 @@
 
 namespace android {
 
+#define WAIT_RELEASE_TIMEOUT 250 // 250ms
+
 // ----------------------------------------------------------------------------
 // Logging support -- this is for debugging only
 // Use "adb shell dumpsys media.camera -v 1" to change it.
@@ -64,6 +68,13 @@
     return IPCThreadState::self()->getCallingUid();
 }
 
+static long long getTimeInMs() {
+    struct timeval t;
+    t.tv_sec = t.tv_usec = 0;
+    gettimeofday(&t, NULL);
+    return t.tv_sec * 1000LL + t.tv_usec / 1000;
+}
+
 // ----------------------------------------------------------------------------
 
 // This is ugly and only safe if we never re-create the CameraService, but
@@ -131,7 +142,7 @@
 }
 
 sp<ICamera> CameraService::connect(
-        const sp<ICameraClient>& cameraClient, int cameraId) {
+        const sp<ICameraClient>& cameraClient, int cameraId, bool force, bool keep) {
     int callingPid = getCallingPid();
     sp<CameraHardwareInterface> hardware = NULL;
 
@@ -157,27 +168,73 @@
         return NULL;
     }
 
-    Mutex::Autolock lock(mServiceLock);
-    if (mClient[cameraId] != 0) {
-        client = mClient[cameraId].promote();
-        if (client != 0) {
-            if (cameraClient->asBinder() == client->getCameraClient()->asBinder()) {
-                LOG1("CameraService::connect X (pid %d) (the same client)",
-                    callingPid);
-                return client;
-            } else {
-                ALOGW("CameraService::connect X (pid %d) rejected (existing client).",
-                    callingPid);
-                return NULL;
-            }
-        }
-        mClient[cameraId].clear();
+    if (keep && !checkCallingPermission(String16("android.permission.KEEP_CAMERA"))) {
+        ALOGE("connect X (pid %d) rejected (no KEEP_CAMERA permission).", callingPid);
+        return NULL;
     }
 
-    if (mBusy[cameraId]) {
-        ALOGW("CameraService::connect X (pid %d) rejected"
-             " (camera %d is still busy).", callingPid, cameraId);
-        return NULL;
+    Mutex::Autolock lock(mServiceLock);
+    // Check if there is an existing client.
+    client = mClient[cameraId].promote();
+    if (client != 0 &&
+            cameraClient->asBinder() == client->getCameraClient()->asBinder()) {
+        LOG1("connect X (pid %d) (the same client)", callingPid);
+        return client;
+    }
+
+    if (!force) {
+        if (mClient[cameraId].promote() != 0) {
+            ALOGW("connect X (pid %d) rejected (existing client).", callingPid);
+            return NULL;
+        }
+        mClient[cameraId].clear();
+        if (mBusy[cameraId]) {
+            ALOGW("connect X (pid %d) rejected (camera %d is still busy).",
+                  callingPid, cameraId);
+            return NULL;
+        }
+    } else { // force == true
+        int i = 0;
+        long long start_time = getTimeInMs();
+        while (i < mNumberOfCameras) {
+            if (getTimeInMs() - start_time >= 3000LL) {
+                ALOGE("connect X (pid %d) rejected (timeout 3s)", callingPid);
+                return NULL;
+            }
+
+            client = mClient[i].promote();
+            if (client != 0) {
+                if (client->keep()) {
+                    ALOGW("connect X (pid %d) rejected (existing client wants to keeps the camera)",
+                          callingPid);
+                    return NULL;
+                } else {
+                    ALOGW("New client (pid %d, id=%d). Disconnect the existing client (id=%d).",
+                         callingPid, cameraId, i);
+                    // Do not hold mServiceLock because disconnect will try to get it.
+                    mServiceLock.unlock();
+                    client->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0, &i);
+                    client->waitRelease(WAIT_RELEASE_TIMEOUT);
+                    client->disconnectInternal(false);
+                    mServiceLock.lock();
+                    // Restart from the first client because a new client may have connected
+                    // when mServiceLock is unlocked.
+                    i = 0;
+                    continue;
+                }
+            }
+
+            if (mBusy[i]) {
+                // Give the client a chance to release the hardware.
+                mServiceLock.unlock();
+                usleep(10 * 1000);
+                mServiceLock.lock();
+                i = 0; // Restart from the first client
+                continue;
+            }
+
+            i++;
+        }
     }
 
     struct camera_info info;
@@ -195,9 +252,15 @@
         return NULL;
     }
 
-    client = new Client(this, cameraClient, hardware, cameraId, info.facing, callingPid);
+    client = new Client(this, cameraClient, hardware, cameraId, info.facing,
+                        callingPid, keep);
+    // Clear the local hardware reference here. Once the Autolock on mServiceLock
+    // goes out of scope, a new client may connect and disconnect this client. If
+    // this reference were still held, the CameraHardwareInterface destructor could
+    // not run and the new client would be unable to connect.
+    hardware.clear();
     mClient[cameraId] = client;
-    LOG1("CameraService::connect X");
+    LOG1("CameraService::connect X (id %d)", cameraId);
     return client;
 }
 
@@ -331,9 +394,9 @@
 CameraService::Client::Client(const sp<CameraService>& cameraService,
         const sp<ICameraClient>& cameraClient,
         const sp<CameraHardwareInterface>& hardware,
-        int cameraId, int cameraFacing, int clientPid) {
+        int cameraId, int cameraFacing, int clientPid, bool keep) {
     int callingPid = getCallingPid();
-    LOG1("Client::Client E (pid %d)", callingPid);
+    LOG1("Client::Client E (pid %d, id %d)", callingPid, cameraId);
 
     mCameraService = cameraService;
     mCameraClient = cameraClient;
@@ -341,6 +404,7 @@
     mCameraId = cameraId;
     mCameraFacing = cameraFacing;
     mClientPid = clientPid;
+    mKeep = keep;
     mMsgEnabled = 0;
     mSurface = 0;
     mPreviewWindow = 0;
@@ -351,7 +415,7 @@
 
     // Enable zoom, error, focus, and metadata messages by default
     enableMsgType(CAMERA_MSG_ERROR | CAMERA_MSG_ZOOM | CAMERA_MSG_FOCUS |
-                  CAMERA_MSG_PREVIEW_METADATA);
+                  CAMERA_MSG_PREVIEW_METADATA | CAMERA_MSG_FOCUS_MOVE);
 
     // Callback is disabled by default
     mPreviewCallbackFlag = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
@@ -359,7 +423,7 @@
     mPlayShutterSound = true;
     cameraService->setCameraBusy(cameraId);
     cameraService->loadSound();
-    LOG1("Client::Client X (pid %d)", callingPid);
+    LOG1("Client::Client X (pid %d, id %d)", callingPid, cameraId);
 }
 
 // tear down the client
@@ -468,18 +532,24 @@
 }
 
 void CameraService::Client::disconnect() {
+    disconnectInternal(true);
+}
+
+void CameraService::Client::disconnectInternal(bool needCheckPid) {
     int callingPid = getCallingPid();
-    LOG1("disconnect E (pid %d)", callingPid);
+    LOG1("disconnectInternal E (pid %d)", callingPid);
     Mutex::Autolock lock(mLock);
 
-    if (checkPid() != NO_ERROR) {
-        ALOGW("different client - don't disconnect");
-        return;
-    }
+    if (needCheckPid) {
+        if (checkPid() != NO_ERROR) {
+            ALOGW("different client - don't disconnect");
+            return;
+        }
 
-    if (mClientPid <= 0) {
-        LOG1("camera is unlocked (mClientPid = %d), don't tear down hardware", mClientPid);
-        return;
+        if (mClientPid <= 0) {
+            LOG1("camera is unlocked (mClientPid = %d), don't tear down hardware", mClientPid);
+            return;
+        }
     }
 
     // Make sure disconnect() is done once and once only, whether it is called
@@ -506,8 +576,16 @@
 
     mCameraService->removeClient(mCameraClient);
     mCameraService->setCameraFree(mCameraId);
+    mReleaseCondition.signal();
 
-    LOG1("disconnect X (pid %d)", callingPid);
+    LOG1("disconnectInternal X (pid %d)", callingPid);
+}
+
+void CameraService::Client::waitRelease(int ms) {
+    Mutex::Autolock lock(mLock);
+    if (mHardware != 0) {
+        mReleaseCondition.waitRelative(mLock, ms * 1000000);
+    }
 }
 
 // ----------------------------------------------------------------------------
@@ -874,6 +952,9 @@
         return OK;
     } else if (cmd == CAMERA_CMD_PLAY_RECORDING_SOUND) {
         mCameraService->playSound(SOUND_RECORDING);
+    } else if (cmd == CAMERA_CMD_PING) {
+        // If mHardware is 0, checkPidAndHardware will return an error.
+        return OK;
     }
 
     return mHardware->sendCommand(cmd, arg1, arg2);
@@ -1217,6 +1298,10 @@
     return -1;
 }
 
+// Whether the client wants to keep the camera from being taken by other clients
+bool CameraService::Client::keep() const {
+    return mKeep;
+}
 
 // ----------------------------------------------------------------------------
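
Summarizing the connect() changes above: force lets a caller evict an existing client unless that client was opened with keep (which now requires the KEEP_CAMERA permission), and eviction waits up to WAIT_RELEASE_TIMEOUT per client within an overall 3 s budget. On the client side this reduces to the extended Camera::connect() call, sketched here with a made-up wrapper function:

    #define LOG_TAG "CameraConnectSketch"
    #include <utils/Log.h>
    #include <camera/Camera.h>

    using namespace android;

    // Hedged sketch of a native caller using the extended connect();
    // process/binder setup is omitted and the function name is invented.
    static sp<Camera> openCameraForced(int cameraId) {
        // force = true: evict an existing client unless it holds "keep".
        // keep  = false: this client does not itself lock out later "force" callers.
        sp<Camera> camera = Camera::connect(cameraId, true /* force */, false /* keep */);
        if (camera == 0) {
            ALOGE("camera %d unavailable (kept by another client or busy timeout)", cameraId);
        }
        return camera;
    }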
 
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index bad41f5..457c79b 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -46,7 +46,8 @@
     virtual int32_t     getNumberOfCameras();
     virtual status_t    getCameraInfo(int cameraId,
                                       struct CameraInfo* cameraInfo);
-    virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient, int cameraId);
+    virtual sp<ICamera> connect(const sp<ICameraClient>& cameraClient, int cameraId,
+                                bool force, bool keep);
     virtual void        removeClient(const sp<ICameraClient>& cameraClient);
     virtual sp<Client>  getClientById(int cameraId);
 
@@ -114,7 +115,8 @@
                                        const sp<CameraHardwareInterface>& hardware,
                                        int cameraId,
                                        int cameraFacing,
-                                       int clientPid);
+                                       int clientPid,
+                                       bool keep);
                                 ~Client();
 
         // return our camera client
@@ -172,12 +174,19 @@
                                     const sp<IBinder>& binder,
                                     const sp<ANativeWindow>& window);
 
+        void                    disconnectInternal(bool needCheckPid);
+        bool                    keep() const;
+        void                    waitRelease(int ms);
+
+
         // these are initialized in the constructor.
         sp<CameraService>               mCameraService;  // immutable after constructor
         sp<ICameraClient>               mCameraClient;
         int                             mCameraId;       // immutable after constructor
         int                             mCameraFacing;   // immutable after constructor
         pid_t                           mClientPid;
+        // Whether the client wants to keep the camera from being taken by other clients.
+        bool                            mKeep;
         sp<CameraHardwareInterface>     mHardware;       // cleared after disconnect()
         int                             mPreviewCallbackFlag;
         int                             mOrientation;     // Current display orientation
@@ -185,6 +194,8 @@
 
         // Ensures atomicity among the public methods
         mutable Mutex                   mLock;
+        // Signaled when the hardware is released.
+        Condition                       mReleaseCondition;
         // This is a binder of Surface or SurfaceTexture.
         sp<IBinder>                     mSurface;
         sp<ANativeWindow>               mPreviewWindow;
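
waitRelease() and mReleaseCondition above implement a bounded wait for the evicted client to tear down its hardware; the millisecond argument is converted to nanoseconds for Condition::waitRelative(). A minimal sketch of that pattern with hypothetical names:

    #include <utils/Mutex.h>
    #include <utils/Condition.h>
    #include <utils/Timers.h>

    using namespace android;

    // Hypothetical helper mirroring Client::waitRelease(): block up to `ms`
    // milliseconds for `cond` to be signaled, using the same ms -> ns conversion.
    static void waitUpToMs(Mutex& mutex, Condition& cond, int ms) {
        Mutex::Autolock lock(mutex);
        cond.waitRelative(mutex, (nsecs_t)ms * 1000000LL);  // waitRelative takes nanoseconds
    }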
diff --git a/services/camera/tests/CameraServiceTest/CameraServiceTest.cpp b/services/camera/tests/CameraServiceTest/CameraServiceTest.cpp
index 1055538..e417b79 100644
--- a/services/camera/tests/CameraServiceTest/CameraServiceTest.cpp
+++ b/services/camera/tests/CameraServiceTest/CameraServiceTest.cpp
@@ -22,7 +22,6 @@
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <unistd.h>
-#include <surfaceflinger/ISurface.h>
 #include <camera/Camera.h>
 #include <camera/CameraParameters.h>
 #include <ui/GraphicBuffer.h>