Merge "ESQueue:use 0x000001 instead of 0x00000001 as sync word in H264/MPEG_VIDEO"
diff --git a/camera/Android.mk b/camera/Android.mk
index da5ac59..4c4700b 100644
--- a/camera/Android.mk
+++ b/camera/Android.mk
@@ -30,12 +30,10 @@
 	ICameraServiceListener.cpp \
 	ICameraRecordingProxy.cpp \
 	ICameraRecordingProxyListener.cpp \
-	IProCameraUser.cpp \
-	IProCameraCallbacks.cpp \
 	camera2/ICameraDeviceUser.cpp \
 	camera2/ICameraDeviceCallbacks.cpp \
 	camera2/CaptureRequest.cpp \
-	ProCamera.cpp \
+	camera2/OutputConfiguration.cpp \
 	CameraBase.cpp \
 	CameraUtils.cpp \
 	VendorTagDescriptor.cpp
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index 65a1a47..5d50aa8 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -29,7 +29,6 @@
 #include <camera/ICameraService.h>
 
 // needed to instantiate
-#include <camera/ProCamera.h>
 #include <camera/Camera.h>
 
 #include <system/camera_metadata.h>
@@ -217,7 +216,6 @@
     return cs->removeListener(listener);
 }
 
-template class CameraBase<ProCamera>;
 template class CameraBase<Camera>;
 
 } // namespace android
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index 043437f..e216d26 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -74,7 +74,7 @@
     clear();
 }
 
-const camera_metadata_t* CameraMetadata::getAndLock() {
+const camera_metadata_t* CameraMetadata::getAndLock() const {
     mLocked = true;
     return mBuffer;
 }
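
With getAndLock() now declared const, read-only code paths can inspect the underlying buffer through a const CameraMetadata reference. A minimal sketch of that pattern, assuming unlock() is likewise callable on a const instance:

    #include <camera/CameraMetadata.h>
    #include <system/camera_metadata.h>

    // Hypothetical helper: count metadata entries via a read-only reference.
    static size_t countEntries(const android::CameraMetadata& meta) {
        const camera_metadata_t* raw = meta.getAndLock();  // now legal on a const object
        size_t n = get_camera_metadata_entry_count(raw);   // query the locked buffer
        meta.unlock(raw);                                  // pair every getAndLock() with unlock()
        return n;
    }
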
diff --git a/camera/CameraParameters.cpp b/camera/CameraParameters.cpp
index 3dbf75e..68969cf 100644
--- a/camera/CameraParameters.cpp
+++ b/camera/CameraParameters.cpp
@@ -530,4 +530,8 @@
         -1;
 }
 
+bool CameraParameters::isEmpty() const {
+    return mMap.isEmpty();
+}
+
 }; // namespace android
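
The new isEmpty() accessor lets callers skip work when no key/value pairs were parsed; a hypothetical usage sketch:

    #include <camera/CameraParameters.h>

    // Hypothetical helper: report whether a flattened string produced any parameters.
    static bool haveParams(android::CameraParameters& params,
                           const android::String8& flattened) {
        params.unflatten(flattened);  // parse "key1=value1;key2=value2;..." pairs
        return !params.isEmpty();     // new accessor backed by mMap.isEmpty()
    }
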
diff --git a/camera/ICameraService.cpp b/camera/ICameraService.cpp
index a75cb48..51a775b 100644
--- a/camera/ICameraService.cpp
+++ b/camera/ICameraService.cpp
@@ -2,16 +2,16 @@
 **
 ** Copyright 2008, The Android Open Source Project
 **
-** Licensed under the Apache License, Version 2.0 (the "License"); 
-** you may not use this file except in compliance with the License. 
-** You may obtain a copy of the License at 
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
 **
-**     http://www.apache.org/licenses/LICENSE-2.0 
+**     http://www.apache.org/licenses/LICENSE-2.0
 **
-** Unless required by applicable law or agreed to in writing, software 
-** distributed under the License is distributed on an "AS IS" BASIS, 
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-** See the License for the specific language governing permissions and 
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
 ** limitations under the License.
 */
 
@@ -29,8 +29,6 @@
 
 #include <camera/ICameraService.h>
 #include <camera/ICameraServiceListener.h>
-#include <camera/IProCameraUser.h>
-#include <camera/IProCameraCallbacks.h>
 #include <camera/ICamera.h>
 #include <camera/ICameraClient.h>
 #include <camera/camera2/ICameraDeviceUser.h>
@@ -223,28 +221,6 @@
         return reply.readInt32();
     }
 
-    // connect to camera service (pro client)
-    virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb, int cameraId,
-                                const String16 &clientPackageName, int clientUid,
-                                /*out*/
-                                sp<IProCameraUser>& device)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-        data.writeStrongBinder(IInterface::asBinder(cameraCb));
-        data.writeInt32(cameraId);
-        data.writeString16(clientPackageName);
-        data.writeInt32(clientUid);
-        remote()->transact(BnCameraService::CONNECT_PRO, data, &reply);
-
-        if (readExceptionCode(reply)) return -EPROTO;
-        status_t status = reply.readInt32();
-        if (reply.readInt32() != 0) {
-            device = interface_cast<IProCameraUser>(reply.readStrongBinder());
-        }
-        return status;
-    }
-
     // connect to camera service (android.hardware.camera2.CameraDevice)
     virtual status_t connectDevice(
             const sp<ICameraDeviceCallbacks>& cameraCb,
@@ -326,6 +302,15 @@
         status_t res = data.readInt32();
         return res;
     }
+
+    virtual void notifySystemEvent(int eventId, int arg0) {
+        Parcel data, reply;
+        data.writeInt32(eventId);
+        data.writeInt32(arg0);
+        remote()->transact(BnCameraService::NOTIFY_SYSTEM_EVENT, data, &reply,
+                IBinder::FLAG_ONEWAY);
+    }
+
 };
 
 IMPLEMENT_META_INTERFACE(CameraService, "android.hardware.ICameraService");
@@ -404,26 +389,6 @@
             }
             return NO_ERROR;
         } break;
-        case CONNECT_PRO: {
-            CHECK_INTERFACE(ICameraService, data, reply);
-            sp<IProCameraCallbacks> cameraClient =
-                interface_cast<IProCameraCallbacks>(data.readStrongBinder());
-            int32_t cameraId = data.readInt32();
-            const String16 clientName = data.readString16();
-            int32_t clientUid = data.readInt32();
-            sp<IProCameraUser> camera;
-            status_t status = connectPro(cameraClient, cameraId,
-                    clientName, clientUid, /*out*/camera);
-            reply->writeNoException();
-            reply->writeInt32(status);
-            if (camera != NULL) {
-                reply->writeInt32(1);
-                reply->writeStrongBinder(IInterface::asBinder(camera));
-            } else {
-                reply->writeInt32(0);
-            }
-            return NO_ERROR;
-        } break;
         case CONNECT_DEVICE: {
             CHECK_INTERFACE(ICameraService, data, reply);
             sp<ICameraDeviceCallbacks> cameraClient =
@@ -514,6 +479,13 @@
             reply->writeInt32(status);
             return NO_ERROR;
         } break;
+        case NOTIFY_SYSTEM_EVENT: {
+            CHECK_INTERFACE(ICameraService, data, reply);
+            int eventId = data.readInt32();
+            int arg0 = data.readInt32();
+            notifySystemEvent(eventId, arg0);
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
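
notifySystemEvent() is a one-way (FLAG_ONEWAY) notification, so callers get no return value. A sketch of a privileged native caller; the "media.camera" service name and the event values are assumptions for illustration:

    #include <binder/IServiceManager.h>
    #include <camera/ICameraService.h>

    // Sketch: look up the camera service and fire a one-way system event.
    static void sendCameraSystemEvent(int eventId, int arg0) {
        using namespace android;
        sp<IBinder> binder =
                defaultServiceManager()->getService(String16("media.camera"));
        if (binder == NULL) return;  // camera service not registered yet
        sp<ICameraService> service = interface_cast<ICameraService>(binder);
        service->notifySystemEvent(eventId, arg0);  // returns immediately (one-way)
    }
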
diff --git a/camera/IProCameraCallbacks.cpp b/camera/IProCameraCallbacks.cpp
deleted file mode 100644
index bd3d420..0000000
--- a/camera/IProCameraCallbacks.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
-**
-** Copyright 2013, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "IProCameraCallbacks"
-#include <utils/Log.h>
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <binder/Parcel.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/Surface.h>
-#include <utils/Mutex.h>
-
-#include <camera/IProCameraCallbacks.h>
-
-#include "camera/CameraMetadata.h"
-
-namespace android {
-
-enum {
-    NOTIFY_CALLBACK = IBinder::FIRST_CALL_TRANSACTION,
-    LOCK_STATUS_CHANGED,
-    RESULT_RECEIVED,
-};
-
-class BpProCameraCallbacks: public BpInterface<IProCameraCallbacks>
-{
-public:
-    BpProCameraCallbacks(const sp<IBinder>& impl)
-        : BpInterface<IProCameraCallbacks>(impl)
-    {
-    }
-
-    // generic callback from camera service to app
-    void notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2)
-    {
-        ALOGV("notifyCallback");
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraCallbacks::getInterfaceDescriptor());
-        data.writeInt32(msgType);
-        data.writeInt32(ext1);
-        data.writeInt32(ext2);
-        remote()->transact(NOTIFY_CALLBACK, data, &reply, IBinder::FLAG_ONEWAY);
-    }
-
-    void onLockStatusChanged(LockStatus newLockStatus) {
-        ALOGV("onLockStatusChanged");
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraCallbacks::getInterfaceDescriptor());
-        data.writeInt32(newLockStatus);
-        remote()->transact(LOCK_STATUS_CHANGED, data, &reply,
-                           IBinder::FLAG_ONEWAY);
-    }
-
-    void onResultReceived(int32_t requestId, camera_metadata* result) {
-        ALOGV("onResultReceived");
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraCallbacks::getInterfaceDescriptor());
-        data.writeInt32(requestId);
-        CameraMetadata::writeToParcel(data, result);
-        remote()->transact(RESULT_RECEIVED, data, &reply, IBinder::FLAG_ONEWAY);
-    }
-};
-
-IMPLEMENT_META_INTERFACE(ProCameraCallbacks,
-                                        "android.hardware.IProCameraCallbacks");
-
-// ----------------------------------------------------------------------
-
-status_t BnProCameraCallbacks::onTransact(
-    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
-    ALOGV("onTransact - code = %d", code);
-    switch(code) {
-        case NOTIFY_CALLBACK: {
-            ALOGV("NOTIFY_CALLBACK");
-            CHECK_INTERFACE(IProCameraCallbacks, data, reply);
-            int32_t msgType = data.readInt32();
-            int32_t ext1 = data.readInt32();
-            int32_t ext2 = data.readInt32();
-            notifyCallback(msgType, ext1, ext2);
-            return NO_ERROR;
-        } break;
-        case LOCK_STATUS_CHANGED: {
-            ALOGV("LOCK_STATUS_CHANGED");
-            CHECK_INTERFACE(IProCameraCallbacks, data, reply);
-            LockStatus newLockStatus
-                                 = static_cast<LockStatus>(data.readInt32());
-            onLockStatusChanged(newLockStatus);
-            return NO_ERROR;
-        } break;
-        case RESULT_RECEIVED: {
-            ALOGV("RESULT_RECEIVED");
-            CHECK_INTERFACE(IProCameraCallbacks, data, reply);
-            int32_t requestId = data.readInt32();
-            camera_metadata_t *result = NULL;
-            CameraMetadata::readFromParcel(data, &result);
-            onResultReceived(requestId, result);
-            return NO_ERROR;
-            break;
-        }
-        default:
-            return BBinder::onTransact(code, data, reply, flags);
-    }
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
diff --git a/camera/IProCameraUser.cpp b/camera/IProCameraUser.cpp
deleted file mode 100644
index 9bd7597..0000000
--- a/camera/IProCameraUser.cpp
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
-**
-** Copyright 2013, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-// #define LOG_NDEBUG 0
-#define LOG_TAG "IProCameraUser"
-#include <utils/Log.h>
-#include <stdint.h>
-#include <sys/types.h>
-#include <binder/Parcel.h>
-#include <camera/IProCameraUser.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/Surface.h>
-#include "camera/CameraMetadata.h"
-
-namespace android {
-
-enum {
-    DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
-    CONNECT,
-    EXCLUSIVE_TRY_LOCK,
-    EXCLUSIVE_LOCK,
-    EXCLUSIVE_UNLOCK,
-    HAS_EXCLUSIVE_LOCK,
-    SUBMIT_REQUEST,
-    CANCEL_REQUEST,
-    DELETE_STREAM,
-    CREATE_STREAM,
-    CREATE_DEFAULT_REQUEST,
-    GET_CAMERA_INFO,
-};
-
-class BpProCameraUser: public BpInterface<IProCameraUser>
-{
-public:
-    BpProCameraUser(const sp<IBinder>& impl)
-        : BpInterface<IProCameraUser>(impl)
-    {
-    }
-
-    // disconnect from camera service
-    void disconnect()
-    {
-        ALOGV("disconnect");
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        remote()->transact(DISCONNECT, data, &reply);
-        reply.readExceptionCode();
-    }
-
-    virtual status_t connect(const sp<IProCameraCallbacks>& cameraClient)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        data.writeStrongBinder(IInterface::asBinder(cameraClient));
-        remote()->transact(CONNECT, data, &reply);
-        return reply.readInt32();
-    }
-
-    /* Shared ProCameraUser */
-
-    virtual status_t exclusiveTryLock()
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        remote()->transact(EXCLUSIVE_TRY_LOCK, data, &reply);
-        return reply.readInt32();
-    }
-    virtual status_t exclusiveLock()
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        remote()->transact(EXCLUSIVE_LOCK, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t exclusiveUnlock()
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        remote()->transact(EXCLUSIVE_UNLOCK, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual bool hasExclusiveLock()
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        remote()->transact(HAS_EXCLUSIVE_LOCK, data, &reply);
-        return !!reply.readInt32();
-    }
-
-    virtual int submitRequest(camera_metadata_t* metadata, bool streaming)
-    {
-
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-
-        // arg0+arg1
-        CameraMetadata::writeToParcel(data, metadata);
-
-        // arg2 = streaming (bool)
-        data.writeInt32(streaming);
-
-        remote()->transact(SUBMIT_REQUEST, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t cancelRequest(int requestId)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        data.writeInt32(requestId);
-
-        remote()->transact(CANCEL_REQUEST, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t deleteStream(int streamId)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        data.writeInt32(streamId);
-
-        remote()->transact(DELETE_STREAM, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t createStream(int width, int height, int format,
-                          const sp<IGraphicBufferProducer>& bufferProducer,
-                          /*out*/
-                          int* streamId)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        data.writeInt32(width);
-        data.writeInt32(height);
-        data.writeInt32(format);
-
-        sp<IBinder> b(IInterface::asBinder(bufferProducer));
-        data.writeStrongBinder(b);
-
-        remote()->transact(CREATE_STREAM, data, &reply);
-
-        int sId = reply.readInt32();
-        if (streamId) {
-            *streamId = sId;
-        }
-        return reply.readInt32();
-    }
-
-    // Create a request object from a template.
-    virtual status_t createDefaultRequest(int templateId,
-                                 /*out*/
-                                  camera_metadata** request)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        data.writeInt32(templateId);
-        remote()->transact(CREATE_DEFAULT_REQUEST, data, &reply);
-        CameraMetadata::readFromParcel(reply, /*out*/request);
-        return reply.readInt32();
-    }
-
-
-    virtual status_t getCameraInfo(int cameraId, camera_metadata** info)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        data.writeInt32(cameraId);
-        remote()->transact(GET_CAMERA_INFO, data, &reply);
-        CameraMetadata::readFromParcel(reply, /*out*/info);
-        return reply.readInt32();
-    }
-
-
-private:
-
-
-};
-
-IMPLEMENT_META_INTERFACE(ProCameraUser, "android.hardware.IProCameraUser");
-
-// ----------------------------------------------------------------------
-
-status_t BnProCameraUser::onTransact(
-    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
-    switch(code) {
-        case DISCONNECT: {
-            ALOGV("DISCONNECT");
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            disconnect();
-            reply->writeNoException();
-            return NO_ERROR;
-        } break;
-        case CONNECT: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            sp<IProCameraCallbacks> cameraClient =
-                   interface_cast<IProCameraCallbacks>(data.readStrongBinder());
-            reply->writeInt32(connect(cameraClient));
-            return NO_ERROR;
-        } break;
-
-        /* Shared ProCameraUser */
-        case EXCLUSIVE_TRY_LOCK: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            reply->writeInt32(exclusiveTryLock());
-            return NO_ERROR;
-        } break;
-        case EXCLUSIVE_LOCK: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            reply->writeInt32(exclusiveLock());
-            return NO_ERROR;
-        } break;
-        case EXCLUSIVE_UNLOCK: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            reply->writeInt32(exclusiveUnlock());
-            return NO_ERROR;
-        } break;
-        case HAS_EXCLUSIVE_LOCK: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            reply->writeInt32(hasExclusiveLock());
-            return NO_ERROR;
-        } break;
-        case SUBMIT_REQUEST: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            camera_metadata_t* metadata;
-            CameraMetadata::readFromParcel(data, /*out*/&metadata);
-
-            // arg2 = streaming (bool)
-            bool streaming = data.readInt32();
-
-            // return code: requestId (int32)
-            reply->writeInt32(submitRequest(metadata, streaming));
-
-            return NO_ERROR;
-        } break;
-        case CANCEL_REQUEST: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            int requestId = data.readInt32();
-            reply->writeInt32(cancelRequest(requestId));
-            return NO_ERROR;
-        } break;
-        case DELETE_STREAM: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            int streamId = data.readInt32();
-            reply->writeInt32(deleteStream(streamId));
-            return NO_ERROR;
-        } break;
-        case CREATE_STREAM: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            int width, height, format;
-
-            width = data.readInt32();
-            height = data.readInt32();
-            format = data.readInt32();
-
-            sp<IGraphicBufferProducer> bp =
-               interface_cast<IGraphicBufferProducer>(data.readStrongBinder());
-
-            int streamId = -1;
-            status_t ret;
-            ret = createStream(width, height, format, bp, &streamId);
-
-            reply->writeInt32(streamId);
-            reply->writeInt32(ret);
-
-            return NO_ERROR;
-        } break;
-
-        case CREATE_DEFAULT_REQUEST: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-
-            int templateId = data.readInt32();
-
-            camera_metadata_t* request = NULL;
-            status_t ret;
-            ret = createDefaultRequest(templateId, &request);
-
-            CameraMetadata::writeToParcel(*reply, request);
-            reply->writeInt32(ret);
-
-            free_camera_metadata(request);
-
-            return NO_ERROR;
-        } break;
-        case GET_CAMERA_INFO: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-
-            int cameraId = data.readInt32();
-
-            camera_metadata_t* info = NULL;
-            status_t ret;
-            ret = getCameraInfo(cameraId, &info);
-
-            CameraMetadata::writeToParcel(*reply, info);
-            reply->writeInt32(ret);
-
-            free_camera_metadata(info);
-
-            return NO_ERROR;
-        } break;
-        default:
-            return BBinder::onTransact(code, data, reply, flags);
-    }
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
diff --git a/camera/ProCamera.cpp b/camera/ProCamera.cpp
deleted file mode 100644
index 48f8e8e..0000000
--- a/camera/ProCamera.cpp
+++ /dev/null
@@ -1,436 +0,0 @@
-/*
-**
-** Copyright (C) 2013, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ProCamera"
-#include <utils/Log.h>
-#include <utils/threads.h>
-#include <utils/Mutex.h>
-
-#include <binder/IPCThreadState.h>
-#include <binder/IServiceManager.h>
-#include <binder/IMemory.h>
-
-#include <camera/ProCamera.h>
-#include <camera/IProCameraUser.h>
-#include <camera/IProCameraCallbacks.h>
-
-#include <gui/IGraphicBufferProducer.h>
-
-#include <system/camera_metadata.h>
-
-namespace android {
-
-sp<ProCamera> ProCamera::connect(int cameraId)
-{
-    return CameraBaseT::connect(cameraId, String16(),
-                                 ICameraService::USE_CALLING_UID);
-}
-
-ProCamera::ProCamera(int cameraId)
-    : CameraBase(cameraId)
-{
-}
-
-CameraTraits<ProCamera>::TCamConnectService CameraTraits<ProCamera>::fnConnectService =
-        &ICameraService::connectPro;
-
-ProCamera::~ProCamera()
-{
-
-}
-
-/* IProCameraUser's implementation */
-
-// callback from camera service
-void ProCamera::notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2)
-{
-    return CameraBaseT::notifyCallback(msgType, ext1, ext2);
-}
-
-void ProCamera::onLockStatusChanged(
-                                 IProCameraCallbacks::LockStatus newLockStatus)
-{
-    ALOGV("%s: newLockStatus = %d", __FUNCTION__, newLockStatus);
-
-    sp<ProCameraListener> listener;
-    {
-        Mutex::Autolock _l(mLock);
-        listener = mListener;
-    }
-    if (listener != NULL) {
-        switch (newLockStatus) {
-            case IProCameraCallbacks::LOCK_ACQUIRED:
-                listener->onLockAcquired();
-                break;
-            case IProCameraCallbacks::LOCK_RELEASED:
-                listener->onLockReleased();
-                break;
-            case IProCameraCallbacks::LOCK_STOLEN:
-                listener->onLockStolen();
-                break;
-            default:
-                ALOGE("%s: Unknown lock status: %d",
-                      __FUNCTION__, newLockStatus);
-        }
-    }
-}
-
-void ProCamera::onResultReceived(int32_t requestId, camera_metadata* result) {
-    ALOGV("%s: requestId = %d, result = %p", __FUNCTION__, requestId, result);
-
-    sp<ProCameraListener> listener;
-    {
-        Mutex::Autolock _l(mLock);
-        listener = mListener;
-    }
-
-    CameraMetadata tmp(result);
-
-    // Unblock waitForFrame(id) callers
-    {
-        Mutex::Autolock al(mWaitMutex);
-        mMetadataReady = true;
-        mLatestMetadata = tmp; // make copy
-        mWaitCondition.broadcast();
-    }
-
-    result = tmp.release();
-
-    if (listener != NULL) {
-        listener->onResultReceived(requestId, result);
-    } else {
-        free_camera_metadata(result);
-    }
-
-}
-
-status_t ProCamera::exclusiveTryLock()
-{
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    return c->exclusiveTryLock();
-}
-status_t ProCamera::exclusiveLock()
-{
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    return c->exclusiveLock();
-}
-status_t ProCamera::exclusiveUnlock()
-{
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    return c->exclusiveUnlock();
-}
-bool ProCamera::hasExclusiveLock()
-{
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    return c->hasExclusiveLock();
-}
-
-// Note that the callee gets a copy of the metadata.
-int ProCamera::submitRequest(const struct camera_metadata* metadata,
-                             bool streaming)
-{
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    return c->submitRequest(const_cast<struct camera_metadata*>(metadata),
-                            streaming);
-}
-
-status_t ProCamera::cancelRequest(int requestId)
-{
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    return c->cancelRequest(requestId);
-}
-
-status_t ProCamera::deleteStream(int streamId)
-{
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    status_t s = c->deleteStream(streamId);
-
-    mStreams.removeItem(streamId);
-
-    return s;
-}
-
-status_t ProCamera::createStream(int width, int height, int format,
-                                 const sp<Surface>& surface,
-                                 /*out*/
-                                 int* streamId)
-{
-    *streamId = -1;
-
-    ALOGV("%s: createStreamW %dx%d (fmt=0x%x)", __FUNCTION__, width, height,
-                                                                       format);
-
-    if (surface == 0) {
-        return BAD_VALUE;
-    }
-
-    return createStream(width, height, format,
-                        surface->getIGraphicBufferProducer(),
-                        streamId);
-}
-
-status_t ProCamera::createStream(int width, int height, int format,
-                                 const sp<IGraphicBufferProducer>& bufferProducer,
-                                 /*out*/
-                                 int* streamId) {
-    *streamId = -1;
-
-    ALOGV("%s: createStreamT %dx%d (fmt=0x%x)", __FUNCTION__, width, height,
-                                                                       format);
-
-    if (bufferProducer == 0) {
-        return BAD_VALUE;
-    }
-
-    sp <IProCameraUser> c = mCamera;
-    status_t stat = c->createStream(width, height, format, bufferProducer,
-                                    streamId);
-
-    if (stat == OK) {
-        StreamInfo s(*streamId);
-
-        mStreams.add(*streamId, s);
-    }
-
-    return stat;
-}
-
-status_t ProCamera::createStreamCpu(int width, int height, int format,
-                                    int heapCount,
-                                    /*out*/
-                                    sp<CpuConsumer>* cpuConsumer,
-                                    int* streamId) {
-    return createStreamCpu(width, height, format, heapCount,
-                           /*synchronousMode*/true,
-                           cpuConsumer, streamId);
-}
-
-status_t ProCamera::createStreamCpu(int width, int height, int format,
-                                    int heapCount,
-                                    bool synchronousMode,
-                                    /*out*/
-                                    sp<CpuConsumer>* cpuConsumer,
-                                    int* streamId)
-{
-    ALOGV("%s: createStreamW %dx%d (fmt=0x%x)", __FUNCTION__, width, height,
-                                                                        format);
-
-    *cpuConsumer = NULL;
-
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    sp<IGraphicBufferProducer> producer;
-    sp<IGraphicBufferConsumer> consumer;
-    BufferQueue::createBufferQueue(&producer, &consumer);
-    sp<CpuConsumer> cc = new CpuConsumer(consumer, heapCount
-            /*, synchronousMode*/);
-    cc->setName(String8("ProCamera::mCpuConsumer"));
-
-    sp<Surface> stc = new Surface(producer);
-
-    status_t s = createStream(width, height, format,
-                              stc->getIGraphicBufferProducer(),
-                              streamId);
-
-    if (s != OK) {
-        ALOGE("%s: Failure to create stream %dx%d (fmt=0x%x)", __FUNCTION__,
-                    width, height, format);
-        return s;
-    }
-
-    sp<ProFrameListener> frameAvailableListener =
-        new ProFrameListener(this, *streamId);
-
-    getStreamInfo(*streamId).cpuStream = true;
-    getStreamInfo(*streamId).cpuConsumer = cc;
-    getStreamInfo(*streamId).synchronousMode = synchronousMode;
-    getStreamInfo(*streamId).stc = stc;
-    // for lifetime management
-    getStreamInfo(*streamId).frameAvailableListener = frameAvailableListener;
-
-    cc->setFrameAvailableListener(frameAvailableListener);
-
-    *cpuConsumer = cc;
-
-    return s;
-}
-
-camera_metadata* ProCamera::getCameraInfo(int cameraId) {
-    ALOGV("%s: cameraId = %d", __FUNCTION__, cameraId);
-
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NULL;
-
-    camera_metadata* ptr = NULL;
-    status_t status = c->getCameraInfo(cameraId, &ptr);
-
-    if (status != OK) {
-        ALOGE("%s: Failed to get camera info, error = %d", __FUNCTION__, status);
-    }
-
-    return ptr;
-}
-
-status_t ProCamera::createDefaultRequest(int templateId,
-                                             camera_metadata** request) const {
-    ALOGV("%s: templateId = %d", __FUNCTION__, templateId);
-
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    return c->createDefaultRequest(templateId, request);
-}
-
-void ProCamera::onFrameAvailable(int streamId) {
-    ALOGV("%s: streamId = %d", __FUNCTION__, streamId);
-
-    sp<ProCameraListener> listener = mListener;
-    StreamInfo& stream = getStreamInfo(streamId);
-
-    if (listener.get() != NULL) {
-        listener->onFrameAvailable(streamId, stream.cpuConsumer);
-    }
-
-    // Unblock waitForFrame(id) callers
-    {
-        Mutex::Autolock al(mWaitMutex);
-        getStreamInfo(streamId).frameReady++;
-        mWaitCondition.broadcast();
-    }
-}
-
-int ProCamera::waitForFrameBuffer(int streamId) {
-    status_t stat = BAD_VALUE;
-    Mutex::Autolock al(mWaitMutex);
-
-    StreamInfo& si = getStreamInfo(streamId);
-
-    if (si.frameReady > 0) {
-        int numFrames = si.frameReady;
-        si.frameReady = 0;
-        return numFrames;
-    } else {
-        while (true) {
-            stat = mWaitCondition.waitRelative(mWaitMutex,
-                                                mWaitTimeout);
-            if (stat != OK) {
-                ALOGE("%s: Error while waiting for frame buffer: %d",
-                    __FUNCTION__, stat);
-                return stat;
-            }
-
-            if (si.frameReady > 0) {
-                int numFrames = si.frameReady;
-                si.frameReady = 0;
-                return numFrames;
-            }
-            // else it was some other stream that got unblocked
-        }
-    }
-
-    return stat;
-}
-
-int ProCamera::dropFrameBuffer(int streamId, int count) {
-    StreamInfo& si = getStreamInfo(streamId);
-
-    if (!si.cpuStream) {
-        return BAD_VALUE;
-    } else if (count < 0) {
-        return BAD_VALUE;
-    }
-
-    if (!si.synchronousMode) {
-        ALOGW("%s: No need to drop frames on asynchronous streams,"
-              " as asynchronous mode only keeps 1 latest frame around.",
-              __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    int numDropped = 0;
-    for (int i = 0; i < count; ++i) {
-        CpuConsumer::LockedBuffer buffer;
-        if (si.cpuConsumer->lockNextBuffer(&buffer) != OK) {
-            break;
-        }
-
-        si.cpuConsumer->unlockBuffer(buffer);
-        numDropped++;
-    }
-
-    return numDropped;
-}
-
-status_t ProCamera::waitForFrameMetadata() {
-    status_t stat = BAD_VALUE;
-    Mutex::Autolock al(mWaitMutex);
-
-    if (mMetadataReady) {
-        return OK;
-    } else {
-        while (true) {
-            stat = mWaitCondition.waitRelative(mWaitMutex,
-                                               mWaitTimeout);
-
-            if (stat != OK) {
-                ALOGE("%s: Error while waiting for metadata: %d",
-                        __FUNCTION__, stat);
-                return stat;
-            }
-
-            if (mMetadataReady) {
-                mMetadataReady = false;
-                return OK;
-            }
-            // else it was some other stream or metadata
-        }
-    }
-
-    return stat;
-}
-
-CameraMetadata ProCamera::consumeFrameMetadata() {
-    Mutex::Autolock al(mWaitMutex);
-
-    // Destructive: Subsequent calls return empty metadatas
-    CameraMetadata tmp = mLatestMetadata;
-    mLatestMetadata.clear();
-
-    return tmp;
-}
-
-ProCamera::StreamInfo& ProCamera::getStreamInfo(int streamId) {
-    return mStreams.editValueFor(streamId);
-}
-
-}; // namespace android
diff --git a/camera/camera2/ICameraDeviceUser.cpp b/camera/camera2/ICameraDeviceUser.cpp
index d1d63d5..89c6fb7 100644
--- a/camera/camera2/ICameraDeviceUser.cpp
+++ b/camera/camera2/ICameraDeviceUser.cpp
@@ -26,6 +26,7 @@
 #include <gui/Surface.h>
 #include <camera/CameraMetadata.h>
 #include <camera/camera2/CaptureRequest.h>
+#include <camera/camera2/OutputConfiguration.h>
 
 namespace android {
 
@@ -208,17 +209,16 @@
         return reply.readInt32();
     }
 
-    virtual status_t createStream(
-                          const sp<IGraphicBufferProducer>& bufferProducer)
+    virtual status_t createStream(const OutputConfiguration& outputConfiguration)
     {
         Parcel data, reply;
         data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-
-        data.writeInt32(1); // marker that bufferProducer is not null
-        data.writeString16(String16("unknown_name")); // name of surface
-        sp<IBinder> b(IInterface::asBinder(bufferProducer));
-        data.writeStrongBinder(b);
-
+        if (outputConfiguration.getGraphicBufferProducer() != NULL) {
+            data.writeInt32(1); // marker that OutputConfiguration is not null. Mimic aidl behavior
+            outputConfiguration.writeToParcel(data);
+        } else {
+            data.writeInt32(0);
+        }
         remote()->transact(CREATE_STREAM, data, &reply);
 
         reply.readExceptionCode();
@@ -394,22 +394,14 @@
         case CREATE_STREAM: {
             CHECK_INTERFACE(ICameraDeviceUser, data, reply);
 
-            sp<IGraphicBufferProducer> bp;
+            status_t ret = BAD_VALUE;
             if (data.readInt32() != 0) {
-                String16 name = readMaybeEmptyString16(data);
-                bp = interface_cast<IGraphicBufferProducer>(
-                        data.readStrongBinder());
-
-                ALOGV("%s: CREATE_STREAM: bp = %p, name = %s", __FUNCTION__,
-                      bp.get(), String8(name).string());
+                OutputConfiguration outputConfiguration(data);
+                ret = createStream(outputConfiguration);
             } else {
-                ALOGV("%s: CREATE_STREAM: bp = unset, name = unset",
-                      __FUNCTION__);
+                ALOGE("%s: cannot take an empty OutputConfiguration", __FUNCTION__);
             }
 
-            status_t ret;
-            ret = createStream(bp);
-
             reply->writeNoException();
             ALOGV("%s: CREATE_STREAM: write noException", __FUNCTION__);
             reply->writeInt32(ret);
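
createStream() now takes an OutputConfiguration instead of a bare IGraphicBufferProducer. A sketch of a caller that rebuilds the configuration from a parcel, the same way the CREATE_STREAM stub above does, before handing it to the device (the parcel source is an assumption):

    #include <binder/Parcel.h>
    #include <camera/camera2/ICameraDeviceUser.h>
    #include <camera/camera2/OutputConfiguration.h>

    // Sketch: reject an empty producer before calling into the camera device.
    static android::status_t addOutput(const android::sp<android::ICameraDeviceUser>& device,
                                       const android::Parcel& parcel) {
        android::OutputConfiguration config(parcel);  // reads rotation, name, producer
        if (config.getGraphicBufferProducer() == NULL) {
            return android::BAD_VALUE;                // mirror the stub's empty check
        }
        return device->createStream(config);
    }
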
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
new file mode 100644
index 0000000..24acaa0
--- /dev/null
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -0,0 +1,79 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "OutputConfiguration"
+#include <utils/Log.h>
+
+#include <camera/camera2/OutputConfiguration.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+
+const int OutputConfiguration::INVALID_ROTATION = -1;
+
+// Read empty strings without printing a false error message.
+String16 OutputConfiguration::readMaybeEmptyString16(const Parcel& parcel) {
+    size_t len;
+    const char16_t* str = parcel.readString16Inplace(&len);
+    if (str != NULL) {
+        return String16(str, len);
+    } else {
+        return String16();
+    }
+}
+
+sp<IGraphicBufferProducer> OutputConfiguration::getGraphicBufferProducer() const {
+    return mGbp;
+}
+
+int OutputConfiguration::getRotation() const {
+    return mRotation;
+}
+
+OutputConfiguration::OutputConfiguration(const Parcel& parcel) {
+    status_t err;
+    int rotation = 0;
+    if ((err = parcel.readInt32(&rotation)) != OK) {
+        ALOGE("%s: Failed to read rotation from parcel", __FUNCTION__);
+        mGbp = NULL;
+        mRotation = INVALID_ROTATION;
+        return;
+    }
+
+    String16 name = readMaybeEmptyString16(parcel);
+    const sp<IGraphicBufferProducer>& gbp =
+            interface_cast<IGraphicBufferProducer>(parcel.readStrongBinder());
+    mGbp = gbp;
+    mRotation = rotation;
+
+    ALOGV("%s: OutputConfiguration: bp = %p, name = %s", __FUNCTION__,
+          gbp.get(), String8(name).string());
+}
+
+status_t OutputConfiguration::writeToParcel(Parcel& parcel) const {
+
+    parcel.writeInt32(mRotation);
+    parcel.writeString16(String16("unknown_name")); // name of surface
+    sp<IBinder> b(IInterface::asBinder(mGbp));
+    parcel.writeStrongBinder(b);
+
+    return OK;
+}
+
+}; // namespace android
+
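
For reference, the wire format implied by writeToParcel() and the Parcel constructor above is: an int32 rotation, a string16 surface name, and a strong binder to the IGraphicBufferProducer. A sketch of a manual round trip over that format (the producer argument is assumed to come from an existing Surface):

    #include <binder/Parcel.h>
    #include <camera/camera2/OutputConfiguration.h>
    #include <gui/IGraphicBufferProducer.h>

    // Sketch: serialize the three fields by hand, then reconstruct them.
    static void roundTrip(const android::sp<android::IGraphicBufferProducer>& producer) {
        using namespace android;
        Parcel p;
        p.writeInt32(90);                                     // rotation
        p.writeString16(String16("preview"));                 // surface name (any string)
        p.writeStrongBinder(IInterface::asBinder(producer));  // producer binder
        p.setDataPosition(0);                                 // rewind before reading
        OutputConfiguration config(p);                        // reads the same three fields
        // Here config.getRotation() == 90 and getGraphicBufferProducer() == producer.
    }
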
diff --git a/camera/tests/Android.mk b/camera/tests/Android.mk
index 2db4c14..5d37f9e 100644
--- a/camera/tests/Android.mk
+++ b/camera/tests/Android.mk
@@ -17,7 +17,6 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
 
 LOCAL_SRC_FILES:= \
-	ProCameraTests.cpp \
 	VendorTagDescriptorTests.cpp
 
 LOCAL_SHARED_LIBRARIES := \
diff --git a/camera/tests/ProCameraTests.cpp b/camera/tests/ProCameraTests.cpp
deleted file mode 100644
index 24b2327..0000000
--- a/camera/tests/ProCameraTests.cpp
+++ /dev/null
@@ -1,1284 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-#include <iostream>
-
-#include <binder/IPCThreadState.h>
-#include <utils/Thread.h>
-
-#include "Camera.h"
-#include "ProCamera.h"
-#include <utils/Vector.h>
-#include <utils/Mutex.h>
-#include <utils/Condition.h>
-
-#include <gui/SurfaceComposerClient.h>
-#include <gui/Surface.h>
-
-#include <system/camera_metadata.h>
-#include <hardware/camera2.h> // for CAMERA2_TEMPLATE_PREVIEW only
-#include <camera/CameraMetadata.h>
-
-#include <camera/ICameraServiceListener.h>
-
-namespace android {
-namespace camera2 {
-namespace tests {
-namespace client {
-
-#define CAMERA_ID 0
-#define TEST_DEBUGGING 0
-
-#define TEST_LISTENER_TIMEOUT 1000000000 // 1 second listener timeout
-#define TEST_FORMAT HAL_PIXEL_FORMAT_Y16 //TODO: YUY2 instead
-
-#define TEST_FORMAT_MAIN HAL_PIXEL_FORMAT_Y8
-#define TEST_FORMAT_DEPTH HAL_PIXEL_FORMAT_Y16
-
-// defaults for display "test"
-#define TEST_DISPLAY_FORMAT HAL_PIXEL_FORMAT_Y8
-#define TEST_DISPLAY_WIDTH 320
-#define TEST_DISPLAY_HEIGHT 240
-
-#define TEST_CPU_FRAME_COUNT 2
-#define TEST_CPU_HEAP_COUNT 5
-
-#define TEST_FRAME_PROCESSING_DELAY_US 200000 // 200 ms
-
-#if TEST_DEBUGGING
-#define dout std::cerr
-#else
-#define dout if (0) std::cerr
-#endif
-
-#define EXPECT_OK(x) EXPECT_EQ(OK, (x))
-#define ASSERT_OK(x) ASSERT_EQ(OK, (x))
-
-class ProCameraTest;
-
-struct ServiceListener : public BnCameraServiceListener {
-
-    ServiceListener() :
-        mLatestStatus(STATUS_UNKNOWN),
-        mPrevStatus(STATUS_UNKNOWN)
-    {
-    }
-
-    void onStatusChanged(Status status, int32_t cameraId) {
-        dout << "On status changed: 0x" << std::hex
-             << (unsigned int) status << " cameraId " << cameraId
-             << std::endl;
-
-        Mutex::Autolock al(mMutex);
-
-        mLatestStatus = status;
-        mCondition.broadcast();
-    }
-
-    void onTorchStatusChanged(TorchStatus status, const String16& cameraId) {
-        dout << "On torch status changed: 0x" << std::hex
-             << (unsigned int) status << " cameraId " << cameraId.string()
-             << std::endl;
-    }
-
-    status_t waitForStatusChange(Status& newStatus) {
-        Mutex::Autolock al(mMutex);
-
-        if (mLatestStatus != mPrevStatus) {
-            newStatus = mLatestStatus;
-            mPrevStatus = mLatestStatus;
-            return OK;
-        }
-
-        status_t stat = mCondition.waitRelative(mMutex,
-                                               TEST_LISTENER_TIMEOUT);
-
-        if (stat == OK) {
-            newStatus = mLatestStatus;
-            mPrevStatus = mLatestStatus;
-        }
-
-        return stat;
-    }
-
-    Condition mCondition;
-    Mutex mMutex;
-
-    Status mLatestStatus;
-    Status mPrevStatus;
-};
-
-enum ProEvent {
-    UNKNOWN,
-    ACQUIRED,
-    RELEASED,
-    STOLEN,
-    FRAME_RECEIVED,
-    RESULT_RECEIVED,
-};
-
-inline int ProEvent_Mask(ProEvent e) {
-    return (1 << static_cast<int>(e));
-}
-
-typedef Vector<ProEvent> EventList;
-
-class ProCameraTestThread : public Thread
-{
-public:
-    ProCameraTestThread() {
-    }
-
-    virtual bool threadLoop() {
-        mProc = ProcessState::self();
-        mProc->startThreadPool();
-
-        IPCThreadState *ptr = IPCThreadState::self();
-
-        ptr->joinThreadPool();
-
-        return false;
-    }
-
-    sp<ProcessState> mProc;
-};
-
-class ProCameraTestListener : public ProCameraListener {
-
-public:
-    static const int EVENT_MASK_ALL = 0xFFFFFFFF;
-
-    ProCameraTestListener() {
-        mEventMask = EVENT_MASK_ALL;
-        mDropFrames = false;
-    }
-
-    status_t WaitForEvent() {
-        Mutex::Autolock cal(mConditionMutex);
-
-        {
-            Mutex::Autolock al(mListenerMutex);
-
-            if (mProEventList.size() > 0) {
-                return OK;
-            }
-        }
-
-        return mListenerCondition.waitRelative(mConditionMutex,
-                                               TEST_LISTENER_TIMEOUT);
-    }
-
-    /* Read events into out. Existing queue is flushed */
-    void ReadEvents(EventList& out) {
-        Mutex::Autolock al(mListenerMutex);
-
-        for (size_t i = 0; i < mProEventList.size(); ++i) {
-            out.push(mProEventList[i]);
-        }
-
-        mProEventList.clear();
-    }
-
-    /**
-      * Dequeue 1 event from the event queue.
-      * Returns UNKNOWN if queue is empty
-      */
-    ProEvent ReadEvent() {
-        Mutex::Autolock al(mListenerMutex);
-
-        if (mProEventList.size() == 0) {
-            return UNKNOWN;
-        }
-
-        ProEvent ev = mProEventList[0];
-        mProEventList.removeAt(0);
-
-        return ev;
-    }
-
-    void SetEventMask(int eventMask) {
-        Mutex::Autolock al(mListenerMutex);
-        mEventMask = eventMask;
-    }
-
-    // Automatically acquire/release frames as they are available
-    void SetDropFrames(bool dropFrames) {
-        Mutex::Autolock al(mListenerMutex);
-        mDropFrames = dropFrames;
-    }
-
-private:
-    void QueueEvent(ProEvent ev) {
-        bool eventAdded = false;
-        {
-            Mutex::Autolock al(mListenerMutex);
-
-            // Drop events not part of mask
-            if (ProEvent_Mask(ev) & mEventMask) {
-                mProEventList.push(ev);
-                eventAdded = true;
-            }
-        }
-
-        if (eventAdded) {
-            mListenerCondition.broadcast();
-        }
-    }
-
-protected:
-
-    //////////////////////////////////////////////////
-    ///////// ProCameraListener //////////////////////
-    //////////////////////////////////////////////////
-
-
-    // Lock has been acquired. Write operations now available.
-    virtual void onLockAcquired() {
-        QueueEvent(ACQUIRED);
-    }
-    // Lock has been released with exclusiveUnlock
-    virtual void onLockReleased() {
-        QueueEvent(RELEASED);
-    }
-
-    // Lock has been stolen by another client.
-    virtual void onLockStolen() {
-        QueueEvent(STOLEN);
-    }
-
-    // Lock free.
-    virtual void onTriggerNotify(int32_t ext1, int32_t ext2, int32_t ext3) {
-
-        dout << "Trigger notify: " << ext1 << " " << ext2
-             << " " << ext3 << std::endl;
-    }
-
-    virtual void onFrameAvailable(int streamId,
-                                  const sp<CpuConsumer>& consumer) {
-
-        QueueEvent(FRAME_RECEIVED);
-
-        Mutex::Autolock al(mListenerMutex);
-        if (mDropFrames) {
-            CpuConsumer::LockedBuffer buf;
-            status_t ret;
-
-            if (OK == (ret = consumer->lockNextBuffer(&buf))) {
-
-                dout << "Frame received on streamId = " << streamId <<
-                        ", dataPtr = " << (void*)buf.data <<
-                        ", timestamp = " << buf.timestamp << std::endl;
-
-                EXPECT_OK(consumer->unlockBuffer(buf));
-            }
-        } else {
-            dout << "Frame received on streamId = " << streamId << std::endl;
-        }
-    }
-
-    virtual void onResultReceived(int32_t requestId,
-                                  camera_metadata* request) {
-        dout << "Result received requestId = " << requestId
-             << ", requestPtr = " << (void*)request << std::endl;
-        QueueEvent(RESULT_RECEIVED);
-        free_camera_metadata(request);
-    }
-
-    virtual void notify(int32_t msg, int32_t ext1, int32_t ext2) {
-        dout << "Notify received: msg " << std::hex << msg
-             << ", ext1: " << std::hex << ext1 << ", ext2: " << std::hex << ext2
-             << std::endl;
-    }
-
-    Vector<ProEvent> mProEventList;
-    Mutex             mListenerMutex;
-    Mutex             mConditionMutex;
-    Condition         mListenerCondition;
-    int               mEventMask;
-    bool              mDropFrames;
-};
-
-class ProCameraTest : public ::testing::Test {
-
-public:
-    ProCameraTest() {
-        char* displaySecsEnv = getenv("TEST_DISPLAY_SECS");
-        if (displaySecsEnv != NULL) {
-            mDisplaySecs = atoi(displaySecsEnv);
-            if (mDisplaySecs < 0) {
-                mDisplaySecs = 0;
-            }
-        } else {
-            mDisplaySecs = 0;
-        }
-
-        char* displayFmtEnv = getenv("TEST_DISPLAY_FORMAT");
-        if (displayFmtEnv != NULL) {
-            mDisplayFmt = FormatFromString(displayFmtEnv);
-        } else {
-            mDisplayFmt = TEST_DISPLAY_FORMAT;
-        }
-
-        char* displayWidthEnv = getenv("TEST_DISPLAY_WIDTH");
-        if (displayWidthEnv != NULL) {
-            mDisplayW = atoi(displayWidthEnv);
-            if (mDisplayW < 0) {
-                mDisplayW = 0;
-            }
-        } else {
-            mDisplayW = TEST_DISPLAY_WIDTH;
-        }
-
-        char* displayHeightEnv = getenv("TEST_DISPLAY_HEIGHT");
-        if (displayHeightEnv != NULL) {
-            mDisplayH = atoi(displayHeightEnv);
-            if (mDisplayH < 0) {
-                mDisplayH = 0;
-            }
-        } else {
-            mDisplayH = TEST_DISPLAY_HEIGHT;
-        }
-    }
-
-    static void SetUpTestCase() {
-        // Binder Thread Pool Initialization
-        mTestThread = new ProCameraTestThread();
-        mTestThread->run("ProCameraTestThread");
-    }
-
-    virtual void SetUp() {
-        mCamera = ProCamera::connect(CAMERA_ID);
-        ASSERT_NE((void*)NULL, mCamera.get());
-
-        mListener = new ProCameraTestListener();
-        mCamera->setListener(mListener);
-    }
-
-    virtual void TearDown() {
-        ASSERT_NE((void*)NULL, mCamera.get());
-        mCamera->disconnect();
-    }
-
-protected:
-    sp<ProCamera> mCamera;
-    sp<ProCameraTestListener> mListener;
-
-    static sp<Thread> mTestThread;
-
-    int mDisplaySecs;
-    int mDisplayFmt;
-    int mDisplayW;
-    int mDisplayH;
-
-    sp<SurfaceComposerClient> mComposerClient;
-    sp<SurfaceControl> mSurfaceControl;
-
-    sp<SurfaceComposerClient> mDepthComposerClient;
-    sp<SurfaceControl> mDepthSurfaceControl;
-
-    int getSurfaceWidth() {
-        return 512;
-    }
-    int getSurfaceHeight() {
-        return 512;
-    }
-
-    void createOnScreenSurface(sp<Surface>& surface) {
-        mComposerClient = new SurfaceComposerClient;
-        ASSERT_EQ(NO_ERROR, mComposerClient->initCheck());
-
-        mSurfaceControl = mComposerClient->createSurface(
-                String8("ProCameraTest StreamingImage Surface"),
-                getSurfaceWidth(), getSurfaceHeight(),
-                PIXEL_FORMAT_RGB_888, 0);
-
-        mSurfaceControl->setPosition(0, 0);
-
-        ASSERT_TRUE(mSurfaceControl != NULL);
-        ASSERT_TRUE(mSurfaceControl->isValid());
-
-        SurfaceComposerClient::openGlobalTransaction();
-        ASSERT_EQ(NO_ERROR, mSurfaceControl->setLayer(0x7FFFFFFF));
-        ASSERT_EQ(NO_ERROR, mSurfaceControl->show());
-        SurfaceComposerClient::closeGlobalTransaction();
-
-        sp<ANativeWindow> window = mSurfaceControl->getSurface();
-        surface = mSurfaceControl->getSurface();
-
-        ASSERT_NE((void*)NULL, surface.get());
-    }
-
-    void createDepthOnScreenSurface(sp<Surface>& surface) {
-        mDepthComposerClient = new SurfaceComposerClient;
-        ASSERT_EQ(NO_ERROR, mDepthComposerClient->initCheck());
-
-        mDepthSurfaceControl = mDepthComposerClient->createSurface(
-                String8("ProCameraTest StreamingImage Surface"),
-                getSurfaceWidth(), getSurfaceHeight(),
-                PIXEL_FORMAT_RGB_888, 0);
-
-        mDepthSurfaceControl->setPosition(640, 0);
-
-        ASSERT_TRUE(mDepthSurfaceControl != NULL);
-        ASSERT_TRUE(mDepthSurfaceControl->isValid());
-
-        SurfaceComposerClient::openGlobalTransaction();
-        ASSERT_EQ(NO_ERROR, mDepthSurfaceControl->setLayer(0x7FFFFFFF));
-        ASSERT_EQ(NO_ERROR, mDepthSurfaceControl->show());
-        SurfaceComposerClient::closeGlobalTransaction();
-
-        sp<ANativeWindow> window = mDepthSurfaceControl->getSurface();
-        surface = mDepthSurfaceControl->getSurface();
-
-        ASSERT_NE((void*)NULL, surface.get());
-    }
-
-    template <typename T>
-    static bool ExistsItem(T needle, T* array, size_t count) {
-        if (!array) {
-            return false;
-        }
-
-        for (size_t i = 0; i < count; ++i) {
-            if (array[i] == needle) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-
-    static int FormatFromString(const char* str) {
-        std::string s(str);
-
-#define CMP_STR(x, y)                               \
-        if (s == #x) return HAL_PIXEL_FORMAT_ ## y;
-#define CMP_STR_SAME(x) CMP_STR(x, x)
-
-        CMP_STR_SAME( Y16);
-        CMP_STR_SAME( Y8);
-        CMP_STR_SAME( YV12);
-        CMP_STR(NV16, YCbCr_422_SP);
-        CMP_STR(NV21, YCrCb_420_SP);
-        CMP_STR(YUY2, YCbCr_422_I);
-        CMP_STR(RAW,  RAW16);
-        CMP_STR(RGBA, RGBA_8888);
-
-        std::cerr << "Unknown format string " << str << std::endl;
-        return -1;
-
-    }
-
-    /**
-     * Creating a streaming request for these output streams from a template,
-     *  and submit it
-     */
-    void createSubmitRequestForStreams(int32_t* streamIds, size_t count, int requestCount=-1) {
-
-        ASSERT_NE((void*)NULL, streamIds);
-        ASSERT_LT(0u, count);
-
-        camera_metadata_t *requestTmp = NULL;
-        EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
-                                                /*out*/&requestTmp));
-        ASSERT_NE((void*)NULL, requestTmp);
-        CameraMetadata request(requestTmp);
-
-        // set the output streams. default is empty
-
-        uint32_t tag = static_cast<uint32_t>(ANDROID_REQUEST_OUTPUT_STREAMS);
-        request.update(tag, streamIds, count);
-
-        requestTmp = request.release();
-
-        if (requestCount < 0) {
-            EXPECT_OK(mCamera->submitRequest(requestTmp, /*streaming*/true));
-        } else {
-            for (int i = 0; i < requestCount; ++i) {
-                EXPECT_OK(mCamera->submitRequest(requestTmp,
-                                                 /*streaming*/false));
-            }
-        }
-        request.acquire(requestTmp);
-    }
-};
-
-sp<Thread> ProCameraTest::mTestThread;
-
-TEST_F(ProCameraTest, AvailableFormats) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    CameraMetadata staticInfo = mCamera->getCameraInfo(CAMERA_ID);
-    ASSERT_FALSE(staticInfo.isEmpty());
-
-    uint32_t tag = static_cast<uint32_t>(ANDROID_SCALER_AVAILABLE_FORMATS);
-    EXPECT_TRUE(staticInfo.exists(tag));
-    camera_metadata_entry_t entry = staticInfo.find(tag);
-
-    EXPECT_TRUE(ExistsItem<int32_t>(HAL_PIXEL_FORMAT_YV12,
-                                                  entry.data.i32, entry.count));
-    EXPECT_TRUE(ExistsItem<int32_t>(HAL_PIXEL_FORMAT_YCrCb_420_SP,
-                                                  entry.data.i32, entry.count));
-}
-
-// test around exclusiveTryLock (immediate locking)
-TEST_F(ProCameraTest, LockingImmediate) {
-
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    mListener->SetEventMask(ProEvent_Mask(ACQUIRED) |
-                            ProEvent_Mask(STOLEN)   |
-                            ProEvent_Mask(RELEASED));
-
-    EXPECT_FALSE(mCamera->hasExclusiveLock());
-    EXPECT_EQ(OK, mCamera->exclusiveTryLock());
-    // at this point we definitely have the lock
-
-    EXPECT_EQ(OK, mListener->WaitForEvent());
-    EXPECT_EQ(ACQUIRED, mListener->ReadEvent());
-
-    EXPECT_TRUE(mCamera->hasExclusiveLock());
-    EXPECT_EQ(OK, mCamera->exclusiveUnlock());
-
-    EXPECT_EQ(OK, mListener->WaitForEvent());
-    EXPECT_EQ(RELEASED, mListener->ReadEvent());
-
-    EXPECT_FALSE(mCamera->hasExclusiveLock());
-}
-
-// test around exclusiveLock (locking at some future point in time)
-TEST_F(ProCameraTest, LockingAsynchronous) {
-
-    if (HasFatalFailure()) {
-        return;
-    }
-
-
-    mListener->SetEventMask(ProEvent_Mask(ACQUIRED) |
-                            ProEvent_Mask(STOLEN)   |
-                            ProEvent_Mask(RELEASED));
-
-    // TODO: Add another procamera that has a lock here.
-    // then we can be test that the lock wont immediately be acquired
-
-    EXPECT_FALSE(mCamera->hasExclusiveLock());
-    EXPECT_EQ(OK, mCamera->exclusiveTryLock());
-    // at this point we definitely have the lock
-
-    EXPECT_EQ(OK, mListener->WaitForEvent());
-    EXPECT_EQ(ACQUIRED, mListener->ReadEvent());
-
-    EXPECT_TRUE(mCamera->hasExclusiveLock());
-    EXPECT_EQ(OK, mCamera->exclusiveUnlock());
-
-    EXPECT_EQ(OK, mListener->WaitForEvent());
-    EXPECT_EQ(RELEASED, mListener->ReadEvent());
-
-    EXPECT_FALSE(mCamera->hasExclusiveLock());
-}
-
-// Stream directly to the screen.
-TEST_F(ProCameraTest, DISABLED_StreamingImageSingle) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    sp<Surface> surface;
-    if (mDisplaySecs > 0) {
-        createOnScreenSurface(/*out*/surface);
-    }
-    else {
-        dout << "Skipping, will not render to screen" << std::endl;
-        return;
-    }
-
-    int depthStreamId = -1;
-
-    sp<ServiceListener> listener = new ServiceListener();
-    EXPECT_OK(ProCamera::addServiceListener(listener));
-
-    ServiceListener::Status currentStatus;
-
-    // when subscribing a new listener,
-    // we immediately get a callback to the current status
-    while (listener->waitForStatusChange(/*out*/currentStatus) != OK);
-    EXPECT_EQ(ServiceListener::STATUS_PRESENT, currentStatus);
-
-    dout << "Will now stream and resume infinitely..." << std::endl;
-    while (true) {
-
-        if (currentStatus == ServiceListener::STATUS_PRESENT) {
-
-            ASSERT_OK(mCamera->createStream(mDisplayW, mDisplayH, mDisplayFmt,
-                                            surface,
-                                            &depthStreamId));
-            EXPECT_NE(-1, depthStreamId);
-
-            EXPECT_OK(mCamera->exclusiveTryLock());
-
-            int32_t streams[] = { depthStreamId };
-            ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(
-                                                 streams,
-                                                 /*count*/1));
-        }
-
-        ServiceListener::Status stat = ServiceListener::STATUS_UNKNOWN;
-
-        // TODO: maybe check for getch every once in a while?
-        while (listener->waitForStatusChange(/*out*/stat) != OK);
-
-        if (currentStatus != stat) {
-            if (stat == ServiceListener::STATUS_PRESENT) {
-                dout << "Reconnecting to camera" << std::endl;
-                mCamera = ProCamera::connect(CAMERA_ID);
-            } else if (stat == ServiceListener::STATUS_NOT_AVAILABLE) {
-                dout << "Disconnecting from camera" << std::endl;
-                mCamera->disconnect();
-            } else if (stat == ServiceListener::STATUS_NOT_PRESENT) {
-                dout << "Camera unplugged" << std::endl;
-                mCamera = NULL;
-            } else {
-                dout << "Unknown status change "
-                     << std::hex << stat << std::endl;
-            }
-
-            currentStatus = stat;
-        }
-    }
-
-    EXPECT_OK(ProCamera::removeServiceListener(listener));
-    EXPECT_OK(mCamera->deleteStream(depthStreamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-// Stream directly to the screen.
-TEST_F(ProCameraTest, DISABLED_StreamingImageDual) {
-    if (HasFatalFailure()) {
-        return;
-    }
-    sp<Surface> surface;
-    sp<Surface> depthSurface;
-    if (mDisplaySecs > 0) {
-        createOnScreenSurface(/*out*/surface);
-        createDepthOnScreenSurface(/*out*/depthSurface);
-    }
-
-    int streamId = -1;
-    EXPECT_OK(mCamera->createStream(/*width*/1280, /*height*/960,
-              TEST_FORMAT_MAIN, surface, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    int depthStreamId = -1;
-    EXPECT_OK(mCamera->createStream(/*width*/320, /*height*/240,
-              TEST_FORMAT_DEPTH, depthSurface, &depthStreamId));
-    EXPECT_NE(-1, depthStreamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-    /*
-    */
-    /* iterate in a loop submitting requests every frame.
-     *  what kind of requests doesnt really matter, just whatever.
-     */
-
-    // it would probably be better to use CameraMetadata from camera service.
-    camera_metadata_t *request = NULL;
-    EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
-              /*out*/&request));
-    EXPECT_NE((void*)NULL, request);
-
-    /*FIXME: dont need this later, at which point the above should become an
-             ASSERT_NE*/
-    if(request == NULL) request = allocate_camera_metadata(10, 100);
-
-    // set the output streams to just this stream ID
-
-    // wow what a verbose API.
-    int32_t allStreams[] = { streamId, depthStreamId };
-    // IMPORTANT. bad things will happen if its not a uint8.
-    size_t streamCount = sizeof(allStreams) / sizeof(allStreams[0]);
-    camera_metadata_entry_t entry;
-    uint32_t tag = static_cast<uint32_t>(ANDROID_REQUEST_OUTPUT_STREAMS);
-    int find = find_camera_metadata_entry(request, tag, &entry);
-    if (find == -ENOENT) {
-        if (add_camera_metadata_entry(request, tag, &allStreams,
-                                      /*data_count*/streamCount) != OK) {
-            camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000);
-            ASSERT_OK(append_camera_metadata(tmp, request));
-            free_camera_metadata(request);
-            request = tmp;
-
-            ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams,
-                                                /*data_count*/streamCount));
-        }
-    } else {
-        ASSERT_OK(update_camera_metadata_entry(request, entry.index,
-                  &allStreams, /*data_count*/streamCount, &entry));
-    }
-
-    EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true));
-
-    dout << "will sleep now for " << mDisplaySecs << std::endl;
-    sleep(mDisplaySecs);
-
-    free_camera_metadata(request);
-
-    for (size_t i = 0; i < streamCount; ++i) {
-        EXPECT_OK(mCamera->deleteStream(allStreams[i]));
-    }
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-TEST_F(ProCameraTest, CpuConsumerSingle) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    mListener->SetEventMask(ProEvent_Mask(ACQUIRED) |
-                            ProEvent_Mask(STOLEN)   |
-                            ProEvent_Mask(RELEASED) |
-                            ProEvent_Mask(FRAME_RECEIVED));
-    mListener->SetDropFrames(true);
-
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/320, /*height*/240,
-                TEST_FORMAT_DEPTH, TEST_CPU_HEAP_COUNT, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-    EXPECT_EQ(OK, mListener->WaitForEvent());
-    EXPECT_EQ(ACQUIRED, mListener->ReadEvent());
-    /* iterate in a loop submitting requests every frame.
-     *  what kind of requests doesnt really matter, just whatever.
-     */
-
-    // it would probably be better to use CameraMetadata from camera service.
-    camera_metadata_t *request = NULL;
-    EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
-        /*out*/&request));
-    EXPECT_NE((void*)NULL, request);
-
-    /*FIXME: dont need this later, at which point the above should become an
-      ASSERT_NE*/
-    if(request == NULL) request = allocate_camera_metadata(10, 100);
-
-    // set the output streams to just this stream ID
-
-    int32_t allStreams[] = { streamId };
-    camera_metadata_entry_t entry;
-    uint32_t tag = static_cast<uint32_t>(ANDROID_REQUEST_OUTPUT_STREAMS);
-    int find = find_camera_metadata_entry(request, tag, &entry);
-    if (find == -ENOENT) {
-        if (add_camera_metadata_entry(request, tag, &allStreams,
-                /*data_count*/1) != OK) {
-            camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000);
-            ASSERT_OK(append_camera_metadata(tmp, request));
-            free_camera_metadata(request);
-            request = tmp;
-
-            ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams,
-                /*data_count*/1));
-        }
-    } else {
-        ASSERT_OK(update_camera_metadata_entry(request, entry.index,
-            &allStreams, /*data_count*/1, &entry));
-    }
-
-    EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true));
-
-    // Consume a couple of frames
-    for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) {
-        EXPECT_EQ(OK, mListener->WaitForEvent());
-        EXPECT_EQ(FRAME_RECEIVED, mListener->ReadEvent());
-    }
-
-    // Done: clean up
-    free_camera_metadata(request);
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-TEST_F(ProCameraTest, CpuConsumerDual) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    mListener->SetEventMask(ProEvent_Mask(FRAME_RECEIVED));
-    mListener->SetDropFrames(true);
-
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960,
-                TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    int depthStreamId = -1;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/320, /*height*/240,
-            TEST_FORMAT_DEPTH, TEST_CPU_HEAP_COUNT, &consumer, &depthStreamId));
-    EXPECT_NE(-1, depthStreamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-    /*
-    */
-    /* iterate in a loop submitting requests every frame.
-     *  what kind of requests doesnt really matter, just whatever.
-     */
-
-    // it would probably be better to use CameraMetadata from camera service.
-    camera_metadata_t *request = NULL;
-    EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
-                                            /*out*/&request));
-    EXPECT_NE((void*)NULL, request);
-
-    if(request == NULL) request = allocate_camera_metadata(10, 100);
-
-    // set the output streams to just this stream ID
-
-    // wow what a verbose API.
-    int32_t allStreams[] = { streamId, depthStreamId };
-    size_t streamCount = 2;
-    camera_metadata_entry_t entry;
-    uint32_t tag = static_cast<uint32_t>(ANDROID_REQUEST_OUTPUT_STREAMS);
-    int find = find_camera_metadata_entry(request, tag, &entry);
-    if (find == -ENOENT) {
-        if (add_camera_metadata_entry(request, tag, &allStreams,
-                                      /*data_count*/streamCount) != OK) {
-            camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000);
-            ASSERT_OK(append_camera_metadata(tmp, request));
-            free_camera_metadata(request);
-            request = tmp;
-
-            ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams,
-                                                   /*data_count*/streamCount));
-        }
-    } else {
-        ASSERT_OK(update_camera_metadata_entry(request, entry.index,
-                              &allStreams, /*data_count*/streamCount, &entry));
-    }
-
-    EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true));
-
-    // Consume a couple of frames
-    for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) {
-        // stream id 1
-        EXPECT_EQ(OK, mListener->WaitForEvent());
-        EXPECT_EQ(FRAME_RECEIVED, mListener->ReadEvent());
-
-        // stream id 2
-        EXPECT_EQ(OK, mListener->WaitForEvent());
-        EXPECT_EQ(FRAME_RECEIVED, mListener->ReadEvent());
-
-        //TODO: events should be a struct with some data like the stream id
-    }
-
-    // Done: clean up
-    free_camera_metadata(request);
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-TEST_F(ProCameraTest, ResultReceiver) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    mListener->SetEventMask(ProEvent_Mask(RESULT_RECEIVED));
-    mListener->SetDropFrames(true);
-    //FIXME: if this is run right after the previous test we get FRAME_RECEIVED
-    // need to filter out events at read time
-
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960,
-                TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-    /*
-    */
-    /* iterate in a loop submitting requests every frame.
-     *  what kind of requests doesnt really matter, just whatever.
-     */
-
-    camera_metadata_t *request = NULL;
-    EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
-                                            /*out*/&request));
-    EXPECT_NE((void*)NULL, request);
-
-    /*FIXME*/
-    if(request == NULL) request = allocate_camera_metadata(10, 100);
-
-    // set the output streams to just this stream ID
-
-    int32_t allStreams[] = { streamId };
-    size_t streamCount = 1;
-    camera_metadata_entry_t entry;
-    uint32_t tag = static_cast<uint32_t>(ANDROID_REQUEST_OUTPUT_STREAMS);
-    int find = find_camera_metadata_entry(request, tag, &entry);
-    if (find == -ENOENT) {
-        if (add_camera_metadata_entry(request, tag, &allStreams,
-                                      /*data_count*/streamCount) != OK) {
-            camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000);
-            ASSERT_OK(append_camera_metadata(tmp, request));
-            free_camera_metadata(request);
-            request = tmp;
-
-            ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams,
-                                                /*data_count*/streamCount));
-        }
-    } else {
-        ASSERT_OK(update_camera_metadata_entry(request, entry.index,
-                               &allStreams, /*data_count*/streamCount, &entry));
-    }
-
-    EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true));
-
-    // Consume a couple of results
-    for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) {
-        EXPECT_EQ(OK, mListener->WaitForEvent());
-        EXPECT_EQ(RESULT_RECEIVED, mListener->ReadEvent());
-    }
-
-    // Done: clean up
-    free_camera_metadata(request);
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-// FIXME: This is racy and sometimes fails on waitForFrameMetadata
-TEST_F(ProCameraTest, DISABLED_WaitForResult) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    mListener->SetDropFrames(true);
-
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960,
-                 TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-
-    int32_t streams[] = { streamId };
-    ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1));
-
-    // Consume a couple of results
-    for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) {
-        EXPECT_OK(mCamera->waitForFrameMetadata());
-        CameraMetadata meta = mCamera->consumeFrameMetadata();
-        EXPECT_FALSE(meta.isEmpty());
-    }
-
-    // Done: clean up
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-TEST_F(ProCameraTest, WaitForSingleStreamBuffer) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960,
-                  TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-
-    int32_t streams[] = { streamId };
-    ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1,
-                                            /*requests*/TEST_CPU_FRAME_COUNT));
-
-    // Consume a couple of results
-    for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) {
-        EXPECT_EQ(1, mCamera->waitForFrameBuffer(streamId));
-
-        CpuConsumer::LockedBuffer buf;
-        EXPECT_OK(consumer->lockNextBuffer(&buf));
-
-        dout << "Buffer synchronously received on streamId = " << streamId <<
-                ", dataPtr = " << (void*)buf.data <<
-                ", timestamp = " << buf.timestamp << std::endl;
-
-        EXPECT_OK(consumer->unlockBuffer(buf));
-    }
-
-    // Done: clean up
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-// FIXME: This is racy and sometimes fails on waitForFrameMetadata
-TEST_F(ProCameraTest, DISABLED_WaitForDualStreamBuffer) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    const int REQUEST_COUNT = TEST_CPU_FRAME_COUNT * 10;
-
-    // 15 fps
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960,
-                 TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    // 30 fps
-    int depthStreamId = -1;
-    sp<CpuConsumer> depthConsumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/320, /*height*/240,
-       TEST_FORMAT_DEPTH, TEST_CPU_HEAP_COUNT, &depthConsumer, &depthStreamId));
-    EXPECT_NE(-1, depthStreamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-
-    int32_t streams[] = { streamId, depthStreamId };
-    ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/2,
-                                                    /*requests*/REQUEST_COUNT));
-
-    int depthFrames = 0;
-    int greyFrames = 0;
-
-    // Consume two frames simultaneously. Unsynchronized by timestamps.
-    for (int i = 0; i < REQUEST_COUNT; ++i) {
-
-        // Exhaust event queue so it doesn't keep growing
-        while (mListener->ReadEvent() != UNKNOWN);
-
-        // Get the metadata
-        EXPECT_OK(mCamera->waitForFrameMetadata());
-        CameraMetadata meta = mCamera->consumeFrameMetadata();
-        EXPECT_FALSE(meta.isEmpty());
-
-        // Get the buffers
-
-        EXPECT_EQ(1, mCamera->waitForFrameBuffer(depthStreamId));
-
-        /**
-          * Guaranteed to be able to consume the depth frame,
-          * since we waited on it.
-          */
-        CpuConsumer::LockedBuffer depthBuffer;
-        EXPECT_OK(depthConsumer->lockNextBuffer(&depthBuffer));
-
-        dout << "Depth Buffer synchronously received on streamId = " <<
-                streamId <<
-                ", dataPtr = " << (void*)depthBuffer.data <<
-                ", timestamp = " << depthBuffer.timestamp << std::endl;
-
-        EXPECT_OK(depthConsumer->unlockBuffer(depthBuffer));
-
-        depthFrames++;
-
-
-        /** Consume Greyscale frames if there are any.
-          * There may not be since it runs at half FPS */
-        CpuConsumer::LockedBuffer greyBuffer;
-        while (consumer->lockNextBuffer(&greyBuffer) == OK) {
-
-            dout << "GRAY Buffer synchronously received on streamId = " <<
-                streamId <<
-                ", dataPtr = " << (void*)greyBuffer.data <<
-                ", timestamp = " << greyBuffer.timestamp << std::endl;
-
-            EXPECT_OK(consumer->unlockBuffer(greyBuffer));
-
-            greyFrames++;
-        }
-    }
-
-    dout << "Done, summary: depth frames " << std::dec << depthFrames
-         << ", grey frames " << std::dec << greyFrames << std::endl;
-
-    // Done: clean up
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-TEST_F(ProCameraTest, WaitForSingleStreamBufferAndDropFramesSync) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    const int NUM_REQUESTS = 20 * TEST_CPU_FRAME_COUNT;
-
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960,
-                  TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT,
-                  /*synchronousMode*/true, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-
-    int32_t streams[] = { streamId };
-    ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1,
-                                                     /*requests*/NUM_REQUESTS));
-
-    // Consume a couple of results
-    for (int i = 0; i < NUM_REQUESTS; ++i) {
-        int numFrames;
-        EXPECT_TRUE((numFrames = mCamera->waitForFrameBuffer(streamId)) > 0);
-
-        // Drop all but the newest framebuffer
-        EXPECT_EQ(numFrames-1, mCamera->dropFrameBuffer(streamId, numFrames-1));
-
-        dout << "Dropped " << (numFrames - 1) << " frames" << std::endl;
-
-        // Skip the counter ahead, don't try to consume these frames again
-        i += numFrames-1;
-
-        // "Consume" the buffer
-        CpuConsumer::LockedBuffer buf;
-        EXPECT_OK(consumer->lockNextBuffer(&buf));
-
-        dout << "Buffer synchronously received on streamId = " << streamId <<
-                ", dataPtr = " << (void*)buf.data <<
-                ", timestamp = " << buf.timestamp << std::endl;
-
-        // Process at 10fps, stream is at 15fps.
-        // This means we will definitely fill up the buffer queue with
-        // extra buffers and need to drop them.
-        usleep(TEST_FRAME_PROCESSING_DELAY_US);
-
-        EXPECT_OK(consumer->unlockBuffer(buf));
-    }
-
-    // Done: clean up
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-TEST_F(ProCameraTest, WaitForSingleStreamBufferAndDropFramesAsync) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    const int NUM_REQUESTS = 20 * TEST_CPU_FRAME_COUNT;
-
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960,
-                  TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT,
-                  /*synchronousMode*/false, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-
-    int32_t streams[] = { streamId };
-    ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1,
-                                                     /*requests*/NUM_REQUESTS));
-
-    uint64_t lastFrameNumber = 0;
-    int numFrames;
-
-    // Consume a couple of results
-    int i;
-    for (i = 0; i < NUM_REQUESTS && lastFrameNumber < NUM_REQUESTS; ++i) {
-        EXPECT_LT(0, (numFrames = mCamera->waitForFrameBuffer(streamId)));
-
-        dout << "Dropped " << (numFrames - 1) << " frames" << std::endl;
-
-        // Skip the counter ahead, don't try to consume these frames again
-        i += numFrames-1;
-
-        // "Consume" the buffer
-        CpuConsumer::LockedBuffer buf;
-
-        EXPECT_EQ(OK, consumer->lockNextBuffer(&buf));
-
-        lastFrameNumber = buf.frameNumber;
-
-        dout << "Buffer asynchronously received on streamId = " << streamId <<
-                ", dataPtr = " << (void*)buf.data <<
-                ", timestamp = " << buf.timestamp <<
-                ", framenumber = " << buf.frameNumber << std::endl;
-
-        // Process at 10fps, stream is at 15fps.
-        // This means we will definitely fill up the buffer queue with
-        // extra buffers and need to drop them.
-        usleep(TEST_FRAME_PROCESSING_DELAY_US);
-
-        EXPECT_OK(consumer->unlockBuffer(buf));
-    }
-
-    dout << "Done after " << i << " iterations " << std::endl;
-
-    // Done: clean up
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-
-
-//TODO: refactor into separate file
-TEST_F(ProCameraTest, ServiceListenersSubscribe) {
-
-    ASSERT_EQ(4u, sizeof(ServiceListener::Status));
-
-    sp<ServiceListener> listener = new ServiceListener();
-
-    EXPECT_EQ(BAD_VALUE, ProCamera::removeServiceListener(listener));
-    EXPECT_OK(ProCamera::addServiceListener(listener));
-
-    EXPECT_EQ(ALREADY_EXISTS, ProCamera::addServiceListener(listener));
-    EXPECT_OK(ProCamera::removeServiceListener(listener));
-
-    EXPECT_EQ(BAD_VALUE, ProCamera::removeServiceListener(listener));
-}
-
-//TODO: refactor into separate file
-TEST_F(ProCameraTest, ServiceListenersFunctional) {
-
-    sp<ServiceListener> listener = new ServiceListener();
-
-    EXPECT_OK(ProCamera::addServiceListener(listener));
-
-    sp<Camera> cam = Camera::connect(CAMERA_ID,
-                                     /*clientPackageName*/String16(),
-                                     -1);
-    EXPECT_NE((void*)NULL, cam.get());
-
-    ServiceListener::Status stat = ServiceListener::STATUS_UNKNOWN;
-    EXPECT_OK(listener->waitForStatusChange(/*out*/stat));
-
-    EXPECT_EQ(ServiceListener::STATUS_NOT_AVAILABLE, stat);
-
-    if (cam.get()) {
-        cam->disconnect();
-    }
-
-    EXPECT_OK(listener->waitForStatusChange(/*out*/stat));
-    EXPECT_EQ(ServiceListener::STATUS_PRESENT, stat);
-
-    EXPECT_OK(ProCamera::removeServiceListener(listener));
-}
-
-
-
-}
-}
-}
-}
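Note: the tests deleted above all build their capture requests the same way: fetch a template, write the output stream IDs into ANDROID_REQUEST_OUTPUT_STREAMS, and submit. A condensed sketch of that pattern using the CameraMetadata wrapper, for reference only; `device` and `streamId` are placeholders for a client exposing the createDefaultRequest()/submitRequest() calls the removed ProCamera interface had.

    // Sketch only: `device` stands in for a client with the createDefaultRequest()/
    // submitRequest() calls used by the deleted tests; `streamId` was returned by createStream*().
    camera_metadata_t* raw = NULL;
    device->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, /*out*/&raw);
    CameraMetadata request(raw);                  // wrapper takes ownership of `raw`

    int32_t streams[] = { streamId };
    request.update(ANDROID_REQUEST_OUTPUT_STREAMS, streams,
                   sizeof(streams) / sizeof(streams[0]));

    raw = request.release();                      // hand the buffer to the binder call
    device->submitRequest(raw, /*streaming*/true);
    request.acquire(raw);                         // reclaim so the wrapper frees it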
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
index 96fca94..6b8c772 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
@@ -48,12 +48,13 @@
         KeyType keyType,
         const KeyedVector<String8, String8>& optionalParameters,
         Vector<uint8_t>& request,
-        String8& defaultUrl) {
+        String8& defaultUrl,
+        DrmPlugin::KeyRequestType *keyRequestType) {
     UNUSED(optionalParameters);
     if (keyType != kKeyType_Streaming) {
         return android::ERROR_DRM_CANNOT_HANDLE;
     }
-
+    *keyRequestType = DrmPlugin::kKeyRequestType_Initial;
     sp<Session> session = mSessionLibrary->findSession(scope);
     defaultUrl.clear();
     return session->getKeyRequest(initData, initDataType, &request);
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
index 6139f1f..ba4aefe 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
@@ -54,7 +54,8 @@
             KeyType keyType,
             const KeyedVector<String8, String8>& optionalParameters,
             Vector<uint8_t>& request,
-            String8& defaultUrl);
+            String8& defaultUrl,
+            DrmPlugin::KeyRequestType *keyRequestType);
 
     virtual status_t provideKeyResponse(
             const Vector<uint8_t>& scope,
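Note: getKeyRequest() now reports the key request type through an extra out-parameter, so callers must pass storage for it. A hedged sketch of the updated call; the leading arguments are taken on trust from the surrounding declarations, and `plugin`, `scope`, `initData`, `initDataType` and `optionalParameters` are whatever the caller already had.

    // Sketch: calling the clearkey plugin with the extended signature.
    Vector<uint8_t> request;
    String8 defaultUrl;
    DrmPlugin::KeyRequestType keyRequestType;
    status_t err = plugin->getKeyRequest(scope, initData, initDataType, kKeyType_Streaming,
                                         optionalParameters, request, defaultUrl,
                                         &keyRequestType);
    if (err == OK && keyRequestType == DrmPlugin::kKeyRequestType_Initial) {
        // clearkey always reports an initial request for streaming keys (see DrmPlugin.cpp above)
    }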
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
index 7eac0a1..9b786c5 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
@@ -111,7 +111,8 @@
                                           Vector<uint8_t> const &initData,
                                           String8 const &mimeType, KeyType keyType,
                                           KeyedVector<String8, String8> const &optionalParameters,
-                                          Vector<uint8_t> &request, String8 &defaultUrl)
+                                          Vector<uint8_t> &request, String8 &defaultUrl,
+                                          KeyRequestType *keyRequestType)
     {
         Mutex::Autolock lock(mLock);
         ALOGD("MockDrmPlugin::getKeyRequest(sessionId=%s, initData=%s, mimeType=%s"
@@ -149,6 +150,7 @@
         // Properties used in mock test, set by cts test app returned from mock plugin
         //   byte[] mock-request       -> request
         //   string mock-default-url   -> defaultUrl
+        //   string mock-keyRequestType -> keyRequestType
 
         index = mByteArrayProperties.indexOfKey(String8("mock-request"));
         if (index < 0) {
@@ -165,6 +167,16 @@
         } else {
             defaultUrl = mStringProperties.valueAt(index);
         }
+
+        index = mStringProperties.indexOfKey(String8("mock-keyRequestType"));
+        if (index < 0) {
+            ALOGD("Missing 'mock-keyRequestType' parameter for mock");
+            return BAD_VALUE;
+        } else {
+            *keyRequestType = static_cast<KeyRequestType>(
+                atoi(mStringProperties.valueAt(index).string()));
+        }
+
         return OK;
     }
 
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
index d1d8058..d0f2ddb 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
@@ -62,7 +62,8 @@
                                Vector<uint8_t> const &initData,
                                String8 const &mimeType, KeyType keyType,
                                KeyedVector<String8, String8> const &optionalParameters,
-                               Vector<uint8_t> &request, String8 &defaultUrl);
+                               Vector<uint8_t> &request, String8 &defaultUrl,
+                               KeyRequestType *keyRequestType);
 
         status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
                                     Vector<uint8_t> const &response,
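Note: the mock plugin derives its answer from a string property, so a test has to seed "mock-keyRequestType" (alongside the existing "mock-request" and "mock-default-url" properties) before calling getKeyRequest(). A sketch, assuming the value is written through setPropertyString() like the other mock string properties.

    // Sketch: the mock atoi()'s the string and casts it to KeyRequestType (see the .cpp above),
    // so the test writes the numeric enum value it expects to read back.
    plugin->setPropertyString(String8("mock-keyRequestType"), String8("1"));
    // A subsequent getKeyRequest() call then returns static_cast<KeyRequestType>(1)
    // through its keyRequestType out-parameter.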
diff --git a/include/camera/CameraMetadata.h b/include/camera/CameraMetadata.h
index 1254d3c..953d711 100644
--- a/include/camera/CameraMetadata.h
+++ b/include/camera/CameraMetadata.h
@@ -56,7 +56,7 @@
      * thread-safety, it simply prevents the camera_metadata_t pointer returned
      * here from being accidentally invalidated by CameraMetadata operations.
      */
-    const camera_metadata_t* getAndLock();
+    const camera_metadata_t* getAndLock() const;
 
     /**
      * Unlock the CameraMetadata for use again. After this unlock, the pointer
@@ -208,7 +208,7 @@
 
   private:
     camera_metadata_t *mBuffer;
-    bool               mLocked;
+    mutable bool       mLocked;
 
     /**
      * Check if tag has a given type
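Note: making getAndLock() const (with mLocked now mutable) lets read-only helpers take a const CameraMetadata reference. A minimal sketch; it assumes unlock() is const-callable in this tree, which is not shown in the hunk above.

    // Sketch: a helper that only reads metadata can now accept a const reference.
    void logEntryCount(const CameraMetadata& meta) {
        const camera_metadata_t* raw = meta.getAndLock();   // legal on a const object now
        size_t count = get_camera_metadata_entry_count(raw);
        ALOGV("metadata has %zu entries", count);
        // getAndLock() must still be paired with unlock(); if unlock() is not const in
        // this tree, take a non-const reference instead.
        meta.unlock(raw);
    }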
diff --git a/include/camera/CameraParameters.h b/include/camera/CameraParameters.h
index c6074fc..ba33ffe 100644
--- a/include/camera/CameraParameters.h
+++ b/include/camera/CameraParameters.h
@@ -108,6 +108,9 @@
      */
     void getSupportedPreviewFormats(Vector<int>& formats) const;
 
+    // Returns true if no keys are present
+    bool isEmpty() const;
+
     // Parameter keys to communicate between camera application and driver.
     // The access (read/write, read only, or write only) is viewed from the
     // perspective of applications, not driver.
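Note: isEmpty() gives callers a cheap way to tell whether any parameters have been set or unflattened yet. A trivial sketch:

    // Sketch: skip validation or forwarding when the parameter map has no keys yet.
    bool haveClientSettings(const CameraParameters& params) {
        return !params.isEmpty();
    }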
diff --git a/include/camera/ICameraService.h b/include/camera/ICameraService.h
index 194a646..cad275e 100644
--- a/include/camera/ICameraService.h
+++ b/include/camera/ICameraService.h
@@ -25,8 +25,6 @@
 
 class ICamera;
 class ICameraClient;
-class IProCameraUser;
-class IProCameraCallbacks;
 class ICameraServiceListener;
 class ICameraDeviceUser;
 class ICameraDeviceCallbacks;
@@ -44,7 +42,6 @@
         GET_NUMBER_OF_CAMERAS = IBinder::FIRST_CALL_TRANSACTION,
         GET_CAMERA_INFO,
         CONNECT,
-        CONNECT_PRO,
         CONNECT_DEVICE,
         ADD_LISTENER,
         REMOVE_LISTENER,
@@ -54,6 +51,7 @@
         SUPPORTS_CAMERA_API,
         CONNECT_LEGACY,
         SET_TORCH_MODE,
+        NOTIFY_SYSTEM_EVENT,
     };
 
     enum {
@@ -67,7 +65,18 @@
 
     enum {
         CAMERA_HAL_API_VERSION_UNSPECIFIED = -1
-      };
+    };
+
+    /**
+     * Keep up-to-date with declarations in
+     * frameworks/base/services/core/java/com/android/server/camera/CameraService.java
+     *
+     * These event codes are intended to be used with the notifySystemEvent call.
+     */
+    enum {
+        NO_EVENT = 0,
+        USER_SWITCHED,
+    };
 
 public:
     DECLARE_META_INTERFACE(CameraService);
@@ -105,13 +114,6 @@
             /*out*/
             sp<ICamera>& device) = 0;
 
-    virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb,
-            int cameraId,
-            const String16& clientPackageName,
-            int clientUid,
-            /*out*/
-            sp<IProCameraUser>& device) = 0;
-
     virtual status_t connectDevice(
             const sp<ICameraDeviceCallbacks>& cameraCb,
             int cameraId,
@@ -158,6 +160,11 @@
      */
     virtual status_t setTorchMode(const String16& cameraId, bool enabled,
             const sp<IBinder>& clientBinder) = 0;
+
+    /**
+     * Notify the camera service of a system event.  Should only be called from system_server.
+     */
+    virtual void notifySystemEvent(int eventId, int arg0) = 0;
 };
 
 // ----------------------------------------------------------------------------
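Note: notifySystemEvent() is meant to be driven from system_server together with the new event codes. A hedged native-side sketch of a caller; the real caller is the Java CameraService referenced in the comment above, and `newUserId` is a placeholder.

    // Sketch: forwarding a user switch to the camera service over binder.
    sp<IBinder> binder = defaultServiceManager()->getService(String16("media.camera"));
    sp<ICameraService> cameraService = interface_cast<ICameraService>(binder);
    if (cameraService != NULL) {
        cameraService->notifySystemEvent(ICameraService::USER_SWITCHED, newUserId /*arg0*/);
    }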
diff --git a/include/camera/IProCameraCallbacks.h b/include/camera/IProCameraCallbacks.h
deleted file mode 100644
index e8abb89..0000000
--- a/include/camera/IProCameraCallbacks.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_IPROCAMERA_CALLBACKS_H
-#define ANDROID_HARDWARE_IPROCAMERA_CALLBACKS_H
-
-#include <utils/RefBase.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-#include <binder/IMemory.h>
-#include <utils/Timers.h>
-#include <system/camera.h>
-
-struct camera_metadata;
-
-namespace android {
-
-class IProCameraCallbacks : public IInterface
-{
-    /**
-     * Keep up-to-date with IProCameraCallbacks.aidl in frameworks/base
-     */
-public:
-    DECLARE_META_INTERFACE(ProCameraCallbacks);
-
-    virtual void            notifyCallback(int32_t msgType,
-                                           int32_t ext1,
-                                           int32_t ext2) = 0;
-
-    enum LockStatus {
-        LOCK_ACQUIRED,
-        LOCK_RELEASED,
-        LOCK_STOLEN,
-    };
-
-    virtual void            onLockStatusChanged(LockStatus newLockStatus) = 0;
-
-    /** Missing by design: implementation is client-side in ProCamera.cpp **/
-    // virtual void onBufferReceived(int streamId,
-    //                               const CpuConsumer::LockedBufer& buf);
-    virtual void            onResultReceived(int32_t requestId,
-                                             camera_metadata* result) = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnProCameraCallbacks : public BnInterface<IProCameraCallbacks>
-{
-public:
-    virtual status_t    onTransact( uint32_t code,
-                                    const Parcel& data,
-                                    Parcel* reply,
-                                    uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif
diff --git a/include/camera/IProCameraUser.h b/include/camera/IProCameraUser.h
deleted file mode 100644
index 2ccc4d2..0000000
--- a/include/camera/IProCameraUser.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_IPROCAMERAUSER_H
-#define ANDROID_HARDWARE_IPROCAMERAUSER_H
-
-#include <utils/RefBase.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-#include <binder/IMemory.h>
-#include <utils/String8.h>
-#include <camera/IProCameraCallbacks.h>
-
-struct camera_metadata;
-
-namespace android {
-
-class IProCameraUserClient;
-class IGraphicBufferProducer;
-class Surface;
-
-class IProCameraUser: public IInterface
-{
-    /**
-     * Keep up-to-date with IProCameraUser.aidl in frameworks/base
-     */
-public:
-    DECLARE_META_INTERFACE(ProCameraUser);
-
-    virtual void            disconnect() = 0;
-
-    // connect to the service, given a callbacks listener
-    virtual status_t        connect(const sp<IProCameraCallbacks>& callbacks)
-                                                                            = 0;
-
-    /**
-     * Locking
-     **/
-    virtual status_t        exclusiveTryLock() = 0;
-    virtual status_t        exclusiveLock() = 0;
-    virtual status_t        exclusiveUnlock() = 0;
-
-    virtual bool            hasExclusiveLock() = 0;
-
-    /**
-     * Request Handling
-     **/
-
-    // Note that the callee gets a copy of the metadata.
-    virtual int             submitRequest(struct camera_metadata* metadata,
-                                          bool streaming = false) = 0;
-    virtual status_t        cancelRequest(int requestId) = 0;
-
-    virtual status_t        deleteStream(int streamId) = 0;
-    virtual status_t        createStream(
-                                      int width, int height, int format,
-                                      const sp<IGraphicBufferProducer>& bufferProducer,
-                                      /*out*/
-                                      int* streamId) = 0;
-
-    // Create a request object from a template.
-    virtual status_t        createDefaultRequest(int templateId,
-                                                 /*out*/
-                                                 camera_metadata** request)
-                                                                           = 0;
-
-    // Get static camera metadata
-    virtual status_t        getCameraInfo(int cameraId,
-                                          /*out*/
-                                          camera_metadata** info) = 0;
-
-};
-
-// ----------------------------------------------------------------------------
-
-class BnProCameraUser: public BnInterface<IProCameraUser>
-{
-public:
-    virtual status_t    onTransact( uint32_t code,
-                                    const Parcel& data,
-                                    Parcel* reply,
-                                    uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif
diff --git a/include/camera/ProCamera.h b/include/camera/ProCamera.h
deleted file mode 100644
index e9b687a..0000000
--- a/include/camera/ProCamera.h
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_PRO_CAMERA_H
-#define ANDROID_HARDWARE_PRO_CAMERA_H
-
-#include <utils/Timers.h>
-#include <utils/KeyedVector.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <system/camera.h>
-#include <camera/IProCameraCallbacks.h>
-#include <camera/IProCameraUser.h>
-#include <camera/Camera.h>
-#include <camera/CameraMetadata.h>
-#include <camera/ICameraService.h>
-#include <gui/CpuConsumer.h>
-
-#include <gui/Surface.h>
-
-#include <utils/Condition.h>
-#include <utils/Mutex.h>
-
-#include <camera/CameraBase.h>
-
-struct camera_metadata;
-
-namespace android {
-
-// All callbacks on this class are concurrent
-// (they come from separate threads)
-class ProCameraListener : virtual public RefBase
-{
-public:
-    virtual void notify(int32_t msgType, int32_t ext1, int32_t ext2) = 0;
-
-    // Lock has been acquired. Write operations now available.
-    virtual void onLockAcquired() = 0;
-    // Lock has been released with exclusiveUnlock.
-    virtual void onLockReleased() = 0;
-    // Lock has been stolen by another client.
-    virtual void onLockStolen() = 0;
-
-    // Lock free.
-    virtual void onTriggerNotify(int32_t msgType, int32_t ext1, int32_t ext2)
-                                                                            = 0;
-    // onFrameAvailable and OnResultReceived can come in with any order,
-    // use android.sensor.timestamp and LockedBuffer.timestamp to correlate them
-
-    /**
-      * A new metadata buffer has been received.
-      * -- Ownership of request passes on to the callee, free with
-      *    free_camera_metadata.
-      */
-    virtual void onResultReceived(int32_t frameId, camera_metadata* result) = 0;
-
-    // TODO: make onFrameAvailable pure virtual
-
-    // A new frame buffer has been received for this stream.
-    // -- This callback only fires for createStreamCpu streams
-    // -- A buffer may be obtained by calling cpuConsumer->lockNextBuffer
-    // -- Use buf.timestamp to correlate with result's android.sensor.timestamp
-    // -- The buffer should be accessed with CpuConsumer::lockNextBuffer
-    //      and CpuConsumer::unlockBuffer
-    virtual void onFrameAvailable(int /*streamId*/,
-                                  const sp<CpuConsumer>& /*cpuConsumer*/) {
-    }
-
-};
-
-class ProCamera;
-
-template <>
-struct CameraTraits<ProCamera>
-{
-    typedef ProCameraListener     TCamListener;
-    typedef IProCameraUser        TCamUser;
-    typedef IProCameraCallbacks   TCamCallbacks;
-    typedef status_t (ICameraService::*TCamConnectService)(const sp<IProCameraCallbacks>&,
-                                                           int, const String16&, int,
-                                                           /*out*/
-                                                           sp<IProCameraUser>&);
-    static TCamConnectService     fnConnectService;
-};
-
-
-class ProCamera :
-    public CameraBase<ProCamera>,
-    public BnProCameraCallbacks
-{
-public:
-    /**
-     * Connect a shared camera. By default access is restricted to read only
-     * (Lock free) operations. To be able to submit custom requests a lock needs
-     * to be acquired with exclusive[Try]Lock.
-     */
-    static sp<ProCamera> connect(int cameraId);
-    virtual ~ProCamera();
-
-    /**
-     * Exclusive Locks:
-     * - We may request exclusive access to a camera if no other
-     *   clients are using the camera. This works as a traditional
-     *   client, writing/reading any camera state.
-     * - An application opening the camera (a regular 'Camera') will
-     *   always steal away the exclusive lock from a ProCamera,
-     *   this will call onLockReleased.
-     * - onLockAcquired will be called again once it is possible
-     *   to again exclusively lock the camera.
-     *
-     */
-
-    /**
-     * All exclusiveLock/unlock functions are asynchronous. The remote endpoint
-     * shall not block while waiting to acquire the lock. Instead the lock
-     * notifications will come in asynchronously on the listener.
-     */
-
-    /**
-      * Attempt to acquire the lock instantly (non-blocking)
-      * - If this succeeds, you do not need to wait for onLockAcquired
-      *   but the event will still be fired
-      *
-      * Returns -EBUSY if already locked. 0 on success.
-      */
-    status_t exclusiveTryLock();
-    // always returns 0. wait for onLockAcquired before lock is acquired.
-    status_t exclusiveLock();
-    // release a lock if we have one, or cancel the lock request.
-    status_t exclusiveUnlock();
-
-    // exclusive lock = do whatever we want. no lock = read only.
-    bool hasExclusiveLock();
-
-    /**
-     * < 0 error, >= 0 the request ID. streaming to have the request repeat
-     *    until cancelled.
-     * The request queue is flushed when a lock is released or stolen
-     *    if not locked will return PERMISSION_DENIED
-     */
-    int submitRequest(const struct camera_metadata* metadata,
-                                                        bool streaming = false);
-    // if not locked will return PERMISSION_DENIED, BAD_VALUE if requestId bad
-    status_t cancelRequest(int requestId);
-
-    /**
-     * Ask for a stream to be enabled.
-     * Lock free. Service maintains counter of streams.
-     */
-    status_t requestStream(int streamId);
-// TODO: remove requestStream, its useless.
-
-    /**
-      * Delete a stream.
-      * Lock free.
-      *
-      * NOTE: As a side effect this cancels ALL streaming requests.
-      *
-      * Errors: BAD_VALUE if unknown stream ID.
-      *         PERMISSION_DENIED if the stream wasn't yours
-      */
-    status_t deleteStream(int streamId);
-
-    /**
-      * Create a new HW stream, whose sink will be the window.
-      * Lock free. Service maintains counter of streams.
-      * Errors: -EBUSY if too many streams created
-      */
-    status_t createStream(int width, int height, int format,
-                          const sp<Surface>& surface,
-                          /*out*/
-                          int* streamId);
-
-    /**
-      * Create a new HW stream, whose sink will be the SurfaceTexture.
-      * Lock free. Service maintains counter of streams.
-      * Errors: -EBUSY if too many streams created
-      */
-    status_t createStream(int width, int height, int format,
-                          const sp<IGraphicBufferProducer>& bufferProducer,
-                          /*out*/
-                          int* streamId);
-    status_t createStreamCpu(int width, int height, int format,
-                          int heapCount,
-                          /*out*/
-                          sp<CpuConsumer>* cpuConsumer,
-                          int* streamId);
-    status_t createStreamCpu(int width, int height, int format,
-                          int heapCount,
-                          bool synchronousMode,
-                          /*out*/
-                          sp<CpuConsumer>* cpuConsumer,
-                          int* streamId);
-
-    // Create a request object from a template.
-    status_t createDefaultRequest(int templateId,
-                                 /*out*/
-                                  camera_metadata** request) const;
-
-    // Get static camera metadata
-    camera_metadata* getCameraInfo(int cameraId);
-
-    // Blocks until a frame is available (CPU streams only)
-    // - Obtain the frame data by calling CpuConsumer::lockNextBuffer
-    // - Release the frame data after use with CpuConsumer::unlockBuffer
-    // Return value:
-    // - >0 - number of frames available to be locked
-    // - <0 - error (refer to error codes)
-    // Error codes:
-    // -ETIMEDOUT if it took too long to get a frame
-    int waitForFrameBuffer(int streamId);
-
-    // Blocks until a metadata result is available
-    // - Obtain the metadata by calling consumeFrameMetadata()
-    // Error codes:
-    // -ETIMEDOUT if it took too long to get a frame
-    status_t waitForFrameMetadata();
-
-    // Get the latest metadata. This is destructive.
-    // - Calling this repeatedly will produce empty metadata objects.
-    // - Use waitForFrameMetadata to sync until new data is available.
-    CameraMetadata consumeFrameMetadata();
-
-    // Convenience method to drop frame buffers (CPU streams only)
-    // Return values:
-    //  >=0 - number of frames dropped (up to count)
-    //  <0  - error code
-    // Error codes:
-    //   BAD_VALUE - invalid streamId or count passed
-    int dropFrameBuffer(int streamId, int count);
-
-protected:
-    ////////////////////////////////////////////////////////
-    // IProCameraCallbacks implementation
-    ////////////////////////////////////////////////////////
-    virtual void        notifyCallback(int32_t msgType,
-                                       int32_t ext,
-                                       int32_t ext2);
-
-    virtual void        onLockStatusChanged(
-                                IProCameraCallbacks::LockStatus newLockStatus);
-
-    virtual void        onResultReceived(int32_t requestId,
-                                         camera_metadata* result);
-private:
-    ProCamera(int cameraId);
-
-    class ProFrameListener : public CpuConsumer::FrameAvailableListener {
-    public:
-        ProFrameListener(wp<ProCamera> camera, int streamID) {
-            mCamera = camera;
-            mStreamId = streamID;
-        }
-
-    protected:
-        virtual void onFrameAvailable(const BufferItem& /* item */) {
-            sp<ProCamera> c = mCamera.promote();
-            if (c.get() != NULL) {
-                c->onFrameAvailable(mStreamId);
-            }
-        }
-
-    private:
-        wp<ProCamera> mCamera;
-        int mStreamId;
-    };
-    friend class ProFrameListener;
-
-    struct StreamInfo
-    {
-        StreamInfo(int streamId) {
-            this->streamID = streamId;
-            cpuStream = false;
-            frameReady = 0;
-        }
-
-        StreamInfo() {
-            streamID = -1;
-            cpuStream = false;
-        }
-
-        int  streamID;
-        bool cpuStream;
-        sp<CpuConsumer> cpuConsumer;
-        bool synchronousMode;
-        sp<ProFrameListener> frameAvailableListener;
-        sp<Surface> stc;
-        int frameReady;
-    };
-
-    Condition mWaitCondition;
-    Mutex     mWaitMutex;
-    static const nsecs_t mWaitTimeout = 1000000000; // 1sec
-    KeyedVector<int, StreamInfo> mStreams;
-    bool mMetadataReady;
-    CameraMetadata mLatestMetadata;
-
-    void onFrameAvailable(int streamId);
-
-    StreamInfo& getStreamInfo(int streamId);
-
-    friend class CameraBase;
-};
-
-}; // namespace android
-
-#endif
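Note: for reference, the client flow documented in the header deleted above was: connect, take the exclusive lock, create a stream, submit a streaming request, then consume frames. A condensed sketch of that now-removed API; cameraId, width, height, format and heapCount are placeholders, and error handling is elided.

    // Sketch only: the ProCamera flow being removed by this change.
    sp<ProCamera> cam = ProCamera::connect(cameraId);
    cam->exclusiveTryLock();                          // returns -EBUSY if another client holds it

    sp<CpuConsumer> consumer;
    int streamId = -1;
    cam->createStreamCpu(width, height, format, heapCount, &consumer, &streamId);

    camera_metadata* request = NULL;
    cam->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, &request);
    cam->submitRequest(request, /*streaming*/true);   // callee gets a copy of the metadata
    free_camera_metadata(request);                    // so the caller frees its own

    if (cam->waitForFrameBuffer(streamId) > 0) {
        // consume frames via consumer->lockNextBuffer()/unlockBuffer()
    }
    cam->deleteStream(streamId);
    cam->exclusiveUnlock();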
diff --git a/include/camera/camera2/ICameraDeviceUser.h b/include/camera/camera2/ICameraDeviceUser.h
index bfc2aa0..e9f1f5a 100644
--- a/include/camera/camera2/ICameraDeviceUser.h
+++ b/include/camera/camera2/ICameraDeviceUser.h
@@ -27,9 +27,9 @@
 
 class ICameraDeviceUserClient;
 class IGraphicBufferProducer;
-class Surface;
 class CaptureRequest;
 class CameraMetadata;
+class OutputConfiguration;
 
 enum {
     NO_IN_FLIGHT_REPEATING_FRAMES = -1,
@@ -100,8 +100,8 @@
     virtual status_t        endConfigure() = 0;
 
     virtual status_t        deleteStream(int streamId) = 0;
-    virtual status_t        createStream(
-            const sp<IGraphicBufferProducer>& bufferProducer) = 0;
+
+    virtual status_t        createStream(const OutputConfiguration& outputConfiguration) = 0;
 
     // Create a request object from a template.
     virtual status_t        createDefaultRequest(int templateId,
diff --git a/include/camera/camera2/OutputConfiguration.h b/include/camera/camera2/OutputConfiguration.h
new file mode 100644
index 0000000..e6b679f
--- /dev/null
+++ b/include/camera/camera2/OutputConfiguration.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_CAMERA2_OUTPUTCONFIGURATION_H
+#define ANDROID_HARDWARE_CAMERA2_OUTPUTCONFIGURATION_H
+
+#include <utils/RefBase.h>
+#include <gui/IGraphicBufferProducer.h>
+
+namespace android {
+
+class Surface;
+
+class OutputConfiguration : public virtual RefBase {
+public:
+
+    static const int INVALID_ROTATION;
+    sp<IGraphicBufferProducer> getGraphicBufferProducer() const;
+    int                        getRotation() const;
+
+    /**
+     * Keep impl up-to-date with OutputConfiguration.java in frameworks/base
+     */
+    status_t                   writeToParcel(Parcel& parcel) const;
+    // getGraphicBufferProducer will be NULL if an error occurred
+    // getRotation will be INVALID_ROTATION if an error occurred
+    OutputConfiguration(const Parcel& parcel);
+
+private:
+    sp<IGraphicBufferProducer> mGbp;
+    int                        mRotation;
+
+    // helper function
+    static String16 readMaybeEmptyString16(const Parcel& parcel);
+};
+}; // namespace android
+
+#endif
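Note: ICameraDeviceUser::createStream() now takes an OutputConfiguration instead of a bare IGraphicBufferProducer. As declared above, the native class is built from a Parcel (written by OutputConfiguration.java on the managed side) and exposes only getters, so the service side unpacks it roughly as below; handleCreateStream() and `parcel` are illustrative placeholders.

    // Sketch: service side unpacking the configuration received over binder.
    status_t handleCreateStream(const Parcel& parcel) {
        OutputConfiguration config(parcel);
        sp<IGraphicBufferProducer> gbp = config.getGraphicBufferProducer();
        if (gbp == NULL || config.getRotation() == OutputConfiguration::INVALID_ROTATION) {
            return BAD_VALUE;                         // parceling failed (see header comments)
        }
        // ...forward to the ICameraDeviceUser implementation's createStream(config)...
        return OK;
    }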
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index a68a9cb..7be2c3e 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -42,8 +42,7 @@
         EVENT_MORE_DATA = 0,        // Request to read available data from buffer.
                                     // If this event is delivered but the callback handler
                                     // does not want to read the available data, the handler must
-                                    // explicitly
-                                    // ignore the event by setting frameCount to zero.
+                                    // explicitly ignore the event by setting frameCount to zero.
         EVENT_OVERRUN = 1,          // Buffer overrun occurred.
         EVENT_MARKER = 2,           // Record head is at the specified marker position
                                     // (See setMarkerPosition()).
@@ -53,7 +52,7 @@
                                     // voluntary invalidation by mediaserver, or mediaserver crash.
     };
 
-    /* Client should declare Buffer on the stack and pass address to obtainBuffer()
+    /* Client should declare a Buffer and pass address to obtainBuffer()
      * and releaseBuffer().  See also callback_t for EVENT_MORE_DATA.
      */
 
@@ -62,20 +61,25 @@
     public:
         // FIXME use m prefix
         size_t      frameCount;     // number of sample frames corresponding to size;
-                                    // on input it is the number of frames available,
-                                    // on output is the number of frames actually drained
-                                    // (currently ignored but will make the primary field in future)
+                                    // on input to obtainBuffer() it is the number of frames desired
+                                    // on output from obtainBuffer() it is the number of available
+                                    //    frames to be read
+                                    // on input to releaseBuffer() it is currently ignored
 
         size_t      size;           // input/output in bytes == frameCount * frameSize
-                                    // on output is the number of bytes actually drained
-                                    // FIXME this is redundant with respect to frameCount,
-                                    // and TRANSFER_OBTAIN mode is broken for 8-bit data
-                                    // since we don't define the frame format
+                                    // on input to obtainBuffer() it is ignored
+                                    // on output from obtainBuffer() it is the number of available
+                                    //    bytes to be read, which is frameCount * frameSize
+                                    // on input to releaseBuffer() it is the number of bytes to
+                                    //    release
+                                    // FIXME This is redundant with respect to frameCount.  Consider
+                                    //    removing size and making frameCount the primary field.
 
         union {
             void*       raw;
             short*      i16;        // signed 16-bit
             int8_t*     i8;         // unsigned 8-bit, offset by 0x80
+                                    // input to obtainBuffer(): unused, output: pointer to buffer
         };
     };
 
@@ -119,7 +123,7 @@
     enum transfer_type {
         TRANSFER_DEFAULT,   // not specified explicitly; determine from the other parameters
         TRANSFER_CALLBACK,  // callback EVENT_MORE_DATA
-        TRANSFER_OBTAIN,    // FIXME deprecated: call obtainBuffer() and releaseBuffer()
+        TRANSFER_OBTAIN,    // call obtainBuffer() and releaseBuffer()
         TRANSFER_SYNC,      // synchronous read()
     };
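Note: with TRANSFER_OBTAIN no longer flagged as deprecated, the Buffer comments above describe the intended loop: request a number of frames, read back frameCount/size, consume raw, then release. A hedged sketch of one iteration; `record`, `framesWanted`, `waitCount` and process() are placeholders, and the exact obtainBuffer()/releaseBuffer() signatures are assumed from the comments further down in this header.

    // Sketch: one TRANSFER_OBTAIN iteration.
    AudioRecord::Buffer buffer;
    buffer.frameCount = framesWanted;                 // input: frames desired
    if (record->obtainBuffer(&buffer, waitCount) == NO_ERROR) {
        process(buffer.raw, buffer.size);             // output: buffer.size valid bytes at buffer.raw
        record->releaseBuffer(&buffer);               // buffer.size is the byte count released
    }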
 
@@ -145,15 +149,16 @@
      *                     be larger if the requested size is not compatible with current audio HAL
      *                     latency.  Zero means to use a default value.
      * cbf:                Callback function. If not null, this function is called periodically
-     *                     to consume new data and inform of marker, position updates, etc.
+     *                     to consume new data in TRANSFER_CALLBACK mode
+     *                     and inform of marker, position updates, etc.
      * user:               Context for use by the callback receiver.
      * notificationFrames: The callback function is called each time notificationFrames PCM
      *                     frames are ready in record track output buffer.
      * sessionId:          Not yet supported.
      * transferType:       How data is transferred from AudioRecord.
      * flags:              See comments on audio_input_flags_t in <system/audio.h>
+     * pAttributes:        If not NULL, supersedes inputSource for use case selection.
      * threadCanCallJava:  Not present in parameter list, and so is fixed at false.
-     * pAttributes:        if not NULL, supersedes inputSource for use case selection
      */
 
                         AudioRecord(audio_source_t inputSource,
@@ -178,6 +183,7 @@
 
     /* Initialize an AudioRecord that was created using the AudioRecord() constructor.
      * Don't call set() more than once, or after an AudioRecord() constructor that takes parameters.
+     * set() is not multi-thread safe.
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful initialization
      *  - INVALID_OPERATION: AudioRecord is already initialized or record device is already in use
@@ -212,7 +218,7 @@
             status_t    initCheck() const   { return mStatus; }
 
     /* Returns this track's estimated latency in milliseconds.
-     * This includes the latency due to AudioRecord buffer size,
+     * This includes the latency due to AudioRecord buffer size, resampling if applicable,
      * and audio hardware driver.
      */
             uint32_t    latency() const     { return mLatency; }
@@ -244,11 +250,6 @@
      */
             uint32_t    getSampleRate() const   { return mSampleRate; }
 
-    /* Return the notification frame count.
-     * This is approximately how often the callback is invoked, for transfer type TRANSFER_CALLBACK.
-     */
-            size_t      notificationFrames() const  { return mNotificationFramesAct; }
-
     /* Sets marker position. When record reaches the number of frames specified,
      * a callback with event type EVENT_MARKER is called. Calling setMarkerPosition
      * with marker == 0 cancels marker notification callback.
@@ -310,7 +311,12 @@
      * Returned value:
      *  handle on audio hardware input
      */
-            audio_io_handle_t    getInput() const;
+// FIXME The only known public caller is frameworks/opt/net/voip/src/jni/rtp/AudioGroup.cpp
+            audio_io_handle_t    getInput() const __attribute__((__deprecated__))
+                                                { return getInputPrivate(); }
+private:
+            audio_io_handle_t    getInputPrivate() const;
+public:
 
     /* Returns the audio session ID associated with this AudioRecord.
      *
@@ -324,7 +330,8 @@
      */
             int    getSessionId() const { return mSessionId; }
 
-    /* Obtains a buffer of up to "audioBuffer->frameCount" full frames.
+    /* Public API for TRANSFER_OBTAIN mode.
+     * Obtains a buffer of up to "audioBuffer->frameCount" full frames.
      * After draining these frames of data, the caller should release them with releaseBuffer().
      * If the track buffer is not empty, obtainBuffer() returns as many contiguous
      * full frames as are available immediately.
@@ -337,9 +344,6 @@
      * or return WOULD_BLOCK depending on the value of the "waitCount"
      * parameter.
      *
-     * obtainBuffer() and releaseBuffer() are deprecated for direct use by applications,
-     * which should use read() or callback EVENT_MORE_DATA instead.
-     *
      * Interpretation of waitCount:
      *  +n  limits wait time to n * WAIT_PERIOD_MS,
      *  -1  causes an (almost) infinite wait time,
@@ -348,6 +352,8 @@
      * Buffer fields
      * On entry:
      *  frameCount  number of frames requested
+     *  size        ignored
+     *  raw         ignored
      * After error return:
      *  frameCount  0
      *  size        0
@@ -358,9 +364,7 @@
      *  raw         pointer to the buffer
      */
 
-    /* FIXME Deprecated public API for TRANSFER_OBTAIN mode */
-            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
-                                __attribute__((__deprecated__));
+            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount);
 
 private:
     /* If nonContig is non-NULL, it is an output parameter that will be set to the number of
@@ -373,9 +377,15 @@
                                      struct timespec *elapsed = NULL, size_t *nonContig = NULL);
 public:
 
-    /* Release an emptied buffer of "audioBuffer->frameCount" frames for AudioFlinger to re-fill. */
-    // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed
-            void        releaseBuffer(Buffer* audioBuffer);
+    /* Public API for TRANSFER_OBTAIN mode.
+     * Release an emptied buffer of "audioBuffer->frameCount" frames for AudioFlinger to re-fill.
+     *
+     * Buffer fields:
+     *  frameCount  currently ignored; recommended to set to the actual number of frames consumed
+     *  size        actual number of bytes consumed, must be multiple of frameSize
+     *  raw         ignored
+     */
+            void        releaseBuffer(const Buffer* audioBuffer);
 
     /* As a convenience we provide a read() interface to the audio buffer.
      * Input parameter 'size' is in byte units.
@@ -387,8 +397,11 @@
      *      WOULD_BLOCK         when obtainBuffer() returns same, or
      *                          AudioRecord was stopped during the read
      *      or any other error code returned by IAudioRecord::start() or restoreRecord_l().
+     * Default behavior is to only return when all data has been transferred. Set 'blocking' to
+     * false for the method to return immediately without waiting to try multiple times to read
+     * the full content of the buffer.
      */
-            ssize_t     read(void* buffer, size_t size);
+            ssize_t     read(void* buffer, size_t size, bool blocking = true);
 
     /* Return the number of input frames lost in the audio driver since the last call of this
      * function.  Audio driver is expected to reset the value to 0 and restart counting upon
@@ -417,6 +430,7 @@
 
                 void        pause();    // suspend thread from execution at next loop boundary
                 void        resume();   // allow thread to execute, if not requested to exit
+                void        wake();     // wake to handle changed notification conditions.
 
     private:
                 void        pauseInternal(nsecs_t ns = 0LL);
@@ -431,7 +445,9 @@
         bool                mPaused;    // whether thread is requested to pause at next loop entry
         bool                mPausedInt; // whether thread internally requests pause
         nsecs_t             mPausedNs;  // if mPausedInt then associated timeout, otherwise ignored
-        bool                mIgnoreNextPausedInt;   // whether to ignore next mPausedInt request
+        bool                mIgnoreNextPausedInt;   // skip any internal pause and go immediately
+                                        // to processAudioBuffer() as state may have changed
+                                        // since the pause time was calculated.
     };
 
             // body of AudioRecordThread::threadLoop()
@@ -459,7 +475,7 @@
     bool                    mActive;
 
     // for client callback handler
-    callback_t              mCbf;               // callback handler for events, or NULL
+    callback_t              mCbf;                   // callback handler for events, or NULL
     void*                   mUserData;
 
     // for notification APIs
@@ -476,10 +492,10 @@
     bool                    mRetryOnPartialBuffer;  // sleep and retry after partial obtainBuffer()
     uint32_t                mObservedSequence;      // last observed value of mSequence
 
-    uint32_t                mMarkerPosition;    // in wrapping (overflow) frame units
+    uint32_t                mMarkerPosition;        // in wrapping (overflow) frame units
     bool                    mMarkerReached;
-    uint32_t                mNewPosition;       // in frames
-    uint32_t                mUpdatePeriod;      // in frames, zero means no EVENT_NEW_POS
+    uint32_t                mNewPosition;           // in frames
+    uint32_t                mUpdatePeriod;          // in frames, zero means no EVENT_NEW_POS
 
     status_t                mStatus;
 
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index ad5d6ed..f5db1bb 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -346,7 +346,8 @@
 
     };
 
-    static void setAudioPortCallback(sp<AudioPortCallback> callBack);
+    static status_t addAudioPortCallback(const sp<AudioPortCallback>& callBack);
+    static status_t removeAudioPortCallback(const sp<AudioPortCallback>& callBack);
 
 private:
 
@@ -373,12 +374,19 @@
         AudioPolicyServiceClient() {
         }
 
+        status_t addAudioPortCallback(const sp<AudioPortCallback>& callBack);
+        status_t removeAudioPortCallback(const sp<AudioPortCallback>& callBack);
+
         // DeathRecipient
         virtual void binderDied(const wp<IBinder>& who);
 
         // IAudioPolicyServiceClient
         virtual void onAudioPortListUpdate();
         virtual void onAudioPatchListUpdate();
+
+    private:
+        Mutex                               mLock;
+        Vector <sp <AudioPortCallback> >    mAudioPortCallbacks;
     };
 
     static sp<AudioFlingerClient> gAudioFlingerClient;
@@ -390,7 +398,6 @@
     static Mutex gLockCache; // protects gOutputs, gPrevInSamplingRate, gPrevInFormat,
                              // gPrevInChannelMask and gInBuffSize
     static Mutex gLockAPS;   // protects gAudioPolicyService and gAudioPolicyServiceClient
-    static Mutex gLockAPC;   // protects gAudioPortCallback
     static sp<IAudioFlinger> gAudioFlinger;
     static audio_error_callback gAudioErrorCallback;
 
@@ -405,8 +412,6 @@
     // list of output descriptors containing cached parameters
     // (sampling rate, framecount, channel count...)
     static DefaultKeyedVector<audio_io_handle_t, OutputDescriptor *> gOutputs;
-
-    static sp<AudioPortCallback> gAudioPortCallback;
 };
 
 };  // namespace android
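
Illustrative sketch (not part of this change) of the new add/remove callback pair; MyPortCallback is a hypothetical subclass of AudioSystem::AudioPortCallback:

    sp<AudioSystem::AudioPortCallback> cb = new MyPortCallback();   // hypothetical subclass
    if (AudioSystem::addAudioPortCallback(cb) == NO_ERROR) {
        // onAudioPortListUpdate() / onAudioPatchListUpdate() may now be delivered to cb.
        AudioSystem::removeAudioPortCallback(cb);   // adding a duplicate or removing an
                                                    // unknown callback returns INVALID_OPERATION
    }
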
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 3de0774..d9b7057 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -91,7 +91,7 @@
             void*       raw;
             short*      i16;      // signed 16-bit
             int8_t*     i8;       // unsigned 8-bit, offset by 0x80
-        };                        // input: unused, output: pointer to buffer
+        };                        // input to obtainBuffer(): unused, output: pointer to buffer
     };
 
     /* As a convenience, if a callback is supplied, a handler thread
@@ -125,6 +125,7 @@
      *  - BAD_VALUE: unsupported configuration
      * frameCount is guaranteed to be non-zero if status is NO_ERROR,
      * and is undefined otherwise.
+     * FIXME This API assumes a route, and so should be deprecated.
      */
 
     static status_t getMinFrameCount(size_t* frameCount,
@@ -136,7 +137,7 @@
     enum transfer_type {
         TRANSFER_DEFAULT,   // not specified explicitly; determine from the other parameters
         TRANSFER_CALLBACK,  // callback EVENT_MORE_DATA
-        TRANSFER_OBTAIN,    // FIXME deprecated: call obtainBuffer() and releaseBuffer()
+        TRANSFER_OBTAIN,    // call obtainBuffer() and releaseBuffer()
         TRANSFER_SYNC,      // synchronous write()
         TRANSFER_SHARED,    // shared memory
     };
@@ -149,9 +150,6 @@
     /* Creates an AudioTrack object and registers it with AudioFlinger.
      * Once created, the track needs to be started before it can be used.
      * Unspecified values are set to appropriate default values.
-     * With this constructor, the track is configured for streaming mode.
-     * Data to be rendered is supplied by write() or by the callback EVENT_MORE_DATA.
-     * Intermixing a combination of write() and non-ignored EVENT_MORE_DATA is not allowed.
      *
      * Parameters:
      *
@@ -169,20 +167,28 @@
      *                     configuration.  Zero means to use a default value.
      * flags:              See comments on audio_output_flags_t in <system/audio.h>.
      * cbf:                Callback function. If not null, this function is called periodically
-     *                     to provide new data and inform of marker, position updates, etc.
+     *                     to provide new data in TRANSFER_CALLBACK mode
+     *                     and inform of marker, position updates, etc.
      * user:               Context for use by the callback receiver.
      * notificationFrames: The callback function is called each time notificationFrames PCM
      *                     frames have been consumed from track input buffer.
      *                     This is expressed in units of frames at the initial source sample rate.
      * sessionId:          Specific session ID, or zero to use default.
      * transferType:       How data is transferred to AudioTrack.
+     * offloadInfo:        If not NULL, provides offload parameters for
+     *                     AudioSystem::getOutputForAttr().
+     * uid:                User ID of the app which initially requested this AudioTrack
+     *                     for power management tracking, or -1 for current user ID.
+     * pid:                Process ID of the app which initially requested this AudioTrack
+     *                     for power management tracking, or -1 for current process ID.
+     * pAttributes:        If not NULL, supersedes streamType for use case selection.
      * threadCanCallJava:  Not present in parameter list, and so is fixed at false.
      */
 
                         AudioTrack( audio_stream_type_t streamType,
                                     uint32_t sampleRate,
                                     audio_format_t format,
-                                    audio_channel_mask_t,
+                                    audio_channel_mask_t channelMask,
                                     size_t frameCount    = 0,
                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                                     callback_t cbf       = NULL,
@@ -198,7 +204,9 @@
     /* Creates an audio track and registers it with AudioFlinger.
      * With this constructor, the track is configured for static buffer mode.
      * Data to be rendered is passed in a shared memory buffer
-     * identified by the argument sharedBuffer, which must be non-0.
+     * identified by the argument sharedBuffer, which should be non-0.
+     * If sharedBuffer is zero, this constructor is equivalent to the previous constructor
+     * but without the ability to specify a non-zero value for the frameCount parameter.
      * The memory should be initialized to the desired data before calling start().
      * The write() method is not supported in this case.
      * It is recommended to pass a callback function to be notified of playback end by an
@@ -230,6 +238,7 @@
 
     /* Initialize an AudioTrack that was created using the AudioTrack() constructor.
      * Don't call set() more than once, or after the AudioTrack() constructors that take parameters.
+     * set() is not multi-thread safe.
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful initialization
      *  - INVALID_OPERATION: AudioTrack is already initialized
@@ -464,7 +473,9 @@
      *  handle on audio hardware output, or AUDIO_IO_HANDLE_NONE if the
      *  track needed to be re-created but that failed
      */
+private:
             audio_io_handle_t    getOutput() const;
+public:
 
     /* Returns the unique session ID associated with this track.
      *
@@ -511,9 +522,6 @@
      * or return WOULD_BLOCK depending on the value of the "waitCount"
      * parameter.
      *
-     * obtainBuffer() and releaseBuffer() are deprecated for direct use by applications,
-     * which should use write() or callback EVENT_MORE_DATA instead.
-     *
      * Interpretation of waitCount:
      *  +n  limits wait time to n * WAIT_PERIOD_MS,
      *  -1  causes an (almost) infinite wait time,
@@ -533,10 +541,8 @@
      *  size        actual number of bytes available
      *  raw         pointer to the buffer
      */
-    /* FIXME Deprecated public API for TRANSFER_OBTAIN mode */
             status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount,
-                                size_t *nonContig = NULL)
-                                __attribute__((__deprecated__));
+                                size_t *nonContig = NULL);
 
 private:
     /* If nonContig is non-NULL, it is an output parameter that will be set to the number of
@@ -559,9 +565,7 @@
      *  frameCount  currently ignored; recommended to set to the actual number of frames filled
      *  size        actual number of bytes filled, must be multiple of frameSize
      *  raw         ignored
-     *
      */
-    // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed
             void        releaseBuffer(const Buffer* audioBuffer);
 
     /* As a convenience we provide a write() interface to the audio buffer.
@@ -574,7 +578,7 @@
      *      WOULD_BLOCK         when obtainBuffer() returns same, or
      *                          AudioTrack was stopped during the write
      *      or any other error code returned by IAudioTrack::start() or restoreTrack_l().
-     * Default behavior is to only return until all data has been transferred. Set 'blocking' to
+     * Default behavior is to only return when all data has been transferred. Set 'blocking' to
      * false for the method to return immediately without waiting to try multiple times to write
      * the full content of the buffer.
      */
@@ -582,6 +586,7 @@
 
     /*
      * Dumps the state of an audio track.
+     * Not a general-purpose API; intended only for use by media player service to dump its tracks.
      */
             status_t    dump(int fd, const Vector<String16>& args) const;
 
@@ -623,8 +628,6 @@
                         AudioTrack(const AudioTrack& other);
             AudioTrack& operator = (const AudioTrack& other);
 
-            void        setAttributesFromStreamType(audio_stream_type_t streamType);
-
     /* a small internal class to handle the callback */
     class AudioTrackThread : public Thread
     {
@@ -667,10 +670,6 @@
             static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
             nsecs_t processAudioBuffer();
 
-            bool     isOffloaded() const;
-            bool     isDirect() const;
-            bool     isOffloadedOrDirect() const;
-
             // caller must hold lock on mLock for all _l methods
 
             status_t createTrack_l();
@@ -683,6 +682,10 @@
             // FIXME enum is faster than strcmp() for parameter 'from'
             status_t restoreTrack_l(const char *from);
 
+            bool     isOffloaded() const;
+            bool     isDirect() const;
+            bool     isOffloadedOrDirect() const;
+
             bool     isOffloaded_l() const
                 { return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; }
 
@@ -773,6 +776,7 @@
     bool                    mMarkerReached;
     uint32_t                mNewPosition;           // in frames
     uint32_t                mUpdatePeriod;          // in frames, zero means no EVENT_NEW_POS
+
     uint32_t                mServer;                // in frames, last known mProxy->getPosition()
                                                     // which is count of frames consumed by server,
                                                     // reset by new IAudioTrack,
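
Illustrative sketch (not part of this change) of the re-enabled TRANSFER_OBTAIN path on the playback side, assuming a started AudioTrack named track; fillPcm() is a hypothetical helper that returns the number of bytes it wrote (a multiple of the frame size):

    AudioTrack::Buffer buffer;
    buffer.frameCount = 256;                          // frames requested
    status_t err = track.obtainBuffer(&buffer, -1 /*wait (almost) forever*/);
    if (err == NO_ERROR) {
        buffer.size = fillPcm(buffer.raw, buffer.size);   // actual number of bytes filled
        track.releaseBuffer(&buffer);
    }
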
diff --git a/include/media/IDrm.h b/include/media/IDrm.h
index affcbd7..9449beb6 100644
--- a/include/media/IDrm.h
+++ b/include/media/IDrm.h
@@ -47,7 +47,8 @@
                       Vector<uint8_t> const &initData,
                       String8 const &mimeType, DrmPlugin::KeyType keyType,
                       KeyedVector<String8, String8> const &optionalParameters,
-                      Vector<uint8_t> &request, String8 &defaultUrl) = 0;
+                      Vector<uint8_t> &request, String8 &defaultUrl,
+                      DrmPlugin::KeyRequestType *keyRequestType) = 0;
 
     virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
                                         Vector<uint8_t> const &response,
diff --git a/include/media/IResourceManagerClient.h b/include/media/IResourceManagerClient.h
new file mode 100644
index 0000000..3587aea
--- /dev/null
+++ b/include/media/IResourceManagerClient.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IRESOURCEMANAGERCLIENT_H
+#define ANDROID_IRESOURCEMANAGERCLIENT_H
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+class IResourceManagerClient: public IInterface
+{
+public:
+    DECLARE_META_INTERFACE(ResourceManagerClient);
+
+    virtual bool reclaimResource() = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnResourceManagerClient: public BnInterface<IResourceManagerClient>
+{
+public:
+    virtual status_t onTransact(uint32_t code,
+                                const Parcel &data,
+                                Parcel *reply,
+                                uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif // ANDROID_IRESOURCEMANAGERCLIENT_H
diff --git a/include/media/IResourceManagerService.h b/include/media/IResourceManagerService.h
new file mode 100644
index 0000000..067392c
--- /dev/null
+++ b/include/media/IResourceManagerService.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IRESOURCEMANAGERSERVICE_H
+#define ANDROID_IRESOURCEMANAGERSERVICE_H
+
+#include <utils/Errors.h>  // for status_t
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+#include <media/IResourceManagerClient.h>
+#include <media/MediaResource.h>
+#include <media/MediaResourcePolicy.h>
+
+namespace android {
+
+class IResourceManagerService: public IInterface
+{
+public:
+    DECLARE_META_INTERFACE(ResourceManagerService);
+
+    virtual void config(const Vector<MediaResourcePolicy> &policies) = 0;
+
+    virtual void addResource(
+            int pid,
+            int64_t clientId,
+            const sp<IResourceManagerClient> client,
+            const Vector<MediaResource> &resources) = 0;
+
+    virtual void removeResource(int64_t clientId) = 0;
+
+    virtual bool reclaimResource(
+            int callingPid,
+            const Vector<MediaResource> &resources) = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnResourceManagerService: public BnInterface<IResourceManagerService>
+{
+public:
+    virtual status_t onTransact(uint32_t code,
+                                const Parcel &data,
+                                Parcel *reply,
+                                uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif // ANDROID_IRESOURCEMANAGERSERVICE_H
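
Illustrative sketch (not part of this change) of the client side of these interfaces; ownsReleasableCodec() and releaseCodec() are hypothetical helpers:

    struct MyResourceClient : public BnResourceManagerClient {
        bool ownsReleasableCodec() const;   // hypothetical, defined elsewhere
        void releaseCodec();                // hypothetical, defined elsewhere

        // Called by the resource manager when another client needs resources
        // that this process currently holds.
        virtual bool reclaimResource() {
            if (!ownsReleasableCodec()) {
                return false;               // nothing could be freed
            }
            releaseCodec();
            return true;                    // resource successfully reclaimed
        }
    };

Such a client would typically be registered, together with the MediaResource entries it holds, via IResourceManagerService::addResource(), and dropped again with removeResource().
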
diff --git a/include/media/IStreamSource.h b/include/media/IStreamSource.h
index 677119b..149bd49 100644
--- a/include/media/IStreamSource.h
+++ b/include/media/IStreamSource.h
@@ -81,6 +81,13 @@
     // with the next PTS occurring in the stream. The value is of type int64_t.
     static const char *const kKeyMediaTimeUs;
 
+    // Optionally signalled as part of a discontinuity that includes
+    // DISCONTINUITY_TIME. It indicates the media time (in us) of a recent
+    // sample from the same content, and is used as a hint for the parser to
+    // handle PTS wraparound. This is required when a new parser is created
+    // to continue parsing content from the same timeline.
+    static const char *const kKeyRecentMediaTimeUs;
+
     virtual void issueCommand(
             Command cmd, bool synchronous, const sp<AMessage> &msg = NULL) = 0;
 };
diff --git a/include/media/MediaResource.h b/include/media/MediaResource.h
new file mode 100644
index 0000000..0b57c84
--- /dev/null
+++ b/include/media/MediaResource.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_MEDIA_RESOURCE_H
+#define ANDROID_MEDIA_RESOURCE_H
+
+#include <binder/Parcel.h>
+#include <utils/String8.h>
+
+namespace android {
+
+extern const char kResourceSecureCodec[];
+extern const char kResourceNonSecureCodec[];
+extern const char kResourceGraphicMemory[];
+
+class MediaResource {
+public:
+    MediaResource();
+    MediaResource(String8 type, uint64_t value);
+    MediaResource(String8 type, String8 subType, uint64_t value);
+
+    void readFromParcel(const Parcel &parcel);
+    void writeToParcel(Parcel *parcel) const;
+
+    String8 toString() const;
+
+    bool operator==(const MediaResource &other) const;
+    bool operator!=(const MediaResource &other) const;
+
+    String8 mType;
+    String8 mSubType;
+    uint64_t mValue;
+};
+
+}; // namespace android
+
+#endif  // ANDROID_MEDIA_RESOURCE_H
diff --git a/include/media/MediaResourcePolicy.h b/include/media/MediaResourcePolicy.h
new file mode 100644
index 0000000..1e1c341
--- /dev/null
+++ b/include/media/MediaResourcePolicy.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_MEDIA_RESOURCE_POLICY_H
+#define ANDROID_MEDIA_RESOURCE_POLICY_H
+
+#include <binder/Parcel.h>
+#include <utils/String8.h>
+
+namespace android {
+
+extern const char kPolicySupportsMultipleSecureCodecs[];
+extern const char kPolicySupportsSecureWithNonSecureCodec[];
+
+class MediaResourcePolicy {
+public:
+    MediaResourcePolicy();
+    MediaResourcePolicy(String8 type, uint64_t value);
+
+    void readFromParcel(const Parcel &parcel);
+    void writeToParcel(Parcel *parcel) const;
+
+    String8 toString() const;
+
+    String8 mType;
+    uint64_t mValue;
+};
+
+}; // namespace android
+
+#endif  // ANDROID_MEDIA_RESOURCE_POLICY_H
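
Illustrative sketch (not part of this change) of building the value objects declared above; the numeric values are made up for the example:

    Vector<MediaResource> resources;
    resources.add(MediaResource(String8(kResourceSecureCodec), 1));
    resources.add(MediaResource(String8(kResourceGraphicMemory), 8 * 1024 * 1024));

    Vector<MediaResourcePolicy> policies;
    policies.add(MediaResourcePolicy(String8(kPolicySupportsMultipleSecureCodecs), 1));

The resources vector would then go to IResourceManagerService::addResource() and the policies vector to IResourceManagerService::config().
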
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index aa91485..c1483f3 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -291,7 +291,7 @@
             OMX_U32 portIndex, OMX_AUDIO_CODINGTYPE desiredFormat);
 
     status_t setupAMRCodec(bool encoder, bool isWAMR, int32_t bitRate);
-    status_t setupG711Codec(bool encoder, int32_t numChannels);
+    status_t setupG711Codec(bool encoder, int32_t sampleRate, int32_t numChannels);
 
     status_t setupFlacCodec(
             bool encoder, int32_t numChannels, int32_t sampleRate, int32_t compressionLevel);
diff --git a/include/media/stagefright/MediaClock.h b/include/media/stagefright/MediaClock.h
index 660764f..e9c09a1 100644
--- a/include/media/stagefright/MediaClock.h
+++ b/include/media/stagefright/MediaClock.h
@@ -35,9 +35,9 @@
     // It's required to use timestamp of just rendered frame as
     // anchor time in paused state.
     void updateAnchor(
-        int64_t anchorTimeMediaUs,
-        int64_t anchorTimeRealUs,
-        int64_t maxTimeMediaUs = INT64_MAX);
+            int64_t anchorTimeMediaUs,
+            int64_t anchorTimeRealUs,
+            int64_t maxTimeMediaUs = INT64_MAX);
 
     void updateMaxTimeMedia(int64_t maxTimeMediaUs);
 
@@ -45,22 +45,24 @@
 
     // query media time corresponding to real time |realUs|, and save the
     // result in |outMediaUs|.
-    status_t getMediaTime(int64_t realUs,
-                          int64_t *outMediaUs,
-                          bool allowPastMaxTime = false);
+    status_t getMediaTime(
+            int64_t realUs,
+            int64_t *outMediaUs,
+            bool allowPastMaxTime = false) const;
     // query real time corresponding to media time |targetMediaUs|.
     // The result is saved in |outRealUs|.
-    status_t getRealTimeFor(int64_t targetMediaUs, int64_t *outRealUs);
+    status_t getRealTimeFor(int64_t targetMediaUs, int64_t *outRealUs) const;
 
 protected:
     virtual ~MediaClock();
 
 private:
-    status_t getMediaTime_l(int64_t realUs,
-                            int64_t *outMediaUs,
-                            bool allowPastMaxTime);
+    status_t getMediaTime_l(
+            int64_t realUs,
+            int64_t *outMediaUs,
+            bool allowPastMaxTime) const;
 
-    Mutex mLock;
+    mutable Mutex mLock;
 
     int64_t mAnchorTimeMediaUs;
     int64_t mAnchorTimeRealUs;
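
Illustrative sketch (not part of this change) of querying the clock through the now-const accessors; clock is assumed to be an sp<const MediaClock>, for example obtained from MediaSync::getMediaClock():

    int64_t nowMediaUs = 0;
    if (clock->getMediaTime(ALooper::GetNowUs(), &nowMediaUs) == OK) {
        // nowMediaUs is the media time corresponding to the current real time.
    }
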
diff --git a/include/media/stagefright/MediaSync.h b/include/media/stagefright/MediaSync.h
new file mode 100644
index 0000000..8bb8c7f
--- /dev/null
+++ b/include/media/stagefright/MediaSync.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_SYNC_H
+#define MEDIA_SYNC_H
+
+#include <gui/IConsumerListener.h>
+#include <gui/IProducerListener.h>
+
+#include <media/stagefright/foundation/AHandler.h>
+
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
+
+namespace android {
+
+class AudioTrack;
+class BufferItem;
+class Fence;
+class GraphicBuffer;
+class IGraphicBufferConsumer;
+class IGraphicBufferProducer;
+struct MediaClock;
+
+// MediaSync manages media playback and its synchronization to a media clock
+// source. It can also be used for video-only playback.
+//
+// For video playback, it requires an output surface and provides an input
+// surface. It then controls the rendering of input buffers (buffers queued to
+// the input surface) on the output surface to happen at the appropriate time.
+//
+// For audio playback, it requires an audio track and takes updates about
+// rendered audio data in order to maintain the media clock when the audio
+// track serves as the media clock source. (TODO: move audio rendering from
+// Java to native code.)
+//
+// It can use the audio or video track as the media clock source, as well as
+// an external clock. (TODO: actually support an external clock as the media
+// clock source; use the video track as the media clock source for
+// audio-and-video streams.)
+//
+// In video-only mode, MediaSync will play back every video frame even if a
+// frame arrives late relative to its timestamp and the previous frame's.
+//
+// The client needs to configure a surface (for output video rendering) and an
+// audio track (for querying audio rendering information) for MediaSync.
+//
+// Then the client needs to obtain a surface from MediaSync and render video
+// frames onto that surface. Internally, the MediaSync will receive those video
+// frames and render them onto the output surface at the appropriate time.
+//
+// The client needs to call updateQueuedAudioData() immediately after it writes
+// audio data to the audio track. Such information will be used to update the
+// media clock.
+//
+class MediaSync : public AHandler {
+public:
+    // Create an instance of MediaSync.
+    static sp<MediaSync> create();
+
+    // Called when MediaSync is used to render video. It should be called
+    // before createInputSurface().
+    status_t configureSurface(const sp<IGraphicBufferProducer> &output);
+
+    // Called when audio track is used as media clock source. It should be
+    // called before updateQueuedAudioData().
+    // |nativeSampleRateInHz| is the sample rate of audio data fed into audio
+    // track. It's the same number used to create AudioTrack.
+    status_t configureAudioTrack(
+            const sp<AudioTrack> &audioTrack, uint32_t nativeSampleRateInHz);
+
+    // Create a surface for client to render video frames. This is the surface
+    // on which the client should render video frames. Those video frames will
+    // be internally directed to output surface for rendering at appropriate
+    // time.
+    status_t createInputSurface(sp<IGraphicBufferProducer> *outBufferProducer);
+
+    // Update just-rendered audio data size and the presentation timestamp of
+    // the first frame of that audio data. It should be called immediately
+    // after the client writes audio data into the AudioTrack.
+    // This function assumes a continuous audio stream.
+    // TODO: support gap or backwards updates.
+    status_t updateQueuedAudioData(
+            size_t sizeInBytes, int64_t presentationTimeUs);
+
+    // Set the consumer name of the input queue.
+    void setName(const AString &name);
+
+    // Set playback to a desired speed.
+    // This method can be called any time.
+    // |rate| is the ratio between desired speed and the normal one, and should
+    // be non-negative. The meaning of rate values:
+    // 1.0 -- normal playback
+    // 0.0 -- stop or pause
+    // larger than 1.0 -- faster than normal speed
+    // between 0.0 and 1.0 -- slower than normal speed
+    status_t setPlaybackRate(float rate);
+
+    // Get the media clock used by the MediaSync so that the client can obtain
+    // corresponding media time or real time via
+    // MediaClock::getMediaTime() and MediaClock::getRealTimeFor().
+    sp<const MediaClock> getMediaClock();
+
+protected:
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kWhatDrainVideo = 'dVid',
+    };
+
+    static const int MAX_OUTSTANDING_BUFFERS = 2;
+
+    // This is a thin wrapper class that lets us listen to
+    // IConsumerListener::onFrameAvailable from mInput.
+    class InputListener : public BnConsumerListener,
+                          public IBinder::DeathRecipient {
+    public:
+        InputListener(const sp<MediaSync> &sync);
+        virtual ~InputListener();
+
+        // From IConsumerListener
+        virtual void onFrameAvailable(const BufferItem &item);
+
+        // From IConsumerListener
+        // We don't care about released buffers because we detach each buffer as
+        // soon as we acquire it. See the comment for onBufferReleased below for
+        // some clarifying notes about the name.
+        virtual void onBuffersReleased() {}
+
+        // From IConsumerListener
+        // We don't care about sideband streams, since we won't relay them.
+        virtual void onSidebandStreamChanged();
+
+        // From IBinder::DeathRecipient
+        virtual void binderDied(const wp<IBinder> &who);
+
+    private:
+        sp<MediaSync> mSync;
+    };
+
+    // This is a thin wrapper class that lets us listen to
+    // IProducerListener::onBufferReleased from mOutput.
+    class OutputListener : public BnProducerListener,
+                           public IBinder::DeathRecipient {
+    public:
+        OutputListener(const sp<MediaSync> &sync);
+        virtual ~OutputListener();
+
+        // From IProducerListener
+        virtual void onBufferReleased();
+
+        // From IBinder::DeathRecipient
+        virtual void binderDied(const wp<IBinder> &who);
+
+    private:
+        sp<MediaSync> mSync;
+    };
+
+    // mIsAbandoned is set to true when the input or output dies.
+    // Once the MediaSync has been abandoned by one side, it will disconnect
+    // from the other side and not attempt to communicate with it further.
+    bool mIsAbandoned;
+
+    mutable Mutex mMutex;
+    Condition mReleaseCondition;
+    size_t mNumOutstandingBuffers;
+    sp<IGraphicBufferConsumer> mInput;
+    sp<IGraphicBufferProducer> mOutput;
+
+    sp<AudioTrack> mAudioTrack;
+    uint32_t mNativeSampleRateInHz;
+    int64_t mNumFramesWritten;
+    bool mHasAudio;
+
+    int64_t mNextBufferItemMediaUs;
+    List<BufferItem> mBufferItems;
+    sp<ALooper> mLooper;
+    float mPlaybackRate;
+
+    sp<MediaClock> mMediaClock;
+
+    MediaSync();
+
+    // Must be accessed through RefBase
+    virtual ~MediaSync();
+
+    int64_t getRealTime(int64_t mediaTimeUs, int64_t nowUs);
+    int64_t getDurationIfPlayedAtNativeSampleRate_l(int64_t numFrames);
+    int64_t getPlayedOutAudioDurationMedia_l(int64_t nowUs);
+
+    void onDrainVideo_l();
+
+    // This implements the onFrameAvailable callback from IConsumerListener.
+    // It gets called from an InputListener.
+    // During this callback, we detach the buffer from the input, and queue
+    // it for rendering on the output. This call can block if there are too
+    // many outstanding buffers. If it blocks, it will resume when
+    // onBufferReleasedByOutput releases a buffer back to the input.
+    void onFrameAvailableFromInput();
+
+    // Send |bufferItem| to the output for rendering.
+    void renderOneBufferItem_l(const BufferItem &bufferItem);
+
+    // This implements the onBufferReleased callback from IProducerListener.
+    // It gets called from an OutputListener.
+    // During this callback, we detach the buffer from the output, and release
+    // it to the input. A blocked onFrameAvailable call will be allowed to proceed.
+    void onBufferReleasedByOutput();
+
+    // Return |buffer| back to the input.
+    void returnBufferToInput_l(const sp<GraphicBuffer> &buffer, const sp<Fence> &fence);
+
+    // When this is called, the MediaSync disconnects from (i.e., abandons) its
+    // input or output, and signals any waiting onFrameAvailable calls to wake
+    // up. This must be called with mMutex locked.
+    void onAbandoned_l(bool isInput);
+
+    // helper.
+    bool isPlaying() { return mPlaybackRate != 0.0; }
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaSync);
+};
+
+} // namespace android
+
+#endif
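
Illustrative sketch (not part of this change) of the setup flow described in the comments above; outputSurface, audioTrack, sampleRate, bytesWritten and firstFramePtsUs are assumed to come from the application, and error checking is omitted:

    sp<MediaSync> sync = MediaSync::create();
    sync->configureSurface(outputSurface);                // where frames are rendered
    sync->configureAudioTrack(audioTrack, sampleRate);    // audio is the clock source

    sp<IGraphicBufferProducer> inputSurface;
    sync->createInputSurface(&inputSurface);              // hand this to the video decoder

    // After each AudioTrack::write():
    sync->updateQueuedAudioData(bytesWritten, firstFramePtsUs);

    sync->setPlaybackRate(1.0f);                          // 1.0 == normal speed
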
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index e341160..84b1b1a 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -250,7 +250,7 @@
 
     status_t setAC3Format(int32_t numChannels, int32_t sampleRate);
 
-    void setG711Format(int32_t numChannels);
+    void setG711Format(int32_t sampleRate, int32_t numChannels);
 
     status_t setVideoPortFormatType(
             OMX_U32 portIndex,
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index a795c80..ec3a10e 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -65,6 +65,17 @@
 
 AString uriDebugString(const AString &uri, bool incognito = false);
 
+struct HLSTime {
+    int32_t mSeq;
+    int64_t mTimeUs;
+    sp<AMessage> mMeta;
+
+    HLSTime(const sp<AMessage> &meta = NULL);
+    int64_t getSegmentTimeUs(bool midpoint = false) const;
+};
+
+bool operator <(const HLSTime &t0, const HLSTime &t1);
+
 }  // namespace android
 
 #endif  // UTILS_H_
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 5378bf2..3b260d6 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -36,6 +36,8 @@
     IMediaRecorder.cpp \
     IRemoteDisplay.cpp \
     IRemoteDisplayClient.cpp \
+    IResourceManagerClient.cpp \
+    IResourceManagerService.cpp \
     IStreamSource.cpp \
     MediaCodecInfo.cpp \
     Metadata.cpp \
@@ -53,6 +55,8 @@
     CharacterEncodingDetector.cpp \
     IMediaDeathNotifier.cpp \
     MediaProfiles.cpp \
+    MediaResource.cpp \
+    MediaResourcePolicy.cpp \
     IEffect.cpp \
     IEffectClient.cpp \
     AudioEffect.cpp \
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index af103c1..7d8222f 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -486,4 +486,4 @@
 }
 
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index 33dbf0b..8c8cf45 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -180,4 +180,4 @@
     }
 }
 
-};  // namespace android
+} // namespace android
diff --git a/media/libmedia/AudioPolicy.cpp b/media/libmedia/AudioPolicy.cpp
index d2d0971..c7dafcb 100644
--- a/media/libmedia/AudioPolicy.cpp
+++ b/media/libmedia/AudioPolicy.cpp
@@ -112,4 +112,4 @@
     return NO_ERROR;
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 48abb96..100a914 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -112,7 +112,9 @@
         mCblkMemory.clear();
         mBufferMemory.clear();
         IPCThreadState::self()->flushCommands();
-        AudioSystem::releaseAudioSessionId(mSessionId, -1);
+        ALOGV("~AudioRecord, releasing session id %d",
+                mSessionId);
+        AudioSystem::releaseAudioSessionId(mSessionId, -1 /*pid*/);
     }
 }
 
@@ -159,8 +161,6 @@
     }
     mTransfer = transferType;
 
-    AutoMutex lock(mLock);
-
     // invariant that mAudioRecord != 0 is true only after set() returns successfully
     if (mAudioRecord != 0) {
         ALOGE("Track already in use");
@@ -233,6 +233,7 @@
     if (cbf != NULL) {
         mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
         mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
+        // thread begins in paused state, and will not reference us until start()
     }
 
     // create the IAudioRecord
@@ -286,7 +287,6 @@
 
     status_t status = NO_ERROR;
     if (!(flags & CBLK_INVALID)) {
-        ALOGV("mAudioRecord->start()");
         status = mAudioRecord->start(event, triggerSession);
         if (status == DEAD_OBJECT) {
             flags |= CBLK_INVALID;
@@ -352,6 +352,10 @@
     mMarkerPosition = marker;
     mMarkerReached = false;
 
+    sp<AudioRecordThread> t = mAudioRecordThread;
+    if (t != 0) {
+        t->wake();
+    }
     return NO_ERROR;
 }
 
@@ -378,6 +382,10 @@
     mNewPosition = mProxy->getPosition() + updatePeriod;
     mUpdatePeriod = updatePeriod;
 
+    sp<AudioRecordThread> t = mAudioRecordThread;
+    if (t != 0) {
+        t->wake();
+    }
     return NO_ERROR;
 }
 
@@ -408,7 +416,7 @@
 uint32_t AudioRecord::getInputFramesLost() const
 {
     // no need to check mActive, because if inactive this will return 0, which is what we want
-    return AudioSystem::getInputFramesLost(getInput());
+    return AudioSystem::getInputFramesLost(getInputPrivate());
 }
 
 // -------------------------------------------------------------------------
@@ -416,7 +424,6 @@
 // must be called with mLock held
 status_t AudioRecord::openRecord_l(size_t epoch)
 {
-    status_t status;
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
     if (audioFlinger == 0) {
         ALOGE("Could not get audioflinger");
@@ -431,12 +438,16 @@
     }
 
     // Client can only express a preference for FAST.  Server will perform additional tests.
-    if ((mFlags & AUDIO_INPUT_FLAG_FAST) && !(
-            // use case: callback transfer mode
-            (mTransfer == TRANSFER_CALLBACK) &&
+    if ((mFlags & AUDIO_INPUT_FLAG_FAST) && !((
+            // either of these use cases:
+            // use case 1: callback transfer mode
+            (mTransfer == TRANSFER_CALLBACK) ||
+            // use case 2: obtain/release mode
+            (mTransfer == TRANSFER_OBTAIN)) &&
             // matching sample rate
             (mSampleRate == afSampleRate))) {
-        ALOGW("AUDIO_INPUT_FLAG_FAST denied by client");
+        ALOGW("AUDIO_INPUT_FLAG_FAST denied by client; transfer %d, track %u Hz, primary %u Hz",
+                mTransfer, mSampleRate, afSampleRate);
         // once denied, do not request again if IAudioRecord is re-created
         mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
     }
@@ -452,7 +463,8 @@
     }
 
     audio_io_handle_t input;
-    status = AudioSystem::getInputForAttr(&mAttributes, &input, (audio_session_t)mSessionId,
+    status_t status = AudioSystem::getInputForAttr(&mAttributes, &input,
+                                        (audio_session_t)mSessionId,
                                         mSampleRate, mFormat, mChannelMask, mFlags);
 
     if (status != NO_ERROR) {
@@ -684,9 +696,9 @@
     return status;
 }
 
-void AudioRecord::releaseBuffer(Buffer* audioBuffer)
+void AudioRecord::releaseBuffer(const Buffer* audioBuffer)
 {
-    // all TRANSFER_* are valid
+    // FIXME add error checking on mode, by adding an internal version
 
     size_t stepCount = audioBuffer->size / mFrameSize;
     if (stepCount == 0) {
@@ -704,7 +716,7 @@
     // the server does not automatically disable recorder on overrun, so no need to restart
 }
 
-audio_io_handle_t AudioRecord::getInput() const
+audio_io_handle_t AudioRecord::getInputPrivate() const
 {
     AutoMutex lock(mLock);
     return mInput;
@@ -712,7 +724,7 @@
 
 // -------------------------------------------------------------------------
 
-ssize_t AudioRecord::read(void* buffer, size_t userSize)
+ssize_t AudioRecord::read(void* buffer, size_t userSize, bool blocking)
 {
     if (mTransfer != TRANSFER_SYNC) {
         return INVALID_OPERATION;
@@ -731,7 +743,8 @@
     while (userSize >= mFrameSize) {
         audioBuffer.frameCount = userSize / mFrameSize;
 
-        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
+        status_t err = obtainBuffer(&audioBuffer,
+                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
         if (err < 0) {
             if (read > 0) {
                 break;
@@ -993,14 +1006,13 @@
 {
     ALOGW("dead IAudioRecord, creating a new one from %s()", from);
     ++mSequence;
-    status_t result;
 
     // if the new IAudioRecord is created, openRecord_l() will modify the
     // following member variables: mAudioRecord, mCblkMemory, mCblk, mBufferMemory.
     // It will also delete the strong references on previous IAudioRecord and IMemory
     size_t position = mProxy->getPosition();
     mNewPosition = position + mUpdatePeriod;
-    result = openRecord_l(position);
+    status_t result = openRecord_l(position);
     if (result == NO_ERROR) {
         if (mActive) {
             // callback thread or sync event hasn't changed
@@ -1072,8 +1084,8 @@
     case NS_NEVER:
         return false;
     case NS_WHENEVER:
-        // FIXME increase poll interval, or make event-driven
-        ns = 1000000000LL;
+        // Event-driven: call wake() when callback notification conditions change.
+        ns = INT64_MAX;
         // fall through
     default:
         LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
@@ -1106,6 +1118,17 @@
     }
 }
 
+void AudioRecord::AudioRecordThread::wake()
+{
+    AutoMutex _l(mMyLock);
+    if (!mPaused && mPausedInt && mPausedNs > 0) {
+        // audio record is active and internally paused with timeout.
+        mIgnoreNextPausedInt = true;
+        mPausedInt = false;
+        mMyCond.signal();
+    }
+}
+
 void AudioRecord::AudioRecordThread::pauseInternal(nsecs_t ns)
 {
     AutoMutex _l(mMyLock);
@@ -1115,4 +1138,4 @@
 
 // -------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index f5a5712..9150a94 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -34,7 +34,6 @@
 Mutex AudioSystem::gLock;
 Mutex AudioSystem::gLockCache;
 Mutex AudioSystem::gLockAPS;
-Mutex AudioSystem::gLockAPC;
 sp<IAudioFlinger> AudioSystem::gAudioFlinger;
 sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient;
 audio_error_callback AudioSystem::gAudioErrorCallback = NULL;
@@ -48,8 +47,6 @@
 audio_channel_mask_t AudioSystem::gPrevInChannelMask;
 size_t AudioSystem::gInBuffSize = 0;    // zero indicates cache is invalid
 
-sp<AudioSystem::AudioPortCallback> AudioSystem::gAudioPortCallback;
-
 // establish binder interface to AudioFlinger service
 const sp<IAudioFlinger> AudioSystem::get_audio_flinger()
 {
@@ -873,7 +870,6 @@
         Mutex::Autolock _l(gLockAPS);
         gAudioPolicyService.clear();
     }
-    // Do not clear gAudioPortCallback
 }
 
 bool AudioSystem::isOffloadSupported(const audio_offload_info_t& info)
@@ -933,12 +929,31 @@
     return aps->setAudioPortConfig(config);
 }
 
-void AudioSystem::setAudioPortCallback(sp<AudioPortCallback> callBack)
+status_t AudioSystem::addAudioPortCallback(const sp<AudioPortCallback>& callBack)
 {
-    Mutex::Autolock _l(gLockAPC);
-    gAudioPortCallback = callBack;
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+
+    Mutex::Autolock _l(gLockAPS);
+    if (gAudioPolicyServiceClient == 0) {
+        return NO_INIT;
+    }
+    return gAudioPolicyServiceClient->addAudioPortCallback(callBack);
 }
 
+status_t AudioSystem::removeAudioPortCallback(const sp<AudioPortCallback>& callBack)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+
+    Mutex::Autolock _l(gLockAPS);
+    if (gAudioPolicyServiceClient == 0) {
+        return NO_INIT;
+    }
+    return gAudioPolicyServiceClient->removeAudioPortCallback(callBack);
+}
+
+
 status_t AudioSystem::acquireSoundTriggerSession(audio_session_t *session,
                                        audio_io_handle_t *ioHandle,
                                        audio_devices_t *device)
@@ -971,12 +986,58 @@
 
 // ---------------------------------------------------------------------------
 
+status_t AudioSystem::AudioPolicyServiceClient::addAudioPortCallback(
+        const sp<AudioPortCallback>& callBack)
+{
+    Mutex::Autolock _l(mLock);
+    for (size_t i = 0; i < mAudioPortCallbacks.size(); i++) {
+        if (mAudioPortCallbacks[i] == callBack) {
+            return INVALID_OPERATION;
+        }
+    }
+    mAudioPortCallbacks.add(callBack);
+    return NO_ERROR;
+}
+
+status_t AudioSystem::AudioPolicyServiceClient::removeAudioPortCallback(
+        const sp<AudioPortCallback>& callBack)
+{
+    Mutex::Autolock _l(mLock);
+    size_t i;
+    for (i = 0; i < mAudioPortCallbacks.size(); i++) {
+        if (mAudioPortCallbacks[i] == callBack) {
+            break;
+        }
+    }
+    if (i == mAudioPortCallbacks.size()) {
+        return INVALID_OPERATION;
+    }
+    mAudioPortCallbacks.removeAt(i);
+    return NO_ERROR;
+}
+
+void AudioSystem::AudioPolicyServiceClient::onAudioPortListUpdate()
+{
+    Mutex::Autolock _l(mLock);
+    for (size_t i = 0; i < mAudioPortCallbacks.size(); i++) {
+        mAudioPortCallbacks[i]->onAudioPortListUpdate();
+    }
+}
+
+void AudioSystem::AudioPolicyServiceClient::onAudioPatchListUpdate()
+{
+    Mutex::Autolock _l(mLock);
+    for (size_t i = 0; i < mAudioPortCallbacks.size(); i++) {
+        mAudioPortCallbacks[i]->onAudioPatchListUpdate();
+    }
+}
+
 void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who __unused)
 {
     {
-        Mutex::Autolock _l(gLockAPC);
-        if (gAudioPortCallback != 0) {
-            gAudioPortCallback->onServiceDied();
+        Mutex::Autolock _l(mLock);
+        for (size_t i = 0; i < mAudioPortCallbacks.size(); i++) {
+            mAudioPortCallbacks[i]->onServiceDied();
         }
     }
     {
@@ -987,20 +1048,4 @@
     ALOGW("AudioPolicyService server died!");
 }
 
-void AudioSystem::AudioPolicyServiceClient::onAudioPortListUpdate()
-{
-    Mutex::Autolock _l(gLockAPC);
-    if (gAudioPortCallback != 0) {
-        gAudioPortCallback->onAudioPortListUpdate();
-    }
-}
-
-void AudioSystem::AudioPolicyServiceClient::onAudioPatchListUpdate()
-{
-    Mutex::Autolock _l(gLockAPC);
-    if (gAudioPortCallback != 0) {
-        gAudioPortCallback->onAudioPatchListUpdate();
-    }
-}
-
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index c775e7b..ce30c62 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -203,8 +203,8 @@
         mCblkMemory.clear();
         mSharedBuffer.clear();
         IPCThreadState::self()->flushCommands();
-        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
-                IPCThreadState::self()->getCallingPid(), mClientPid);
+        ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
+                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
         AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
     }
 }
@@ -229,9 +229,9 @@
         const audio_attributes_t* pAttributes)
 {
     ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
-          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
+          "flags #%x, notificationFrames %u, sessionId %d, transferType %d, uid %d, pid %d",
           streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
-          sessionId, transferType);
+          sessionId, transferType, uid, pid);
 
     switch (transferType) {
     case TRANSFER_DEFAULT:
@@ -274,8 +274,6 @@
 
     ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
 
-    AutoMutex lock(mLock);
-
     // invariant that mAudioTrack != 0 is true only after set() returns successfully
     if (mAudioTrack != 0) {
         ALOGE("Track already in use");
@@ -401,6 +399,7 @@
     if (cbf != NULL) {
         mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
         mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
+        // thread begins in paused state, and will not reference us until start()
     }
 
     // create the IAudioTrack
@@ -668,14 +667,18 @@
 
 status_t AudioTrack::setSampleRate(uint32_t rate)
 {
-    if (mIsTimed || isOffloadedOrDirect()) {
+    AutoMutex lock(mLock);
+    if (rate == mSampleRate) {
+        return NO_ERROR;
+    }
+    if (mIsTimed || isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
         return INVALID_OPERATION;
     }
-
-    AutoMutex lock(mLock);
     if (mOutput == AUDIO_IO_HANDLE_NONE) {
         return NO_INIT;
     }
+    // NOTE: it is theoretically possible, but highly unlikely, that a device change
+    // could mean a previously allowed sampling rate is no longer allowed.
     uint32_t afSamplingRate;
     if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
         return NO_INIT;
@@ -964,9 +967,9 @@
 
 
     if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
-        ALOGE("Could not get audio output for stream type %d, usage %d, sample rate %u, format %#x,"
+        ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
               " channel mask %#x, flags %#x",
-              streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
+              mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
         return BAD_VALUE;
     }
     {
@@ -981,6 +984,7 @@
         ALOGE("getLatency(%d) failed status %d", output, status);
         goto release;
     }
+    ALOGV("createTrack_l() output %d afLatency %u", output, afLatency);
 
     size_t afFrameCount;
     status = AudioSystem::getFrameCount(output, &afFrameCount);
@@ -1010,11 +1014,11 @@
             (mTransfer == TRANSFER_OBTAIN)) &&
             // matching sample rate
             (mSampleRate == afSampleRate))) {
-        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
+        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, track %u Hz, output %u Hz",
+                mTransfer, mSampleRate, afSampleRate);
         // once denied, do not request again if IAudioTrack is re-created
         mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
     }
-    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
 
     // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
     //  n = 1   fast track with single buffering; nBuffering is ignored
@@ -1090,6 +1094,7 @@
 
     size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                 // but we will still need the original value also
+    int originalSessionId = mSessionId;
     sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                       mSampleRate,
                                                       mFormat,
@@ -1102,6 +1107,8 @@
                                                       &mSessionId,
                                                       mClientUid,
                                                       &status);
+    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
+            "session ID changed from %d to %d", originalSessionId, mSessionId);
 
     if (status != NO_ERROR) {
         ALOGE("AudioFlinger could not create track, status: %d", status);
@@ -1194,9 +1201,13 @@
     // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
     void* buffers;
     if (mSharedBuffer == 0) {
-        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
+        buffers = cblk + 1;
     } else {
         buffers = mSharedBuffer->pointer();
+        if (buffers == NULL) {
+            ALOGE("Could not get buffer pointer");
+            return NO_INIT;
+        }
     }
 
     mAudioTrack->attachAuxEffect(mAuxEffectId);
@@ -1415,8 +1426,7 @@
             return ssize_t(err);
         }
 
-        size_t toWrite;
-        toWrite = audioBuffer.size;
+        size_t toWrite = audioBuffer.size;
         memcpy(audioBuffer.i8, buffer, toWrite);
         buffer = ((const char *) buffer) + toWrite;
         userSize -= toWrite;
@@ -1784,7 +1794,7 @@
             return WAIT_PERIOD_MS * 1000000LL;
         }
 
-        size_t releasedFrames = audioBuffer.size / mFrameSize;
+        size_t releasedFrames = writtenSize / mFrameSize;
         audioBuffer.frameCount = releasedFrames;
         mRemainingFrames -= releasedFrames;
         if (misalignment >= releasedFrames) {
@@ -1829,7 +1839,6 @@
     ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
           isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
     ++mSequence;
-    status_t result;
 
     // refresh the audio configuration cache in this process to make sure we get new
     // output parameters and new IAudioFlinger in createTrack_l()
@@ -1851,13 +1860,13 @@
     // following member variables: mAudioTrack, mCblkMemory and mCblk.
     // It will also delete the strong references on previous IAudioTrack and IMemory.
     // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
-    result = createTrack_l();
+    status_t result = createTrack_l();
 
     // take the frames that will be lost by track recreation into account in saved position
     // For streaming tracks, this is the amount we obtained from the user/client
     // (not the number actually consumed at the server - those are already lost).
     (void) updateAndGetPosition_l();
-    if (mStaticProxy != 0) {
+    if (mStaticProxy == 0) {
         mPosition = mReleased;
     }
 
@@ -2185,4 +2194,4 @@
     mPausedNs = ns;
 }
 
-}; // namespace android
+} // namespace android
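
The `buffers = cblk + 1` change above replaces explicit byte arithmetic with typed pointer arithmetic; both forms yield the address immediately past the control block. A standalone sketch of the equivalence, using a stand-in struct rather than the real audio_track_cblk_t:

#include <cassert>
#include <cstdint>

// Stand-in for audio_track_cblk_t; the real control block lives in the
// shared memory region obtained from IAudioTrack.
struct FakeCblk {
    uint32_t flags;
    uint32_t frameCount;
    // ... more fields in the real struct ...
};

int main() {
    FakeCblk block[2] = {};            // pretend shared region: cblk + data area
    FakeCblk* cblk = &block[0];

    // Old form: byte arithmetic past the control block.
    void* byBytes = reinterpret_cast<char*>(cblk) + sizeof(FakeCblk);
    // New form: typed pointer arithmetic, same address, no casts.
    void* byType  = cblk + 1;

    assert(byBytes == byType);
    return 0;
}
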
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 8e3b633..38055f9 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -83,6 +83,8 @@
     GET_AUDIO_HW_SYNC
 };
 
+#define MAX_ITEMS_PER_LIST 1024
+
 class BpAudioFlinger : public BpInterface<IAudioFlinger>
 {
 public:
@@ -1289,15 +1291,27 @@
         } break;
         case LIST_AUDIO_PORTS: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            unsigned int num_ports = data.readInt32();
+            unsigned int numPortsReq = data.readInt32();
+            if (numPortsReq > MAX_ITEMS_PER_LIST) {
+                numPortsReq = MAX_ITEMS_PER_LIST;
+            }
+            unsigned int numPorts = numPortsReq;
             struct audio_port *ports =
-                    (struct audio_port *)calloc(num_ports,
+                    (struct audio_port *)calloc(numPortsReq,
                                                            sizeof(struct audio_port));
-            status_t status = listAudioPorts(&num_ports, ports);
+            if (ports == NULL) {
+                reply->writeInt32(NO_MEMORY);
+                reply->writeInt32(0);
+                return NO_ERROR;
+            }
+            status_t status = listAudioPorts(&numPorts, ports);
             reply->writeInt32(status);
+            reply->writeInt32(numPorts);
             if (status == NO_ERROR) {
-                reply->writeInt32(num_ports);
-                reply->write(&ports, num_ports * sizeof(struct audio_port));
+                if (numPortsReq > numPorts) {
+                    numPortsReq = numPorts;
+                }
+                reply->write(ports, numPortsReq * sizeof(struct audio_port));
             }
             free(ports);
             return NO_ERROR;
@@ -1336,15 +1350,27 @@
         } break;
         case LIST_AUDIO_PATCHES: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            unsigned int num_patches = data.readInt32();
+            unsigned int numPatchesReq = data.readInt32();
+            if (numPatchesReq > MAX_ITEMS_PER_LIST) {
+                numPatchesReq = MAX_ITEMS_PER_LIST;
+            }
+            unsigned int numPatches = numPatchesReq;
             struct audio_patch *patches =
-                    (struct audio_patch *)calloc(num_patches,
+                    (struct audio_patch *)calloc(numPatchesReq,
                                                  sizeof(struct audio_patch));
-            status_t status = listAudioPatches(&num_patches, patches);
+            if (patches == NULL) {
+                reply->writeInt32(NO_MEMORY);
+                reply->writeInt32(0);
+                return NO_ERROR;
+            }
+            status_t status = listAudioPatches(&numPatches, patches);
             reply->writeInt32(status);
+            reply->writeInt32(numPatches);
             if (status == NO_ERROR) {
-                reply->writeInt32(num_patches);
-                reply->write(&patches, num_patches * sizeof(struct audio_patch));
+                if (numPatchesReq > numPatches) {
+                    numPatchesReq = numPatches;
+                }
+                reply->write(patches, numPatchesReq * sizeof(struct audio_patch));
             }
             free(patches);
             return NO_ERROR;
@@ -1369,4 +1395,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
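
The LIST_AUDIO_PORTS and LIST_AUDIO_PATCHES changes above follow one pattern: clamp the caller-supplied count before allocating, bail out on allocation failure, always write the status and the count reported by the implementation, and copy back at most the clamped number of entries. A self-contained sketch of that shape, with toy stand-ins for Parcel and audio_port (not real binder code):

#include <algorithm>
#include <cstdint>
#include <vector>

struct Port { int id; };               // stand-in for struct audio_port
struct Reply {
    int32_t status;
    uint32_t numPorts;                 // count reported by the implementation
    std::vector<Port> ports;           // at most min(requested, reported) entries
};

static const unsigned int kMaxItemsPerList = 1024;  // mirrors MAX_ITEMS_PER_LIST

Reply handleListPorts(unsigned int numPortsReq, const std::vector<Port>& available) {
    Reply reply;
    numPortsReq = std::min(numPortsReq, kMaxItemsPerList);   // bound the allocation
    unsigned int numPorts = static_cast<unsigned int>(available.size());

    reply.status = 0;                                        // NO_ERROR
    reply.numPorts = numPorts;                               // always reported
    unsigned int toCopy = std::min(numPortsReq, numPorts);   // never overrun the buffer
    reply.ports.assign(available.begin(), available.begin() + toCopy);
    return reply;
}
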
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp
index 1c299f7..641e6c1 100644
--- a/media/libmedia/IAudioFlingerClient.cpp
+++ b/media/libmedia/IAudioFlingerClient.cpp
@@ -99,4 +99,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index f2ff27b..39374d8 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -1228,4 +1228,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IAudioPolicyServiceClient.cpp b/media/libmedia/IAudioPolicyServiceClient.cpp
index e802277..7c65878 100644
--- a/media/libmedia/IAudioPolicyServiceClient.cpp
+++ b/media/libmedia/IAudioPolicyServiceClient.cpp
@@ -80,4 +80,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IAudioRecord.cpp b/media/libmedia/IAudioRecord.cpp
index 8a4a383..9d80753 100644
--- a/media/libmedia/IAudioRecord.cpp
+++ b/media/libmedia/IAudioRecord.cpp
@@ -91,4 +91,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IAudioTrack.cpp b/media/libmedia/IAudioTrack.cpp
index df209fd..651cb61 100644
--- a/media/libmedia/IAudioTrack.cpp
+++ b/media/libmedia/IAudioTrack.cpp
@@ -292,4 +292,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IDrm.cpp b/media/libmedia/IDrm.cpp
index b08fa82..714a0b3 100644
--- a/media/libmedia/IDrm.cpp
+++ b/media/libmedia/IDrm.cpp
@@ -125,7 +125,8 @@
                       Vector<uint8_t> const &initData,
                       String8 const &mimeType, DrmPlugin::KeyType keyType,
                       KeyedVector<String8, String8> const &optionalParameters,
-                      Vector<uint8_t> &request, String8 &defaultUrl) {
+                      Vector<uint8_t> &request, String8 &defaultUrl,
+                      DrmPlugin::KeyRequestType *keyRequestType) {
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
 
@@ -143,6 +144,7 @@
 
         readVector(reply, request);
         defaultUrl = reply.readString8();
+        *keyRequestType = static_cast<DrmPlugin::KeyRequestType>(reply.readInt32());
 
         return reply.readInt32();
     }
@@ -562,13 +564,15 @@
 
             Vector<uint8_t> request;
             String8 defaultUrl;
+            DrmPlugin::KeyRequestType keyRequestType;
 
-            status_t result = getKeyRequest(sessionId, initData,
-                                            mimeType, keyType,
-                                            optionalParameters,
-                                            request, defaultUrl);
+            status_t result = getKeyRequest(sessionId, initData, mimeType,
+                    keyType, optionalParameters, request, defaultUrl,
+                    &keyRequestType);
+
             writeVector(reply, request);
             reply->writeString8(defaultUrl);
+            reply->writeInt32(static_cast<int32_t>(keyRequestType));
             reply->writeInt32(result);
             return OK;
         }
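
A rough caller-side sketch of the extended getKeyRequest() signature. This is not part of the patch; it assumes the caller already holds an sp<IDrm> proxy with an open session, and uses the streaming key type as an example.

#include <media/IDrm.h>

namespace android {

// Hypothetical helper: same call as before, plus the new KeyRequestType
// out-parameter, which reports what kind of request was generated.
status_t requestStreamingKeys(const sp<IDrm>& drm,
                              const Vector<uint8_t>& sessionId,
                              const Vector<uint8_t>& initData,
                              const String8& mimeType,
                              const KeyedVector<String8, String8>& optParams,
                              Vector<uint8_t>& request, String8& defaultUrl,
                              DrmPlugin::KeyRequestType& keyRequestType) {
    return drm->getKeyRequest(sessionId, initData, mimeType,
                              DrmPlugin::kKeyType_Streaming, optParams,
                              request, defaultUrl, &keyRequestType);
}

}  // namespace android
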
diff --git a/media/libmedia/IDrmClient.cpp b/media/libmedia/IDrmClient.cpp
index f50715e..490c6ed 100644
--- a/media/libmedia/IDrmClient.cpp
+++ b/media/libmedia/IDrmClient.cpp
@@ -78,4 +78,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IEffect.cpp b/media/libmedia/IEffect.cpp
index c2fff78..eb4b098 100644
--- a/media/libmedia/IEffect.cpp
+++ b/media/libmedia/IEffect.cpp
@@ -201,4 +201,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IEffectClient.cpp b/media/libmedia/IEffectClient.cpp
index aef4371..1322e72 100644
--- a/media/libmedia/IEffectClient.cpp
+++ b/media/libmedia/IEffectClient.cpp
@@ -141,4 +141,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaCodecList.cpp b/media/libmedia/IMediaCodecList.cpp
index bf7c5ca..80020db 100644
--- a/media/libmedia/IMediaCodecList.cpp
+++ b/media/libmedia/IMediaCodecList.cpp
@@ -160,4 +160,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaDeathNotifier.cpp b/media/libmedia/IMediaDeathNotifier.cpp
index 38e9ca0..d4360ea 100644
--- a/media/libmedia/IMediaDeathNotifier.cpp
+++ b/media/libmedia/IMediaDeathNotifier.cpp
@@ -108,4 +108,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaHTTPConnection.cpp b/media/libmedia/IMediaHTTPConnection.cpp
index 7e26ee6..2ff7658 100644
--- a/media/libmedia/IMediaHTTPConnection.cpp
+++ b/media/libmedia/IMediaHTTPConnection.cpp
@@ -178,5 +178,4 @@
 IMPLEMENT_META_INTERFACE(
         MediaHTTPConnection, "android.media.IMediaHTTPConnection");
 
-}  // namespace android
-
+} // namespace android
diff --git a/media/libmedia/IMediaHTTPService.cpp b/media/libmedia/IMediaHTTPService.cpp
index 1260582..f30d0f3 100644
--- a/media/libmedia/IMediaHTTPService.cpp
+++ b/media/libmedia/IMediaHTTPService.cpp
@@ -54,5 +54,4 @@
 IMPLEMENT_META_INTERFACE(
         MediaHTTPService, "android.media.IMediaHTTPService");
 
-}  // namespace android
-
+} // namespace android
diff --git a/media/libmedia/IMediaLogService.cpp b/media/libmedia/IMediaLogService.cpp
index a4af7b7..230749e 100644
--- a/media/libmedia/IMediaLogService.cpp
+++ b/media/libmedia/IMediaLogService.cpp
@@ -91,4 +91,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index aa2665a..551cffe 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -297,4 +297,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index dcd5670..ce3009a 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -574,4 +574,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaPlayerClient.cpp b/media/libmedia/IMediaPlayerClient.cpp
index a670c96..d608386 100644
--- a/media/libmedia/IMediaPlayerClient.cpp
+++ b/media/libmedia/IMediaPlayerClient.cpp
@@ -75,4 +75,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp
index feea267..aa7b2e1 100644
--- a/media/libmedia/IMediaPlayerService.cpp
+++ b/media/libmedia/IMediaPlayerService.cpp
@@ -234,4 +234,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index 9181f86..8ca256c 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -463,4 +463,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaRecorderClient.cpp b/media/libmedia/IMediaRecorderClient.cpp
index e7907e3..6795d23 100644
--- a/media/libmedia/IMediaRecorderClient.cpp
+++ b/media/libmedia/IMediaRecorderClient.cpp
@@ -67,4 +67,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IRemoteDisplay.cpp b/media/libmedia/IRemoteDisplay.cpp
index 1e15434..869d11a 100644
--- a/media/libmedia/IRemoteDisplay.cpp
+++ b/media/libmedia/IRemoteDisplay.cpp
@@ -91,4 +91,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IRemoteDisplayClient.cpp b/media/libmedia/IRemoteDisplayClient.cpp
index 9d63bc9..bedeb6c 100644
--- a/media/libmedia/IRemoteDisplayClient.cpp
+++ b/media/libmedia/IRemoteDisplayClient.cpp
@@ -101,4 +101,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IResourceManagerClient.cpp b/media/libmedia/IResourceManagerClient.cpp
new file mode 100644
index 0000000..6fa56fc
--- /dev/null
+++ b/media/libmedia/IResourceManagerClient.cpp
@@ -0,0 +1,70 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+#include <media/IResourceManagerClient.h>
+
+namespace android {
+
+enum {
+    RECLAIM_RESOURCE = IBinder::FIRST_CALL_TRANSACTION,
+};
+
+class BpResourceManagerClient: public BpInterface<IResourceManagerClient>
+{
+public:
+    BpResourceManagerClient(const sp<IBinder> &impl)
+        : BpInterface<IResourceManagerClient>(impl)
+    {
+    }
+
+    virtual bool reclaimResource() {
+        Parcel data, reply;
+        data.writeInterfaceToken(IResourceManagerClient::getInterfaceDescriptor());
+
+        bool ret = false;
+        status_t status = remote()->transact(RECLAIM_RESOURCE, data, &reply);
+        if (status == NO_ERROR) {
+            ret = (bool)reply.readInt32();
+        }
+        return ret;
+    }
+};
+
+IMPLEMENT_META_INTERFACE(ResourceManagerClient, "android.media.IResourceManagerClient");
+
+// ----------------------------------------------------------------------
+
+status_t BnResourceManagerClient::onTransact(
+    uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags)
+{
+    switch (code) {
+        case RECLAIM_RESOURCE: {
+            CHECK_INTERFACE(IResourceManagerClient, data, reply);
+            bool ret = reclaimResource();
+            reply->writeInt32(ret);
+            return NO_ERROR;
+        } break;
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+} // namespace android
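
A hypothetical in-process implementation of the new callback interface (not part of the patch). The only contract visible from the transaction above is that reclaimResource() should return true when resources were actually released.

#include <media/IResourceManagerClient.h>

namespace android {

struct CodecReclaimClient : public BnResourceManagerClient {
    // Called by the resource manager when another (higher-priority) client
    // needs the resources this client registered.
    virtual bool reclaimResource() {
        // A real client would tear down its codec here and report whether
        // anything was actually freed; this stub just claims success.
        return true;
    }
};

}  // namespace android
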
diff --git a/media/libmedia/IResourceManagerService.cpp b/media/libmedia/IResourceManagerService.cpp
new file mode 100644
index 0000000..95a2d1c
--- /dev/null
+++ b/media/libmedia/IResourceManagerService.cpp
@@ -0,0 +1,166 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "IResourceManagerService"
+#include <utils/Log.h>
+
+#include "media/IResourceManagerService.h"
+
+#include <binder/Parcel.h>
+
+#include <stdint.h>
+#include <sys/types.h>
+
+namespace android {
+
+enum {
+    CONFIG = IBinder::FIRST_CALL_TRANSACTION,
+    ADD_RESOURCE,
+    REMOVE_RESOURCE,
+    RECLAIM_RESOURCE,
+};
+
+template <typename T>
+static void writeToParcel(Parcel *data, const Vector<T> &items) {
+    size_t size = items.size();
+    size_t sizePosition = data->dataPosition();
+    // truncates size, but should be okay for this use case
+    data->writeUint32(static_cast<uint32_t>(size));
+    for (size_t i = 0; i < size; i++) {
+        size_t position = data->dataPosition();
+        items[i].writeToParcel(data);
+    }
+}
+
+template <typename T>
+static void readFromParcel(const Parcel &data, Vector<T> *items) {
+    size_t size = (size_t)data.readUint32();
+    for (size_t i = 0; i < size; i++) {
+        T item;
+        item.readFromParcel(data);
+        items->add(item);
+    }
+}
+
+class BpResourceManagerService : public BpInterface<IResourceManagerService>
+{
+public:
+    BpResourceManagerService(const sp<IBinder> &impl)
+        : BpInterface<IResourceManagerService>(impl)
+    {
+    }
+
+    virtual void config(const Vector<MediaResourcePolicy> &policies) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
+        writeToParcel(&data, policies);
+        remote()->transact(CONFIG, data, &reply);
+    }
+
+    virtual void addResource(
+            int pid,
+            int64_t clientId,
+            const sp<IResourceManagerClient> client,
+            const Vector<MediaResource> &resources) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
+        data.writeInt32(pid);
+        data.writeInt64(clientId);
+        data.writeStrongBinder(IInterface::asBinder(client));
+        writeToParcel(&data, resources);
+
+        remote()->transact(ADD_RESOURCE, data, &reply);
+    }
+
+    virtual void removeResource(int64_t clientId) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
+        data.writeInt64(clientId);
+
+        remote()->transact(REMOVE_RESOURCE, data, &reply);
+    }
+
+    virtual bool reclaimResource(int callingPid, const Vector<MediaResource> &resources) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
+        data.writeInt32(callingPid);
+        writeToParcel(&data, resources);
+
+        bool ret = false;
+        status_t status = remote()->transact(RECLAIM_RESOURCE, data, &reply);
+        if (status == NO_ERROR) {
+            ret = (bool)reply.readInt32();
+        }
+        return ret;
+    }
+};
+
+IMPLEMENT_META_INTERFACE(ResourceManagerService, "android.media.IResourceManagerService");
+
+// ----------------------------------------------------------------------
+
+
+status_t BnResourceManagerService::onTransact(
+    uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags)
+{
+    switch (code) {
+        case CONFIG: {
+            CHECK_INTERFACE(IResourceManagerService, data, reply);
+            Vector<MediaResourcePolicy> policies;
+            readFromParcel(data, &policies);
+            config(policies);
+            return NO_ERROR;
+        } break;
+
+        case ADD_RESOURCE: {
+            CHECK_INTERFACE(IResourceManagerService, data, reply);
+            int pid = data.readInt32();
+            int64_t clientId = data.readInt64();
+            sp<IResourceManagerClient> client(
+                    interface_cast<IResourceManagerClient>(data.readStrongBinder()));
+            Vector<MediaResource> resources;
+            readFromParcel(data, &resources);
+            addResource(pid, clientId, client, resources);
+            return NO_ERROR;
+        } break;
+
+        case REMOVE_RESOURCE: {
+            CHECK_INTERFACE(IResourceManagerService, data, reply);
+            int64_t clientId = data.readInt64();
+            removeResource(clientId);
+            return NO_ERROR;
+        } break;
+
+        case RECLAIM_RESOURCE: {
+            CHECK_INTERFACE(IResourceManagerService, data, reply);
+            int callingPid = data.readInt32();
+            Vector<MediaResource> resources;
+            readFromParcel(data, &resources);
+            bool ret = reclaimResource(callingPid, resources);
+            reply->writeInt32(ret);
+            return NO_ERROR;
+        } break;
+
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+// ----------------------------------------------------------------------------
+
+} // namespace android
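
A rough caller-side sketch of the new service interface (not part of the patch). It assumes the resource-type constants such as kResourceSecureCodec are exported by MediaResource.h, and that the caller obtained the service proxy and its own callback binder elsewhere.

#include <media/IResourceManagerClient.h>
#include <media/IResourceManagerService.h>
#include <media/MediaResource.h>

namespace android {

// Register one secure-codec resource for this client ...
void registerSecureCodec(const sp<IResourceManagerService>& service,
                         const sp<IResourceManagerClient>& client,
                         int pid, int64_t clientId) {
    Vector<MediaResource> resources;
    resources.push_back(MediaResource(String8(kResourceSecureCodec), 1));
    service->addResource(pid, clientId, client, resources);
}

// ... and drop the registration when the codec is released.
void unregisterSecureCodec(const sp<IResourceManagerService>& service,
                           int64_t clientId) {
    service->removeResource(clientId);
}

}  // namespace android
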
diff --git a/media/libmedia/IStreamSource.cpp b/media/libmedia/IStreamSource.cpp
index d480aef..840e453 100644
--- a/media/libmedia/IStreamSource.cpp
+++ b/media/libmedia/IStreamSource.cpp
@@ -35,6 +35,9 @@
 // static
 const char *const IStreamListener::kKeyMediaTimeUs = "media-time-us";
 
+// static
+const char *const IStreamListener::kKeyRecentMediaTimeUs = "recent-media-time-us";
+
 enum {
     // IStreamSource
     SET_LISTENER = IBinder::FIRST_CALL_TRANSACTION,
diff --git a/media/libmedia/MediaResource.cpp b/media/libmedia/MediaResource.cpp
new file mode 100644
index 0000000..8be01bc
--- /dev/null
+++ b/media/libmedia/MediaResource.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaResource"
+#include <utils/Log.h>
+#include <media/MediaResource.h>
+
+namespace android {
+
+const char kResourceSecureCodec[] = "secure-codec";
+const char kResourceNonSecureCodec[] = "non-secure-codec";
+const char kResourceGraphicMemory[] = "graphic-memory";
+
+MediaResource::MediaResource() : mValue(0) {}
+
+MediaResource::MediaResource(String8 type, uint64_t value)
+        : mType(type),
+          mValue(value) {}
+
+MediaResource::MediaResource(String8 type, String8 subType, uint64_t value)
+        : mType(type),
+          mSubType(subType),
+          mValue(value) {}
+
+void MediaResource::readFromParcel(const Parcel &parcel) {
+    mType = parcel.readString8();
+    mSubType = parcel.readString8();
+    mValue = parcel.readUint64();
+}
+
+void MediaResource::writeToParcel(Parcel *parcel) const {
+    parcel->writeString8(mType);
+    parcel->writeString8(mSubType);
+    parcel->writeUint64(mValue);
+}
+
+String8 MediaResource::toString() const {
+    String8 str;
+    str.appendFormat("%s/%s:%llu", mType.string(), mSubType.string(), mValue);
+    return str;
+}
+
+bool MediaResource::operator==(const MediaResource &other) const {
+    return (other.mType == mType) && (other.mSubType == mSubType) && (other.mValue == mValue);
+}
+
+bool MediaResource::operator!=(const MediaResource &other) const {
+    return !(*this == other);
+}
+
+} // namespace android
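
A minimal round-trip sketch for the new parceling helpers (not part of the patch; assumes the kResourceGraphicMemory constant is declared in MediaResource.h).

#include <binder/Parcel.h>
#include <media/MediaResource.h>

namespace android {

bool roundTripEqual() {
    MediaResource in(String8(kResourceGraphicMemory), String8("mem"), 4096);

    Parcel parcel;
    in.writeToParcel(&parcel);      // writes type, subtype, value
    parcel.setDataPosition(0);      // rewind before reading back

    MediaResource out;
    out.readFromParcel(parcel);
    return in == out;               // uses the new operator==
}

}  // namespace android
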
diff --git a/media/libmedia/MediaResourcePolicy.cpp b/media/libmedia/MediaResourcePolicy.cpp
new file mode 100644
index 0000000..2bb996a
--- /dev/null
+++ b/media/libmedia/MediaResourcePolicy.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaResourcePolicy"
+#include <utils/Log.h>
+#include <media/MediaResourcePolicy.h>
+
+namespace android {
+
+const char kPolicySupportsMultipleSecureCodecs[] = "supports-multiple-secure-codecs";
+const char kPolicySupportsSecureWithNonSecureCodec[] = "supports-secure-with-non-secure-codec";
+
+MediaResourcePolicy::MediaResourcePolicy() : mValue(0) {}
+
+MediaResourcePolicy::MediaResourcePolicy(String8 type, uint64_t value)
+        : mType(type),
+          mValue(value) {}
+
+void MediaResourcePolicy::readFromParcel(const Parcel &parcel) {
+    mType = parcel.readString8();
+    mValue = parcel.readUint64();
+}
+
+void MediaResourcePolicy::writeToParcel(Parcel *parcel) const {
+    parcel->writeString8(mType);
+    parcel->writeUint64(mValue);
+}
+
+String8 MediaResourcePolicy::toString() const {
+    String8 str;
+    str.appendFormat("%s:%llu", mType.string(), mValue);
+    return str;
+}
+
+} // namespace android
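
A short sketch of pushing these policies to the resource manager (not part of the patch; assumes the policy name constants are exported by MediaResourcePolicy.h and that the service proxy is obtained elsewhere). A value of 1 means "allowed", 0 means "not allowed".

#include <media/IResourceManagerService.h>
#include <media/MediaResourcePolicy.h>

namespace android {

void configurePolicies(const sp<IResourceManagerService>& service) {
    Vector<MediaResourcePolicy> policies;
    policies.push_back(MediaResourcePolicy(
            String8(kPolicySupportsMultipleSecureCodecs), 1));
    policies.push_back(MediaResourcePolicy(
            String8(kPolicySupportsSecureWithNonSecureCodec), 0));
    service->config(policies);
}

}  // namespace android
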
diff --git a/media/libmedia/StringArray.cpp b/media/libmedia/StringArray.cpp
index 477e3fd..b2e5907 100644
--- a/media/libmedia/StringArray.cpp
+++ b/media/libmedia/StringArray.cpp
@@ -110,4 +110,4 @@
 }
 
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index f91e3e4..9d69b6a 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -429,4 +429,4 @@
     return false;
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp
index 8e8a1ed..873808a 100644
--- a/media/libmedia/mediametadataretriever.cpp
+++ b/media/libmedia/mediametadataretriever.cpp
@@ -176,4 +176,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index d1d51cc..5dd8c02 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -877,4 +877,4 @@
     return mPlayer->setNextPlayer(next == NULL ? NULL : next->mPlayer);
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 973e156..a2d6e53 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -680,4 +680,4 @@
     notify(MEDIA_RECORDER_EVENT_ERROR, MEDIA_ERROR_SERVER_DIED, 0);
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmediaplayerservice/Drm.cpp b/media/libmediaplayerservice/Drm.cpp
index d4f6fab..49e01d1 100644
--- a/media/libmediaplayerservice/Drm.cpp
+++ b/media/libmediaplayerservice/Drm.cpp
@@ -358,7 +358,8 @@
                             Vector<uint8_t> const &initData,
                             String8 const &mimeType, DrmPlugin::KeyType keyType,
                             KeyedVector<String8, String8> const &optionalParameters,
-                            Vector<uint8_t> &request, String8 &defaultUrl) {
+                            Vector<uint8_t> &request, String8 &defaultUrl,
+                            DrmPlugin::KeyRequestType *keyRequestType) {
     Mutex::Autolock autoLock(mLock);
 
     if (mInitCheck != OK) {
@@ -372,7 +373,8 @@
     DrmSessionManager::Instance()->useSession(sessionId);
 
     return mPlugin->getKeyRequest(sessionId, initData, mimeType, keyType,
-                                  optionalParameters, request, defaultUrl);
+                                  optionalParameters, request, defaultUrl,
+                                  keyRequestType);
 }
 
 status_t Drm::provideKeyResponse(Vector<uint8_t> const &sessionId,
diff --git a/media/libmediaplayerservice/Drm.h b/media/libmediaplayerservice/Drm.h
index 0cea639..7e8f246 100644
--- a/media/libmediaplayerservice/Drm.h
+++ b/media/libmediaplayerservice/Drm.h
@@ -53,7 +53,8 @@
                       Vector<uint8_t> const &initData,
                       String8 const &mimeType, DrmPlugin::KeyType keyType,
                       KeyedVector<String8, String8> const &optionalParameters,
-                      Vector<uint8_t> &request, String8 &defaultUrl);
+                      Vector<uint8_t> &request, String8 &defaultUrl,
+                      DrmPlugin::KeyRequestType *keyRequestType);
 
     virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
                                         Vector<uint8_t> const &response,
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index a040343..5a31b74 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -265,6 +265,12 @@
         }
     }
 
+    mBitrate = totalBitrate;
+
+    return OK;
+}
+
+status_t NuPlayer::GenericSource::startSources() {
     // Start the selected A/V tracks now before we start buffering.
     // Widevine sources might re-initialize crypto when starting, if we delay
     // this to start(), all data buffered during prepare would be wasted.
@@ -279,8 +285,6 @@
         return UNKNOWN_ERROR;
     }
 
-    mBitrate = totalBitrate;
-
     return OK;
 }
 
@@ -415,6 +419,32 @@
             | FLAG_CAN_SEEK_FORWARD
             | FLAG_CAN_SEEK);
 
+    if (mIsSecure) {
+        // secure decoders must be instantiated before starting widevine source
+        sp<AMessage> reply = new AMessage(kWhatSecureDecodersInstantiated, this);
+        notifyInstantiateSecureDecoders(reply);
+    } else {
+        finishPrepareAsync();
+    }
+}
+
+void NuPlayer::GenericSource::onSecureDecodersInstantiated(status_t err) {
+    if (err != OK) {
+        ALOGE("Failed to instantiate secure decoders!");
+        notifyPreparedAndCleanup(err);
+        return;
+    }
+    finishPrepareAsync();
+}
+
+void NuPlayer::GenericSource::finishPrepareAsync() {
+    status_t err = startSources();
+    if (err != OK) {
+        ALOGE("Failed to init start data source!");
+        notifyPreparedAndCleanup(err);
+        return;
+    }
+
     if (mIsStreaming) {
         mPrepareBuffering = true;
 
@@ -430,6 +460,7 @@
         mDataSource.clear();
         mCachedSource.clear();
         mHttpSource.clear();
+        mBitrate = -1;
 
         cancelPollBuffering();
     }
@@ -805,6 +836,14 @@
           break;
       }
 
+      case kWhatSecureDecodersInstantiated:
+      {
+          int32_t err;
+          CHECK(msg->findInt32("err", &err));
+          onSecureDecodersInstantiated(err);
+          break;
+      }
+
       case kWhatStopWidevine:
       {
           // mStopRead is only used for Widevine to prevent the video source
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 5fc41ec..862ee5f 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -96,6 +96,7 @@
         kWhatStopWidevine,
         kWhatStart,
         kWhatResume,
+        kWhatSecureDecodersInstantiated,
     };
 
     struct Track {
@@ -158,6 +159,9 @@
     void setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position);
 
     void notifyPreparedAndCleanup(status_t err);
+    void onSecureDecodersInstantiated(status_t err);
+    void finishPrepareAsync();
+    status_t startSources();
 
     void onGetFormatMeta(sp<AMessage> msg) const;
     sp<MetaData> doGetFormatMeta(bool audio) const;
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index d01e83a..0476c9b 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -281,6 +281,34 @@
             break;
         }
 
+        case LiveSession::kWhatBufferingStart:
+        {
+            sp<AMessage> notify = dupNotify();
+            notify->setInt32("what", kWhatPauseOnBufferingStart);
+            notify->post();
+            break;
+        }
+
+        case LiveSession::kWhatBufferingEnd:
+        {
+            sp<AMessage> notify = dupNotify();
+            notify->setInt32("what", kWhatResumeOnBufferingEnd);
+            notify->post();
+            break;
+        }
+
+
+        case LiveSession::kWhatBufferingUpdate:
+        {
+            sp<AMessage> notify = dupNotify();
+            int32_t percentage;
+            CHECK(msg->findInt32("percentage", &percentage));
+            notify->setInt32("what", kWhatBufferingUpdate);
+            notify->setInt32("percentage", percentage);
+            notify->post();
+            break;
+        }
+
         case LiveSession::kWhatError:
         {
             break;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index f4d3794..02d9f32 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -1021,23 +1021,41 @@
     }
 }
 
+status_t NuPlayer::onInstantiateSecureDecoders() {
+    status_t err;
+    if (!(mSourceFlags & Source::FLAG_SECURE)) {
+        return BAD_TYPE;
+    }
+
+    if (mRenderer != NULL) {
+        ALOGE("renderer should not be set when instantiating secure decoders");
+        return UNKNOWN_ERROR;
+    }
+
+    // TRICKY: We rely on mRenderer being null, so that decoder does not start requesting
+    // data on instantiation.
+    if (mNativeWindow != NULL) {
+        err = instantiateDecoder(false, &mVideoDecoder);
+        if (err != OK) {
+            return err;
+        }
+    }
+
+    if (mAudioSink != NULL) {
+        err = instantiateDecoder(true, &mAudioDecoder);
+        if (err != OK) {
+            return err;
+        }
+    }
+    return OK;
+}
+
 void NuPlayer::onStart() {
     mOffloadAudio = false;
     mAudioEOS = false;
     mVideoEOS = false;
     mStarted = true;
 
-    /* instantiate decoders now for secure playback */
-    if (mSourceFlags & Source::FLAG_SECURE) {
-        if (mNativeWindow != NULL) {
-            instantiateDecoder(false, &mVideoDecoder);
-        }
-
-        if (mAudioSink != NULL) {
-            instantiateDecoder(true, &mAudioDecoder);
-        }
-    }
-
     mSource->start();
 
     uint32_t flags = 0;
@@ -1239,6 +1257,8 @@
         notify->setInt32("generation", mAudioDecoderGeneration);
 
         if (mOffloadAudio) {
+            const bool hasVideo = (mSource->getFormat(false /*audio */) != NULL);
+            format->setInt32("has-video", hasVideo);
             *decoder = new DecoderPassThrough(notify, mSource, mRenderer);
         } else {
             *decoder = new Decoder(notify, mSource, mRenderer);
@@ -1390,7 +1410,7 @@
     FlushStatus newStatus =
         needShutdown ? FLUSHING_DECODER_SHUTDOWN : FLUSHING_DECODER;
 
-    mFlushComplete[audio][false /* isDecoder */] = false;
+    mFlushComplete[audio][false /* isDecoder */] = (mRenderer == NULL);
     mFlushComplete[audio][true /* isDecoder */] = false;
     if (audio) {
         ALOGE_IF(mFlushingAudio != NONE,
@@ -1675,6 +1695,23 @@
     CHECK(msg->findInt32("what", &what));
 
     switch (what) {
+        case Source::kWhatInstantiateSecureDecoders:
+        {
+            if (mSource == NULL) {
+                // This is a stale notification from a source that was
+                // asynchronously preparing when the client called reset().
+                // We handled the reset, the source is gone.
+                break;
+            }
+
+            sp<AMessage> reply;
+            CHECK(msg->findMessage("reply", &reply));
+            status_t err = onInstantiateSecureDecoders();
+            reply->setInt32("err", err);
+            reply->post();
+            break;
+        }
+
         case Source::kWhatPrepared:
         {
             if (mSource == NULL) {
@@ -1687,6 +1724,14 @@
             int32_t err;
             CHECK(msg->findInt32("err", &err));
 
+            if (err != OK) {
+                // shut down potential secure codecs in case client never calls reset
+                mDeferredActions.push_back(
+                        new FlushDecoderAction(FLUSH_CMD_SHUTDOWN /* audio */,
+                                               FLUSH_CMD_SHUTDOWN /* video */));
+                processDeferredActions();
+            }
+
             sp<NuPlayerDriver> driver = mDriver.promote();
             if (driver != NULL) {
                 // notify duration first, so that it's definitely set when
@@ -1971,6 +2016,13 @@
     notify->post();
 }
 
+void NuPlayer::Source::notifyInstantiateSecureDecoders(const sp<AMessage> &reply) {
+    sp<AMessage> notify = dupNotify();
+    notify->setInt32("what", kWhatInstantiateSecureDecoders);
+    notify->setMessage("reply", reply);
+    notify->post();
+}
+
 void NuPlayer::Source::onMessageReceived(const sp<AMessage> & /* msg */) {
     TRESPASS();
 }
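
The kWhatInstantiateSecureDecoders handling above uses a notify/reply handshake: the source attaches a "reply" message to its notification, the player does the work and posts the status back on that reply. A minimal sketch of the reply half (hypothetical helper, not part of the patch):

#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <utils/Errors.h>

namespace android {

void postSecureDecoderReply(const sp<AMessage>& msg, status_t err) {
    sp<AMessage> reply;
    CHECK(msg->findMessage("reply", &reply));  // attached by the source
    reply->setInt32("err", err);
    reply->post();                             // wakes the source's handler
}

}  // namespace android
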
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index a2cb53e..2bc20d7 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -204,6 +204,8 @@
 
     status_t instantiateDecoder(bool audio, sp<DecoderBase> *decoder);
 
+    status_t onInstantiateSecureDecoders();
+
     void updateVideoSize(
             const sp<AMessage> &inputFormat,
             const sp<AMessage> &outputFormat = NULL);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index 29b4c26..563de5e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -74,11 +74,14 @@
 
     onRequestInputBuffers();
 
+    int32_t hasVideo = 0;
+    format->findInt32("has-video", &hasVideo);
+
     // The audio sink is already opened before the PassThrough decoder is created.
     // Opening again might be relevant if decoder is instantiated after shutdown and
     // format is different.
     status_t err = mRenderer->openAudioSink(
-            format, true /* offloadOnly */, false /* hasVideo */,
+            format, true /* offloadOnly */, hasVideo,
             AUDIO_OUTPUT_FLAG_NONE /* flags */, NULL /* isOffloaded */);
     if (err != OK) {
         handleError(err);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 5887e50..1fa9cef 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -356,14 +356,6 @@
         case STATE_PREPARED:
         case STATE_STOPPED_AND_PREPARED:
         {
-            int curpos = 0;
-            if (mPositionUs > 0) {
-                curpos = (mPositionUs + 500ll) / 1000;
-            }
-            if (curpos == msec) {
-                // nothing to do, and doing something anyway could result in deadlock (b/15323063)
-                break;
-            }
             mStartupSeekTimeUs = seekTimeUs;
             // pretend that the seek completed. It will actually happen when starting playback.
             // TODO: actually perform the seek here, so the player is ready to go at the new
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 4bccfa8..a2ec51c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -566,12 +566,22 @@
 }
 
 bool NuPlayer::Renderer::onDrainAudioQueue() {
-#if 0
+    // TODO: This call to getPosition checks if AudioTrack has been created
+    // in AudioSink before draining audio. If AudioTrack doesn't exist, then
+    // CHECKs on getPosition will fail.
+    // We still need to figure out why AudioTrack is not created when
+    // this function is called. One possible reason could be leftover
+    // audio. Another place to check is whether the decoder
+    // has received INFO_FORMAT_CHANGED as the first buffer, since
+    // AudioSink is opened there, and possible interactions with flush
+    // immediately after start. Investigate error message
+    // "vorbis_dsp_synthesis returned -135", along with RTSP.
     uint32_t numFramesPlayed;
     if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
         return false;
     }
 
+#if 0
     ssize_t numFramesAvailableToWrite =
         mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 5a8beb1..d9f14a2 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -55,6 +55,7 @@
         kWhatTimedTextData,
         kWhatQueueDecoderShutdown,
         kWhatDrmNoLicense,
+        kWhatInstantiateSecureDecoders,
     };
 
     // The provides message is used to notify the player about various
@@ -125,6 +126,7 @@
 
     void notifyFlagsChanged(uint32_t flags);
     void notifyVideoSizeChanged(const sp<AMessage> &format = NULL);
+    void notifyInstantiateSecureDecoders(const sp<AMessage> &reply);
     void notifyPrepared(status_t err = OK);
 
 private:
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 31e10ce..97f3e20 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1591,7 +1591,11 @@
         if (!msg->findInt32("channel-count", &numChannels)) {
             err = INVALID_OPERATION;
         } else {
-            err = setupG711Codec(encoder, numChannels);
+            int32_t sampleRate;
+            if (!msg->findInt32("sample-rate", &sampleRate)) {
+                sampleRate = 8000;
+            }
+            err = setupG711Codec(encoder, sampleRate, numChannels);
         }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)) {
         int32_t numChannels, sampleRate, compressionLevel = -1;
@@ -2066,11 +2070,11 @@
             1 /* numChannels */);
 }
 
-status_t ACodec::setupG711Codec(bool encoder, int32_t numChannels) {
+status_t ACodec::setupG711Codec(bool encoder, int32_t sampleRate, int32_t numChannels) {
     CHECK(!encoder);  // XXX TODO
 
     return setupRawAudioFormat(
-            kPortIndexInput, 8000 /* sampleRate */, numChannels);
+            kPortIndexInput, sampleRate, numChannels);
 }
 
 status_t ACodec::setupFlacCodec(
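
The G.711 change above reads an optional "sample-rate" entry from the format message and falls back to 8000 Hz when it is absent. A small sketch of that lookup-with-default (hypothetical helper, not part of the patch):

#include <media/stagefright/foundation/AMessage.h>

namespace android {

int32_t g711SampleRate(const sp<AMessage>& format) {
    int32_t sampleRate;
    if (!format->findInt32("sample-rate", &sampleRate)) {
        sampleRate = 8000;  // G.711 is traditionally 8 kHz
    }
    return sampleRate;
}

}  // namespace android
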
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 177293d..a2cbdaf 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -37,6 +37,7 @@
         MediaCodecSource.cpp              \
         MediaDefs.cpp                     \
         MediaExtractor.cpp                \
+        MediaSync.cpp                     \
         MidiExtractor.cpp                 \
         http/MediaHTTP.cpp                \
         MediaMuxer.cpp                    \
diff --git a/media/libstagefright/MediaClock.cpp b/media/libstagefright/MediaClock.cpp
index 38db5e4..433f555 100644
--- a/media/libstagefright/MediaClock.cpp
+++ b/media/libstagefright/MediaClock.cpp
@@ -93,13 +93,17 @@
 }
 
 status_t MediaClock::getMediaTime(
-        int64_t realUs, int64_t *outMediaUs, bool allowPastMaxTime) {
+        int64_t realUs, int64_t *outMediaUs, bool allowPastMaxTime) const {
+    if (outMediaUs == NULL) {
+        return BAD_VALUE;
+    }
+
     Mutex::Autolock autoLock(mLock);
     return getMediaTime_l(realUs, outMediaUs, allowPastMaxTime);
 }
 
 status_t MediaClock::getMediaTime_l(
-        int64_t realUs, int64_t *outMediaUs, bool allowPastMaxTime) {
+        int64_t realUs, int64_t *outMediaUs, bool allowPastMaxTime) const {
     if (mAnchorTimeRealUs == -1) {
         return NO_INIT;
     }
@@ -119,7 +123,12 @@
     return OK;
 }
 
-status_t MediaClock::getRealTimeFor(int64_t targetMediaUs, int64_t *outRealUs) {
+status_t MediaClock::getRealTimeFor(
+        int64_t targetMediaUs, int64_t *outRealUs) const {
+    if (outRealUs == NULL) {
+        return BAD_VALUE;
+    }
+
     Mutex::Autolock autoLock(mLock);
     if (mPlaybackRate == 0.0) {
         return NO_INIT;
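
With getMediaTime() and getRealTimeFor() now const and null-checked, read-only users can query the clock through a const reference. A caller-side sketch (not part of the patch) mirroring the fallback to "now" when the clock has no anchor yet or the rate is zero:

#include <media/stagefright/MediaClock.h>
#include <utils/Errors.h>

namespace android {

int64_t realTimeOrNow(const MediaClock& clock, int64_t mediaUs, int64_t nowUs) {
    int64_t realUs;
    if (clock.getRealTimeFor(mediaUs, &realUs) != OK) {
        return nowUs;  // clock not anchored (or paused): render immediately
    }
    return realUs;
}

}  // namespace android
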
diff --git a/media/libstagefright/MediaSync.cpp b/media/libstagefright/MediaSync.cpp
new file mode 100644
index 0000000..7b6c7d9
--- /dev/null
+++ b/media/libstagefright/MediaSync.cpp
@@ -0,0 +1,541 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaSync"
+#include <inttypes.h>
+
+#include <gui/BufferQueue.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <gui/IGraphicBufferProducer.h>
+
+#include <media/AudioTrack.h>
+#include <media/stagefright/MediaClock.h>
+#include <media/stagefright/MediaSync.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include <ui/GraphicBuffer.h>
+
+// Maximum late time allowed for a video frame to be rendered. When a video
+// frame arrives later than this number, it will be discarded without rendering.
+static const int64_t kMaxAllowedVideoLateTimeUs = 40000ll;
+
+namespace android {
+
+// static
+sp<MediaSync> MediaSync::create() {
+    sp<MediaSync> sync = new MediaSync();
+    sync->mLooper->registerHandler(sync);
+    return sync;
+}
+
+MediaSync::MediaSync()
+      : mIsAbandoned(false),
+        mMutex(),
+        mReleaseCondition(),
+        mNumOutstandingBuffers(0),
+        mNativeSampleRateInHz(0),
+        mNumFramesWritten(0),
+        mHasAudio(false),
+        mNextBufferItemMediaUs(-1),
+        mPlaybackRate(0.0) {
+    mMediaClock = new MediaClock;
+
+    mLooper = new ALooper;
+    mLooper->setName("MediaSync");
+    mLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+}
+
+MediaSync::~MediaSync() {
+    if (mInput != NULL) {
+        mInput->consumerDisconnect();
+    }
+    if (mOutput != NULL) {
+        mOutput->disconnect(NATIVE_WINDOW_API_MEDIA);
+    }
+
+    if (mLooper != NULL) {
+        mLooper->unregisterHandler(id());
+        mLooper->stop();
+    }
+}
+
+status_t MediaSync::configureSurface(const sp<IGraphicBufferProducer> &output) {
+    Mutex::Autolock lock(mMutex);
+
+    // TODO: support surface change.
+    if (mOutput != NULL) {
+        ALOGE("configureSurface: output surface has already been configured.");
+        return INVALID_OPERATION;
+    }
+
+    if (output != NULL) {
+        IGraphicBufferProducer::QueueBufferOutput queueBufferOutput;
+        sp<OutputListener> listener(new OutputListener(this));
+        IInterface::asBinder(output)->linkToDeath(listener);
+        status_t status =
+            output->connect(listener,
+                            NATIVE_WINDOW_API_MEDIA,
+                            true /* producerControlledByApp */,
+                            &queueBufferOutput);
+        if (status != NO_ERROR) {
+            ALOGE("configureSurface: failed to connect (%d)", status);
+            return status;
+        }
+
+        mOutput = output;
+    }
+
+    return NO_ERROR;
+}
+
+// |audioTrack| is used only for querying information.
+status_t MediaSync::configureAudioTrack(
+        const sp<AudioTrack> &audioTrack, uint32_t nativeSampleRateInHz) {
+    Mutex::Autolock lock(mMutex);
+
+    // TODO: support audio track change.
+    if (mAudioTrack != NULL) {
+        ALOGE("configureAudioTrack: audioTrack has already been configured.");
+        return INVALID_OPERATION;
+    }
+
+    mAudioTrack = audioTrack;
+    mNativeSampleRateInHz = nativeSampleRateInHz;
+
+    return NO_ERROR;
+}
+
+status_t MediaSync::createInputSurface(
+        sp<IGraphicBufferProducer> *outBufferProducer) {
+    if (outBufferProducer == NULL) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mMutex);
+
+    if (mOutput == NULL) {
+        return NO_INIT;
+    }
+
+    if (mInput != NULL) {
+        return INVALID_OPERATION;
+    }
+
+    sp<IGraphicBufferProducer> bufferProducer;
+    sp<IGraphicBufferConsumer> bufferConsumer;
+    BufferQueue::createBufferQueue(&bufferProducer, &bufferConsumer);
+
+    sp<InputListener> listener(new InputListener(this));
+    IInterface::asBinder(bufferConsumer)->linkToDeath(listener);
+    status_t status =
+        bufferConsumer->consumerConnect(listener, false /* controlledByApp */);
+    if (status == NO_ERROR) {
+        bufferConsumer->setConsumerName(String8("MediaSync"));
+        *outBufferProducer = bufferProducer;
+        mInput = bufferConsumer;
+    }
+    return status;
+}
+
+status_t MediaSync::setPlaybackRate(float rate) {
+    if (rate < 0.0) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mMutex);
+
+    if (rate > mPlaybackRate) {
+        mNextBufferItemMediaUs = -1;
+    }
+    mPlaybackRate = rate;
+    mMediaClock->setPlaybackRate(rate);
+    onDrainVideo_l();
+
+    return OK;
+}
+
+sp<const MediaClock> MediaSync::getMediaClock() {
+    return mMediaClock;
+}
+
+status_t MediaSync::updateQueuedAudioData(
+        size_t sizeInBytes, int64_t presentationTimeUs) {
+    if (sizeInBytes == 0) {
+        return OK;
+    }
+
+    Mutex::Autolock lock(mMutex);
+
+    if (mAudioTrack == NULL) {
+        ALOGW("updateQueuedAudioData: audioTrack has NOT been configured.");
+        return INVALID_OPERATION;
+    }
+
+    int64_t numFrames = sizeInBytes / mAudioTrack->frameSize();
+    int64_t maxMediaTimeUs = presentationTimeUs
+            + getDurationIfPlayedAtNativeSampleRate_l(numFrames);
+    mNumFramesWritten += numFrames;
+
+    int64_t nowUs = ALooper::GetNowUs();
+    int64_t nowMediaUs = maxMediaTimeUs
+            - getDurationIfPlayedAtNativeSampleRate_l(mNumFramesWritten)
+            + getPlayedOutAudioDurationMedia_l(nowUs);
+
+    int64_t oldRealTime = -1;
+    if (mNextBufferItemMediaUs != -1) {
+        oldRealTime = getRealTime(mNextBufferItemMediaUs, nowUs);
+    }
+
+    mMediaClock->updateAnchor(nowMediaUs, nowUs, maxMediaTimeUs);
+    mHasAudio = true;
+
+    if (oldRealTime != -1) {
+        int64_t newRealTime = getRealTime(mNextBufferItemMediaUs, nowUs);
+        if (newRealTime < oldRealTime) {
+            mNextBufferItemMediaUs = -1;
+            onDrainVideo_l();
+        }
+    }
+
+    return OK;
+}
+
+void MediaSync::setName(const AString &name) {
+    Mutex::Autolock lock(mMutex);
+    mInput->setConsumerName(String8(name.c_str()));
+}
+
+int64_t MediaSync::getRealTime(int64_t mediaTimeUs, int64_t nowUs) {
+    int64_t realUs;
+    if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
+        // If we fail to get the current position, e.g. because the audio
+        // clock is not ready yet, just play out the video immediately.
+        return nowUs;
+    }
+    return realUs;
+}
+
+int64_t MediaSync::getDurationIfPlayedAtNativeSampleRate_l(int64_t numFrames) {
+    return (numFrames * 1000000LL / mNativeSampleRateInHz);
+}
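For reference, the conversion above is just frames divided by the native sample rate, expressed in microseconds; a hypothetical example:

    // 2400 frames at a native rate of 48000 Hz:
    int64_t durationUs = 2400 * 1000000LL / 48000;   // == 50000 us (50 ms)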
+
+int64_t MediaSync::getPlayedOutAudioDurationMedia_l(int64_t nowUs) {
+    CHECK(mAudioTrack != NULL);
+
+    uint32_t numFramesPlayed;
+    int64_t numFramesPlayedAt;
+    AudioTimestamp ts;
+    static const int64_t kStaleTimestamp100ms = 100000;
+
+    status_t res = mAudioTrack->getTimestamp(ts);
+    if (res == OK) {
+        // case 1: mixing audio tracks.
+        numFramesPlayed = ts.mPosition;
+        numFramesPlayedAt =
+            ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
+        const int64_t timestampAge = nowUs - numFramesPlayedAt;
+        if (timestampAge > kStaleTimestamp100ms) {
+            // FIXME (audio):
+            // getTimestamp returns a timestamp which may come from audio
+            // mixing threads. After pausing, the MixerThread may go idle,
+            // so the mTime estimate may become stale. Assuming that the
+            // MixerThread runs every 20ms, with FastMixer at 5ms, the max
+            // latency should be about 25ms with an average around 12ms (to
+            // be verified). For safety we use 100ms.
+            ALOGV("getTimestamp: returned stale timestamp nowUs(%lld) "
+                  "numFramesPlayedAt(%lld)",
+                  (long long)nowUs, (long long)numFramesPlayedAt);
+            numFramesPlayedAt = nowUs - kStaleTimestamp100ms;
+        }
+        //ALOGD("getTimestamp: OK %d %lld",
+        //      numFramesPlayed, (long long)numFramesPlayedAt);
+    } else if (res == WOULD_BLOCK) {
+        // case 2: transitory state on start of a new track
+        numFramesPlayed = 0;
+        numFramesPlayedAt = nowUs;
+        //ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
+        //      numFramesPlayed, (long long)numFramesPlayedAt);
+    } else {
+        // case 3: transitory at new track or audio fast tracks.
+        res = mAudioTrack->getPosition(&numFramesPlayed);
+        CHECK_EQ(res, (status_t)OK);
+        numFramesPlayedAt = nowUs;
+        numFramesPlayedAt += 1000LL * mAudioTrack->latency() / 2; /* XXX */
+        //ALOGD("getPosition: %d %lld", numFramesPlayed, numFramesPlayedAt);
+    }
+
+    // numFramesPlayed can't go negative until ~12.4 hrs of playback.
+    //CHECK_EQ(numFramesPlayed & (1 << 31), 0);
+    int64_t durationUs =
+        getDurationIfPlayedAtNativeSampleRate_l(numFramesPlayed)
+            + nowUs - numFramesPlayedAt;
+    if (durationUs < 0) {
+        // Occurs when the numFramesPlayed position is very small and either:
+        // (1) In case 1, nowUs was computed before getTimestamp() was called,
+        //     so numFramesPlayedAt exceeds nowUs by more than the duration
+        //     covered by numFramesPlayed.
+        // (2) In case 3, getPosition was used and mAudioTrack->latency() was
+        //     added to numFramesPlayedAt, again by more than the duration
+        //     covered by numFramesPlayed.
+        //
+        // Both of these are transitory conditions.
+        ALOGV("getPlayedOutAudioDurationMedia_l: negative duration %lld "
+              "set to zero", (long long)durationUs);
+        durationUs = 0;
+    }
+    ALOGV("getPlayedOutAudioDurationMedia_l(%lld) nowUs(%lld) frames(%u) "
+          "framesAt(%lld)",
+          (long long)durationUs, (long long)nowUs, numFramesPlayed,
+          (long long)numFramesPlayedAt);
+    return durationUs;
+}
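A compressed sketch of the computation above, with hypothetical numbers; the key idea is to extrapolate from the last known (framesPlayed, framesPlayedAt) pair to "now":

    // Hypothetical values:
    const int64_t nowUs = 5000000;
    const int64_t numFramesPlayedAt = 4990000;   // timestamp taken 10 ms ago
    const uint32_t numFramesPlayed = 40000;      // at 48000 Hz
    int64_t durationUs = (int64_t)numFramesPlayed * 1000000LL / 48000
            + nowUs - numFramesPlayedAt;         // ~843333 us
    if (durationUs < 0) durationUs = 0;          // clamp transient negatives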
+
+void MediaSync::onDrainVideo_l() {
+    if (!isPlaying()) {
+        return;
+    }
+
+    int64_t nowUs = ALooper::GetNowUs();
+
+    while (!mBufferItems.empty()) {
+        BufferItem *bufferItem = &*mBufferItems.begin();
+        int64_t itemMediaUs = bufferItem->mTimestamp / 1000;
+        int64_t itemRealUs = getRealTime(itemMediaUs, nowUs);
+        if (itemRealUs <= nowUs) {
+            if (mHasAudio) {
+                if (nowUs - itemRealUs <= kMaxAllowedVideoLateTimeUs) {
+                    renderOneBufferItem_l(*bufferItem);
+                } else {
+                    // too late.
+                    returnBufferToInput_l(
+                            bufferItem->mGraphicBuffer, bufferItem->mFence);
+                }
+            } else {
+                // always render video buffer in video-only mode.
+                renderOneBufferItem_l(*bufferItem);
+
+                // smooth out videos >= 10fps
+                mMediaClock->updateAnchor(
+                        itemMediaUs, nowUs, itemMediaUs + 100000);
+            }
+
+            mBufferItems.erase(mBufferItems.begin());
+
+            if (mBufferItems.empty()) {
+                mNextBufferItemMediaUs = -1;
+            }
+        } else {
+            if (mNextBufferItemMediaUs == -1
+                    || mNextBufferItemMediaUs != itemMediaUs) {
+                sp<AMessage> msg = new AMessage(kWhatDrainVideo, this);
+                msg->post(itemRealUs - nowUs);
+            }
+            break;
+        }
+    }
+}
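The render/drop decision above can be summarized as follows (sketch only; kMaxAllowedVideoLateTimeUs is defined elsewhere in this patch):

    // Given a frame whose mapped real time is itemRealUs:
    //   - not yet due   -> post kWhatDrainVideo for (itemRealUs - nowUs) and wait
    //   - due, audio    -> render if late by <= kMaxAllowedVideoLateTimeUs, else drop
    //   - due, no audio -> always render, and re-anchor the media clock
    bool shouldRender(int64_t nowUs, int64_t itemRealUs,
                      bool hasAudio, int64_t maxLateUs) {
        if (itemRealUs > nowUs) return false;               // not due yet
        return !hasAudio || (nowUs - itemRealUs) <= maxLateUs;
    }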
+
+void MediaSync::onFrameAvailableFromInput() {
+    Mutex::Autolock lock(mMutex);
+
+    // If there are too many outstanding buffers, wait until a buffer is
+    // released back to the input in onBufferReleased.
+    while (mNumOutstandingBuffers >= MAX_OUTSTANDING_BUFFERS) {
+        mReleaseCondition.wait(mMutex);
+
+        // If the sync is abandoned while we are waiting, the release
+        // condition variable will be broadcast, and we should just return
+        // without attempting to do anything more (since the input queue will
+        // also be abandoned).
+        if (mIsAbandoned) {
+            return;
+        }
+    }
+    ++mNumOutstandingBuffers;
+
+    // Acquire and detach the buffer from the input.
+    BufferItem bufferItem;
+    status_t status = mInput->acquireBuffer(&bufferItem, 0 /* presentWhen */);
+    if (status != NO_ERROR) {
+        ALOGE("acquiring buffer from input failed (%d)", status);
+        return;
+    }
+
+    ALOGV("acquired buffer %#llx from input", (long long)bufferItem.mGraphicBuffer->getId());
+
+    status = mInput->detachBuffer(bufferItem.mBuf);
+    if (status != NO_ERROR) {
+        ALOGE("detaching buffer from input failed (%d)", status);
+        if (status == NO_INIT) {
+            // If the input has been abandoned, move on.
+            onAbandoned_l(true /* isInput */);
+        }
+        return;
+    }
+
+    mBufferItems.push_back(bufferItem);
+    onDrainVideo_l();
+}
+
+void MediaSync::renderOneBufferItem_l(const BufferItem &bufferItem) {
+    IGraphicBufferProducer::QueueBufferInput queueInput(
+            bufferItem.mTimestamp,
+            bufferItem.mIsAutoTimestamp,
+            bufferItem.mDataSpace,
+            bufferItem.mCrop,
+            static_cast<int32_t>(bufferItem.mScalingMode),
+            bufferItem.mTransform,
+            bufferItem.mIsDroppable,
+            bufferItem.mFence);
+
+    // Attach and queue the buffer to the output.
+    int slot;
+    status_t status = mOutput->attachBuffer(&slot, bufferItem.mGraphicBuffer);
+    ALOGE_IF(status != NO_ERROR, "attaching buffer to output failed (%d)", status);
+    if (status == NO_ERROR) {
+        IGraphicBufferProducer::QueueBufferOutput queueOutput;
+        status = mOutput->queueBuffer(slot, queueInput, &queueOutput);
+        ALOGE_IF(status != NO_ERROR, "queueing buffer to output failed (%d)", status);
+    }
+
+    if (status != NO_ERROR) {
+        returnBufferToInput_l(bufferItem.mGraphicBuffer, bufferItem.mFence);
+        if (status == NO_INIT) {
+            // If the output has been abandoned, move on.
+            onAbandoned_l(false /* isInput */);
+        }
+        return;
+    }
+
+    ALOGV("queued buffer %#llx to output", (long long)bufferItem.mGraphicBuffer->getId());
+}
+
+void MediaSync::onBufferReleasedByOutput() {
+    Mutex::Autolock lock(mMutex);
+
+    sp<GraphicBuffer> buffer;
+    sp<Fence> fence;
+    status_t status = mOutput->detachNextBuffer(&buffer, &fence);
+    ALOGE_IF(status != NO_ERROR, "detaching buffer from output failed (%d)", status);
+
+    if (status == NO_INIT) {
+        // If the output has been abandoned, we can't do anything else,
+        // since the buffer is invalid.
+        onAbandoned_l(false /* isInput */);
+        return;
+    }
+
+    ALOGV("detached buffer %#llx from output", (long long)buffer->getId());
+
+    // If we've been abandoned, we can't return the buffer to the input, so just
+    // move on.
+    if (mIsAbandoned) {
+        return;
+    }
+
+    returnBufferToInput_l(buffer, fence);
+}
+
+void MediaSync::returnBufferToInput_l(
+        const sp<GraphicBuffer> &buffer, const sp<Fence> &fence) {
+    // Attach and release the buffer back to the input.
+    int consumerSlot;
+    status_t status = mInput->attachBuffer(&consumerSlot, buffer);
+    ALOGE_IF(status != NO_ERROR, "attaching buffer to input failed (%d)", status);
+    if (status == NO_ERROR) {
+        status = mInput->releaseBuffer(consumerSlot, 0 /* frameNumber */,
+                EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, fence);
+        ALOGE_IF(status != NO_ERROR, "releasing buffer to input failed (%d)", status);
+    }
+
+    if (status != NO_ERROR) {
+        // TODO: do we need to try to return this buffer later?
+        return;
+    }
+
+    ALOGV("released buffer %#llx to input", (long long)buffer->getId());
+
+    // Notify any waiting onFrameAvailable calls.
+    --mNumOutstandingBuffers;
+    mReleaseCondition.signal();
+}
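The outstanding-buffer accounting above is a bounded in-flight counter guarded by mMutex: acquisition blocks on mReleaseCondition until a release decrements the count or the sync is abandoned. A generic sketch of the same pattern, with hypothetical names:

    #include <condition_variable>
    #include <mutex>

    // Bounded in-flight counter: callers block when 'limit' buffers are out.
    struct InFlightLimiter {
        std::mutex lock;
        std::condition_variable cond;
        int outstanding = 0;
        int limit = 2;
        bool abandoned = false;

        bool acquire() {
            std::unique_lock<std::mutex> l(lock);
            cond.wait(l, [&] { return outstanding < limit || abandoned; });
            if (abandoned) return false;        // give up if torn down while waiting
            ++outstanding;
            return true;
        }
        void release() {
            std::lock_guard<std::mutex> l(lock);
            --outstanding;
            cond.notify_one();                  // wake one waiting acquire()
        }
    };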
+
+void MediaSync::onAbandoned_l(bool isInput) {
+    ALOGE("the %s has abandoned me", (isInput ? "input" : "output"));
+    if (!mIsAbandoned) {
+        if (isInput) {
+            mOutput->disconnect(NATIVE_WINDOW_API_MEDIA);
+        } else {
+            mInput->consumerDisconnect();
+        }
+        mIsAbandoned = true;
+    }
+    mReleaseCondition.broadcast();
+}
+
+void MediaSync::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatDrainVideo:
+        {
+            Mutex::Autolock lock(mMutex);
+            onDrainVideo_l();
+            break;
+        }
+
+        default:
+            TRESPASS();
+            break;
+    }
+}
+
+MediaSync::InputListener::InputListener(const sp<MediaSync> &sync)
+      : mSync(sync) {}
+
+MediaSync::InputListener::~InputListener() {}
+
+void MediaSync::InputListener::onFrameAvailable(const BufferItem &/* item */) {
+    mSync->onFrameAvailableFromInput();
+}
+
+// We don't care about sideband streams, since we won't relay them.
+void MediaSync::InputListener::onSidebandStreamChanged() {
+    ALOGE("onSidebandStreamChanged: got sideband stream unexpectedly.");
+}
+
+void MediaSync::InputListener::binderDied(const wp<IBinder> &/* who */) {
+    Mutex::Autolock lock(mSync->mMutex);
+    mSync->onAbandoned_l(true /* isInput */);
+}
+
+MediaSync::OutputListener::OutputListener(const sp<MediaSync> &sync)
+      : mSync(sync) {}
+
+MediaSync::OutputListener::~OutputListener() {}
+
+void MediaSync::OutputListener::onBufferReleased() {
+    mSync->onBufferReleasedByOutput();
+}
+
+void MediaSync::OutputListener::binderDied(const wp<IBinder> &/* who */) {
+    Mutex::Autolock lock(mSync->mMutex);
+    mSync->onAbandoned_l(false /* isInput */);
+}
+
+} // namespace android
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index ea19ab2..4d30069 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -629,10 +629,14 @@
         // These are PCM-like formats with a fixed sample rate but
         // a variable number of channels.
 
+        int32_t sampleRate;
         int32_t numChannels;
         CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+        if (!meta->findInt32(kKeySampleRate, &sampleRate)) {
+            sampleRate = 8000;
+        }
 
-        setG711Format(numChannels);
+        setG711Format(sampleRate, numChannels);
     } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, mMIME)) {
         CHECK(!mIsEncoder);
 
@@ -3616,9 +3620,9 @@
             sizeof(def));
 }
 
-void OMXCodec::setG711Format(int32_t numChannels) {
+void OMXCodec::setG711Format(int32_t sampleRate, int32_t numChannels) {
     CHECK(!mIsEncoder);
-    setRawAudioFormat(kPortIndexInput, 8000, numChannels);
+    setRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
 }
 
 void OMXCodec::setImageOutputFormat(
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index c0be136..8506e37 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -822,5 +822,36 @@
     return AString("<no-scheme URI suppressed>");
 }
 
+HLSTime::HLSTime(const sp<AMessage>& meta) :
+    mSeq(-1),
+    mTimeUs(-1ll),
+    mMeta(meta) {
+    if (meta != NULL) {
+        CHECK(meta->findInt32("discontinuitySeq", &mSeq));
+        CHECK(meta->findInt64("timeUs", &mTimeUs));
+    }
+}
+
+int64_t HLSTime::getSegmentTimeUs(bool midpoint) const {
+    int64_t segmentStartTimeUs = -1ll;
+    if (mMeta != NULL) {
+        CHECK(mMeta->findInt64("segmentStartTimeUs", &segmentStartTimeUs));
+        if (midpoint) {
+            int64_t durationUs;
+            CHECK(mMeta->findInt64("segmentDurationUs", &durationUs));
+            segmentStartTimeUs += durationUs / 2;
+        }
+    }
+    return segmentStartTimeUs;
+}
+
+bool operator <(const HLSTime &t0, const HLSTime &t1) {
+    // We can only compare the discontinuity sequence and the timestamp.
+    // (The segment start time is not reliable in the live streaming case:
+    // it is measured from the beginning of the playlist, which can change.)
+    return t0.mSeq < t1.mSeq
+            || (t0.mSeq == t1.mSeq && t0.mTimeUs < t1.mTimeUs);
+}
+
 }  // namespace android
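A hypothetical illustration of the ordering defined by HLSTime's operator< above: a sample from an earlier discontinuity sequence always precedes one from a later sequence, regardless of its timestamp.

    #include <cstdint>

    // Standalone illustration only; the real struct lives in Utils.h.
    struct T { int32_t seq; int64_t timeUs; };
    bool less(const T &a, const T &b) {
        return a.seq < b.seq || (a.seq == b.seq && a.timeUs < b.timeUs);
    }
    // less({2, 90000000}, {3, 1000000}) == true   // earlier sequence wins
    // less({3,   500000}, {3, 1000000}) == true   // same sequence: compare timeUs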
 
diff --git a/media/libstagefright/codecs/g711/dec/SoftG711.cpp b/media/libstagefright/codecs/g711/dec/SoftG711.cpp
index 3a69095..015515e 100644
--- a/media/libstagefright/codecs/g711/dec/SoftG711.cpp
+++ b/media/libstagefright/codecs/g711/dec/SoftG711.cpp
@@ -41,8 +41,9 @@
         OMX_COMPONENTTYPE **component)
     : SimpleSoftOMXComponent(name, callbacks, appData, component),
       mIsMLaw(true),
+      mSignalledError(false),
       mNumChannels(1),
-      mSignalledError(false) {
+      mSamplingRate(8000) {
     if (!strcmp(name, "OMX.google.g711.alaw.decoder")) {
         mIsMLaw = false;
     } else {
@@ -129,7 +130,7 @@
             pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
 
             pcmParams->nChannels = mNumChannels;
-            pcmParams->nSamplingRate = 8000;
+            pcmParams->nSamplingRate = mSamplingRate;
 
             return OMX_ErrorNone;
         }
@@ -159,6 +160,8 @@
                 mNumChannels = pcmParams->nChannels;
             }
 
+            mSamplingRate = pcmParams->nSamplingRate;
+
             return OMX_ErrorNone;
         }
 
diff --git a/media/libstagefright/codecs/g711/dec/SoftG711.h b/media/libstagefright/codecs/g711/dec/SoftG711.h
index bff0c68..16b6340 100644
--- a/media/libstagefright/codecs/g711/dec/SoftG711.h
+++ b/media/libstagefright/codecs/g711/dec/SoftG711.h
@@ -46,8 +46,9 @@
     };
 
     bool mIsMLaw;
-    OMX_U32 mNumChannels;
     bool mSignalledError;
+    OMX_U32 mNumChannels;
+    int32_t mSamplingRate;
 
     void initPorts();
 
diff --git a/media/libstagefright/data/media_codecs_google_audio.xml b/media/libstagefright/data/media_codecs_google_audio.xml
index a06684b..b957b0c 100644
--- a/media/libstagefright/data/media_codecs_google_audio.xml
+++ b/media/libstagefright/data/media_codecs_google_audio.xml
@@ -38,12 +38,12 @@
         </MediaCodec>
         <MediaCodec name="OMX.google.g711.alaw.decoder" type="audio/g711-alaw">
             <Limit name="channel-count" max="1" />
-            <Limit name="sample-rate" ranges="8000" />
+            <Limit name="sample-rate" ranges="8000-48000" />
             <Limit name="bitrate" range="64000" />
         </MediaCodec>
         <MediaCodec name="OMX.google.g711.mlaw.decoder" type="audio/g711-mlaw">
             <Limit name="channel-count" max="1" />
-            <Limit name="sample-rate" ranges="8000" />
+            <Limit name="sample-rate" ranges="8000-48000" />
             <Limit name="bitrate" range="64000" />
         </MediaCodec>
         <MediaCodec name="OMX.google.vorbis.decoder" type="audio/vorbis">
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index a94754b..2d93152 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -33,6 +33,7 @@
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/AUtils.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/FileSource.h>
 #include <media/stagefright/MediaErrors.h>
@@ -50,12 +51,95 @@
 namespace android {
 
 // static
-// Number of recently-read bytes to use for bandwidth estimation
-const size_t LiveSession::kBandwidthHistoryBytes = 200 * 1024;
-// High water mark to start up switch or report prepared)
-const int64_t LiveSession::kHighWaterMark = 8000000ll;
-const int64_t LiveSession::kMidWaterMark = 5000000ll;
-const int64_t LiveSession::kLowWaterMark = 3000000ll;
+// Bandwidth Switch Mark Defaults
+const int64_t LiveSession::kUpSwitchMarkUs = 15000000ll;
+const int64_t LiveSession::kDownSwitchMarkUs = 20000000ll;
+const int64_t LiveSession::kUpSwitchMarginUs = 5000000ll;
+const int64_t LiveSession::kResumeThresholdUs = 100000ll;
+
+// Buffer Prepare/Ready/Underflow Marks
+const int64_t LiveSession::kReadyMarkUs = 5000000ll;
+const int64_t LiveSession::kPrepareMarkUs = 1500000ll;
+const int64_t LiveSession::kUnderflowMarkUs = 1000000ll;
+
+struct LiveSession::BandwidthEstimator : public RefBase {
+    BandwidthEstimator();
+
+    void addBandwidthMeasurement(size_t numBytes, int64_t delayUs);
+    bool estimateBandwidth(int32_t *bandwidth);
+
+private:
+    // Bandwidth estimation parameters
+    static const int32_t kMaxBandwidthHistoryItems = 20;
+    static const int64_t kMaxBandwidthHistoryWindowUs = 5000000ll; // 5 sec
+
+    struct BandwidthEntry {
+        int64_t mDelayUs;
+        size_t mNumBytes;
+    };
+
+    Mutex mLock;
+    List<BandwidthEntry> mBandwidthHistory;
+    int64_t mTotalTransferTimeUs;
+    size_t mTotalTransferBytes;
+
+    DISALLOW_EVIL_CONSTRUCTORS(BandwidthEstimator);
+};
+
+LiveSession::BandwidthEstimator::BandwidthEstimator() :
+    mTotalTransferTimeUs(0),
+    mTotalTransferBytes(0) {
+}
+
+void LiveSession::BandwidthEstimator::addBandwidthMeasurement(
+        size_t numBytes, int64_t delayUs) {
+    AutoMutex autoLock(mLock);
+
+    BandwidthEntry entry;
+    entry.mDelayUs = delayUs;
+    entry.mNumBytes = numBytes;
+    mTotalTransferTimeUs += delayUs;
+    mTotalTransferBytes += numBytes;
+    mBandwidthHistory.push_back(entry);
+
+    // Trim old samples, but keep at least kMaxBandwidthHistoryItems samples
+    // and at least kMaxBandwidthHistoryWindowUs of total transfer time.
+    while (mBandwidthHistory.size() > kMaxBandwidthHistoryItems) {
+        List<BandwidthEntry>::iterator it = mBandwidthHistory.begin();
+        if (mTotalTransferTimeUs - it->mDelayUs < kMaxBandwidthHistoryWindowUs) {
+            break;
+        }
+        mTotalTransferTimeUs -= it->mDelayUs;
+        mTotalTransferBytes -= it->mNumBytes;
+        mBandwidthHistory.erase(mBandwidthHistory.begin());
+    }
+}
+
+bool LiveSession::BandwidthEstimator::estimateBandwidth(int32_t *bandwidthBps) {
+    AutoMutex autoLock(mLock);
+
+    if (mBandwidthHistory.size() < 2) {
+        return false;
+    }
+
+    *bandwidthBps = ((double)mTotalTransferBytes * 8E6 / mTotalTransferTimeUs);
+    return true;
+}
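A worked example of the sliding-window estimate above, with hypothetical transfer samples: the estimate is total bits over total transfer time across the retained history.

    // Three samples kept in the window:
    //   250 KB in 1.0 s, 500 KB in 1.5 s, 125 KB in 0.5 s
    // totalBytes = 875000, totalTimeUs = 3000000
    int32_t bandwidthBps = (int32_t)((double)875000 * 8E6 / 3000000); // ~2333333 bps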
+
+// static
+const char *LiveSession::getKeyForStream(StreamType type) {
+    switch (type) {
+        case STREAMTYPE_VIDEO:
+            return "timeUsVideo";
+        case STREAMTYPE_AUDIO:
+            return "timeUsAudio";
+        case STREAMTYPE_SUBTITLES:
+            return "timeUsSubtitle";
+        default:
+            TRESPASS();
+    }
+    return NULL;
+}
 
 LiveSession::LiveSession(
         const sp<AMessage> &notify, uint32_t flags,
@@ -63,24 +147,30 @@
     : mNotify(notify),
       mFlags(flags),
       mHTTPService(httpService),
+      mBuffering(false),
       mInPreparationPhase(true),
+      mPollBufferingGeneration(0),
+      mPrevBufferPercentage(-1),
       mHTTPDataSource(new MediaHTTP(mHTTPService->makeHTTPConnection())),
       mCurBandwidthIndex(-1),
+      mOrigBandwidthIndex(-1),
+      mLastBandwidthBps(-1ll),
+      mBandwidthEstimator(new BandwidthEstimator()),
       mStreamMask(0),
       mNewStreamMask(0),
       mSwapMask(0),
-      mCheckBandwidthGeneration(0),
       mSwitchGeneration(0),
       mSubtitleGeneration(0),
       mLastDequeuedTimeUs(0ll),
       mRealTimeBaseUs(0ll),
       mReconfigurationInProgress(false),
       mSwitchInProgress(false),
+      mUpSwitchMark(kUpSwitchMarkUs),
+      mDownSwitchMark(kDownSwitchMarkUs),
+      mUpSwitchMargin(kUpSwitchMarginUs),
       mFirstTimeUsValid(false),
       mFirstTimeUs(0),
-      mLastSeekTimeUs(0),
-      mPollBufferingGeneration(0) {
-
+      mLastSeekTimeUs(0) {
     mStreams[kAudioIndex] = StreamItem("audio");
     mStreams[kVideoIndex] = StreamItem("video");
     mStreams[kSubtitleIndex] = StreamItem("subtitles");
@@ -89,13 +179,6 @@
         mPacketSources.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
         mPacketSources2.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
     }
-
-    size_t numHistoryItems = kBandwidthHistoryBytes /
-            PlaylistFetcher::kDownloadBlockSize + 1;
-    if (numHistoryItems < 5) {
-        numHistoryItems = 5;
-    }
-    mHTTPDataSource->setBandwidthHistorySize(numHistoryItems);
 }
 
 LiveSession::~LiveSession() {
@@ -104,37 +187,19 @@
     }
 }
 
-sp<ABuffer> LiveSession::createFormatChangeBuffer(bool swap) {
-    ABuffer *discontinuity = new ABuffer(0);
-    discontinuity->meta()->setInt32("discontinuity", ATSParser::DISCONTINUITY_FORMATCHANGE);
-    discontinuity->meta()->setInt32("swapPacketSource", swap);
-    discontinuity->meta()->setInt32("switchGeneration", mSwitchGeneration);
-    discontinuity->meta()->setInt64("timeUs", -1);
-    return discontinuity;
-}
-
-void LiveSession::swapPacketSource(StreamType stream) {
-    sp<AnotherPacketSource> &aps = mPacketSources.editValueFor(stream);
-    sp<AnotherPacketSource> &aps2 = mPacketSources2.editValueFor(stream);
-    sp<AnotherPacketSource> tmp = aps;
-    aps = aps2;
-    aps2 = tmp;
-    aps2->clear();
-}
-
 status_t LiveSession::dequeueAccessUnit(
         StreamType stream, sp<ABuffer> *accessUnit) {
-    if (!(mStreamMask & stream)) {
-        // return -EWOULDBLOCK to avoid halting the decoder
-        // when switching between audio/video and audio only.
-        return -EWOULDBLOCK;
-    }
-
     status_t finalResult = OK;
     sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(stream);
 
     ssize_t idx = typeToIndex(stream);
-    if (!packetSource->hasBufferAvailable(&finalResult)) {
+    // Do not let client pull data if we don't have data packets yet.
+    // We might only have a format discontinuity queued without data.
+    // When NuPlayerDecoder dequeues the format discontinuity, it will
+    // immediately try to getFormat. If we return NULL, NuPlayerDecoder
+    // thinks it can do a seamless change, so it will not shut down the
+    // decoder. When the actual format arrives, it can't handle it and gets stuck.
+    if (!packetSource->hasDataBufferAvailable(&finalResult)) {
         if (finalResult == OK) {
             return -EAGAIN;
         } else {
@@ -142,49 +207,8 @@
         }
     }
 
-    // Do not let client pull data if we don't have format yet.
-    // We might only have a format discontinuity queued without actual data.
-    // When NuPlayerDecoder dequeues the format discontinuity, it will
-    // immediately try to getFormat. If we return NULL, NuPlayerDecoder
-    // thinks it can do seamless change, so will not shutdown decoder.
-    // When the actual format arrives, it can't handle it and get stuck.
-    // TODO: We need a method to check if the packet source has any
-    //       data packets available, dequeuing should only start then.
-    sp<MetaData> format = packetSource->getFormat();
-    if (format == NULL) {
-        return -EAGAIN;
-    }
-    int32_t targetDuration = 0;
-    sp<AMessage> meta = packetSource->getLatestEnqueuedMeta();
-    if (meta != NULL) {
-        meta->findInt32("targetDuration", &targetDuration);
-    }
-
-    int64_t targetDurationUs = targetDuration * 1000000ll;
-    if (targetDurationUs == 0 ||
-            targetDurationUs > PlaylistFetcher::kMinBufferedDurationUs) {
-        // Fetchers limit buffering to
-        // min(3 * targetDuration, kMinBufferedDurationUs)
-        targetDurationUs = PlaylistFetcher::kMinBufferedDurationUs;
-    }
-
-    // wait for counterpart
-    sp<AnotherPacketSource> otherSource;
-    uint32_t mask = mNewStreamMask & mStreamMask;
-    uint32_t fetchersMask  = 0;
-    for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
-        uint32_t fetcherMask = mFetcherInfos.valueAt(i).mFetcher->getStreamTypeMask();
-        fetchersMask |= fetcherMask;
-    }
-    mask &= fetchersMask;
-    if (stream == STREAMTYPE_AUDIO && (mask & STREAMTYPE_VIDEO)) {
-        otherSource = mPacketSources.valueFor(STREAMTYPE_VIDEO);
-    } else if (stream == STREAMTYPE_VIDEO && (mask & STREAMTYPE_AUDIO)) {
-        otherSource = mPacketSources.valueFor(STREAMTYPE_AUDIO);
-    }
-    if (otherSource != NULL && !otherSource->hasBufferAvailable(&finalResult)) {
-        return finalResult == OK ? -EAGAIN : finalResult;
-    }
+    // Let the client dequeue as long as we have buffers available.
+    // Do not make pause/resume decisions here.
 
     status_t err = packetSource->dequeueAccessUnit(accessUnit);
 
@@ -222,42 +246,6 @@
               streamStr,
               type,
               extra == NULL ? "NULL" : extra->debugString().c_str());
-
-        int32_t swap;
-        if ((*accessUnit)->meta()->findInt32("swapPacketSource", &swap) && swap) {
-            int32_t switchGeneration;
-            CHECK((*accessUnit)->meta()->findInt32("switchGeneration", &switchGeneration));
-            {
-                Mutex::Autolock lock(mSwapMutex);
-                if (switchGeneration == mSwitchGeneration) {
-                    swapPacketSource(stream);
-                    sp<AMessage> msg = new AMessage(kWhatSwapped, this);
-                    msg->setInt32("stream", stream);
-                    msg->setInt32("switchGeneration", switchGeneration);
-                    msg->post();
-                }
-            }
-        } else {
-            size_t seq = strm.mCurDiscontinuitySeq;
-            int64_t offsetTimeUs;
-            if (mDiscontinuityOffsetTimesUs.indexOfKey(seq) >= 0) {
-                offsetTimeUs = mDiscontinuityOffsetTimesUs.valueFor(seq);
-            } else {
-                offsetTimeUs = 0;
-            }
-
-            seq += 1;
-            if (mDiscontinuityAbsStartTimesUs.indexOfKey(strm.mCurDiscontinuitySeq) >= 0) {
-                int64_t firstTimeUs;
-                firstTimeUs = mDiscontinuityAbsStartTimesUs.valueFor(strm.mCurDiscontinuitySeq);
-                offsetTimeUs += strm.mLastDequeuedTimeUs - firstTimeUs;
-                offsetTimeUs += strm.mLastSampleDurationUs;
-            } else {
-                offsetTimeUs += strm.mLastSampleDurationUs;
-            }
-
-            mDiscontinuityOffsetTimesUs.add(seq, offsetTimeUs);
-        }
     } else if (err == OK) {
 
         if (stream == STREAMTYPE_AUDIO || stream == STREAMTYPE_VIDEO) {
@@ -265,7 +253,26 @@
             int32_t discontinuitySeq = 0;
             CHECK((*accessUnit)->meta()->findInt64("timeUs",  &timeUs));
             (*accessUnit)->meta()->findInt32("discontinuitySeq", &discontinuitySeq);
-            strm.mCurDiscontinuitySeq = discontinuitySeq;
+            if (discontinuitySeq > (int32_t) strm.mCurDiscontinuitySeq) {
+                int64_t offsetTimeUs;
+                if (mDiscontinuityOffsetTimesUs.indexOfKey(strm.mCurDiscontinuitySeq) >= 0) {
+                    offsetTimeUs = mDiscontinuityOffsetTimesUs.valueFor(strm.mCurDiscontinuitySeq);
+                } else {
+                    offsetTimeUs = 0;
+                }
+
+                if (mDiscontinuityAbsStartTimesUs.indexOfKey(strm.mCurDiscontinuitySeq) >= 0) {
+                    int64_t firstTimeUs;
+                    firstTimeUs = mDiscontinuityAbsStartTimesUs.valueFor(strm.mCurDiscontinuitySeq);
+                    offsetTimeUs += strm.mLastDequeuedTimeUs - firstTimeUs;
+                    offsetTimeUs += strm.mLastSampleDurationUs;
+                } else {
+                    offsetTimeUs += strm.mLastSampleDurationUs;
+                }
+
+                mDiscontinuityOffsetTimesUs.add(discontinuitySeq, offsetTimeUs);
+                strm.mCurDiscontinuitySeq = discontinuitySeq;
+            }
 
             int32_t discard = 0;
             int64_t firstTimeUs;
@@ -318,7 +325,6 @@
 }
 
 status_t LiveSession::getStreamFormat(StreamType stream, sp<AMessage> *format) {
-    // No swapPacketSource race condition; called from the same thread as dequeueAccessUnit.
     if (!(mStreamMask & stream)) {
         return UNKNOWN_ERROR;
     }
@@ -331,9 +337,18 @@
         return -EAGAIN;
     }
 
+    if (stream == STREAMTYPE_AUDIO) {
+        // set AAC input buffer size to 32K bytes (256kbps x 1sec)
+        meta->setInt32(kKeyMaxInputSize, 32 * 1024);
+    }
+
     return convertMetaDataToMessage(meta, format);
 }
 
+sp<HTTPBase> LiveSession::getHTTPDataSource() {
+    return new MediaHTTP(mHTTPService->makeHTTPConnection());
+}
+
 void LiveSession::connectAsync(
         const char *url, const KeyedVector<String8, String8> *headers) {
     sp<AMessage> msg = new AMessage(kWhatConnect, this);
@@ -367,6 +382,95 @@
     return err;
 }
 
+bool LiveSession::checkSwitchProgress(
+        sp<AMessage> &stopParams, int64_t delayUs, bool *needResumeUntil) {
+    AString newUri;
+    CHECK(stopParams->findString("uri", &newUri));
+
+    *needResumeUntil = false;
+    sp<AMessage> firstNewMeta[kMaxStreams];
+    for (size_t i = 0; i < kMaxStreams; ++i) {
+        StreamType stream = indexToType(i);
+        if (!(mSwapMask & mNewStreamMask & stream)
+            || (mStreams[i].mNewUri != newUri)) {
+            continue;
+        }
+        if (stream == STREAMTYPE_SUBTITLES) {
+            continue;
+        }
+        sp<AnotherPacketSource> &source = mPacketSources.editValueAt(i);
+
+        // First, get the latest dequeued meta, which marks where the decoder
+        // currently is. (When up-switching, we take the meta after a certain
+        // delay, so that the decoder is left with some cushion.)
+        sp<AMessage> lastDequeueMeta, lastEnqueueMeta;
+        if (delayUs > 0) {
+            lastDequeueMeta = source->getMetaAfterLastDequeued(delayUs);
+            if (lastDequeueMeta == NULL) {
+                // this means we don't have enough cushion, try again later
+                ALOGV("[%s] up switching failed due to insufficient buffer",
+                        stream == STREAMTYPE_AUDIO ? "audio" : "video");
+                return false;
+            }
+        } else {
+            // It's okay for lastDequeueMeta to be NULL here; it means the
+            // decoder hasn't even started dequeueing.
+            lastDequeueMeta = source->getLatestDequeuedMeta();
+        }
+        // Then, trim off packets at the beginning of mPacketSources2 that are
+        // before the latest dequeued time. These samples are definitely too late.
+        firstNewMeta[i] = mPacketSources2.editValueAt(i)
+                            ->trimBuffersBeforeMeta(lastDequeueMeta);
+
+        // Now firstNewMeta[i] is the first sample after the trim.
+        // If it's NULL, we failed because the dequeue position is already past
+        // all samples in mPacketSources2; we have to try again.
+        if (firstNewMeta[i] == NULL) {
+            HLSTime dequeueTime(lastDequeueMeta);
+            ALOGV("[%s] dequeue time (%d, %lld) past start time",
+                    stream == STREAMTYPE_AUDIO ? "audio" : "video",
+                    dequeueTime.mSeq, (long long) dequeueTime.mTimeUs);
+            return false;
+        }
+
+        // Otherwise, check whether mPacketSources2 overlaps with what the old
+        // fetcher already fetched, and see if we need to resumeUntil.
+        lastEnqueueMeta = source->getLatestEnqueuedMeta();
+        // lastEnqueueMeta == NULL means the old fetcher stopped at a discontinuity
+        // boundary; no need to resume, as the content will look different anyway.
+        if (lastEnqueueMeta != NULL) {
+            HLSTime lastTime(lastEnqueueMeta), startTime(firstNewMeta[i]);
+
+            // No need to resume the old fetcher if the new fetcher started in a
+            // different discontinuity sequence, as the content will look different.
+            *needResumeUntil |= (startTime.mSeq == lastTime.mSeq
+                    && startTime.mTimeUs - lastTime.mTimeUs > kResumeThresholdUs);
+
+            // update the stopTime for resumeUntil
+            stopParams->setInt32("discontinuitySeq", startTime.mSeq);
+            stopParams->setInt64(getKeyForStream(stream), startTime.mTimeUs);
+        }
+    }
+
+    // If we're here, the dequeue position hasn't passed some samples in
+    // mPacketSources2, so we can trim off the excess in mPacketSources.
+    // (The old fetcher might still need to resumeUntil the start time of the
+    // new fetcher.)
+    for (size_t i = 0; i < kMaxStreams; ++i) {
+        StreamType stream = indexToType(i);
+        if (!(mSwapMask & mNewStreamMask & stream)
+            || (newUri != mStreams[i].mNewUri)
+            || stream == STREAMTYPE_SUBTITLES) {
+            continue;
+        }
+        mPacketSources.valueFor(stream)->trimBuffersAfterMeta(firstNewMeta[i]);
+    }
+
+    // no resumeUntil if already underflow
+    *needResumeUntil &= !mBuffering;
+
+    return true;
+}
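The resume decision above boils down to comparing, per stream, the old fetcher's last enqueued time with the new fetcher's first sample time within the same discontinuity sequence. A sketch under those assumptions, with hypothetical names:

    // needResumeUntil is set only if the new variant starts in the same
    // discontinuity sequence and leaves a gap larger than the resume threshold.
    bool needResume(int32_t lastSeq, int64_t lastTimeUs,
                    int32_t startSeq, int64_t startTimeUs,
                    int64_t resumeThresholdUs) {
        return lastSeq == startSeq
                && (startTimeUs - lastTimeUs) > resumeThresholdUs;
    }
    // e.g. old fetcher enqueued up to 30.0 s, new fetcher starts at 30.3 s,
    // threshold 0.1 s -> resume the old fetcher until 30.3 s.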
+
 void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
         case kWhatConnect:
@@ -413,21 +517,25 @@
                 case PlaylistFetcher::kWhatPaused:
                 case PlaylistFetcher::kWhatStopped:
                 {
-                    if (what == PlaylistFetcher::kWhatStopped) {
-                        AString uri;
-                        CHECK(msg->findString("uri", &uri));
-                        ssize_t index = mFetcherInfos.indexOfKey(uri);
-                        if (index < 0) {
-                            // ignore duplicated kWhatStopped messages.
-                            break;
-                        }
+                    AString uri;
+                    CHECK(msg->findString("uri", &uri));
+                    ssize_t index = mFetcherInfos.indexOfKey(uri);
+                    if (index < 0) {
+                        // ignore msgs from fetchers that are already gone
+                        break;
+                    }
 
+                    if (what == PlaylistFetcher::kWhatStopped) {
                         mFetcherLooper->unregisterHandler(
                                 mFetcherInfos[index].mFetcher->id());
                         mFetcherInfos.removeItemsAt(index);
-
-                        if (mSwitchInProgress) {
-                            tryToFinishBandwidthSwitch();
+                    } else if (what == PlaylistFetcher::kWhatPaused) {
+                        int32_t seekMode;
+                        CHECK(msg->findInt32("seekMode", &seekMode));
+                        for (size_t i = 0; i < kMaxStreams; ++i) {
+                            if (mStreams[i].mUri == uri) {
+                                mStreams[i].mSeekMode = (SeekMode) seekMode;
+                            }
                         }
                     }
 
@@ -456,6 +564,16 @@
                     break;
                 }
 
+                case PlaylistFetcher::kWhatTargetDurationUpdate:
+                {
+                    int64_t targetDurationUs;
+                    CHECK(msg->findInt64("targetDurationUs", &targetDurationUs));
+                    mUpSwitchMark = min(kUpSwitchMarkUs, targetDurationUs * 7 / 4);
+                    mDownSwitchMark = min(kDownSwitchMarkUs, targetDurationUs * 9 / 4);
+                    mUpSwitchMargin = min(kUpSwitchMarginUs, targetDurationUs);
+                    break;
+                }
+
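For a hypothetical target duration of 6 seconds, the marks above work out as follows (std::min stands in for the AUtils min() used in the patch):

    int64_t targetDurationUs = 6000000ll;                                          // 6 s segments
    int64_t upSwitchMark   = std::min<int64_t>(15000000ll, targetDurationUs * 7 / 4); // 10.5 s
    int64_t downSwitchMark = std::min<int64_t>(20000000ll, targetDurationUs * 9 / 4); // 13.5 s
    int64_t upSwitchMargin = std::min<int64_t>( 5000000ll, targetDurationUs);         //  5.0 s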
                 case PlaylistFetcher::kWhatError:
                 {
                     status_t err;
@@ -493,10 +611,23 @@
                     mPacketSources.valueFor(
                             STREAMTYPE_SUBTITLES)->signalEOS(err);
 
-                    sp<AMessage> notify = mNotify->dup();
-                    notify->setInt32("what", kWhatError);
-                    notify->setInt32("err", err);
-                    notify->post();
+                    postError(err);
+                    break;
+                }
+
+                case PlaylistFetcher::kWhatStopReached:
+                {
+                    ALOGV("kWhatStopReached");
+
+                    AString oldUri;
+                    CHECK(msg->findString("uri", &oldUri));
+
+                    ssize_t index = mFetcherInfos.indexOfKey(oldUri);
+                    if (index < 0) {
+                        break;
+                    }
+
+                    tryToFinishBandwidthSwitch(oldUri);
                     break;
                 }
 
@@ -509,15 +640,69 @@
                         break;
                     }
 
-                    // Resume fetcher for the original variant; the resumed fetcher should
-                    // continue until the timestamps found in msg, which is stored by the
-                    // new fetcher to indicate where the new variant has started buffering.
-                    for (size_t i = 0; i < mFetcherInfos.size(); i++) {
-                        const FetcherInfo info = mFetcherInfos.valueAt(i);
-                        if (info.mToBeRemoved) {
-                            info.mFetcher->resumeUntilAsync(msg);
+                    AString uri;
+                    CHECK(msg->findString("uri", &uri));
+
+                    // mark new fetcher mToBeResumed
+                    ssize_t index = mFetcherInfos.indexOfKey(uri);
+                    if (index >= 0) {
+                        mFetcherInfos.editValueAt(index).mToBeResumed = true;
+                    }
+
+                    // Temporarily disable the packet sources about to be swapped,
+                    // to prevent NuPlayerDecoder from dequeuing while we check progress.
+                    for (size_t i = 0; i < mPacketSources.size(); ++i) {
+                        if ((mSwapMask & mPacketSources.keyAt(i))
+                                && uri == mStreams[i].mNewUri) {
+                            mPacketSources.editValueAt(i)->enable(false);
                         }
                     }
+                    bool switchUp = (mCurBandwidthIndex > mOrigBandwidthIndex);
+                    // If switching up, require a cushion bigger than kUnderflowMarkUs
+                    // to avoid buffering immediately after the switch.
+                    // (If we don't have that cushion, we'd rather cancel and try again.)
+                    int64_t delayUs = switchUp ? (kUnderflowMarkUs + 1000000ll) : 0;
+                    bool needResumeUntil = false;
+                    sp<AMessage> stopParams = msg;
+                    if (checkSwitchProgress(stopParams, delayUs, &needResumeUntil)) {
+                        // playback time hasn't passed startAt time
+                        if (!needResumeUntil) {
+                            for (size_t i = 0; i < kMaxStreams; ++i) {
+                                if ((mSwapMask & indexToType(i))
+                                        && uri == mStreams[i].mNewUri) {
+                                    // have to make a copy of mStreams[i].mUri because
+                                    // tryToFinishBandwidthSwitch is modifying mStreams[]
+                                    AString oldURI = mStreams[i].mUri;
+                                    tryToFinishBandwidthSwitch(oldURI);
+                                    break;
+                                }
+                            }
+                        } else {
+                            // startAt time is after last enqueue time
+                            // Resume fetcher for the original variant; the resumed fetcher should
+                            // continue until the timestamps found in msg, which is stored by the
+                            // new fetcher to indicate where the new variant has started buffering.
+                            for (size_t i = 0; i < mFetcherInfos.size(); i++) {
+                                const FetcherInfo &info = mFetcherInfos.valueAt(i);
+                                if (info.mToBeRemoved) {
+                                    info.mFetcher->resumeUntilAsync(stopParams);
+                                }
+                            }
+                        }
+                    } else {
+                        // playback time passed startAt time
+                        if (switchUp) {
+                            // if switching up, cancel and retry if condition satisfies again
+                            cancelBandwidthSwitch(true /* resume */);
+                        } else {
+                            resumeFetcher(uri, mSwapMask, -1, true /* newUri */);
+                        }
+                    }
+                    // re-enable all packet sources
+                    for (size_t i = 0; i < mPacketSources.size(); ++i) {
+                        mPacketSources.editValueAt(i)->enable(true);
+                    }
+
                     break;
                 }
 
@@ -552,12 +737,6 @@
             break;
         }
 
-        case kWhatSwapped:
-        {
-            onSwapped(msg);
-            break;
-        }
-
         case kWhatPollBuffering:
         {
             int32_t generation;
@@ -652,6 +831,7 @@
     size_t initialBandwidthIndex = 0;
 
     if (mPlaylist->isVariantPlaylist()) {
+        Vector<BandwidthItem> itemsWithVideo;
         for (size_t i = 0; i < mPlaylist->size(); ++i) {
             BandwidthItem item;
 
@@ -663,14 +843,22 @@
 
             CHECK(meta->findInt32("bandwidth", (int32_t *)&item.mBandwidth));
 
-            if (initialBandwidth == 0) {
-                initialBandwidth = item.mBandwidth;
-            }
-
             mBandwidthItems.push(item);
+            if (mPlaylist->hasType(i, "video")) {
+                itemsWithVideo.push(item);
+            }
+        }
+        // remove the audio-only variants if we have at least one with video
+        if (!itemsWithVideo.empty()
+                && itemsWithVideo.size() < mBandwidthItems.size()) {
+            mBandwidthItems.clear();
+            for (size_t i = 0; i < itemsWithVideo.size(); ++i) {
+                mBandwidthItems.push(itemsWithVideo[i]);
+            }
         }
 
         CHECK_GT(mBandwidthItems.size(), 0u);
+        initialBandwidth = mBandwidthItems[0].mBandwidth;
 
         mBandwidthItems.sort(SortByBandwidth);
 
@@ -691,16 +879,13 @@
     mPlaylist->pickRandomMediaItems();
     changeConfiguration(
             0ll /* timeUs */, initialBandwidthIndex, false /* pickTrack */);
-
-    schedulePollBuffering();
 }
 
 void LiveSession::finishDisconnect() {
+    ALOGV("finishDisconnect");
+
     // No reconfiguration is currently pending, make sure none will trigger
     // during disconnection either.
-
-    // Protect mPacketSources from a swapPacketSource race condition through disconnect.
-    // (finishDisconnect, onFinishDisconnect2)
     cancelBandwidthSwitch();
 
     // cancel buffer polling
@@ -750,8 +935,8 @@
     FetcherInfo info;
     info.mFetcher = new PlaylistFetcher(notify, this, uri, mSubtitleGeneration);
     info.mDurationUs = -1ll;
-    info.mIsPrepared = false;
     info.mToBeRemoved = false;
+    info.mToBeResumed = false;
     mFetcherLooper->registerHandler(info.mFetcher);
 
     mFetcherInfos.add(uri, info);
@@ -780,14 +965,15 @@
         int64_t range_offset, int64_t range_length,
         uint32_t block_size, /* download block size */
         sp<DataSource> *source, /* to return and reuse source */
-        String8 *actualUrl) {
+        String8 *actualUrl,
+        bool forceConnectHTTP /* force connect HTTP when reusing source */) {
     off64_t size;
     sp<DataSource> temp_source;
     if (source == NULL) {
         source = &temp_source;
     }
 
-    if (*source == NULL) {
+    if (*source == NULL || forceConnectHTTP) {
         if (!strncasecmp(url, "file://", 7)) {
             *source = new FileSource(url + 7);
         } else if (strncasecmp(url, "http://", 7)
@@ -806,13 +992,18 @@
                                     ? "" : AStringPrintf("%lld",
                                             range_offset + range_length - 1).c_str()).c_str()));
             }
-            status_t err = mHTTPDataSource->connect(url, &headers);
+
+            HTTPBase* httpDataSource =
+                    (*source == NULL) ? mHTTPDataSource.get() : (HTTPBase*)source->get();
+            status_t err = httpDataSource->connect(url, &headers);
 
             if (err != OK) {
                 return err;
             }
 
-            *source = mHTTPDataSource;
+            if (*source == NULL) {
+                *source = mHTTPDataSource;
+            }
         }
     }
 
@@ -902,6 +1093,9 @@
     String8 actualUrl;
     ssize_t  err = fetchFile(url, &buffer, 0, -1, 0, NULL, &actualUrl);
 
+    // close off the connection after use
+    mHTTPDataSource->disconnect();
+
     if (err <= 0) {
         return NULL;
     }
@@ -948,8 +1142,108 @@
 }
 #endif
 
-size_t LiveSession::getBandwidthIndex() {
-    if (mBandwidthItems.size() == 0) {
+bool LiveSession::resumeFetcher(
+        const AString &uri, uint32_t streamMask, int64_t timeUs, bool newUri) {
+    ssize_t index = mFetcherInfos.indexOfKey(uri);
+    if (index < 0) {
+        ALOGE("did not find fetcher for uri: %s", uri.c_str());
+        return false;
+    }
+
+    bool resume = false;
+    sp<AnotherPacketSource> sources[kMaxStreams];
+    for (size_t i = 0; i < kMaxStreams; ++i) {
+        if ((streamMask & indexToType(i))
+            && ((!newUri && uri == mStreams[i].mUri)
+            || (newUri && uri == mStreams[i].mNewUri))) {
+            resume = true;
+            if (newUri) {
+                sources[i] = mPacketSources2.valueFor(indexToType(i));
+                sources[i]->clear();
+            } else {
+                sources[i] = mPacketSources.valueFor(indexToType(i));
+            }
+        }
+    }
+
+    if (resume) {
+        ALOGV("resuming fetcher %s, timeUs %lld", uri.c_str(), (long long)timeUs);
+        SeekMode seekMode = newUri ? kSeekModeNextSample : kSeekModeExactPosition;
+        mFetcherInfos.editValueAt(index).mFetcher->startAsync(
+                sources[kAudioIndex],
+                sources[kVideoIndex],
+                sources[kSubtitleIndex],
+                timeUs, -1, -1, seekMode);
+    }
+
+    return resume;
+}
+
+float LiveSession::getAbortThreshold(
+        ssize_t currentBWIndex, ssize_t targetBWIndex) const {
+    float abortThreshold = -1.0f;
+    if (currentBWIndex > 0 && targetBWIndex < currentBWIndex) {
+        /*
+           If we're switching down, we need to decide whether to
+
+           1) finish last segment of high-bandwidth variant, or
+           2) abort last segment of high-bandwidth variant, and fetch an
+              overlapping portion from low-bandwidth variant.
+
+           Here we try to maximize the amount of buffer left when the
+           switch point is met. Given the following parameters:
+
+           B: our current buffering level in seconds
+           T: target duration in seconds
+           X: sample duration in seconds remain to fetch in last segment
+           bw0: bandwidth of old variant (as specified in playlist)
+           bw1: bandwidth of new variant (as specified in playlist)
+           bw: measured bandwidth available
+
+           If we choose 1), when switch happens at the end of current
+           segment, our buffering will be
+                  B + X - X * bw0 / bw
+
+           If we choose 2), when switch happens where we aborted current
+           segment, our buffering will be
+                  B - (T - X) * bw1 / bw
+
+           We should only choose 1) if
+                  X/T < bw1 / (bw1 + bw0 - bw)
+        */
+
+        // Take the measured current bandwidth at only 50% face value,
+        // as our bandwidth estimate is a lagging indicator. Being
+        // conservative here, we prefer switching to the lower bandwidth
+        // unless we're really confident that finishing up the last segment
+        // of the higher-bandwidth variant will be fast.
+        CHECK(mLastBandwidthBps >= 0);
+        abortThreshold =
+                (float)mBandwidthItems.itemAt(targetBWIndex).mBandwidth
+             / ((float)mBandwidthItems.itemAt(targetBWIndex).mBandwidth
+              + (float)mBandwidthItems.itemAt(currentBWIndex).mBandwidth
+              - (float)mLastBandwidthBps * 0.5f);
+        if (abortThreshold < 0.0f) {
+            abortThreshold = -1.0f; // do not abort
+        }
+        ALOGV("Switching Down: bps %ld => %ld, measured %d, abort ratio %.2f",
+                mBandwidthItems.itemAt(currentBWIndex).mBandwidth,
+                mBandwidthItems.itemAt(targetBWIndex).mBandwidth,
+                mLastBandwidthBps,
+                abortThreshold);
+    }
+    return abortThreshold;
+}
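A worked example of the formula above, with hypothetical playlist bandwidths bw0 = 2 Mbps (current), bw1 = 1 Mbps (target) and a measured bandwidth of 1.6 Mbps taken at 50% face value:

    // threshold = bw1 / (bw1 + bw0 - 0.5 * bw)
    //           = 1e6 / (1e6 + 2e6 - 0.8e6) = 1 / 2.2 ≈ 0.45
    float threshold = 1e6f / (1e6f + 2e6f - 0.5f * 1.6e6f);   // ≈ 0.45
    // Finish the current (higher-bandwidth) segment only if less than ~45%
    // of its duration remains to be fetched; otherwise abort and switch down.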
+
+void LiveSession::addBandwidthMeasurement(size_t numBytes, int64_t delayUs) {
+    mBandwidthEstimator->addBandwidthMeasurement(numBytes, delayUs);
+}
+
+size_t LiveSession::getBandwidthIndex(int32_t bandwidthBps) {
+    if (mBandwidthItems.size() < 2) {
+        // We shouldn't be here if we only have one bandwidth; check the
+        // calling logic to get rid of redundant bandwidth polling.
+        ALOGW("getBandwidthIndex() called for single bandwidth playlist!");
         return 0;
     }
 
@@ -967,15 +1261,6 @@
     }
 
     if (index < 0) {
-        int32_t bandwidthBps;
-        if (mHTTPDataSource != NULL
-                && mHTTPDataSource->estimateBandwidth(&bandwidthBps)) {
-            ALOGV("bandwidth estimated at %.2f kbps", bandwidthBps / 1024.0f);
-        } else {
-            ALOGV("no bandwidth estimate.");
-            return 0;  // Pick the lowest bandwidth stream by default.
-        }
-
         char value[PROPERTY_VALUE_MAX];
         if (property_get("media.httplive.max-bw", value, NULL)) {
             char *end;
@@ -992,15 +1277,9 @@
 
         index = mBandwidthItems.size() - 1;
         while (index > 0) {
-            // consider only 80% of the available bandwidth, but if we are switching up,
-            // be even more conservative (70%) to avoid overestimating and immediately
-            // switching back.
-            size_t adjustedBandwidthBps = bandwidthBps;
-            if (index > mCurBandwidthIndex) {
-                adjustedBandwidthBps = adjustedBandwidthBps * 7 / 10;
-            } else {
-                adjustedBandwidthBps = adjustedBandwidthBps * 8 / 10;
-            }
+            // be conservative (70%) to avoid overestimating and immediately
+            // switching down again.
+            size_t adjustedBandwidthBps = bandwidthBps * 7 / 10;
             if (mBandwidthItems.itemAt(index).mBandwidth <= adjustedBandwidthBps) {
                 break;
             }
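A hypothetical example of the selection above: with a measured bandwidth of 3 Mbps, only 70% (2.1 Mbps) is treated as usable, and the highest variant at or below that figure is picked.

    // variants (sorted ascending): 0.8, 1.5, 2.0, 3.0 Mbps
    size_t adjustedBandwidthBps = 3000000 * 7 / 10;   // 2100000 bps
    // scan from the top: 3.0 Mbps > 2.1 Mbps -> skip; 2.0 Mbps <= 2.1 Mbps -> pick it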
@@ -1060,22 +1339,14 @@
     return index;
 }
 
-int64_t LiveSession::latestMediaSegmentStartTimeUs() {
-    sp<AMessage> audioMeta = mPacketSources.valueFor(STREAMTYPE_AUDIO)->getLatestDequeuedMeta();
-    int64_t minSegmentStartTimeUs = -1, videoSegmentStartTimeUs = -1;
-    if (audioMeta != NULL) {
-        audioMeta->findInt64("segmentStartTimeUs", &minSegmentStartTimeUs);
-    }
+HLSTime LiveSession::latestMediaSegmentStartTime() const {
+    HLSTime audioTime(mPacketSources.valueFor(
+                    STREAMTYPE_AUDIO)->getLatestDequeuedMeta());
 
-    sp<AMessage> videoMeta = mPacketSources.valueFor(STREAMTYPE_VIDEO)->getLatestDequeuedMeta();
-    if (videoMeta != NULL
-            && videoMeta->findInt64("segmentStartTimeUs", &videoSegmentStartTimeUs)) {
-        if (minSegmentStartTimeUs < 0 || videoSegmentStartTimeUs < minSegmentStartTimeUs) {
-            minSegmentStartTimeUs = videoSegmentStartTimeUs;
-        }
+    HLSTime videoTime(mPacketSources.valueFor(
+                    STREAMTYPE_VIDEO)->getLatestDequeuedMeta());
 
-    }
-    return minSegmentStartTimeUs;
+    return audioTime < videoTime ? videoTime : audioTime;
 }
 
 status_t LiveSession::onSeek(const sp<AMessage> &msg) {
@@ -1083,7 +1354,7 @@
     CHECK(msg->findInt64("timeUs", &timeUs));
 
     if (!mReconfigurationInProgress) {
-        changeConfiguration(timeUs, mCurBandwidthIndex);
+        changeConfiguration(timeUs);
         return OK;
     } else {
         return -EWOULDBLOCK;
@@ -1139,7 +1410,6 @@
     status_t err = mPlaylist->selectTrack(index, select);
     if (err == OK) {
         sp<AMessage> msg = new AMessage(kWhatChangeConfiguration, this);
-        msg->setInt32("bandwidthIndex", mCurBandwidthIndex);
         msg->setInt32("pickTrack", select);
         msg->post();
     }
@@ -1155,21 +1425,17 @@
 }
 
 void LiveSession::changeConfiguration(
-        int64_t timeUs, size_t bandwidthIndex, bool pickTrack) {
-    // Protect mPacketSources from a swapPacketSource race condition through reconfiguration.
-    // (changeConfiguration, onChangeConfiguration2, onChangeConfiguration3).
+        int64_t timeUs, ssize_t bandwidthIndex, bool pickTrack) {
     cancelBandwidthSwitch();
 
     CHECK(!mReconfigurationInProgress);
     mReconfigurationInProgress = true;
-
-    mCurBandwidthIndex = bandwidthIndex;
-
-    ALOGV("changeConfiguration => timeUs:%" PRId64 " us, bwIndex:%zu, pickTrack:%d",
-          timeUs, bandwidthIndex, pickTrack);
-
-    CHECK_LT(bandwidthIndex, mBandwidthItems.size());
-    const BandwidthItem &item = mBandwidthItems.itemAt(bandwidthIndex);
+    if (bandwidthIndex >= 0) {
+        mOrigBandwidthIndex = mCurBandwidthIndex;
+        mCurBandwidthIndex = bandwidthIndex;
+    }
+    CHECK_LT(mCurBandwidthIndex, mBandwidthItems.size());
+    const BandwidthItem &item = mBandwidthItems.itemAt(mCurBandwidthIndex);
 
     uint32_t streamMask = 0; // streams that should be fetched by the new fetcher
     uint32_t resumeMask = 0; // streams that should be fetched by the original fetcher
@@ -1184,16 +1450,16 @@
     // Step 1, stop and discard fetchers that are no longer needed.
     // Pause those that we'll reuse.
     for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
-        const AString &uri = mFetcherInfos.keyAt(i);
-
-        bool discardFetcher = true;
-
-        if (timeUs < 0ll) {
-            // delay fetcher removal if not picking tracks
-            discardFetcher = pickTrack;
-
+        // skip fetchers that are marked mToBeRemoved,
+        // these are done and can't be reused
+        if (mFetcherInfos[i].mToBeRemoved) {
+            continue;
         }
 
+        const AString &uri = mFetcherInfos.keyAt(i);
+        sp<PlaylistFetcher> &fetcher = mFetcherInfos.editValueAt(i).mFetcher;
+
+        bool discardFetcher = true, delayRemoval = false;
         for (size_t j = 0; j < kMaxStreams; ++j) {
             StreamType type = indexToType(j);
             if ((streamMask & type) && uri == URIs[j]) {
@@ -1202,13 +1468,31 @@
                 discardFetcher = false;
             }
         }
+        // Delay fetcher removal if not picking tracks, AND the old fetcher
+        // has a stream mask that overlaps the new variant. (Okay to discard
+        // the old fetcher now if there is no overlap at all.)
+        if (discardFetcher && timeUs < 0ll && !pickTrack
+                && (fetcher->getStreamTypeMask() & streamMask)) {
+            discardFetcher = false;
+            delayRemoval = true;
+        }
 
         if (discardFetcher) {
-            mFetcherInfos.valueAt(i).mFetcher->stopAsync();
+            fetcher->stopAsync();
         } else {
-            // if we're seeking, pause immediately (no need to finish the segment)
-            bool immediate = (timeUs >= 0ll);
-            mFetcherInfos.valueAt(i).mFetcher->pauseAsync(immediate);
+            float threshold = -1.0f; // always finish fetching by default
+            if (timeUs >= 0ll) {
+                // seeking, no need to finish fetching
+                threshold = 0.0f;
+            } else if (delayRemoval) {
+                // adapting, abort if remaining of current segment is over threshold
+                threshold = getAbortThreshold(
+                        mOrigBandwidthIndex, mCurBandwidthIndex);
+            }
+
+            ALOGV("Pausing with threshold %.3f", threshold);
+
+            fetcher->pauseAsync(threshold);
         }
     }
 
@@ -1243,10 +1527,9 @@
 
 void LiveSession::onChangeConfiguration(const sp<AMessage> &msg) {
     if (!mReconfigurationInProgress) {
-        int32_t pickTrack = 0, bandwidthIndex = mCurBandwidthIndex;
+        int32_t pickTrack = 0;
         msg->findInt32("pickTrack", &pickTrack);
-        msg->findInt32("bandwidthIndex", &bandwidthIndex);
-        changeConfiguration(-1ll /* timeUs */, bandwidthIndex, pickTrack);
+        changeConfiguration(-1ll /* timeUs */, -1, pickTrack);
     } else {
         msg->post(1000000ll); // retry in 1 sec
     }
@@ -1269,6 +1552,10 @@
             mPacketSources.editValueAt(i)->clear();
         }
 
+        for (size_t i = 0; i < kMaxStreams; ++i) {
+            mStreams[i].mCurDiscontinuitySeq = 0;
+        }
+
         mDiscontinuityOffsetTimesUs.clear();
         mDiscontinuityAbsStartTimesUs.clear();
 
@@ -1279,6 +1566,10 @@
             mSeekReplyID.clear();
             mSeekReply.clear();
         }
+
+        // restart buffer polling after a seek because the previous
+        // buffering position is no longer valid.
+        restartPollBuffering();
     }
 
     uint32_t streamMask, resumeMask;
@@ -1350,6 +1641,8 @@
     CHECK(msg->findInt32("streamMask", (int32_t *)&streamMask));
     CHECK(msg->findInt32("resumeMask", (int32_t *)&resumeMask));
 
+    mNewStreamMask = streamMask | resumeMask;
+
     int64_t timeUs;
     int32_t pickTrack;
     bool switching = false;
@@ -1358,7 +1651,19 @@
 
     if (timeUs < 0ll) {
         if (!pickTrack) {
-            switching = true;
+            // mSwapMask contains streams that are present in both the old and new
+            // variants (i.e. in mNewStreamMask & mStreamMask) but with different
+            // URIs (not in resumeMask).
+            // For example, the old variant has video and audio in two separate
+            // URIs, and the new variant has only audio with an unchanged URI.
+            // mSwapMask should be 0 as there is nothing to swap; we only need to
+            // stop video and resume audio.
+            mSwapMask =  mNewStreamMask & mStreamMask & ~resumeMask;
+            switching = (mSwapMask != 0);
+            if (!switching) {
+                ALOGV("#### Finishing Bandwidth Switch Early: %zd => %zd",
+                        mOrigBandwidthIndex, mCurBandwidthIndex);
+            }
         }
         mRealTimeBaseUs = ALooper::GetNowUs() - mLastDequeuedTimeUs;
     } else {
@@ -1375,36 +1680,18 @@
         }
     }
 
-    mNewStreamMask = streamMask | resumeMask;
-    if (switching) {
-        mSwapMask = mStreamMask & ~resumeMask;
-    }
-
     // Of all existing fetchers:
     // * Resume fetchers that are still needed and assign them original packet sources.
     // * Mark otherwise unneeded fetchers for removal.
     ALOGV("resuming fetchers for mask 0x%08x", resumeMask);
     for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
         const AString &uri = mFetcherInfos.keyAt(i);
-
-        sp<AnotherPacketSource> sources[kMaxStreams];
-        for (size_t j = 0; j < kMaxStreams; ++j) {
-            if ((resumeMask & indexToType(j)) && uri == mStreams[j].mUri) {
-                sources[j] = mPacketSources.valueFor(indexToType(j));
-            }
-        }
-        FetcherInfo &info = mFetcherInfos.editValueAt(i);
-        if (sources[kAudioIndex] != NULL || sources[kVideoIndex] != NULL
-                || sources[kSubtitleIndex] != NULL) {
-            info.mFetcher->startAsync(
-                    sources[kAudioIndex], sources[kVideoIndex], sources[kSubtitleIndex], timeUs);
-        } else {
-            info.mToBeRemoved = true;
+        if (!resumeFetcher(uri, resumeMask, timeUs)) {
+            mFetcherInfos.editValueAt(i).mToBeRemoved = true;
         }
     }
 
     // streamMask now only contains the types that need a new fetcher created.
-
     if (streamMask != 0) {
         ALOGV("creating new fetchers for mask 0x%08x", streamMask);
     }
@@ -1422,13 +1709,12 @@
         sp<PlaylistFetcher> fetcher = addFetcher(uri.c_str());
         CHECK(fetcher != NULL);
 
-        int64_t startTimeUs = -1;
-        int64_t segmentStartTimeUs = -1ll;
-        int32_t discontinuitySeq = -1;
+        HLSTime startTime;
+        SeekMode seekMode = kSeekModeExactPosition;
         sp<AnotherPacketSource> sources[kMaxStreams];
 
-        if (i == kSubtitleIndex) {
-            segmentStartTimeUs = latestMediaSegmentStartTimeUs();
+        if (i == kSubtitleIndex || (!pickTrack && !switching)) {
+            startTime = latestMediaSegmentStartTime();
         }
 
         // TRICKY: looping from i as earlier streams are already removed from streamMask
@@ -1438,56 +1724,50 @@
                 sources[j] = mPacketSources.valueFor(indexToType(j));
 
                 if (timeUs >= 0) {
-                    startTimeUs = timeUs;
+                    startTime.mTimeUs = timeUs;
                 } else {
                     int32_t type;
                     sp<AMessage> meta;
-                    if (pickTrack) {
-                        // selecting
+                    if (!switching) {
+                        // selecting, or adapting but no swap required
                         meta = sources[j]->getLatestDequeuedMeta();
                     } else {
-                        // adapting
+                        // adapting and swap required
                         meta = sources[j]->getLatestEnqueuedMeta();
-                    }
-
-                    if (meta != NULL && !meta->findInt32("discontinuity", &type)) {
-                        int64_t tmpUs;
-                        int64_t tmpSegmentUs;
-
-                        CHECK(meta->findInt64("timeUs", &tmpUs));
-                        CHECK(meta->findInt64("segmentStartTimeUs", &tmpSegmentUs));
-                        if (startTimeUs < 0 || tmpSegmentUs < segmentStartTimeUs) {
-                            startTimeUs = tmpUs;
-                            segmentStartTimeUs = tmpSegmentUs;
-                        } else if (tmpSegmentUs == segmentStartTimeUs && tmpUs < startTimeUs) {
-                            startTimeUs = tmpUs;
-                        }
-
-                        int32_t seq;
-                        CHECK(meta->findInt32("discontinuitySeq", &seq));
-                        if (discontinuitySeq < 0 || seq < discontinuitySeq) {
-                            discontinuitySeq = seq;
+                        if (meta != NULL && mCurBandwidthIndex > mOrigBandwidthIndex) {
+                            // switching up
+                            meta = sources[j]->getMetaAfterLastDequeued(mUpSwitchMargin);
                         }
                     }
 
-                    if (pickTrack) {
-                        // selecting track, queue discontinuities before content
+                    if (j != kSubtitleIndex && meta != NULL
+                            && !meta->findInt32("discontinuity", &type)) {
+                        HLSTime tmpTime(meta);
+                        if (startTime < tmpTime) {
+                            startTime = tmpTime;
+                        }
+                    }
+
+                    if (!switching) {
+                        // selecting, or adapting but no swap required
                         sources[j]->clear();
                         if (j == kSubtitleIndex) {
                             break;
                         }
 
                         ALOGV("stream[%zu]: queue format change", j);
-
                         sources[j]->queueDiscontinuity(
-                                ATSParser::DISCONTINUITY_FORMATCHANGE, NULL, true);
+                                ATSParser::DISCONTINUITY_FORMAT_ONLY, NULL, true);
                     } else {
-                        // adapting, queue discontinuities after resume
+                        // switching, queue discontinuities after resume
                         sources[j] = mPacketSources2.valueFor(indexToType(j));
                         sources[j]->clear();
-                        uint32_t extraStreams = mNewStreamMask & (~mStreamMask);
-                        if (extraStreams & indexToType(j)) {
-                            sources[j]->queueAccessUnit(createFormatChangeBuffer(/*swap*/ false));
+                        // the new fetcher might be providing streams that used to be
+                        // provided by two different fetchers; if one of those
+                        // fetchers paused mid-segment while the other paused in the
+                        // next segment, we have to start from the next segment.
+                        if (seekMode < mStreams[j].mSeekMode) {
+                            seekMode = mStreams[j].mSeekMode;
                         }
                     }
                 }
@@ -1496,14 +1776,20 @@
             }
         }
 
+        // Set the target segment start time to the midpoint of the
+        // segment where the last sample was.
+        // This gives a better guess if the segments of the two variants are not
+        // perfectly aligned. (If the corresponding segment in the new variant
+        // starts slightly later than the one in the old variant, we still want
+        // to pick that segment, not the one before.)
         fetcher->startAsync(
                 sources[kAudioIndex],
                 sources[kVideoIndex],
                 sources[kSubtitleIndex],
-                startTimeUs < 0 ? mLastSeekTimeUs : startTimeUs,
-                segmentStartTimeUs,
-                discontinuitySeq,
-                switching);
+                startTime.mTimeUs < 0 ? mLastSeekTimeUs : startTime.mTimeUs,
+                startTime.getSegmentTimeUs(true /* midpoint */),
+                startTime.mSeq,
+                seekMode);
     }
 
     // All fetchers have now been started, the configuration change
@@ -1515,6 +1801,7 @@
         mSwitchInProgress = true;
     } else {
         mStreamMask = mNewStreamMask;
+        mOrigBandwidthIndex = mCurBandwidthIndex;
     }
 
     if (mDisconnectReplyID != NULL) {
@@ -1522,25 +1809,56 @@
     }
 }
 
-void LiveSession::onSwapped(const sp<AMessage> &msg) {
-    int32_t switchGeneration;
-    CHECK(msg->findInt32("switchGeneration", &switchGeneration));
-    if (switchGeneration != mSwitchGeneration) {
+void LiveSession::swapPacketSource(StreamType stream) {
+    ALOGV("swapPacketSource: stream = %d", stream);
+
+    // transfer packets from source2 to source
+    sp<AnotherPacketSource> &aps = mPacketSources.editValueFor(stream);
+    sp<AnotherPacketSource> &aps2 = mPacketSources2.editValueFor(stream);
+
+    // queue discontinuity in mPacketSource
+    aps->queueDiscontinuity(ATSParser::DISCONTINUITY_FORMAT_ONLY, NULL, false);
+
+    // queue packets in mPacketSource2 to mPacketSource
+    status_t finalResult = OK;
+    sp<ABuffer> accessUnit;
+    while (aps2->hasBufferAvailable(&finalResult) && finalResult == OK &&
+          OK == aps2->dequeueAccessUnit(&accessUnit)) {
+        aps->queueAccessUnit(accessUnit);
+    }
+    aps2->clear();
+}
+
+void LiveSession::tryToFinishBandwidthSwitch(const AString &oldUri) {
+    if (!mSwitchInProgress) {
         return;
     }
 
-    int32_t stream;
-    CHECK(msg->findInt32("stream", &stream));
-
-    ssize_t idx = typeToIndex(stream);
-    CHECK(idx >= 0);
-    if ((mNewStreamMask & stream) && mStreams[idx].mNewUri.empty()) {
-        ALOGW("swapping stream type %d %s to empty stream", stream, mStreams[idx].mUri.c_str());
+    ssize_t index = mFetcherInfos.indexOfKey(oldUri);
+    if (index < 0 || !mFetcherInfos[index].mToBeRemoved) {
+        return;
     }
-    mStreams[idx].mUri = mStreams[idx].mNewUri;
-    mStreams[idx].mNewUri.clear();
 
-    mSwapMask &= ~stream;
+    // Swap packet source of streams provided by old variant
+    for (size_t idx = 0; idx < kMaxStreams; idx++) {
+        StreamType stream = indexToType(idx);
+        if ((mSwapMask & stream) && (oldUri == mStreams[idx].mUri)) {
+            swapPacketSource(stream);
+
+            if ((mNewStreamMask & stream) && mStreams[idx].mNewUri.empty()) {
+                ALOGW("swapping stream type %d %s to empty stream",
+                        stream, mStreams[idx].mUri.c_str());
+            }
+            mStreams[idx].mUri = mStreams[idx].mNewUri;
+            mStreams[idx].mNewUri.clear();
+
+            mSwapMask &= ~stream;
+        }
+    }
+
+    mFetcherInfos.editValueAt(index).mFetcher->stopAsync(false /* clear */);
+
+    ALOGV("tryToFinishBandwidthSwitch: mSwapMask=%x", mSwapMask);
     if (mSwapMask != 0) {
         return;
     }
@@ -1548,21 +1866,39 @@
     // Check if new variant contains extra streams.
     uint32_t extraStreams = mNewStreamMask & (~mStreamMask);
     while (extraStreams) {
-        StreamType extraStream = (StreamType) (extraStreams & ~(extraStreams - 1));
-        swapPacketSource(extraStream);
-        extraStreams &= ~extraStream;
+        StreamType stream = (StreamType) (extraStreams & ~(extraStreams - 1));
+        extraStreams &= ~stream;
 
-        idx = typeToIndex(extraStream);
+        swapPacketSource(stream);
+
+        ssize_t idx = typeToIndex(stream);
         CHECK(idx >= 0);
         if (mStreams[idx].mNewUri.empty()) {
             ALOGW("swapping extra stream type %d %s to empty stream",
-                    extraStream, mStreams[idx].mUri.c_str());
+                    stream, mStreams[idx].mUri.c_str());
         }
         mStreams[idx].mUri = mStreams[idx].mNewUri;
         mStreams[idx].mNewUri.clear();
     }
 
-    tryToFinishBandwidthSwitch();
+    // Restart the new fetcher (it was paused after the first 47k block)
+    // and let it fetch into mPacketSources (not mPacketSources2).
+    for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+        FetcherInfo &info = mFetcherInfos.editValueAt(i);
+        if (info.mToBeResumed) {
+            resumeFetcher(mFetcherInfos.keyAt(i), mNewStreamMask);
+            info.mToBeResumed = false;
+        }
+    }
+
+    ALOGI("#### Finished Bandwidth Switch: %zd => %zd",
+            mOrigBandwidthIndex, mCurBandwidthIndex);
+
+    mStreamMask = mNewStreamMask;
+    mSwitchInProgress = false;
+    mOrigBandwidthIndex = mCurBandwidthIndex;
+
+    restartPollBuffering();
 }
 
 void LiveSession::schedulePollBuffering() {
@@ -1573,99 +1909,104 @@
 
 void LiveSession::cancelPollBuffering() {
     ++mPollBufferingGeneration;
+    mPrevBufferPercentage = -1;
+}
+
+void LiveSession::restartPollBuffering() {
+    cancelPollBuffering();
+    onPollBuffering();
 }
 
 void LiveSession::onPollBuffering() {
     ALOGV("onPollBuffering: mSwitchInProgress %d, mReconfigurationInProgress %d, "
-            "mInPreparationPhase %d, mStreamMask 0x%x",
+            "mInPreparationPhase %d, mCurBandwidthIndex %zd, mStreamMask 0x%x",
         mSwitchInProgress, mReconfigurationInProgress,
-        mInPreparationPhase, mStreamMask);
+        mInPreparationPhase, mCurBandwidthIndex, mStreamMask);
 
-    bool low, mid, high;
-    if (checkBuffering(low, mid, high)) {
-        if (mInPreparationPhase && mid) {
+    bool underflow, ready, down, up;
+    if (checkBuffering(underflow, ready, down, up)) {
+        if (mInPreparationPhase && ready) {
             postPrepared(OK);
         }
 
         // don't switch before we report prepared
-        if (!mInPreparationPhase && (low || high)) {
-            switchBandwidthIfNeeded(high);
-        }
+        if (!mInPreparationPhase) {
+            if (ready) {
+                stopBufferingIfNecessary();
+            } else if (underflow) {
+                startBufferingIfNecessary();
+            }
+            switchBandwidthIfNeeded(up, down);
+        }
+
     }
 
     schedulePollBuffering();
 }
 
-// Mark switch done when:
-//   1. all old buffers are swapped out
-void LiveSession::tryToFinishBandwidthSwitch() {
+void LiveSession::cancelBandwidthSwitch(bool resume) {
+    ALOGV("cancelBandwidthSwitch: mSwitchGen(%d)++, orig %zd, cur %zd",
+            mSwitchGeneration, mOrigBandwidthIndex, mCurBandwidthIndex);
     if (!mSwitchInProgress) {
         return;
     }
 
-    bool needToRemoveFetchers = false;
-    for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
-        if (mFetcherInfos.valueAt(i).mToBeRemoved) {
-            needToRemoveFetchers = true;
-            break;
-        }
-    }
-
-    if (!needToRemoveFetchers && mSwapMask == 0) {
-        ALOGI("mSwitchInProgress = false");
-        mStreamMask = mNewStreamMask;
-        mSwitchInProgress = false;
-    }
-}
-
-void LiveSession::cancelBandwidthSwitch() {
-    Mutex::Autolock lock(mSwapMutex);
-    mSwitchGeneration++;
-    mSwitchInProgress = false;
-    mSwapMask = 0;
-
     for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
         FetcherInfo& info = mFetcherInfos.editValueAt(i);
         if (info.mToBeRemoved) {
             info.mToBeRemoved = false;
+            if (resume) {
+                resumeFetcher(mFetcherInfos.keyAt(i), mSwapMask);
+            }
         }
     }
 
     for (size_t i = 0; i < kMaxStreams; ++i) {
-        if (!mStreams[i].mNewUri.empty()) {
-            ssize_t j = mFetcherInfos.indexOfKey(mStreams[i].mNewUri);
-            if (j < 0) {
-                mStreams[i].mNewUri.clear();
+        AString newUri = mStreams[i].mNewUri;
+        if (!newUri.empty()) {
+            // clear all mNewUri matching this newUri
+            for (size_t j = i; j < kMaxStreams; ++j) {
+                if (mStreams[j].mNewUri == newUri) {
+                    mStreams[j].mNewUri.clear();
+                }
+            }
+            ALOGV("stopping newUri = %s", newUri.c_str());
+            ssize_t index = mFetcherInfos.indexOfKey(newUri);
+            if (index < 0) {
+                ALOGE("did not find fetcher for newUri: %s", newUri.c_str());
                 continue;
             }
-
-            const FetcherInfo &info = mFetcherInfos.valueAt(j);
+            FetcherInfo &info = mFetcherInfos.editValueAt(index);
+            info.mToBeRemoved = true;
             info.mFetcher->stopAsync();
-            mFetcherInfos.removeItemsAt(j);
-            mStreams[i].mNewUri.clear();
         }
     }
+
+    ALOGI("#### Canceled Bandwidth Switch: %zd => %zd",
+            mCurBandwidthIndex, mOrigBandwidthIndex);
+
+    mSwitchGeneration++;
+    mSwitchInProgress = false;
+    mCurBandwidthIndex = mOrigBandwidthIndex;
+    mSwapMask = 0;
 }
 
-bool LiveSession::checkBuffering(bool &low, bool &mid, bool &high) {
-    low = mid = high = false;
+bool LiveSession::checkBuffering(
+        bool &underflow, bool &ready, bool &down, bool &up) {
+    underflow = ready = down = up = false;
 
-    if (mSwitchInProgress || mReconfigurationInProgress) {
+    if (mReconfigurationInProgress) {
         ALOGV("Switch/Reconfig in progress, defer buffer polling");
         return false;
     }
 
-    // TODO: Fine tune low/high mark.
-    //       We also need to pause playback if buffering is too low.
-    //       Currently during underflow, we depend on decoder to starve
-    //       to pause, but A/V could have different buffering left,
-    //       they're not paused together.
-    // TODO: Report buffering level to NuPlayer for BUFFERING_UPDATE
-
-    // Switch down if any of the fetchers are below low mark;
-    // Switch up   if all of the fetchers are over high mark.
-    size_t activeCount, lowCount, midCount, highCount;
-    activeCount = lowCount = midCount = highCount = 0;
+    size_t activeCount, underflowCount, readyCount, downCount, upCount;
+    activeCount = underflowCount = readyCount = downCount = upCount = 0;
+    int32_t minBufferPercent = -1;
+    int64_t durationUs;
+    if (getDuration(&durationUs) != OK) {
+        durationUs = -1;
+    }
     for (size_t i = 0; i < mPacketSources.size(); ++i) {
         // we don't check subtitles for buffering level
         if (!(mStreamMask & mPacketSources.keyAt(i)
@@ -1679,40 +2020,156 @@
             continue;
         }
 
-        ++activeCount;
         int64_t bufferedDurationUs =
                 mPacketSources[i]->getEstimatedDurationUs();
         ALOGV("source[%zu]: buffered %lld us", i, (long long)bufferedDurationUs);
-        if (bufferedDurationUs < kLowWaterMark) {
-            ++lowCount;
-            break;
-        } else if (bufferedDurationUs > kHighWaterMark) {
-            ++midCount;
-            ++highCount;
-        } else if (bufferedDurationUs > kMidWaterMark) {
-            ++midCount;
+        if (durationUs >= 0) {
+            int32_t percent;
+            if (mPacketSources[i]->isFinished(0 /* duration */)) {
+                percent = 100;
+            } else {
+                percent = (int32_t)(100.0 * (mLastDequeuedTimeUs + bufferedDurationUs) / durationUs);
+            }
+            if (minBufferPercent < 0 || percent < minBufferPercent) {
+                minBufferPercent = percent;
+            }
+        }
+
+        ++activeCount;
+        int64_t readyMark = mInPreparationPhase ? kPrepareMarkUs : kReadyMarkUs;
+        if (bufferedDurationUs > readyMark
+                || mPacketSources[i]->isFinished(0)) {
+            ++readyCount;
+        }
+        if (!mPacketSources[i]->isFinished(0)) {
+            if (bufferedDurationUs < kUnderflowMarkUs) {
+                ++underflowCount;
+            }
+            if (bufferedDurationUs > mUpSwitchMark) {
+                ++upCount;
+            }
+            if (bufferedDurationUs < mDownSwitchMark) {
+                ++downCount;
+            }
         }
     }
 
+    if (minBufferPercent >= 0) {
+        notifyBufferingUpdate(minBufferPercent);
+    }
+
     if (activeCount > 0) {
-        high = (highCount == activeCount);
-        mid = (midCount == activeCount);
-        low = (lowCount > 0);
+        up        = (upCount == activeCount);
+        down      = (downCount > 0);
+        ready     = (readyCount == activeCount);
+        underflow = (underflowCount > 0);
         return true;
     }
 
     return false;
 }
 
-void LiveSession::switchBandwidthIfNeeded(bool canSwitchUp) {
-    ssize_t bandwidthIndex = getBandwidthIndex();
+void LiveSession::startBufferingIfNecessary() {
+    ALOGV("startBufferingIfNecessary: mInPreparationPhase=%d, mBuffering=%d",
+            mInPreparationPhase, mBuffering);
+    if (!mBuffering) {
+        mBuffering = true;
 
-    if ((canSwitchUp && bandwidthIndex > mCurBandwidthIndex)
-            || (!canSwitchUp && bandwidthIndex < mCurBandwidthIndex)) {
+        sp<AMessage> notify = mNotify->dup();
+        notify->setInt32("what", kWhatBufferingStart);
+        notify->post();
+    }
+}
+
+void LiveSession::stopBufferingIfNecessary() {
+    ALOGV("stopBufferingIfNecessary: mInPreparationPhase=%d, mBuffering=%d",
+            mInPreparationPhase, mBuffering);
+
+    if (mBuffering) {
+        mBuffering = false;
+
+        sp<AMessage> notify = mNotify->dup();
+        notify->setInt32("what", kWhatBufferingEnd);
+        notify->post();
+    }
+}
+
+void LiveSession::notifyBufferingUpdate(int32_t percentage) {
+    if (percentage < mPrevBufferPercentage) {
+        percentage = mPrevBufferPercentage;
+    } else if (percentage > 100) {
+        percentage = 100;
+    }
+
+    mPrevBufferPercentage = percentage;
+
+    ALOGV("notifyBufferingUpdate: percentage=%d%%", percentage);
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatBufferingUpdate);
+    notify->setInt32("percentage", percentage);
+    notify->post();
+}
+
+void LiveSession::switchBandwidthIfNeeded(bool bufferHigh, bool bufferLow) {
+    // no need to check bandwidth if we only have one bandwidth setting
+    if (mSwitchInProgress || mBandwidthItems.size() < 2) {
+        return;
+    }
+
+    int32_t bandwidthBps;
+    if (mBandwidthEstimator->estimateBandwidth(&bandwidthBps)) {
+        ALOGV("bandwidth estimated at %.2f kbps", bandwidthBps / 1024.0f);
+        mLastBandwidthBps = bandwidthBps;
+    } else {
+        ALOGV("no bandwidth estimate.");
+        return;
+    }
+
+    int32_t curBandwidth = mBandwidthItems.itemAt(mCurBandwidthIndex).mBandwidth;
+    // canSwitchDown and canSwitchUp can't both be true.
+    // We only want to switch up when the measured bw is above 120% of the
+    // current variant's, and only switch down when it's below the current variant's.
+    bool canSwitchDown = bufferLow
+            && (bandwidthBps < (int32_t)curBandwidth);
+    bool canSwitchUp = bufferHigh
+            && (bandwidthBps > (int32_t)curBandwidth * 12 / 10);
+
+    if (canSwitchDown || canSwitchUp) {
+        ssize_t bandwidthIndex = getBandwidthIndex(bandwidthBps);
+
+        // It's possible that we're checking the canSwitchUp case but the returned
+        // bandwidthIndex is < mCurBandwidthIndex, as getBandwidthIndex() only uses 70%
+        // of the measured bw. In that case we don't want to do anything, since we have
+        // both enough buffer and enough bw.
+        if (bandwidthIndex == mCurBandwidthIndex
+                || (canSwitchUp && bandwidthIndex < mCurBandwidthIndex)
+                || (canSwitchDown && bandwidthIndex > mCurBandwidthIndex)) {
+            return;
+        }
+
+        ALOGI("#### Starting Bandwidth Switch: %zd => %zd",
+                mCurBandwidthIndex, bandwidthIndex);
         changeConfiguration(-1, bandwidthIndex, false);
     }
 }
 
+void LiveSession::postError(status_t err) {
+    // if we reached EOS, notify buffering of 100%
+    if (err == ERROR_END_OF_STREAM) {
+        notifyBufferingUpdate(100);
+    }
+    // we'll stop buffer polling now; before that, notify the end of
+    // buffering so that the spinning icon goes away
+    stopBufferingIfNecessary();
+    cancelPollBuffering();
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatError);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
 void LiveSession::postPrepared(status_t err) {
     CHECK(mInPreparationPhase);
 
@@ -1720,6 +2177,8 @@
     if (err == OK || err == ERROR_END_OF_STREAM) {
         notify->setInt32("what", kWhatPrepared);
     } else {
+        cancelPollBuffering();
+
         notify->setInt32("what", kWhatPreparationFailed);
         notify->setInt32("err", err);
     }
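
The LiveSession.cpp changes above replace the single low/mid/high watermark scheme with underflow/ready/down/up marks and gate a bandwidth switch on both the buffer level and the measured bandwidth. Below is a minimal standalone sketch of that decision rule; the function and parameter names are illustrative, not the actual class members.

    // Illustrative sketch only: mirrors the up/down decision made by the new
    // checkBuffering() + switchBandwidthIfNeeded() pair, with explicit inputs
    // instead of LiveSession's member state.
    #include <cstdint>
    #include <vector>

    struct SwitchDecision { bool up; bool down; };

    SwitchDecision decideSwitch(
            const std::vector<int64_t> &bufferedUsPerStream,  // active A/V sources
            int64_t upSwitchMarkUs,    // buffer level needed before switching up
            int64_t downSwitchMarkUs,  // buffer level that triggers switching down
            int32_t measuredBps,       // bandwidth estimator output
            int32_t curVariantBps) {   // bandwidth of the current variant
        bool allAboveUpMark = !bufferedUsPerStream.empty();
        bool anyBelowDownMark = false;
        for (int64_t us : bufferedUsPerStream) {
            allAboveUpMark = allAboveUpMark && (us > upSwitchMarkUs);
            anyBelowDownMark = anyBelowDownMark || (us < downSwitchMarkUs);
        }
        SwitchDecision d;
        // switch up only with ample buffer AND measured bw above 120% of current
        d.up = allAboveUpMark && (measuredBps > curVariantBps * 12 / 10);
        // switch down only when a buffer is draining AND measured bw is below current
        d.down = anyBelowDownMark && (measuredBps < curVariantBps);
        return d;
    }
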
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 3b0a9a4..b5e31c9 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -34,6 +34,7 @@
 struct LiveDataSource;
 struct M3UParser;
 struct PlaylistFetcher;
+struct HLSTime;
 
 struct LiveSession : public AHandler {
     enum Flags {
@@ -54,6 +55,12 @@
         STREAMTYPE_SUBTITLES    = 1 << kSubtitleIndex,
     };
 
+    enum SeekMode {
+        kSeekModeExactPosition = 0, // used for seeking
+        kSeekModeNextSample    = 1, // used for seamless switching
+        kSeekModeNextSegment   = 2, // used for seamless switching
+    };
+
     LiveSession(
             const sp<AMessage> &notify,
             uint32_t flags,
@@ -63,6 +70,8 @@
 
     status_t getStreamFormat(StreamType stream, sp<AMessage> *format);
 
+    sp<HTTPBase> getHTTPDataSource();
+
     void connectAsync(
             const char *url,
             const KeyedVector<String8, String8> *headers = NULL);
@@ -81,18 +90,18 @@
     bool isSeekable() const;
     bool hasDynamicDuration() const;
 
+    static const char *getKeyForStream(StreamType type);
+
     enum {
         kWhatStreamsChanged,
         kWhatError,
         kWhatPrepared,
         kWhatPreparationFailed,
+        kWhatBufferingStart,
+        kWhatBufferingEnd,
+        kWhatBufferingUpdate,
     };
 
-    // create a format-change discontinuity
-    //
-    // swap:
-    //   whether is format-change discontinuity should trigger a buffer swap
-    sp<ABuffer> createFormatChangeBuffer(bool swap = true);
 protected:
     virtual ~LiveSession();
 
@@ -106,20 +115,25 @@
         kWhatDisconnect                 = 'disc',
         kWhatSeek                       = 'seek',
         kWhatFetcherNotify              = 'notf',
-        kWhatCheckBandwidth             = 'bndw',
         kWhatChangeConfiguration        = 'chC0',
         kWhatChangeConfiguration2       = 'chC2',
         kWhatChangeConfiguration3       = 'chC3',
         kWhatFinishDisconnect2          = 'fin2',
-        kWhatSwapped                    = 'swap',
         kWhatPollBuffering              = 'poll',
     };
 
-    static const size_t kBandwidthHistoryBytes;
-    static const int64_t kHighWaterMark;
-    static const int64_t kMidWaterMark;
-    static const int64_t kLowWaterMark;
+    // Bandwidth Switch Mark Defaults
+    static const int64_t kUpSwitchMarkUs;
+    static const int64_t kDownSwitchMarkUs;
+    static const int64_t kUpSwitchMarginUs;
+    static const int64_t kResumeThresholdUs;
 
+    // Buffer Prepare/Ready/Underflow Marks
+    static const int64_t kReadyMarkUs;
+    static const int64_t kPrepareMarkUs;
+    static const int64_t kUnderflowMarkUs;
+
+    struct BandwidthEstimator;
     struct BandwidthItem {
         size_t mPlaylistIndex;
         unsigned long mBandwidth;
@@ -128,23 +142,22 @@
     struct FetcherInfo {
         sp<PlaylistFetcher> mFetcher;
         int64_t mDurationUs;
-        bool mIsPrepared;
         bool mToBeRemoved;
+        bool mToBeResumed;
     };
 
     struct StreamItem {
         const char *mType;
         AString mUri, mNewUri;
+        SeekMode mSeekMode;
         size_t mCurDiscontinuitySeq;
         int64_t mLastDequeuedTimeUs;
         int64_t mLastSampleDurationUs;
         StreamItem()
-            : mType(""),
-              mCurDiscontinuitySeq(0),
-              mLastDequeuedTimeUs(0),
-              mLastSampleDurationUs(0) {}
+            : StreamItem("") {}
         StreamItem(const char *type)
             : mType(type),
+              mSeekMode(kSeekModeExactPosition),
               mCurDiscontinuitySeq(0),
               mLastDequeuedTimeUs(0),
               mLastSampleDurationUs(0) {}
@@ -160,8 +173,10 @@
     uint32_t mFlags;
     sp<IMediaHTTPService> mHTTPService;
 
+    bool mBuffering;
     bool mInPreparationPhase;
-    bool mBuffering[kMaxStreams];
+    int32_t mPollBufferingGeneration;
+    int32_t mPrevBufferPercentage;
 
     sp<HTTPBase> mHTTPDataSource;
     KeyedVector<String8, String8> mExtraHeaders;
@@ -170,6 +185,9 @@
 
     Vector<BandwidthItem> mBandwidthItems;
     ssize_t mCurBandwidthIndex;
+    ssize_t mOrigBandwidthIndex;
+    int32_t mLastBandwidthBps;
+    sp<BandwidthEstimator> mBandwidthEstimator;
 
     sp<M3UParser> mPlaylist;
 
@@ -190,12 +208,6 @@
     // A second set of packet sources that buffer content for the variant we're switching to.
     KeyedVector<StreamType, sp<AnotherPacketSource> > mPacketSources2;
 
-    // A mutex used to serialize two sets of events:
-    // * the swapping of packet sources in dequeueAccessUnit on the player thread, AND
-    // * a forced bandwidth switch termination in cancelSwitch on the live looper.
-    Mutex mSwapMutex;
-
-    int32_t mCheckBandwidthGeneration;
     int32_t mSwitchGeneration;
     int32_t mSubtitleGeneration;
 
@@ -208,6 +220,10 @@
 
     bool mReconfigurationInProgress;
     bool mSwitchInProgress;
+    int64_t mUpSwitchMark;
+    int64_t mDownSwitchMark;
+    int64_t mUpSwitchMargin;
+
     sp<AReplyToken> mDisconnectReplyID;
     sp<AReplyToken> mSeekReplyID;
 
@@ -217,8 +233,6 @@
     KeyedVector<size_t, int64_t> mDiscontinuityAbsStartTimesUs;
     KeyedVector<size_t, int64_t> mDiscontinuityOffsetTimesUs;
 
-    int32_t mPollBufferingGeneration;
-
     sp<PlaylistFetcher> addFetcher(const char *uri);
 
     void onConnect(const sp<AMessage> &msg);
@@ -244,42 +258,54 @@
             uint32_t block_size = 0,
             /* reuse DataSource if doing partial fetch */
             sp<DataSource> *source = NULL,
-            String8 *actualUrl = NULL);
+            String8 *actualUrl = NULL,
+            /* force connect HTTP even when reusing the DataSource */
+            bool forceConnectHTTP = false);
 
     sp<M3UParser> fetchPlaylist(
             const char *url, uint8_t *curPlaylistHash, bool *unchanged);
 
-    size_t getBandwidthIndex();
-    int64_t latestMediaSegmentStartTimeUs();
+    bool resumeFetcher(
+            const AString &uri, uint32_t streamMask,
+            int64_t timeUs = -1ll, bool newUri = false);
+
+    float getAbortThreshold(
+            ssize_t currentBWIndex, ssize_t targetBWIndex) const;
+    void addBandwidthMeasurement(size_t numBytes, int64_t delayUs);
+    size_t getBandwidthIndex(int32_t bandwidthBps);
+    HLSTime latestMediaSegmentStartTime() const;
 
     static int SortByBandwidth(const BandwidthItem *, const BandwidthItem *);
     static StreamType indexToType(int idx);
     static ssize_t typeToIndex(int32_t type);
 
     void changeConfiguration(
-            int64_t timeUs, size_t bandwidthIndex, bool pickTrack = false);
+            int64_t timeUs, ssize_t bwIndex = -1, bool pickTrack = false);
     void onChangeConfiguration(const sp<AMessage> &msg);
     void onChangeConfiguration2(const sp<AMessage> &msg);
     void onChangeConfiguration3(const sp<AMessage> &msg);
-    void onSwapped(const sp<AMessage> &msg);
-    void tryToFinishBandwidthSwitch();
 
-    // cancelBandwidthSwitch is atomic wrt swapPacketSource; call it to prevent packet sources
-    // from being swapped out on stale discontinuities while manipulating
-    // mPacketSources/mPacketSources2.
-    void cancelBandwidthSwitch();
+    void swapPacketSource(StreamType stream);
+    void tryToFinishBandwidthSwitch(const AString &oldUri);
+    void cancelBandwidthSwitch(bool resume = false);
+    bool checkSwitchProgress(
+            sp<AMessage> &msg, int64_t delayUs, bool *needResumeUntil);
+
+    void switchBandwidthIfNeeded(bool bufferHigh, bool bufferLow);
 
     void schedulePollBuffering();
     void cancelPollBuffering();
+    void restartPollBuffering();
     void onPollBuffering();
-    bool checkBuffering(bool &low, bool &mid, bool &high);
-    void switchBandwidthIfNeeded(bool canSwitchUp);
+    bool checkBuffering(bool &underflow, bool &ready, bool &down, bool &up);
+    void startBufferingIfNecessary();
+    void stopBufferingIfNecessary();
+    void notifyBufferingUpdate(int32_t percentage);
 
     void finishDisconnect();
 
     void postPrepared(status_t err);
-
-    void swapPacketSource(StreamType stream);
+    void postError(status_t err);
 
     DISALLOW_EVIL_CONSTRUCTORS(LiveSession);
 };
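
The header above also declares the new kWhatBufferingUpdate notification. As implemented in the LiveSession.cpp hunks earlier in this patch, the reported percentage is derived from the last dequeued position plus the buffered duration, capped at 100, and never allowed to decrease between updates. A small standalone sketch of that computation, assuming the duration is known; the function name is illustrative.

    // Sketch of the percentage logic in checkBuffering()/notifyBufferingUpdate().
    #include <algorithm>
    #include <cstdint>

    int32_t computeBufferPercent(
            int64_t lastDequeuedUs,   // playback position of the last dequeued sample
            int64_t bufferedUs,       // estimated buffered duration of a stream
            int64_t durationUs,       // total duration (assumed > 0 here)
            int32_t prevPercent) {    // last percentage that was reported
        int32_t percent =
                (int32_t)(100.0 * (lastDequeuedUs + bufferedUs) / durationUs);
        percent = std::min(percent, 100);       // cap at 100%
        return std::max(percent, prevPercent);  // never report a lower value than before
    }
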
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 3c5d7cf..7bb7f2c 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -395,7 +395,9 @@
 
 bool M3UParser::getTypeURI(size_t index, const char *key, AString *uri) const {
     if (!mIsVariantPlaylist) {
-        *uri = mBaseURI;
+        if (uri != NULL) {
+            *uri = mBaseURI;
+        }
 
         // Assume media without any more specific attribute contains
         // audio and video, but no subtitles.
@@ -408,7 +410,9 @@
 
     AString groupID;
     if (!meta->findString(key, &groupID)) {
-        *uri = mItems.itemAt(index).mURI;
+        if (uri != NULL) {
+            *uri = mItems.itemAt(index).mURI;
+        }
 
         AString codecs;
         if (!meta->findString("codecs", &codecs)) {
@@ -434,18 +438,26 @@
         }
     }
 
-    sp<MediaGroup> group = mMediaGroups.valueFor(groupID);
-    if (!group->getActiveURI(uri)) {
-        return false;
-    }
+    // if uri == NULL, we're only checking whether the type is present;
+    // we don't care about the active URI (or whether there is an active one)
+    if (uri != NULL) {
+        sp<MediaGroup> group = mMediaGroups.valueFor(groupID);
+        if (!group->getActiveURI(uri)) {
+            return false;
+        }
 
-    if ((*uri).empty()) {
-        *uri = mItems.itemAt(index).mURI;
+        if ((*uri).empty()) {
+            *uri = mItems.itemAt(index).mURI;
+        }
     }
 
     return true;
 }
 
+bool M3UParser::hasType(size_t index, const char *key) const {
+    return getTypeURI(index, key, NULL /* uri */);
+}
+
 static bool MakeURL(const char *baseURL, const char *url, AString *out) {
     out->clear();
 
@@ -633,7 +645,8 @@
                         || !itemMeta->findInt64("durationUs", &durationUs)) {
                     return ERROR_MALFORMED;
                 }
-                itemMeta->setInt32("discontinuity-sequence", mDiscontinuitySeq + mDiscontinuityCount);
+                itemMeta->setInt32("discontinuity-sequence",
+                        mDiscontinuitySeq + mDiscontinuityCount);
             }
 
             mItems.push();
@@ -650,6 +663,14 @@
         ++lineNo;
     }
 
+    // error checking of all fields that are required to appear exactly once
+    // (currently only checking "target-duration")
+    int32_t targetDurationSecs;
+    if (!mIsVariantPlaylist && (mMeta == NULL || !mMeta->findInt32(
+            "target-duration", &targetDurationSecs))) {
+        return ERROR_MALFORMED;
+    }
+
     return OK;
 }
 
diff --git a/media/libstagefright/httplive/M3UParser.h b/media/libstagefright/httplive/M3UParser.h
index d475683..fef361f 100644
--- a/media/libstagefright/httplive/M3UParser.h
+++ b/media/libstagefright/httplive/M3UParser.h
@@ -50,6 +50,7 @@
     ssize_t getSelectedTrack(media_track_type /* type */) const;
 
     bool getTypeURI(size_t index, const char *key, AString *uri) const;
+    bool hasType(size_t index, const char *key) const;
 
 protected:
     virtual ~M3UParser();
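
The PlaylistFetcher changes that follow interpret the threshold passed to pauseAsync() as a fraction of the playlist's target duration: a negative value means always finish the current segment, zero means stop immediately, and a positive value aborts only when the portion of the segment still to be downloaded exceeds threshold * targetDuration (see shouldPauseDownload() below). A minimal sketch of that check, with illustrative names and explicit inputs rather than the fetcher's member state.

    // Sketch of the abort check performed by the new shouldPauseDownload().
    #include <cstdint>

    bool shouldAbortSegment(
            float thresholdRatio,         // <0: never abort, 0: abort now, >0: fraction of target duration
            int64_t targetDurationUs,     // EXT-X-TARGETDURATION, in microseconds
            int64_t enqueuedInSegmentUs)  // how much of the current segment is already enqueued
    {
        if (thresholdRatio < 0.0f) {
            return false;  // always finish fetching the segment
        }
        if (thresholdRatio == 0.0f) {
            return true;   // e.g. seeking: drop the current segment immediately
        }
        int64_t thresholdUs = (int64_t)(thresholdRatio * targetDurationUs);
        // abort only if the part still left to download exceeds the threshold
        return (targetDurationUs - enqueuedInSegmentUs) > thresholdUs;
    }
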
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 7f818a8..368612d 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -33,6 +33,7 @@
 #include <media/stagefright/foundation/ABitReader.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AUtils.h>
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/FileSource.h>
 #include <media/stagefright/MediaDefs.h>
@@ -47,12 +48,96 @@
 namespace android {
 
 // static
-const int64_t PlaylistFetcher::kMinBufferedDurationUs = 10000000ll;
+const int64_t PlaylistFetcher::kMinBufferedDurationUs = 30000000ll;
 const int64_t PlaylistFetcher::kMaxMonitorDelayUs = 3000000ll;
-const int64_t PlaylistFetcher::kFetcherResumeThreshold = 100000ll;
 // LCM of 188 (size of a TS packet) & 1k works well
 const int32_t PlaylistFetcher::kDownloadBlockSize = 47 * 1024;
-const int32_t PlaylistFetcher::kNumSkipFrames = 5;
+
+struct PlaylistFetcher::DownloadState : public RefBase {
+    DownloadState();
+    void resetState();
+    bool hasSavedState() const;
+    void restoreState(
+            AString &uri,
+            sp<AMessage> &itemMeta,
+            sp<ABuffer> &buffer,
+            sp<ABuffer> &tsBuffer,
+            int32_t &firstSeqNumberInPlaylist,
+            int32_t &lastSeqNumberInPlaylist);
+    void saveState(
+            AString &uri,
+            sp<AMessage> &itemMeta,
+            sp<ABuffer> &buffer,
+            sp<ABuffer> &tsBuffer,
+            int32_t &firstSeqNumberInPlaylist,
+            int32_t &lastSeqNumberInPlaylist);
+
+private:
+    bool mHasSavedState;
+    AString mUri;
+    sp<AMessage> mItemMeta;
+    sp<ABuffer> mBuffer;
+    sp<ABuffer> mTsBuffer;
+    int32_t mFirstSeqNumberInPlaylist;
+    int32_t mLastSeqNumberInPlaylist;
+};
+
+PlaylistFetcher::DownloadState::DownloadState() {
+    resetState();
+}
+
+bool PlaylistFetcher::DownloadState::hasSavedState() const {
+    return mHasSavedState;
+}
+
+void PlaylistFetcher::DownloadState::resetState() {
+    mHasSavedState = false;
+
+    mUri.clear();
+    mItemMeta = NULL;
+    mBuffer = NULL;
+    mTsBuffer = NULL;
+    mFirstSeqNumberInPlaylist = 0;
+    mLastSeqNumberInPlaylist = 0;
+}
+
+void PlaylistFetcher::DownloadState::restoreState(
+        AString &uri,
+        sp<AMessage> &itemMeta,
+        sp<ABuffer> &buffer,
+        sp<ABuffer> &tsBuffer,
+        int32_t &firstSeqNumberInPlaylist,
+        int32_t &lastSeqNumberInPlaylist) {
+    if (!mHasSavedState) {
+        return;
+    }
+
+    uri = mUri;
+    itemMeta = mItemMeta;
+    buffer = mBuffer;
+    tsBuffer = mTsBuffer;
+    firstSeqNumberInPlaylist = mFirstSeqNumberInPlaylist;
+    lastSeqNumberInPlaylist = mLastSeqNumberInPlaylist;
+
+    resetState();
+}
+
+void PlaylistFetcher::DownloadState::saveState(
+        AString &uri,
+        sp<AMessage> &itemMeta,
+        sp<ABuffer> &buffer,
+        sp<ABuffer> &tsBuffer,
+        int32_t &firstSeqNumberInPlaylist,
+        int32_t &lastSeqNumberInPlaylist) {
+    mHasSavedState = true;
+
+    mUri = uri;
+    mItemMeta = itemMeta;
+    mBuffer = buffer;
+    mTsBuffer = tsBuffer;
+    mFirstSeqNumberInPlaylist = firstSeqNumberInPlaylist;
+    mLastSeqNumberInPlaylist = lastSeqNumberInPlaylist;
+}
 
 PlaylistFetcher::PlaylistFetcher(
         const sp<AMessage> &notify,
@@ -71,18 +156,21 @@
       mSeqNumber(-1),
       mNumRetries(0),
       mStartup(true),
-      mAdaptive(false),
-      mPrepared(false),
+      mIDRFound(false),
+      mSeekMode(LiveSession::kSeekModeExactPosition),
       mTimeChangeSignaled(false),
       mNextPTSTimeUs(-1ll),
       mMonitorQueueGeneration(0),
       mSubtitleGeneration(subtitleGeneration),
       mLastDiscontinuitySeq(-1ll),
-      mStopping(false),
       mRefreshState(INITIAL_MINIMUM_RELOAD_DELAY),
       mFirstPTSValid(false),
-      mVideoBuffer(new AnotherPacketSource(NULL)) {
+      mFirstTimeUs(-1ll),
+      mVideoBuffer(new AnotherPacketSource(NULL)),
+      mThresholdRatio(-1.0f),
+      mDownloadState(new DownloadState()) {
     memset(mPlaylistHash, 0, sizeof(mPlaylistHash));
+    mHTTPDataSource = mSession->getHTTPDataSource();
 }
 
 PlaylistFetcher::~PlaylistFetcher() {
@@ -119,6 +207,32 @@
     return segmentStartUs;
 }
 
+int64_t PlaylistFetcher::getSegmentDurationUs(int32_t seqNumber) const {
+    CHECK(mPlaylist != NULL);
+
+    int32_t firstSeqNumberInPlaylist;
+    if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
+                "media-sequence", &firstSeqNumberInPlaylist)) {
+        firstSeqNumberInPlaylist = 0;
+    }
+
+    int32_t lastSeqNumberInPlaylist =
+        firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
+
+    CHECK_GE(seqNumber, firstSeqNumberInPlaylist);
+    CHECK_LE(seqNumber, lastSeqNumberInPlaylist);
+
+    int32_t index = seqNumber - firstSeqNumberInPlaylist;
+    sp<AMessage> itemMeta;
+    CHECK(mPlaylist->itemAt(
+                index, NULL /* uri */, &itemMeta));
+
+    int64_t itemDurationUs;
+    CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
+
+    return itemDurationUs;
+}
+
 int64_t PlaylistFetcher::delayUsToRefreshPlaylist() const {
     int64_t nowUs = ALooper::GetNowUs();
 
@@ -334,9 +448,12 @@
     ++mMonitorQueueGeneration;
 }
 
-void PlaylistFetcher::setStopping(bool stopping) {
-    AutoMutex _l(mStoppingLock);
-    mStopping = stopping;
+void PlaylistFetcher::setStoppingThreshold(float thresholdRatio) {
+    AutoMutex _l(mThresholdLock);
+    if (mStreamTypeMask == LiveSession::STREAMTYPE_SUBTITLES) {
+        return;
+    }
+    mThresholdRatio = thresholdRatio;
 }
 
 void PlaylistFetcher::startAsync(
@@ -346,7 +463,7 @@
         int64_t startTimeUs,
         int64_t segmentStartTimeUs,
         int32_t startDiscontinuitySeq,
-        bool adaptive) {
+        LiveSession::SeekMode seekMode) {
     sp<AMessage> msg = new AMessage(kWhatStart, this);
 
     uint32_t streamTypeMask = 0ul;
@@ -370,19 +487,19 @@
     msg->setInt64("startTimeUs", startTimeUs);
     msg->setInt64("segmentStartTimeUs", segmentStartTimeUs);
     msg->setInt32("startDiscontinuitySeq", startDiscontinuitySeq);
-    msg->setInt32("adaptive", adaptive);
+    msg->setInt32("seekMode", seekMode);
     msg->post();
 }
 
-void PlaylistFetcher::pauseAsync(bool immediate) {
-    if (immediate) {
-        setStopping(true);
+void PlaylistFetcher::pauseAsync(float thresholdRatio) {
+    if (thresholdRatio >= 0.0f) {
+        setStoppingThreshold(thresholdRatio);
     }
     (new AMessage(kWhatPause, this))->post();
 }
 
 void PlaylistFetcher::stopAsync(bool clear) {
-    setStopping(true);
+    setStoppingThreshold(0.0f);
 
     sp<AMessage> msg = new AMessage(kWhatStop, this);
     msg->setInt32("clear", clear);
@@ -414,6 +531,10 @@
 
             sp<AMessage> notify = mNotify->dup();
             notify->setInt32("what", kWhatPaused);
+            notify->setInt32("seekMode",
+                    mDownloadState->hasSavedState()
+                    ? LiveSession::kSeekModeNextSample
+                    : LiveSession::kSeekModeNextSegment);
             notify->post();
             break;
         }
@@ -463,7 +584,7 @@
     mStopParams.clear();
     mStartTimeUsNotify = mNotify->dup();
     mStartTimeUsNotify->setInt32("what", kWhatStartedAt);
-    mStartTimeUsNotify->setInt32("streamMask", 0);
+    mStartTimeUsNotify->setString("uri", mURI);
 
     uint32_t streamTypeMask;
     CHECK(msg->findInt32("streamTypeMask", (int32_t *)&streamTypeMask));
@@ -471,11 +592,11 @@
     int64_t startTimeUs;
     int64_t segmentStartTimeUs;
     int32_t startDiscontinuitySeq;
-    int32_t adaptive;
+    int32_t seekMode;
     CHECK(msg->findInt64("startTimeUs", &startTimeUs));
     CHECK(msg->findInt64("segmentStartTimeUs", &segmentStartTimeUs));
     CHECK(msg->findInt32("startDiscontinuitySeq", &startDiscontinuitySeq));
-    CHECK(msg->findInt32("adaptive", &adaptive));
+    CHECK(msg->findInt32("seekMode", &seekMode));
 
     if (streamTypeMask & LiveSession::STREAMTYPE_AUDIO) {
         void *ptr;
@@ -507,20 +628,26 @@
     mStreamTypeMask = streamTypeMask;
 
     mSegmentStartTimeUs = segmentStartTimeUs;
-    mDiscontinuitySeq = startDiscontinuitySeq;
+
+    if (startDiscontinuitySeq >= 0) {
+        mDiscontinuitySeq = startDiscontinuitySeq;
+    }
 
     mRefreshState = INITIAL_MINIMUM_RELOAD_DELAY;
+    mSeekMode = (LiveSession::SeekMode) seekMode;
+
+    if (startTimeUs >= 0 || mSeekMode == LiveSession::kSeekModeNextSample) {
+        mStartup = true;
+        mIDRFound = false;
+        mVideoBuffer->clear();
+    }
 
     if (startTimeUs >= 0) {
         mStartTimeUs = startTimeUs;
         mFirstPTSValid = false;
         mSeqNumber = -1;
-        mStartup = true;
-        mPrepared = false;
-        mIDRFound = false;
         mTimeChangeSignaled = false;
-        mAdaptive = adaptive;
-        mVideoBuffer->clear();
+        mDownloadState->resetState();
     }
 
     postMonitorQueue();
@@ -532,7 +659,7 @@
     cancelMonitorQueue();
     mLastDiscontinuitySeq = mDiscontinuitySeq;
 
-    setStopping(false);
+    setStoppingThreshold(-1.0f);
 }
 
 void PlaylistFetcher::onStop(const sp<AMessage> &msg) {
@@ -547,10 +674,14 @@
         }
     }
 
+    // close off the connection after use
+    mHTTPDataSource->disconnect();
+
+    mDownloadState->resetState();
     mPacketSources.clear();
     mStreamTypeMask = 0;
 
-    setStopping(false);
+    setStoppingThreshold(-1.0f);
 }
 
 // Resume until we have reached the boundary timestamps listed in `msg`; when
@@ -560,61 +691,18 @@
     sp<AMessage> params;
     CHECK(msg->findMessage("params", &params));
 
-    size_t stopCount = 0;
-    for (size_t i = 0; i < mPacketSources.size(); i++) {
-        sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
-
-        const char *stopKey;
-        int streamType = mPacketSources.keyAt(i);
-
-        if (streamType == LiveSession::STREAMTYPE_SUBTITLES) {
-            // the subtitle track can always be stopped
-            ++stopCount;
-            continue;
-        }
-
-        switch (streamType) {
-        case LiveSession::STREAMTYPE_VIDEO:
-            stopKey = "timeUsVideo";
-            break;
-
-        case LiveSession::STREAMTYPE_AUDIO:
-            stopKey = "timeUsAudio";
-            break;
-
-        default:
-            TRESPASS();
-        }
-
-        // check if this stream has too little data left to be resumed
-        int32_t discontinuitySeq;
-        int64_t latestTimeUs = 0, stopTimeUs = 0;
-        sp<AMessage> latestMeta = packetSource->getLatestEnqueuedMeta();
-        if (latestMeta != NULL
-                && latestMeta->findInt32("discontinuitySeq", &discontinuitySeq)
-                && discontinuitySeq == mDiscontinuitySeq
-                && latestMeta->findInt64("timeUs", &latestTimeUs)
-                && params->findInt64(stopKey, &stopTimeUs)
-                && stopTimeUs - latestTimeUs < kFetcherResumeThreshold) {
-            ++stopCount;
-        }
-    }
-
-    // Don't resume if all streams are within a resume threshold
-    if (stopCount == mPacketSources.size()) {
-        for (size_t i = 0; i < mPacketSources.size(); i++) {
-            mPacketSources.valueAt(i)->queueAccessUnit(mSession->createFormatChangeBuffer());
-        }
-        stopAsync(/* clear = */ false);
-        return OK;
-    }
-
     mStopParams = params;
     onDownloadNext();
 
     return OK;
 }
 
+void PlaylistFetcher::notifyStopReached() {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatStopReached);
+    notify->post();
+}
+
 void PlaylistFetcher::notifyError(status_t err) {
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", kWhatError);
@@ -634,7 +722,12 @@
 
 void PlaylistFetcher::onMonitorQueue() {
     bool downloadMore = false;
-    refreshPlaylist();
+
+    // if we're in the middle of an unfinished download, delay the
+    // playlist refresh as it'll change the sequence numbers
+    if (!mDownloadState->hasSavedState()) {
+        refreshPlaylist();
+    }
 
     int32_t targetDurationSecs;
     int64_t targetDurationUs = kMinBufferedDurationUs;
@@ -648,23 +741,23 @@
         targetDurationUs = targetDurationSecs * 1000000ll;
     }
 
-    int64_t durationToBufferUs = kMinBufferedDurationUs;
-
     int64_t bufferedDurationUs = 0ll;
-    status_t finalResult = NOT_ENOUGH_DATA;
+    status_t finalResult = OK;
     if (mStreamTypeMask == LiveSession::STREAMTYPE_SUBTITLES) {
         sp<AnotherPacketSource> packetSource =
             mPacketSources.valueFor(LiveSession::STREAMTYPE_SUBTITLES);
 
         bufferedDurationUs =
                 packetSource->getBufferedDurationUs(&finalResult);
-        finalResult = OK;
     } else {
-        // Use max stream duration to prevent us from waiting on a non-existent stream;
-        // when we cannot make out from the manifest what streams are included in a playlist
-        // we might assume extra streams.
+        // Use the min stream duration, but ignore streams that never have any
+        // packets enqueued, to prevent us from waiting on a non-existent stream;
+        // when we cannot make out from the manifest which streams are included in
+        // a playlist, we might assume extra streams.
+        bufferedDurationUs = -1ll;
         for (size_t i = 0; i < mPacketSources.size(); ++i) {
-            if ((mStreamTypeMask & mPacketSources.keyAt(i)) == 0) {
+            if ((mStreamTypeMask & mPacketSources.keyAt(i)) == 0
+                    || mPacketSources[i]->getLatestEnqueuedMeta() == NULL) {
                 continue;
             }
 
@@ -672,24 +765,19 @@
                 mPacketSources.valueAt(i)->getBufferedDurationUs(&finalResult);
             ALOGV("buffered %" PRId64 " for stream %d",
                     bufferedStreamDurationUs, mPacketSources.keyAt(i));
-            if (bufferedStreamDurationUs > bufferedDurationUs) {
+            if (bufferedDurationUs == -1ll
+                 || bufferedStreamDurationUs < bufferedDurationUs) {
                 bufferedDurationUs = bufferedStreamDurationUs;
             }
         }
-    }
-    downloadMore = (bufferedDurationUs < durationToBufferUs);
-
-    // signal start if buffered up at least the target size
-    if (!mPrepared && bufferedDurationUs > targetDurationUs && downloadMore) {
-        mPrepared = true;
-
-        ALOGV("prepared, buffered=%" PRId64 " > %" PRId64 "",
-                bufferedDurationUs, targetDurationUs);
+        if (bufferedDurationUs == -1ll) {
+            bufferedDurationUs = 0ll;
+        }
     }
 
-    if (finalResult == OK && downloadMore) {
+    if (finalResult == OK && bufferedDurationUs < kMinBufferedDurationUs) {
         ALOGV("monitoring, buffered=%" PRId64 " < %" PRId64 "",
-                bufferedDurationUs, durationToBufferUs);
+                bufferedDurationUs, kMinBufferedDurationUs);
         // delay the next download slightly; hopefully this gives other concurrent fetchers
         // a better chance to run.
         // onDownloadNext();
@@ -697,13 +785,16 @@
         msg->setInt32("generation", mMonitorQueueGeneration);
         msg->post(1000l);
     } else {
-        // Nothing to do yet, try again in a second.
-        int64_t delayUs = mPrepared ? kMaxMonitorDelayUs : targetDurationUs / 2;
+        // We'd like to maintain buffering above kMinBufferedDurationUs, so try
+        // again when the buffer is just about to go below kMinBufferedDurationUs
+        // (or after targetDurationUs / 2, whichever is smaller).
+        int64_t delayUs = bufferedDurationUs - kMinBufferedDurationUs + 1000000ll;
+        if (delayUs > targetDurationUs / 2) {
+            delayUs = targetDurationUs / 2;
+        }
         ALOGV("pausing for %" PRId64 ", buffered=%" PRId64 " > %" PRId64 "",
-                delayUs, bufferedDurationUs, durationToBufferUs);
-        // :TRICKY: need to enforce minimum delay because the delay to
-        // refresh the playlist will become 0
-        postMonitorQueue(delayUs, mPrepared ? targetDurationUs * 2 : 0);
+                delayUs, bufferedDurationUs, kMinBufferedDurationUs);
+        postMonitorQueue(delayUs);
     }
 }
 
@@ -732,6 +823,13 @@
             if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
                 updateDuration();
             }
+            // Notify LiveSession to use a target-duration based buffering level
+            // for up/down switching. The default LiveSession::kUpSwitchMark may not
+            // be reachable for live streams, as our max buffering amount is
+            // limited to 3 segments.
+            if (!mPlaylist->isComplete()) {
+                updateTargetDuration();
+            }
         }
 
         mLastPlaylistFetchTimeUs = ALooper::GetNowUs();
@@ -744,10 +842,69 @@
     return buffer->size() > 0 && buffer->data()[0] == 0x47;
 }
 
-void PlaylistFetcher::onDownloadNext() {
+bool PlaylistFetcher::shouldPauseDownload() {
+    if (mStreamTypeMask == LiveSession::STREAMTYPE_SUBTITLES) {
+        // doesn't apply to subtitles
+        return false;
+    }
+
+    // Calculate threshold to abort current download
+    int32_t targetDurationSecs;
+    CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
+    int64_t targetDurationUs = targetDurationSecs * 1000000ll;
+    int64_t thresholdUs = -1;
+    {
+        AutoMutex _l(mThresholdLock);
+        thresholdUs = (mThresholdRatio < 0.0f) ?
+                -1ll : mThresholdRatio * targetDurationUs;
+    }
+
+    if (thresholdUs < 0) {
+        // never abort
+        return false;
+    } else if (thresholdUs == 0) {
+        // immediately abort
+        return true;
+    }
+
+    // now we have a positive thresholdUs; abort if the remaining
+    // portion to download is over that threshold.
+    if (mSegmentFirstPTS < 0) {
+        // this means we haven't even found the first access unit;
+        // abort now, as we must be very far away from the end.
+        return true;
+    }
+    int64_t lastEnqueueUs = mSegmentFirstPTS;
+    for (size_t i = 0; i < mPacketSources.size(); ++i) {
+        if ((mStreamTypeMask & mPacketSources.keyAt(i)) == 0) {
+            continue;
+        }
+        sp<AMessage> meta = mPacketSources[i]->getLatestEnqueuedMeta();
+        int32_t type;
+        if (meta == NULL || meta->findInt32("discontinuity", &type)) {
+            continue;
+        }
+        int64_t tmpUs;
+        CHECK(meta->findInt64("timeUs", &tmpUs));
+        if (tmpUs > lastEnqueueUs) {
+            lastEnqueueUs = tmpUs;
+        }
+    }
+    lastEnqueueUs -= mSegmentFirstPTS;
+    if (targetDurationUs - lastEnqueueUs > thresholdUs) {
+        return true;
+    }
+    return false;
+}
+
+bool PlaylistFetcher::initDownloadState(
+        AString &uri,
+        sp<AMessage> &itemMeta,
+        int32_t &firstSeqNumberInPlaylist,
+        int32_t &lastSeqNumberInPlaylist) {
     status_t err = refreshPlaylist();
-    int32_t firstSeqNumberInPlaylist = 0;
-    int32_t lastSeqNumberInPlaylist = 0;
+    firstSeqNumberInPlaylist = 0;
+    lastSeqNumberInPlaylist = 0;
     bool discontinuity = false;
 
     if (mPlaylist != NULL) {
@@ -763,6 +920,8 @@
         }
     }
 
+    mSegmentFirstPTS = -1ll;
+
     if (mPlaylist != NULL && mSeqNumber < 0) {
         CHECK_GE(mStartTimeUs, 0ll);
 
@@ -790,7 +949,8 @@
             // timestamps coming from the media container) is used to determine the position
             // inside a segments.
             mSeqNumber = getSeqNumberForTime(mSegmentStartTimeUs);
-            if (mAdaptive) {
+            if (mStreamTypeMask != LiveSession::STREAMTYPE_SUBTITLES
+                    && mSeekMode != LiveSession::kSeekModeNextSample) {
                 // avoid double fetch/decode
                 mSeqNumber += 1;
             }
@@ -840,12 +1000,12 @@
                         mSeqNumber, firstSeqNumberInPlaylist,
                         lastSeqNumberInPlaylist, delayUs, mNumRetries);
                 postMonitorQueue(delayUs);
-                return;
+                return false;
             }
 
             if (err != OK) {
                 notifyError(err);
-                return;
+                return false;
             }
 
             // we've missed the boat, let's start 3 segments prior to the latest sequence
@@ -860,12 +1020,8 @@
                 // but since the segments we are supposed to fetch have already rolled off
                 // the playlist, i.e. we have already missed the boat, we inevitably have to
                 // skip.
-                for (size_t i = 0; i < mPacketSources.size(); i++) {
-                    sp<ABuffer> formatChange = mSession->createFormatChangeBuffer();
-                    mPacketSources.valueAt(i)->queueAccessUnit(formatChange);
-                }
-                stopAsync(/* clear = */ false);
-                return;
+                notifyStopReached();
+                return false;
             }
             mSeqNumber = lastSeqNumberInPlaylist - 3;
             if (mSeqNumber < firstSeqNumberInPlaylist) {
@@ -875,20 +1031,27 @@
 
             // fall through
         } else {
-            ALOGE("Cannot find sequence number %d in playlist "
-                 "(contains %d - %d)",
-                 mSeqNumber, firstSeqNumberInPlaylist,
-                  firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1);
+            if (mPlaylist != NULL) {
+                ALOGE("Cannot find sequence number %d in playlist "
+                     "(contains %d - %d)",
+                     mSeqNumber, firstSeqNumberInPlaylist,
+                      firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1);
 
-            notifyError(ERROR_END_OF_STREAM);
-            return;
+                notifyError(ERROR_END_OF_STREAM);
+            } else {
+                // It's possible that we were never able to download the playlist.
+                // In this case we should notify an error instead of EOS, as EOS
+                // during prepare means we succeeded in downloading everything.
+                ALOGE("Failed to download playlist!");
+                notifyError(ERROR_IO);
+            }
+
+            return false;
         }
     }
 
     mNumRetries = 0;
 
-    AString uri;
-    sp<AMessage> itemMeta;
     CHECK(mPlaylist->itemAt(
                 mSeqNumber - firstSeqNumberInPlaylist,
                 &uri,
@@ -911,20 +1074,6 @@
     }
     mLastDiscontinuitySeq = -1;
 
-    int64_t range_offset, range_length;
-    if (!itemMeta->findInt64("range-offset", &range_offset)
-            || !itemMeta->findInt64("range-length", &range_length)) {
-        range_offset = 0;
-        range_length = -1;
-    }
-
-    ALOGV("fetching segment %d from (%d .. %d)",
-          mSeqNumber, firstSeqNumberInPlaylist, lastSeqNumberInPlaylist);
-
-    ALOGV("fetching '%s'", uri.c_str());
-
-    sp<DataSource> source;
-    sp<ABuffer> buffer, tsBuffer;
     // decrypt a junk buffer to prefetch key; since a session uses only one http connection,
     // this avoids interleaved connections to the key and segment file.
     {
@@ -934,7 +1083,7 @@
                 true /* first */);
         if (err != OK) {
             notifyError(err);
-            return;
+            return false;
         }
     }
 
@@ -962,8 +1111,10 @@
 
         // Signal a format discontinuity to ATSParser to clear partial data
         // from previous streams. Not doing this causes bitstream corruption.
-        mTSParser->signalDiscontinuity(
-                ATSParser::DISCONTINUITY_FORMATCHANGE, NULL /* extra */);
+        if (mTSParser != NULL) {
+            mTSParser->signalDiscontinuity(
+                    ATSParser::DISCONTINUITY_FORMATCHANGE, NULL /* extra */);
+        }
 
         queueDiscontinuity(
                 ATSParser::DISCONTINUITY_FORMATCHANGE,
@@ -983,11 +1134,71 @@
         }
     }
 
+    ALOGV("fetching segment %d from (%d .. %d)",
+            mSeqNumber, firstSeqNumberInPlaylist, lastSeqNumberInPlaylist);
+    return true;
+}
+
+void PlaylistFetcher::onDownloadNext() {
+    AString uri;
+    sp<AMessage> itemMeta;
+    sp<ABuffer> buffer;
+    sp<ABuffer> tsBuffer;
+    int32_t firstSeqNumberInPlaylist = 0;
+    int32_t lastSeqNumberInPlaylist = 0;
+    bool connectHTTP = true;
+
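+    // A download that was paused mid-segment saved its state; resume from it and
+    // keep using the already-open HTTP connection instead of reconnecting.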
+    if (mDownloadState->hasSavedState()) {
+        mDownloadState->restoreState(
+                uri,
+                itemMeta,
+                buffer,
+                tsBuffer,
+                firstSeqNumberInPlaylist,
+                lastSeqNumberInPlaylist);
+        connectHTTP = false;
+        ALOGV("resuming: '%s'", uri.c_str());
+    } else {
+        if (!initDownloadState(
+                uri,
+                itemMeta,
+                firstSeqNumberInPlaylist,
+                lastSeqNumberInPlaylist)) {
+            return;
+        }
+        ALOGV("fetching: '%s'", uri.c_str());
+    }
+
+    int64_t range_offset, range_length;
+    if (!itemMeta->findInt64("range-offset", &range_offset)
+            || !itemMeta->findInt64("range-length", &range_length)) {
+        range_offset = 0;
+        range_length = -1;
+    }
+
     // block-wise download
+    bool shouldPause = false;
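+    // when set, stop fetching after the current block and leave the fetcher paused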
     ssize_t bytesRead;
     do {
+        sp<DataSource> source = mHTTPDataSource;
+
+        int64_t startUs = ALooper::GetNowUs();
         bytesRead = mSession->fetchFile(
-                uri.c_str(), &buffer, range_offset, range_length, kDownloadBlockSize, &source);
+                uri.c_str(), &buffer, range_offset, range_length, kDownloadBlockSize,
+                &source, NULL, connectHTTP);
+
+        // add a sample for bandwidth estimation, excluding samples from subtitles (as
+        // they're too small), or during startup/resumeUntil (when we could have more
+        // than one connection open, which skews the bandwidth estimate)
+        if (!mStartup && mStopParams == NULL && bytesRead > 0
+                && (mStreamTypeMask
+                        & (LiveSession::STREAMTYPE_AUDIO
+                        | LiveSession::STREAMTYPE_VIDEO))) {
+            int64_t delayUs = ALooper::GetNowUs() - startUs;
+            mSession->addBandwidthMeasurement(bytesRead, delayUs);
+        }
+
+        connectHTTP = false;
 
         if (bytesRead < 0) {
             status_t err = bytesRead;
@@ -1013,6 +1224,8 @@
             return;
         }
 
+        bool startUp = mStartup; // save current start up state
+
         err = OK;
         if (bufferStartsWithTsSyncByte(buffer)) {
             // Incremental extraction is only supported for MPEG2 transport streams.
@@ -1025,7 +1238,6 @@
                 tsBuffer->setRange(tsOff, tsSize);
             }
             tsBuffer->setRange(tsBuffer->offset(), tsBuffer->size() + bytesRead);
-
             err = extractAndQueueAccessUnitsFromTs(tsBuffer);
         }
 
@@ -1040,14 +1252,36 @@
             return;
         } else if (err == ERROR_OUT_OF_RANGE) {
             // reached stopping point
-            stopAsync(/* clear = */ false);
+            notifyStopReached();
             return;
         } else if (err != OK) {
             notifyError(err);
             return;
         }
-
-    } while (bytesRead != 0 && !mStopping);
+        // If we're switching, post the start notification; it should only be
+        // posted once the last chunk has been fully processed by the TS parser.
+        if (mSeekMode != LiveSession::kSeekModeExactPosition && startUp != mStartup) {
+            CHECK(mStartTimeUsNotify != NULL);
+            mStartTimeUsNotify->post();
+            mStartTimeUsNotify.clear();
+            shouldPause = true;
+        }
+        if (shouldPause || shouldPauseDownload()) {
+            // save state and return if this is not the last chunk,
+            // leaving the fetcher in paused state.
+            if (bytesRead != 0) {
+                mDownloadState->saveState(
+                        uri,
+                        itemMeta,
+                        buffer,
+                        tsBuffer,
+                        firstSeqNumberInPlaylist,
+                        lastSeqNumberInPlaylist);
+                return;
+            }
+            shouldPause = true;
+        }
+    } while (bytesRead != 0);
 
     if (bufferStartsWithTsSyncByte(buffer)) {
         // If we don't see a stream in the program table after fetching a full ts segment
@@ -1083,7 +1317,6 @@
         return;
     }
 
-    err = OK;
     if (tsBuffer != NULL) {
         AString method;
         CHECK(buffer->meta()->findString("cipher-method", &method));
@@ -1097,30 +1330,40 @@
     }
 
     // bulk extract non-ts files
+    bool startUp = mStartup;
     if (tsBuffer == NULL) {
-        err = extractAndQueueAccessUnits(buffer, itemMeta);
+        status_t err = extractAndQueueAccessUnits(buffer, itemMeta);
         if (err == -EAGAIN) {
             // starting sequence number too low/high
             postMonitorQueue();
             return;
         } else if (err == ERROR_OUT_OF_RANGE) {
             // reached stopping point
-            stopAsync(/* clear = */false);
+            notifyStopReached();
+            return;
+        } else if (err != OK) {
+            notifyError(err);
             return;
         }
     }
 
-    if (err != OK) {
-        notifyError(err);
-        return;
-    }
-
     ++mSeqNumber;
 
-    postMonitorQueue();
+    // if adapting, pause after finding the next starting point
+    if (mSeekMode != LiveSession::kSeekModeExactPosition && startUp != mStartup) {
+        CHECK(mStartTimeUsNotify != NULL);
+        mStartTimeUsNotify->post();
+        mStartTimeUsNotify.clear();
+        shouldPause = true;
+    }
+
+    if (!shouldPause) {
+        postMonitorQueue();
+    }
 }
 
-int32_t PlaylistFetcher::getSeqNumberWithAnchorTime(int64_t anchorTimeUs) const {
+int32_t PlaylistFetcher::getSeqNumberWithAnchorTime(
+        int64_t anchorTimeUs, int64_t targetDiffUs) const {
     int32_t firstSeqNumberInPlaylist, lastSeqNumberInPlaylist;
     if (mPlaylist->meta() == NULL
             || !mPlaylist->meta()->findInt32("media-sequence", &firstSeqNumberInPlaylist)) {
@@ -1129,7 +1372,8 @@
     lastSeqNumberInPlaylist = firstSeqNumberInPlaylist + mPlaylist->size() - 1;
 
     int32_t index = mSeqNumber - firstSeqNumberInPlaylist - 1;
-    while (index >= 0 && anchorTimeUs > mStartTimeUs) {
+    // adjust anchorTimeUs until it is within targetDiffUs of mStartTimeUs
+    while (index >= 0 && anchorTimeUs - mStartTimeUs > targetDiffUs) {
         sp<AMessage> itemMeta;
         CHECK(mPlaylist->itemAt(index, NULL /* uri */, &itemMeta));
 
@@ -1150,28 +1394,22 @@
 
 int32_t PlaylistFetcher::getSeqNumberForDiscontinuity(size_t discontinuitySeq) const {
     int32_t firstSeqNumberInPlaylist;
-    if (mPlaylist->meta() == NULL
-            || !mPlaylist->meta()->findInt32("media-sequence", &firstSeqNumberInPlaylist)) {
+    if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
+                "media-sequence", &firstSeqNumberInPlaylist)) {
         firstSeqNumberInPlaylist = 0;
     }
 
-    size_t curDiscontinuitySeq = mPlaylist->getDiscontinuitySeq();
-    if (discontinuitySeq < curDiscontinuitySeq) {
-        return firstSeqNumberInPlaylist <= 0 ? 0 : (firstSeqNumberInPlaylist - 1);
-    }
-
     size_t index = 0;
     while (index < mPlaylist->size()) {
         sp<AMessage> itemMeta;
         CHECK(mPlaylist->itemAt( index, NULL /* uri */, &itemMeta));
-
-        int64_t discontinuity;
-        if (itemMeta->findInt64("discontinuity", &discontinuity)) {
-            curDiscontinuitySeq++;
-        }
-
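+        // Each playlist item carries its own discontinuity sequence number; return
+        // the first segment of the requested sequence, or the segment just before
+        // the first newer one if the requested sequence is no longer in the playlist.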
+        size_t curDiscontinuitySeq;
+        CHECK(itemMeta->findInt32("discontinuity-sequence", (int32_t *)&curDiscontinuitySeq));
+        int32_t seqNumber = firstSeqNumberInPlaylist + index;
         if (curDiscontinuitySeq == discontinuitySeq) {
-            return firstSeqNumberInPlaylist + index;
+            return seqNumber;
+        } else if (curDiscontinuitySeq > discontinuitySeq) {
+            return seqNumber <= 0 ? 0 : seqNumber - 1;
         }
 
         ++index;
@@ -1231,6 +1469,7 @@
 
     accessUnit->meta()->setInt32("discontinuitySeq", mDiscontinuitySeq);
     accessUnit->meta()->setInt64("segmentStartTimeUs", getSegmentStartTimeUs(mSeqNumber));
+    accessUnit->meta()->setInt64("segmentDurationUs", getSegmentDurationUs(mSeqNumber));
     return accessUnit;
 }
 
@@ -1246,6 +1485,12 @@
         // ATSParser from skewing the timestamps of access units.
         extra->setInt64(IStreamListener::kKeyMediaTimeUs, 0);
 
+        // When adapting, signal a recent media time to the parser,
+        // so that PTS wrap around is handled for the new variant.
+        if (mStartTimeUs >= 0 && !mStartTimeUsRelative) {
+            extra->setInt64(IStreamListener::kKeyRecentMediaTimeUs, mStartTimeUs);
+        }
+
         mTSParser->signalDiscontinuity(
                 ATSParser::DISCONTINUITY_TIME, extra);
 
@@ -1269,30 +1514,15 @@
     for (size_t i = mPacketSources.size(); i-- > 0;) {
         sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
 
-        const char *key;
-        ATSParser::SourceType type;
         const LiveSession::StreamType stream = mPacketSources.keyAt(i);
-        switch (stream) {
-            case LiveSession::STREAMTYPE_VIDEO:
-                type = ATSParser::VIDEO;
-                key = "timeUsVideo";
-                break;
-
-            case LiveSession::STREAMTYPE_AUDIO:
-                type = ATSParser::AUDIO;
-                key = "timeUsAudio";
-                break;
-
-            case LiveSession::STREAMTYPE_SUBTITLES:
-            {
-                ALOGE("MPEG2 Transport streams do not contain subtitles.");
-                return ERROR_MALFORMED;
-                break;
-            }
-
-            default:
-                TRESPASS();
+        if (stream == LiveSession::STREAMTYPE_SUBTITLES) {
+            ALOGE("MPEG2 Transport streams do not contain subtitles.");
+            return ERROR_MALFORMED;
         }
+        const char *key = LiveSession::getKeyForStream(stream);
+        ATSParser::SourceType type =
+                (stream == LiveSession::STREAMTYPE_AUDIO) ?
+                        ATSParser::AUDIO : ATSParser::VIDEO;
 
         sp<AnotherPacketSource> source =
             static_cast<AnotherPacketSource *>(
@@ -1315,97 +1545,99 @@
             int64_t timeUs;
             CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
 
+            if (mSegmentFirstPTS < 0ll) {
+                mSegmentFirstPTS = timeUs;
+                if (!mStartTimeUsRelative) {
+                    int32_t firstSeqNumberInPlaylist;
+                    if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
+                                "media-sequence", &firstSeqNumberInPlaylist)) {
+                        firstSeqNumberInPlaylist = 0;
+                    }
+
+                    int32_t targetDurationSecs;
+                    CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
+                    int64_t targetDurationUs = targetDurationSecs * 1000000ll;
+                    // mStartup
+                    //   mStartup is true until we have queued a packet for all the streams
+                    //   we are fetching. We queue packets whose timestamps are greater than
+                    //   mStartTimeUs.
+                    // mSegmentStartTimeUs >= 0
+                    //   mSegmentStartTimeUs is non-negative when adapting or switching tracks
+                    // mSeqNumber > firstSeqNumberInPlaylist
+                    //   don't decrement mSeqNumber if it already points to the 1st segment
+                    // timeUs - mStartTimeUs > targetDurationUs:
+                    //   This and the 2 above conditions should only happen when adapting in a live
+                    //   stream; the old fetcher has already fetched to mStartTimeUs; the new fetcher
+                    //   would start fetching after timeUs, which should be greater than mStartTimeUs;
+                    //   the old fetcher would then continue fetching data until timeUs. We don't want
+                    //   timeUs to be too far ahead of mStartTimeUs because we want the old fetcher to
+                    //   stop as early as possible. The definition of being "too far ahead" is
+                    //   arbitrary; here we use targetDurationUs as threshold.
+                    int64_t targetDiffUs = (mSeekMode == LiveSession::kSeekModeNextSample
+                            ? 0 : targetDurationUs);
+                    if (mStartup && mSegmentStartTimeUs >= 0
+                            && mSeqNumber > firstSeqNumberInPlaylist
+                            && timeUs - mStartTimeUs > targetDiffUs) {
+                        // we just guessed a starting timestamp that is too high when adapting in a
+                        // live stream; re-adjust based on the actual timestamp extracted from the
+                        // media segment; if we didn't move backward after the re-adjustment
+                        // (newSeqNumber), start at least 1 segment prior.
+                        int32_t newSeqNumber = getSeqNumberWithAnchorTime(
+                                timeUs, targetDiffUs);
+                        if (newSeqNumber >= mSeqNumber) {
+                            --mSeqNumber;
+                        } else {
+                            mSeqNumber = newSeqNumber;
+                        }
+                        mStartTimeUsNotify = mNotify->dup();
+                        mStartTimeUsNotify->setInt32("what", kWhatStartedAt);
+                        mStartTimeUsNotify->setString("uri", mURI);
+                        mIDRFound = false;
+                        return -EAGAIN;
+                    }
+                }
+            }
             if (mStartup) {
                 if (!mFirstPTSValid) {
                     mFirstTimeUs = timeUs;
                     mFirstPTSValid = true;
                 }
+                bool startTimeReached = true;
                 if (mStartTimeUsRelative) {
                     timeUs -= mFirstTimeUs;
                     if (timeUs < 0) {
                         timeUs = 0;
                     }
+                    startTimeReached = (timeUs >= mStartTimeUs);
                 }
 
-                if (timeUs < mStartTimeUs || (isAvc && !mIDRFound)) {
-                    // buffer up to the closest preceding IDR frame
-                    ALOGV("timeUs %" PRId64 " us < mStartTimeUs %" PRId64 " us",
-                            timeUs, mStartTimeUs);
+                if (!startTimeReached || (isAvc && !mIDRFound)) {
+                    // buffer up to the closest preceding IDR frame in the next segment,
+                    // or the closest succeeding IDR frame after the exact position
                     if (isAvc) {
                         if (IsIDR(accessUnit)) {
                             mVideoBuffer->clear();
                             mIDRFound = true;
                         }
-                        if (mIDRFound) {
+                        if (mIDRFound && mStartTimeUsRelative && !startTimeReached) {
                             mVideoBuffer->queueAccessUnit(accessUnit);
                         }
                     }
-
-                    continue;
+                    if (!startTimeReached || (isAvc && !mIDRFound)) {
+                        continue;
+                    }
                 }
             }
 
             if (mStartTimeUsNotify != NULL) {
-                int32_t firstSeqNumberInPlaylist;
-                if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
-                            "media-sequence", &firstSeqNumberInPlaylist)) {
-                    firstSeqNumberInPlaylist = 0;
-                }
-
-                int32_t targetDurationSecs;
-                CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
-                int64_t targetDurationUs = targetDurationSecs * 1000000ll;
-                // mStartup
-                //   mStartup is true until we have queued a packet for all the streams
-                //   we are fetching. We queue packets whose timestamps are greater than
-                //   mStartTimeUs.
-                // mSegmentStartTimeUs >= 0
-                //   mSegmentStartTimeUs is non-negative when adapting or switching tracks
-                // mSeqNumber > firstSeqNumberInPlaylist
-                //   don't decrement mSeqNumber if it already points to the 1st segment
-                // timeUs - mStartTimeUs > targetDurationUs:
-                //   This and the 2 above conditions should only happen when adapting in a live
-                //   stream; the old fetcher has already fetched to mStartTimeUs; the new fetcher
-                //   would start fetching after timeUs, which should be greater than mStartTimeUs;
-                //   the old fetcher would then continue fetching data until timeUs. We don't want
-                //   timeUs to be too far ahead of mStartTimeUs because we want the old fetcher to
-                //   stop as early as possible. The definition of being "too far ahead" is
-                //   arbitrary; here we use targetDurationUs as threshold.
-                if (mStartup && mSegmentStartTimeUs >= 0
-                        && mSeqNumber > firstSeqNumberInPlaylist
-                        && timeUs - mStartTimeUs > targetDurationUs) {
-                    // we just guessed a starting timestamp that is too high when adapting in a
-                    // live stream; re-adjust based on the actual timestamp extracted from the
-                    // media segment; if we didn't move backward after the re-adjustment
-                    // (newSeqNumber), start at least 1 segment prior.
-                    int32_t newSeqNumber = getSeqNumberWithAnchorTime(timeUs);
-                    if (newSeqNumber >= mSeqNumber) {
-                        --mSeqNumber;
-                    } else {
-                        mSeqNumber = newSeqNumber;
-                    }
-                    mStartTimeUsNotify = mNotify->dup();
-                    mStartTimeUsNotify->setInt32("what", kWhatStartedAt);
-                    return -EAGAIN;
-                }
-
-                int32_t seq;
-                if (!mStartTimeUsNotify->findInt32("discontinuitySeq", &seq)) {
-                    mStartTimeUsNotify->setInt32("discontinuitySeq", mDiscontinuitySeq);
-                }
-                int64_t startTimeUs;
-                if (!mStartTimeUsNotify->findInt64(key, &startTimeUs)) {
-                    mStartTimeUsNotify->setInt64(key, timeUs);
-
-                    uint32_t streamMask = 0;
-                    mStartTimeUsNotify->findInt32("streamMask", (int32_t *) &streamMask);
+                uint32_t streamMask = 0;
+                mStartTimeUsNotify->findInt32("streamMask", (int32_t *) &streamMask);
+                if (!(streamMask & mPacketSources.keyAt(i))) {
                     streamMask |= mPacketSources.keyAt(i);
                     mStartTimeUsNotify->setInt32("streamMask", streamMask);
 
                     if (streamMask == mStreamTypeMask) {
                         mStartup = false;
-                        mStartTimeUsNotify->post();
-                        mStartTimeUsNotify.clear();
                     }
                 }
             }
@@ -1419,7 +1651,6 @@
                         || !mStopParams->findInt64(key, &stopTimeUs)
                         || (discontinuitySeq == mDiscontinuitySeq
                                 && timeUs >= stopTimeUs)) {
-                    packetSource->queueAccessUnit(mSession->createFormatChangeBuffer());
                     mStreamTypeMask &= ~stream;
                     mPacketSources.removeItemsAt(i);
                     break;
@@ -1666,10 +1897,13 @@
                 CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
                 int64_t targetDurationUs = targetDurationSecs * 1000000ll;
 
+                int64_t targetDiffUs = (mSeekMode == LiveSession::kSeekModeNextSample
+                        ? 0 : targetDurationUs);
                 // Duplicated logic from how we handle .ts playlists.
                 if (mStartup && mSegmentStartTimeUs >= 0
-                        && timeUs - mStartTimeUs > targetDurationUs) {
-                    int32_t newSeqNumber = getSeqNumberWithAnchorTime(timeUs);
+                        && timeUs - mStartTimeUs > targetDiffUs) {
+                    int32_t newSeqNumber = getSeqNumberWithAnchorTime(
+                            timeUs, targetDiffUs);
                     if (newSeqNumber >= mSeqNumber) {
                         --mSeqNumber;
                     } else {
@@ -1678,11 +1912,7 @@
                     return -EAGAIN;
                 }
 
-                mStartTimeUsNotify->setInt64("timeUsAudio", timeUs);
-                mStartTimeUsNotify->setInt32("discontinuitySeq", mDiscontinuitySeq);
                 mStartTimeUsNotify->setInt32("streamMask", LiveSession::STREAMTYPE_AUDIO);
-                mStartTimeUsNotify->post();
-                mStartTimeUsNotify.clear();
                 mStartup = false;
             }
         }
@@ -1695,7 +1925,6 @@
                     || discontinuitySeq > mDiscontinuitySeq
                     || !mStopParams->findInt64("timeUsAudio", &stopTimeUs)
                     || (discontinuitySeq == mDiscontinuitySeq && unitTimeUs >= stopTimeUs)) {
-                packetSource->queueAccessUnit(mSession->createFormatChangeBuffer());
                 mStreamTypeMask = 0;
                 mPacketSources.clear();
                 return ERROR_OUT_OF_RANGE;
@@ -1732,4 +1961,15 @@
     msg->post();
 }
 
+void PlaylistFetcher::updateTargetDuration() {
+    int32_t targetDurationSecs;
+    CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
+    int64_t targetDurationUs = targetDurationSecs * 1000000ll;
+
+    sp<AMessage> msg = mNotify->dup();
+    msg->setInt32("what", kWhatTargetDurationUpdate);
+    msg->setInt64("targetDurationUs", targetDurationUs);
+    msg->post();
+}
+
 }  // namespace android
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index b82e50d..dab56df 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -44,9 +44,11 @@
         kWhatStopped,
         kWhatError,
         kWhatDurationUpdate,
+        kWhatTargetDurationUpdate,
         kWhatPrepared,
         kWhatPreparationFailed,
         kWhatStartedAt,
+        kWhatStopReached,
     };
 
     PlaylistFetcher(
@@ -64,10 +66,10 @@
             int64_t startTimeUs = -1ll,         // starting timestamps
             int64_t segmentStartTimeUs = -1ll, // starting position within playlist
             // startTimeUs!=segmentStartTimeUs only when playlist is live
-            int32_t startDiscontinuitySeq = 0,
-            bool adaptive = false);
+            int32_t startDiscontinuitySeq = -1,
+            LiveSession::SeekMode seekMode = LiveSession::kSeekModeExactPosition);
 
-    void pauseAsync(bool immediate = false);
+    void pauseAsync(float thresholdRatio);
 
     void stopAsync(bool clear = true);
 
@@ -95,6 +97,8 @@
         kWhatDownloadNext   = 'dlnx',
     };
 
+    struct DownloadState;
+
     static const int64_t kMaxMonitorDelayUs;
     static const int32_t kNumSkipFrames;
 
@@ -105,6 +109,7 @@
     sp<AMessage> mNotify;
     sp<AMessage> mStartTimeUsNotify;
 
+    sp<HTTPBase> mHTTPDataSource;
     sp<LiveSession> mSession;
     AString mURI;
 
@@ -131,8 +136,7 @@
     int32_t mNumRetries;
     bool mStartup;
     bool mIDRFound;
-    bool mAdaptive;
-    bool mPrepared;
+    int32_t mSeekMode;
     bool mTimeChangeSignaled;
     int64_t mNextPTSTimeUs;
 
@@ -141,9 +145,6 @@
 
     int32_t mLastDiscontinuitySeq;
 
-    Mutex mStoppingLock;
-    bool mStopping;
-
     enum RefreshState {
         INITIAL_MINIMUM_RELOAD_DELAY,
         FIRST_UNCHANGED_RELOAD_ATTEMPT,
@@ -157,8 +158,8 @@
     sp<ATSParser> mTSParser;
 
     bool mFirstPTSValid;
-    uint64_t mFirstPTS;
     int64_t mFirstTimeUs;
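+    // timestamp of the first access unit extracted from the current segment
+    // (-1 until one is seen)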
+    int64_t mSegmentFirstPTS;
     sp<AnotherPacketSource> mVideoBuffer;
 
     // Stores the initialization vector to decrypt the next block of cipher text, which can
@@ -166,6 +167,11 @@
     // the last block of cipher text (cipher-block chaining).
     unsigned char mAESInitVec[16];
 
+    Mutex mThresholdLock;
+    float mThresholdRatio;
+
+    sp<DownloadState> mDownloadState;
+
     // Set first to true if decrypting the first segment of a playlist segment. When
     // first is true, reset the initialization vector based on the available
     // information in the manifest; otherwise, use the initialization vector as
@@ -181,7 +187,8 @@
 
     void postMonitorQueue(int64_t delayUs = 0, int64_t minDelayUs = 0);
     void cancelMonitorQueue();
-    void setStopping(bool stopping);
+    void setStoppingThreshold(float thresholdRatio);
+    bool shouldPauseDownload();
 
     int64_t delayUsToRefreshPlaylist() const;
     status_t refreshPlaylist();
@@ -189,12 +196,19 @@
     // Returns the media time in us of the segment specified by seqNumber.
     // This is computed by summing the durations of all segments before it.
     int64_t getSegmentStartTimeUs(int32_t seqNumber) const;
+    // Returns the duration time in us of the segment specified.
+    int64_t getSegmentDurationUs(int32_t seqNumber) const;
 
     status_t onStart(const sp<AMessage> &msg);
     void onPause();
     void onStop(const sp<AMessage> &msg);
     void onMonitorQueue();
     void onDownloadNext();
+    bool initDownloadState(
+            AString &uri,
+            sp<AMessage> &itemMeta,
+            int32_t &firstSeqNumberInPlaylist,
+            int32_t &lastSeqNumberInPlaylist);
 
     // Resume a fetcher to continue until the stopping point stored in msg.
     status_t onResumeUntil(const sp<AMessage> &msg);
@@ -208,16 +222,19 @@
     status_t extractAndQueueAccessUnits(
             const sp<ABuffer> &buffer, const sp<AMessage> &itemMeta);
 
+    void notifyStopReached();
     void notifyError(status_t err);
 
     void queueDiscontinuity(
             ATSParser::DiscontinuityType type, const sp<AMessage> &extra);
 
-    int32_t getSeqNumberWithAnchorTime(int64_t anchorTimeUs) const;
+    int32_t getSeqNumberWithAnchorTime(
+            int64_t anchorTimeUs, int64_t targetDurationUs) const;
     int32_t getSeqNumberForDiscontinuity(size_t discontinuitySeq) const;
     int32_t getSeqNumberForTime(int64_t timeUs) const;
 
     void updateDuration();
+    void updateTargetDuration();
 
     DISALLOW_EVIL_CONSTRUCTORS(PlaylistFetcher);
 };
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 6786506..0a868bc 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -48,7 +48,8 @@
 static const size_t kTSPacketSize = 188;
 
 struct ATSParser::Program : public RefBase {
-    Program(ATSParser *parser, unsigned programNumber, unsigned programMapPID);
+    Program(ATSParser *parser, unsigned programNumber, unsigned programMapPID,
+            int64_t lastRecoveredPTS);
 
     bool parsePSISection(
             unsigned pid, ABitReader *br, status_t *err);
@@ -166,10 +167,12 @@
     PSISection();
 
     status_t append(const void *data, size_t size);
+    void setSkipBytes(uint8_t skip);
     void clear();
 
     bool isComplete() const;
     bool isEmpty() const;
+    bool isCRCOkay() const;
 
     const uint8_t *data() const;
     size_t size() const;
@@ -179,6 +182,8 @@
 
 private:
     sp<ABuffer> mBuffer;
+    uint8_t mSkipBytes;
+    static uint32_t CRC_TABLE[];
 
     DISALLOW_EVIL_CONSTRUCTORS(PSISection);
 };
@@ -186,13 +191,14 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 ATSParser::Program::Program(
-        ATSParser *parser, unsigned programNumber, unsigned programMapPID)
+        ATSParser *parser, unsigned programNumber, unsigned programMapPID,
+        int64_t lastRecoveredPTS)
     : mParser(parser),
       mProgramNumber(programNumber),
       mProgramMapPID(programMapPID),
       mFirstPTSValid(false),
       mFirstPTS(0),
-      mLastRecoveredPTS(-1ll) {
+      mLastRecoveredPTS(lastRecoveredPTS) {
     ALOGV("new program number %u", programNumber);
 }
 
@@ -1037,6 +1043,7 @@
       mAbsoluteTimeAnchorUs(-1ll),
       mTimeOffsetValid(false),
       mTimeOffsetUs(0ll),
+      mLastRecoveredPTS(-1ll),
       mNumTSPacketsParsed(0),
       mNumPCRs(0) {
     mPSISections.add(0 /* PID */, new PSISection);
@@ -1055,11 +1062,21 @@
 void ATSParser::signalDiscontinuity(
         DiscontinuityType type, const sp<AMessage> &extra) {
     int64_t mediaTimeUs;
-    if ((type & DISCONTINUITY_TIME)
-            && extra != NULL
-            && extra->findInt64(
-                IStreamListener::kKeyMediaTimeUs, &mediaTimeUs)) {
-        mAbsoluteTimeAnchorUs = mediaTimeUs;
+    if ((type & DISCONTINUITY_TIME) && extra != NULL) {
+        if (extra->findInt64(IStreamListener::kKeyMediaTimeUs, &mediaTimeUs)) {
+            mAbsoluteTimeAnchorUs = mediaTimeUs;
+        }
+        if ((mFlags & TS_TIMESTAMPS_ARE_ABSOLUTE)
+                && extra->findInt64(
+                    IStreamListener::kKeyRecentMediaTimeUs, &mediaTimeUs)) {
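+            // Undo the absolute-time anchor and time offset, then convert the
+            // remaining media time to a 90kHz PTS; new programs start PTS
+            // recovery (wraparound handling) from this value.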
+            if (mAbsoluteTimeAnchorUs >= 0ll) {
+                mediaTimeUs -= mAbsoluteTimeAnchorUs;
+            }
+            if (mTimeOffsetValid) {
+                mediaTimeUs -= mTimeOffsetUs;
+            }
+            mLastRecoveredPTS = (mediaTimeUs * 9) / 100;
+        }
     } else if (type == DISCONTINUITY_ABSOLUTE_TIME) {
         int64_t timeUs;
         CHECK(extra->findInt64("timeUs", &timeUs));
@@ -1143,7 +1160,7 @@
 
             if (!found) {
                 mPrograms.push(
-                        new Program(this, program_number, programMapPID));
+                        new Program(this, program_number, programMapPID, mLastRecoveredPTS));
             }
 
             if (mPSISections.indexOfKey(programMapPID) < 0) {
@@ -1171,6 +1188,7 @@
             }
 
             unsigned skip = br->getBits(8);
+            section->setSkipBytes(skip + 1);  // skip filler bytes + pointer field itself
             br->skipBits(skip * 8);
         }
 
@@ -1185,6 +1203,9 @@
             return OK;
         }
 
+        if (!section->isCRCOkay()) {
+            return BAD_VALUE;
+        }
         ABitReader sectionBits(section->data(), section->size());
 
         if (PID == 0) {
@@ -1407,7 +1428,79 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-ATSParser::PSISection::PSISection() {
+
+// CRC32 used for PSI sections. The table was generated by the following command:
+// $ python pycrc.py --model crc-32-mpeg --algorithm table-driven --generate c
+// Visit http://www.tty1.net/pycrc/index_en.html for more details.
+uint32_t ATSParser::PSISection::CRC_TABLE[] = {
+    0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9,
+    0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005,
+    0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
+    0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd,
+    0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9,
+    0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75,
+    0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011,
+    0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd,
+    0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
+    0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5,
+    0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81,
+    0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
+    0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49,
+    0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95,
+    0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1,
+    0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d,
+    0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae,
+    0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072,
+    0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16,
+    0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca,
+    0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde,
+    0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02,
+    0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066,
+    0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
+    0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e,
+    0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692,
+    0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
+    0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a,
+    0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e,
+    0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2,
+    0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686,
+    0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a,
+    0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637,
+    0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb,
+    0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f,
+    0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53,
+    0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47,
+    0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b,
+    0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff,
+    0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623,
+    0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7,
+    0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
+    0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f,
+    0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3,
+    0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7,
+    0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b,
+    0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f,
+    0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3,
+    0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640,
+    0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c,
+    0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8,
+    0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24,
+    0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30,
+    0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec,
+    0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088,
+    0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654,
+    0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
+    0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c,
+    0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18,
+    0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4,
+    0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0,
+    0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c,
+    0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668,
+    0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4
+    };
+
+ATSParser::PSISection::PSISection() :
+    mSkipBytes(0) {
 }
 
 ATSParser::PSISection::~PSISection() {
@@ -1438,10 +1531,15 @@
     return OK;
 }
 
+void ATSParser::PSISection::setSkipBytes(uint8_t skip) {
+    mSkipBytes = skip;
+}
+
 void ATSParser::PSISection::clear() {
     if (mBuffer != NULL) {
         mBuffer->setRange(0, 0);
     }
+    mSkipBytes = 0;
 }
 
 bool ATSParser::PSISection::isComplete() const {
@@ -1465,4 +1563,30 @@
     return mBuffer == NULL ? 0 : mBuffer->size();
 }
 
+bool ATSParser::PSISection::isCRCOkay() const {
+    if (!isComplete()) {
+        return false;
+    }
+    uint8_t* data = mBuffer->data();
+
+    // Return true if the section_syntax_indicator says no syntax section (and thus no CRC) follows the section_length field.
+    if ((data[1] & 0x80) == 0) {
+        return true;
+    }
+
+    unsigned sectionLength = U16_AT(data + 1) & 0xfff;
+    ALOGV("sectionLength %u, skip %u", sectionLength, mSkipBytes);
+
+    // Skip the preceding pointer field and any filler bytes present when the payload start indicator is on.
+    sectionLength -= mSkipBytes;
+
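+    // CRC-32/MPEG-2 over the section data including the stored CRC bytes;
+    // an intact section leaves a residual of 0.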
+    uint32_t crc = 0xffffffff;
+    for (unsigned i = 0; i < sectionLength + 4 /* crc */; i++) {
+        uint8_t b = data[i];
+        int index = ((crc >> 24) ^ (b & 0xff)) & 0xff;
+        crc = CRC_TABLE[index] ^ (crc << 8);
+    }
+    ALOGV("crc: %08x\n", crc);
+    return (crc == 0);
+}
 }  // namespace android
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 75d76dc..a1405bd 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -46,6 +46,9 @@
             DISCONTINUITY_AUDIO_FORMAT
                 | DISCONTINUITY_VIDEO_FORMAT
                 | DISCONTINUITY_TIME,
+        DISCONTINUITY_FORMAT_ONLY       =
+            DISCONTINUITY_AUDIO_FORMAT
+                | DISCONTINUITY_VIDEO_FORMAT,
     };
 
     enum Flags {
@@ -115,6 +118,7 @@
 
     bool mTimeOffsetValid;
     int64_t mTimeOffsetUs;
+    int64_t mLastRecoveredPTS;
 
     size_t mNumTSPacketsParsed;
 
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 79a9b04..c5bb41b 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -19,6 +19,8 @@
 
 #include "AnotherPacketSource.h"
 
+#include "include/avc_utils.h"
+
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -27,6 +29,7 @@
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
 #include <utils/Vector.h>
 
 #include <inttypes.h>
@@ -38,6 +41,7 @@
 AnotherPacketSource::AnotherPacketSource(const sp<MetaData> &meta)
     : mIsAudio(false),
       mIsVideo(false),
+      mEnabled(true),
       mFormat(NULL),
       mLastQueuedTimeUs(0),
       mEOSResult(OK),
@@ -155,7 +159,6 @@
 
         const sp<ABuffer> buffer = *mBuffers.begin();
         mBuffers.erase(mBuffers.begin());
-        mLatestDequeuedMeta = buffer->meta()->dup();
 
         int32_t discontinuity;
         if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
@@ -166,6 +169,8 @@
             return INFO_DISCONTINUITY;
         }
 
+        mLatestDequeuedMeta = buffer->meta()->dup();
+
         sp<RefBase> object;
         if (buffer->meta()->findObject("format", &object)) {
             setFormat(static_cast<MetaData*>(object.get()));
@@ -205,20 +210,26 @@
         return;
     }
 
-    int64_t lastQueuedTimeUs;
-    CHECK(buffer->meta()->findInt64("timeUs", &lastQueuedTimeUs));
-    mLastQueuedTimeUs = lastQueuedTimeUs;
-    ALOGV("queueAccessUnit timeUs=%" PRIi64 " us (%.2f secs)", mLastQueuedTimeUs, mLastQueuedTimeUs / 1E6);
-
     Mutex::Autolock autoLock(mLock);
     mBuffers.push_back(buffer);
     mCondition.signal();
 
     int32_t discontinuity;
     if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
+        // discontinuity handling needs to be consistent with queueDiscontinuity()
         ++mQueuedDiscontinuityCount;
+        mLastQueuedTimeUs = 0ll;
+        mEOSResult = OK;
+        mLatestEnqueuedMeta = NULL;
+        return;
     }
 
+    int64_t lastQueuedTimeUs;
+    CHECK(buffer->meta()->findInt64("timeUs", &lastQueuedTimeUs));
+    mLastQueuedTimeUs = lastQueuedTimeUs;
+    ALOGV("queueAccessUnit timeUs=%" PRIi64 " us (%.2f secs)",
+            mLastQueuedTimeUs, mLastQueuedTimeUs / 1E6);
+
     if (mLatestEnqueuedMeta == NULL) {
         mLatestEnqueuedMeta = buffer->meta()->dup();
     } else {
@@ -298,6 +309,10 @@
 
 bool AnotherPacketSource::hasBufferAvailable(status_t *finalResult) {
     Mutex::Autolock autoLock(mLock);
+    *finalResult = OK;
+    if (!mEnabled) {
+        return false;
+    }
     if (!mBuffers.empty()) {
         return true;
     }
@@ -306,6 +321,24 @@
     return false;
 }
 
+bool AnotherPacketSource::hasDataBufferAvailable(status_t *finalResult) {
+    Mutex::Autolock autoLock(mLock);
+    *finalResult = OK;
+    if (!mEnabled) {
+        return false;
+    }
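+    // Look for at least one buffer that is not a discontinuity marker.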
+    List<sp<ABuffer> >::iterator it;
+    for (it = mBuffers.begin(); it != mBuffers.end(); it++) {
+        int32_t discontinuity;
+        if (!(*it)->meta()->findInt32("discontinuity", &discontinuity)) {
+            return true;
+        }
+    }
+
+    *finalResult = mEOSResult;
+    return false;
+}
+
 int64_t AnotherPacketSource::getBufferedDurationUs(status_t *finalResult) {
     Mutex::Autolock autoLock(mLock);
     return getBufferedDurationUs_l(finalResult);
@@ -424,4 +457,152 @@
     return mLatestDequeuedMeta;
 }
 
+void AnotherPacketSource::enable(bool enable) {
+    Mutex::Autolock autoLock(mLock);
+    mEnabled = enable;
+}
+
+/*
+ * returns the meta of the sample that is delayUs after the queue head
+ * (NULL if no such sample is available)
+ */
+sp<AMessage> AnotherPacketSource::getMetaAfterLastDequeued(int64_t delayUs) {
+    Mutex::Autolock autoLock(mLock);
+    int64_t firstUs = -1;
+    int64_t lastUs = -1;
+    int64_t durationUs = 0;
+
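+    // Walk the queue, accumulating buffered duration across discontinuities until
+    // at least delayUs has been covered, then return that sample's meta.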
+    List<sp<ABuffer> >::iterator it;
+    for (it = mBuffers.begin(); it != mBuffers.end(); ++it) {
+        const sp<ABuffer> &buffer = *it;
+        int32_t discontinuity;
+        if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
+            durationUs += lastUs - firstUs;
+            firstUs = -1;
+            lastUs = -1;
+            continue;
+        }
+        int64_t timeUs;
+        if (buffer->meta()->findInt64("timeUs", &timeUs)) {
+            if (firstUs < 0) {
+                firstUs = timeUs;
+            }
+            if (lastUs < 0 || timeUs > lastUs) {
+                lastUs = timeUs;
+            }
+            if (durationUs + (lastUs - firstUs) >= delayUs) {
+                return buffer->meta();
+            }
+        }
+    }
+    return NULL;
+}
+
+/*
+ * removes samples with time equal to or after meta
+ */
+void AnotherPacketSource::trimBuffersAfterMeta(
+        const sp<AMessage> &meta) {
+    if (meta == NULL) {
+        ALOGW("trimming with NULL meta, ignoring");
+        return;
+    }
+
+    Mutex::Autolock autoLock(mLock);
+    if (mBuffers.empty()) {
+        return;
+    }
+
+    HLSTime stopTime(meta);
+    ALOGV("trimBuffersAfterMeta: discontinuitySeq %zu, timeUs %lld",
+            stopTime.mSeq, (long long)stopTime.mTimeUs);
+
+    List<sp<ABuffer> >::iterator it;
+    sp<AMessage> newLatestEnqueuedMeta = NULL;
+    int64_t newLastQueuedTimeUs = 0;
+    size_t newDiscontinuityCount = 0;
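+    // Scan up to the first sample at or past stopTime; everything from that point
+    // on is erased, and the enqueue bookkeeping is rewound to match.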
+    for (it = mBuffers.begin(); it != mBuffers.end(); ++it) {
+        const sp<ABuffer> &buffer = *it;
+        int32_t discontinuity;
+        if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
+            newDiscontinuityCount++;
+            continue;
+        }
+
+        HLSTime curTime(buffer->meta());
+        if (!(curTime < stopTime)) {
+            ALOGV("trimming from %lld (inclusive) to end",
+                    (long long)curTime.mTimeUs);
+            break;
+        }
+        newLatestEnqueuedMeta = buffer->meta();
+        newLastQueuedTimeUs = curTime.mTimeUs;
+    }
+    mBuffers.erase(it, mBuffers.end());
+    mLatestEnqueuedMeta = newLatestEnqueuedMeta;
+    mLastQueuedTimeUs = newLastQueuedTimeUs;
+    mQueuedDiscontinuityCount = newDiscontinuityCount;
+}
+
+/*
+ * removes samples with time equal to or before meta;
+ * returns the meta of the first sample left in the queue.
+ *
+ * (for AVC, if trimming happens, the remaining samples will always
+ * start at the next IDR.)
+ */
+sp<AMessage> AnotherPacketSource::trimBuffersBeforeMeta(
+        const sp<AMessage> &meta) {
+    HLSTime startTime(meta);
+    ALOGV("trimBuffersBeforeMeta: discontinuitySeq %zu, timeUs %lld",
+            startTime.mSeq, (long long)startTime.mTimeUs);
+
+    sp<AMessage> firstMeta;
+    Mutex::Autolock autoLock(mLock);
+    if (mBuffers.empty()) {
+        return NULL;
+    }
+
+    sp<MetaData> format;
+    bool isAvc = false;
+
+    List<sp<ABuffer> >::iterator it;
+    size_t discontinuityCount = 0;
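+    // Drop every sample at or before startTime; for AVC, only stop at an IDR so
+    // that decoding can restart cleanly from the first sample that is kept.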
+    for (it = mBuffers.begin(); it != mBuffers.end(); ++it) {
+        const sp<ABuffer> &buffer = *it;
+        int32_t discontinuity;
+        if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
+            format = NULL;
+            isAvc = false;
+            discontinuityCount++;
+            continue;
+        }
+        if (format == NULL) {
+            sp<RefBase> object;
+            if (buffer->meta()->findObject("format", &object)) {
+                const char* mime;
+                format = static_cast<MetaData*>(object.get());
+                isAvc = format != NULL
+                        && format->findCString(kKeyMIMEType, &mime)
+                        && !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
+            }
+        }
+        if (isAvc && !IsIDR(buffer)) {
+            continue;
+        }
+
+        HLSTime curTime(buffer->meta());
+        if (startTime < curTime) {
+            ALOGV("trimming from beginning to %lld (not inclusive)",
+                    (long long)curTime.mTimeUs);
+            firstMeta = buffer->meta();
+            break;
+        }
+    }
+    mBuffers.erase(mBuffers.begin(), it);
+    mQueuedDiscontinuityCount -= discontinuityCount;
+    mLatestDequeuedMeta = NULL;
+    return firstMeta;
+}
+
 }  // namespace android
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
index 809a858..fa7dd6a 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
@@ -43,8 +43,12 @@
 
     void clear();
 
+    // Returns true if we have any packets, including discontinuities
     bool hasBufferAvailable(status_t *finalResult);
 
+    // Returns true if we have packets that are not discontinuities
+    bool hasDataBufferAvailable(status_t *finalResult);
+
     // Returns the difference between the last and the first queued
     // presentation timestamps since the last discontinuity (if any).
     int64_t getBufferedDurationUs(status_t *finalResult);
@@ -66,8 +70,14 @@
 
     bool isFinished(int64_t duration) const;
 
+    void enable(bool enable);
+
     sp<AMessage> getLatestEnqueuedMeta();
     sp<AMessage> getLatestDequeuedMeta();
+    sp<AMessage> getMetaAfterLastDequeued(int64_t delayUs);
+
+    void trimBuffersAfterMeta(const sp<AMessage> &meta);
+    sp<AMessage> trimBuffersBeforeMeta(const sp<AMessage> &meta);
 
 protected:
     virtual ~AnotherPacketSource();
@@ -78,6 +88,7 @@
 
     bool mIsAudio;
     bool mIsVideo;
+    bool mEnabled;
     sp<MetaData> mFormat;
     int64_t mLastQueuedTimeUs;
     List<sp<ABuffer> > mBuffers;
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index 0ad0bf3..0e2e48c 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -19,6 +19,7 @@
 	libcamera_metadata\
 	libcameraservice \
 	libmedialogservice \
+	libresourcemanagerservice \
 	libcutils \
 	libnbaio \
 	libmedia \
@@ -30,14 +31,18 @@
 	libradioservice
 
 LOCAL_STATIC_LIBRARIES := \
-	libregistermsext
+        libregistermsext
 
 LOCAL_C_INCLUDES := \
     frameworks/av/media/libmediaplayerservice \
     frameworks/av/services/medialog \
     frameworks/av/services/audioflinger \
     frameworks/av/services/audiopolicy \
+    frameworks/av/services/audiopolicy/common/managerdefinitions/include \
+    frameworks/av/services/audiopolicy/common/include \
+    frameworks/av/services/audiopolicy/engine/interface \
     frameworks/av/services/camera/libcameraservice \
+    frameworks/av/services/mediaresourcemanager \
     $(call include-path-for, audio-utils) \
     frameworks/av/services/soundtrigger \
     frameworks/av/services/radio
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 7a1048c..83a5ba1 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -312,8 +312,10 @@
                 String8(optionalParameters[i].mValue));
     }
     String8 defaultUrl;
+    DrmPlugin::KeyRequestType keyRequestType;
     status_t status = mObj->mDrm->getKeyRequest(*iter, mdInit, String8(mimeType),
-            mdKeyType, mdOptionalParameters, mObj->mKeyRequest, defaultUrl);
+            mdKeyType, mdOptionalParameters, mObj->mKeyRequest, defaultUrl,
+            &keyRequestType);
     if (status != OK) {
         return translateStatus(status);
     } else {
@@ -725,4 +727,3 @@
 }
 
 } // extern "C"
-
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 642ff82..fee2347 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -39,6 +39,9 @@
     AudioFlinger.cpp            \
     Threads.cpp                 \
     Tracks.cpp                  \
+    AudioHwDevice.cpp           \
+    AudioStreamOut.cpp          \
+    SpdifStreamOut.cpp          \
     Effects.cpp                 \
     AudioMixer.cpp.arm          \
     PatchPanel.cpp
@@ -52,6 +55,7 @@
 
 LOCAL_SHARED_LIBRARIES := \
     libaudioresampler \
+    libaudiospdif \
     libaudioutils \
     libcommon_time_client \
     libcutils \
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 461b5d3..f3206cb 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -272,7 +272,7 @@
 };
 #define ARRAY_SIZE(x) (sizeof((x))/sizeof(((x)[0])))
 
-AudioFlinger::AudioHwDevice* AudioFlinger::findSuitableHwDev_l(
+AudioHwDevice* AudioFlinger::findSuitableHwDev_l(
         audio_module_handle_t module,
         audio_devices_t devices)
 {
@@ -1716,8 +1716,6 @@
 
     mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
 
-    audio_stream_out_t *outStream = NULL;
-
     // FOR TESTING ONLY:
     // This if statement allows overriding the audio policy settings
     // and forcing a specific format or channel mask to the HAL/Sink device for testing.
@@ -1739,25 +1737,18 @@
         }
     }
 
-    status_t status = hwDevHal->open_output_stream(hwDevHal,
-                                                   *output,
-                                                   devices,
-                                                   flags,
-                                                   config,
-                                                   &outStream,
-                                                   address.string());
+    AudioStreamOut *outputStream = NULL;
+    status_t status = outHwDev->openOutputStream(
+            &outputStream,
+            *output,
+            devices,
+            flags,
+            config,
+            address.string());
 
     mHardwareStatus = AUDIO_HW_IDLE;
-    ALOGV("openOutput_l() openOutputStream returned output %p, sampleRate %d, Format %#x, "
-            "channelMask %#x, status %d",
-            outStream,
-            config->sample_rate,
-            config->format,
-            config->channel_mask,
-            status);
 
-    if (status == NO_ERROR && outStream != NULL) {
-        AudioStreamOut *outputStream = new AudioStreamOut(outHwDev, outStream, flags);
+    if (status == NO_ERROR) {
 
         PlaybackThread *thread;
         if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
@@ -1787,7 +1778,7 @@
                                   uint32_t *latencyMs,
                                   audio_output_flags_t flags)
 {
-    ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x",
+    ALOGI("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x",
               module,
               (devices != NULL) ? *devices : 0,
               config->sample_rate,
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 7b76185..c7d9161 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -56,6 +56,9 @@
 #include <media/nbaio/NBAIO.h>
 #include "AudioWatchdog.h"
 #include "AudioMixer.h"
+#include "AudioStreamOut.h"
+#include "SpdifStreamOut.h"
+#include "AudioHwDevice.h"
 
 #include <powermanager/IPowerManager.h>
 
@@ -311,7 +314,6 @@
                                         wp<RefBase> cookie);
 
 private:
-    class AudioHwDevice;    // fwd declaration for findSuitableHwDev_l
 
                audio_mode_t getMode() const { return mMode; }
 
@@ -449,7 +451,7 @@
     class EffectModule;
     class EffectHandle;
     class EffectChain;
-    struct AudioStreamOut;
+
     struct AudioStreamIn;
 
     struct  stream_type_t {
@@ -586,57 +588,11 @@
                 // Return true if the effect was found in mOrphanEffectChains, false otherwise.
                 bool            updateOrphanEffectChains(const sp<EffectModule>& effect);
 
-    class AudioHwDevice {
-    public:
-        enum Flags {
-            AHWD_CAN_SET_MASTER_VOLUME  = 0x1,
-            AHWD_CAN_SET_MASTER_MUTE    = 0x2,
-        };
 
-        AudioHwDevice(audio_module_handle_t handle,
-                      const char *moduleName,
-                      audio_hw_device_t *hwDevice,
-                      Flags flags)
-            : mHandle(handle), mModuleName(strdup(moduleName))
-            , mHwDevice(hwDevice)
-            , mFlags(flags) { }
-        /*virtual*/ ~AudioHwDevice() { free((void *)mModuleName); }
-
-        bool canSetMasterVolume() const {
-            return (0 != (mFlags & AHWD_CAN_SET_MASTER_VOLUME));
-        }
-
-        bool canSetMasterMute() const {
-            return (0 != (mFlags & AHWD_CAN_SET_MASTER_MUTE));
-        }
-
-        audio_module_handle_t handle() const { return mHandle; }
-        const char *moduleName() const { return mModuleName; }
-        audio_hw_device_t *hwDevice() const { return mHwDevice; }
-        uint32_t version() const { return mHwDevice->common.version; }
-
-    private:
-        const audio_module_handle_t mHandle;
-        const char * const mModuleName;
-        audio_hw_device_t * const mHwDevice;
-        const Flags mFlags;
-    };
-
-    // AudioStreamOut and AudioStreamIn are immutable, so their fields are const.
+    // AudioStreamIn is immutable, so its fields are const.
     // For emphasis, we could also make all pointers to them be "const *",
     // but that would clutter the code unnecessarily.
 
-    struct AudioStreamOut {
-        AudioHwDevice* const audioHwDev;
-        audio_stream_out_t* const stream;
-        const audio_output_flags_t flags;
-
-        audio_hw_device_t* hwDev() const { return audioHwDev->hwDevice(); }
-
-        AudioStreamOut(AudioHwDevice *dev, audio_stream_out_t *out, audio_output_flags_t flags) :
-            audioHwDev(dev), stream(out), flags(flags) {}
-    };
-
     struct AudioStreamIn {
         AudioHwDevice* const audioHwDev;
         audio_stream_in_t* const stream;
diff --git a/services/audioflinger/AudioHwDevice.cpp b/services/audioflinger/AudioHwDevice.cpp
new file mode 100644
index 0000000..09d86ea
--- /dev/null
+++ b/services/audioflinger/AudioHwDevice.cpp
@@ -0,0 +1,94 @@
+/*
+**
+** Copyright 2007, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "AudioHwDevice"
+//#define LOG_NDEBUG 0
+
+#include <hardware/audio.h>
+#include <utils/Log.h>
+
+#include <audio_utils/spdif/SPDIFEncoder.h>
+
+#include "AudioHwDevice.h"
+#include "AudioStreamOut.h"
+#include "SpdifStreamOut.h"
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+status_t AudioHwDevice::openOutputStream(
+        AudioStreamOut **ppStreamOut,
+        audio_io_handle_t handle,
+        audio_devices_t devices,
+        audio_output_flags_t flags,
+        struct audio_config *config,
+        const char *address)
+{
+
+    struct audio_config originalConfig = *config;
+    AudioStreamOut *outputStream = new AudioStreamOut(this, flags);
+
+    // Try to open the HAL first using the current format.
+    ALOGV("AudioHwDevice::openOutputStream(), try "
+            " sampleRate %d, Format %#x, "
+            "channelMask %#x",
+            config->sample_rate,
+            config->format,
+            config->channel_mask);
+    status_t status = outputStream->open(handle, devices, config, address);
+
+    if (status != NO_ERROR) {
+        delete outputStream;
+        outputStream = NULL;
+
+        // FIXME Look at any modification to the config.
+        // The HAL might modify the config to suggest a wrapped format.
+        // Log this so we can see what the HALs are doing.
+        ALOGI("AudioHwDevice::openOutputStream(), HAL returned"
+            " sampleRate %d, Format %#x, "
+            "channelMask %#x, status %d",
+            config->sample_rate,
+            config->format,
+            config->channel_mask,
+            status);
+
+        // If the data is encoded then try again using wrapped PCM.
+        bool wrapperNeeded = !audio_is_linear_pcm(originalConfig.format)
+                && ((flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0)
+                && ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0);
+
+        // FIXME - Add isEncodingSupported() query to SPDIF wrapper then
+        // call it from here.
+        if (wrapperNeeded) {
+            outputStream = new SpdifStreamOut(this, flags);
+            status = outputStream->open(handle, devices, &originalConfig, address);
+            if (status != NO_ERROR) {
+                ALOGE("ERROR - AudioHwDevice::openOutputStream(), SPDIF open returned %d",
+                    status);
+                delete outputStream;
+                outputStream = NULL;
+            }
+        }
+    }
+
+    *ppStreamOut = outputStream;
+    return status;
+}
+
+
+}; // namespace android
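The flow above first tries the HAL with the requested format and, only for encoded data on a direct (non-offloaded) output, retries with a wrapped-PCM SpdifStreamOut. A minimal standalone sketch of that decision follows; Config, the flag enum and tryOpen() are hypothetical stand-ins, not the AOSP types.

    // Toy model of the open-with-fallback decision in AudioHwDevice::openOutputStream().
    // All names below are illustrative stand-ins, not AOSP API.
    #include <cstdint>
    #include <cstdio>

    enum : uint32_t { FLAG_DIRECT = 0x1, FLAG_COMPRESS_OFFLOAD = 0x2 };

    struct Config { bool linearPcm; };

    // Pretend HAL open: this toy HAL only accepts linear PCM.
    static int tryOpen(const Config& cfg) { return cfg.linearPcm ? 0 : -1; }

    static int openWithFallback(Config cfg, uint32_t flags) {
        int status = tryOpen(cfg);                             // 1) try the requested format first
        if (status != 0) {
            const bool wrapperNeeded = !cfg.linearPcm          // encoded data...
                    && ((flags & FLAG_DIRECT) != 0)            // ...on a direct output...
                    && ((flags & FLAG_COMPRESS_OFFLOAD) == 0); // ...that is not offloaded
            if (wrapperNeeded) {
                cfg.linearPcm = true;                          // 2) retry as wrapped (IEC 61937) PCM
                status = tryOpen(cfg);
            }
        }
        return status;
    }

    int main() {
        std::printf("direct AC3 open: %d\n", openWithFallback({false}, FLAG_DIRECT)); // 0: fallback used
        std::printf("offload AC3 open: %d\n",
                openWithFallback({false}, FLAG_DIRECT | FLAG_COMPRESS_OFFLOAD));      // -1: no wrapping
        return 0;
    }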
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
new file mode 100644
index 0000000..b9f65c1
--- /dev/null
+++ b/services/audioflinger/AudioHwDevice.h
@@ -0,0 +1,88 @@
+/*
+**
+** Copyright 2007, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_AUDIO_HW_DEVICE_H
+#define ANDROID_AUDIO_HW_DEVICE_H
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+#include <hardware/audio.h>
+#include <utils/Errors.h>
+#include <system/audio.h>
+
+
+namespace android {
+
+class AudioStreamOut;
+
+class AudioHwDevice {
+public:
+    enum Flags {
+        AHWD_CAN_SET_MASTER_VOLUME  = 0x1,
+        AHWD_CAN_SET_MASTER_MUTE    = 0x2,
+    };
+
+    AudioHwDevice(audio_module_handle_t handle,
+                  const char *moduleName,
+                  audio_hw_device_t *hwDevice,
+                  Flags flags)
+        : mHandle(handle)
+        , mModuleName(strdup(moduleName))
+        , mHwDevice(hwDevice)
+        , mFlags(flags) { }
+    virtual ~AudioHwDevice() { free((void *)mModuleName); }
+
+    bool canSetMasterVolume() const {
+        return (0 != (mFlags & AHWD_CAN_SET_MASTER_VOLUME));
+    }
+
+    bool canSetMasterMute() const {
+        return (0 != (mFlags & AHWD_CAN_SET_MASTER_MUTE));
+    }
+
+    audio_module_handle_t handle() const { return mHandle; }
+    const char *moduleName() const { return mModuleName; }
+    audio_hw_device_t *hwDevice() const { return mHwDevice; }
+    uint32_t version() const { return mHwDevice->common.version; }
+
+    /** This method creates and opens the audio hardware output stream.
+     * The "address" parameter qualifies the "devices" audio device type if needed.
+     * The address format depends on the device type:
+     * - Bluetooth devices use the MAC address of the device in the form "00:11:22:AA:BB:CC"
+     * - USB devices use the ALSA card and device numbers in the form "card=X;device=Y"
+     * - Other devices may use a number or any other string.
+     */
+    status_t openOutputStream(
+            AudioStreamOut **ppStreamOut,
+            audio_io_handle_t handle,
+            audio_devices_t devices,
+            audio_output_flags_t flags,
+            struct audio_config *config,
+            const char *address);
+
+private:
+    const audio_module_handle_t mHandle;
+    const char * const          mModuleName;
+    audio_hw_device_t * const   mHwDevice;
+    const Flags                 mFlags;
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_HW_DEVICE_H
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 93d821a..dddca02 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -69,9 +69,9 @@
 #define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
 #endif
 
-// Set kUseNewMixer to true to use the new mixer engine. Otherwise the
-// original code will be used.  This is false for now.
-static const bool kUseNewMixer = false;
+// Set kUseNewMixer to true to use the new mixer engine always. Otherwise the
+// original code will be used for stereo sinks, the new mixer for multichannel.
+static const bool kUseNewMixer = true;
 
 // Set kUseFloat to true to allow floating input into the mixer engine.
 // If kUseNewMixer is false, this is ignored or may be overridden internally
diff --git a/services/audioflinger/AudioStreamOut.cpp b/services/audioflinger/AudioStreamOut.cpp
new file mode 100644
index 0000000..e6d8f09
--- /dev/null
+++ b/services/audioflinger/AudioStreamOut.cpp
@@ -0,0 +1,117 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "AudioFlinger"
+//#define LOG_NDEBUG 0
+
+#include <hardware/audio.h>
+#include <utils/Log.h>
+
+#include "AudioHwDevice.h"
+#include "AudioStreamOut.h"
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+AudioStreamOut::AudioStreamOut(AudioHwDevice *dev, audio_output_flags_t flags)
+        : audioHwDev(dev)
+        , stream(NULL)
+        , flags(flags)
+{
+}
+
+audio_hw_device_t* AudioStreamOut::hwDev() const
+{
+    return audioHwDev->hwDevice();
+}
+
+status_t AudioStreamOut::getRenderPosition(uint32_t *frames)
+{
+    if (stream == NULL) {
+        return NO_INIT;
+    }
+    return stream->get_render_position(stream, frames);
+}
+
+status_t AudioStreamOut::getPresentationPosition(uint64_t *frames, struct timespec *timestamp)
+{
+    if (stream == NULL) {
+        return NO_INIT;
+    }
+    return stream->get_presentation_position(stream, frames, timestamp);
+}
+
+status_t AudioStreamOut::open(
+        audio_io_handle_t handle,
+        audio_devices_t devices,
+        struct audio_config *config,
+        const char *address)
+{
+    audio_stream_out_t* outStream;
+    int status = hwDev()->open_output_stream(
+            hwDev(),
+            handle,
+            devices,
+            flags,
+            config,
+            &outStream,
+            address);
+    ALOGV("AudioStreamOut::open(), HAL open_output_stream returned "
+            " %p, sampleRate %d, Format %#x, "
+            "channelMask %#x, status %d",
+            outStream,
+            config->sample_rate,
+            config->format,
+            config->channel_mask,
+            status);
+
+    if (status == NO_ERROR) {
+        stream = outStream;
+    }
+
+    return status;
+}
+
+size_t AudioStreamOut::getFrameSize()
+{
+    ALOG_ASSERT(stream != NULL);
+    return audio_stream_out_frame_size(stream);
+}
+
+status_t AudioStreamOut::flush()
+{
+    ALOG_ASSERT(stream != NULL);
+    if (stream->flush != NULL) {
+        return stream->flush(stream);
+    }
+    return NO_ERROR;
+}
+
+status_t AudioStreamOut::standby()
+{
+    ALOG_ASSERT(stream != NULL);
+    return stream->common.standby(&stream->common);
+}
+
+ssize_t AudioStreamOut::write(const void* buffer, size_t bytes)
+{
+    ALOG_ASSERT(stream != NULL);
+    return stream->write(stream, buffer, bytes);
+}
+
+} // namespace android
diff --git a/services/audioflinger/AudioStreamOut.h b/services/audioflinger/AudioStreamOut.h
new file mode 100644
index 0000000..e91ca9c
--- /dev/null
+++ b/services/audioflinger/AudioStreamOut.h
@@ -0,0 +1,83 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_AUDIO_STREAM_OUT_H
+#define ANDROID_AUDIO_STREAM_OUT_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <system/audio.h>
+
+#include "AudioStreamOut.h"
+
+namespace android {
+
+class AudioHwDevice;
+
+/**
+ * Managed access to a HAL output stream.
+ */
+class AudioStreamOut {
+public:
+// AudioStreamOut fields are const where possible; stream is set once by open().
+// For emphasis, we could also make all pointers to them be "const *",
+// but that would clutter the code unnecessarily.
+    AudioHwDevice * const audioHwDev;
+    audio_stream_out_t *stream;
+    const audio_output_flags_t flags;
+
+    audio_hw_device_t *hwDev() const;
+
+    AudioStreamOut(AudioHwDevice *dev, audio_output_flags_t flags);
+
+    virtual status_t open(
+            audio_io_handle_t handle,
+            audio_devices_t devices,
+            struct audio_config *config,
+            const char *address);
+
+    virtual ~AudioStreamOut() { }
+
+    virtual status_t getRenderPosition(uint32_t *frames);
+
+    virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
+
+    /**
+    * Write audio buffer to driver. Returns number of bytes written, or a
+    * negative status_t. If at least one frame was written successfully prior to the error,
+    * it is suggested that the driver return that successful (short) byte count
+    * and then return an error in the subsequent call.
+    *
+    * If set_callback() has previously been called to enable non-blocking mode
+    * the write() is not allowed to block. It must write only the number of
+    * bytes that currently fit in the driver/hardware buffer and then return
+    * this byte count. If this is less than the requested write size the
+    * callback function must be called when more space is available in the
+    * driver/hardware buffer.
+    */
+    virtual ssize_t write(const void *buffer, size_t bytes);
+
+    virtual size_t getFrameSize();
+
+    virtual status_t flush();
+    virtual status_t standby();
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_STREAM_OUT_H
diff --git a/services/audioflinger/FastCaptureDumpState.cpp b/services/audioflinger/FastCaptureDumpState.cpp
index 00f8da0..53eeba5 100644
--- a/services/audioflinger/FastCaptureDumpState.cpp
+++ b/services/audioflinger/FastCaptureDumpState.cpp
@@ -14,7 +14,13 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "FastCaptureDumpState"
+//#define LOG_NDEBUG 0
+
+#include "Configuration.h"
+#include <utils/Log.h>
 #include "FastCaptureDumpState.h"
+#include "FastCaptureState.h"
 
 namespace android {
 
@@ -27,4 +33,21 @@
 {
 }
 
+void FastCaptureDumpState::dump(int fd) const
+{
+    if (mCommand == FastCaptureState::INITIAL) {
+        dprintf(fd, "  FastCapture not initialized\n");
+        return;
+    }
+    double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1000.0) +
+            (mMeasuredWarmupTs.tv_nsec / 1000000.0);
+    double periodSec = (double) mFrameCount / mSampleRate;
+    dprintf(fd, "  FastCapture command=%s readSequence=%u framesRead=%u\n"
+                "              readErrors=%u sampleRate=%u frameCount=%zu\n"
+                "              measuredWarmup=%.3g ms, warmupCycles=%u period=%.2f ms\n",
+                FastCaptureState::commandToString(mCommand), mReadSequence, mFramesRead,
+                mReadErrors, mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
+                periodSec * 1e3);
+}
+
 }   // android
diff --git a/services/audioflinger/FastCaptureDumpState.h b/services/audioflinger/FastCaptureDumpState.h
index ee99099..6f9c4c3 100644
--- a/services/audioflinger/FastCaptureDumpState.h
+++ b/services/audioflinger/FastCaptureDumpState.h
@@ -27,6 +27,8 @@
     FastCaptureDumpState();
     /*virtual*/ ~FastCaptureDumpState();
 
+    void dump(int fd) const;    // should only be called on a stable copy, not the original
+
     // FIXME by renaming, could pull up many of these to FastThreadDumpState
     uint32_t mReadSequence;     // incremented before and after each read()
     uint32_t mFramesRead;       // total number of frames read successfully
diff --git a/services/audioflinger/FastMixerDumpState.cpp b/services/audioflinger/FastMixerDumpState.cpp
index 65fbf2b..b10942b 100644
--- a/services/audioflinger/FastMixerDumpState.cpp
+++ b/services/audioflinger/FastMixerDumpState.cpp
@@ -64,7 +64,7 @@
     }
     double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1000.0) +
             (mMeasuredWarmupTs.tv_nsec / 1000000.0);
-    double mixPeriodSec = (double) mFrameCount / (double) mSampleRate;
+    double mixPeriodSec = (double) mFrameCount / mSampleRate;
     dprintf(fd, "  FastMixer command=%s writeSequence=%u framesWritten=%u\n"
                 "            numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
                 "            sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n"
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 902d5e4..45df6a9 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -157,8 +157,9 @@
     bool                mFlushHwPending; // track requests for thread flush
 
     // for last call to getTimestamp
-    bool                mPreviousValid;
-    uint32_t            mPreviousFramesWritten;
+    bool                mPreviousTimestampValid;
+    // This is either the first timestamp or one that has passed
+    // the check to prevent retrograde motion.
     AudioTimestamp      mPreviousTimestamp;
 };  // end of Track
 
diff --git a/services/audioflinger/SpdifStreamOut.cpp b/services/audioflinger/SpdifStreamOut.cpp
new file mode 100644
index 0000000..d23588e
--- /dev/null
+++ b/services/audioflinger/SpdifStreamOut.cpp
@@ -0,0 +1,166 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "AudioFlinger"
+//#define LOG_NDEBUG 0
+#include <hardware/audio.h>
+#include <utils/Log.h>
+
+#include <audio_utils/spdif/SPDIFEncoder.h>
+
+#include "AudioHwDevice.h"
+#include "AudioStreamOut.h"
+#include "SpdifStreamOut.h"
+
+namespace android {
+
+/**
+ * If the AudioFlinger is processing encoded data and the HAL expects
+ * PCM then we need to wrap the data in an SPDIF wrapper.
+ */
+SpdifStreamOut::SpdifStreamOut(AudioHwDevice *dev, audio_output_flags_t flags)
+        : AudioStreamOut(dev, flags)
+        , mRateMultiplier(1)
+        , mSpdifEncoder(this)
+        , mRenderPositionHal(0)
+        , mPreviousHalPosition32(0)
+{
+}
+
+status_t SpdifStreamOut::open(
+                              audio_io_handle_t handle,
+                              audio_devices_t devices,
+                              struct audio_config *config,
+                              const char *address)
+{
+    struct audio_config customConfig = *config;
+
+    customConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+    customConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+
+    // Some data bursts run at a higher sample rate.
+    switch(config->format) {
+        case AUDIO_FORMAT_E_AC3:
+            mRateMultiplier = 4;
+            break;
+        case AUDIO_FORMAT_AC3:
+            mRateMultiplier = 1;
+            break;
+        default:
+            ALOGE("ERROR SpdifStreamOut::open() unrecognized format 0x%08X\n",
+                config->format);
+            return BAD_VALUE;
+    }
+    customConfig.sample_rate = config->sample_rate * mRateMultiplier;
+
+    // Always print this because otherwise it could be very confusing if the
+    // HAL and AudioFlinger are using different formats.
+    // Print before open() because HAL may modify customConfig.
+    ALOGI("SpdifStreamOut::open() AudioFlinger requested"
+            " sampleRate %d, format %#x, channelMask %#x",
+            config->sample_rate,
+            config->format,
+            config->channel_mask);
+    ALOGI("SpdifStreamOut::open() HAL configured for"
+            " sampleRate %d, format %#x, channelMask %#x",
+            customConfig.sample_rate,
+            customConfig.format,
+            customConfig.channel_mask);
+
+    status_t status = AudioStreamOut::open(
+            handle,
+            devices,
+            &customConfig,
+            address);
+
+    ALOGI("SpdifStreamOut::open() status = %d", status);
+
+    return status;
+}
+
+// Account for possibly higher sample rate.
+status_t SpdifStreamOut::getRenderPosition(uint32_t *frames)
+{
+    uint32_t halPosition = 0;
+    status_t status = AudioStreamOut::getRenderPosition(&halPosition);
+    if (status != NO_ERROR) {
+        return status;
+    }
+
+    // Accumulate a 64-bit position so that we wrap at the right place.
+    if (mRateMultiplier != 1) {
+        // Maintain a 64-bit render position.
+        int32_t deltaHalPosition = (int32_t)(halPosition - mPreviousHalPosition32);
+        mPreviousHalPosition32 = halPosition;
+        mRenderPositionHal += deltaHalPosition;
+
+        // Scale from device sample rate to application rate.
+        uint64_t renderPositionApp = mRenderPositionHal / mRateMultiplier;
+        ALOGV("SpdifStreamOut::getRenderPosition() "
+            "renderPositionAppRate = %llu = %llu / %u\n",
+            renderPositionApp, mRenderPositionHal, mRateMultiplier);
+
+        *frames = (uint32_t)renderPositionApp;
+    } else {
+        *frames = halPosition;
+    }
+    return status;
+}
+
+status_t SpdifStreamOut::flush()
+{
+    // FIXME Is there an issue here with flush being asynchronous?
+    mRenderPositionHal = 0;
+    mPreviousHalPosition32 = 0;
+    return AudioStreamOut::flush();
+}
+
+status_t SpdifStreamOut::standby()
+{
+    mRenderPositionHal = 0;
+    mPreviousHalPosition32 = 0;
+    return AudioStreamOut::standby();
+}
+
+// Account for possibly higher sample rate.
+// This is much easier when all the values are 64-bit.
+status_t SpdifStreamOut::getPresentationPosition(uint64_t *frames,
+        struct timespec *timestamp)
+{
+    uint64_t halFrames = 0;
+    status_t status = AudioStreamOut::getPresentationPosition(&halFrames, timestamp);
+    *frames = halFrames / mRateMultiplier;
+    return status;
+}
+
+size_t SpdifStreamOut::getFrameSize()
+{
+    return sizeof(int8_t);
+}
+
+ssize_t SpdifStreamOut::writeDataBurst(const void* buffer, size_t bytes)
+{
+    return AudioStreamOut::write(buffer, bytes);
+}
+
+ssize_t SpdifStreamOut::write(const void* buffer, size_t bytes)
+{
+    // Write to SPDIF wrapper. It will call back to writeDataBurst().
+    return mSpdifEncoder.write(buffer, bytes);
+}
+
+} // namespace android
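getRenderPosition() above keeps a 64-bit accumulator so the 32-bit HAL counter can wrap without glitching, then scales back down by the data-burst rate multiplier. A standalone sketch of that arithmetic follows; PositionTracker is a hypothetical name used only for illustration, not the AOSP class.

    // Wrap-safe position math modeled on SpdifStreamOut::getRenderPosition().
    #include <cstdint>
    #include <cstdio>

    struct PositionTracker {
        int64_t  positionHal = 0;     // accumulated 64-bit HAL frame count
        uint32_t previousHal32 = 0;   // last raw 32-bit HAL reading
        uint32_t rateMultiplier = 4;  // e.g. E-AC3 bursts run at 4x the application rate

        uint32_t update(uint32_t halPosition) {
            // A signed 32-bit delta stays correct even when halPosition wraps past 2^32.
            const int32_t delta = static_cast<int32_t>(halPosition - previousHal32);
            previousHal32 = halPosition;
            positionHal += delta;
            // Scale from device (burst) rate back to the application rate.
            return static_cast<uint32_t>(positionHal / rateMultiplier);
        }
    };

    int main() {
        PositionTracker t;
        t.positionHal = t.previousHal32 = 0xFFFFFF00u;           // just below the 32-bit wrap point
        std::printf("app frames: %u\n", t.update(0x00000100u));  // counter wrapped; delta is +512
        return 0;
    }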
diff --git a/services/audioflinger/SpdifStreamOut.h b/services/audioflinger/SpdifStreamOut.h
new file mode 100644
index 0000000..cb82ac7
--- /dev/null
+++ b/services/audioflinger/SpdifStreamOut.h
@@ -0,0 +1,107 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_SPDIF_STREAM_OUT_H
+#define ANDROID_SPDIF_STREAM_OUT_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <system/audio.h>
+
+#include "AudioHwDevice.h"
+#include "AudioStreamOut.h"
+#include "SpdifStreamOut.h"
+
+#include <audio_utils/spdif/SPDIFEncoder.h>
+
+namespace android {
+
+/**
+ * Stream that is a PCM data burst in the HAL but looks like an encoded stream
+ * to the AudioFlinger. Wraps encoded data in an SPDIF wrapper per IEC 61937-3.
+ */
+class SpdifStreamOut : public AudioStreamOut {
+public:
+
+    SpdifStreamOut(AudioHwDevice *dev, audio_output_flags_t flags);
+
+    virtual ~SpdifStreamOut() { }
+
+    virtual status_t open(
+            audio_io_handle_t handle,
+            audio_devices_t devices,
+            struct audio_config *config,
+            const char *address);
+
+    virtual status_t getRenderPosition(uint32_t *frames);
+
+    virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
+
+    /**
+    * Write audio buffer to driver. Returns number of bytes written, or a
+    * negative status_t. If at least one frame was written successfully prior to the error,
+    * it is suggested that the driver return that successful (short) byte count
+    * and then return an error in the subsequent call.
+    *
+    * If set_callback() has previously been called to enable non-blocking mode
+    * the write() is not allowed to block. It must write only the number of
+    * bytes that currently fit in the driver/hardware buffer and then return
+    * this byte count. If this is less than the requested write size the
+    * callback function must be called when more space is available in the
+    * driver/hardware buffer.
+    */
+    virtual ssize_t write(const void* buffer, size_t bytes);
+
+    virtual size_t getFrameSize();
+
+    virtual status_t flush();
+    virtual status_t standby();
+
+private:
+
+    class MySPDIFEncoder : public SPDIFEncoder
+    {
+    public:
+        MySPDIFEncoder(SpdifStreamOut *spdifStreamOut)
+          : mSpdifStreamOut(spdifStreamOut)
+        {
+        }
+
+        virtual ssize_t writeOutput(const void* buffer, size_t bytes)
+        {
+            return mSpdifStreamOut->writeDataBurst(buffer, bytes);
+        }
+    protected:
+        SpdifStreamOut * const mSpdifStreamOut;
+    };
+
+    int                  mRateMultiplier;
+    MySPDIFEncoder       mSpdifEncoder;
+
+    // Used to implement getRenderPosition()
+    int64_t              mRenderPositionHal;
+    uint32_t             mPreviousHalPosition32;
+
+    ssize_t  writeDataBurst(const void* data, size_t bytes);
+    ssize_t  writeInternal(const void* buffer, size_t bytes);
+
+};
+
+} // namespace android
+
+#endif // ANDROID_SPDIF_STREAM_OUT_H
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index c1da6bc..4efb3d7 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2009,7 +2009,7 @@
         LOG_FATAL("HAL format %#x not supported for mixed output",
                 mFormat);
     }
-    mFrameSize = audio_stream_out_frame_size(mOutput->stream);
+    mFrameSize = mOutput->getFrameSize();
     mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common);
     mFrameCount = mBufferSize / mFrameSize;
     if (mFrameCount & 15) {
@@ -2160,7 +2160,7 @@
     } else {
         status_t status;
         uint32_t frames;
-        status = mOutput->stream->get_render_position(mOutput->stream, &frames);
+        status = mOutput->getRenderPosition(&frames);
         *dspFrames = (size_t)frames;
         return status;
     }
@@ -2202,13 +2202,13 @@
 }
 
 
-AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::getOutput() const
+AudioStreamOut* AudioFlinger::PlaybackThread::getOutput() const
 {
     Mutex::Autolock _l(mLock);
     return mOutput;
 }
 
-AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::clearOutput()
+AudioStreamOut* AudioFlinger::PlaybackThread::clearOutput()
 {
     Mutex::Autolock _l(mLock);
     AudioStreamOut *output = mOutput;
@@ -2354,8 +2354,7 @@
         }
         // FIXME We should have an implementation of timestamps for direct output threads.
         // They are used e.g for multichannel PCM playback over HDMI.
-        bytesWritten = mOutput->stream->write(mOutput->stream,
-                                                   (char *)mSinkBuffer + offset, mBytesRemaining);
+        bytesWritten = mOutput->write((char *)mSinkBuffer + offset, mBytesRemaining);
         if (mUseAsyncWrite &&
                 ((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) {
             // do not wait for async callback in case of error of full write
@@ -2908,8 +2907,7 @@
     if ((mType == OFFLOAD || mType == DIRECT)
             && mOutput != NULL && mOutput->stream->get_presentation_position) {
         uint64_t position64;
-        int ret = mOutput->stream->get_presentation_position(
-                                                mOutput->stream, &position64, &timestamp.mTime);
+        int ret = mOutput->getPresentationPosition(&position64, &timestamp.mTime);
         if (ret == 0) {
             timestamp.mPosition = (uint32_t)position64;
             return NO_ERROR;
@@ -3289,7 +3287,7 @@
 void AudioFlinger::PlaybackThread::threadLoop_standby()
 {
     ALOGV("Audio hardware entering standby, mixer %p, suspend count %d", this, mSuspended);
-    mOutput->stream->common.standby(&mOutput->stream->common);
+    mOutput->standby();
     if (mUseAsyncWrite != 0) {
         // discard any pending drain or write ack by incrementing sequence
         mWriteAckSequence = (mWriteAckSequence + 2) & ~1;
@@ -4058,7 +4056,7 @@
         status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
                                                 keyValuePair.string());
         if (!mStandby && status == INVALID_OPERATION) {
-            mOutput->stream->common.standby(&mOutput->stream->common);
+            mOutput->standby();
             mStandby = true;
             mBytesWritten = 0;
             status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
@@ -4318,6 +4316,10 @@
             }
             if (track->isStopping_1()) {
                 track->mState = TrackBase::STOPPING_2;
+                if (last && mHwPaused) {
+                    doHwResume = true;
+                    mHwPaused = false;
+                }
             }
             if ((track->sharedBuffer() != 0) || track->isStopped() ||
                     track->isStopping_2() || track->isPaused()) {
@@ -4400,8 +4402,8 @@
     while (frameCount) {
         AudioBufferProvider::Buffer buffer;
         buffer.frameCount = frameCount;
-        mActiveTrack->getNextBuffer(&buffer);
-        if (buffer.raw == NULL) {
+        status_t status = mActiveTrack->getNextBuffer(&buffer);
+        if (status != NO_ERROR || buffer.raw == NULL) {
             memset(curBuf, 0, frameCount * mFrameSize);
             break;
         }
@@ -4457,14 +4459,17 @@
 bool AudioFlinger::DirectOutputThread::shouldStandby_l()
 {
     bool trackPaused = false;
+    bool trackStopped = false;
 
     // do not put the HAL in standby when paused. AwesomePlayer clear the offloaded AudioTrack
     // after a timeout and we will enter standby then.
     if (mTracks.size() > 0) {
         trackPaused = mTracks[mTracks.size() - 1]->isPaused();
+        trackStopped = mTracks[mTracks.size() - 1]->isStopped() ||
+                           mTracks[mTracks.size() - 1]->mState == TrackBase::IDLE;
     }
 
-    return !mStandby && !(trackPaused || (usesHwAvSync() && mHwPaused));
+    return !mStandby && !(trackPaused || (usesHwAvSync() && mHwPaused && !trackStopped));
 }
 
 // getTrackName_l() must be called with ThreadBase::mLock held
@@ -4513,7 +4518,7 @@
         status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
                                                 keyValuePair.string());
         if (!mStandby && status == INVALID_OPERATION) {
-            mOutput->stream->common.standby(&mOutput->stream->common);
+            mOutput->standby();
             mStandby = true;
             mBytesWritten = 0;
             status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
@@ -4567,7 +4572,10 @@
 
     // use shorter standby delay as on normal output to release
     // hardware resources as soon as possible
-    if (audio_is_linear_pcm(mFormat)) {
+    // no delay on outputs with HW A/V sync
+    if (usesHwAvSync()) {
+        standbyDelay = 0;
+    } else if (audio_is_linear_pcm(mFormat)) {
         standbyDelay = microseconds(activeSleepTime*2);
     } else {
         standbyDelay = kOffloadStandbyDelayNs;
@@ -4576,9 +4584,7 @@
 
 void AudioFlinger::DirectOutputThread::flushHw_l()
 {
-    if (mOutput->stream->flush != NULL) {
-        mOutput->stream->flush(mOutput->stream);
-    }
+    mOutput->flush();
     mHwPaused = false;
 }
 
@@ -4868,7 +4874,7 @@
                     size_t audioHALFrames =
                             (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
                     size_t framesWritten =
-                            mBytesWritten / audio_stream_out_frame_size(mOutput->stream);
+                            mBytesWritten / mOutput->getFrameSize();
                     track->presentationComplete(framesWritten, audioHALFrames);
                     track->reset();
                     tracksToRemove->add(track);
@@ -5208,7 +5214,7 @@
     }
 
     if (initFastCapture) {
-        // create a Pipe for FastMixer to write to, and for us and fast tracks to read from
+        // create a Pipe for FastCapture to write to, and for us and fast tracks to read from
         NBAIO_Format format = mInputSource->format();
         size_t pipeFramesP2 = roundup(mSampleRate / 25);    // double-buffering of 20 ms each
         size_t pipeSize = pipeFramesP2 * Format_frameSize(format);
@@ -5867,8 +5873,9 @@
     // client expresses a preference for FAST, but we get the final say
     if (*flags & IAudioFlinger::TRACK_FAST) {
       if (
-            // use case: callback handler
-            (tid != -1) &&
+            // we formerly checked for a callback handler (tid != -1),
+            // but that is no longer required for TRANSFER_OBTAIN mode
+            //
             // frame count is not specified, or is exactly the pipe depth
             ((frameCount == 0) || (frameCount == mPipeFramesP2)) &&
             // PCM data
@@ -6164,6 +6171,10 @@
     }
     dprintf(fd, "  Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
     dprintf(fd, "  Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
+
+    //  Make a non-atomic copy of fast capture dump state so it won't change underneath us
+    const FastCaptureDumpState copy(mFastCaptureDumpState);
+    copy.dump(fd);
 }
 
 void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args __unused)
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 38667b9..dc9f249 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -405,9 +405,7 @@
     mAudioTrackServerProxy(NULL),
     mResumeToStopping(false),
     mFlushHwPending(false),
-    mPreviousValid(false),
-    mPreviousFramesWritten(0)
-    // mPreviousTimestamp
+    mPreviousTimestampValid(false)
 {
     // client == 0 implies sharedBuffer == 0
     ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
@@ -747,6 +745,7 @@
                 // move to STOPPING_2 when drain completes and then STOPPED
                 mState = STOPPING_1;
             }
+            playbackThread->broadcast_l();
             ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
                     playbackThread);
         }
@@ -864,7 +863,7 @@
         if (mState == FLUSHED) {
             mState = IDLE;
         }
-        mPreviousValid = false;
+        mPreviousTimestampValid = false;
     }
 }
 
@@ -886,19 +885,22 @@
 {
     // Client should implement this using SSQ; the unpresented frame count in latch is irrelevant
     if (isFastTrack()) {
-        // FIXME no lock held to set mPreviousValid = false
+        // FIXME no lock held to set mPreviousTimestampValid = false
         return INVALID_OPERATION;
     }
     sp<ThreadBase> thread = mThread.promote();
     if (thread == 0) {
-        // FIXME no lock held to set mPreviousValid = false
+        // FIXME no lock held to set mPreviousTimestampValid = false
         return INVALID_OPERATION;
     }
+
     Mutex::Autolock _l(thread->mLock);
     PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+
+    status_t result = INVALID_OPERATION;
     if (!isOffloaded() && !isDirect()) {
         if (!playbackThread->mLatchQValid) {
-            mPreviousValid = false;
+            mPreviousTimestampValid = false;
             return INVALID_OPERATION;
         }
         uint32_t unpresentedFrames =
@@ -914,36 +916,54 @@
         uint32_t framesWritten = i >= 0 ?
                 playbackThread->mLatchQ.mFramesReleased[i] :
                 mAudioTrackServerProxy->framesReleased();
-        bool checkPreviousTimestamp = mPreviousValid && framesWritten >= mPreviousFramesWritten;
         if (framesWritten < unpresentedFrames) {
-            mPreviousValid = false;
-            return INVALID_OPERATION;
+            mPreviousTimestampValid = false;
+            // return invalid result
+        } else {
+            timestamp.mPosition = framesWritten - unpresentedFrames;
+            timestamp.mTime = playbackThread->mLatchQ.mTimestamp.mTime;
+            result = NO_ERROR;
         }
-        mPreviousFramesWritten = framesWritten;
-        uint32_t position = framesWritten - unpresentedFrames;
-        struct timespec time = playbackThread->mLatchQ.mTimestamp.mTime;
-        if (checkPreviousTimestamp) {
-            if (time.tv_sec < mPreviousTimestamp.mTime.tv_sec ||
-                    (time.tv_sec == mPreviousTimestamp.mTime.tv_sec &&
-                    time.tv_nsec < mPreviousTimestamp.mTime.tv_nsec)) {
-                ALOGW("Time is going backwards");
-            }
-            // position can bobble slightly as an artifact; this hides the bobble
-            static const uint32_t MINIMUM_POSITION_DELTA = 8u;
-            if ((position <= mPreviousTimestamp.mPosition) ||
-                    (position - mPreviousTimestamp.mPosition) < MINIMUM_POSITION_DELTA) {
-                position = mPreviousTimestamp.mPosition;
-                time = mPreviousTimestamp.mTime;
-            }
-        }
-        timestamp.mPosition = position;
-        timestamp.mTime = time;
-        mPreviousTimestamp = timestamp;
-        mPreviousValid = true;
-        return NO_ERROR;
+    } else { // offloaded or direct
+        result = playbackThread->getTimestamp_l(timestamp);
     }
 
-    return playbackThread->getTimestamp_l(timestamp);
+    // Prevent retrograde motion in timestamp.
+    if (result == NO_ERROR) {
+        if (mPreviousTimestampValid) {
+            if (timestamp.mTime.tv_sec < mPreviousTimestamp.mTime.tv_sec ||
+                    (timestamp.mTime.tv_sec == mPreviousTimestamp.mTime.tv_sec &&
+                    timestamp.mTime.tv_nsec < mPreviousTimestamp.mTime.tv_nsec)) {
+                ALOGW("WARNING - retrograde timestamp time");
+                // FIXME Consider blocking this from propagating upwards.
+            }
+
+            // Looking at signed delta will work even when the timestamps
+            // are wrapping around.
+            int32_t deltaPosition = static_cast<int32_t>(timestamp.mPosition
+                    - mPreviousTimestamp.mPosition);
+            // position can bobble slightly as an artifact; this hides the bobble
+            static const int32_t MINIMUM_POSITION_DELTA = 8;
+            if (deltaPosition < 0) {
+#define TIME_TO_NANOS(time) ((uint64_t)(time).tv_sec * 1000000000 + (time).tv_nsec)
+                ALOGW("WARNING - retrograde timestamp position corrected,"
+                        " %d = %u - %u, (at %llu, %llu nanos)",
+                        deltaPosition,
+                        timestamp.mPosition,
+                        mPreviousTimestamp.mPosition,
+                        TIME_TO_NANOS(timestamp.mTime),
+                        TIME_TO_NANOS(mPreviousTimestamp.mTime));
+#undef TIME_TO_NANOS
+            }
+            if (deltaPosition < MINIMUM_POSITION_DELTA) {
+                // Current timestamp is bad. Use last valid timestamp.
+                timestamp = mPreviousTimestamp;
+            }
+        }
+        mPreviousTimestamp = timestamp;
+        mPreviousTimestampValid = true;
+    }
+    return result;
 }
 
 status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
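The retrograde-motion guard added to getTimestamp() above compares positions with a signed 32-bit delta (so counter wrap-around is handled) and reuses the previous timestamp whenever the new position moves backwards or bobbles by fewer than 8 frames. A standalone sketch of that filter follows; Stamp and RetrogradeFilter are hypothetical stand-ins, not the AOSP Track class.

    // Retrograde-timestamp filter modeled on the logic added to Track::getTimestamp().
    #include <cstdint>
    #include <cstdio>

    struct Stamp { uint32_t position; };

    struct RetrogradeFilter {
        bool  previousValid = false;
        Stamp previous{};

        Stamp filter(Stamp current) {
            if (previousValid) {
                // Signed delta works across uint32_t wrap-around.
                const int32_t delta =
                        static_cast<int32_t>(current.position - previous.position);
                static const int32_t kMinimumPositionDelta = 8; // hides small position bobble
                if (delta < kMinimumPositionDelta) {
                    current = previous;  // current reading is bad; hold the last good one
                }
            }
            previous = current;
            previousValid = true;
            return current;
        }
    };

    int main() {
        RetrogradeFilter f;
        f.filter({1000});
        std::printf("%u\n", f.filter({996}).position);  // 1000: retrograde reading suppressed
        std::printf("%u\n", f.filter({1100}).position); // 1100: normal forward progress
        return 0;
    }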
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 351ed79..d4ce86a 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -21,7 +21,9 @@
 LOCAL_C_INCLUDES := \
     $(TOPDIR)frameworks/av/services/audioflinger \
     $(call include-path-for, audio-effects) \
-    $(call include-path-for, audio-utils)
+    $(call include-path-for, audio-utils) \
+    $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface \
 
 LOCAL_SHARED_LIBRARIES := \
     libcutils \
@@ -39,7 +41,8 @@
 endif
 
 LOCAL_STATIC_LIBRARIES := \
-    libmedia_helper
+    libmedia_helper \
+    libaudiopolicycomponents
 
 LOCAL_MODULE:= libaudiopolicyservice
 
@@ -54,14 +57,6 @@
 
 LOCAL_SRC_FILES:= \
     managerdefault/AudioPolicyManager.cpp \
-    managerdefault/ConfigParsingUtils.cpp \
-    managerdefault/Devices.cpp \
-    managerdefault/Gains.cpp \
-    managerdefault/HwModule.cpp \
-    managerdefault/IOProfile.cpp \
-    managerdefault/Ports.cpp \
-    managerdefault/AudioInputDescriptor.cpp \
-    managerdefault/AudioOutputDescriptor.cpp
 
 LOCAL_SHARED_LIBRARIES := \
     libcutils \
@@ -69,8 +64,15 @@
     liblog \
     libsoundtrigger
 
+LOCAL_SHARED_LIBRARIES += libaudiopolicyenginedefault
+
+LOCAL_C_INCLUDES += \
+    $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface \
+
 LOCAL_STATIC_LIBRARIES := \
-    libmedia_helper
+    libmedia_helper \
+    libaudiopolicycomponents
 
 LOCAL_MODULE:= libaudiopolicymanagerdefault
 
@@ -86,9 +88,21 @@
 LOCAL_SHARED_LIBRARIES := \
     libaudiopolicymanagerdefault
 
+LOCAL_STATIC_LIBRARIES := \
+    libaudiopolicycomponents
+
+LOCAL_C_INCLUDES += \
+    $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface \
+
 LOCAL_MODULE:= libaudiopolicymanager
 
 include $(BUILD_SHARED_LIBRARY)
 
 endif
 endif
+
+#######################################################################
+# Recursive call sub-folder Android.mk
+#
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/services/audiopolicy/common/Android.mk b/services/audiopolicy/common/Android.mk
new file mode 100644
index 0000000..dcce8e3
--- /dev/null
+++ b/services/audiopolicy/common/Android.mk
@@ -0,0 +1,9 @@
+
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+#######################################################################
+# Recursive call sub-folder Android.mk
+#
+include $(call all-makefiles-under,$(LOCAL_PATH))
+
diff --git a/services/audiopolicy/managerdefault/ApmImplDefinitions.h b/services/audiopolicy/common/include/RoutingStrategy.h
similarity index 79%
rename from services/audiopolicy/managerdefault/ApmImplDefinitions.h
rename to services/audiopolicy/common/include/RoutingStrategy.h
index 620979b..d38967e 100644
--- a/services/audiopolicy/managerdefault/ApmImplDefinitions.h
+++ b/services/audiopolicy/common/include/RoutingStrategy.h
@@ -14,8 +14,14 @@
  * limitations under the License.
  */
 
+#pragma once
+
 namespace android {
 
+// Time in milliseconds after media stopped playing during which we consider that the
+// sonification should be as unobtrusive as during the time media was playing.
+#define SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY 5000
+
 enum routing_strategy {
     STRATEGY_MEDIA,
     STRATEGY_PHONE,
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
new file mode 100755
index 0000000..a4cc759
--- /dev/null
+++ b/services/audiopolicy/common/include/Volume.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+#include <utils/Log.h>
+
+class VolumeCurvePoint
+{
+public:
+    int mIndex;
+    float mDBAttenuation;
+};
+
+class Volume
+{
+public:
+    /**
+     * Four points define the volume attenuation curve, each characterized by the volume
+     * index (from 0 to 100) at which it applies, and the attenuation in dB at that index.
+     * We use 100 steps to avoid rounding errors when computing the volume in volIndexToAmpl().
+     *
+     * @todo shall become configurable
+     */
+    enum {
+        VOLMIN = 0,
+        VOLKNEE1 = 1,
+        VOLKNEE2 = 2,
+        VOLMAX = 3,
+
+        VOLCNT = 4
+    };
+
+    /**
+     * device categories used for volume curve management.
+     */
+    enum device_category {
+        DEVICE_CATEGORY_HEADSET,
+        DEVICE_CATEGORY_SPEAKER,
+        DEVICE_CATEGORY_EARPIECE,
+        DEVICE_CATEGORY_EXT_MEDIA,
+        DEVICE_CATEGORY_CNT
+    };
+
+    /**
+     * extract one device relevant for volume control from multiple device selection
+     *
+     * @param[in] device for which the volume category is associated
+     *
+     * @return subset of device required to limit the number of volume category per device
+     */
+    static audio_devices_t getDeviceForVolume(audio_devices_t device)
+    {
+        if (device == AUDIO_DEVICE_NONE) {
+            // this happens when forcing a route update and no track is active on an output.
+            // In this case the returned category is not important.
+            device = AUDIO_DEVICE_OUT_SPEAKER;
+        } else if (popcount(device) > 1) {
+            // Multiple device selection is either:
+            //  - speaker + one other device: give priority to speaker in this case.
+            //  - one A2DP device + another device: happens with duplicated output. In this case
+            // retain the device on the A2DP output as the other must not correspond to an active
+            // selection if not the speaker.
+            //  - HDMI-CEC system audio mode only output: give priority to available item in order.
+            if (device & AUDIO_DEVICE_OUT_SPEAKER) {
+                device = AUDIO_DEVICE_OUT_SPEAKER;
+            } else if (device & AUDIO_DEVICE_OUT_HDMI_ARC) {
+                device = AUDIO_DEVICE_OUT_HDMI_ARC;
+            } else if (device & AUDIO_DEVICE_OUT_AUX_LINE) {
+                device = AUDIO_DEVICE_OUT_AUX_LINE;
+            } else if (device & AUDIO_DEVICE_OUT_SPDIF) {
+                device = AUDIO_DEVICE_OUT_SPDIF;
+            } else {
+                device = (audio_devices_t)(device & AUDIO_DEVICE_OUT_ALL_A2DP);
+            }
+        }
+
+        /*SPEAKER_SAFE is an alias of SPEAKER for purposes of volume control*/
+        if (device == AUDIO_DEVICE_OUT_SPEAKER_SAFE)
+            device = AUDIO_DEVICE_OUT_SPEAKER;
+
+        ALOGW_IF(popcount(device) != 1,
+                 "getDeviceForVolume() invalid device combination: %08x",
+                 device);
+
+        return device;
+    }
+
+    /**
+     * returns the category the device belongs to with regard to volume curve management
+     *
+     * @param[in] device to check for the category to which it belongs.
+     *
+     * @return device category.
+     */
+    static device_category getDeviceCategory(audio_devices_t device)
+    {
+        switch(getDeviceForVolume(device)) {
+        case AUDIO_DEVICE_OUT_EARPIECE:
+            return DEVICE_CATEGORY_EARPIECE;
+        case AUDIO_DEVICE_OUT_WIRED_HEADSET:
+        case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
+        case AUDIO_DEVICE_OUT_BLUETOOTH_SCO:
+        case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET:
+        case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP:
+        case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
+            return DEVICE_CATEGORY_HEADSET;
+        case AUDIO_DEVICE_OUT_LINE:
+        case AUDIO_DEVICE_OUT_AUX_DIGITAL:
+            /*USB?  Remote submix?*/
+            return DEVICE_CATEGORY_EXT_MEDIA;
+        case AUDIO_DEVICE_OUT_SPEAKER:
+        case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT:
+        case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER:
+        case AUDIO_DEVICE_OUT_USB_ACCESSORY:
+        case AUDIO_DEVICE_OUT_USB_DEVICE:
+        case AUDIO_DEVICE_OUT_REMOTE_SUBMIX:
+        default:
+            return DEVICE_CATEGORY_SPEAKER;
+        }
+    }
+
+};
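A hedged usage sketch of the helpers above. It assumes the AOSP include paths so that <system/audio.h> and Volume.h resolve; the results noted in the comments follow from the priority and switch logic shown in this header, not from a real build.

    // Illustrative only: exercises Volume::getDeviceForVolume() and getDeviceCategory().
    #include <system/audio.h>

    #include "Volume.h"

    void volumeCurveExample() {
        // Speaker wins when it is part of a multi-device selection.
        audio_devices_t combo = (audio_devices_t)(AUDIO_DEVICE_OUT_SPEAKER
                | AUDIO_DEVICE_OUT_WIRED_HEADPHONE);
        audio_devices_t forVolume = Volume::getDeviceForVolume(combo);
        // forVolume == AUDIO_DEVICE_OUT_SPEAKER

        // Wired headsets and A2DP headphones share the headset volume-curve category.
        Volume::device_category category =
                Volume::getDeviceCategory(AUDIO_DEVICE_OUT_WIRED_HEADSET);
        // category == Volume::DEVICE_CATEGORY_HEADSET
        (void) forVolume;
        (void) category;
    }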
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
new file mode 100755
index 0000000..a2327ee
--- /dev/null
+++ b/services/audiopolicy/common/include/policy.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+
+// For mixed output and inputs, the policy will use max mixer sampling rates.
+// Do not limit sampling rate otherwise
+#define MAX_MIXER_SAMPLING_RATE 48000
+
+// For mixed output and inputs, the policy will use max mixer channel count.
+// Do not limit channel count otherwise
+#define MAX_MIXER_CHANNEL_COUNT 8
+
+/**
+ * A device mask for all audio input devices that are considered "virtual" when evaluating
+ * active inputs in getActiveInput()
+ */
+#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL  (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_FM_TUNER)
+
+
+/**
+ * A device mask for all audio input and output devices where matching inputs/outputs on device
+ * type alone is not enough: the address must match too
+ */
+#define APM_AUDIO_DEVICE_MATCH_ADDRESS_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX | \
+                                            AUDIO_DEVICE_OUT_REMOTE_SUBMIX)
+
+/**
+ * Check if the given state corresponds to an in-call state.
+ * @TODO find a better name for this broader notion of call state
+ *
+ * @param[in] state to consider
+ *
+ * @return true if the given state represents a device in a telephony or VoIP call
+ */
+static inline bool is_state_in_call(int state)
+{
+    return (state == AUDIO_MODE_IN_CALL) || (state == AUDIO_MODE_IN_COMMUNICATION);
+}
+
+/**
+ * Check if the input device given is considered as a virtual device.
+ *
+ * @param[in] device to consider
+ *
+ * @return true if the device is a virtual one, false otherwise.
+ */
+static bool is_virtual_input_device(audio_devices_t device)
+{
+    if ((device & AUDIO_DEVICE_BIT_IN) != 0) {
+        device &= ~AUDIO_DEVICE_BIT_IN;
+        if ((popcount(device) == 1) && ((device & ~APM_AUDIO_IN_DEVICE_VIRTUAL_ALL) == 0))
+            return true;
+    }
+    return false;
+}
+
+/**
+ * Check whether the device type is one
+ * where addresses are used to distinguish between one connected device and another
+ *
+ * @param[in] device to consider
+ *
+ * @return true if the device is distinguished by its address, false otherwise.
+ */
+static bool device_distinguishes_on_address(audio_devices_t device)
+{
+    return ((device & APM_AUDIO_DEVICE_MATCH_ADDRESS_ALL & ~AUDIO_DEVICE_BIT_IN) != 0);
+}
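A similar hedged sketch for the predicates in this header, again assuming the AOSP include paths; the expected values in the comments are read off the masks defined above, not taken from a build.

    // Illustrative only: exercises the policy.h helpers in an AOSP build environment.
    #include <system/audio.h>

    #include "policy.h"

    void policyHelpersExample() {
        // Remote submix and FM tuner captures are "virtual"; a wired headset mic is not.
        bool submixVirtual  = is_virtual_input_device(AUDIO_DEVICE_IN_REMOTE_SUBMIX);  // true
        bool headsetVirtual = is_virtual_input_device(AUDIO_DEVICE_IN_WIRED_HEADSET);  // false

        // Remote submix endpoints are matched on address as well as on device type.
        bool needsAddress = device_distinguishes_on_address(AUDIO_DEVICE_OUT_REMOTE_SUBMIX); // true

        (void) submixVirtual;
        (void) headsetVirtual;
        (void) needsAddress;
    }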
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
new file mode 100644
index 0000000..71ba1cb
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -0,0 +1,34 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+    src/DeviceDescriptor.cpp \
+    src/AudioGain.cpp \
+    src/StreamDescriptor.cpp \
+    src/HwModule.cpp \
+    src/IOProfile.cpp \
+    src/AudioPort.cpp \
+    src/AudioPolicyMix.cpp \
+    src/AudioPatch.cpp \
+    src/AudioInputDescriptor.cpp \
+    src/AudioOutputDescriptor.cpp \
+    src/EffectDescriptor.cpp \
+    src/ConfigParsingUtils.cpp \
+    src/SoundTriggerSession.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    libutils \
+    liblog \
+
+LOCAL_C_INCLUDES += \
+    $(LOCAL_PATH)/include \
+    $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+    $(LOCAL_PATH)/include
+
+LOCAL_MODULE := libaudiopolicycomponents
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h b/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
new file mode 100644
index 0000000..21fbf9b
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <system/audio.h>
+
+namespace android {
+
+class AudioGain: public RefBase
+{
+public:
+    AudioGain(int index, bool useInChannelMask);
+    virtual ~AudioGain() {}
+
+    void dump(int fd, int spaces, int index) const;
+
+    void getDefaultConfig(struct audio_gain_config *config);
+    status_t checkConfig(const struct audio_gain_config *config);
+    int               mIndex;
+    struct audio_gain mGain;
+    bool              mUseInChannelMask;
+};
+
+}; // namespace android
diff --git a/services/audiopolicy/managerdefault/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
similarity index 68%
rename from services/audiopolicy/managerdefault/AudioInputDescriptor.h
rename to services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 02579e6..7536a37 100644
--- a/services/audiopolicy/managerdefault/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -14,14 +14,28 @@
  * limitations under the License.
  */
 
+#pragma once
+
+#include "AudioPort.h"
+#include <utils/Errors.h>
+#include <system/audio.h>
+#include <utils/SortedVector.h>
+#include <utils/KeyedVector.h>
+
 namespace android {
 
+class IOProfile;
+class AudioMix;
+
 // descriptor for audio inputs. Used to maintain current configuration of each opened audio input
 // and keep track of the usage of this input.
 class AudioInputDescriptor: public AudioPortConfig
 {
 public:
     AudioInputDescriptor(const sp<IOProfile>& profile);
+    void setIoHandle(audio_io_handle_t ioHandle);
+
+    audio_module_handle_t getModuleHandle() const;
 
     status_t    dump(int fd);
 
@@ -45,4 +59,27 @@
     void toAudioPort(struct audio_port *port) const;
 };
 
+class AudioInputCollection :
+        public DefaultKeyedVector< audio_io_handle_t, sp<AudioInputDescriptor> >
+{
+public:
+    bool isSourceActive(audio_source_t source) const;
+
+    sp<AudioInputDescriptor> getInputFromId(audio_port_handle_t id) const;
+
+    uint32_t activeInputsCount() const;
+
+    /**
+     * Return the io handle of an active input, or 0 if no input is active.
+     * Only considers inputs from physical devices (e.g. main mic, headset mic) when
+     * ignoreVirtualInputs is true.
+     */
+    audio_io_handle_t getActiveInput(bool ignoreVirtualInputs = true);
+
+    audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
+
+    status_t dump(int fd) const;
+};
+
+
 }; // namespace android
diff --git a/services/audiopolicy/managerdefault/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
similarity index 65%
rename from services/audiopolicy/managerdefault/AudioOutputDescriptor.h
rename to services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 32f46e4..43ee691 100644
--- a/services/audiopolicy/managerdefault/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -14,10 +14,20 @@
  * limitations under the License.
  */
 
-#include "ApmImplDefinitions.h"
+#pragma once
+
+#include "AudioPort.h"
+#include <RoutingStrategy.h>
+#include <utils/Errors.h>
+#include <utils/Timers.h>
+#include <utils/KeyedVector.h>
+#include <system/audio.h>
 
 namespace android {
 
+class IOProfile;
+class AudioMix;
+
 // descriptor for audio outputs. Used to maintain current configuration of each opened audio output
 // and keep track of the usage of this output by each audio stream type.
 class AudioOutputDescriptor: public AudioPortConfig
@@ -30,6 +40,7 @@
     audio_devices_t device() const;
     void changeRefCount(audio_stream_type_t stream, int delta);
 
+    void setIoHandle(audio_io_handle_t ioHandle);
     bool isDuplicated() const { return (mOutput1 != NULL && mOutput2 != NULL); }
     audio_devices_t supportedDevices();
     uint32_t latency();
@@ -38,15 +49,14 @@
     bool isStreamActive(audio_stream_type_t stream,
                         uint32_t inPastMs = 0,
                         nsecs_t sysTime = 0) const;
-    bool isStrategyActive(routing_strategy strategy,
-                     uint32_t inPastMs = 0,
-                     nsecs_t sysTime = 0) const;
 
     virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
                            const struct audio_port_config *srcConfig = NULL) const;
     virtual sp<AudioPort> getAudioPort() const { return mProfile; }
     void toAudioPort(struct audio_port *port) const;
 
+    audio_module_handle_t getModuleHandle() const;
+
     audio_port_handle_t mId;
     audio_io_handle_t mIoHandle;              // output handle
     uint32_t mLatency;                  //
@@ -66,4 +76,38 @@
     uint32_t mDirectOpenCount; // number of clients using this output (direct outputs only)
 };
 
+class AudioOutputCollection :
+        public DefaultKeyedVector< audio_io_handle_t, sp<AudioOutputDescriptor> >
+{
+public:
+    bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+
+    /**
+     * Return whether a stream is playing remotely. Override to change the definition of
+     * local/remote playback; used for instance by the notification manager so that media
+     * players do not lose audio focus when not playing locally.
+     * For the base implementation, "remotely" means playing during screen mirroring, which
+     * uses an output for playback with a non-empty, non-"0" address.
+     */
+    bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+
+    /**
+     * returns the A2DP output handle if it is open or 0 otherwise
+     */
+    audio_io_handle_t getA2dpOutput() const;
+
+    sp<AudioOutputDescriptor> getOutputFromId(audio_port_handle_t id) const;
+
+    sp<AudioOutputDescriptor> getPrimaryOutput() const;
+
+    /**
+     * return true if any output is playing anything besides the stream to ignore
+     */
+    bool isAnyOutputActive(audio_stream_type_t streamToIgnore) const;
+
+    audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
+
+    status_t dump(int fd) const;
+};
+
 }; // namespace android
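
A short caller sketch for the collection above (hypothetical, not part of the patch); it shows the kind of query isStreamActiveRemotely() and getA2dpOutput() are meant to answer:

    // True when music is rendered to a remote submix output that is not owned by a
    // dynamic policy mix, i.e. "remote playback" as defined in the comment above.
    static bool isMusicPlayingRemotely(const AudioOutputCollection &outputs)
    {
        return outputs.isStreamActiveRemotely(AUDIO_STREAM_MUSIC);
    }

    static bool hasOpenA2dpOutput(const AudioOutputCollection &outputs)
    {
        return outputs.getA2dpOutput() != 0;
    }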
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
new file mode 100644
index 0000000..385f257
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+
+namespace android {
+
+class AudioPatch : public RefBase
+{
+public:
+    AudioPatch(const struct audio_patch *patch, uid_t uid);
+
+    status_t dump(int fd, int spaces, int index) const;
+
+    audio_patch_handle_t mHandle;
+    struct audio_patch mPatch;
+    uid_t mUid;
+    audio_patch_handle_t mAfPatchHandle;
+
+private:
+    static volatile int32_t mNextUniqueId;
+};
+
+class AudioPatchCollection : public DefaultKeyedVector<audio_patch_handle_t, sp<AudioPatch> >
+{
+public:
+    status_t addAudioPatch(audio_patch_handle_t handle, const sp<AudioPatch>& patch);
+
+    status_t removeAudioPatch(audio_patch_handle_t handle);
+
+    status_t listAudioPatches(unsigned int *num_patches, struct audio_patch *patches) const;
+
+    status_t dump(int fd) const;
+};
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
new file mode 100644
index 0000000..988aed6
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <utils/RefBase.h>
+#include <media/AudioPolicy.h>
+#include <utils/KeyedVector.h>
+#include <hardware/audio.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class AudioOutputDescriptor;
+
+/**
+ * custom mix entry in mPolicyMixes
+ */
+class AudioPolicyMix : public RefBase {
+public:
+    AudioPolicyMix() {}
+
+    const sp<AudioOutputDescriptor> &getOutput() const;
+
+    void setOutput(sp<AudioOutputDescriptor> &output);
+
+    void clearOutput();
+
+    android::AudioMix &getMix();
+
+    void setMix(AudioMix &mix);
+
+private:
+    AudioMix    mMix;                   // Audio policy mix descriptor
+    sp<AudioOutputDescriptor> mOutput;  // Corresponding output stream
+};
+
+
+class AudioPolicyMixCollection : public DefaultKeyedVector<String8, sp<AudioPolicyMix> >
+{
+public:
+    status_t getAudioPolicyMix(String8 address, sp<AudioPolicyMix> &policyMix) const;
+
+    status_t registerMix(String8 address, AudioMix mix);
+
+    status_t unregisterMix(String8 address);
+
+    void closeOutput(sp<AudioOutputDescriptor> &desc);
+
+    /**
+     * Try to find an output descriptor for the given attributes.
+     *
+     * @param[in] attributes to consider when looking up an output descriptor.
+     * @param[out] desc to return if an output could be found.
+     *
+     * @return NO_ERROR if an output was found for the given attributes (in this case, the
+     *                  descriptor output param is initialized), error code otherwise.
+     */
+    status_t getOutputForAttr(audio_attributes_t attributes, sp<AudioOutputDescriptor> &desc);
+
+    audio_devices_t getDeviceAndMixForInputSource(audio_source_t inputSource,
+                                                  audio_devices_t availableDeviceTypes,
+                                                  AudioMix **policyMix);
+
+    status_t getInputMixForAttr(audio_attributes_t attr, AudioMix *&policyMix);
+};
+
+}; // namespace android
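
A hypothetical caller sketch (not part of the patch) showing how getOutputForAttr() is intended to be used when routing a new track:

    // Returns the output claimed by a registered policy mix for these attributes,
    // or NULL so the caller can fall back to the regular routing rules.
    static sp<AudioOutputDescriptor> pickMixOutput(AudioPolicyMixCollection &mixes,
                                                   const audio_attributes_t &attr)
    {
        sp<AudioOutputDescriptor> desc;
        if (mixes.getOutputForAttr(attr, desc) == NO_ERROR) {
            return desc;
        }
        return NULL;
    }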
diff --git a/services/audiopolicy/managerdefault/Ports.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
similarity index 88%
rename from services/audiopolicy/managerdefault/Ports.h
rename to services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index f6e0e93..4f7f2bc 100644
--- a/services/audiopolicy/managerdefault/Ports.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -14,15 +14,25 @@
  * limitations under the License.
  */
 
+#pragma once
+
+#include <utils/String8.h>
+#include <utils/Vector.h>
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <system/audio.h>
+#include <cutils/config_utils.h>
+
 namespace android {
 
 class HwModule;
+class AudioGain;
 
-class AudioPort: public virtual RefBase
+class AudioPort : public virtual RefBase
 {
 public:
     AudioPort(const String8& name, audio_port_type_t type,
-            audio_port_role_t role, const sp<HwModule>& module);
+              audio_port_role_t role, const sp<HwModule>& module);
     virtual ~AudioPort() {}
 
     audio_port_handle_t getHandle() { return mId; }
@@ -30,6 +40,8 @@
     void attach(const sp<HwModule>& module);
     bool isAttached() { return mId != 0; }
 
+    static audio_port_handle_t getNextUniqueId();
+
     virtual void toAudioPort(struct audio_port *port) const;
 
     void importAudioPort(const sp<AudioPort> port);
@@ -63,6 +75,8 @@
     static const audio_format_t sPcmFormatCompareTable[];
     static int compareFormats(audio_format_t format1, audio_format_t format2);
 
+    audio_module_handle_t getModuleHandle() const;
+
     void dump(int fd, int spaces) const;
 
     String8           mName;
@@ -86,9 +100,12 @@
     // and a unique ID for identifying a port to the (upcoming) selection API,
     // and its relationship to the mId in AudioOutputDescriptor and AudioInputDescriptor.
     audio_port_handle_t mId;
+
+private:
+    static volatile int32_t mNextUniqueId;
 };
 
-class AudioPortConfig: public virtual RefBase
+class AudioPortConfig : public virtual RefBase
 {
 public:
     AudioPortConfig();
@@ -105,18 +122,4 @@
     struct audio_gain_config mGain;
 };
 
-
-class AudioPatch: public RefBase
-{
-public:
-    AudioPatch(audio_patch_handle_t handle, const struct audio_patch *patch, uid_t uid);
-
-    status_t dump(int fd, int spaces, int index) const;
-
-    audio_patch_handle_t mHandle;
-    struct audio_patch mPatch;
-    uid_t mUid;
-    audio_patch_handle_t mAfPatchHandle;
-};
-
 }; // namespace android
diff --git a/services/audiopolicy/managerdefault/ConfigParsingUtils.h b/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
similarity index 77%
rename from services/audiopolicy/managerdefault/ConfigParsingUtils.h
rename to services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
index b2d9763..53cb4a3 100644
--- a/services/audiopolicy/managerdefault/ConfigParsingUtils.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
@@ -14,6 +14,19 @@
  * limitations under the License.
  */
 
+#pragma once
+
+#include "DeviceDescriptor.h"
+#include "HwModule.h"
+#include "audio_policy_conf.h"
+#include <system/audio.h>
+#include <utils/Log.h>
+#include <utils/Vector.h>
+#include <utils/SortedVector.h>
+#include <cutils/config_utils.h>
+#include <utils/RefBase.h>
+#include <system/audio_policy.h>
+
 namespace android {
 
 // ----------------------------------------------------------------------------
@@ -156,6 +169,32 @@
     static uint32_t parseOutputFlagNames(char *name);
     static uint32_t parseInputFlagNames(char *name);
     static audio_devices_t parseDeviceNames(char *name);
+
+    static void loadHwModules(cnode *root, HwModuleCollection &hwModules,
+                              DeviceVector &availableInputDevices,
+                              DeviceVector &availableOutputDevices,
+                              sp<DeviceDescriptor> &defaultOutputDevices,
+                              bool &isSpeakerDrcEnabled);
+
+    static void loadGlobalConfig(cnode *root, const sp<HwModule>& module,
+                                 DeviceVector &availableInputDevices,
+                                 DeviceVector &availableOutputDevices,
+                                 sp<DeviceDescriptor> &defaultOutputDevices,
+                                 bool &isSpeakerDrcEnabled);
+
+    static status_t loadAudioPolicyConfig(const char *path,
+                                          HwModuleCollection &hwModules,
+                                          DeviceVector &availableInputDevices,
+                                          DeviceVector &availableOutputDevices,
+                                          sp<DeviceDescriptor> &defaultOutputDevices,
+                                          bool &isSpeakerDrcEnabled);
+
+private:
+    static void loadHwModule(cnode *root, HwModuleCollection &hwModules,
+                             DeviceVector &availableInputDevices,
+                             DeviceVector &availableOutputDevices,
+                             sp<DeviceDescriptor> &defaultOutputDevices,
+                             bool &isSpeakerDrcEnabled);
 };
 
 }; // namespace android
diff --git a/services/audiopolicy/managerdefault/Devices.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
similarity index 71%
rename from services/audiopolicy/managerdefault/Devices.h
rename to services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index af2fbda..d15f6b4 100644
--- a/services/audiopolicy/managerdefault/Devices.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -14,12 +14,19 @@
  * limitations under the License.
  */
 
+#pragma once
+
+#include "AudioPort.h"
+#include <utils/Errors.h>
+#include <utils/String8.h>
+#include <utils/SortedVector.h>
+#include <cutils/config_utils.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
+
 namespace android {
 
-class AudioPort;
-class AudioPortConfig;
-
-class DeviceDescriptor: public AudioPort, public AudioPortConfig
+class DeviceDescriptor : public AudioPort, public AudioPortConfig
 {
 public:
     DeviceDescriptor(const String8& name, audio_devices_t type);
@@ -37,12 +44,18 @@
     virtual void loadGains(cnode *root);
     virtual void toAudioPort(struct audio_port *port) const;
 
+    audio_devices_t type() const { return mDeviceType; }
     status_t dump(int fd, int spaces, int index) const;
 
-    audio_devices_t mDeviceType;
     String8 mAddress;
+    audio_port_handle_t mId;
 
     static String8  emptyNameStr;
+
+private:
+    audio_devices_t mDeviceType;
+
+friend class DeviceVector;
 };
 
 class DeviceVector : public SortedVector< sp<DeviceDescriptor> >
@@ -50,9 +63,9 @@
 public:
     DeviceVector() : SortedVector(), mDeviceTypes(AUDIO_DEVICE_NONE) {}
 
-    ssize_t         add(const sp<DeviceDescriptor>& item);
-    ssize_t         remove(const sp<DeviceDescriptor>& item);
-    ssize_t         indexOf(const sp<DeviceDescriptor>& item) const;
+    ssize_t add(const sp<DeviceDescriptor>& item);
+    ssize_t remove(const sp<DeviceDescriptor>& item);
+    ssize_t indexOf(const sp<DeviceDescriptor>& item) const;
 
     audio_devices_t types() const { return mDeviceTypes; }
 
@@ -63,8 +76,13 @@
     DeviceVector getDevicesFromType(audio_devices_t types) const;
     sp<DeviceDescriptor> getDeviceFromId(audio_port_handle_t id) const;
     sp<DeviceDescriptor> getDeviceFromName(const String8& name) const;
-    DeviceVector getDevicesFromTypeAddr(audio_devices_t type, String8 address)
-    const;
+    DeviceVector getDevicesFromTypeAddr(audio_devices_t type, String8 address) const;
+
+    audio_devices_t getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
+
+    audio_policy_dev_state_t getDeviceConnectionState(const sp<DeviceDescriptor> &devDesc) const;
+
+    status_t dump(int fd, const String8 &direction) const;
 
 private:
     void refreshTypes();
diff --git a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
new file mode 100644
index 0000000..c9783a1
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <RoutingStrategy.h>
+#include <hardware/audio_effect.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+
+namespace android {
+
+
+class EffectDescriptor : public RefBase
+{
+public:
+    status_t dump(int fd);
+
+    int mIo;                // io the effect is attached to
+    routing_strategy mStrategy; // routing strategy the effect is associated to
+    int mSession;               // audio session the effect is on
+    effect_descriptor_t mDesc;  // effect descriptor
+    bool mEnabled;              // enabled state: CPU load being used or not
+};
+
+class EffectDescriptorCollection : public KeyedVector<int, sp<EffectDescriptor> >
+{
+public:
+    EffectDescriptorCollection();
+
+    status_t registerEffect(const effect_descriptor_t *desc, audio_io_handle_t io,
+                            uint32_t strategy, int session, int id);
+    status_t unregisterEffect(int id);
+    status_t setEffectEnabled(int id, bool enabled);
+    uint32_t getMaxEffectsCpuLoad() const;
+    uint32_t getMaxEffectsMemory() const;
+    bool isNonOffloadableEffectEnabled();
+
+    status_t dump(int fd);
+
+private:
+    status_t setEffectEnabled(const sp<EffectDescriptor> &effectDesc, bool enabled);
+
+    uint32_t mTotalEffectsCpuLoad; // current CPU load used by effects
+    uint32_t mTotalEffectsMemory;  // current memory used by effects
+
+    /**
+     * Maximum CPU load allocated to audio effects in 0.1 MIPS (ARMv5TE, 0 WS memory) units
+     */
+    static const uint32_t MAX_EFFECTS_CPU_LOAD = 1000;
+    /**
+     * Maximum memory allocated to audio effects in KB
+     */
+    static const uint32_t MAX_EFFECTS_MEMORY = 512;
+};
+
+}; // namespace android
diff --git a/services/audiopolicy/managerdefault/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
similarity index 69%
rename from services/audiopolicy/managerdefault/HwModule.h
rename to services/audiopolicy/common/managerdefinitions/include/HwModule.h
index f814dd9..92c3ea2 100644
--- a/services/audiopolicy/managerdefault/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -14,8 +14,20 @@
  * limitations under the License.
  */
 
+#pragma once
+
+#include "DeviceDescriptor.h"
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <utils/Errors.h>
+#include <utils/Vector.h>
+#include <system/audio.h>
+#include <cutils/config_utils.h>
+
 namespace android {
 
+class IOProfile;
+
 class HwModule : public RefBase
 {
 public:
@@ -33,6 +45,8 @@
             audio_devices_t device, String8 address);
     status_t removeInputProfile(String8 name);
 
+    audio_module_handle_t getHandle() const { return mHandle; }
+
     void dump(int fd);
 
     const char *const        mName; // base name of the audio HW module (primary, a2dp ...)
@@ -43,4 +57,18 @@
     DeviceVector             mDeclaredDevices; // devices declared in audio_policy.conf
 };
 
+class HwModuleCollection : public Vector< sp<HwModule> >
+{
+public:
+    sp<HwModule> getModuleFromName(const char *name) const;
+
+    sp <HwModule> getModuleForDevice(audio_devices_t device) const;
+
+    sp<DeviceDescriptor>  getDeviceDescriptor(const audio_devices_t device,
+                                              const char *device_address,
+                                              const char *device_name) const;
+
+    status_t dump(int fd) const;
+};
+
 }; // namespace android
diff --git a/services/audiopolicy/managerdefault/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
similarity index 94%
rename from services/audiopolicy/managerdefault/IOProfile.h
rename to services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index 3317969..095e759 100644
--- a/services/audiopolicy/managerdefault/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -14,6 +14,13 @@
  * limitations under the License.
  */
 
+#pragma once
+
+#include "AudioPort.h"
+#include "DeviceDescriptor.h"
+#include <utils/String8.h>
+#include <system/audio.h>
+
 namespace android {
 
 class HwModule;
diff --git a/services/audiopolicy/managerdefault/ApmImplDefinitions.h b/services/audiopolicy/common/managerdefinitions/include/SoundTriggerSession.h
similarity index 63%
copy from services/audiopolicy/managerdefault/ApmImplDefinitions.h
copy to services/audiopolicy/common/managerdefinitions/include/SoundTriggerSession.h
index 620979b..420e6d7 100644
--- a/services/audiopolicy/managerdefault/ApmImplDefinitions.h
+++ b/services/audiopolicy/common/managerdefinitions/include/SoundTriggerSession.h
@@ -14,19 +14,20 @@
  * limitations under the License.
  */
 
+#pragma once
+
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+
 namespace android {
 
-enum routing_strategy {
-    STRATEGY_MEDIA,
-    STRATEGY_PHONE,
-    STRATEGY_SONIFICATION,
-    STRATEGY_SONIFICATION_RESPECTFUL,
-    STRATEGY_DTMF,
-    STRATEGY_ENFORCED_AUDIBLE,
-    STRATEGY_TRANSMITTED_THROUGH_SPEAKER,
-    STRATEGY_ACCESSIBILITY,
-    STRATEGY_REROUTING,
-    NUM_STRATEGIES
+class SoundTriggerSessionCollection : public DefaultKeyedVector<audio_session_t, audio_io_handle_t>
+{
+public:
+    status_t releaseSession(audio_session_t session);
+
+    status_t acquireSession(audio_session_t session, audio_io_handle_t ioHandle);
 };
 
-}; //namespace android
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
new file mode 100644
index 0000000..84db5ab
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <Volume.h>
+#include <utils/KeyedVector.h>
+#include <utils/StrongPointer.h>
+#include <utils/SortedVector.h>
+#include <hardware/audio.h>
+
+namespace android {
+
+// stream descriptor used for volume control
+class StreamDescriptor
+{
+public:
+    StreamDescriptor();
+
+    int getVolumeIndex(audio_devices_t device) const;
+    bool canBeMuted() const { return mCanBeMuted; }
+    void clearCurrentVolumeIndex();
+    void addCurrentVolumeIndex(audio_devices_t device, int index);
+    int getVolumeIndexMin() const { return mIndexMin; }
+    int getVolumeIndexMax() const { return mIndexMax; }
+    void setVolumeIndexMin(int volIndexMin);
+    void setVolumeIndexMax(int volIndexMax);
+
+    void dump(int fd) const;
+
+    void setVolumeCurvePoint(Volume::device_category deviceCategory, const VolumeCurvePoint *point);
+    const VolumeCurvePoint *getVolumeCurvePoint(Volume::device_category deviceCategory) const
+    {
+        return mVolumeCurve[deviceCategory];
+    }
+
+private:
+    const VolumeCurvePoint *mVolumeCurve[Volume::DEVICE_CATEGORY_CNT];
+    KeyedVector<audio_devices_t, int> mIndexCur; /**< current volume index per device. */
+    int mIndexMin; /**< min volume index. */
+    int mIndexMax; /**< max volume index. */
+    bool mCanBeMuted; /**< true if the stream can be muted. */
+};
+
+/**
+ * stream descriptors collection for volume control
+ */
+class StreamDescriptorCollection : public DefaultKeyedVector<audio_stream_type_t, StreamDescriptor>
+{
+public:
+    StreamDescriptorCollection();
+
+    void clearCurrentVolumeIndex(audio_stream_type_t stream);
+    void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device, int index);
+
+    bool canBeMuted(audio_stream_type_t stream);
+
+    status_t dump(int fd) const;
+
+    void setVolumeCurvePoint(audio_stream_type_t stream,
+                             Volume::device_category deviceCategory,
+                             const VolumeCurvePoint *point);
+
+    const VolumeCurvePoint *getVolumeCurvePoint(audio_stream_type_t stream,
+                                                Volume::device_category deviceCategory) const;
+
+    void setVolumeIndexMin(audio_stream_type_t stream, int volIndexMin);
+    void setVolumeIndexMax(audio_stream_type_t stream, int volIndexMax);
+
+};
+
+}; // namespace android
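
A hypothetical sketch (not part of the patch) of how the per-device volume indexes above are meant to be used:

    // Store independent indexes for speaker and headset playback of the music stream,
    // then read the speaker index back.
    static int setMusicIndexes(StreamDescriptorCollection &streams, int speakerIdx, int headsetIdx)
    {
        streams.addCurrentVolumeIndex(AUDIO_STREAM_MUSIC, AUDIO_DEVICE_OUT_SPEAKER, speakerIdx);
        streams.addCurrentVolumeIndex(AUDIO_STREAM_MUSIC, AUDIO_DEVICE_OUT_WIRED_HEADSET, headsetIdx);
        return streams.valueFor(AUDIO_STREAM_MUSIC).getVolumeIndex(AUDIO_DEVICE_OUT_SPEAKER);
    }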
diff --git a/services/audiopolicy/managerdefault/audio_policy_conf.h b/services/audiopolicy/common/managerdefinitions/include/audio_policy_conf.h
similarity index 91%
rename from services/audiopolicy/managerdefault/audio_policy_conf.h
rename to services/audiopolicy/common/managerdefinitions/include/audio_policy_conf.h
index 2535a67..a393e3b 100644
--- a/services/audiopolicy/managerdefault/audio_policy_conf.h
+++ b/services/audiopolicy/common/managerdefinitions/include/audio_policy_conf.h
@@ -14,9 +14,7 @@
  * limitations under the License.
  */
 
-
-#ifndef ANDROID_AUDIO_POLICY_CONF_H
-#define ANDROID_AUDIO_POLICY_CONF_H
+#pragma once
 
 
 /////////////////////////////////////////////////
@@ -53,9 +51,9 @@
                                     // "formats" in outputs descriptors indicating that supported
                                     // values should be queried after opening the output.
 
-#define DEVICES_TAG "devices"
-#define DEVICE_TYPE "type"
-#define DEVICE_ADDRESS "address"
+#define APM_DEVICES_TAG "devices"
+#define APM_DEVICE_TYPE "type"
+#define APM_DEVICE_ADDRESS "address"
 
 #define MIXERS_TAG "mixers"
 #define MIXER_TYPE "type"
@@ -71,7 +69,3 @@
 #define GAIN_STEP_VALUE "step_value_mB"
 #define GAIN_MIN_RAMP_MS "min_ramp_ms"
 #define GAIN_MAX_RAMP_MS "max_ramp_ms"
-
-
-
-#endif  // ANDROID_AUDIO_POLICY_CONF_H
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
new file mode 100644
index 0000000..fc7b0cc
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioGain"
+//#define LOG_NDEBUG 0
+
+//#define VERY_VERBOSE_LOGGING
+#ifdef VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+#include "AudioGain.h"
+#include "StreamDescriptor.h"
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+#include <math.h>
+
+namespace android {
+
+AudioGain::AudioGain(int index, bool useInChannelMask)
+{
+    mIndex = index;
+    mUseInChannelMask = useInChannelMask;
+    memset(&mGain, 0, sizeof(struct audio_gain));
+}
+
+void AudioGain::getDefaultConfig(struct audio_gain_config *config)
+{
+    config->index = mIndex;
+    config->mode = mGain.mode;
+    config->channel_mask = mGain.channel_mask;
+    if ((mGain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
+        config->values[0] = mGain.default_value;
+    } else {
+        uint32_t numValues;
+        if (mUseInChannelMask) {
+            numValues = audio_channel_count_from_in_mask(mGain.channel_mask);
+        } else {
+            numValues = audio_channel_count_from_out_mask(mGain.channel_mask);
+        }
+        for (size_t i = 0; i < numValues; i++) {
+            config->values[i] = mGain.default_value;
+        }
+    }
+    if ((mGain.mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
+        config->ramp_duration_ms = mGain.min_ramp_ms;
+    }
+}
+
+status_t AudioGain::checkConfig(const struct audio_gain_config *config)
+{
+    if ((config->mode & ~mGain.mode) != 0) {
+        return BAD_VALUE;
+    }
+    if ((config->mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
+        if ((config->values[0] < mGain.min_value) ||
+                    (config->values[0] > mGain.max_value)) {
+            return BAD_VALUE;
+        }
+    } else {
+        if ((config->channel_mask & ~mGain.channel_mask) != 0) {
+            return BAD_VALUE;
+        }
+        uint32_t numValues;
+        if (mUseInChannelMask) {
+            numValues = audio_channel_count_from_in_mask(config->channel_mask);
+        } else {
+            numValues = audio_channel_count_from_out_mask(config->channel_mask);
+        }
+        for (size_t i = 0; i < numValues; i++) {
+            if ((config->values[i] < mGain.min_value) ||
+                    (config->values[i] > mGain.max_value)) {
+                return BAD_VALUE;
+            }
+        }
+    }
+    if ((config->mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
+        if ((config->ramp_duration_ms < mGain.min_ramp_ms) ||
+                    (config->ramp_duration_ms > mGain.max_ramp_ms)) {
+            return BAD_VALUE;
+        }
+    }
+    return NO_ERROR;
+}
+
+void AudioGain::dump(int fd, int spaces, int index) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "%*sGain %d:\n", spaces, "", index+1);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- mode: %08x\n", spaces, "", mGain.mode);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- channel_mask: %08x\n", spaces, "", mGain.channel_mask);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- min_value: %d mB\n", spaces, "", mGain.min_value);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- max_value: %d mB\n", spaces, "", mGain.max_value);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- default_value: %d mB\n", spaces, "", mGain.default_value);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- step_value: %d mB\n", spaces, "", mGain.step_value);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- min_ramp_ms: %d ms\n", spaces, "", mGain.min_ramp_ms);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- max_ramp_ms: %d ms\n", spaces, "", mGain.max_ramp_ms);
+    result.append(buffer);
+
+    write(fd, result.string(), result.size());
+}
+
+}; // namespace android
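
A hypothetical sketch (not part of the patch) of the intended getDefaultConfig()/checkConfig() flow when applying a joint gain value:

    // Start from the port defaults, request a single joint value in millibels, and let
    // checkConfig() reject values outside [min_value, max_value] or unsupported modes.
    static status_t requestJointGain(const sp<AudioGain> &gain, int valueMb,
                                     struct audio_gain_config *config)
    {
        gain->getDefaultConfig(config);
        config->mode = AUDIO_GAIN_MODE_JOINT;
        config->values[0] = valueMb;
        return gain->checkConfig(config);
    }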
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
new file mode 100644
index 0000000..fa66728
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioInputDescriptor"
+//#define LOG_NDEBUG 0
+
+#include "AudioInputDescriptor.h"
+#include "IOProfile.h"
+#include "AudioGain.h"
+#include "HwModule.h"
+#include <media/AudioPolicy.h>
+#include <policy.h>
+
+namespace android {
+
+AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile)
+    : mId(0), mIoHandle(0),
+      mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL), mPatchHandle(0), mRefCount(0),
+      mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile), mIsSoundTrigger(false)
+{
+    if (profile != NULL) {
+        mSamplingRate = profile->pickSamplingRate();
+        mFormat = profile->pickFormat();
+        mChannelMask = profile->pickChannelMask();
+        if (profile->mGains.size() > 0) {
+            profile->mGains[0]->getDefaultConfig(&mGain);
+        }
+    }
+}
+
+void AudioInputDescriptor::setIoHandle(audio_io_handle_t ioHandle)
+{
+    mId = AudioPort::getNextUniqueId();
+    mIoHandle = ioHandle;
+}
+
+audio_module_handle_t AudioInputDescriptor::getModuleHandle() const
+{
+    return mProfile->getModuleHandle();
+}
+
+void AudioInputDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
+                                             const struct audio_port_config *srcConfig) const
+{
+    ALOG_ASSERT(mProfile != 0,
+                "toAudioPortConfig() called on input with null profile %d", mIoHandle);
+    dstConfig->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK|
+                            AUDIO_PORT_CONFIG_FORMAT|AUDIO_PORT_CONFIG_GAIN;
+    if (srcConfig != NULL) {
+        dstConfig->config_mask |= srcConfig->config_mask;
+    }
+
+    AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
+
+    dstConfig->id = mId;
+    dstConfig->role = AUDIO_PORT_ROLE_SINK;
+    dstConfig->type = AUDIO_PORT_TYPE_MIX;
+    dstConfig->ext.mix.hw_module = mProfile->mModule->mHandle;
+    dstConfig->ext.mix.handle = mIoHandle;
+    dstConfig->ext.mix.usecase.source = mInputSource;
+}
+
+void AudioInputDescriptor::toAudioPort(struct audio_port *port) const
+{
+    ALOG_ASSERT(mProfile != 0, "toAudioPort() called on input with null profile %d", mIoHandle);
+
+    mProfile->toAudioPort(port);
+    port->id = mId;
+    toAudioPortConfig(&port->active_config);
+    port->ext.mix.hw_module = mProfile->mModule->mHandle;
+    port->ext.mix.handle = mIoHandle;
+    port->ext.mix.latency_class = AUDIO_LATENCY_NORMAL;
+}
+
+status_t AudioInputDescriptor::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, " ID: %d\n", mId);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Format: %d\n", mFormat);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Channels: %08x\n", mChannelMask);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Devices %08x\n", mDevice);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Ref Count %d\n", mRefCount);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Open Ref Count %d\n", mOpenRefCount);
+    result.append(buffer);
+
+    write(fd, result.string(), result.size());
+
+    return NO_ERROR;
+}
+
+bool AudioInputCollection::isSourceActive(audio_source_t source) const
+{
+    for (size_t i = 0; i < size(); i++) {
+        const sp<AudioInputDescriptor>  inputDescriptor = valueAt(i);
+        if (inputDescriptor->mRefCount == 0) {
+            continue;
+        }
+        if (inputDescriptor->mInputSource == (int)source) {
+            return true;
+        }
+    }
+    return false;
+}
+
+sp<AudioInputDescriptor> AudioInputCollection::getInputFromId(audio_port_handle_t id) const
+{
+    sp<AudioInputDescriptor> inputDesc = NULL;
+    for (size_t i = 0; i < size(); i++) {
+        inputDesc = valueAt(i);
+        if (inputDesc->mId == id) {
+            break;
+        }
+    }
+    return inputDesc;
+}
+
+uint32_t AudioInputCollection::activeInputsCount() const
+{
+    uint32_t count = 0;
+    for (size_t i = 0; i < size(); i++) {
+        const sp<AudioInputDescriptor>  desc = valueAt(i);
+        if (desc->mRefCount > 0) {
+            count++;
+        }
+    }
+    return count;
+}
+
+audio_io_handle_t AudioInputCollection::getActiveInput(bool ignoreVirtualInputs)
+{
+    for (size_t i = 0; i < size(); i++) {
+        const sp<AudioInputDescriptor>  input_descriptor = valueAt(i);
+        if ((input_descriptor->mRefCount > 0)
+                && (!ignoreVirtualInputs || !is_virtual_input_device(input_descriptor->mDevice))) {
+            return keyAt(i);
+        }
+    }
+    return 0;
+}
+
+audio_devices_t AudioInputCollection::getSupportedDevices(audio_io_handle_t handle) const
+{
+    sp<AudioInputDescriptor> inputDesc = valueFor(handle);
+    audio_devices_t devices = inputDesc->mProfile->mSupportedDevices.types();
+    return devices;
+}
+
+status_t AudioInputCollection::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\nInputs dump:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, "- Input %d dump:\n", keyAt(i));
+        write(fd, buffer, strlen(buffer));
+        valueAt(i)->dump(fd);
+    }
+
+    return NO_ERROR;
+}
+
+}; //namespace android
diff --git a/services/audiopolicy/managerdefault/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
similarity index 61%
rename from services/audiopolicy/managerdefault/AudioOutputDescriptor.cpp
rename to services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 4b85972..cdb5b51 100644
--- a/services/audiopolicy/managerdefault/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -17,12 +17,19 @@
 #define LOG_TAG "APM::AudioOutputDescriptor"
 //#define LOG_NDEBUG 0
 
-#include "AudioPolicyManager.h"
+#include "AudioOutputDescriptor.h"
+#include "IOProfile.h"
+#include "AudioGain.h"
+#include "HwModule.h"
+#include <media/AudioPolicy.h>
+
+// A device mask for all audio output devices that are considered "remote" when evaluating
+// active output devices in isStreamActiveRemotely()
+#define APM_AUDIO_OUT_DEVICE_REMOTE_ALL  AUDIO_DEVICE_OUT_REMOTE_SUBMIX
 
 namespace android {
 
-AudioOutputDescriptor::AudioOutputDescriptor(
-        const sp<IOProfile>& profile)
+AudioOutputDescriptor::AudioOutputDescriptor(const sp<IOProfile>& profile)
     : mId(0), mIoHandle(0), mLatency(0),
     mFlags((audio_output_flags_t)0), mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL),
     mPatchHandle(0),
@@ -49,6 +56,11 @@
     }
 }
 
+audio_module_handle_t AudioOutputDescriptor::getModuleHandle() const
+{
+    return mProfile->getModuleHandle();
+}
+
 audio_devices_t AudioOutputDescriptor::device() const
 {
     if (isDuplicated()) {
@@ -58,6 +70,12 @@
     }
 }
 
+void AudioOutputDescriptor::setIoHandle(audio_io_handle_t ioHandle)
+{
+    mId = AudioPort::getNextUniqueId();
+    mIoHandle = ioHandle;
+}
+
 uint32_t AudioOutputDescriptor::latency()
 {
     if (isDuplicated()) {
@@ -108,23 +126,15 @@
 
 bool AudioOutputDescriptor::isActive(uint32_t inPastMs) const
 {
-    return isStrategyActive(NUM_STRATEGIES, inPastMs);
-}
-
-bool AudioOutputDescriptor::isStrategyActive(routing_strategy strategy,
-                                                                       uint32_t inPastMs,
-                                                                       nsecs_t sysTime) const
-{
-    if ((sysTime == 0) && (inPastMs != 0)) {
+    nsecs_t sysTime = 0;
+    if (inPastMs != 0) {
         sysTime = systemTime();
     }
     for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
         if (i == AUDIO_STREAM_PATCH) {
             continue;
         }
-        if (((AudioPolicyManager::getStrategy((audio_stream_type_t)i) == strategy) ||
-                (NUM_STRATEGIES == strategy)) &&
-                isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) {
+        if (isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) {
             return true;
         }
     }
@@ -132,8 +142,8 @@
 }
 
 bool AudioOutputDescriptor::isStreamActive(audio_stream_type_t stream,
-                                                                       uint32_t inPastMs,
-                                                                       nsecs_t sysTime) const
+                                           uint32_t inPastMs,
+                                           nsecs_t sysTime) const
 {
     if (mRefCount[stream] != 0) {
         return true;
@@ -216,6 +226,108 @@
     return NO_ERROR;
 }
 
+bool AudioOutputCollection::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
+{
+    nsecs_t sysTime = systemTime();
+    for (size_t i = 0; i < this->size(); i++) {
+        const sp<AudioOutputDescriptor> outputDesc = this->valueAt(i);
+        if (outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
+            return true;
+        }
+    }
+    return false;
+}
 
+bool AudioOutputCollection::isStreamActiveRemotely(audio_stream_type_t stream,
+                                                   uint32_t inPastMs) const
+{
+    nsecs_t sysTime = systemTime();
+    for (size_t i = 0; i < size(); i++) {
+        const sp<AudioOutputDescriptor> outputDesc = valueAt(i);
+        if (((outputDesc->device() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) != 0) &&
+                outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
+            // do not consider re-routing (when the output is going to a dynamic policy)
+            // as "remote playback"
+            if (outputDesc->mPolicyMix == NULL) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+audio_io_handle_t AudioOutputCollection::getA2dpOutput() const
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioOutputDescriptor> outputDesc = valueAt(i);
+        if (!outputDesc->isDuplicated() && outputDesc->device() & AUDIO_DEVICE_OUT_ALL_A2DP) {
+            return this->keyAt(i);
+        }
+    }
+    return 0;
+}
+
+sp<AudioOutputDescriptor> AudioOutputCollection::getPrimaryOutput() const
+{
+    for (size_t i = 0; i < size(); i++) {
+        const sp<AudioOutputDescriptor> outputDesc = valueAt(i);
+        if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) {
+            return outputDesc;
+        }
+    }
+    return NULL;
+}
+
+sp<AudioOutputDescriptor> AudioOutputCollection::getOutputFromId(audio_port_handle_t id) const
+{
+    sp<AudioOutputDescriptor> outputDesc = NULL;
+    for (size_t i = 0; i < size(); i++) {
+        outputDesc = valueAt(i);
+        if (outputDesc->mId == id) {
+            break;
+        }
+    }
+    return outputDesc;
+}
+
+bool AudioOutputCollection::isAnyOutputActive(audio_stream_type_t streamToIgnore) const
+{
+    for (size_t s = 0 ; s < AUDIO_STREAM_CNT ; s++) {
+        if (s == (size_t) streamToIgnore) {
+            continue;
+        }
+        for (size_t i = 0; i < size(); i++) {
+            const sp<AudioOutputDescriptor> outputDesc = valueAt(i);
+            if (outputDesc->mRefCount[s] != 0) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+audio_devices_t AudioOutputCollection::getSupportedDevices(audio_io_handle_t handle) const
+{
+    sp<AudioOutputDescriptor> outputDesc = valueFor(handle);
+    audio_devices_t devices = outputDesc->mProfile->mSupportedDevices.types();
+    return devices;
+}
+
+
+status_t AudioOutputCollection::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\nOutputs dump:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, "- Output %d dump:\n", keyAt(i));
+        write(fd, buffer, strlen(buffer));
+        valueAt(i)->dump(fd);
+    }
+
+    return NO_ERROR;
+}
 
 }; //namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
new file mode 100644
index 0000000..3a317fa
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPatch"
+//#define LOG_NDEBUG 0
+
+#include "AudioPatch.h"
+#include "AudioGain.h"
+#include "ConfigParsingUtils.h"
+#include <cutils/log.h>
+#include <utils/String8.h>
+
+namespace android {
+
+int32_t volatile AudioPatch::mNextUniqueId = 1;
+
+AudioPatch::AudioPatch(const struct audio_patch *patch, uid_t uid) :
+    mHandle(static_cast<audio_patch_handle_t>(android_atomic_inc(&mNextUniqueId))),
+    mPatch(*patch),
+    mUid(uid),
+    mAfPatchHandle(0)
+{
+}
+
+status_t AudioPatch::dump(int fd, int spaces, int index) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "%*sAudio patch %d:\n", spaces, "", index+1);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- handle: %2d\n", spaces, "", mHandle);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- audio flinger handle: %2d\n", spaces, "", mAfPatchHandle);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- owner uid: %2d\n", spaces, "", mUid);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- %d sources:\n", spaces, "", mPatch.num_sources);
+    result.append(buffer);
+    for (size_t i = 0; i < mPatch.num_sources; i++) {
+        if (mPatch.sources[i].type == AUDIO_PORT_TYPE_DEVICE) {
+            snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
+                     mPatch.sources[i].id, ConfigParsingUtils::enumToString(sDeviceNameToEnumTable,
+                                                        ARRAY_SIZE(sDeviceNameToEnumTable),
+                                                        mPatch.sources[i].ext.device.type));
+        } else {
+            snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
+                     mPatch.sources[i].id, mPatch.sources[i].ext.mix.handle);
+        }
+        result.append(buffer);
+    }
+    snprintf(buffer, SIZE, "%*s- %d sinks:\n", spaces, "", mPatch.num_sinks);
+    result.append(buffer);
+    for (size_t i = 0; i < mPatch.num_sinks; i++) {
+        if (mPatch.sinks[i].type == AUDIO_PORT_TYPE_DEVICE) {
+            snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
+                     mPatch.sinks[i].id, ConfigParsingUtils::enumToString(sDeviceNameToEnumTable,
+                                                        ARRAY_SIZE(sDeviceNameToEnumTable),
+                                                        mPatch.sinks[i].ext.device.type));
+        } else {
+            snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
+                     mPatch.sinks[i].id, mPatch.sinks[i].ext.mix.handle);
+        }
+        result.append(buffer);
+    }
+
+    write(fd, result.string(), result.size());
+    return NO_ERROR;
+}
+
+status_t AudioPatchCollection::addAudioPatch(audio_patch_handle_t handle,
+                                             const sp<AudioPatch>& patch)
+{
+    ssize_t index = indexOfKey(handle);
+
+    if (index >= 0) {
+        ALOGW("addAudioPatch() patch %d already in", handle);
+        return ALREADY_EXISTS;
+    }
+    add(handle, patch);
+    ALOGV("addAudioPatch() handle %d af handle %d num_sources %d num_sinks %d source handle %d"
+            "sink handle %d",
+          handle, patch->mAfPatchHandle, patch->mPatch.num_sources, patch->mPatch.num_sinks,
+          patch->mPatch.sources[0].id, patch->mPatch.sinks[0].id);
+    return NO_ERROR;
+}
+
+status_t AudioPatchCollection::removeAudioPatch(audio_patch_handle_t handle)
+{
+    ssize_t index = indexOfKey(handle);
+
+    if (index < 0) {
+        ALOGW("removeAudioPatch() patch %d not in", handle);
+        return ALREADY_EXISTS;
+    }
+    ALOGV("removeAudioPatch() handle %d af handle %d", handle, valueAt(index)->mAfPatchHandle);
+    removeItemsAt(index);
+    return NO_ERROR;
+}
+
+status_t AudioPatchCollection::listAudioPatches(unsigned int *num_patches,
+                                                struct audio_patch *patches) const
+{
+    if (num_patches == NULL || (*num_patches != 0 && patches == NULL)) {
+        return BAD_VALUE;
+    }
+    ALOGV("listAudioPatches() num_patches %d patches %p available patches %zu",
+          *num_patches, patches, size());
+    if (patches == NULL) {
+        *num_patches = 0;
+    }
+
+    size_t patchesWritten = 0;
+    size_t patchesMax = *num_patches;
+    for (size_t i = 0; i  < size() && patchesWritten < patchesMax; i++) {
+        const sp<AudioPatch>  patch = valueAt(i);
+        patches[patchesWritten] = patch->mPatch;
+        patches[patchesWritten++].id = patch->mHandle;
+        ALOGV("listAudioPatches() patch %zu num_sources %d num_sinks %d",
+              i, patch->mPatch.num_sources, patch->mPatch.num_sinks);
+    }
+    *num_patches = size();
+
+    ALOGV("listAudioPatches() got %zu patches needed %d", patchesWritten, *num_patches);
+    return NO_ERROR;
+}
+
+status_t AudioPatchCollection::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    snprintf(buffer, SIZE, "\nAudio Patches:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        valueAt(i)->dump(fd, 2, i);
+    }
+    return NO_ERROR;
+}
+
+}; // namespace android
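
A hypothetical caller sketch (not part of the patch) of the two-step listAudioPatches() protocol: a first call with a NULL array returns the count, a second call fills at most that many entries:

    static status_t copyPatches(const AudioPatchCollection &patches,
                                Vector<struct audio_patch> &out)
    {
        unsigned int count = 0;
        status_t status = patches.listAudioPatches(&count, NULL);  // query the count only
        if (status != NO_ERROR) {
            return status;
        }
        out.resize(count);
        return patches.listAudioPatches(&count, out.editArray());  // fill up to 'count' patches
    }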
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
new file mode 100644
index 0000000..84a53ebd
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyMix"
+//#define LOG_NDEBUG 0
+
+#include "AudioPolicyMix.h"
+#include "HwModule.h"
+#include "AudioPort.h"
+#include "IOProfile.h"
+#include "AudioGain.h"
+#include <AudioOutputDescriptor.h>
+
+namespace android {
+
+void AudioPolicyMix::setOutput(sp<AudioOutputDescriptor> &output)
+{
+    mOutput = output;
+}
+
+const sp<AudioOutputDescriptor> &AudioPolicyMix::getOutput() const
+{
+    return mOutput;
+}
+
+void AudioPolicyMix::clearOutput()
+{
+    mOutput.clear();
+}
+
+void AudioPolicyMix::setMix(AudioMix &mix)
+{
+    mMix = mix;
+}
+
+android::AudioMix &AudioPolicyMix::getMix()
+{
+    return mMix;
+}
+
+status_t AudioPolicyMixCollection::registerMix(String8 address, AudioMix mix)
+{
+    ssize_t index = indexOfKey(address);
+    if (index >= 0) {
+        ALOGE("registerPolicyMixes(): mix for address %s already registered", address.string());
+        return BAD_VALUE;
+    }
+    sp<AudioPolicyMix> policyMix = new AudioPolicyMix();
+    policyMix->setMix(mix);
+    add(address, policyMix);
+    return NO_ERROR;
+}
+
+status_t AudioPolicyMixCollection::unregisterMix(String8 address)
+{
+    ssize_t index = indexOfKey(address);
+    if (index < 0) {
+        ALOGE("unregisterPolicyMixes(): mix for address %s not registered", address.string());
+        return BAD_VALUE;
+    }
+
+    removeItemsAt(index);
+    return NO_ERROR;
+}
+
+status_t AudioPolicyMixCollection::getAudioPolicyMix(String8 address,
+                                                     sp<AudioPolicyMix> &policyMix) const
+{
+    ssize_t index = indexOfKey(address);
+    if (index < 0) {
+        ALOGE("unregisterPolicyMixes(): mix for address %s not registered", address.string());
+        return BAD_VALUE;
+    }
+    policyMix = valueAt(index);
+    return NO_ERROR;
+}
+
+void AudioPolicyMixCollection::closeOutput(sp<AudioOutputDescriptor> &desc)
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioPolicyMix> policyMix = valueAt(i);
+        if (policyMix->getOutput() == desc) {
+            policyMix->clearOutput();
+        }
+    }
+}
+
+status_t AudioPolicyMixCollection::getOutputForAttr(audio_attributes_t attributes,
+                                                    sp<AudioOutputDescriptor> &desc)
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioPolicyMix> policyMix = valueAt(i);
+        AudioMix &mix = policyMix->getMix(); // bind a reference: mPolicyMix will point at it
+
+        if (mix.mMixType == MIX_TYPE_PLAYERS) {
+            for (size_t j = 0; j < mix.mCriteria.size(); j++) {
+                if ((RULE_MATCH_ATTRIBUTE_USAGE == mix.mCriteria[j].mRule &&
+                     mix.mCriteria[j].mAttr.mUsage == attributes.usage) ||
+                        (RULE_EXCLUDE_ATTRIBUTE_USAGE == mix.mCriteria[j].mRule &&
+                         mix.mCriteria[j].mAttr.mUsage != attributes.usage)) {
+                    desc = policyMix->getOutput();
+                    break;
+                }
+                if (strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
+                        strncmp(attributes.tags + strlen("addr="),
+                                mix.mRegistrationId.string(),
+                                AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
+                    desc = policyMix->getOutput();
+                    break;
+                }
+            }
+        } else if (mix.mMixType == MIX_TYPE_RECORDERS) {
+            if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE &&
+                    strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
+                    strncmp(attributes.tags + strlen("addr="),
+                            mix.mRegistrationId.string(),
+                            AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
+                desc = policyMix->getOutput();
+            }
+        }
+        if (desc != 0) {
+            desc->mPolicyMix = &mix;
+            return NO_ERROR;
+        }
+    }
+    return BAD_VALUE;
+}
+
+audio_devices_t AudioPolicyMixCollection::getDeviceAndMixForInputSource(audio_source_t inputSource,
+                                                                        audio_devices_t availDevices,
+                                                                        AudioMix **policyMix)
+{
+    for (size_t i = 0; i < size(); i++) {
+        AudioMix &mix = valueAt(i)->getMix(); // bind a reference: *policyMix will point at it
+
+        if (mix.mMixType != MIX_TYPE_RECORDERS) {
+            continue;
+        }
+        for (size_t j = 0; j < mix.mCriteria.size(); j++) {
+            if ((RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET == mix.mCriteria[j].mRule &&
+                    mix.mCriteria[j].mAttr.mSource == inputSource) ||
+               (RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET == mix.mCriteria[j].mRule &&
+                    mix.mCriteria[j].mAttr.mSource != inputSource)) {
+                if (availDevices & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
+                    if (policyMix != NULL) {
+                        *policyMix = &mix;
+                    }
+                    return AUDIO_DEVICE_IN_REMOTE_SUBMIX;
+                }
+                break;
+            }
+        }
+    }
+    return AUDIO_DEVICE_NONE;
+}
+
+status_t AudioPolicyMixCollection::getInputMixForAttr(audio_attributes_t attr, AudioMix *&policyMix)
+{
+    if (strncmp(attr.tags, "addr=", strlen("addr=")) != 0) {
+        return BAD_VALUE;
+    }
+    String8 address(attr.tags + strlen("addr="));
+
+    ssize_t index = indexOfKey(address);
+    if (index < 0) {
+        ALOGW("getInputForAttr() no policy for address %s", address.string());
+        return BAD_VALUE;
+    }
+    sp<AudioPolicyMix> audioPolicyMix = valueAt(index);
+    AudioMix &mix = audioPolicyMix->getMix(); // bind a reference: policyMix will point at it
+
+    if (mix.mMixType != MIX_TYPE_PLAYERS) {
+        ALOGW("getInputForAttr() bad policy mix type for address %s", address.string());
+        return BAD_VALUE;
+    }
+    policyMix = &mix;
+    return NO_ERROR;
+}
+
+}; //namespace android
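Note on the matching above: getOutputForAttr() and getInputMixForAttr() route by an "addr=<address>" prefix carried in audio_attributes_t::tags. A minimal standalone sketch of that comparison, assuming a simplified kTagsMaxSize constant in place of AUDIO_ATTRIBUTES_TAGS_MAX_SIZE:

    // Illustrative sketch only: constants and types are simplified stand-ins.
    #include <cstdio>
    #include <cstring>
    #include <string>

    static const size_t kTagsMaxSize = 256;  // stands in for AUDIO_ATTRIBUTES_TAGS_MAX_SIZE

    // Returns true when the tags string selects the mix registered at 'registeredAddress'.
    static bool tagsMatchAddress(const char *tags, const std::string &registeredAddress)
    {
        static const char kPrefix[] = "addr=";
        const size_t prefixLen = strlen(kPrefix);
        if (strncmp(tags, kPrefix, prefixLen) != 0) {
            return false;  // no explicit address routing requested
        }
        // Compare at most the remaining tag capacity, as the policy mix code does.
        return strncmp(tags + prefixLen, registeredAddress.c_str(),
                       kTagsMaxSize - prefixLen - 1) == 0;
    }

    int main()
    {
        printf("%d\n", tagsMatchAddress("addr=bus0_media", "bus0_media"));  // 1
        printf("%d\n", tagsMatchAddress("lang=en", "bus0_media"));          // 0
        return 0;
    }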
diff --git a/services/audiopolicy/managerdefault/Ports.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
similarity index 90%
rename from services/audiopolicy/managerdefault/Ports.cpp
rename to services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index 3e55cee..46a119e 100644
--- a/services/audiopolicy/managerdefault/Ports.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -14,30 +14,46 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "APM::Ports"
+#define LOG_TAG "APM::AudioPort"
 //#define LOG_NDEBUG 0
 
-#include "AudioPolicyManager.h"
-
+#include "AudioPort.h"
+#include "HwModule.h"
+#include "AudioGain.h"
+#include "ConfigParsingUtils.h"
 #include "audio_policy_conf.h"
+#include <policy.h>
 
 namespace android {
 
+int32_t volatile AudioPort::mNextUniqueId = 1;
+
 // --- AudioPort class implementation
 
 AudioPort::AudioPort(const String8& name, audio_port_type_t type,
-          audio_port_role_t role, const sp<HwModule>& module) :
+                     audio_port_role_t role, const sp<HwModule>& module) :
     mName(name), mType(type), mRole(role), mModule(module), mFlags(0), mId(0)
 {
     mUseInChannelMask = ((type == AUDIO_PORT_TYPE_DEVICE) && (role == AUDIO_PORT_ROLE_SOURCE)) ||
                     ((type == AUDIO_PORT_TYPE_MIX) && (role == AUDIO_PORT_ROLE_SINK));
 }
 
-void AudioPort::attach(const sp<HwModule>& module) {
-    mId = AudioPolicyManager::nextUniqueId();
+void AudioPort::attach(const sp<HwModule>& module)
+{
+    mId = getNextUniqueId();
     mModule = module;
 }
 
+audio_port_handle_t AudioPort::getNextUniqueId()
+{
+    return static_cast<audio_port_handle_t>(android_atomic_inc(&mNextUniqueId));
+}
+
+audio_module_handle_t AudioPort::getModuleHandle() const
+{
+    return mModule->mHandle;
+}
+
 void AudioPort::toAudioPort(struct audio_port *port) const
 {
     port->role = mRole;
@@ -785,60 +801,4 @@
     }
 }
 
-
-// --- AudioPatch class implementation
-
-AudioPatch::AudioPatch(audio_patch_handle_t handle,
-            const struct audio_patch *patch, uid_t uid) :
-                mHandle(handle), mPatch(*patch), mUid(uid), mAfPatchHandle(0)
-{}
-
-status_t AudioPatch::dump(int fd, int spaces, int index) const
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, "%*sAudio patch %d:\n", spaces, "", index+1);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- handle: %2d\n", spaces, "", mHandle);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- audio flinger handle: %2d\n", spaces, "", mAfPatchHandle);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- owner uid: %2d\n", spaces, "", mUid);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- %d sources:\n", spaces, "", mPatch.num_sources);
-    result.append(buffer);
-    for (size_t i = 0; i < mPatch.num_sources; i++) {
-        if (mPatch.sources[i].type == AUDIO_PORT_TYPE_DEVICE) {
-            snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
-                     mPatch.sources[i].id, ConfigParsingUtils::enumToString(sDeviceNameToEnumTable,
-                                                        ARRAY_SIZE(sDeviceNameToEnumTable),
-                                                        mPatch.sources[i].ext.device.type));
-        } else {
-            snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
-                     mPatch.sources[i].id, mPatch.sources[i].ext.mix.handle);
-        }
-        result.append(buffer);
-    }
-    snprintf(buffer, SIZE, "%*s- %d sinks:\n", spaces, "", mPatch.num_sinks);
-    result.append(buffer);
-    for (size_t i = 0; i < mPatch.num_sinks; i++) {
-        if (mPatch.sinks[i].type == AUDIO_PORT_TYPE_DEVICE) {
-            snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
-                     mPatch.sinks[i].id, ConfigParsingUtils::enumToString(sDeviceNameToEnumTable,
-                                                        ARRAY_SIZE(sDeviceNameToEnumTable),
-                                                        mPatch.sinks[i].ext.device.type));
-        } else {
-            snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
-                     mPatch.sinks[i].id, mPatch.sinks[i].ext.mix.handle);
-        }
-        result.append(buffer);
-    }
-
-    write(fd, result.string(), result.size());
-    return NO_ERROR;
-}
-
-
 }; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
new file mode 100644
index 0000000..fe5bc5f
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::ConfigParsingUtils"
+//#define LOG_NDEBUG 0
+
+#include "ConfigParsingUtils.h"
+#include "AudioGain.h"
+#include <hardware/audio.h>
+#include <utils/Log.h>
+#include <cutils/misc.h>
+
+namespace android {
+
+//static
+uint32_t ConfigParsingUtils::stringToEnum(const struct StringToEnum *table,
+                                              size_t size,
+                                              const char *name)
+{
+    for (size_t i = 0; i < size; i++) {
+        if (strcmp(table[i].name, name) == 0) {
+            ALOGV("stringToEnum() found %s", table[i].name);
+            return table[i].value;
+        }
+    }
+    return 0;
+}
+
+//static
+const char *ConfigParsingUtils::enumToString(const struct StringToEnum *table,
+                                              size_t size,
+                                              uint32_t value)
+{
+    for (size_t i = 0; i < size; i++) {
+        if (table[i].value == value) {
+            return table[i].name;
+        }
+    }
+    return "";
+}
+
+//static
+bool ConfigParsingUtils::stringToBool(const char *value)
+{
+    return ((strcasecmp("true", value) == 0) || (strcmp("1", value) == 0));
+}
+
+
+// --- audio_policy.conf file parsing
+//static
+uint32_t ConfigParsingUtils::parseOutputFlagNames(char *name)
+{
+    uint32_t flag = 0;
+
+    // it is OK to cast name to non const here as we are not going to use it after
+    // strtok() modifies it
+    char *flagName = strtok(name, "|");
+    while (flagName != NULL) {
+        if (strlen(flagName) != 0) {
+            flag |= ConfigParsingUtils::stringToEnum(sOutputFlagNameToEnumTable,
+                               ARRAY_SIZE(sOutputFlagNameToEnumTable),
+                               flagName);
+        }
+        flagName = strtok(NULL, "|");
+    }
+    //force direct flag if offload flag is set: offloading implies a direct output stream
+    // and all common behaviors are driven by checking only the direct flag
+    // this should normally be set appropriately in the policy configuration file
+    if ((flag & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+        flag |= AUDIO_OUTPUT_FLAG_DIRECT;
+    }
+
+    return flag;
+}
+
+//static
+uint32_t ConfigParsingUtils::parseInputFlagNames(char *name)
+{
+    uint32_t flag = 0;
+
+    // it is OK to cast name to non const here as we are not going to use it after
+    // strtok() modifies it
+    char *flagName = strtok(name, "|");
+    while (flagName != NULL) {
+        if (strlen(flagName) != 0) {
+            flag |= stringToEnum(sInputFlagNameToEnumTable,
+                               ARRAY_SIZE(sInputFlagNameToEnumTable),
+                               flagName);
+        }
+        flagName = strtok(NULL, "|");
+    }
+    return flag;
+}
+
+//static
+audio_devices_t ConfigParsingUtils::parseDeviceNames(char *name)
+{
+    uint32_t device = 0;
+
+    char *devName = strtok(name, "|");
+    while (devName != NULL) {
+        if (strlen(devName) != 0) {
+            device |= stringToEnum(sDeviceNameToEnumTable,
+                                 ARRAY_SIZE(sDeviceNameToEnumTable),
+                                 devName);
+        }
+        devName = strtok(NULL, "|");
+    }
+    return device;
+}
+
+//static
+void ConfigParsingUtils::loadHwModule(cnode *root, HwModuleCollection &hwModules,
+                                      DeviceVector &availableInputDevices,
+                                      DeviceVector &availableOutputDevices,
+                                      sp<DeviceDescriptor> &defaultOutputDevices,
+                                      bool &isSpeakerDrcEnable)
+{
+    status_t status = NAME_NOT_FOUND;
+    cnode *node;
+    sp<HwModule> module = new HwModule(root->name);
+
+    node = config_find(root, DEVICES_TAG);
+    if (node != NULL) {
+        node = node->first_child;
+        while (node) {
+            ALOGV("loadHwModule() loading device %s", node->name);
+            status_t tmpStatus = module->loadDevice(node);
+            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
+                status = tmpStatus;
+            }
+            node = node->next;
+        }
+    }
+    node = config_find(root, OUTPUTS_TAG);
+    if (node != NULL) {
+        node = node->first_child;
+        while (node) {
+            ALOGV("loadHwModule() loading output %s", node->name);
+            status_t tmpStatus = module->loadOutput(node);
+            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
+                status = tmpStatus;
+            }
+            node = node->next;
+        }
+    }
+    node = config_find(root, INPUTS_TAG);
+    if (node != NULL) {
+        node = node->first_child;
+        while (node) {
+            ALOGV("loadHwModule() loading input %s", node->name);
+            status_t tmpStatus = module->loadInput(node);
+            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
+                status = tmpStatus;
+            }
+            node = node->next;
+        }
+    }
+    loadGlobalConfig(root, module, availableInputDevices, availableOutputDevices,
+                     defaultOutputDevices, isSpeakerDrcEnable);
+
+    if (status == NO_ERROR) {
+        hwModules.add(module);
+    }
+}
+
+//static
+void ConfigParsingUtils::loadHwModules(cnode *root, HwModuleCollection &hwModules,
+                                       DeviceVector &availableInputDevices,
+                                       DeviceVector &availableOutputDevices,
+                                       sp<DeviceDescriptor> &defaultOutputDevices,
+                                       bool &isSpeakerDrcEnabled)
+{
+    cnode *node = config_find(root, AUDIO_HW_MODULE_TAG);
+    if (node == NULL) {
+        return;
+    }
+
+    node = node->first_child;
+    while (node) {
+        ALOGV("loadHwModules() loading module %s", node->name);
+        loadHwModule(node, hwModules, availableInputDevices, availableOutputDevices,
+                     defaultOutputDevices, isSpeakerDrcEnabled);
+        node = node->next;
+    }
+}
+
+//static
+void ConfigParsingUtils::loadGlobalConfig(cnode *root, const sp<HwModule>& module,
+                                          DeviceVector &availableInputDevices,
+                                          DeviceVector &availableOutputDevices,
+                                          sp<DeviceDescriptor> &defaultOutputDevice,
+                                          bool &speakerDrcEnabled)
+{
+    cnode *node = config_find(root, GLOBAL_CONFIG_TAG);
+
+    if (node == NULL) {
+        return;
+    }
+    DeviceVector declaredDevices;
+    if (module != NULL) {
+        declaredDevices = module->mDeclaredDevices;
+    }
+
+    node = node->first_child;
+    while (node) {
+        if (strcmp(ATTACHED_OUTPUT_DEVICES_TAG, node->name) == 0) {
+            availableOutputDevices.loadDevicesFromName((char *)node->value,
+                                                        declaredDevices);
+            ALOGV("loadGlobalConfig() Attached Output Devices %08x",
+                  availableOutputDevices.types());
+        } else if (strcmp(DEFAULT_OUTPUT_DEVICE_TAG, node->name) == 0) {
+            audio_devices_t device = (audio_devices_t)stringToEnum(
+                    sDeviceNameToEnumTable,
+                    ARRAY_SIZE(sDeviceNameToEnumTable),
+                    (char *)node->value);
+            if (device != AUDIO_DEVICE_NONE) {
+                defaultOutputDevice = new DeviceDescriptor(String8("default-output"), device);
+                ALOGV("loadGlobalConfig() mDefaultOutputDevice %08x", defaultOutputDevice->type());
+            } else {
+                ALOGW("loadGlobalConfig() default device not specified");
+            }
+        } else if (strcmp(ATTACHED_INPUT_DEVICES_TAG, node->name) == 0) {
+            availableInputDevices.loadDevicesFromName((char *)node->value,
+                                                       declaredDevices);
+            ALOGV("loadGlobalConfig() Available InputDevices %08x", availableInputDevices.types());
+        } else if (strcmp(SPEAKER_DRC_ENABLED_TAG, node->name) == 0) {
+            speakerDrcEnabled = stringToBool((char *)node->value);
+            ALOGV("loadGlobalConfig() mSpeakerDrcEnabled = %d", speakerDrcEnabled);
+        } else if (strcmp(AUDIO_HAL_VERSION_TAG, node->name) == 0) {
+            uint32_t major, minor;
+            sscanf((char *)node->value, "%u.%u", &major, &minor);
+            module->mHalVersion = HARDWARE_DEVICE_API_VERSION(major, minor);
+            ALOGV("loadGlobalConfig() mHalVersion = %04x major %u minor %u",
+                  module->mHalVersion, major, minor);
+        }
+        node = node->next;
+    }
+}
+
+//static
+status_t ConfigParsingUtils::loadAudioPolicyConfig(const char *path,
+                                                   HwModuleCollection &hwModules,
+                                                   DeviceVector &availableInputDevices,
+                                                   DeviceVector &availableOutputDevices,
+                                                   sp<DeviceDescriptor> &defaultOutputDevices,
+                                                   bool &isSpeakerDrcEnabled)
+{
+    cnode *root;
+    char *data;
+
+    data = (char *)load_file(path, NULL);
+    if (data == NULL) {
+        return -ENODEV;
+    }
+    root = config_node("", "");
+    config_load(root, data);
+
+    loadHwModules(root, hwModules,
+                  availableInputDevices, availableOutputDevices,
+                  defaultOutputDevices, isSpeakerDrcEnabled);
+    // legacy audio_policy.conf files have one global_configuration section
+    loadGlobalConfig(root, hwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY),
+                     availableInputDevices, availableOutputDevices,
+                     defaultOutputDevices, isSpeakerDrcEnabled);
+    config_free(root);
+    free(root);
+    free(data);
+
+    ALOGI("loadAudioPolicyConfig() loaded %s\n", path);
+
+    return NO_ERROR;
+}
+
+}; // namespace android
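Note on the parsers above: parseOutputFlagNames(), parseInputFlagNames() and parseDeviceNames() all split a "NAME_A|NAME_B" string with strtok() and OR the matching table entries together, with the offload flag implying direct. A standalone sketch of that scheme with an invented two-entry table (the real tables live in ConfigParsingUtils.h):

    // Illustrative sketch only: table contents and flag values are invented.
    #include <cstdio>
    #include <cstring>

    struct StringToEnum { const char *name; unsigned value; };

    static const StringToEnum kFlagTable[] = {
        { "AUDIO_OUTPUT_FLAG_DIRECT",           0x1 },
        { "AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD", 0x10 },
    };

    static unsigned parseFlagNames(char *names)
    {
        unsigned flags = 0;
        for (char *name = strtok(names, "|"); name != nullptr; name = strtok(nullptr, "|")) {
            for (const StringToEnum &entry : kFlagTable) {
                if (strcmp(entry.name, name) == 0) {
                    flags |= entry.value;
                    break;
                }
            }
        }
        // Offloading implies a direct stream, mirroring the rule in parseOutputFlagNames().
        if (flags & 0x10) {
            flags |= 0x1;
        }
        return flags;
    }

    int main()
    {
        char names[] = "AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD";  // strtok() needs a writable buffer
        printf("0x%x\n", parseFlagNames(names));               // prints 0x11
        return 0;
    }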
diff --git a/services/audiopolicy/managerdefault/Devices.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
similarity index 80%
rename from services/audiopolicy/managerdefault/Devices.cpp
rename to services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 13c8bbc..7df7d75 100644
--- a/services/audiopolicy/managerdefault/Devices.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -17,18 +17,21 @@
 #define LOG_TAG "APM::Devices"
 //#define LOG_NDEBUG 0
 
-#include "AudioPolicyManager.h"
+#include "DeviceDescriptor.h"
+#include "AudioGain.h"
+#include "HwModule.h"
+#include "ConfigParsingUtils.h"
 
 namespace android {
 
 String8 DeviceDescriptor::emptyNameStr = String8("");
 
 DeviceDescriptor::DeviceDescriptor(const String8& name, audio_devices_t type) :
-                     AudioPort(name, AUDIO_PORT_TYPE_DEVICE,
-                               audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK :
-                                                              AUDIO_PORT_ROLE_SOURCE,
-                             NULL),
-                     mDeviceType(type), mAddress("")
+    AudioPort(name, AUDIO_PORT_TYPE_DEVICE,
+              audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK :
+                                             AUDIO_PORT_ROLE_SOURCE,
+              NULL),
+    mAddress(""), mDeviceType(type)
 {
 
 }
@@ -57,7 +60,7 @@
 {
     mDeviceTypes = AUDIO_DEVICE_NONE;
     for(size_t i = 0; i < size(); i++) {
-        mDeviceTypes |= itemAt(i)->mDeviceType;
+        mDeviceTypes |= itemAt(i)->type();
     }
     ALOGV("DeviceVector::refreshTypes() mDeviceTypes %08x", mDeviceTypes);
 }
@@ -82,7 +85,7 @@
             refreshTypes();
         }
     } else {
-        ALOGW("DeviceVector::add device %08x already in", item->mDeviceType);
+        ALOGW("DeviceVector::add device %08x already in", item->type());
         ret = -1;
     }
     return ret;
@@ -94,7 +97,7 @@
     ssize_t ret = indexOf(item);
 
     if (ret < 0) {
-        ALOGW("DeviceVector::remove device %08x not in", item->mDeviceType);
+        ALOGW("DeviceVector::remove device %08x not in", item->type());
     } else {
         ret = SortedVector::removeAt(ret);
         if (ret >= 0) {
@@ -104,6 +107,17 @@
     return ret;
 }
 
+audio_devices_t DeviceVector::getDevicesFromHwModule(audio_module_handle_t moduleHandle) const
+{
+    audio_devices_t devices = AUDIO_DEVICE_NONE;
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->getModuleHandle() == moduleHandle) {
+            devices |= itemAt(i)->type();
+        }
+    }
+    return devices;
+}
+
 void DeviceVector::loadDevicesFromType(audio_devices_t types)
 {
     DeviceVector deviceList;
@@ -151,7 +165,7 @@
 {
     sp<DeviceDescriptor> device;
     for (size_t i = 0; i < size(); i++) {
-        if (itemAt(i)->mDeviceType == type) {
+        if (itemAt(i)->type() == type) {
             if (address == "" || itemAt(i)->mAddress == address) {
                 device = itemAt(i);
                 if (itemAt(i)->mAddress == address) {
@@ -180,12 +194,16 @@
 DeviceVector DeviceVector::getDevicesFromType(audio_devices_t type) const
 {
     DeviceVector devices;
+    bool isOutput = audio_is_output_devices(type);
+    type &= ~AUDIO_DEVICE_BIT_IN;
     for (size_t i = 0; (i < size()) && (type != AUDIO_DEVICE_NONE); i++) {
-        if (itemAt(i)->mDeviceType & type & ~AUDIO_DEVICE_BIT_IN) {
+        bool curIsOutput = audio_is_output_devices(itemAt(i)->mDeviceType);
+        audio_devices_t curType = itemAt(i)->mDeviceType & ~AUDIO_DEVICE_BIT_IN;
+        if ((isOutput == curIsOutput) && ((type & curType) != 0)) {
             devices.add(itemAt(i));
-            type &= ~itemAt(i)->mDeviceType;
+            type &= ~curType;
             ALOGV("DeviceVector::getDevicesFromType() for type %x found %p",
-                  itemAt(i)->mDeviceType, itemAt(i).get());
+                  itemAt(i)->type(), itemAt(i).get());
         }
     }
     return devices;
@@ -196,7 +214,7 @@
 {
     DeviceVector devices;
     for (size_t i = 0; i < size(); i++) {
-        if (itemAt(i)->mDeviceType == type) {
+        if (itemAt(i)->type() == type) {
             if (itemAt(i)->mAddress == address) {
                 devices.add(itemAt(i));
             }
@@ -217,6 +235,26 @@
     return device;
 }
 
+
+status_t DeviceVector::dump(int fd, const String8 &direction) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\n Available %s devices:\n", direction.string());
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        itemAt(i)->dump(fd, 2, i);
+    }
+    return NO_ERROR;
+}
+
+audio_policy_dev_state_t DeviceVector::getDeviceConnectionState(const sp<DeviceDescriptor> &devDesc) const
+{
+    ssize_t index = indexOf(devDesc);
+    return index >= 0 ? AUDIO_POLICY_DEVICE_STATE_AVAILABLE : AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
+}
+
 void DeviceDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
                                          const struct audio_port_config *srcConfig) const
 {
@@ -237,7 +275,7 @@
     // without the test?
     // This has been demonstrated to NOT be true (at start up)
     // ALOG_ASSERT(mModule != NULL);
-    dstConfig->ext.device.hw_module = mModule != NULL ? mModule->mHandle : NULL;
+    dstConfig->ext.device.hw_module = mModule != 0 ? mModule->mHandle : AUDIO_IO_HANDLE_NONE;
     strncpy(dstConfig->ext.device.address, mAddress.string(), AUDIO_DEVICE_MAX_ADDRESS_LEN);
 }
 
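Note on the rework above: DeviceVector::getDevicesFromType() no longer matches an input device against an output request (or vice versa) when the two types happen to share bits once AUDIO_DEVICE_BIT_IN is stripped. A standalone sketch of that matching, with invented bit values:

    // Illustrative sketch only: device bit values are invented stand-ins.
    #include <cstdio>
    #include <vector>

    static const unsigned kBitIn = 0x80000000u;  // stands in for AUDIO_DEVICE_BIT_IN

    static bool isOutputDevices(unsigned type) { return (type & kBitIn) == 0; }

    // Returns the subset of 'devices' matching 'type', never mixing input and output types.
    static std::vector<unsigned> devicesFromType(const std::vector<unsigned> &devices, unsigned type)
    {
        std::vector<unsigned> matched;
        const bool wantOutput = isOutputDevices(type);
        type &= ~kBitIn;
        for (unsigned dev : devices) {
            if (type == 0) break;
            const bool curIsOutput = isOutputDevices(dev);
            const unsigned curType = dev & ~kBitIn;
            if (wantOutput == curIsOutput && (type & curType) != 0) {
                matched.push_back(dev);
                type &= ~curType;  // each requested bit is satisfied at most once
            }
        }
        return matched;
    }

    int main()
    {
        // One output device (0x2) and one input device sharing the same low bit.
        std::vector<unsigned> devices = { 0x2, kBitIn | 0x2 };
        printf("%zu\n", devicesFromType(devices, 0x2).size());          // 1 (output only)
        printf("%zu\n", devicesFromType(devices, kBitIn | 0x2).size()); // 1 (input only)
        return 0;
    }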
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
new file mode 100644
index 0000000..33d838d
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::EffectDescriptor"
+//#define LOG_NDEBUG 0
+
+#include "EffectDescriptor.h"
+#include <utils/String8.h>
+
+namespace android {
+
+status_t EffectDescriptor::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, " I/O: %d\n", mIo);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Strategy: %d\n", mStrategy);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Session: %d\n", mSession);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Name: %s\n",  mDesc.name);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " %s\n",  mEnabled ? "Enabled" : "Disabled");
+    result.append(buffer);
+    write(fd, result.string(), result.size());
+
+    return NO_ERROR;
+}
+
+EffectDescriptorCollection::EffectDescriptorCollection() :
+    mTotalEffectsCpuLoad(0),
+    mTotalEffectsMemory(0)
+{
+
+}
+
+status_t EffectDescriptorCollection::registerEffect(const effect_descriptor_t *desc,
+                                                    audio_io_handle_t io,
+                                                    uint32_t strategy,
+                                                    int session,
+                                                    int id)
+{
+    if (mTotalEffectsMemory + desc->memoryUsage > getMaxEffectsMemory()) {
+        ALOGW("registerEffect() memory limit exceeded for Fx %s, Memory %d KB",
+                desc->name, desc->memoryUsage);
+        return INVALID_OPERATION;
+    }
+    mTotalEffectsMemory += desc->memoryUsage;
+    ALOGV("registerEffect() effect %s, io %d, strategy %d session %d id %d",
+            desc->name, io, strategy, session, id);
+    ALOGV("registerEffect() memory %d, total memory %d", desc->memoryUsage, mTotalEffectsMemory);
+
+    sp<EffectDescriptor> effectDesc = new EffectDescriptor();
+    memcpy (&effectDesc->mDesc, desc, sizeof(effect_descriptor_t));
+    effectDesc->mIo = io;
+    effectDesc->mStrategy = static_cast<routing_strategy>(strategy);
+    effectDesc->mSession = session;
+    effectDesc->mEnabled = false;
+
+    add(id, effectDesc);
+
+    return NO_ERROR;
+}
+
+status_t EffectDescriptorCollection::unregisterEffect(int id)
+{
+    ssize_t index = indexOfKey(id);
+    if (index < 0) {
+        ALOGW("unregisterEffect() unknown effect ID %d", id);
+        return INVALID_OPERATION;
+    }
+
+    sp<EffectDescriptor> effectDesc = valueAt(index);
+
+    setEffectEnabled(effectDesc, false);
+
+    if (mTotalEffectsMemory < effectDesc->mDesc.memoryUsage) {
+        ALOGW("unregisterEffect() memory %d too big for total %d",
+                effectDesc->mDesc.memoryUsage, mTotalEffectsMemory);
+        effectDesc->mDesc.memoryUsage = mTotalEffectsMemory;
+    }
+    mTotalEffectsMemory -= effectDesc->mDesc.memoryUsage;
+    ALOGV("unregisterEffect() effect %s, ID %d, memory %d total memory %d",
+            effectDesc->mDesc.name, id, effectDesc->mDesc.memoryUsage, mTotalEffectsMemory);
+
+    removeItem(id);
+
+    return NO_ERROR;
+}
+
+status_t EffectDescriptorCollection::setEffectEnabled(int id, bool enabled)
+{
+    ssize_t index = indexOfKey(id);
+    if (index < 0) {
+        ALOGW("unregisterEffect() unknown effect ID %d", id);
+        return INVALID_OPERATION;
+    }
+
+    return setEffectEnabled(valueAt(index), enabled);
+}
+
+
+status_t EffectDescriptorCollection::setEffectEnabled(const sp<EffectDescriptor> &effectDesc,
+                                                      bool enabled)
+{
+    if (enabled == effectDesc->mEnabled) {
+        ALOGV("setEffectEnabled(%s) effect already %s",
+             enabled?"true":"false", enabled?"enabled":"disabled");
+        return INVALID_OPERATION;
+    }
+
+    if (enabled) {
+        if (mTotalEffectsCpuLoad + effectDesc->mDesc.cpuLoad > getMaxEffectsCpuLoad()) {
+            ALOGW("setEffectEnabled(true) CPU Load limit exceeded for Fx %s, CPU %f MIPS",
+                 effectDesc->mDesc.name, (float)effectDesc->mDesc.cpuLoad/10);
+            return INVALID_OPERATION;
+        }
+        mTotalEffectsCpuLoad += effectDesc->mDesc.cpuLoad;
+        ALOGV("setEffectEnabled(true) total CPU %d", mTotalEffectsCpuLoad);
+    } else {
+        if (mTotalEffectsCpuLoad < effectDesc->mDesc.cpuLoad) {
+            ALOGW("setEffectEnabled(false) CPU load %d too high for total %d",
+                    effectDesc->mDesc.cpuLoad, mTotalEffectsCpuLoad);
+            effectDesc->mDesc.cpuLoad = mTotalEffectsCpuLoad;
+        }
+        mTotalEffectsCpuLoad -= effectDesc->mDesc.cpuLoad;
+        ALOGV("setEffectEnabled(false) total CPU %d", mTotalEffectsCpuLoad);
+    }
+    effectDesc->mEnabled = enabled;
+    return NO_ERROR;
+}
+
+bool EffectDescriptorCollection::isNonOffloadableEffectEnabled()
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<EffectDescriptor> effectDesc = valueAt(i);
+        if (effectDesc->mEnabled && (effectDesc->mStrategy == STRATEGY_MEDIA) &&
+                ((effectDesc->mDesc.flags & EFFECT_FLAG_OFFLOAD_SUPPORTED) == 0)) {
+            ALOGV("isNonOffloadableEffectEnabled() non offloadable effect %s enabled on session %d",
+                  effectDesc->mDesc.name, effectDesc->mSession);
+            return true;
+        }
+    }
+    return false;
+}
+
+uint32_t EffectDescriptorCollection::getMaxEffectsCpuLoad() const
+{
+    return MAX_EFFECTS_CPU_LOAD;
+}
+
+uint32_t EffectDescriptorCollection::getMaxEffectsMemory() const
+{
+    return MAX_EFFECTS_MEMORY;
+}
+
+status_t EffectDescriptorCollection::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\nTotal Effects CPU: %f MIPS, Total Effects memory: %d KB\n",
+             (float)mTotalEffectsCpuLoad/10, mTotalEffectsMemory);
+    write(fd, buffer, strlen(buffer));
+
+    snprintf(buffer, SIZE, "Registered effects:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, "- Effect %d dump:\n", keyAt(i));
+        write(fd, buffer, strlen(buffer));
+        valueAt(i)->dump(fd);
+    }
+    return NO_ERROR;
+}
+
+}; //namespace android
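Note on the accounting above: EffectDescriptorCollection keeps running CPU and memory totals, refuses to register or enable an effect that would exceed the budget, and clamps on the way down so the counters never underflow. A reduced standalone sketch of that bookkeeping (class name, limit and units are invented stand-ins):

    // Illustrative sketch only: the budget value and units are invented.
    #include <cstdio>

    class EffectBudget {
    public:
        explicit EffectBudget(unsigned maxCpuTenthsMips) : mMaxCpu(maxCpuTenthsMips), mTotalCpu(0) {}

        // Mirrors setEffectEnabled(true): refuse to enable when the budget would be exceeded.
        bool enable(unsigned cpuLoad) {
            if (mTotalCpu + cpuLoad > mMaxCpu) {
                printf("enable refused: CPU %.1f MIPS over budget\n", cpuLoad / 10.0f);
                return false;
            }
            mTotalCpu += cpuLoad;
            return true;
        }

        // Mirrors setEffectEnabled(false): clamp instead of underflowing the counter.
        void disable(unsigned cpuLoad) {
            if (mTotalCpu < cpuLoad) {
                cpuLoad = mTotalCpu;
            }
            mTotalCpu -= cpuLoad;
        }

    private:
        unsigned mMaxCpu;
        unsigned mTotalCpu;
    };

    int main()
    {
        EffectBudget budget(1000);   // 100.0 MIPS budget, expressed in tenths of MIPS
        budget.enable(600);
        budget.enable(600);          // refused: would exceed the budget
        budget.disable(600);
        return 0;
    }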
diff --git a/services/audiopolicy/managerdefault/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
similarity index 75%
rename from services/audiopolicy/managerdefault/HwModule.cpp
rename to services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index a04bdc8..0097d69 100644
--- a/services/audiopolicy/managerdefault/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -17,9 +17,13 @@
 #define LOG_TAG "APM::HwModule"
 //#define LOG_NDEBUG 0
 
-#include "AudioPolicyManager.h"
+#include "HwModule.h"
+#include "IOProfile.h"
+#include "AudioGain.h"
+#include "ConfigParsingUtils.h"
 #include "audio_policy_conf.h"
 #include <hardware/audio.h>
+#include <policy.h>
 
 namespace android {
 
@@ -138,7 +142,7 @@
 
     audio_devices_t type = AUDIO_DEVICE_NONE;
     while (node) {
-        if (strcmp(node->name, DEVICE_TYPE) == 0) {
+        if (strcmp(node->name, APM_DEVICE_TYPE) == 0) {
             type = ConfigParsingUtils::parseDeviceNames((char *)node->value);
             break;
         }
@@ -154,7 +158,7 @@
 
     node = root->first_child;
     while (node) {
-        if (strcmp(node->name, DEVICE_ADDRESS) == 0) {
+        if (strcmp(node->name, APM_DEVICE_ADDRESS) == 0) {
             deviceDesc->mAddress = String8((char *)node->value);
         } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
             if (audio_is_input_device(type)) {
@@ -276,4 +280,92 @@
     }
 }
 
+sp <HwModule> HwModuleCollection::getModuleFromName(const char *name) const
+{
+    sp <HwModule> module;
+
+    for (size_t i = 0; i < size(); i++)
+    {
+        if (strcmp(itemAt(i)->mName, name) == 0) {
+            return itemAt(i);
+        }
+    }
+    return module;
+}
+
+
+sp <HwModule> HwModuleCollection::getModuleForDevice(audio_devices_t device) const
+{
+    sp <HwModule> module;
+
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->mHandle == 0) {
+            continue;
+        }
+        if (audio_is_output_device(device)) {
+            for (size_t j = 0; j < itemAt(i)->mOutputProfiles.size(); j++)
+            {
+                if (itemAt(i)->mOutputProfiles[j]->mSupportedDevices.types() & device) {
+                    return itemAt(i);
+                }
+            }
+        } else {
+            for (size_t j = 0; j < itemAt(i)->mInputProfiles.size(); j++) {
+                if (itemAt(i)->mInputProfiles[j]->mSupportedDevices.types() &
+                        device & ~AUDIO_DEVICE_BIT_IN) {
+                    return itemAt(i);
+                }
+            }
+        }
+    }
+    return module;
+}
+
+sp<DeviceDescriptor>  HwModuleCollection::getDeviceDescriptor(const audio_devices_t device,
+                                                              const char *device_address,
+                                                              const char *device_name) const
+{
+    String8 address = (device_address == NULL) ? String8("") : String8(device_address);
+    // handle legacy remote submix case where the address was not always specified
+    if (device_distinguishes_on_address(device) && (address.length() == 0)) {
+        address = String8("0");
+    }
+
+    for (size_t i = 0; i < size(); i++) {
+        const sp<HwModule> hwModule = itemAt(i);
+        if (hwModule->mHandle == 0) {
+            continue;
+        }
+        DeviceVector deviceList =
+                hwModule->mDeclaredDevices.getDevicesFromTypeAddr(device, address);
+        if (!deviceList.isEmpty()) {
+            return deviceList.itemAt(0);
+        }
+        deviceList = hwModule->mDeclaredDevices.getDevicesFromType(device);
+        if (!deviceList.isEmpty()) {
+            return deviceList.itemAt(0);
+        }
+    }
+
+    sp<DeviceDescriptor> devDesc =
+            new DeviceDescriptor(String8(device_name != NULL ? device_name : ""), device);
+    devDesc->mAddress = address;
+    return devDesc;
+}
+
+status_t HwModuleCollection::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\nHW Modules dump:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, "- HW Module %zu:\n", i + 1);
+        write(fd, buffer, strlen(buffer));
+        itemAt(i)->dump(fd);
+    }
+    return NO_ERROR;
+}
+
 } //namespace android
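Note on getDeviceDescriptor() above: it first looks for a declared device matching type and address, then type alone, and only then builds a fresh descriptor for the connected device. A simplified standalone sketch (types are stand-ins; the real code only defaults a missing address to "0" for devices distinguished by address, e.g. remote submix):

    // Illustrative sketch only: Device is a simplified stand-in for DeviceDescriptor.
    #include <cstdio>
    #include <memory>
    #include <string>
    #include <vector>

    struct Device {
        unsigned type;
        std::string address;
        std::string name;
    };

    static std::shared_ptr<Device> getDeviceDescriptor(
            const std::vector<std::shared_ptr<Device>> &declared,
            unsigned type, const char *address, const char *name)
    {
        // For illustration, an omitted address always defaults to "0".
        std::string addr = (address != nullptr) ? address : "";
        if (addr.empty()) {
            addr = "0";
        }
        // Prefer an exact type+address match, then fall back to a type-only match.
        for (const auto &dev : declared) {
            if (dev->type == type && dev->address == addr) return dev;
        }
        for (const auto &dev : declared) {
            if (dev->type == type) return dev;
        }
        // Nothing declared matches: build a fresh descriptor for the connected device.
        auto dev = std::make_shared<Device>();
        dev->type = type;
        dev->address = addr;
        dev->name = (name != nullptr) ? name : "";
        return dev;
    }

    int main()
    {
        std::vector<std::shared_ptr<Device>> declared = {
            std::make_shared<Device>(Device{0x100, "0", "submix"})
        };
        auto dev = getDeviceDescriptor(declared, 0x100, nullptr, nullptr);
        printf("%s\n", dev->name.c_str());  // "submix": found among declared devices
        return 0;
    }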
diff --git a/services/audiopolicy/managerdefault/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
similarity index 82%
rename from services/audiopolicy/managerdefault/IOProfile.cpp
rename to services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 538ac1a..376dd22 100644
--- a/services/audiopolicy/managerdefault/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -17,7 +17,9 @@
 #define LOG_TAG "APM::IOProfile"
 //#define LOG_NDEBUG 0
 
-#include "AudioPolicyManager.h"
+#include "IOProfile.h"
+#include "HwModule.h"
+#include "AudioGain.h"
 
 namespace android {
 
@@ -35,19 +37,27 @@
 // Sampling rate, format and channel mask must be specified in order to
 // get a valid a match
 bool IOProfile::isCompatibleProfile(audio_devices_t device,
-                                                        String8 address,
-                                                        uint32_t samplingRate,
-                                                        uint32_t *updatedSamplingRate,
-                                                        audio_format_t format,
-                                                        audio_channel_mask_t channelMask,
-                                                        uint32_t flags) const
+                                    String8 address,
+                                    uint32_t samplingRate,
+                                    uint32_t *updatedSamplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    uint32_t flags) const
 {
     const bool isPlaybackThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SOURCE;
     const bool isRecordThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK;
     ALOG_ASSERT(isPlaybackThread != isRecordThread);
 
-    if (device != AUDIO_DEVICE_NONE && mSupportedDevices.getDevice(device, address) == 0) {
-        return false;
+
+    if (device != AUDIO_DEVICE_NONE) {
+        // just check types if multiple devices are selected
+        if (popcount(device & ~AUDIO_DEVICE_BIT_IN) > 1) {
+            if ((mSupportedDevices.types() & device) != device) {
+                return false;
+            }
+        } else if (mSupportedDevices.getDevice(device, address) == 0) {
+            return false;
+        }
     }
 
     if (samplingRate == 0) {
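Note on the isCompatibleProfile() change above: when several device types are requested at once, only the type mask is compared, while a single device still goes through the address-aware lookup. A standalone sketch of that decision, using the GCC/Clang __builtin_popcount() in place of the platform popcount() and invented device bits:

    // Illustrative sketch only: device bit values are invented stand-ins.
    #include <cstdio>

    static const unsigned kBitIn = 0x80000000u;  // stands in for AUDIO_DEVICE_BIT_IN

    static bool profileSupportsDevices(unsigned supportedTypes, unsigned requested)
    {
        if (requested == 0) {
            return true;  // AUDIO_DEVICE_NONE: nothing to check
        }
        if (__builtin_popcount(requested & ~kBitIn) > 1) {
            // Several devices requested at once: only the type mask can be checked.
            return (supportedTypes & requested) == requested;
        }
        // A single device: the real code also matches the device address here.
        return (supportedTypes & requested) != 0;
    }

    int main()
    {
        const unsigned speaker = 0x2, headset = 0x4;
        printf("%d\n", profileSupportsDevices(speaker | headset, speaker | headset));  // 1
        printf("%d\n", profileSupportsDevices(speaker, speaker | headset));            // 0
        return 0;
    }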
diff --git a/services/audiopolicy/common/managerdefinitions/src/SoundTriggerSession.cpp b/services/audiopolicy/common/managerdefinitions/src/SoundTriggerSession.cpp
new file mode 100644
index 0000000..8ca3ae0
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/SoundTriggerSession.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::SoundTriggerSession"
+//#define LOG_NDEBUG 0
+
+#include "SoundTriggerSession.h"
+
+
+namespace android {
+
+status_t SoundTriggerSessionCollection::acquireSession(audio_session_t session,
+                                                                   audio_io_handle_t ioHandle)
+{
+    add(session, ioHandle);
+
+    return NO_ERROR;
+}
+
+status_t SoundTriggerSessionCollection::releaseSession(audio_session_t session)
+{
+    ssize_t index = indexOfKey(session);
+    if (index < 0) {
+        ALOGW("acquireSoundTriggerSession() session %d not registered", session);
+        return BAD_VALUE;
+    }
+
+    removeItem(session);
+    return NO_ERROR;
+}
+
+}; //namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
new file mode 100644
index 0000000..b682e2c
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::Volumes"
+//#define LOG_NDEBUG 0
+
+//#define VERY_VERBOSE_LOGGING
+#ifdef VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+#include "StreamDescriptor.h"
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+namespace android {
+
+// --- StreamDescriptor class implementation
+
+StreamDescriptor::StreamDescriptor()
+    :   mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
+{
+    mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT, 0);
+}
+
+int StreamDescriptor::getVolumeIndex(audio_devices_t device) const
+{
+    device = Volume::getDeviceForVolume(device);
+    // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT
+    if (mIndexCur.indexOfKey(device) < 0) {
+        device = AUDIO_DEVICE_OUT_DEFAULT;
+    }
+    return mIndexCur.valueFor(device);
+}
+
+void StreamDescriptor::clearCurrentVolumeIndex()
+{
+    mIndexCur.clear();
+}
+
+void StreamDescriptor::addCurrentVolumeIndex(audio_devices_t device, int index)
+{
+    mIndexCur.add(device, index);
+}
+
+void StreamDescriptor::setVolumeIndexMin(int volIndexMin)
+{
+    mIndexMin = volIndexMin;
+}
+
+void StreamDescriptor::setVolumeIndexMax(int volIndexMax)
+{
+    mIndexMax = volIndexMax;
+}
+
+void StreamDescriptor::setVolumeCurvePoint(Volume::device_category deviceCategory,
+                                           const VolumeCurvePoint *point)
+{
+    mVolumeCurve[deviceCategory] = point;
+}
+
+void StreamDescriptor::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "%s         %02d         %02d         ",
+             mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
+    result.append(buffer);
+    for (size_t i = 0; i < mIndexCur.size(); i++) {
+        snprintf(buffer, SIZE, "%04x : %02d, ",
+                 mIndexCur.keyAt(i),
+                 mIndexCur.valueAt(i));
+        result.append(buffer);
+    }
+    result.append("\n");
+
+    write(fd, result.string(), result.size());
+}
+
+StreamDescriptorCollection::StreamDescriptorCollection()
+{
+    for (size_t stream = 0 ; stream < AUDIO_STREAM_CNT; stream++) {
+        add(static_cast<audio_stream_type_t>(stream), StreamDescriptor());
+    }
+}
+
+bool StreamDescriptorCollection::canBeMuted(audio_stream_type_t stream)
+{
+    return valueAt(stream).canBeMuted();
+}
+
+void StreamDescriptorCollection::clearCurrentVolumeIndex(audio_stream_type_t stream)
+{
+    editValueAt(stream).clearCurrentVolumeIndex();
+}
+
+void StreamDescriptorCollection::addCurrentVolumeIndex(audio_stream_type_t stream,
+                                                       audio_devices_t device, int index)
+{
+    editValueAt(stream).addCurrentVolumeIndex(device, index);
+}
+
+void StreamDescriptorCollection::setVolumeCurvePoint(audio_stream_type_t stream,
+                                                     Volume::device_category deviceCategory,
+                                                     const VolumeCurvePoint *point)
+{
+    editValueAt(stream).setVolumeCurvePoint(deviceCategory, point);
+}
+
+const VolumeCurvePoint *StreamDescriptorCollection::getVolumeCurvePoint(audio_stream_type_t stream,
+                                                                        Volume::device_category deviceCategory) const
+{
+    return valueAt(stream).getVolumeCurvePoint(deviceCategory);
+}
+
+void StreamDescriptorCollection::setVolumeIndexMin(audio_stream_type_t stream, int volIndexMin)
+{
+    editValueAt(stream).setVolumeIndexMin(volIndexMin);
+}
+
+void StreamDescriptorCollection::setVolumeIndexMax(audio_stream_type_t stream, int volIndexMax)
+{
+    editValueAt(stream).setVolumeIndexMax(volIndexMax);
+}
+
+status_t StreamDescriptorCollection::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\nStreams dump:\n");
+    write(fd, buffer, strlen(buffer));
+    snprintf(buffer, SIZE,
+             " Stream  Can be muted  Index Min  Index Max  Index Cur [device : index]...\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, " %02zu      ", i);
+        write(fd, buffer, strlen(buffer));
+        valueAt(i).dump(fd);
+    }
+
+    return NO_ERROR;
+}
+
+}; // namespace android
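Note on the per-device volume indexes above: StreamDescriptor keeps one index per device and falls back to the AUDIO_DEVICE_OUT_DEFAULT entry when a device has no specific index (the real code first collapses the device through Volume::getDeviceForVolume()). A standalone sketch with invented device values:

    // Illustrative sketch only: device values are invented stand-ins.
    #include <cstdio>
    #include <map>

    static const unsigned kDefaultDevice = 0x1;  // stands in for AUDIO_DEVICE_OUT_DEFAULT

    struct StreamVolume {
        std::map<unsigned, int> indexCur;  // per-device volume index

        int getVolumeIndex(unsigned device) const {
            // There is always an entry for the default device; fall back to it when the
            // requested device has no specific index.
            auto it = indexCur.find(device);
            if (it == indexCur.end()) {
                it = indexCur.find(kDefaultDevice);
            }
            return it->second;
        }
    };

    int main()
    {
        StreamVolume music;
        music.indexCur[kDefaultDevice] = 5;
        music.indexCur[0x8] = 9;                      // e.g. a wired headset
        printf("%d\n", music.getVolumeIndex(0x8));    // 9
        printf("%d\n", music.getVolumeIndex(0x40));   // 5, default fallback
        return 0;
    }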
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
new file mode 100755
index 0000000..eadaa77
--- /dev/null
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <AudioPolicyManagerObserver.h>
+#include <RoutingStrategy.h>
+#include <Volume.h>
+#include <HwModule.h>
+#include <DeviceDescriptor.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <utils/Errors.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+/**
+ * This is the interface, used by the policy manager, that a Policy Engine shall implement.
+ */
+class AudioPolicyManagerInterface
+{
+public:
+    /**
+     * Checks if the engine was correctly initialized.
+     *
+     * @return NO_ERROR if initialization has been done correctly, error code otherwise.
+     */
+    virtual status_t initCheck() = 0;
+
+    /**
+     * Sets the Manager observer that allows the engine to retrieve information on the
+     * collections of devices, streams, HwModules, ...
+     *
+     * @param[in] observer handle on the manager.
+     */
+    virtual void setObserver(AudioPolicyManagerObserver *observer) = 0;
+
+    /**
+     * Get the input device selected for a given input source.
+     *
+     * @param[in] inputSource for which the selected input device is requested.
+     *
+     * @return selected input device for the given input source, may be none on error.
+     */
+    virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const = 0;
+
+    /**
+     * Get the output device associated with a given strategy.
+     *
+     * @param[in] strategy for which the selected output device is requested.
+     *
+     * @return selected output device for the given strategy, may be none on error.
+     */
+    virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy) const = 0;
+
+    /**
+     * Get the strategy selected for a given stream type.
+     *
+     * @param[in] stream for which the selected strategy is requested.
+     *
+     * @return strategy to be followed.
+     */
+    virtual routing_strategy getStrategyForStream(audio_stream_type_t stream) = 0;
+
+    /**
+     * Get the strategy selected for a given usage.
+     *
+     * @param[in] usage for which the selected strategy is requested.
+     *
+     * @return strategy to be followed.
+     */
+    virtual routing_strategy getStrategyForUsage(audio_usage_t usage) = 0;
+
+    /**
+     * Set the Telephony Mode.
+     *
+     * @param[in] mode: Android phone state (normal, ringtone, in call, in communication)
+     *
+     * @return NO_ERROR if Telephony Mode set correctly, error code otherwise.
+     */
+    virtual status_t setPhoneState(audio_mode_t mode) = 0;
+
+    /**
+     * Get the telephony Mode
+     *
+     * @return the current telephony mode
+     */
+    virtual audio_mode_t getPhoneState() const = 0;
+
+    /**
+     * Set Force Use config for a given usage.
+     *
+     * @param[in] usage for which a configuration shall be forced.
+     * @param[in] config wished to be forced for the given usage.
+     *
+     * @return NO_ERROR if the Force Use config was set correctly, error code otherwise
+     * (e.g. config not allowed for the given usage).
+     */
+    virtual status_t setForceUse(audio_policy_force_use_t usage,
+                                 audio_policy_forced_cfg_t config) = 0;
+
+    /**
+     * Get Force Use config for a given usage.
+     *
+     * @param[in] usage for which a configuration shall be forced.
+     *
+     * @return config wished to be forced for the given usage.
+     */
+    virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const = 0;
+
+    /**
+     * Set the connection state of device(s).
+     *
+     * @param[in] devDesc for which the state has changed.
+     * @param[in] state of availability of this(these) device(s).
+     *
+     * @return NO_ERROR if devices criterion updated correctly, error code otherwise.
+     */
+    virtual status_t setDeviceConnectionState(const android::sp<android::DeviceDescriptor> devDesc,
+                                              audio_policy_dev_state_t state) = 0;
+
+    /**
+     * Translate a volume index given by the UI to an amplification value for a stream type
+     * and a device category.
+     *
+     * @param[in] deviceCategory for which the conversion is requested.
+     * @param[in] stream type for which the conversion is requested.
+     * @param[in] indexInUi index received from the UI to be translated.
+     *
+     * @return amplification value matching the UI index for this given device and stream.
+     */
+    virtual float volIndexToAmpl(Volume::device_category deviceCategory, audio_stream_type_t stream,
+                                 int indexInUi) = 0;
+
+    /**
+     * Initialize the min / max index of volume applicable for a given stream type. These indexes
+     * will be used upon conversion of UI index to volume amplification.
+     *
+     * @param[in] stream type for which the indexes need to be set
+     * @param[in] indexMin Minimum index allowed for this stream.
+     * @param[in] indexMax Maximum index allowed for this stream.
+     */
+    virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax) = 0;
+
+    /**
+     * Initialize volume curves for each strategy and device category
+     *
+     * @param[in] isSpeakerDrcEnabled true on devices that use DRC on the DEVICE_CATEGORY_SPEAKER
+     *            path to boost soft sounds, used to adjust volume curves accordingly.
+     */
+    virtual void initializeVolumeCurves(bool isSpeakerDrcEnabled) = 0;
+
+protected:
+    virtual ~AudioPolicyManagerInterface() {}
+};
+
+}; // namespace android
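Note on volIndexToAmpl() above: it is documented as translating a UI volume index into an amplification for a stream and device category. A standalone sketch of one plausible implementation, interpolating linearly in dB between curve points and converting to a linear gain; the curve values and the signature are illustrative, not the engine's actual tables:

    // Illustrative sketch only: curve points and ranges are invented stand-ins.
    #include <cstdio>
    #include <cmath>

    struct CurvePoint { int index; float decibels; };  // curve position (0..100) -> attenuation in dB

    static float volIndexToAmpl(const CurvePoint *curve, int points,
                                int indexMin, int indexMax, int indexInUi)
    {
        if (indexMax <= indexMin) {
            return 1.0f;  // degenerate range: no attenuation
        }
        // Normalize the UI index onto the 0..100 curve range.
        float pos = 100.0f * (indexInUi - indexMin) / (indexMax - indexMin);
        float db = curve[points - 1].decibels;
        for (int i = 1; i < points; i++) {
            if (pos <= curve[i].index) {
                // Linear interpolation in dB between two adjacent curve points.
                float span = curve[i].index - curve[i - 1].index;
                float frac = (pos - curve[i - 1].index) / span;
                db = curve[i - 1].decibels + frac * (curve[i].decibels - curve[i - 1].decibels);
                break;
            }
        }
        return powf(10.0f, db / 20.0f);  // dB to linear amplification
    }

    int main()
    {
        const CurvePoint curve[] = { {0, -49.5f}, {33, -33.5f}, {66, -17.0f}, {100, 0.0f} };
        printf("%.4f\n", volIndexToAmpl(curve, 4, 0, 15, 15));  // ~1.0 at the maximum index
        printf("%.4f\n", volIndexToAmpl(curve, 4, 0, 15, 7));   // intermediate amplification
        return 0;
    }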
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
new file mode 100755
index 0000000..4f5427e
--- /dev/null
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <AudioGain.h>
+#include <AudioPort.h>
+#include <AudioPatch.h>
+#include <IOProfile.h>
+#include <DeviceDescriptor.h>
+#include <AudioInputDescriptor.h>
+#include <AudioOutputDescriptor.h>
+#include <AudioPolicyMix.h>
+#include <SoundTriggerSession.h>
+#include <StreamDescriptor.h>
+
+namespace android {
+
+/**
+ * This interface is an observer that the manager shall implement to allow e.g. the engine
+ * to access policy pillar elements (like output / input descriptor collections,
+ * HwModule collections, AudioMix, ...).
+ */
+class AudioPolicyManagerObserver
+{
+public:
+    virtual const AudioPatchCollection &getAudioPatches() const = 0;
+
+    virtual const SoundTriggerSessionCollection &getSoundTriggerSessionCollection() const = 0;
+
+    virtual const AudioPolicyMixCollection &getAudioPolicyMixCollection() const = 0;
+
+    virtual const AudioOutputCollection &getOutputs() const = 0;
+
+    virtual const AudioInputCollection &getInputs() const = 0;
+
+    virtual const DeviceVector &getAvailableOutputDevices() const = 0;
+
+    virtual const DeviceVector &getAvailableInputDevices() const = 0;
+
+    virtual StreamDescriptorCollection &getStreamDescriptors() = 0;
+
+    virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const = 0;
+
+protected:
+    virtual ~AudioPolicyManagerObserver() {}
+};
+
+}; // namespace android
diff --git a/services/audiopolicy/enginedefault/Android.mk b/services/audiopolicy/enginedefault/Android.mk
new file mode 100755
index 0000000..b0ae835
--- /dev/null
+++ b/services/audiopolicy/enginedefault/Android.mk
@@ -0,0 +1,48 @@
+LOCAL_PATH := $(call my-dir)
+
+# Component build
+#######################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+    src/Engine.cpp \
+    src/EngineInstance.cpp \
+    src/Gains.cpp \
+
+
+audio_policy_engine_includes_common := \
+    $(LOCAL_PATH)/include \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface
+
+LOCAL_CFLAGS += \
+    -Wall \
+    -Werror \
+    -Wextra \
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+    $(audio_policy_engine_includes_common)
+
+LOCAL_C_INCLUDES := \
+    $(audio_policy_engine_includes_common) \
+    $(TARGET_OUT_HEADERS)/hw \
+    $(call include-path-for, frameworks-av) \
+    $(call include-path-for, audio-utils) \
+    $(call include-path-for, bionic) \
+    $(TOPDIR)frameworks/av/services/audiopolicy/common/include
+
+
+LOCAL_MODULE := libaudiopolicyenginedefault
+LOCAL_MODULE_TAGS := optional
+LOCAL_STATIC_LIBRARIES := \
+    libmedia_helper \
+    libaudiopolicycomponents
+
+LOCAL_SHARED_LIBRARIES += \
+    libcutils \
+    libutils \
+    libaudioutils \
+
+include external/stlport/libstlport.mk
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audiopolicy/enginedefault/include/AudioPolicyEngineInstance.h b/services/audiopolicy/enginedefault/include/AudioPolicyEngineInstance.h
new file mode 100755
index 0000000..1e329f0
--- /dev/null
+++ b/services/audiopolicy/enginedefault/include/AudioPolicyEngineInstance.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+class AudioPolicyManagerInterface;
+
+namespace android
+{
+namespace audio_policy
+{
+
+class Engine;
+
+class EngineInstance
+{
+protected:
+    EngineInstance();
+
+public:
+    virtual ~EngineInstance();
+
+    /**
+     * Get Audio Policy Engine instance.
+     *
+     * @return pointer to the Audio Policy Engine instance object.
+     */
+    static EngineInstance *getInstance();
+
+    /**
+     * Interface query.
+     * The first client of an interface of the policy engine will start the singleton.
+     *
+     * @tparam RequestedInterface: interface that the client is wishing to retrieve.
+     *
+     * @return interface handle.
+     */
+    template <class RequestedInterface>
+    RequestedInterface *queryInterface() const;
+
+protected:
+    /**
+     * Get Audio Policy Engine instance.
+     *
+     * @return Audio Policy Engine singleton.
+     */
+    Engine *getEngine() const;
+
+private:
+    /* Copy facilities are put private to disable copy. */
+    EngineInstance(const EngineInstance &object);
+    EngineInstance &operator=(const EngineInstance &object);
+};
+
+/**
+ * Limit template instantiation to supported type interfaces.
+ * A compile-time error will be raised if an invalid interface is requested.
+ */
+template <>
+AudioPolicyManagerInterface *EngineInstance::queryInterface() const;
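+
+/*
+ * Illustrative usage sketch (variable names are examples only):
+ *
+ *   audio_policy::EngineInstance *engineInstance =
+ *           audio_policy::EngineInstance::getInstance();
+ *   AudioPolicyManagerInterface *engineInterface =
+ *           engineInstance->queryInterface<AudioPolicyManagerInterface>();
+ *   // The returned interface can then be given its observer via setObserver().
+ */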
+
+} // namespace audio_policy
+} // namespace android
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
new file mode 100755
index 0000000..1fd3341
--- /dev/null
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -0,0 +1,707 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine"
+//#define LOG_NDEBUG 0
+
+//#define VERY_VERBOSE_LOGGING
+#ifdef VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+#include "Engine.h"
+#include "Gains.h"
+#include <AudioPolicyManagerObserver.h>
+#include <AudioPort.h>
+#include <IOProfile.h>
+#include <policy.h>
+#include <utils/String8.h>
+#include <utils/Log.h>
+
+namespace android
+{
+namespace audio_policy
+{
+
+Engine::Engine()
+    : mManagerInterface(this),
+      mPhoneState(AUDIO_MODE_NORMAL),
+      mApmObserver(NULL)
+{
+    for (int i = 0; i < AUDIO_POLICY_FORCE_USE_CNT; i++) {
+        mForceUse[i] = AUDIO_POLICY_FORCE_NONE;
+    }
+}
+
+Engine::~Engine()
+{
+}
+
+void Engine::setObserver(AudioPolicyManagerObserver *observer)
+{
+    ALOG_ASSERT(observer != NULL, "Invalid Audio Policy Manager observer");
+    mApmObserver = observer;
+}
+
+status_t Engine::initCheck()
+{
+    return (mApmObserver != NULL) ?  NO_ERROR : NO_INIT;
+}
+
+float Engine::volIndexToAmpl(Volume::device_category category, audio_stream_type_t streamType,
+                             int indexInUi)
+{
+    const StreamDescriptor &streamDesc = mApmObserver->getStreamDescriptors().valueAt(streamType);
+    return Gains::volIndexToAmpl(category, streamDesc, indexInUi);
+}
+
+status_t Engine::initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax)
+{
+    ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
+    if (indexMin < 0 || indexMin >= indexMax) {
+        ALOGW("initStreamVolume() invalid index limits for stream %d, min %d, max %d",
+              stream , indexMin, indexMax);
+        return BAD_VALUE;
+    }
+    mApmObserver->getStreamDescriptors().setVolumeIndexMin(stream, indexMin);
+    mApmObserver->getStreamDescriptors().setVolumeIndexMax(stream, indexMax);
+    return NO_ERROR;
+}
+
+void Engine::initializeVolumeCurves(bool isSpeakerDrcEnabled)
+{
+    StreamDescriptorCollection &streams = mApmObserver->getStreamDescriptors();
+
+    for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
+        for (int j = 0; j < Volume::DEVICE_CATEGORY_CNT; j++) {
+            streams.setVolumeCurvePoint(static_cast<audio_stream_type_t>(i),
+                                         static_cast<Volume::device_category>(j),
+                                         Gains::sVolumeProfiles[i][j]);
+        }
+    }
+
+    // Check availability of DRC on speaker path: if available, override some of the speaker curves
+    if (isSpeakerDrcEnabled) {
+        streams.setVolumeCurvePoint(AUDIO_STREAM_SYSTEM, Volume::DEVICE_CATEGORY_SPEAKER,
+                Gains::sDefaultSystemVolumeCurveDrc);
+        streams.setVolumeCurvePoint(AUDIO_STREAM_RING, Volume::DEVICE_CATEGORY_SPEAKER,
+                Gains::sSpeakerSonificationVolumeCurveDrc);
+        streams.setVolumeCurvePoint(AUDIO_STREAM_ALARM, Volume::DEVICE_CATEGORY_SPEAKER,
+                Gains::sSpeakerSonificationVolumeCurveDrc);
+        streams.setVolumeCurvePoint(AUDIO_STREAM_NOTIFICATION, Volume::DEVICE_CATEGORY_SPEAKER,
+                Gains::sSpeakerSonificationVolumeCurveDrc);
+        streams.setVolumeCurvePoint(AUDIO_STREAM_MUSIC, Volume::DEVICE_CATEGORY_SPEAKER,
+                Gains::sSpeakerMediaVolumeCurveDrc);
+        streams.setVolumeCurvePoint(AUDIO_STREAM_ACCESSIBILITY, Volume::DEVICE_CATEGORY_SPEAKER,
+                Gains::sSpeakerMediaVolumeCurveDrc);
+    }
+}
+
+
+status_t Engine::setPhoneState(audio_mode_t state)
+{
+    ALOGV("setPhoneState() state %d", state);
+
+    if (state < 0 || state >= AUDIO_MODE_CNT) {
+        ALOGW("setPhoneState() invalid state %d", state);
+        return BAD_VALUE;
+    }
+
+    if (state == mPhoneState ) {
+        ALOGW("setPhoneState() setting same state %d", state);
+        return BAD_VALUE;
+    }
+
+    // store previous phone state for management of sonification strategy below
+    int oldState = mPhoneState;
+    mPhoneState = state;
+    StreamDescriptorCollection &streams = mApmObserver->getStreamDescriptors();
+    // are we entering or starting a call
+    if (!is_state_in_call(oldState) && is_state_in_call(state)) {
+        ALOGV("  Entering call in setPhoneState()");
+        for (int j = 0; j < Volume::DEVICE_CATEGORY_CNT; j++) {
+            streams.setVolumeCurvePoint(AUDIO_STREAM_DTMF, static_cast<Volume::device_category>(j),
+                                         Gains::sVolumeProfiles[AUDIO_STREAM_VOICE_CALL][j]);
+        }
+    } else if (is_state_in_call(oldState) && !is_state_in_call(state)) {
+        ALOGV("  Exiting call in setPhoneState()");
+        for (int j = 0; j < Volume::DEVICE_CATEGORY_CNT; j++) {
+            streams.setVolumeCurvePoint(AUDIO_STREAM_DTMF, static_cast<Volume::device_category>(j),
+                                         Gains::sVolumeProfiles[AUDIO_STREAM_DTMF][j]);
+        }
+    }
+    return NO_ERROR;
+}
+
+status_t Engine::setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config)
+{
+    switch(usage) {
+    case AUDIO_POLICY_FORCE_FOR_COMMUNICATION:
+        if (config != AUDIO_POLICY_FORCE_SPEAKER && config != AUDIO_POLICY_FORCE_BT_SCO &&
+            config != AUDIO_POLICY_FORCE_NONE) {
+            ALOGW("setForceUse() invalid config %d for FOR_COMMUNICATION", config);
+            return BAD_VALUE;
+        }
+        mForceUse[usage] = config;
+        break;
+    case AUDIO_POLICY_FORCE_FOR_MEDIA:
+        if (config != AUDIO_POLICY_FORCE_HEADPHONES && config != AUDIO_POLICY_FORCE_BT_A2DP &&
+            config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
+            config != AUDIO_POLICY_FORCE_ANALOG_DOCK &&
+            config != AUDIO_POLICY_FORCE_DIGITAL_DOCK && config != AUDIO_POLICY_FORCE_NONE &&
+            config != AUDIO_POLICY_FORCE_NO_BT_A2DP && config != AUDIO_POLICY_FORCE_SPEAKER ) {
+            ALOGW("setForceUse() invalid config %d for FOR_MEDIA", config);
+            return BAD_VALUE;
+        }
+        mForceUse[usage] = config;
+        break;
+    case AUDIO_POLICY_FORCE_FOR_RECORD:
+        if (config != AUDIO_POLICY_FORCE_BT_SCO && config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
+            config != AUDIO_POLICY_FORCE_NONE) {
+            ALOGW("setForceUse() invalid config %d for FOR_RECORD", config);
+            return BAD_VALUE;
+        }
+        mForceUse[usage] = config;
+        break;
+    case AUDIO_POLICY_FORCE_FOR_DOCK:
+        if (config != AUDIO_POLICY_FORCE_NONE && config != AUDIO_POLICY_FORCE_BT_CAR_DOCK &&
+            config != AUDIO_POLICY_FORCE_BT_DESK_DOCK &&
+            config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
+            config != AUDIO_POLICY_FORCE_ANALOG_DOCK &&
+            config != AUDIO_POLICY_FORCE_DIGITAL_DOCK) {
+            ALOGW("setForceUse() invalid config %d for FOR_DOCK", config);
+        }
+        mForceUse[usage] = config;
+        break;
+    case AUDIO_POLICY_FORCE_FOR_SYSTEM:
+        if (config != AUDIO_POLICY_FORCE_NONE &&
+            config != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
+            ALOGW("setForceUse() invalid config %d for FOR_SYSTEM", config);
+        }
+        mForceUse[usage] = config;
+        break;
+    case AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO:
+        if (config != AUDIO_POLICY_FORCE_NONE &&
+            config != AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED) {
+            ALOGW("setForceUse() invalid config %d forHDMI_SYSTEM_AUDIO", config);
+        }
+        mForceUse[usage] = config;
+        break;
+    default:
+        ALOGW("setForceUse() invalid usage %d", usage);
+        break;
+    }
+    return NO_ERROR;
+}
+
+routing_strategy Engine::getStrategyForStream(audio_stream_type_t stream)
+{
+    // stream to strategy mapping
+    switch (stream) {
+    case AUDIO_STREAM_VOICE_CALL:
+    case AUDIO_STREAM_BLUETOOTH_SCO:
+        return STRATEGY_PHONE;
+    case AUDIO_STREAM_RING:
+    case AUDIO_STREAM_ALARM:
+        return STRATEGY_SONIFICATION;
+    case AUDIO_STREAM_NOTIFICATION:
+        return STRATEGY_SONIFICATION_RESPECTFUL;
+    case AUDIO_STREAM_DTMF:
+        return STRATEGY_DTMF;
+    default:
+        ALOGE("unknown stream type %d", stream);
+    case AUDIO_STREAM_SYSTEM:
+        // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
+        // while key clicks are played produces a poor result
+    case AUDIO_STREAM_MUSIC:
+        return STRATEGY_MEDIA;
+    case AUDIO_STREAM_ENFORCED_AUDIBLE:
+        return STRATEGY_ENFORCED_AUDIBLE;
+    case AUDIO_STREAM_TTS:
+        return STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
+    case AUDIO_STREAM_ACCESSIBILITY:
+        return STRATEGY_ACCESSIBILITY;
+    case AUDIO_STREAM_REROUTING:
+        return STRATEGY_REROUTING;
+    }
+}
+
+routing_strategy Engine::getStrategyForUsage(audio_usage_t usage)
+{
+    const AudioOutputCollection &outputs = mApmObserver->getOutputs();
+
+    // usage to strategy mapping
+    switch (usage) {
+    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+        if (outputs.isStreamActive(AUDIO_STREAM_RING) ||
+                outputs.isStreamActive(AUDIO_STREAM_ALARM)) {
+            return STRATEGY_SONIFICATION;
+        }
+        if (isInCall()) {
+            return STRATEGY_PHONE;
+        }
+        return STRATEGY_ACCESSIBILITY;
+
+    case AUDIO_USAGE_MEDIA:
+    case AUDIO_USAGE_GAME:
+    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+        return STRATEGY_MEDIA;
+
+    case AUDIO_USAGE_VOICE_COMMUNICATION:
+        return STRATEGY_PHONE;
+
+    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+        return STRATEGY_DTMF;
+
+    case AUDIO_USAGE_ALARM:
+    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+        return STRATEGY_SONIFICATION;
+
+    case AUDIO_USAGE_NOTIFICATION:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+    case AUDIO_USAGE_NOTIFICATION_EVENT:
+        return STRATEGY_SONIFICATION_RESPECTFUL;
+
+    case AUDIO_USAGE_UNKNOWN:
+    default:
+        return STRATEGY_MEDIA;
+    }
+}
+
+audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
+{
+    const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices();
+    const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices();
+
+    const AudioOutputCollection &outputs = mApmObserver->getOutputs();
+
+    uint32_t device = AUDIO_DEVICE_NONE;
+    uint32_t availableOutputDevicesType = availableOutputDevices.types();
+
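+    // Each strategy below masks the preferred device types against
+    // availableOutputDevicesType in decreasing order of priority and keeps the first
+    // non-empty match, generally falling back to the default output device when
+    // nothing matches.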
+    switch (strategy) {
+
+    case STRATEGY_TRANSMITTED_THROUGH_SPEAKER:
+        device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+        if (!device) {
+            ALOGE("getDeviceForStrategy() no device found for "\
+                    "STRATEGY_TRANSMITTED_THROUGH_SPEAKER");
+        }
+        break;
+
+    case STRATEGY_SONIFICATION_RESPECTFUL:
+        if (isInCall()) {
+            device = getDeviceForStrategy(STRATEGY_SONIFICATION);
+        } else if (outputs.isStreamActiveRemotely(AUDIO_STREAM_MUSIC,
+                SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
+            // while media is playing on a remote device, use the sonification behavior.
+            // Note that we test this use case before testing if media is playing because
+            //   the isStreamActive() method only informs about the activity of a stream, not
+            //   whether it is played locally. Note also that we use the same delay for both tests.
+            device = getDeviceForStrategy(STRATEGY_SONIFICATION);
+            //user "safe" speaker if available instead of normal speaker to avoid triggering
+            //other acoustic safety mechanisms for notification
+            if (device == AUDIO_DEVICE_OUT_SPEAKER && (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE))
+                device = AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+        } else if (outputs.isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
+            // while media is playing (or has recently played), use the same device
+            device = getDeviceForStrategy(STRATEGY_MEDIA);
+        } else {
+            // when media is not playing anymore, fall back on the sonification behavior
+            device = getDeviceForStrategy(STRATEGY_SONIFICATION);
+            //user "safe" speaker if available instead of normal speaker to avoid triggering
+            //other acoustic safety mechanisms for notification
+            if (device == AUDIO_DEVICE_OUT_SPEAKER && (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE))
+                device = AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+        }
+        break;
+
+    case STRATEGY_DTMF:
+        if (!isInCall()) {
+            // when off call, DTMF strategy follows the same rules as MEDIA strategy
+            device = getDeviceForStrategy(STRATEGY_MEDIA);
+            break;
+        }
+        // when in call, DTMF and PHONE strategies follow the same rules
+        // FALL THROUGH
+
+    case STRATEGY_PHONE:
+        // Force use of only devices on primary output if:
+        // - in call AND
+        //   - cannot route from voice call RX OR
+        //   - audio HAL version is < 3.0 and TX device is on the primary HW module
+        if (getPhoneState() == AUDIO_MODE_IN_CALL) {
+            audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+            sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
+            audio_devices_t availPrimaryInputDevices =
+                 availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle());
+            audio_devices_t availPrimaryOutputDevices =
+                    primaryOutput->supportedDevices() & availableOutputDevices.types();
+
+            if (((availableInputDevices.types() &
+                    AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
+                    (((txDevice & availPrimaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+                         (primaryOutput->getAudioPort()->mModule->mHalVersion <
+                             AUDIO_DEVICE_API_VERSION_3_0))) {
+                availableOutputDevicesType = availPrimaryOutputDevices;
+            }
+        }
+        // for phone strategy, we first consider the forced use and then the available devices by order
+        // of priority
+        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
+        case AUDIO_POLICY_FORCE_BT_SCO:
+            if (!isInCall() || strategy != STRATEGY_DTMF) {
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
+                if (device) break;
+            }
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
+            if (device) break;
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
+            if (device) break;
+            // if SCO device is requested but no SCO device is available, fall back to default case
+            // FALL THROUGH
+
+        default:    // FORCE_NONE
+            // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
+            if (!isInCall() &&
+                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
+                    (outputs.getA2dpOutput() != 0)) {
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
+                if (device) break;
+            }
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
+            if (device) break;
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
+            if (device) break;
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
+            if (device) break;
+            if (!isInCall()) {
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+                if (device) break;
+            }
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_EARPIECE;
+            if (device) break;
+            device = mApmObserver->getDefaultOutputDevice()->type();
+            if (device == AUDIO_DEVICE_NONE) {
+                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE");
+            }
+            break;
+
+        case AUDIO_POLICY_FORCE_SPEAKER:
+            // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to
+            // A2DP speaker when forcing to speaker output
+            if (!isInCall() &&
+                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
+                    (outputs.getA2dpOutput() != 0)) {
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
+                if (device) break;
+            }
+            if (!isInCall()) {
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+                if (device) break;
+            }
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
+            if (device) break;
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+            if (device) break;
+            device = mApmObserver->getDefaultOutputDevice()->type();
+            if (device == AUDIO_DEVICE_NONE) {
+                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE, FORCE_SPEAKER");
+            }
+            break;
+        }
+    break;
+
+    case STRATEGY_SONIFICATION:
+
+        // If in call, just select the STRATEGY_PHONE device: the rest of the behavior is
+        // handled by handleIncallSonification().
+        if (isInCall()) {
+            device = getDeviceForStrategy(STRATEGY_PHONE);
+            break;
+        }
+        // FALL THROUGH
+
+    case STRATEGY_ENFORCED_AUDIBLE:
+        // strategy STRATEGY_ENFORCED_AUDIBLE uses same routing policy as STRATEGY_SONIFICATION
+        // except:
+        //   - when in call where it doesn't default to STRATEGY_PHONE behavior
+        //   - in countries where not enforced in which case it follows STRATEGY_MEDIA
+
+        if ((strategy == STRATEGY_SONIFICATION) ||
+                (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+            if (device == AUDIO_DEVICE_NONE) {
+                ALOGE("getDeviceForStrategy() speaker device not found for STRATEGY_SONIFICATION");
+            }
+        }
+        // The second device used for sonification is the same as the device used by media strategy
+        // FALL THROUGH
+
+    // FIXME: STRATEGY_ACCESSIBILITY and STRATEGY_REROUTING follow STRATEGY_MEDIA for now
+    case STRATEGY_ACCESSIBILITY:
+        if (strategy == STRATEGY_ACCESSIBILITY) {
+            // do not route accessibility prompts to a digital output currently configured with a
+            // compressed format, as they would likely not be mixed and would be dropped.
+            for (size_t i = 0; i < outputs.size(); i++) {
+                sp<AudioOutputDescriptor> desc = outputs.valueAt(i);
+                audio_devices_t devices = desc->device() &
+                    (AUDIO_DEVICE_OUT_HDMI | AUDIO_DEVICE_OUT_SPDIF | AUDIO_DEVICE_OUT_HDMI_ARC);
+                if (desc->isActive() && !audio_is_linear_pcm(desc->mFormat) &&
+                        devices != AUDIO_DEVICE_NONE) {
+                    availableOutputDevicesType = availableOutputDevices.types() & ~devices;
+                }
+            }
+        }
+        // FALL THROUGH
+
+    case STRATEGY_REROUTING:
+    case STRATEGY_MEDIA: {
+        uint32_t device2 = AUDIO_DEVICE_NONE;
+        if (strategy != STRATEGY_SONIFICATION) {
+            // no sonification on remote submix (e.g. WFD)
+            if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, String8("0")) != 0) {
+                device2 = availableOutputDevices.types() & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+            }
+        }
+        if ((device2 == AUDIO_DEVICE_NONE) &&
+                (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
+                (outputs.getA2dpOutput() != 0)) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
+            if (device2 == AUDIO_DEVICE_NONE) {
+                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
+            }
+            if (device2 == AUDIO_DEVICE_NONE) {
+                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
+            }
+        }
+        if ((device2 == AUDIO_DEVICE_NONE) &&
+            (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] == AUDIO_POLICY_FORCE_SPEAKER)) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
+        }
+        if ((device2 == AUDIO_DEVICE_NONE) && (strategy != STRATEGY_SONIFICATION)) {
+            // no sonification on aux digital (e.g. HDMI)
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
+        }
+        if ((device2 == AUDIO_DEVICE_NONE) &&
+                (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+        }
+        int device3 = AUDIO_DEVICE_NONE;
+        if (strategy == STRATEGY_MEDIA) {
+            // ARC, SPDIF and AUX_LINE can co-exist with others.
+            device3 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HDMI_ARC;
+            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPDIF);
+            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_LINE);
+        }
+
+        device2 |= device3;
+        // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or
+        // STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise
+        device |= device2;
+
+        // If hdmi system audio mode is on, remove speaker out of output list.
+        if ((strategy == STRATEGY_MEDIA) &&
+            (mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] ==
+                AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
+            device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+        }
+
+        if (device) break;
+        device = mApmObserver->getDefaultOutputDevice()->type();
+        if (device == AUDIO_DEVICE_NONE) {
+            ALOGE("getDeviceForStrategy() no device found for STRATEGY_MEDIA");
+        }
+        } break;
+
+    default:
+        ALOGW("getDeviceForStrategy() unknown strategy: %d", strategy);
+        break;
+    }
+
+    ALOGVV("getDeviceForStrategy() strategy %d, device %x", strategy, device);
+    return device;
+}
+
+
+audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) const
+{
+    const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices();
+    const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices();
+    const AudioOutputCollection &outputs = mApmObserver->getOutputs();
+    audio_devices_t availableDeviceTypes = availableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
+
+    uint32_t device = AUDIO_DEVICE_NONE;
+
+    switch (inputSource) {
+    case AUDIO_SOURCE_VOICE_UPLINK:
+      if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
+          device = AUDIO_DEVICE_IN_VOICE_CALL;
+          break;
+      }
+      break;
+
+    case AUDIO_SOURCE_DEFAULT:
+    case AUDIO_SOURCE_MIC:
+    if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) {
+        device = AUDIO_DEVICE_IN_BLUETOOTH_A2DP;
+    } else if ((mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO) &&
+        (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET)) {
+        device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
+    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
+        device = AUDIO_DEVICE_IN_WIRED_HEADSET;
+    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
+        device = AUDIO_DEVICE_IN_USB_DEVICE;
+    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+        device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+    }
+    break;
+
+    case AUDIO_SOURCE_VOICE_COMMUNICATION:
+        // Allow only use of devices on primary input if in call and HAL does not support routing
+        // to voice call path.
+        if ((getPhoneState() == AUDIO_MODE_IN_CALL) &&
+                (availableOutputDevices.types() & AUDIO_DEVICE_OUT_TELEPHONY_TX) == 0) {
+            sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
+            availableDeviceTypes =
+                    availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle())
+                    & ~AUDIO_DEVICE_BIT_IN;
+        }
+
+        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
+        case AUDIO_POLICY_FORCE_BT_SCO:
+            // if SCO device is requested but no SCO device is available, fall back to default case
+            if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
+                device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
+                break;
+            }
+            // FALL THROUGH
+
+        default:    // FORCE_NONE
+            if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
+                device = AUDIO_DEVICE_IN_WIRED_HEADSET;
+            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
+                device = AUDIO_DEVICE_IN_USB_DEVICE;
+            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+            }
+            break;
+
+        case AUDIO_POLICY_FORCE_SPEAKER:
+            if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
+                device = AUDIO_DEVICE_IN_BACK_MIC;
+            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+            }
+            break;
+        }
+        break;
+
+    case AUDIO_SOURCE_VOICE_RECOGNITION:
+    case AUDIO_SOURCE_HOTWORD:
+        if (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO &&
+                availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
+            device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
+            device = AUDIO_DEVICE_IN_WIRED_HEADSET;
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
+            device = AUDIO_DEVICE_IN_USB_DEVICE;
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+        }
+        break;
+    case AUDIO_SOURCE_CAMCORDER:
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
+            device = AUDIO_DEVICE_IN_BACK_MIC;
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+        }
+        break;
+    case AUDIO_SOURCE_VOICE_DOWNLINK:
+    case AUDIO_SOURCE_VOICE_CALL:
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
+            device = AUDIO_DEVICE_IN_VOICE_CALL;
+        }
+        break;
+    case AUDIO_SOURCE_REMOTE_SUBMIX:
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
+            device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
+        }
+        break;
+     case AUDIO_SOURCE_FM_TUNER:
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_FM_TUNER) {
+            device = AUDIO_DEVICE_IN_FM_TUNER;
+        }
+        break;
+    default:
+        ALOGW("getDeviceForInputSource() invalid input source %d", inputSource);
+        break;
+    }
+    ALOGV("getDeviceForInputSource()input source %d, device %08x", inputSource, device);
+    return device;
+}
+
+template <>
+AudioPolicyManagerInterface *Engine::queryInterface()
+{
+    return &mManagerInterface;
+}
+
+} // namespace audio_policy
+} // namespace android
+
+
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
new file mode 100755
index 0000000..f44556c
--- /dev/null
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+
+#include "AudioPolicyManagerInterface.h"
+#include "Gains.h"
+#include <AudioGain.h>
+#include <policy.h>
+
+namespace android
+{
+
+class AudioPolicyManagerObserver;
+
+namespace audio_policy
+{
+
+class Engine
+{
+public:
+    Engine();
+    virtual ~Engine();
+
+    template <class RequestedInterface>
+    RequestedInterface *queryInterface();
+
+private:
+    /// Interface members
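+    // ManagerInterfaceImpl forwards each AudioPolicyManagerInterface call to the owning
+    // Engine; clients only ever manipulate the abstract interface returned by
+    // queryInterface<AudioPolicyManagerInterface>().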
+    class ManagerInterfaceImpl : public AudioPolicyManagerInterface
+    {
+    public:
+        ManagerInterfaceImpl(Engine *policyEngine)
+            : mPolicyEngine(policyEngine) {}
+
+        virtual void setObserver(AudioPolicyManagerObserver *observer)
+        {
+            mPolicyEngine->setObserver(observer);
+        }
+        virtual status_t initCheck()
+        {
+            return mPolicyEngine->initCheck();
+        }
+        virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const
+        {
+            return mPolicyEngine->getDeviceForInputSource(inputSource);
+        }
+        virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy) const
+        {
+            return mPolicyEngine->getDeviceForStrategy(strategy);
+        }
+        virtual routing_strategy getStrategyForStream(audio_stream_type_t stream)
+        {
+            return mPolicyEngine->getStrategyForStream(stream);
+        }
+        virtual routing_strategy getStrategyForUsage(audio_usage_t usage)
+        {
+            return mPolicyEngine->getStrategyForUsage(usage);
+        }
+        virtual status_t setPhoneState(audio_mode_t mode)
+        {
+            return mPolicyEngine->setPhoneState(mode);
+        }
+        virtual audio_mode_t getPhoneState() const
+        {
+            return mPolicyEngine->getPhoneState();
+        }
+        virtual status_t setForceUse(audio_policy_force_use_t usage,
+                                     audio_policy_forced_cfg_t config)
+        {
+            return mPolicyEngine->setForceUse(usage, config);
+        }
+        virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const
+        {
+            return mPolicyEngine->getForceUse(usage);
+        }
+        virtual status_t setDeviceConnectionState(const sp<DeviceDescriptor> /*devDesc*/,
+                                                  audio_policy_dev_state_t /*state*/)
+        {
+            return NO_ERROR;
+        }
+        virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax)
+        {
+            return mPolicyEngine->initStreamVolume(stream, indexMin, indexMax);
+        }
+        virtual void initializeVolumeCurves(bool isSpeakerDrcEnabled)
+        {
+            return mPolicyEngine->initializeVolumeCurves(isSpeakerDrcEnabled);
+        }
+        virtual float volIndexToAmpl(Volume::device_category deviceCategory,
+                                     audio_stream_type_t stream, int indexInUi)
+        {
+            return mPolicyEngine->volIndexToAmpl(deviceCategory, stream, indexInUi);
+        }
+    private:
+        Engine *mPolicyEngine;
+    } mManagerInterface;
+
+private:
+    /* Copy facilities are put private to disable copy. */
+    Engine(const Engine &object);
+    Engine &operator=(const Engine &object);
+
+    void setObserver(AudioPolicyManagerObserver *observer);
+
+    status_t initCheck();
+
+    inline bool isInCall() const
+    {
+        return is_state_in_call(mPhoneState);
+    }
+
+    status_t setPhoneState(audio_mode_t mode);
+    audio_mode_t getPhoneState() const
+    {
+        return mPhoneState;
+    }
+    status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
+    audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const
+    {
+        return mForceUse[usage];
+    }
+    status_t setDefaultDevice(audio_devices_t device);
+
+    routing_strategy getStrategyForStream(audio_stream_type_t stream);
+    routing_strategy getStrategyForUsage(audio_usage_t usage);
+    audio_devices_t getDeviceForStrategy(routing_strategy strategy) const;
+    audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const;
+
+    float volIndexToAmpl(Volume::device_category category,
+                         audio_stream_type_t stream, int indexInUi);
+    status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax);
+    void initializeVolumeCurves(bool isSpeakerDrcEnabled);
+
+    audio_mode_t mPhoneState;  /**< current phone state. */
+
+    /** current forced use configuration. */
+    audio_policy_forced_cfg_t mForceUse[AUDIO_POLICY_FORCE_USE_CNT];
+
+    AudioPolicyManagerObserver *mApmObserver;
+};
+} // namespace audio_policy
+} // namespace android
+
diff --git a/services/audiopolicy/enginedefault/src/EngineInstance.cpp b/services/audiopolicy/enginedefault/src/EngineInstance.cpp
new file mode 100755
index 0000000..17e9832
--- /dev/null
+++ b/services/audiopolicy/enginedefault/src/EngineInstance.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <AudioPolicyManagerInterface.h>
+#include "AudioPolicyEngineInstance.h"
+#include "Engine.h"
+
+namespace android
+{
+namespace audio_policy
+{
+
+EngineInstance::EngineInstance()
+{
+}
+
+EngineInstance *EngineInstance::getInstance()
+{
+    static EngineInstance instance;
+    return &instance;
+}
+
+EngineInstance::~EngineInstance()
+{
+}
+
+Engine *EngineInstance::getEngine() const
+{
+    static Engine engine;
+    return &engine;
+}
+
+template <>
+AudioPolicyManagerInterface *EngineInstance::queryInterface() const
+{
+    return getEngine()->queryInterface<AudioPolicyManagerInterface>();
+}
+
+} // namespace audio_policy
+} // namespace android
+
diff --git a/services/audiopolicy/enginedefault/src/Gains.cpp b/services/audiopolicy/enginedefault/src/Gains.cpp
new file mode 100644
index 0000000..a684fdd
--- /dev/null
+++ b/services/audiopolicy/enginedefault/src/Gains.cpp
@@ -0,0 +1,247 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::Gains"
+//#define LOG_NDEBUG 0
+
+//#define VERY_VERBOSE_LOGGING
+#ifdef VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+#include "Gains.h"
+#include <Volume.h>
+#include <math.h>
+#include <utils/String8.h>
+
+namespace android {
+
+// Enginedefault
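+// Each curve below holds Volume::VOLCNT points of the form {index, dB}: index is expressed
+// on a 0..100 scale of the stream's [min, max] volume index range and dB is the attenuation
+// applied at that index. volIndexToAmpl() interpolates linearly between consecutive points.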
+const VolumeCurvePoint
+Gains::sDefaultVolumeCurve[Volume::VOLCNT] = {
+    {1, -49.5f}, {33, -33.5f}, {66, -17.0f}, {100, 0.0f}
+};
+
+
+const VolumeCurvePoint
+Gains::sDefaultMediaVolumeCurve[Volume::VOLCNT] = {
+    {1, -58.0f}, {20, -40.0f}, {60, -17.0f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint
+Gains::sExtMediaSystemVolumeCurve[Volume::VOLCNT] = {
+    {1, -58.0f}, {20, -40.0f}, {60, -21.0f}, {100, -10.0f}
+};
+
+const VolumeCurvePoint
+Gains::sSpeakerMediaVolumeCurve[Volume::VOLCNT] = {
+    {1, -56.0f}, {20, -34.0f}, {60, -11.0f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint
+Gains::sSpeakerMediaVolumeCurveDrc[Volume::VOLCNT] = {
+    {1, -55.0f}, {20, -43.0f}, {86, -12.0f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint
+Gains::sSpeakerSonificationVolumeCurve[Volume::VOLCNT] = {
+    {1, -29.7f}, {33, -20.1f}, {66, -10.2f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint
+Gains::sSpeakerSonificationVolumeCurveDrc[Volume::VOLCNT] = {
+    {1, -35.7f}, {33, -26.1f}, {66, -13.2f}, {100, 0.0f}
+};
+
+// AUDIO_STREAM_SYSTEM, AUDIO_STREAM_ENFORCED_AUDIBLE and AUDIO_STREAM_DTMF volume tracks
+// AUDIO_STREAM_RING on phones and AUDIO_STREAM_MUSIC on tablets.
+// AUDIO_STREAM_DTMF tracks AUDIO_STREAM_VOICE_CALL while in call (See AudioService.java).
+// The range is constrained between -24dB and -6dB over speaker and -30dB and -18dB over headset.
+
+const VolumeCurvePoint
+Gains::sDefaultSystemVolumeCurve[Volume::VOLCNT] = {
+    {1, -24.0f}, {33, -18.0f}, {66, -12.0f}, {100, -6.0f}
+};
+
+const VolumeCurvePoint
+Gains::sDefaultSystemVolumeCurveDrc[Volume::VOLCNT] = {
+    {1, -34.0f}, {33, -24.0f}, {66, -15.0f}, {100, -6.0f}
+};
+
+const VolumeCurvePoint
+Gains::sHeadsetSystemVolumeCurve[Volume::VOLCNT] = {
+    {1, -30.0f}, {33, -26.0f}, {66, -22.0f}, {100, -18.0f}
+};
+
+const VolumeCurvePoint
+Gains::sDefaultVoiceVolumeCurve[Volume::VOLCNT] = {
+    {0, -42.0f}, {33, -28.0f}, {66, -14.0f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint
+Gains::sSpeakerVoiceVolumeCurve[Volume::VOLCNT] = {
+    {0, -24.0f}, {33, -16.0f}, {66, -8.0f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint
+Gains::sLinearVolumeCurve[Volume::VOLCNT] = {
+    {0, -96.0f}, {33, -68.0f}, {66, -34.0f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint
+Gains::sSilentVolumeCurve[Volume::VOLCNT] = {
+    {0, -96.0f}, {1, -96.0f}, {2, -96.0f}, {100, -96.0f}
+};
+
+const VolumeCurvePoint
+Gains::sFullScaleVolumeCurve[Volume::VOLCNT] = {
+    {0, 0.0f}, {1, 0.0f}, {2, 0.0f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint *Gains::sVolumeProfiles[AUDIO_STREAM_CNT]
+                                                  [Volume::DEVICE_CATEGORY_CNT] = {
+    { // AUDIO_STREAM_VOICE_CALL
+        Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_SYSTEM
+        Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultSystemVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_RING
+        Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_MUSIC
+        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_ALARM
+        Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_NOTIFICATION
+        Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_BLUETOOTH_SCO
+        Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_ENFORCED_AUDIBLE
+        Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    {  // AUDIO_STREAM_DTMF
+        Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_TTS
+      // "Transmitted Through Speaker": always silent except on DEVICE_CATEGORY_SPEAKER
+        Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sLinearVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sSilentVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_ACCESSIBILITY
+        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_REROUTING
+        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sFullScaleVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_PATCH
+        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sFullScaleVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+};
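+// sVolumeProfiles is indexed as [audio_stream_type_t][Volume::device_category]; row and
+// column order must therefore match those enums, since Engine::initializeVolumeCurves()
+// iterates over both of them by value.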
+
+//static
+float Gains::volIndexToAmpl(Volume::device_category deviceCategory,
+        const StreamDescriptor& streamDesc, int indexInUi)
+{
+    const VolumeCurvePoint *curve = streamDesc.getVolumeCurvePoint(deviceCategory);
+
+    // the volume index in the UI is relative to the min and max volume indices for this stream type
+    int nbSteps = 1 + curve[Volume::VOLMAX].mIndex -
+            curve[Volume::VOLMIN].mIndex;
+    int volIdx = (nbSteps * (indexInUi - streamDesc.getVolumeIndexMin())) /
+            (streamDesc.getVolumeIndexMax() - streamDesc.getVolumeIndexMin());
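+    // For example, assuming a stream index range of [0..10] and a curve whose min and max
+    // indices are 0 and 100: nbSteps is 101, so indexInUi = 5 maps to volIdx = 50, which
+    // lands between the two knee points of the default curves.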
+
+    // find what part of the curve this index volume belongs to, or if it's out of bounds
+    int segment = 0;
+    if (volIdx < curve[Volume::VOLMIN].mIndex) {         // out of bounds
+        return 0.0f;
+    } else if (volIdx < curve[Volume::VOLKNEE1].mIndex) {
+        segment = 0;
+    } else if (volIdx < curve[Volume::VOLKNEE2].mIndex) {
+        segment = 1;
+    } else if (volIdx <= curve[Volume::VOLMAX].mIndex) {
+        segment = 2;
+    } else {                                                               // out of bounds
+        return 1.0f;
+    }
+
+    // linear interpolation in the attenuation table in dB
+    float decibels = curve[segment].mDBAttenuation +
+            ((float)(volIdx - curve[segment].mIndex)) *
+                ( (curve[segment+1].mDBAttenuation -
+                        curve[segment].mDBAttenuation) /
+                    ((float)(curve[segment+1].mIndex -
+                            curve[segment].mIndex)) );
+
+    float amplification = exp( decibels * 0.115129f); // exp( dB * ln(10) / 20 )
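+    // Sanity check of the constant above: amplitude = 10^(dB/20) = exp(dB * ln(10) / 20)
+    // and ln(10)/20 ~= 0.11512925; e.g. -6 dB maps to ~0.501 and -20 dB maps to 0.1.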
+
+    ALOGVV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f] ampl=%.5f",
+            curve[segment].mIndex, volIdx,
+            curve[segment+1].mIndex,
+            curve[segment].mDBAttenuation,
+            decibels,
+            curve[segment+1].mDBAttenuation,
+            amplification);
+
+    return amplification;
+}
+
+}; // namespace android
diff --git a/services/audiopolicy/enginedefault/src/Gains.h b/services/audiopolicy/enginedefault/src/Gains.h
new file mode 100644
index 0000000..b5601ca
--- /dev/null
+++ b/services/audiopolicy/enginedefault/src/Gains.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <StreamDescriptor.h>
+#include <utils/KeyedVector.h>
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class StreamDescriptor;
+
+class Gains
+{
+public:
+    static float volIndexToAmpl(Volume::device_category deviceCategory,
+                    const StreamDescriptor& streamDesc, int indexInUi);
+
+    // default volume curve
+    static const VolumeCurvePoint sDefaultVolumeCurve[Volume::VOLCNT];
+    // default volume curve for media strategy
+    static const VolumeCurvePoint sDefaultMediaVolumeCurve[Volume::VOLCNT];
+    // volume curve for non-media audio on ext media outputs (HDMI, Line, etc)
+    static const VolumeCurvePoint sExtMediaSystemVolumeCurve[Volume::VOLCNT];
+    // volume curve for media strategy on speakers
+    static const VolumeCurvePoint sSpeakerMediaVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sSpeakerMediaVolumeCurveDrc[Volume::VOLCNT];
+    // volume curve for sonification strategy on speakers
+    static const VolumeCurvePoint sSpeakerSonificationVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sSpeakerSonificationVolumeCurveDrc[Volume::VOLCNT];
+    static const VolumeCurvePoint sDefaultSystemVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sDefaultSystemVolumeCurveDrc[Volume::VOLCNT];
+    static const VolumeCurvePoint sHeadsetSystemVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sDefaultVoiceVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sSpeakerVoiceVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sLinearVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sSilentVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sFullScaleVolumeCurve[Volume::VOLCNT];
+    // default volume curves per stream and device category. See initializeVolumeCurves()
+    static const VolumeCurvePoint *sVolumeProfiles[AUDIO_STREAM_CNT][Volume::DEVICE_CATEGORY_CNT];
+};
+
+}; // namespace android
diff --git a/services/audiopolicy/managerdefault/AudioInputDescriptor.cpp b/services/audiopolicy/managerdefault/AudioInputDescriptor.cpp
deleted file mode 100644
index f4054c8..0000000
--- a/services/audiopolicy/managerdefault/AudioInputDescriptor.cpp
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::AudioInputDescriptor"
-//#define LOG_NDEBUG 0
-
-#include "AudioPolicyManager.h"
-
-namespace android {
-
-AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile)
-    : mId(0), mIoHandle(0),
-      mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL), mPatchHandle(0), mRefCount(0),
-      mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile), mIsSoundTrigger(false)
-{
-    if (profile != NULL) {
-        mSamplingRate = profile->pickSamplingRate();
-        mFormat = profile->pickFormat();
-        mChannelMask = profile->pickChannelMask();
-        if (profile->mGains.size() > 0) {
-            profile->mGains[0]->getDefaultConfig(&mGain);
-        }
-    }
-}
-
-void AudioInputDescriptor::toAudioPortConfig(
-                                                   struct audio_port_config *dstConfig,
-                                                   const struct audio_port_config *srcConfig) const
-{
-    ALOG_ASSERT(mProfile != 0,
-                "toAudioPortConfig() called on input with null profile %d", mIoHandle);
-    dstConfig->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK|
-                            AUDIO_PORT_CONFIG_FORMAT|AUDIO_PORT_CONFIG_GAIN;
-    if (srcConfig != NULL) {
-        dstConfig->config_mask |= srcConfig->config_mask;
-    }
-
-    AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
-
-    dstConfig->id = mId;
-    dstConfig->role = AUDIO_PORT_ROLE_SINK;
-    dstConfig->type = AUDIO_PORT_TYPE_MIX;
-    dstConfig->ext.mix.hw_module = mProfile->mModule->mHandle;
-    dstConfig->ext.mix.handle = mIoHandle;
-    dstConfig->ext.mix.usecase.source = mInputSource;
-}
-
-void AudioInputDescriptor::toAudioPort(
-                                                    struct audio_port *port) const
-{
-    ALOG_ASSERT(mProfile != 0, "toAudioPort() called on input with null profile %d", mIoHandle);
-
-    mProfile->toAudioPort(port);
-    port->id = mId;
-    toAudioPortConfig(&port->active_config);
-    port->ext.mix.hw_module = mProfile->mModule->mHandle;
-    port->ext.mix.handle = mIoHandle;
-    port->ext.mix.latency_class = AUDIO_LATENCY_NORMAL;
-}
-
-status_t AudioInputDescriptor::dump(int fd)
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, " ID: %d\n", mId);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Format: %d\n", mFormat);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Channels: %08x\n", mChannelMask);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Devices %08x\n", mDevice);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Ref Count %d\n", mRefCount);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Open Ref Count %d\n", mOpenRefCount);
-    result.append(buffer);
-
-    write(fd, result.string(), result.size());
-
-    return NO_ERROR;
-}
-
-}; //namespace android
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 53ec0f6..797a2b4 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -24,20 +24,11 @@
 #define ALOGVV(a...) do { } while(0)
 #endif
 
-// A device mask for all audio input devices that are considered "virtual" when evaluating
-// active inputs in getActiveInput()
-#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL  (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_FM_TUNER)
-// A device mask for all audio output devices that are considered "remote" when evaluating
-// active output devices in isStreamActiveRemotely()
-#define APM_AUDIO_OUT_DEVICE_REMOTE_ALL  AUDIO_DEVICE_OUT_REMOTE_SUBMIX
-// A device mask for all audio input and output devices where matching inputs/outputs on device
-// type alone is not enough: the address must match too
-#define APM_AUDIO_DEVICE_MATCH_ADDRESS_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX | \
-                                            AUDIO_DEVICE_OUT_REMOTE_SUBMIX)
-
 #include <inttypes.h>
 #include <math.h>
 
+#include <AudioPolicyManagerInterface.h>
+#include <AudioPolicyEngineInstance.h>
 #include <cutils/properties.h>
 #include <utils/Log.h>
 #include <hardware/audio.h>
@@ -47,6 +38,8 @@
 #include <soundtrigger/SoundTrigger.h>
 #include "AudioPolicyManager.h"
 #include "audio_policy_conf.h"
+#include <ConfigParsingUtils.h>
+#include <policy.h>
 
 namespace android {
 
@@ -73,7 +66,8 @@
     // connect/disconnect only 1 device at a time
     if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
 
-    sp<DeviceDescriptor> devDesc = getDeviceDescriptor(device, device_address, device_name);
+    sp<DeviceDescriptor> devDesc =
+            mHwModules.getDeviceDescriptor(device, device_address, device_name);
 
     // handle output devices
     if (audio_is_output_device(device)) {
@@ -97,7 +91,7 @@
             // register new device as available
             index = mAvailableOutputDevices.add(devDesc);
             if (index >= 0) {
-                sp<HwModule> module = getModuleForDevice(device);
+                sp<HwModule> module = mHwModules.getModuleForDevice(device);
                 if (module == 0) {
                     ALOGD("setDeviceConnectionState() could not find HW module for device %08x",
                           device);
@@ -113,6 +107,9 @@
                 mAvailableOutputDevices.remove(devDesc);
                 return INVALID_OPERATION;
             }
+            // Propagate device availability to Engine
+            mEngine->setDeviceConnectionState(devDesc, state);
+
             // outputs should never be empty here
             ALOG_ASSERT(outputs.size() != 0, "setDeviceConnectionState():"
                     "checkOutputsForDevice() returned no outputs but status OK");
@@ -143,6 +140,9 @@
             mAvailableOutputDevices.remove(devDesc);
 
             checkOutputsForDevice(devDesc, state, outputs, devDesc->mAddress);
+
+            // Propagate device availability to Engine
+            mEngine->setDeviceConnectionState(devDesc, state);
             } break;
 
         default:
@@ -171,20 +171,20 @@
         }
 
         updateDevicesAndOutputs();
-        if (mPhoneState == AUDIO_MODE_IN_CALL) {
+        if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL) {
             audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
             updateCallRouting(newDevice);
         }
         for (size_t i = 0; i < mOutputs.size(); i++) {
             audio_io_handle_t output = mOutputs.keyAt(i);
-            if ((mPhoneState != AUDIO_MODE_IN_CALL) || (output != mPrimaryOutput)) {
+            if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (output != mPrimaryOutput)) {
                 audio_devices_t newDevice = getNewOutputDevice(mOutputs.keyAt(i),
                                                                true /*fromCache*/);
                 // do not force device change on duplicated output because if device is 0, it will
                 // also force a device 0 for the two outputs it is duplicated to which may override
                 // a valid device selection on those outputs.
                 bool force = !mOutputs.valueAt(i)->isDuplicated()
-                        && (!deviceDistinguishesOnAddress(device)
+                        && (!device_distinguishes_on_address(device)
                                 // always force when disconnecting (a non-duplicated device)
                                 || (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
                 setOutputDevice(output, newDevice, force, 0);
@@ -208,7 +208,7 @@
                 ALOGW("setDeviceConnectionState() device already connected: %d", device);
                 return INVALID_OPERATION;
             }
-            sp<HwModule> module = getModuleForDevice(device);
+            sp<HwModule> module = mHwModules.getModuleForDevice(device);
             if (module == NULL) {
                 ALOGW("setDeviceConnectionState(): could not find HW module for device %08x",
                       device);
@@ -230,6 +230,8 @@
             param.addInt(String8(AUDIO_PARAMETER_DEVICE_CONNECT), device);
             mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
 
+            // Propagate device availability to Engine
+            mEngine->setDeviceConnectionState(devDesc, state);
         } break;
 
         // handle input device disconnection
@@ -249,6 +251,8 @@
             checkInputsForDevice(device, state, inputs, devDesc->mAddress);
             mAvailableInputDevices.remove(devDesc);
 
+            // Propagate device availability to Engine
+            mEngine->setDeviceConnectionState(devDesc, state);
         } break;
 
         default:
@@ -258,7 +262,7 @@
 
         closeAllInputs();
 
-        if (mPhoneState == AUDIO_MODE_IN_CALL) {
+        if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL) {
             audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
             updateCallRouting(newDevice);
         }
@@ -272,9 +276,10 @@
 }
 
 audio_policy_dev_state_t AudioPolicyManager::getDeviceConnectionState(audio_devices_t device,
-                                                  const char *device_address)
+                                                                      const char *device_address)
 {
-    sp<DeviceDescriptor> devDesc = getDeviceDescriptor(device, device_address, "");
+    sp<DeviceDescriptor> devDesc = mHwModules.getDeviceDescriptor(device, device_address, "");
+
     DeviceVector *deviceVector;
 
     if (audio_is_output_device(device)) {
@@ -285,44 +290,7 @@
         ALOGW("getDeviceConnectionState() invalid device type %08x", device);
         return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
     }
-
-    ssize_t index = deviceVector->indexOf(devDesc);
-    if (index >= 0) {
-        return AUDIO_POLICY_DEVICE_STATE_AVAILABLE;
-    } else {
-        return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
-    }
-}
-
-sp<DeviceDescriptor>  AudioPolicyManager::getDeviceDescriptor(const audio_devices_t device,
-                                                              const char *device_address,
-                                                              const char *device_name)
-{
-    String8 address = (device_address == NULL) ? String8("") : String8(device_address);
-    // handle legacy remote submix case where the address was not always specified
-    if (deviceDistinguishesOnAddress(device) && (address.length() == 0)) {
-        address = String8("0");
-    }
-
-    for (size_t i = 0; i < mHwModules.size(); i++) {
-        if (mHwModules[i]->mHandle == 0) {
-            continue;
-        }
-        DeviceVector deviceList =
-                mHwModules[i]->mDeclaredDevices.getDevicesFromTypeAddr(device, address);
-        if (!deviceList.isEmpty()) {
-            return deviceList.itemAt(0);
-        }
-        deviceList = mHwModules[i]->mDeclaredDevices.getDevicesFromType(device);
-        if (!deviceList.isEmpty()) {
-            return deviceList.itemAt(0);
-        }
-    }
-
-    sp<DeviceDescriptor> devDesc =
-            new DeviceDescriptor(String8(device_name != NULL ? device_name : ""), device);
-    devDesc->mAddress = address;
-    return devDesc;
+    return deviceVector->getDeviceConnectionState(devDesc);
 }
 
 void AudioPolicyManager::updateCallRouting(audio_devices_t rxDevice, int delayMs)
@@ -393,8 +361,7 @@
         ALOGW_IF(status != NO_ERROR, "updateCallRouting() error %d creating RX audio patch",
                                                status);
         if (status == NO_ERROR) {
-            mCallRxPatch = new AudioPatch((audio_patch_handle_t)nextUniqueId(),
-                                       &patch, mUidCached);
+            mCallRxPatch = new AudioPatch(&patch, mUidCached);
             mCallRxPatch->mAfPatchHandle = afPatchHandle;
             mCallRxPatch->mUid = mUidCached;
         }
@@ -436,8 +403,7 @@
         ALOGW_IF(status != NO_ERROR, "setPhoneState() error %d creating TX audio patch",
                                                status);
         if (status == NO_ERROR) {
-            mCallTxPatch = new AudioPatch((audio_patch_handle_t)nextUniqueId(),
-                                       &patch, mUidCached);
+            mCallTxPatch = new AudioPatch(&patch, mUidCached);
             mCallTxPatch->mAfPatchHandle = afPatchHandle;
             mCallTxPatch->mUid = mUidCached;
         }
@@ -447,16 +413,14 @@
 void AudioPolicyManager::setPhoneState(audio_mode_t state)
 {
     ALOGV("setPhoneState() state %d", state);
-    if (state < 0 || state >= AUDIO_MODE_CNT) {
-        ALOGW("setPhoneState() invalid state %d", state);
+    // store previous phone state for management of sonification strategy below
+    int oldState = mEngine->getPhoneState();
+
+    if (mEngine->setPhoneState(state) != NO_ERROR) {
+        ALOGW("setPhoneState() invalid or same state %d", state);
         return;
     }
-
-    if (state == mPhoneState ) {
-        ALOGW("setPhoneState() setting same state %d", state);
-        return;
-    }
-
+    /// Open question: can these lines be executed after the switch of volume curves?
     // if leaving call state, handle special case of active streams
     // pertaining to sonification strategy see handleIncallSonification()
     if (isInCall()) {
@@ -472,36 +436,12 @@
         mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
     }
 
-    // store previous phone state for management of sonification strategy below
-    int oldState = mPhoneState;
-    mPhoneState = state;
-    bool force = false;
-
-    // are we entering or starting a call
-    if (!isStateInCall(oldState) && isStateInCall(state)) {
-        ALOGV("  Entering call in setPhoneState()");
-        // force routing command to audio hardware when starting a call
-        // even if no device change is needed
-        force = true;
-        for (int j = 0; j < ApmGains::DEVICE_CATEGORY_CNT; j++) {
-            mStreams[AUDIO_STREAM_DTMF].mVolumeCurve[j] =
-                    ApmGains::sVolumeProfiles[AUDIO_STREAM_VOICE_CALL][j];
-        }
-    } else if (isStateInCall(oldState) && !isStateInCall(state)) {
-        ALOGV("  Exiting call in setPhoneState()");
-        // force routing command to audio hardware when exiting a call
-        // even if no device change is needed
-        force = true;
-        for (int j = 0; j < ApmGains::DEVICE_CATEGORY_CNT; j++) {
-            mStreams[AUDIO_STREAM_DTMF].mVolumeCurve[j] =
-                    ApmGains::sVolumeProfiles[AUDIO_STREAM_DTMF][j];
-        }
-    } else if (isStateInCall(state) && (state != oldState)) {
-        ALOGV("  Switching between telephony and VoIP in setPhoneState()");
-        // force routing command to audio hardware when switching between telephony and VoIP
-        // even if no device change is needed
-        force = true;
-    }
+    /**
+     * Switching to or from the in-call state, or switching between telephony and VoIP, forces a
+     * routing command even if no device change is needed.
+     */
+    bool force = ((is_state_in_call(oldState) != is_state_in_call(state))
+                  || (is_state_in_call(state) && (state != oldState)));
 
     // check for device and output changes triggered by new phone state
     checkA2dpSuspend();
@@ -518,12 +458,12 @@
             // mute media and sonification strategies and delay device switch by the largest
             // latency of any output where either strategy is active.
             // This avoid sending the ring tone or music tail into the earpiece or headset.
-            if ((desc->isStrategyActive(STRATEGY_MEDIA,
-                                     SONIFICATION_HEADSET_MUSIC_DELAY,
-                                     sysTime) ||
-                    desc->isStrategyActive(STRATEGY_SONIFICATION,
-                                         SONIFICATION_HEADSET_MUSIC_DELAY,
-                                         sysTime)) &&
+            if ((isStrategyActive(desc, STRATEGY_MEDIA,
+                                  SONIFICATION_HEADSET_MUSIC_DELAY,
+                                  sysTime) ||
+                 isStrategyActive(desc, STRATEGY_SONIFICATION,
+                                  SONIFICATION_HEADSET_MUSIC_DELAY,
+                                  sysTime)) &&
                     (delayMs < (int)desc->mLatency*2)) {
                 delayMs = desc->mLatency*2;
             }
@@ -581,84 +521,35 @@
     }
 }
 
+audio_mode_t AudioPolicyManager::getPhoneState() {
+    return mEngine->getPhoneState();
+}
+
 void AudioPolicyManager::setForceUse(audio_policy_force_use_t usage,
                                          audio_policy_forced_cfg_t config)
 {
-    ALOGV("setForceUse() usage %d, config %d, mPhoneState %d", usage, config, mPhoneState);
+    ALOGV("setForceUse() usage %d, config %d, mPhoneState %d", usage, config, mEngine->getPhoneState());
 
-    bool forceVolumeReeval = false;
-    switch(usage) {
-    case AUDIO_POLICY_FORCE_FOR_COMMUNICATION:
-        if (config != AUDIO_POLICY_FORCE_SPEAKER && config != AUDIO_POLICY_FORCE_BT_SCO &&
-            config != AUDIO_POLICY_FORCE_NONE) {
-            ALOGW("setForceUse() invalid config %d for FOR_COMMUNICATION", config);
-            return;
-        }
-        forceVolumeReeval = true;
-        mForceUse[usage] = config;
-        break;
-    case AUDIO_POLICY_FORCE_FOR_MEDIA:
-        if (config != AUDIO_POLICY_FORCE_HEADPHONES && config != AUDIO_POLICY_FORCE_BT_A2DP &&
-            config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
-            config != AUDIO_POLICY_FORCE_ANALOG_DOCK &&
-            config != AUDIO_POLICY_FORCE_DIGITAL_DOCK && config != AUDIO_POLICY_FORCE_NONE &&
-            config != AUDIO_POLICY_FORCE_NO_BT_A2DP && config != AUDIO_POLICY_FORCE_SPEAKER ) {
-            ALOGW("setForceUse() invalid config %d for FOR_MEDIA", config);
-            return;
-        }
-        mForceUse[usage] = config;
-        break;
-    case AUDIO_POLICY_FORCE_FOR_RECORD:
-        if (config != AUDIO_POLICY_FORCE_BT_SCO && config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
-            config != AUDIO_POLICY_FORCE_NONE) {
-            ALOGW("setForceUse() invalid config %d for FOR_RECORD", config);
-            return;
-        }
-        mForceUse[usage] = config;
-        break;
-    case AUDIO_POLICY_FORCE_FOR_DOCK:
-        if (config != AUDIO_POLICY_FORCE_NONE && config != AUDIO_POLICY_FORCE_BT_CAR_DOCK &&
-            config != AUDIO_POLICY_FORCE_BT_DESK_DOCK &&
-            config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
-            config != AUDIO_POLICY_FORCE_ANALOG_DOCK &&
-            config != AUDIO_POLICY_FORCE_DIGITAL_DOCK) {
-            ALOGW("setForceUse() invalid config %d for FOR_DOCK", config);
-        }
-        forceVolumeReeval = true;
-        mForceUse[usage] = config;
-        break;
-    case AUDIO_POLICY_FORCE_FOR_SYSTEM:
-        if (config != AUDIO_POLICY_FORCE_NONE &&
-            config != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
-            ALOGW("setForceUse() invalid config %d for FOR_SYSTEM", config);
-        }
-        forceVolumeReeval = true;
-        mForceUse[usage] = config;
-        break;
-    case AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO:
-        if (config != AUDIO_POLICY_FORCE_NONE &&
-            config != AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED) {
-            ALOGW("setForceUse() invalid config %d forHDMI_SYSTEM_AUDIO", config);
-        }
-        mForceUse[usage] = config;
-        break;
-    default:
-        ALOGW("setForceUse() invalid usage %d", usage);
-        break;
+    if (mEngine->setForceUse(usage, config) != NO_ERROR) {
+        ALOGW("setForceUse() could not set force cfg %d for usage %d", config, usage);
+        return;
     }
+    bool forceVolumeReeval = (usage == AUDIO_POLICY_FORCE_FOR_COMMUNICATION) ||
+            (usage == AUDIO_POLICY_FORCE_FOR_DOCK) ||
+            (usage == AUDIO_POLICY_FORCE_FOR_SYSTEM);
 
     // check for device and output changes triggered by new force usage
     checkA2dpSuspend();
     checkOutputForAllStrategies();
     updateDevicesAndOutputs();
-    if (mPhoneState == AUDIO_MODE_IN_CALL) {
+    if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL) {
         audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, true /*fromCache*/);
         updateCallRouting(newDevice);
     }
     for (size_t i = 0; i < mOutputs.size(); i++) {
         audio_io_handle_t output = mOutputs.keyAt(i);
         audio_devices_t newDevice = getNewOutputDevice(output, true /*fromCache*/);
-        if ((mPhoneState != AUDIO_MODE_IN_CALL) || (output != mPrimaryOutput)) {
+        if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (output != mPrimaryOutput)) {
             setOutputDevice(output, newDevice, (newDevice != AUDIO_DEVICE_NONE));
         }
         if (forceVolumeReeval && (newDevice != AUDIO_DEVICE_NONE)) {
@@ -666,18 +557,13 @@
         }
     }
 
-    audio_io_handle_t activeInput = getActiveInput();
+    audio_io_handle_t activeInput = mInputs.getActiveInput();
     if (activeInput != 0) {
         setInputDevice(activeInput, getNewInputDevice(activeInput));
     }
 
 }
 
-audio_policy_forced_cfg_t AudioPolicyManager::getForceUse(audio_policy_force_use_t usage)
-{
-    return mForceUse[usage];
-}
-
 void AudioPolicyManager::setSystemProperty(const char* property, const char* value)
 {
     ALOGV("setSystemProperty() property %s, value %s", property, value);
@@ -711,11 +597,11 @@
 }
 
 audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream,
-                                    uint32_t samplingRate,
-                                    audio_format_t format,
-                                    audio_channel_mask_t channelMask,
-                                    audio_output_flags_t flags,
-                                    const audio_offload_info_t *offloadInfo)
+                                                uint32_t samplingRate,
+                                                audio_format_t format,
+                                                audio_channel_mask_t channelMask,
+                                                audio_output_flags_t flags,
+                                                const audio_offload_info_t *offloadInfo)
 {
     routing_strategy strategy = getStrategy(stream);
     audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
@@ -753,45 +639,16 @@
         }
         stream_type_to_audio_attributes(*stream, &attributes);
     }
-
-    for (size_t i = 0; i < mPolicyMixes.size(); i++) {
-        sp<AudioOutputDescriptor> desc;
-        if (mPolicyMixes[i]->mMix.mMixType == MIX_TYPE_PLAYERS) {
-            for (size_t j = 0; j < mPolicyMixes[i]->mMix.mCriteria.size(); j++) {
-                if ((RULE_MATCH_ATTRIBUTE_USAGE == mPolicyMixes[i]->mMix.mCriteria[j].mRule &&
-                        mPolicyMixes[i]->mMix.mCriteria[j].mAttr.mUsage == attributes.usage) ||
-                    (RULE_EXCLUDE_ATTRIBUTE_USAGE == mPolicyMixes[i]->mMix.mCriteria[j].mRule &&
-                        mPolicyMixes[i]->mMix.mCriteria[j].mAttr.mUsage != attributes.usage)) {
-                    desc = mPolicyMixes[i]->mOutput;
-                    break;
-                }
-                if (strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
-                        strncmp(attributes.tags + strlen("addr="),
-                                mPolicyMixes[i]->mMix.mRegistrationId.string(),
-                                AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
-                    desc = mPolicyMixes[i]->mOutput;
-                    break;
-                }
-            }
-        } else if (mPolicyMixes[i]->mMix.mMixType == MIX_TYPE_RECORDERS) {
-            if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE &&
-                    strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
-                    strncmp(attributes.tags + strlen("addr="),
-                            mPolicyMixes[i]->mMix.mRegistrationId.string(),
-                            AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
-                desc = mPolicyMixes[i]->mOutput;
-            }
+    sp<AudioOutputDescriptor> desc;
+    if (mPolicyMixes.getOutputForAttr(attributes, desc) == NO_ERROR) {
+        ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
+        if (!audio_is_linear_pcm(format)) {
+            return BAD_VALUE;
         }
-        if (desc != 0) {
-            if (!audio_is_linear_pcm(format)) {
-                return BAD_VALUE;
-            }
-            desc->mPolicyMix = &mPolicyMixes[i]->mMix;
-            *stream = streamTypefromAttributesInt(&attributes);
-            *output = desc->mIoHandle;
-            ALOGV("getOutputForAttr() returns output %d", *output);
-            return NO_ERROR;
-        }
+        *stream = streamTypefromAttributesInt(&attributes);
+        *output = desc->mIoHandle;
+        ALOGV("getOutputForAttr() returns output %d", *output);
+        return NO_ERROR;
     }
     if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
         ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
@@ -799,7 +656,7 @@
     }
 
     ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x",
-            attributes.usage, attributes.content_type, attributes.tags, attributes.flags);
+          attributes.usage, attributes.content_type, attributes.tags, attributes.flags);
 
     routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
     audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
@@ -909,7 +766,7 @@
     // in the background.
 
     if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
-            !isNonOffloadableEffectEnabled()) {
+            !mEffects.isNonOffloadableEffectEnabled()) {
         profile = getProfileForDirectOutput(device,
                                            samplingRate,
                                            format,
@@ -1095,7 +952,7 @@
     uint32_t beaconMuteLatency = 0;
     if (stream == AUDIO_STREAM_TTS) {
         ALOGV("\t found BEACON stream");
-        if (isAnyOutputActive(AUDIO_STREAM_TTS /*streamToIgnore*/)) {
+        if (mOutputs.isAnyOutputActive(AUDIO_STREAM_TTS /*streamToIgnore*/)) {
             return INVALID_OPERATION;
         } else {
             beaconMuteLatency = handleEventForBeacon(STARTING_BEACON);
@@ -1274,7 +1131,7 @@
         sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
         if (outputDesc->isActive()) {
             mpClientInterface->closeOutput(output);
-            mOutputs.removeItem(output);
+            removeOutput(output);
             mTestOutputs[testIndex] = 0;
         }
         return;
@@ -1332,19 +1189,13 @@
 
     if (inputSource == AUDIO_SOURCE_REMOTE_SUBMIX &&
             strncmp(attr->tags, "addr=", strlen("addr=")) == 0) {
+        status_t ret = mPolicyMixes.getInputMixForAttr(*attr, policyMix);
+        if (ret != NO_ERROR) {
+            return ret;
+        }
+        *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
         device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
         address = String8(attr->tags + strlen("addr="));
-        ssize_t index = mPolicyMixes.indexOfKey(address);
-        if (index < 0) {
-            ALOGW("getInputForAttr() no policy for address %s", address.string());
-            return BAD_VALUE;
-        }
-        if (mPolicyMixes[index]->mMix.mMixType != MIX_TYPE_PLAYERS) {
-            ALOGW("getInputForAttr() bad policy mix type for address %s", address.string());
-            return BAD_VALUE;
-        }
-        policyMix = &mPolicyMixes[index]->mMix;
-        *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
     } else {
         device = getDeviceAndMixForInputSource(inputSource, &policyMix);
         if (device == AUDIO_DEVICE_NONE) {
@@ -1482,10 +1333,10 @@
     }
 
     // virtual input devices are compatible with other input devices
-    if (!isVirtualInputDevice(inputDesc->mDevice)) {
+    if (!is_virtual_input_device(inputDesc->mDevice)) {
 
         // for a non-virtual input device, check if there is another (non-virtual) active input
-        audio_io_handle_t activeInput = getActiveInput();
+        audio_io_handle_t activeInput = mInputs.getActiveInput();
         if (activeInput != 0 && activeInput != input) {
 
             // If the already active input uses AUDIO_SOURCE_HOTWORD then it is closed,
@@ -1503,7 +1354,7 @@
     }
 
     if (inputDesc->mRefCount == 0) {
-        if (activeInputsCount() == 0) {
+        if (mInputs.activeInputsCount() == 0) {
             SoundTrigger::setCaptureState(true);
         }
         setInputDevice(input, getNewInputDevice(input), true /* force */);
@@ -1575,7 +1426,7 @@
 
         resetInputDevice(input);
 
-        if (activeInputsCount() == 0) {
+        if (mInputs.activeInputsCount() == 0) {
             SoundTrigger::setCaptureState(false);
         }
     }
@@ -1642,25 +1493,20 @@
                                             int indexMax)
 {
     ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
-    if (indexMin < 0 || indexMin >= indexMax) {
-        ALOGW("initStreamVolume() invalid index limits for stream %d, min %d, max %d", stream , indexMin, indexMax);
-        return;
-    }
-    mStreams[stream].mIndexMin = indexMin;
-    mStreams[stream].mIndexMax = indexMax;
+    mEngine->initStreamVolume(stream, indexMin, indexMax);
     //FIXME: AUDIO_STREAM_ACCESSIBILITY volume follows AUDIO_STREAM_MUSIC for now
     if (stream == AUDIO_STREAM_MUSIC) {
-        mStreams[AUDIO_STREAM_ACCESSIBILITY].mIndexMin = indexMin;
-        mStreams[AUDIO_STREAM_ACCESSIBILITY].mIndexMax = indexMax;
+        mEngine->initStreamVolume(AUDIO_STREAM_ACCESSIBILITY, indexMin, indexMax);
     }
 }
 
 status_t AudioPolicyManager::setStreamVolumeIndex(audio_stream_type_t stream,
-                                                      int index,
-                                                      audio_devices_t device)
+                                                  int index,
+                                                  audio_devices_t device)
 {
 
-    if ((index < mStreams[stream].mIndexMin) || (index > mStreams[stream].mIndexMax)) {
+    if ((index < mStreams[stream].getVolumeIndexMin()) ||
+            (index > mStreams[stream].getVolumeIndexMax())) {
         return BAD_VALUE;
     }
     if (!audio_is_output_device(device)) {
@@ -1668,7 +1514,7 @@
     }
 
     // Force max volume if stream cannot be muted
-    if (!mStreams[stream].mCanBeMuted) index = mStreams[stream].mIndexMax;
+    if (!mStreams.canBeMuted(stream)) index = mStreams[stream].getVolumeIndexMax();
 
     ALOGV("setStreamVolumeIndex() stream %d, device %04x, index %d",
           stream, device, index);
@@ -1676,9 +1522,9 @@
     // if device is AUDIO_DEVICE_OUT_DEFAULT set default value and
     // clear all device specific values
     if (device == AUDIO_DEVICE_OUT_DEFAULT) {
-        mStreams[stream].mIndexCur.clear();
+        mStreams.clearCurrentVolumeIndex(stream);
     }
-    mStreams[stream].mIndexCur.add(device, index);
+    mStreams.addCurrentVolumeIndex(stream, device, index);
 
     // update volume on all outputs whose current device is also selected by the same
     // strategy as the device specified by the caller
@@ -1688,7 +1534,7 @@
     //FIXME: AUDIO_STREAM_ACCESSIBILITY volume follows AUDIO_STREAM_MUSIC for now
     audio_devices_t accessibilityDevice = AUDIO_DEVICE_NONE;
     if (stream == AUDIO_STREAM_MUSIC) {
-        mStreams[AUDIO_STREAM_ACCESSIBILITY].mIndexCur.add(device, index);
+        mStreams.addCurrentVolumeIndex(AUDIO_STREAM_ACCESSIBILITY, device, index);
         accessibilityDevice = getDeviceForStrategy(STRATEGY_ACCESSIBILITY, true /*fromCache*/);
     }
     if ((device != AUDIO_DEVICE_OUT_DEFAULT) &&
@@ -1697,8 +1543,7 @@
     }
     status_t status = NO_ERROR;
     for (size_t i = 0; i < mOutputs.size(); i++) {
-        audio_devices_t curDevice =
-                ApmGains::getDeviceForVolume(mOutputs.valueAt(i)->device());
+        audio_devices_t curDevice = Volume::getDeviceForVolume(mOutputs.valueAt(i)->device());
         if ((device == AUDIO_DEVICE_OUT_DEFAULT) || ((curDevice & strategyDevice) != 0)) {
             status_t volStatus = checkAndSetVolume(stream, index, mOutputs.keyAt(i), curDevice);
             if (volStatus != NO_ERROR) {
@@ -1728,7 +1573,7 @@
     if (device == AUDIO_DEVICE_OUT_DEFAULT) {
         device = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/);
     }
-    device = ApmGains::getDeviceForVolume(device);
+    device = Volume::getDeviceForVolume(device);
 
     *index =  mStreams[stream].getVolumeIndex(device);
     ALOGV("getStreamVolumeIndex() stream %d device %08x index %d", stream, device, *index);
@@ -1805,137 +1650,7 @@
             return INVALID_OPERATION;
         }
     }
-
-    if (mTotalEffectsMemory + desc->memoryUsage > getMaxEffectsMemory()) {
-        ALOGW("registerEffect() memory limit exceeded for Fx %s, Memory %d KB",
-                desc->name, desc->memoryUsage);
-        return INVALID_OPERATION;
-    }
-    mTotalEffectsMemory += desc->memoryUsage;
-    ALOGV("registerEffect() effect %s, io %d, strategy %d session %d id %d",
-            desc->name, io, strategy, session, id);
-    ALOGV("registerEffect() memory %d, total memory %d", desc->memoryUsage, mTotalEffectsMemory);
-
-    sp<EffectDescriptor> effectDesc = new EffectDescriptor();
-    memcpy (&effectDesc->mDesc, desc, sizeof(effect_descriptor_t));
-    effectDesc->mIo = io;
-    effectDesc->mStrategy = (routing_strategy)strategy;
-    effectDesc->mSession = session;
-    effectDesc->mEnabled = false;
-
-    mEffects.add(id, effectDesc);
-
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::unregisterEffect(int id)
-{
-    ssize_t index = mEffects.indexOfKey(id);
-    if (index < 0) {
-        ALOGW("unregisterEffect() unknown effect ID %d", id);
-        return INVALID_OPERATION;
-    }
-
-    sp<EffectDescriptor> effectDesc = mEffects.valueAt(index);
-
-    setEffectEnabled(effectDesc, false);
-
-    if (mTotalEffectsMemory < effectDesc->mDesc.memoryUsage) {
-        ALOGW("unregisterEffect() memory %d too big for total %d",
-                effectDesc->mDesc.memoryUsage, mTotalEffectsMemory);
-        effectDesc->mDesc.memoryUsage = mTotalEffectsMemory;
-    }
-    mTotalEffectsMemory -= effectDesc->mDesc.memoryUsage;
-    ALOGV("unregisterEffect() effect %s, ID %d, memory %d total memory %d",
-            effectDesc->mDesc.name, id, effectDesc->mDesc.memoryUsage, mTotalEffectsMemory);
-
-    mEffects.removeItem(id);
-
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::setEffectEnabled(int id, bool enabled)
-{
-    ssize_t index = mEffects.indexOfKey(id);
-    if (index < 0) {
-        ALOGW("unregisterEffect() unknown effect ID %d", id);
-        return INVALID_OPERATION;
-    }
-
-    return setEffectEnabled(mEffects.valueAt(index), enabled);
-}
-
-status_t AudioPolicyManager::setEffectEnabled(const sp<EffectDescriptor>& effectDesc, bool enabled)
-{
-    if (enabled == effectDesc->mEnabled) {
-        ALOGV("setEffectEnabled(%s) effect already %s",
-             enabled?"true":"false", enabled?"enabled":"disabled");
-        return INVALID_OPERATION;
-    }
-
-    if (enabled) {
-        if (mTotalEffectsCpuLoad + effectDesc->mDesc.cpuLoad > getMaxEffectsCpuLoad()) {
-            ALOGW("setEffectEnabled(true) CPU Load limit exceeded for Fx %s, CPU %f MIPS",
-                 effectDesc->mDesc.name, (float)effectDesc->mDesc.cpuLoad/10);
-            return INVALID_OPERATION;
-        }
-        mTotalEffectsCpuLoad += effectDesc->mDesc.cpuLoad;
-        ALOGV("setEffectEnabled(true) total CPU %d", mTotalEffectsCpuLoad);
-    } else {
-        if (mTotalEffectsCpuLoad < effectDesc->mDesc.cpuLoad) {
-            ALOGW("setEffectEnabled(false) CPU load %d too high for total %d",
-                    effectDesc->mDesc.cpuLoad, mTotalEffectsCpuLoad);
-            effectDesc->mDesc.cpuLoad = mTotalEffectsCpuLoad;
-        }
-        mTotalEffectsCpuLoad -= effectDesc->mDesc.cpuLoad;
-        ALOGV("setEffectEnabled(false) total CPU %d", mTotalEffectsCpuLoad);
-    }
-    effectDesc->mEnabled = enabled;
-    return NO_ERROR;
-}
-
-bool AudioPolicyManager::isNonOffloadableEffectEnabled()
-{
-    for (size_t i = 0; i < mEffects.size(); i++) {
-        sp<EffectDescriptor> effectDesc = mEffects.valueAt(i);
-        if (effectDesc->mEnabled && (effectDesc->mStrategy == STRATEGY_MEDIA) &&
-                ((effectDesc->mDesc.flags & EFFECT_FLAG_OFFLOAD_SUPPORTED) == 0)) {
-            ALOGV("isNonOffloadableEffectEnabled() non offloadable effect %s enabled on session %d",
-                  effectDesc->mDesc.name, effectDesc->mSession);
-            return true;
-        }
-    }
-    return false;
-}
-
-bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
-{
-    nsecs_t sysTime = systemTime();
-    for (size_t i = 0; i < mOutputs.size(); i++) {
-        const sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
-        if (outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
-            return true;
-        }
-    }
-    return false;
-}
-
-bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream,
-                                                    uint32_t inPastMs) const
-{
-    nsecs_t sysTime = systemTime();
-    for (size_t i = 0; i < mOutputs.size(); i++) {
-        const sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
-        if (((outputDesc->device() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) != 0) &&
-                outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
-            // do not consider re routing (when the output is going to a dynamic policy)
-            // as "remote playback"
-            if (outputDesc->mPolicyMix == NULL) {
-                return true;
-            }
-        }
-    }
-    return false;
+    return mEffects.registerEffect(desc, io, strategy, session, id);
 }
 
 bool AudioPolicyManager::isSourceActive(audio_source_t source) const
@@ -2005,9 +1720,8 @@
 
     for (size_t i = 0; i < mixes.size(); i++) {
         String8 address = mixes[i].mRegistrationId;
-        ssize_t index = mPolicyMixes.indexOfKey(address);
-        if (index >= 0) {
-            ALOGE("registerPolicyMixes(): mix for address %s already registered", address.string());
+
+        if (mPolicyMixes.registerMix(address, mixes[i]) != NO_ERROR) {
             continue;
         }
         audio_config_t outputConfig = mixes[i].mFormat;
@@ -2020,9 +1734,7 @@
                                  AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address);
         module->addInputProfile(address, &inputConfig,
                                  AUDIO_DEVICE_IN_REMOTE_SUBMIX, address);
-        sp<AudioPolicyMix> policyMix = new AudioPolicyMix();
-        policyMix->mMix = mixes[i];
-        mPolicyMixes.add(address, policyMix);
+
         if (mixes[i].mMixType == MIX_TYPE_PLAYERS) {
             setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
                                      AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
@@ -2055,14 +1767,11 @@
 
     for (size_t i = 0; i < mixes.size(); i++) {
         String8 address = mixes[i].mRegistrationId;
-        ssize_t index = mPolicyMixes.indexOfKey(address);
-        if (index < 0) {
-            ALOGE("unregisterPolicyMixes(): mix for address %s not registered", address.string());
+
+        if (mPolicyMixes.unregisterMix(address) != NO_ERROR) {
             continue;
         }
 
-        mPolicyMixes.removeItemsAt(index);
-
         if (getDeviceConnectionState(AUDIO_DEVICE_IN_REMOTE_SUBMIX, address.string()) ==
                                              AUDIO_POLICY_DEVICE_STATE_AVAILABLE)
         {
@@ -2096,87 +1805,32 @@
 
     snprintf(buffer, SIZE, " Primary Output: %d\n", mPrimaryOutput);
     result.append(buffer);
-    snprintf(buffer, SIZE, " Phone state: %d\n", mPhoneState);
+    snprintf(buffer, SIZE, " Phone state: %d\n", mEngine->getPhoneState());
     result.append(buffer);
     snprintf(buffer, SIZE, " Force use for communications %d\n",
-             mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]);
+             mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION));
     result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for media %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA]);
+    snprintf(buffer, SIZE, " Force use for media %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA));
     result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for record %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD]);
+    snprintf(buffer, SIZE, " Force use for record %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD));
     result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for dock %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK]);
+    snprintf(buffer, SIZE, " Force use for dock %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_DOCK));
     result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for system %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM]);
+    snprintf(buffer, SIZE, " Force use for system %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM));
     result.append(buffer);
     snprintf(buffer, SIZE, " Force use for hdmi system audio %d\n",
-            mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO]);
-    result.append(buffer);
-
-    snprintf(buffer, SIZE, " Available output devices:\n");
+            mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO));
     result.append(buffer);
     write(fd, result.string(), result.size());
-    for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
-        mAvailableOutputDevices[i]->dump(fd, 2, i);
-    }
-    snprintf(buffer, SIZE, "\n Available input devices:\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
-        mAvailableInputDevices[i]->dump(fd, 2, i);
-    }
 
-    snprintf(buffer, SIZE, "\nHW Modules dump:\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < mHwModules.size(); i++) {
-        snprintf(buffer, SIZE, "- HW Module %zu:\n", i + 1);
-        write(fd, buffer, strlen(buffer));
-        mHwModules[i]->dump(fd);
-    }
-
-    snprintf(buffer, SIZE, "\nOutputs dump:\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < mOutputs.size(); i++) {
-        snprintf(buffer, SIZE, "- Output %d dump:\n", mOutputs.keyAt(i));
-        write(fd, buffer, strlen(buffer));
-        mOutputs.valueAt(i)->dump(fd);
-    }
-
-    snprintf(buffer, SIZE, "\nInputs dump:\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < mInputs.size(); i++) {
-        snprintf(buffer, SIZE, "- Input %d dump:\n", mInputs.keyAt(i));
-        write(fd, buffer, strlen(buffer));
-        mInputs.valueAt(i)->dump(fd);
-    }
-
-    snprintf(buffer, SIZE, "\nStreams dump:\n");
-    write(fd, buffer, strlen(buffer));
-    snprintf(buffer, SIZE,
-             " Stream  Can be muted  Index Min  Index Max  Index Cur [device : index]...\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < AUDIO_STREAM_CNT; i++) {
-        snprintf(buffer, SIZE, " %02zu      ", i);
-        write(fd, buffer, strlen(buffer));
-        mStreams[i].dump(fd);
-    }
-
-    snprintf(buffer, SIZE, "\nTotal Effects CPU: %f MIPS, Total Effects memory: %d KB\n",
-            (float)mTotalEffectsCpuLoad/10, mTotalEffectsMemory);
-    write(fd, buffer, strlen(buffer));
-
-    snprintf(buffer, SIZE, "Registered effects:\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < mEffects.size(); i++) {
-        snprintf(buffer, SIZE, "- Effect %d dump:\n", mEffects.keyAt(i));
-        write(fd, buffer, strlen(buffer));
-        mEffects.valueAt(i)->dump(fd);
-    }
-
-    snprintf(buffer, SIZE, "\nAudio Patches:\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < mAudioPatches.size(); i++) {
-        mAudioPatches[i]->dump(fd, 2, i);
-    }
+    mAvailableOutputDevices.dump(fd, String8("output"));
+    mAvailableInputDevices.dump(fd, String8("input"));
+    mHwModules.dump(fd);
+    mOutputs.dump(fd);
+    mInputs.dump(fd);
+    mStreams.dump(fd);
+    mEffects.dump(fd);
+    mAudioPatches.dump(fd);
 
     return NO_ERROR;
 }
@@ -2233,7 +1887,7 @@
     // FIXME: We should check the audio session here but we do not have it in this context.
     // This may prevent offloading in rare situations where effects are left active by apps
     // in the background.
-    if (isNonOffloadableEffectEnabled()) {
+    if (mEffects.isNonOffloadableEffectEnabled()) {
         return false;
     }
 
@@ -2312,93 +1966,6 @@
     return NO_ERROR;
 }
 
-sp<AudioOutputDescriptor> AudioPolicyManager::getOutputFromId(
-                                                                    audio_port_handle_t id) const
-{
-    sp<AudioOutputDescriptor> outputDesc = NULL;
-    for (size_t i = 0; i < mOutputs.size(); i++) {
-        outputDesc = mOutputs.valueAt(i);
-        if (outputDesc->mId == id) {
-            break;
-        }
-    }
-    return outputDesc;
-}
-
-sp<AudioInputDescriptor> AudioPolicyManager::getInputFromId(
-                                                                    audio_port_handle_t id) const
-{
-    sp<AudioInputDescriptor> inputDesc = NULL;
-    for (size_t i = 0; i < mInputs.size(); i++) {
-        inputDesc = mInputs.valueAt(i);
-        if (inputDesc->mId == id) {
-            break;
-        }
-    }
-    return inputDesc;
-}
-
-sp <HwModule> AudioPolicyManager::getModuleForDevice(
-                                                                    audio_devices_t device) const
-{
-    sp <HwModule> module;
-
-    for (size_t i = 0; i < mHwModules.size(); i++) {
-        if (mHwModules[i]->mHandle == 0) {
-            continue;
-        }
-        if (audio_is_output_device(device)) {
-            for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
-            {
-                if (mHwModules[i]->mOutputProfiles[j]->mSupportedDevices.types() & device) {
-                    return mHwModules[i];
-                }
-            }
-        } else {
-            for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++) {
-                if (mHwModules[i]->mInputProfiles[j]->mSupportedDevices.types() &
-                        device & ~AUDIO_DEVICE_BIT_IN) {
-                    return mHwModules[i];
-                }
-            }
-        }
-    }
-    return module;
-}
-
-sp <HwModule> AudioPolicyManager::getModuleFromName(const char *name) const
-{
-    sp <HwModule> module;
-
-    for (size_t i = 0; i < mHwModules.size(); i++)
-    {
-        if (strcmp(mHwModules[i]->mName, name) == 0) {
-            return mHwModules[i];
-        }
-    }
-    return module;
-}
-
-audio_devices_t AudioPolicyManager::availablePrimaryOutputDevices()
-{
-    sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(mPrimaryOutput);
-    audio_devices_t devices = outputDesc->mProfile->mSupportedDevices.types();
-    return devices & mAvailableOutputDevices.types();
-}
-
-audio_devices_t AudioPolicyManager::availablePrimaryInputDevices()
-{
-    audio_module_handle_t primaryHandle =
-                                mOutputs.valueFor(mPrimaryOutput)->mProfile->mModule->mHandle;
-    audio_devices_t devices = AUDIO_DEVICE_NONE;
-    for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
-        if (mAvailableInputDevices[i]->mModule->mHandle == primaryHandle) {
-            devices |= mAvailableInputDevices[i]->mDeviceType;
-        }
-    }
-    return devices;
-}
-
 status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch,
                                                audio_patch_handle_t *handle,
                                                uid_t uid)
@@ -2454,7 +2021,7 @@
     }
 
     if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) {
-        sp<AudioOutputDescriptor> outputDesc = getOutputFromId(patch->sources[0].id);
+        sp<AudioOutputDescriptor> outputDesc = mOutputs.getOutputFromId(patch->sources[0].id);
         if (outputDesc == NULL) {
             ALOGV("createAudioPatch() output not found for id %d", patch->sources[0].id);
             return BAD_VALUE;
@@ -2483,15 +2050,14 @@
                 return BAD_VALUE;
             }
 
-            if (!outputDesc->mProfile->isCompatibleProfile(devDesc->mDeviceType,
+            if (!outputDesc->mProfile->isCompatibleProfile(devDesc->type(),
                                                            devDesc->mAddress,
                                                            patch->sources[0].sample_rate,
-                                                         NULL,  // updatedSamplingRate
-                                                         patch->sources[0].format,
-                                                         patch->sources[0].channel_mask,
-                                                         AUDIO_OUTPUT_FLAG_NONE /*FIXME*/)) {
-                ALOGV("createAudioPatch() profile not supported for device %08x",
-                      devDesc->mDeviceType);
+                                                           NULL,  // updatedSamplingRate
+                                                           patch->sources[0].format,
+                                                           patch->sources[0].channel_mask,
+                                                           AUDIO_OUTPUT_FLAG_NONE /*FIXME*/)) {
+                ALOGV("createAudioPatch() profile not supported for device %08x", devDesc->type());
                 return INVALID_OPERATION;
             }
             devices.add(devDesc);
@@ -2523,7 +2089,7 @@
             if (patch->num_sinks > 1) {
                 return INVALID_OPERATION;
             }
-            sp<AudioInputDescriptor> inputDesc = getInputFromId(patch->sinks[0].id);
+            sp<AudioInputDescriptor> inputDesc = mInputs.getInputFromId(patch->sinks[0].id);
             if (inputDesc == NULL) {
                 return BAD_VALUE;
             }
@@ -2538,7 +2104,7 @@
                 return BAD_VALUE;
             }
 
-            if (!inputDesc->mProfile->isCompatibleProfile(devDesc->mDeviceType,
+            if (!inputDesc->mProfile->isCompatibleProfile(devDesc->type(),
                                                           devDesc->mAddress,
                                                           patch->sinks[0].sample_rate,
                                                           NULL, /*updatedSampleRate*/
@@ -2552,8 +2118,8 @@
             }
             // TODO: reconfigure output format and channels here
             ALOGV("createAudioPatch() setting device %08x on output %d",
-                                                  devDesc->mDeviceType, inputDesc->mIoHandle);
-            setInputDevice(inputDesc->mIoHandle, devDesc->mDeviceType, true, handle);
+                                                  devDesc->type(), inputDesc->mIoHandle);
+            setInputDevice(inputDesc->mIoHandle, devDesc->type(), true, handle);
             index = mAudioPatches.indexOfKey(*handle);
             if (index >= 0) {
                 if (patchDesc != 0 && patchDesc != mAudioPatches.valueAt(index)) {
@@ -2603,8 +2169,7 @@
                         return INVALID_OPERATION;
                     }
                     SortedVector<audio_io_handle_t> outputs =
-                                            getOutputsForDevice(sinkDeviceDesc->mDeviceType,
-                                                                mOutputs);
+                                            getOutputsForDevice(sinkDeviceDesc->type(), mOutputs);
                     // if the sink device is reachable via an opened output stream, request to go via
                     // this output stream by adding a second source to the patch description
                     audio_io_handle_t output = selectOutput(outputs,
@@ -2634,8 +2199,7 @@
                                                                   status, afPatchHandle);
             if (status == NO_ERROR) {
                 if (index < 0) {
-                    patchDesc = new AudioPatch((audio_patch_handle_t)nextUniqueId(),
-                                               &newPatch, uid);
+                    patchDesc = new AudioPatch(&newPatch, uid);
                     addAudioPatch(patchDesc->mHandle, patchDesc);
                 } else {
                     patchDesc->mPatch = newPatch;
@@ -2678,7 +2242,7 @@
     struct audio_patch *patch = &patchDesc->mPatch;
     patchDesc->mUid = mUidCached;
     if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) {
-        sp<AudioOutputDescriptor> outputDesc = getOutputFromId(patch->sources[0].id);
+        sp<AudioOutputDescriptor> outputDesc = mOutputs.getOutputFromId(patch->sources[0].id);
         if (outputDesc == NULL) {
             ALOGV("releaseAudioPatch() output not found for id %d", patch->sources[0].id);
             return BAD_VALUE;
@@ -2691,7 +2255,7 @@
                        NULL);
     } else if (patch->sources[0].type == AUDIO_PORT_TYPE_DEVICE) {
         if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
-            sp<AudioInputDescriptor> inputDesc = getInputFromId(patch->sinks[0].id);
+            sp<AudioInputDescriptor> inputDesc = mInputs.getInputFromId(patch->sinks[0].id);
             if (inputDesc == NULL) {
                 ALOGV("releaseAudioPatch() input not found for id %d", patch->sinks[0].id);
                 return BAD_VALUE;
@@ -2721,30 +2285,11 @@
                                               struct audio_patch *patches,
                                               unsigned int *generation)
 {
-    if (num_patches == NULL || (*num_patches != 0 && patches == NULL) ||
-            generation == NULL) {
+    if (generation == NULL) {
         return BAD_VALUE;
     }
-    ALOGV("listAudioPatches() num_patches %d patches %p available patches %zu",
-          *num_patches, patches, mAudioPatches.size());
-    if (patches == NULL) {
-        *num_patches = 0;
-    }
-
-    size_t patchesWritten = 0;
-    size_t patchesMax = *num_patches;
-    for (size_t i = 0;
-            i  < mAudioPatches.size() && patchesWritten < patchesMax; i++) {
-        patches[patchesWritten] = mAudioPatches[i]->mPatch;
-        patches[patchesWritten++].id = mAudioPatches[i]->mHandle;
-        ALOGV("listAudioPatches() patch %zu num_sources %d num_sinks %d",
-              i, mAudioPatches[i]->mPatch.num_sources, mAudioPatches[i]->mPatch.num_sinks);
-    }
-    *num_patches = mAudioPatches.size();
-
     *generation = curAudioPortGeneration();
-    ALOGV("listAudioPatches() got %zu patches needed %d", patchesWritten, *num_patches);
-    return NO_ERROR;
+    return mAudioPatches.listAudioPatches(num_patches, patches);
 }
 
 status_t AudioPolicyManager::setAudioPortConfig(const struct audio_port_config *config)
@@ -2763,7 +2308,7 @@
     sp<AudioPortConfig> audioPortConfig;
     if (config->type == AUDIO_PORT_TYPE_MIX) {
         if (config->role == AUDIO_PORT_ROLE_SOURCE) {
-            sp<AudioOutputDescriptor> outputDesc = getOutputFromId(config->id);
+            sp<AudioOutputDescriptor> outputDesc = mOutputs.getOutputFromId(config->id);
             if (outputDesc == NULL) {
                 return BAD_VALUE;
             }
@@ -2772,7 +2317,7 @@
                         outputDesc->mIoHandle);
             audioPortConfig = outputDesc;
         } else if (config->role == AUDIO_PORT_ROLE_SINK) {
-            sp<AudioInputDescriptor> inputDesc = getInputFromId(config->id);
+            sp<AudioInputDescriptor> inputDesc = mInputs.getInputFromId(config->id);
             if (inputDesc == NULL) {
                 return BAD_VALUE;
             }
@@ -2829,79 +2374,24 @@
     *ioHandle = (audio_io_handle_t)mpClientInterface->newAudioUniqueId();
     *device = getDeviceAndMixForInputSource(AUDIO_SOURCE_HOTWORD);
 
-    mSoundTriggerSessions.add(*session, *ioHandle);
-
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::releaseSoundTriggerSession(audio_session_t session)
-{
-    ssize_t index = mSoundTriggerSessions.indexOfKey(session);
-    if (index < 0) {
-        ALOGW("acquireSoundTriggerSession() session %d not registered", session);
-        return BAD_VALUE;
-    }
-
-    mSoundTriggerSessions.removeItem(session);
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::addAudioPatch(audio_patch_handle_t handle,
-                                           const sp<AudioPatch>& patch)
-{
-    ssize_t index = mAudioPatches.indexOfKey(handle);
-
-    if (index >= 0) {
-        ALOGW("addAudioPatch() patch %d already in", handle);
-        return ALREADY_EXISTS;
-    }
-    mAudioPatches.add(handle, patch);
-    ALOGV("addAudioPatch() handle %d af handle %d num_sources %d num_sinks %d source handle %d"
-            "sink handle %d",
-          handle, patch->mAfPatchHandle, patch->mPatch.num_sources, patch->mPatch.num_sinks,
-          patch->mPatch.sources[0].id, patch->mPatch.sinks[0].id);
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::removeAudioPatch(audio_patch_handle_t handle)
-{
-    ssize_t index = mAudioPatches.indexOfKey(handle);
-
-    if (index < 0) {
-        ALOGW("removeAudioPatch() patch %d not in", handle);
-        return ALREADY_EXISTS;
-    }
-    ALOGV("removeAudioPatch() handle %d af handle %d", handle,
-                      mAudioPatches.valueAt(index)->mAfPatchHandle);
-    mAudioPatches.removeItemsAt(index);
-    return NO_ERROR;
+    return mSoundTriggerSessions.acquireSession(*session, *ioHandle);
 }
 
 // ----------------------------------------------------------------------------
 // AudioPolicyManager
 // ----------------------------------------------------------------------------
-
-uint32_t AudioPolicyManager::nextUniqueId()
-{
-    return android_atomic_inc(&mNextUniqueId);
-}
-
 uint32_t AudioPolicyManager::nextAudioPortGeneration()
 {
     return android_atomic_inc(&mAudioPortGeneration);
 }
 
-int32_t volatile AudioPolicyManager::mNextUniqueId = 1;
-
 AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface)
     :
 #ifdef AUDIO_POLICY_TEST
     Thread(false),
 #endif //AUDIO_POLICY_TEST
     mPrimaryOutput((audio_io_handle_t)0),
-    mPhoneState(AUDIO_MODE_NORMAL),
     mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
-    mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0),
     mA2dpSuspended(false),
     mSpeakerDrcEnabled(false),
     mAudioPortGeneration(1),
@@ -2909,24 +2399,39 @@
     mBeaconPlayingRefCount(0),
     mBeaconMuted(false)
 {
+    audio_policy::EngineInstance *engineInstance = audio_policy::EngineInstance::getInstance();
+    if (!engineInstance) {
+        ALOGE("%s:  Could not get an instance of policy engine", __FUNCTION__);
+        return;
+    }
+    // Retrieve the Policy Manager Interface
+    mEngine = engineInstance->queryInterface<AudioPolicyManagerInterface>();
+    if (mEngine == NULL) {
+        ALOGE("%s: Failed to get Policy Engine Interface", __FUNCTION__);
+        return;
+    }
+    mEngine->setObserver(this);
+    status_t status = mEngine->initCheck();
+    ALOG_ASSERT(status == NO_ERROR, "Policy engine not initialized(err=%d)", status);
+
     mUidCached = getuid();
     mpClientInterface = clientInterface;
 
-    for (int i = 0; i < AUDIO_POLICY_FORCE_USE_CNT; i++) {
-        mForceUse[i] = AUDIO_POLICY_FORCE_NONE;
-    }
-
     mDefaultOutputDevice = new DeviceDescriptor(String8("Speaker"), AUDIO_DEVICE_OUT_SPEAKER);
-    if (loadAudioPolicyConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE) != NO_ERROR) {
-        if (loadAudioPolicyConfig(AUDIO_POLICY_CONFIG_FILE) != NO_ERROR) {
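+    // Try the vendor audio policy configuration first, then fall back to the default configuration file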
+    if (ConfigParsingUtils::loadAudioPolicyConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE,
+                 mHwModules, mAvailableInputDevices, mAvailableOutputDevices,
+                 mDefaultOutputDevice, mSpeakerDrcEnabled) != NO_ERROR) {
+        if (ConfigParsingUtils::loadAudioPolicyConfig(AUDIO_POLICY_CONFIG_FILE,
+                                  mHwModules, mAvailableInputDevices, mAvailableOutputDevices,
+                                  mDefaultOutputDevice, mSpeakerDrcEnabled) != NO_ERROR) {
             ALOGE("could not load audio policy configuration file, setting defaults");
             defaultAudioPolicyConfig();
         }
     }
     // mAvailableOutputDevices and mAvailableInputDevices now contain all attached devices
 
-    // must be done after reading the policy
-    initializeVolumeCurves();
+    // must be done after reading the policy (since conditioned by speaker DRC enabling)
+    mEngine->initializeVolumeCurves(mSpeakerDrcEnabled);
 
     // open all output streams needed to access attached devices
     audio_devices_t outputDeviceTypes = mAvailableOutputDevices.types();
@@ -2954,13 +2459,13 @@
                 continue;
             }
             audio_devices_t profileType = outProfile->mSupportedDevices.types();
-            if ((profileType & mDefaultOutputDevice->mDeviceType) != AUDIO_DEVICE_NONE) {
-                profileType = mDefaultOutputDevice->mDeviceType;
+            if ((profileType & mDefaultOutputDevice->type()) != AUDIO_DEVICE_NONE) {
+                profileType = mDefaultOutputDevice->type();
             } else {
                 // chose first device present in mSupportedDevices also part of
                 // outputDeviceTypes
                 for (size_t k = 0; k  < outProfile->mSupportedDevices.size(); k++) {
-                    profileType = outProfile->mSupportedDevices[k]->mDeviceType;
+                    profileType = outProfile->mSupportedDevices[k]->type();
                     if ((profileType & outputDeviceTypes) != 0) {
                         break;
                     }
@@ -2995,7 +2500,7 @@
                 outputDesc->mFormat = config.format;
 
                 for (size_t k = 0; k  < outProfile->mSupportedDevices.size(); k++) {
-                    audio_devices_t type = outProfile->mSupportedDevices[k]->mDeviceType;
+                    audio_devices_t type = outProfile->mSupportedDevices[k]->type();
                     ssize_t index =
                             mAvailableOutputDevices.indexOf(outProfile->mSupportedDevices[k]);
                     // give a valid ID to an attached device once confirmed it is reachable
@@ -3027,7 +2532,7 @@
             // inputDeviceTypes
             audio_devices_t profileType = AUDIO_DEVICE_NONE;
             for (size_t k = 0; k  < inProfile->mSupportedDevices.size(); k++) {
-                profileType = inProfile->mSupportedDevices[k]->mDeviceType;
+                profileType = inProfile->mSupportedDevices[k]->type();
                 if (profileType & inputDeviceTypes) {
                     break;
                 }
@@ -3063,7 +2568,7 @@
 
             if (status == NO_ERROR) {
                 for (size_t k = 0; k  < inProfile->mSupportedDevices.size(); k++) {
-                    audio_devices_t type = inProfile->mSupportedDevices[k]->mDeviceType;
+                    audio_devices_t type = inProfile->mSupportedDevices[k]->type();
                     ssize_t index =
                             mAvailableInputDevices.indexOf(inProfile->mSupportedDevices[k]);
                     // give a valid ID to an attached device once confirmed it is reachable
@@ -3082,23 +2587,29 @@
     // make sure all attached devices have been allocated a unique ID
     for (size_t i = 0; i  < mAvailableOutputDevices.size();) {
         if (!mAvailableOutputDevices[i]->isAttached()) {
-            ALOGW("Input device %08x unreachable", mAvailableOutputDevices[i]->mDeviceType);
+            ALOGW("Input device %08x unreachable", mAvailableOutputDevices[i]->type());
             mAvailableOutputDevices.remove(mAvailableOutputDevices[i]);
             continue;
         }
+        // The device is now validated and can be appended to the available devices of the engine
+        mEngine->setDeviceConnectionState(mAvailableOutputDevices[i],
+                                          AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
         i++;
     }
     for (size_t i = 0; i  < mAvailableInputDevices.size();) {
         if (!mAvailableInputDevices[i]->isAttached()) {
-            ALOGW("Input device %08x unreachable", mAvailableInputDevices[i]->mDeviceType);
+            ALOGW("Input device %08x unreachable", mAvailableInputDevices[i]->type());
             mAvailableInputDevices.remove(mAvailableInputDevices[i]);
             continue;
         }
+        // The device is now validated and can be appended to the available devices of the engine
+        mEngine->setDeviceConnectionState(mAvailableInputDevices[i],
+                                          AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
         i++;
     }
     // make sure default device is reachable
     if (mAvailableOutputDevices.indexOf(mDefaultOutputDevice) < 0) {
-        ALOGE("Default device %08x is unreachable", mDefaultOutputDevice->mDeviceType);
+        ALOGE("Default device %08x is unreachable", mDefaultOutputDevice->type());
     }
 
     ALOGE_IF((mPrimaryOutput == 0), "Failed to open primary output");
@@ -3254,8 +2765,7 @@
 
                 audio_module_handle_t moduleHandle = outputDesc->mModule->mHandle;
 
-                mOutputs.removeItem(mPrimaryOutput);
-
+                removeOutput(mPrimaryOutput);
                 sp<AudioOutputDescriptor> outputDesc = new AudioOutputDescriptor(NULL);
                 outputDesc->mDevice = AUDIO_DEVICE_OUT_SPEAKER;
                 audio_config_t config = AUDIO_CONFIG_INITIALIZER;
@@ -3314,16 +2824,19 @@
 
 void AudioPolicyManager::addOutput(audio_io_handle_t output, sp<AudioOutputDescriptor> outputDesc)
 {
-    outputDesc->mIoHandle = output;
-    outputDesc->mId = nextUniqueId();
+    outputDesc->setIoHandle(output);
     mOutputs.add(output, outputDesc);
     nextAudioPortGeneration();
 }
 
+void AudioPolicyManager::removeOutput(audio_io_handle_t output)
+{
+    mOutputs.removeItem(output);
+}
+
 void AudioPolicyManager::addInput(audio_io_handle_t input, sp<AudioInputDescriptor> inputDesc)
 {
-    inputDesc->mIoHandle = input;
-    inputDesc->mId = nextUniqueId();
+    inputDesc->setIoHandle(input);
     mInputs.add(input, inputDesc);
     nextAudioPortGeneration();
 }
@@ -3342,11 +2855,11 @@
 }
 
 status_t AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor> devDesc,
-                                                       audio_policy_dev_state_t state,
-                                                       SortedVector<audio_io_handle_t>& outputs,
-                                                       const String8 address)
+                                                   audio_policy_dev_state_t state,
+                                                   SortedVector<audio_io_handle_t>& outputs,
+                                                   const String8 address)
 {
-    audio_devices_t device = devDesc->mDeviceType;
+    audio_devices_t device = devDesc->type();
     sp<AudioOutputDescriptor> desc;
     // erase all current sample rates, formats and channel masks
     devDesc->clearCapabilities();
@@ -3356,7 +2869,7 @@
         for (size_t i = 0; i < mOutputs.size(); i++) {
             desc = mOutputs.valueAt(i);
             if (!desc->isDuplicated() && (desc->mProfile->mSupportedDevices.types() & device)) {
-                if (!deviceDistinguishesOnAddress(device)) {
+                if (!device_distinguishes_on_address(device)) {
                     ALOGV("checkOutputsForDevice(): adding opened output %d", mOutputs.keyAt(i));
                     outputs.add(mOutputs.keyAt(i));
                 } else {
@@ -3376,7 +2889,7 @@
             {
                 sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
                 if (profile->mSupportedDevices.types() & device) {
-                    if (!deviceDistinguishesOnAddress(device) ||
+                    if (!device_distinguishes_on_address(device) ||
                             address == profile->mSupportedDevices[0]->mAddress) {
                         profiles.add(profile);
                         ALOGV("checkOutputsForDevice(): adding profile %zu from module %zu", j, i);
@@ -3512,15 +3025,15 @@
 
                 if (output != AUDIO_IO_HANDLE_NONE) {
                     addOutput(output, desc);
-                    if (deviceDistinguishesOnAddress(device) && address != "0") {
-                        ssize_t index = mPolicyMixes.indexOfKey(address);
-                        if (index >= 0) {
-                            mPolicyMixes[index]->mOutput = desc;
-                            desc->mPolicyMix = &mPolicyMixes[index]->mMix;
-                        } else {
+                    if (device_distinguishes_on_address(device) && address != "0") {
+                        sp<AudioPolicyMix> policyMix;
+                        if (mPolicyMixes.getAudioPolicyMix(address, policyMix) != NO_ERROR) {
                             ALOGE("checkOutputsForDevice() cannot find policy for address %s",
                                   address.string());
                         }
+                        policyMix->setOutput(desc);
+                        desc->mPolicyMix = &(policyMix->getMix());
+
                     } else if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) {
                         // no duplicated output for direct outputs and
                         // outputs used by dynamic policy mixes
@@ -3550,7 +3063,7 @@
                             ALOGW("checkOutputsForDevice() could not open dup output for %d and %d",
                                     mPrimaryOutput, output);
                             mpClientInterface->closeOutput(output);
-                            mOutputs.removeItem(output);
+                            removeOutput(output);
                             nextAudioPortGeneration();
                             output = AUDIO_IO_HANDLE_NONE;
                         }
@@ -3567,7 +3080,7 @@
                 outputs.add(output);
                 devDesc->importAudioPort(profile);
 
-                if (deviceDistinguishesOnAddress(device)) {
+                if (device_distinguishes_on_address(device)) {
                     ALOGV("checkOutputsForDevice(): setOutputDevice(dev=0x%x, addr=%s)",
                             device, address.string());
                     setOutputDevice(output, device, true/*force*/, 0/*delay*/,
@@ -3587,7 +3100,7 @@
             desc = mOutputs.valueAt(i);
             if (!desc->isDuplicated()) {
                 // exact match on device
-                if (deviceDistinguishesOnAddress(device) &&
+                if (device_distinguishes_on_address(device) &&
                         (desc->mProfile->mSupportedDevices.types() == device)) {
                     findIoHandlesByAddress(desc, device, address, outputs);
                 } else if (!(desc->mProfile->mSupportedDevices.types()
@@ -3630,9 +3143,9 @@
 }
 
 status_t AudioPolicyManager::checkInputsForDevice(audio_devices_t device,
-                                                      audio_policy_dev_state_t state,
-                                                      SortedVector<audio_io_handle_t>& inputs,
-                                                      const String8 address)
+                                                  audio_policy_dev_state_t state,
+                                                  SortedVector<audio_io_handle_t>& inputs,
+                                                  const String8 address)
 {
     sp<AudioInputDescriptor> desc;
     if (state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
@@ -3659,7 +3172,7 @@
                 sp<IOProfile> profile = mHwModules[module_idx]->mInputProfiles[profile_index];
 
                 if (profile->mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN)) {
-                    if (!deviceDistinguishesOnAddress(device) ||
+                    if (!device_distinguishes_on_address(device) ||
                             address == profile->mSupportedDevices[0]->mAddress) {
                         profiles.add(profile);
                         ALOGV("checkInputsForDevice(): adding profile %zu from module %zu",
@@ -3831,12 +3344,7 @@
         ALOGW("closeOutput() unknown output %d", output);
         return;
     }
-
-    for (size_t i = 0; i < mPolicyMixes.size(); i++) {
-        if (mPolicyMixes[i]->mOutput == outputDesc) {
-            mPolicyMixes[i]->mOutput.clear();
-        }
-    }
+    mPolicyMixes.closeOutput(outputDesc);
 
     // look for duplicated outputs connected to the output being removed.
     for (size_t i = 0; i < mOutputs.size(); i++) {
@@ -3862,7 +3370,7 @@
             ALOGV("closeOutput() closing also duplicated output %d", duplicatedOutput);
 
             mpClientInterface->closeOutput(duplicatedOutput);
-            mOutputs.removeItem(duplicatedOutput);
+            removeOutput(duplicatedOutput);
         }
     }
 
@@ -3881,7 +3389,7 @@
     mpClientInterface->setParameters(output, param.toString());
 
     mpClientInterface->closeOutput(output);
-    mOutputs.removeItem(output);
+    removeOutput(output);
     mPreviousOutputs = mOutputs;
 }
 
@@ -3910,7 +3418,7 @@
 }
 
 SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevice(audio_devices_t device,
-                        DefaultKeyedVector<audio_io_handle_t, sp<AudioOutputDescriptor> > openOutputs)
+                                                                        AudioOutputCollection openOutputs)
 {
     SortedVector<audio_io_handle_t> outputs;
 
@@ -3927,7 +3435,7 @@
 }
 
 bool AudioPolicyManager::vectorsEqual(SortedVector<audio_io_handle_t>& outputs1,
-                                   SortedVector<audio_io_handle_t>& outputs2)
+                                      SortedVector<audio_io_handle_t>& outputs2)
 {
     if (outputs1.size() != outputs2.size()) {
         return false;
@@ -3971,7 +3479,7 @@
         // mute strategy while moving tracks from one output to another
         for (size_t i = 0; i < srcOutputs.size(); i++) {
             sp<AudioOutputDescriptor> desc = mOutputs.valueFor(srcOutputs[i]);
-            if (desc->isStrategyActive(strategy)) {
+            if (isStrategyActive(desc, strategy)) {
                 setStrategyMute(strategy, true, srcOutputs[i]);
                 setStrategyMute(strategy, false, srcOutputs[i], MUTE_TIME_MS, newDevice);
             }
@@ -4010,10 +3518,10 @@
 
 void AudioPolicyManager::checkOutputForAllStrategies()
 {
-    if (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
+    if (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
         checkOutputForStrategy(STRATEGY_ENFORCED_AUDIBLE);
     checkOutputForStrategy(STRATEGY_PHONE);
-    if (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
+    if (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
         checkOutputForStrategy(STRATEGY_ENFORCED_AUDIBLE);
     checkOutputForStrategy(STRATEGY_SONIFICATION);
     checkOutputForStrategy(STRATEGY_SONIFICATION_RESPECTFUL);
@@ -4023,21 +3531,9 @@
     checkOutputForStrategy(STRATEGY_REROUTING);
 }
 
-audio_io_handle_t AudioPolicyManager::getA2dpOutput()
-{
-    for (size_t i = 0; i < mOutputs.size(); i++) {
-        sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
-        if (!outputDesc->isDuplicated() && outputDesc->device() & AUDIO_DEVICE_OUT_ALL_A2DP) {
-            return mOutputs.keyAt(i);
-        }
-    }
-
-    return 0;
-}
-
 void AudioPolicyManager::checkA2dpSuspend()
 {
-    audio_io_handle_t a2dpOutput = getA2dpOutput();
+    audio_io_handle_t a2dpOutput = mOutputs.getA2dpOutput();
     if (a2dpOutput == 0) {
         mA2dpSuspended = false;
         return;
@@ -4061,20 +3557,20 @@
     //
     if (mA2dpSuspended) {
         if ((!isScoConnected ||
-             ((mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] != AUDIO_POLICY_FORCE_BT_SCO) &&
-              (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] != AUDIO_POLICY_FORCE_BT_SCO))) &&
-             ((mPhoneState != AUDIO_MODE_IN_CALL) &&
-              (mPhoneState != AUDIO_MODE_RINGTONE))) {
+             ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) != AUDIO_POLICY_FORCE_BT_SCO) &&
+              (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) != AUDIO_POLICY_FORCE_BT_SCO))) &&
+             ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) &&
+              (mEngine->getPhoneState() != AUDIO_MODE_RINGTONE))) {
 
             mpClientInterface->restoreOutput(a2dpOutput);
             mA2dpSuspended = false;
         }
     } else {
         if ((isScoConnected &&
-             ((mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] == AUDIO_POLICY_FORCE_BT_SCO) ||
-              (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO))) ||
-             ((mPhoneState == AUDIO_MODE_IN_CALL) ||
-              (mPhoneState == AUDIO_MODE_RINGTONE))) {
+             ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) == AUDIO_POLICY_FORCE_BT_SCO) ||
+              (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO))) ||
+             ((mEngine->getPhoneState() == AUDIO_MODE_IN_CALL) ||
+              (mEngine->getPhoneState() == AUDIO_MODE_RINGTONE))) {
 
             mpClientInterface->suspendOutput(a2dpOutput);
             mA2dpSuspended = true;
@@ -4117,27 +3613,27 @@
     //      use device for strategy DTMF
     // 9: the strategy for beacon, a.k.a. "transmitted through speaker" is active on the output:
     //      use device for strategy t-t-s
-    if (outputDesc->isStrategyActive(STRATEGY_ENFORCED_AUDIBLE) &&
-        mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
+    if (isStrategyActive(outputDesc, STRATEGY_ENFORCED_AUDIBLE) &&
+        mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
         device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
     } else if (isInCall() ||
-                    outputDesc->isStrategyActive(STRATEGY_PHONE)) {
+                    isStrategyActive(outputDesc, STRATEGY_PHONE)) {
         device = getDeviceForStrategy(STRATEGY_PHONE, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_ENFORCED_AUDIBLE)) {
+    } else if (isStrategyActive(outputDesc, STRATEGY_ENFORCED_AUDIBLE)) {
         device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_SONIFICATION)) {
+    } else if (isStrategyActive(outputDesc, STRATEGY_SONIFICATION)) {
         device = getDeviceForStrategy(STRATEGY_SONIFICATION, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_SONIFICATION_RESPECTFUL)) {
+    } else if (isStrategyActive(outputDesc, STRATEGY_SONIFICATION_RESPECTFUL)) {
         device = getDeviceForStrategy(STRATEGY_SONIFICATION_RESPECTFUL, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_ACCESSIBILITY)) {
+    } else if (isStrategyActive(outputDesc, STRATEGY_ACCESSIBILITY)) {
         device = getDeviceForStrategy(STRATEGY_ACCESSIBILITY, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_MEDIA)) {
+    } else if (isStrategyActive(outputDesc, STRATEGY_MEDIA)) {
         device = getDeviceForStrategy(STRATEGY_MEDIA, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_DTMF)) {
+    } else if (isStrategyActive(outputDesc, STRATEGY_DTMF)) {
         device = getDeviceForStrategy(STRATEGY_DTMF, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_TRANSMITTED_THROUGH_SPEAKER)) {
+    } else if (isStrategyActive(outputDesc, STRATEGY_TRANSMITTED_THROUGH_SPEAKER)) {
         device = getDeviceForStrategy(STRATEGY_TRANSMITTED_THROUGH_SPEAKER, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_REROUTING)) {
+    } else if (isStrategyActive(outputDesc, STRATEGY_REROUTING)) {
         device = getDeviceForStrategy(STRATEGY_REROUTING, fromCache);
     }
 
@@ -4182,7 +3678,7 @@
     SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(devices, mOutputs);
     for (size_t i = 0; i < outputs.size(); i++) {
         sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
-        if (outputDesc->isStrategyActive(strategy)) {
+        if (isStrategyActive(outputDesc, strategy)) {
             devices = outputDesc->device();
             break;
         }
@@ -4198,39 +3694,10 @@
     return devices;
 }
 
-routing_strategy AudioPolicyManager::getStrategy(
-        audio_stream_type_t stream) {
-
+routing_strategy AudioPolicyManager::getStrategy(audio_stream_type_t stream) const
+{
     ALOG_ASSERT(stream != AUDIO_STREAM_PATCH,"getStrategy() called for AUDIO_STREAM_PATCH");
-
-    // stream to strategy mapping
-    switch (stream) {
-    case AUDIO_STREAM_VOICE_CALL:
-    case AUDIO_STREAM_BLUETOOTH_SCO:
-        return STRATEGY_PHONE;
-    case AUDIO_STREAM_RING:
-    case AUDIO_STREAM_ALARM:
-        return STRATEGY_SONIFICATION;
-    case AUDIO_STREAM_NOTIFICATION:
-        return STRATEGY_SONIFICATION_RESPECTFUL;
-    case AUDIO_STREAM_DTMF:
-        return STRATEGY_DTMF;
-    default:
-        ALOGE("unknown stream type %d", stream);
-    case AUDIO_STREAM_SYSTEM:
-        // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
-        // while key clicks are played produces a poor result
-    case AUDIO_STREAM_MUSIC:
-        return STRATEGY_MEDIA;
-    case AUDIO_STREAM_ENFORCED_AUDIBLE:
-        return STRATEGY_ENFORCED_AUDIBLE;
-    case AUDIO_STREAM_TTS:
-        return STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
-    case AUDIO_STREAM_ACCESSIBILITY:
-        return STRATEGY_ACCESSIBILITY;
-    case AUDIO_STREAM_REROUTING:
-        return STRATEGY_REROUTING;
-    }
+    return mEngine->getStrategyForStream(stream);
 }
 
 uint32_t AudioPolicyManager::getStrategyForAttr(const audio_attributes_t *attr) {
@@ -4241,45 +3708,8 @@
     if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
         return (uint32_t) STRATEGY_ENFORCED_AUDIBLE;
     }
-
     // usage to strategy mapping
-    switch (attr->usage) {
-    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
-        if (isStreamActive(AUDIO_STREAM_RING) || isStreamActive(AUDIO_STREAM_ALARM)) {
-            return (uint32_t) STRATEGY_SONIFICATION;
-        }
-        if (isInCall()) {
-            return (uint32_t) STRATEGY_PHONE;
-        }
-        return (uint32_t) STRATEGY_ACCESSIBILITY;
-
-    case AUDIO_USAGE_MEDIA:
-    case AUDIO_USAGE_GAME:
-    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
-    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
-        return (uint32_t) STRATEGY_MEDIA;
-
-    case AUDIO_USAGE_VOICE_COMMUNICATION:
-        return (uint32_t) STRATEGY_PHONE;
-
-    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
-        return (uint32_t) STRATEGY_DTMF;
-
-    case AUDIO_USAGE_ALARM:
-    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
-        return (uint32_t) STRATEGY_SONIFICATION;
-
-    case AUDIO_USAGE_NOTIFICATION:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
-    case AUDIO_USAGE_NOTIFICATION_EVENT:
-        return (uint32_t) STRATEGY_SONIFICATION_RESPECTFUL;
-
-    case AUDIO_USAGE_UNKNOWN:
-    default:
-        return (uint32_t) STRATEGY_MEDIA;
-    }
+    return static_cast<uint32_t>(mEngine->getStrategyForUsage(attr->usage));
 }
 
 void AudioPolicyManager::handleNotificationRoutingForStream(audio_stream_type_t stream) {
@@ -4293,21 +3723,6 @@
     }
 }
 
-bool AudioPolicyManager::isAnyOutputActive(audio_stream_type_t streamToIgnore) {
-    for (size_t s = 0 ; s < AUDIO_STREAM_CNT ; s++) {
-        if (s == (size_t) streamToIgnore) {
-            continue;
-        }
-        for (size_t i = 0; i < mOutputs.size(); i++) {
-            const sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
-            if (outputDesc->mRefCount[s] != 0) {
-                return true;
-            }
-        }
-    }
-    return false;
-}
-
 uint32_t AudioPolicyManager::handleEventForBeacon(int event) {
     switch(event) {
     case STARTING_OUTPUT:
@@ -4362,292 +3777,14 @@
 }
 
 audio_devices_t AudioPolicyManager::getDeviceForStrategy(routing_strategy strategy,
-                                                             bool fromCache)
+                                                         bool fromCache)
 {
-    uint32_t device = AUDIO_DEVICE_NONE;
-
     if (fromCache) {
         ALOGVV("getDeviceForStrategy() from cache strategy %d, device %x",
               strategy, mDeviceForStrategy[strategy]);
         return mDeviceForStrategy[strategy];
     }
-    audio_devices_t availableOutputDeviceTypes = mAvailableOutputDevices.types();
-    switch (strategy) {
-
-    case STRATEGY_TRANSMITTED_THROUGH_SPEAKER:
-        device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
-        if (!device) {
-            ALOGE("getDeviceForStrategy() no device found for "\
-                    "STRATEGY_TRANSMITTED_THROUGH_SPEAKER");
-        }
-        break;
-
-    case STRATEGY_SONIFICATION_RESPECTFUL:
-        if (isInCall()) {
-            device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
-        } else if (isStreamActiveRemotely(AUDIO_STREAM_MUSIC,
-                SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
-            // while media is playing on a remote device, use the the sonification behavior.
-            // Note that we test this usecase before testing if media is playing because
-            //   the isStreamActive() method only informs about the activity of a stream, not
-            //   if it's for local playback. Note also that we use the same delay between both tests
-            device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
-            //user "safe" speaker if available instead of normal speaker to avoid triggering
-            //other acoustic safety mechanisms for notification
-            if (device == AUDIO_DEVICE_OUT_SPEAKER && (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER_SAFE))
-                device = AUDIO_DEVICE_OUT_SPEAKER_SAFE;
-        } else if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
-            // while media is playing (or has recently played), use the same device
-            device = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
-        } else {
-            // when media is not playing anymore, fall back on the sonification behavior
-            device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
-            //user "safe" speaker if available instead of normal speaker to avoid triggering
-            //other acoustic safety mechanisms for notification
-            if (device == AUDIO_DEVICE_OUT_SPEAKER && (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER_SAFE))
-                device = AUDIO_DEVICE_OUT_SPEAKER_SAFE;
-        }
-
-        break;
-
-    case STRATEGY_DTMF:
-        if (!isInCall()) {
-            // when off call, DTMF strategy follows the same rules as MEDIA strategy
-            device = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
-            break;
-        }
-        // when in call, DTMF and PHONE strategies follow the same rules
-        // FALL THROUGH
-
-    case STRATEGY_PHONE:
-        // Force use of only devices on primary output if:
-        // - in call AND
-        //   - cannot route from voice call RX OR
-        //   - audio HAL version is < 3.0 and TX device is on the primary HW module
-        if (mPhoneState == AUDIO_MODE_IN_CALL) {
-            audio_devices_t txDevice =
-                    getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
-            sp<AudioOutputDescriptor> hwOutputDesc = mOutputs.valueFor(mPrimaryOutput);
-            if (((mAvailableInputDevices.types() &
-                    AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
-                    (((txDevice & availablePrimaryInputDevices() & ~AUDIO_DEVICE_BIT_IN) != 0) &&
-                         (hwOutputDesc->getAudioPort()->mModule->mHalVersion <
-                             AUDIO_DEVICE_API_VERSION_3_0))) {
-                availableOutputDeviceTypes = availablePrimaryOutputDevices();
-            }
-        }
-        // for phone strategy, we first consider the forced use and then the available devices by order
-        // of priority
-        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
-        case AUDIO_POLICY_FORCE_BT_SCO:
-            if (!isInCall() || strategy != STRATEGY_DTMF) {
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
-                if (device) break;
-            }
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
-            if (device) break;
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
-            if (device) break;
-            // if SCO device is requested but no SCO device is available, fall back to default case
-            // FALL THROUGH
-
-        default:    // FORCE_NONE
-            // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
-            if (!isInCall() &&
-                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
-                    (getA2dpOutput() != 0)) {
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
-                if (device) break;
-            }
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
-            if (device) break;
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADSET;
-            if (device) break;
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
-            if (device) break;
-            if (!isInCall()) {
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
-                if (device) break;
-            }
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_EARPIECE;
-            if (device) break;
-            device = mDefaultOutputDevice->mDeviceType;
-            if (device == AUDIO_DEVICE_NONE) {
-                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE");
-            }
-            break;
-
-        case AUDIO_POLICY_FORCE_SPEAKER:
-            // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to
-            // A2DP speaker when forcing to speaker output
-            if (!isInCall() &&
-                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
-                    (getA2dpOutput() != 0)) {
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
-                if (device) break;
-            }
-            if (!isInCall()) {
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
-                if (device) break;
-            }
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_LINE;
-            if (device) break;
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
-            if (device) break;
-            device = mDefaultOutputDevice->mDeviceType;
-            if (device == AUDIO_DEVICE_NONE) {
-                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE, FORCE_SPEAKER");
-            }
-            break;
-        }
-    break;
-
-    case STRATEGY_SONIFICATION:
-
-        // If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
-        // handleIncallSonification().
-        if (isInCall()) {
-            device = getDeviceForStrategy(STRATEGY_PHONE, false /*fromCache*/);
-            break;
-        }
-        // FALL THROUGH
-
-    case STRATEGY_ENFORCED_AUDIBLE:
-        // strategy STRATEGY_ENFORCED_AUDIBLE uses same routing policy as STRATEGY_SONIFICATION
-        // except:
-        //   - when in call where it doesn't default to STRATEGY_PHONE behavior
-        //   - in countries where not enforced in which case it follows STRATEGY_MEDIA
-
-        if ((strategy == STRATEGY_SONIFICATION) ||
-                (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
-            if (device == AUDIO_DEVICE_NONE) {
-                ALOGE("getDeviceForStrategy() speaker device not found for STRATEGY_SONIFICATION");
-            }
-        }
-        // The second device used for sonification is the same as the device used by media strategy
-        // FALL THROUGH
-
-    // FIXME: STRATEGY_ACCESSIBILITY and STRATEGY_REROUTING follow STRATEGY_MEDIA for now
-    case STRATEGY_ACCESSIBILITY:
-        if (strategy == STRATEGY_ACCESSIBILITY) {
-            // do not route accessibility prompts to a digital output currently configured with a
-            // compressed format as they would likely not be mixed and dropped.
-            for (size_t i = 0; i < mOutputs.size(); i++) {
-                sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
-                audio_devices_t devices = desc->device() &
-                    (AUDIO_DEVICE_OUT_HDMI | AUDIO_DEVICE_OUT_SPDIF | AUDIO_DEVICE_OUT_HDMI_ARC);
-                if (desc->isActive() && !audio_is_linear_pcm(desc->mFormat) &&
-                        devices != AUDIO_DEVICE_NONE) {
-                    availableOutputDeviceTypes = availableOutputDeviceTypes & ~devices;
-                }
-            }
-        }
-        // FALL THROUGH
-
-    case STRATEGY_REROUTING:
-    case STRATEGY_MEDIA: {
-        uint32_t device2 = AUDIO_DEVICE_NONE;
-        if (strategy != STRATEGY_SONIFICATION) {
-            // no sonification on remote submix (e.g. WFD)
-            if (mAvailableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, String8("0")) != 0) {
-                device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
-            }
-        }
-        if ((device2 == AUDIO_DEVICE_NONE) &&
-                (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
-                (getA2dpOutput() != 0)) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
-            if (device2 == AUDIO_DEVICE_NONE) {
-                device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
-            }
-            if (device2 == AUDIO_DEVICE_NONE) {
-                device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
-            }
-        }
-        if ((device2 == AUDIO_DEVICE_NONE) &&
-            (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] == AUDIO_POLICY_FORCE_SPEAKER)) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
-        }
-        if ((device2 == AUDIO_DEVICE_NONE)) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_LINE;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADSET;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
-        }
-        if ((device2 == AUDIO_DEVICE_NONE) && (strategy != STRATEGY_SONIFICATION)) {
-            // no sonification on aux digital (e.g. HDMI)
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL;
-        }
-        if ((device2 == AUDIO_DEVICE_NONE) &&
-                (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
-        }
-        int device3 = AUDIO_DEVICE_NONE;
-        if (strategy == STRATEGY_MEDIA) {
-            // ARC, SPDIF and AUX_LINE can co-exist with others.
-            device3 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_HDMI_ARC;
-            device3 |= (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPDIF);
-            device3 |= (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_LINE);
-        }
-
-        device2 |= device3;
-        // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or
-        // STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise
-        device |= device2;
-
-        // If hdmi system audio mode is on, remove speaker out of output list.
-        if ((strategy == STRATEGY_MEDIA) &&
-            (mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] ==
-                AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
-            device &= ~AUDIO_DEVICE_OUT_SPEAKER;
-        }
-
-        if (device) break;
-        device = mDefaultOutputDevice->mDeviceType;
-        if (device == AUDIO_DEVICE_NONE) {
-            ALOGE("getDeviceForStrategy() no device found for STRATEGY_MEDIA");
-        }
-        } break;
-
-    default:
-        ALOGW("getDeviceForStrategy() unknown strategy: %d", strategy);
-        break;
-    }
-
-    ALOGVV("getDeviceForStrategy() strategy %d, device %x", strategy, device);
-    return device;
+    return mEngine->getDeviceForStrategy(strategy);
 }
 
 void AudioPolicyManager::updateDevicesAndOutputs()
@@ -4698,7 +3835,7 @@
                 ALOGVV("checkDeviceMuteStrategies() %s strategy %d (curDevice %04x) on output %d",
                       mute ? "muting" : "unmuting", i, curDevice, curOutput);
                 setStrategyMute((routing_strategy)i, mute, curOutput, mute ? 0 : delayMs);
-                if (desc->isStrategyActive((routing_strategy)i)) {
+                if (isStrategyActive(desc, (routing_strategy)i)) {
                     if (mute) {
                         // FIXME: should not need to double latency if volume could be applied
                         // immediately by the audioflinger mixer. We must account for the delay
@@ -4721,7 +3858,7 @@
             muteWaitMs = outputDesc->latency() * 2;
         }
         for (size_t i = 0; i < NUM_STRATEGIES; i++) {
-            if (outputDesc->isStrategyActive((routing_strategy)i)) {
+            if (isStrategyActive(outputDesc, (routing_strategy)i)) {
                 setStrategyMute((routing_strategy)i, true, outputDesc->mIoHandle);
                 // do tempMute unmute after twice the mute wait time
                 setStrategyMute((routing_strategy)i, false, outputDesc->mIoHandle,
@@ -4827,8 +3964,7 @@
                                        status, afPatchHandle, patch.num_sources, patch.num_sinks);
             if (status == NO_ERROR) {
                 if (index < 0) {
-                    patchDesc = new AudioPatch((audio_patch_handle_t)nextUniqueId(),
-                                               &patch, mUidCached);
+                    patchDesc = new AudioPatch(&patch, mUidCached);
                     addAudioPatch(patchDesc->mHandle, patchDesc);
                 } else {
                     patchDesc->mPatch = patch;
@@ -4847,7 +3983,7 @@
         // inform all input as well
         for (size_t i = 0; i < mInputs.size(); i++) {
             const sp<AudioInputDescriptor>  inputDescriptor = mInputs.valueAt(i);
-            if (!isVirtualInputDevice(inputDescriptor->mDevice)) {
+            if (!is_virtual_input_device(inputDescriptor->mDevice)) {
                 AudioParameter inputCmd = AudioParameter();
                 ALOGV("%s: inform input %d of device:%d", __func__,
                       inputDescriptor->mIoHandle, device);
@@ -4934,8 +4070,7 @@
                                                                           status, afPatchHandle);
             if (status == NO_ERROR) {
                 if (index < 0) {
-                    patchDesc = new AudioPatch((audio_patch_handle_t)nextUniqueId(),
-                                               &patch, mUidCached);
+                    patchDesc = new AudioPatch(&patch, mUidCached);
                     addAudioPatch(patchDesc->mHandle, patchDesc);
                 } else {
                     patchDesc->mPatch = patch;
@@ -4978,11 +4113,11 @@
 }
 
 sp<IOProfile> AudioPolicyManager::getInputProfile(audio_devices_t device,
-                                                   String8 address,
-                                                   uint32_t& samplingRate,
-                                                   audio_format_t format,
-                                                   audio_channel_mask_t channelMask,
-                                                   audio_input_flags_t flags)
+                                                  String8 address,
+                                                  uint32_t& samplingRate,
+                                                  audio_format_t format,
+                                                  audio_channel_mask_t channelMask,
+                                                  audio_input_flags_t flags)
 {
     // Choose an input profile based on the requested capture parameters: select the first available
     // profile supporting all requested parameters.
@@ -5009,224 +4144,35 @@
 
 
 audio_devices_t AudioPolicyManager::getDeviceAndMixForInputSource(audio_source_t inputSource,
-                                                            AudioMix **policyMix)
+                                                                  AudioMix **policyMix)
 {
-    audio_devices_t availableDeviceTypes = mAvailableInputDevices.types() &
-                                            ~AUDIO_DEVICE_BIT_IN;
+    audio_devices_t availableDeviceTypes = mAvailableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
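+    // A dynamic policy mix registered for this input source takes precedence over the default routing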
+    audio_devices_t selectedDeviceFromMix =
+           mPolicyMixes.getDeviceAndMixForInputSource(inputSource, availableDeviceTypes, policyMix);
 
-    for (size_t i = 0; i < mPolicyMixes.size(); i++) {
-        if (mPolicyMixes[i]->mMix.mMixType != MIX_TYPE_RECORDERS) {
-            continue;
-        }
-        for (size_t j = 0; j < mPolicyMixes[i]->mMix.mCriteria.size(); j++) {
-            if ((RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET == mPolicyMixes[i]->mMix.mCriteria[j].mRule &&
-                    mPolicyMixes[i]->mMix.mCriteria[j].mAttr.mSource == inputSource) ||
-               (RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET == mPolicyMixes[i]->mMix.mCriteria[j].mRule &&
-                    mPolicyMixes[i]->mMix.mCriteria[j].mAttr.mSource != inputSource)) {
-                if (availableDeviceTypes & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
-                    if (policyMix != NULL) {
-                        *policyMix = &mPolicyMixes[i]->mMix;
-                    }
-                    return AUDIO_DEVICE_IN_REMOTE_SUBMIX;
-                }
-                break;
-            }
-        }
+    if (selectedDeviceFromMix != AUDIO_DEVICE_NONE) {
+        return selectedDeviceFromMix;
     }
-
     return getDeviceForInputSource(inputSource);
 }
 
 audio_devices_t AudioPolicyManager::getDeviceForInputSource(audio_source_t inputSource)
 {
-    uint32_t device = AUDIO_DEVICE_NONE;
-    audio_devices_t availableDeviceTypes = mAvailableInputDevices.types() &
-                                            ~AUDIO_DEVICE_BIT_IN;
-
-    switch (inputSource) {
-    case AUDIO_SOURCE_VOICE_UPLINK:
-      if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
-          device = AUDIO_DEVICE_IN_VOICE_CALL;
-          break;
-      }
-      break;
-
-    case AUDIO_SOURCE_DEFAULT:
-    case AUDIO_SOURCE_MIC:
-    if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) {
-        device = AUDIO_DEVICE_IN_BLUETOOTH_A2DP;
-    } else if ((mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO) &&
-        (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET)) {
-        device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
-    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
-        device = AUDIO_DEVICE_IN_WIRED_HEADSET;
-    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
-        device = AUDIO_DEVICE_IN_USB_DEVICE;
-    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-        device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-    }
-    break;
-
-    case AUDIO_SOURCE_VOICE_COMMUNICATION:
-        // Allow only use of devices on primary input if in call and HAL does not support routing
-        // to voice call path.
-        if ((mPhoneState == AUDIO_MODE_IN_CALL) &&
-                (mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_TELEPHONY_TX) == 0) {
-            availableDeviceTypes = availablePrimaryInputDevices() & ~AUDIO_DEVICE_BIT_IN;
-        }
-
-        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
-        case AUDIO_POLICY_FORCE_BT_SCO:
-            // if SCO device is requested but no SCO device is available, fall back to default case
-            if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
-                device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
-                break;
-            }
-            // FALL THROUGH
-
-        default:    // FORCE_NONE
-            if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
-                device = AUDIO_DEVICE_IN_WIRED_HEADSET;
-            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
-                device = AUDIO_DEVICE_IN_USB_DEVICE;
-            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-            }
-            break;
-
-        case AUDIO_POLICY_FORCE_SPEAKER:
-            if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
-                device = AUDIO_DEVICE_IN_BACK_MIC;
-            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-            }
-            break;
-        }
-        break;
-
-    case AUDIO_SOURCE_VOICE_RECOGNITION:
-    case AUDIO_SOURCE_HOTWORD:
-        if (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO &&
-                availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
-            device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
-            device = AUDIO_DEVICE_IN_WIRED_HEADSET;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
-            device = AUDIO_DEVICE_IN_USB_DEVICE;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-        }
-        break;
-    case AUDIO_SOURCE_CAMCORDER:
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
-            device = AUDIO_DEVICE_IN_BACK_MIC;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-        }
-        break;
-    case AUDIO_SOURCE_VOICE_DOWNLINK:
-    case AUDIO_SOURCE_VOICE_CALL:
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
-            device = AUDIO_DEVICE_IN_VOICE_CALL;
-        }
-        break;
-    case AUDIO_SOURCE_REMOTE_SUBMIX:
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
-            device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
-        }
-        break;
-     case AUDIO_SOURCE_FM_TUNER:
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_FM_TUNER) {
-            device = AUDIO_DEVICE_IN_FM_TUNER;
-        }
-        break;
-    default:
-        ALOGW("getDeviceForInputSource() invalid input source %d", inputSource);
-        break;
-    }
-    ALOGV("getDeviceForInputSource()input source %d, device %08x", inputSource, device);
-    return device;
-}
-
-bool AudioPolicyManager::isVirtualInputDevice(audio_devices_t device)
-{
-    if ((device & AUDIO_DEVICE_BIT_IN) != 0) {
-        device &= ~AUDIO_DEVICE_BIT_IN;
-        if ((popcount(device) == 1) && ((device & ~APM_AUDIO_IN_DEVICE_VIRTUAL_ALL) == 0))
-            return true;
-    }
-    return false;
-}
-
-bool AudioPolicyManager::deviceDistinguishesOnAddress(audio_devices_t device) {
-    return ((device & APM_AUDIO_DEVICE_MATCH_ADDRESS_ALL & ~AUDIO_DEVICE_BIT_IN) != 0);
-}
-
-audio_io_handle_t AudioPolicyManager::getActiveInput(bool ignoreVirtualInputs)
-{
-    for (size_t i = 0; i < mInputs.size(); i++) {
-        const sp<AudioInputDescriptor>  input_descriptor = mInputs.valueAt(i);
-        if ((input_descriptor->mRefCount > 0)
-                && (!ignoreVirtualInputs || !isVirtualInputDevice(input_descriptor->mDevice))) {
-            return mInputs.keyAt(i);
-        }
-    }
-    return 0;
-}
-
-uint32_t AudioPolicyManager::activeInputsCount() const
-{
-    uint32_t count = 0;
-    for (size_t i = 0; i < mInputs.size(); i++) {
-        const sp<AudioInputDescriptor>  desc = mInputs.valueAt(i);
-        if (desc->mRefCount > 0) {
-            count++;
-        }
-    }
-    return count;
-}
-
-
-void AudioPolicyManager::initializeVolumeCurves()
-{
-    for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
-        for (int j = 0; j < ApmGains::DEVICE_CATEGORY_CNT; j++) {
-            mStreams[i].mVolumeCurve[j] =
-                    ApmGains::sVolumeProfiles[i][j];
-        }
-    }
-
-    // Check availability of DRC on speaker path: if available, override some of the speaker curves
-    if (mSpeakerDrcEnabled) {
-        mStreams[AUDIO_STREAM_SYSTEM].mVolumeCurve[ApmGains::DEVICE_CATEGORY_SPEAKER] =
-                ApmGains::sDefaultSystemVolumeCurveDrc;
-        mStreams[AUDIO_STREAM_RING].mVolumeCurve[ApmGains::DEVICE_CATEGORY_SPEAKER] =
-                ApmGains::sSpeakerSonificationVolumeCurveDrc;
-        mStreams[AUDIO_STREAM_ALARM].mVolumeCurve[ApmGains::DEVICE_CATEGORY_SPEAKER] =
-                ApmGains::sSpeakerSonificationVolumeCurveDrc;
-        mStreams[AUDIO_STREAM_NOTIFICATION].mVolumeCurve[ApmGains::DEVICE_CATEGORY_SPEAKER] =
-                ApmGains::sSpeakerSonificationVolumeCurveDrc;
-        mStreams[AUDIO_STREAM_MUSIC].mVolumeCurve[ApmGains::DEVICE_CATEGORY_SPEAKER] =
-                ApmGains::sSpeakerMediaVolumeCurveDrc;
-        mStreams[AUDIO_STREAM_ACCESSIBILITY].mVolumeCurve[ApmGains::DEVICE_CATEGORY_SPEAKER] =
-                ApmGains::sSpeakerMediaVolumeCurveDrc;
-    }
+    return mEngine->getDeviceForInputSource(inputSource);
 }
 
 float AudioPolicyManager::computeVolume(audio_stream_type_t stream,
-                                            int index,
-                                            audio_io_handle_t output,
-                                            audio_devices_t device)
+                                        int index,
+                                        audio_io_handle_t output,
+                                        audio_devices_t device)
 {
     float volume = 1.0;
     sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
-    StreamDescriptor &streamDesc = mStreams[stream];
 
     if (device == AUDIO_DEVICE_NONE) {
         device = outputDesc->device();
     }
-
-    volume = ApmGains::volIndexToAmpl(device, streamDesc, index);
+    volume = mEngine->volIndexToAmpl(Volume::getDeviceCategory(device), stream, index);
 
     // if a headset is connected, apply the following rules to ring tones and notifications
     // to avoid sound level bursts in user's ears:
@@ -5242,8 +4188,8 @@
                 || (stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL)
                 || (stream == AUDIO_STREAM_SYSTEM)
                 || ((stream_strategy == STRATEGY_ENFORCED_AUDIBLE) &&
-                    (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_NONE))) &&
-        streamDesc.mCanBeMuted) {
+                    (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) &&
+            mStreams.canBeMuted(stream)) {
         volume *= SONIFICATION_HEADSET_VOLUME_FACTOR;
         // when the phone is ringing we must consider that music could have been paused just before
         // by the music application and behave as if music was active if the last music track was
@@ -5268,11 +4214,11 @@
 }
 
 status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream,
-                                                   int index,
-                                                   audio_io_handle_t output,
-                                                   audio_devices_t device,
-                                                   int delayMs,
-                                                   bool force)
+                                               int index,
+                                               audio_io_handle_t output,
+                                               audio_devices_t device,
+                                               int delayMs,
+                                               bool force)
 {
 
     // do not change actual stream volume if the stream is muted
@@ -5281,14 +4227,13 @@
               stream, mOutputs.valueFor(output)->mMuteCount[stream]);
         return NO_ERROR;
     }
-
+    audio_policy_forced_cfg_t forceUseForComm =
+            mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION);
     // do not change in call volume if bluetooth is connected and vice versa
-    if ((stream == AUDIO_STREAM_VOICE_CALL &&
-            mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] == AUDIO_POLICY_FORCE_BT_SCO) ||
-        (stream == AUDIO_STREAM_BLUETOOTH_SCO &&
-                mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] != AUDIO_POLICY_FORCE_BT_SCO)) {
+    if ((stream == AUDIO_STREAM_VOICE_CALL && forceUseForComm == AUDIO_POLICY_FORCE_BT_SCO) ||
+        (stream == AUDIO_STREAM_BLUETOOTH_SCO && forceUseForComm != AUDIO_POLICY_FORCE_BT_SCO)) {
         ALOGV("checkAndSetVolume() cannot set stream %d volume with force use = %d for comm",
-             stream, mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]);
+             stream, forceUseForComm);
         return INVALID_OPERATION;
     }
 
@@ -5325,7 +4270,7 @@
         float voiceVolume;
         // Force voice volume to max for bluetooth SCO as volume is managed by the headset
         if (stream == AUDIO_STREAM_VOICE_CALL) {
-            voiceVolume = (float)index/(float)mStreams[stream].mIndexMax;
+            voiceVolume = (float)index/(float)mStreams[stream].getVolumeIndexMax();
         } else {
             voiceVolume = 1.0;
         }
@@ -5340,9 +4285,9 @@
 }
 
 void AudioPolicyManager::applyStreamVolumes(audio_io_handle_t output,
-                                                audio_devices_t device,
-                                                int delayMs,
-                                                bool force)
+                                            audio_devices_t device,
+                                            int delayMs,
+                                            bool force)
 {
     ALOGVV("applyStreamVolumes() for output %d and device %x", output, device);
 
@@ -5360,10 +4305,10 @@
 }
 
 void AudioPolicyManager::setStrategyMute(routing_strategy strategy,
-                                             bool on,
-                                             audio_io_handle_t output,
-                                             int delayMs,
-                                             audio_devices_t device)
+                                         bool on,
+                                         audio_io_handle_t output,
+                                         int delayMs,
+                                         audio_devices_t device)
 {
     ALOGVV("setStrategyMute() strategy %d, mute %d, output %d", strategy, on, output);
     for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
@@ -5377,12 +4322,12 @@
 }
 
 void AudioPolicyManager::setStreamMute(audio_stream_type_t stream,
-                                           bool on,
-                                           audio_io_handle_t output,
-                                           int delayMs,
-                                           audio_devices_t device)
+                                       bool on,
+                                       audio_io_handle_t output,
+                                       int delayMs,
+                                       audio_devices_t device)
 {
-    StreamDescriptor &streamDesc = mStreams[stream];
+    const StreamDescriptor &streamDesc = mStreams[stream];
     sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
     if (device == AUDIO_DEVICE_NONE) {
         device = outputDesc->device();
@@ -5393,9 +4338,9 @@
 
     if (on) {
         if (outputDesc->mMuteCount[stream] == 0) {
-            if (streamDesc.mCanBeMuted &&
+            if (streamDesc.canBeMuted() &&
                     ((stream != AUDIO_STREAM_ENFORCED_AUDIBLE) ||
-                     (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_NONE))) {
+                     (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) {
                 checkAndSetVolume(stream, 0, output, device, delayMs);
             }
         }
@@ -5461,192 +4406,6 @@
     }
 }
 
-bool AudioPolicyManager::isInCall()
-{
-    return isStateInCall(mPhoneState);
-}
-
-bool AudioPolicyManager::isStateInCall(int state) {
-    return ((state == AUDIO_MODE_IN_CALL) ||
-            (state == AUDIO_MODE_IN_COMMUNICATION));
-}
-
-uint32_t AudioPolicyManager::getMaxEffectsCpuLoad()
-{
-    return MAX_EFFECTS_CPU_LOAD;
-}
-
-uint32_t AudioPolicyManager::getMaxEffectsMemory()
-{
-    return MAX_EFFECTS_MEMORY;
-}
-
-
-// --- EffectDescriptor class implementation
-
-status_t AudioPolicyManager::EffectDescriptor::dump(int fd)
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, " I/O: %d\n", mIo);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Strategy: %d\n", mStrategy);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Session: %d\n", mSession);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Name: %s\n",  mDesc.name);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " %s\n",  mEnabled ? "Enabled" : "Disabled");
-    result.append(buffer);
-    write(fd, result.string(), result.size());
-
-    return NO_ERROR;
-}
-
-
-// --- audio_policy.conf file parsing
-// TODO candidate to be moved to ConfigParsingUtils
-void AudioPolicyManager::loadHwModule(cnode *root)
-{
-    status_t status = NAME_NOT_FOUND;
-    cnode *node;
-    sp<HwModule> module = new HwModule(root->name);
-
-    node = config_find(root, DEVICES_TAG);
-    if (node != NULL) {
-        node = node->first_child;
-        while (node) {
-            ALOGV("loadHwModule() loading device %s", node->name);
-            status_t tmpStatus = module->loadDevice(node);
-            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
-                status = tmpStatus;
-            }
-            node = node->next;
-        }
-    }
-    node = config_find(root, OUTPUTS_TAG);
-    if (node != NULL) {
-        node = node->first_child;
-        while (node) {
-            ALOGV("loadHwModule() loading output %s", node->name);
-            status_t tmpStatus = module->loadOutput(node);
-            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
-                status = tmpStatus;
-            }
-            node = node->next;
-        }
-    }
-    node = config_find(root, INPUTS_TAG);
-    if (node != NULL) {
-        node = node->first_child;
-        while (node) {
-            ALOGV("loadHwModule() loading input %s", node->name);
-            status_t tmpStatus = module->loadInput(node);
-            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
-                status = tmpStatus;
-            }
-            node = node->next;
-        }
-    }
-    loadGlobalConfig(root, module);
-
-    if (status == NO_ERROR) {
-        mHwModules.add(module);
-    }
-}
-
-// TODO candidate to be moved to ConfigParsingUtils
-void AudioPolicyManager::loadHwModules(cnode *root)
-{
-    cnode *node = config_find(root, AUDIO_HW_MODULE_TAG);
-    if (node == NULL) {
-        return;
-    }
-
-    node = node->first_child;
-    while (node) {
-        ALOGV("loadHwModules() loading module %s", node->name);
-        loadHwModule(node);
-        node = node->next;
-    }
-}
-
-// TODO candidate to be moved to ConfigParsingUtils
-void AudioPolicyManager::loadGlobalConfig(cnode *root, const sp<HwModule>& module)
-{
-    cnode *node = config_find(root, GLOBAL_CONFIG_TAG);
-
-    if (node == NULL) {
-        return;
-    }
-    DeviceVector declaredDevices;
-    if (module != NULL) {
-        declaredDevices = module->mDeclaredDevices;
-    }
-
-    node = node->first_child;
-    while (node) {
-        if (strcmp(ATTACHED_OUTPUT_DEVICES_TAG, node->name) == 0) {
-            mAvailableOutputDevices.loadDevicesFromName((char *)node->value,
-                                                        declaredDevices);
-            ALOGV("loadGlobalConfig() Attached Output Devices %08x",
-                  mAvailableOutputDevices.types());
-        } else if (strcmp(DEFAULT_OUTPUT_DEVICE_TAG, node->name) == 0) {
-            audio_devices_t device = (audio_devices_t)ConfigParsingUtils::stringToEnum(
-                    sDeviceNameToEnumTable,
-                    ARRAY_SIZE(sDeviceNameToEnumTable),
-                    (char *)node->value);
-            if (device != AUDIO_DEVICE_NONE) {
-                mDefaultOutputDevice = new DeviceDescriptor(String8("default-output"), device);
-            } else {
-                ALOGW("loadGlobalConfig() default device not specified");
-            }
-            ALOGV("loadGlobalConfig() mDefaultOutputDevice %08x", mDefaultOutputDevice->mDeviceType);
-        } else if (strcmp(ATTACHED_INPUT_DEVICES_TAG, node->name) == 0) {
-            mAvailableInputDevices.loadDevicesFromName((char *)node->value,
-                                                       declaredDevices);
-            ALOGV("loadGlobalConfig() Available InputDevices %08x", mAvailableInputDevices.types());
-        } else if (strcmp(SPEAKER_DRC_ENABLED_TAG, node->name) == 0) {
-            mSpeakerDrcEnabled = ConfigParsingUtils::stringToBool((char *)node->value);
-            ALOGV("loadGlobalConfig() mSpeakerDrcEnabled = %d", mSpeakerDrcEnabled);
-        } else if (strcmp(AUDIO_HAL_VERSION_TAG, node->name) == 0) {
-            uint32_t major, minor;
-            sscanf((char *)node->value, "%u.%u", &major, &minor);
-            module->mHalVersion = HARDWARE_DEVICE_API_VERSION(major, minor);
-            ALOGV("loadGlobalConfig() mHalVersion = %04x major %u minor %u",
-                  module->mHalVersion, major, minor);
-        }
-        node = node->next;
-    }
-}
-
-// TODO candidate to be moved to ConfigParsingUtils
-status_t AudioPolicyManager::loadAudioPolicyConfig(const char *path)
-{
-    cnode *root;
-    char *data;
-
-    data = (char *)load_file(path, NULL);
-    if (data == NULL) {
-        return -ENODEV;
-    }
-    root = config_node("", "");
-    config_load(root, data);
-
-    loadHwModules(root);
-    // legacy audio_policy.conf files have one global_configuration section
-    loadGlobalConfig(root, getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY));
-    config_free(root);
-    free(root);
-    free(data);
-
-    ALOGI("loadAudioPolicyConfig() loaded %s\n", path);
-
-    return NO_ERROR;
-}
-
 void AudioPolicyManager::defaultAudioPolicyConfig(void)
 {
     sp<HwModule> module;
@@ -5732,7 +4491,8 @@
     }
 }
 
-bool AudioPolicyManager::isValidAttributes(const audio_attributes_t *paa) {
+bool AudioPolicyManager::isValidAttributes(const audio_attributes_t *paa)
+{
     // has flags that map to a strategy?
     if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO | AUDIO_FLAG_BEACON)) != 0) {
         return true;
@@ -5763,4 +4523,39 @@
     return true;
 }
 
+bool AudioPolicyManager::isStrategyActive(const sp<AudioOutputDescriptor> outputDesc,
+                                          routing_strategy strategy, uint32_t inPastMs,
+                                          nsecs_t sysTime) const
+{
+    if ((sysTime == 0) && (inPastMs != 0)) {
+        sysTime = systemTime();
+    }
+    for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
+        if (i == AUDIO_STREAM_PATCH) {
+            continue;
+        }
+        if (((getStrategy((audio_stream_type_t)i) == strategy) ||
+                (NUM_STRATEGIES == strategy)) &&
+                outputDesc->isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+audio_policy_forced_cfg_t AudioPolicyManager::getForceUse(audio_policy_force_use_t usage)
+{
+    return mEngine->getForceUse(usage);
+}
+
+bool AudioPolicyManager::isInCall()
+{
+    return isStateInCall(mEngine->getPhoneState());
+}
+
+bool AudioPolicyManager::isStateInCall(int state)
+{
+    return is_state_in_call(state);
+}
+
 }; // namespace android
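
Editor's note on the refactor above: the manager no longer reads mForceUse[], mStreams[] or mPhoneState directly; it queries the policy engine (mEngine->getForceUse(), mEngine->volIndexToAmpl(), mEngine->getPhoneState()) and dedicated collection classes instead. The snippet below is a minimal, self-contained sketch of that facade-to-owner delegation pattern using invented toy types (ToyEngine, ToyEffectCollection, PolicyFacade); these are illustrative placeholders, not the real AOSP classes.

#include <cstdio>
#include <map>

// Toy sketch only: ToyEngine, ToyEffectCollection and PolicyFacade are
// illustrative placeholders, not the actual AOSP policy classes.
struct ToyEngine {
    int mForcedConfig = 0;                       // stands in for the forced-use state
    int getForceUse() const { return mForcedConfig; }
};

struct ToyEffectCollection {
    std::map<int, bool> mEnabled;                // effect id -> enabled state
    int setEffectEnabled(int id, bool on) { mEnabled[id] = on; return 0; }
};

// The facade keeps no policy state of its own; every query is a one-line
// forward to the object that owns that piece of state.
struct PolicyFacade {
    ToyEngine mEngine;
    ToyEffectCollection mEffects;

    int getForceUse() const { return mEngine.getForceUse(); }
    int setEffectEnabled(int id, bool on) { return mEffects.setEffectEnabled(id, on); }
};

int main() {
    PolicyFacade apm;
    apm.setEffectEnabled(42, true);
    std::printf("force use = %d\n", apm.getForceUse());
    return 0;
}
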
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 61ea6f2..02b678a 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#pragma once
 
 #include <stdint.h>
 #include <sys/types.h>
@@ -26,14 +27,21 @@
 #include <media/AudioPolicy.h>
 #include "AudioPolicyInterface.h"
 
-#include "Gains.h"
-#include "Ports.h"
-#include "ConfigParsingUtils.h"
-#include "Devices.h"
-#include "IOProfile.h"
-#include "HwModule.h"
-#include "AudioInputDescriptor.h"
-#include "AudioOutputDescriptor.h"
+#include <AudioPolicyManagerInterface.h>
+#include <AudioPolicyManagerObserver.h>
+#include <AudioGain.h>
+#include <AudioPort.h>
+#include <AudioPatch.h>
+#include <ConfigParsingUtils.h>
+#include <DeviceDescriptor.h>
+#include <IOProfile.h>
+#include <HwModule.h>
+#include <AudioInputDescriptor.h>
+#include <AudioOutputDescriptor.h>
+#include <AudioPolicyMix.h>
+#include <EffectDescriptor.h>
+#include <SoundTriggerSession.h>
+#include <StreamDescriptor.h>
 
 namespace android {
 
@@ -46,9 +54,7 @@
 // Time in milliseconds during which we consider that music is still active after a music
 // track was stopped - see computeVolume()
 #define SONIFICATION_HEADSET_MUSIC_DELAY  5000
-// Time in milliseconds after media stopped playing during which we consider that the
-// sonification should be as unobtrusive as during the time media was playing.
-#define SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY 5000
+
 // Time in milliseconds during which some streams are muted while the audio path
 // is switched
 #define MUTE_TIME_MS 2000
@@ -61,14 +67,12 @@
 // Can be overridden by the audio.offload.min.duration.secs property
 #define OFFLOAD_DEFAULT_MIN_DURATION_SECS 60
 
-#define MAX_MIXER_SAMPLING_RATE 48000
-#define MAX_MIXER_CHANNEL_COUNT 8
-
 // ----------------------------------------------------------------------------
 // AudioPolicyManager implements audio policy manager behavior common to all platforms.
 // ----------------------------------------------------------------------------
 
-class AudioPolicyManager: public AudioPolicyInterface
+class AudioPolicyManager : public AudioPolicyInterface, public AudioPolicyManagerObserver
+
 #ifdef AUDIO_POLICY_TEST
     , public Thread
 #endif //AUDIO_POLICY_TEST
@@ -89,6 +93,7 @@
         virtual void setForceUse(audio_policy_force_use_t usage,
                                  audio_policy_forced_cfg_t config);
         virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
+
         virtual void setSystemProperty(const char* property, const char* value);
         virtual status_t initCheck();
         virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
@@ -158,16 +163,28 @@
                                         uint32_t strategy,
                                         int session,
                                         int id);
-        virtual status_t unregisterEffect(int id);
-        virtual status_t setEffectEnabled(int id, bool enabled);
+        virtual status_t unregisterEffect(int id)
+        {
+            return mEffects.unregisterEffect(id);
+        }
+        virtual status_t setEffectEnabled(int id, bool enabled)
+        {
+            return mEffects.setEffectEnabled(id, enabled);
+        }
 
-        virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+        virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const
+        {
+            return mOutputs.isStreamActive(stream, inPastMs);
+        }
         // return whether a stream is playing remotely, override to change the definition of
         //   local/remote playback, used for instance by notification manager to not make
         //   media players lose audio focus when not playing locally
         //   For the base implementation, "remotely" means playing during screen mirroring which
         //   uses an output for playback with a non-empty, non "0" address.
-        virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+        virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const
+        {
+            return mOutputs.isStreamActiveRemotely(stream, inPastMs);
+        }
         virtual bool isSourceActive(audio_source_t source) const;
 
         virtual status_t dump(int fd);
@@ -195,39 +212,61 @@
                                                audio_io_handle_t *ioHandle,
                                                audio_devices_t *device);
 
-        virtual status_t releaseSoundTriggerSession(audio_session_t session);
+        virtual status_t releaseSoundTriggerSession(audio_session_t session)
+        {
+            return mSoundTriggerSessions.releaseSession(session);
+        }
 
         virtual status_t registerPolicyMixes(Vector<AudioMix> mixes);
         virtual status_t unregisterPolicyMixes(Vector<AudioMix> mixes);
 
         // Audio policy configuration file parsing (audio_policy.conf)
         // TODO candidates to be moved to ConfigParsingUtils
-                void loadHwModule(cnode *root);
-                void loadHwModules(cnode *root);
-                void loadGlobalConfig(cnode *root, const sp<HwModule>& module);
-                status_t loadAudioPolicyConfig(const char *path);
                 void defaultAudioPolicyConfig(void);
 
-                // return the strategy corresponding to a given stream type
-                static routing_strategy getStrategy(audio_stream_type_t stream);
+        // return the strategy corresponding to a given stream type
+        routing_strategy getStrategy(audio_stream_type_t stream) const;
 
-                static uint32_t nextUniqueId();
-protected:
-
-        class EffectDescriptor : public RefBase
+        // From AudioPolicyManagerObserver
+        virtual const AudioPatchCollection &getAudioPatches() const
         {
-        public:
-
-            status_t dump(int fd);
-
-            int mIo;                // io the effect is attached to
-            routing_strategy mStrategy; // routing strategy the effect is associated to
-            int mSession;               // audio session the effect is on
-            effect_descriptor_t mDesc;  // effect descriptor
-            bool mEnabled;              // enabled state: CPU load being used or not
-        };
-
+            return mAudioPatches;
+        }
+        virtual const SoundTriggerSessionCollection &getSoundTriggerSessionCollection() const
+        {
+            return mSoundTriggerSessions;
+        }
+        virtual const AudioPolicyMixCollection &getAudioPolicyMixCollection() const
+        {
+            return mPolicyMixes;
+        }
+        virtual const AudioOutputCollection &getOutputs() const
+        {
+            return mOutputs;
+        }
+        virtual const AudioInputCollection &getInputs() const
+        {
+            return mInputs;
+        }
+        virtual const DeviceVector &getAvailableOutputDevices() const
+        {
+            return mAvailableOutputDevices;
+        }
+        virtual const DeviceVector &getAvailableInputDevices() const
+        {
+            return mAvailableInputDevices;
+        }
+        virtual StreamDescriptorCollection &getStreamDescriptors()
+        {
+            return mStreams;
+        }
+        virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const
+        {
+            return mDefaultOutputDevice;
+        }
+protected:
         void addOutput(audio_io_handle_t output, sp<AudioOutputDescriptor> outputDesc);
+        void removeOutput(audio_io_handle_t output);
         void addInput(audio_io_handle_t input, sp<AudioInputDescriptor> inputDesc);
 
         // return appropriate device for streams handled by the specified strategy according to current
@@ -244,6 +283,9 @@
         virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy,
                                                      bool fromCache);
 
+        bool isStrategyActive(const sp<AudioOutputDescriptor> outputDesc, routing_strategy strategy,
+                              uint32_t inPastMs = 0, nsecs_t sysTime = 0) const;
+
         // change the route of the specified output. Returns the number of ms we have slept to
         // allow new routing to take effect in certain cases.
         virtual uint32_t setOutputDevice(audio_io_handle_t output,
@@ -265,16 +307,6 @@
         // select input device corresponding to requested audio source
         virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource);
 
-        // return io handle of active input or 0 if no input is active
-        //    Only considers inputs from physical devices (e.g. main mic, headset mic) when
-        //    ignoreVirtualInputs is true.
-        audio_io_handle_t getActiveInput(bool ignoreVirtualInputs = true);
-
-        uint32_t activeInputsCount() const;
-
-        // initialize volume curves for each strategy and device category
-        void initializeVolumeCurves();
-
         // compute the actual volume for a given stream according to the requested index and a particular
         // device
         virtual float computeVolume(audio_stream_type_t stream, int index,
@@ -307,9 +339,10 @@
         // a special tone in the device used for communication
         void handleIncallSonification(audio_stream_type_t stream, bool starting, bool stateChange);
 
+        audio_mode_t getPhoneState();
+
         // true if device is in a telephony or VoIP call
         virtual bool isInCall();
-
         // true if given state represents a device in a telephony or VoIP call
         virtual bool isStateInCall(int state);
 
@@ -347,9 +380,6 @@
         // manages A2DP output suspend/restore according to phone state and BT SCO usage
         void checkA2dpSuspend();
 
-        // returns the A2DP output handle if it is open or 0 otherwise
-        audio_io_handle_t getA2dpOutput();
-
         // selects the most appropriate device on output for current state
         // must be called every time a condition that affects the device choice for a given output is
         // changed: connected device, phone state, force use, output start, output stop..
@@ -366,18 +396,23 @@
         // selects the most appropriate device on input for current state
         audio_devices_t getNewInputDevice(audio_io_handle_t input);
 
-        virtual uint32_t getMaxEffectsCpuLoad();
-        virtual uint32_t getMaxEffectsMemory();
+        virtual uint32_t getMaxEffectsCpuLoad()
+        {
+            return mEffects.getMaxEffectsCpuLoad();
+        }
+
+        virtual uint32_t getMaxEffectsMemory()
+        {
+            return mEffects.getMaxEffectsMemory();
+        }
 #ifdef AUDIO_POLICY_TEST
         virtual     bool        threadLoop();
                     void        exit();
         int testOutputIndex(audio_io_handle_t output);
 #endif //AUDIO_POLICY_TEST
 
-        status_t setEffectEnabled(const sp<EffectDescriptor>& effectDesc, bool enabled);
-
         SortedVector<audio_io_handle_t> getOutputsForDevice(audio_devices_t device,
-                        DefaultKeyedVector<audio_io_handle_t, sp<AudioOutputDescriptor> > openOutputs);
+                                                            AudioOutputCollection openOutputs);
         bool vectorsEqual(SortedVector<audio_io_handle_t>& outputs1,
                                            SortedVector<audio_io_handle_t>& outputs2);
 
@@ -407,60 +442,57 @@
 
         audio_io_handle_t selectOutputForEffects(const SortedVector<audio_io_handle_t>& outputs);
 
-        bool isNonOffloadableEffectEnabled();
+        virtual status_t addAudioPatch(audio_patch_handle_t handle, const sp<AudioPatch>& patch)
+        {
+            return mAudioPatches.addAudioPatch(handle, patch);
+        }
+        virtual status_t removeAudioPatch(audio_patch_handle_t handle)
+        {
+            return mAudioPatches.removeAudioPatch(handle);
+        }
 
-        virtual status_t addAudioPatch(audio_patch_handle_t handle,
-                               const sp<AudioPatch>& patch);
-        virtual status_t removeAudioPatch(audio_patch_handle_t handle);
-
-        sp<AudioOutputDescriptor> getOutputFromId(audio_port_handle_t id) const;
-        sp<AudioInputDescriptor> getInputFromId(audio_port_handle_t id) const;
-        sp<HwModule> getModuleForDevice(audio_devices_t device) const;
-        sp<HwModule> getModuleFromName(const char *name) const;
-        audio_devices_t availablePrimaryOutputDevices();
-        audio_devices_t availablePrimaryInputDevices();
+        audio_devices_t availablePrimaryOutputDevices() const
+        {
+            return mOutputs.getSupportedDevices(mPrimaryOutput) & mAvailableOutputDevices.types();
+        }
+        audio_devices_t availablePrimaryInputDevices() const
+        {
+            return mAvailableInputDevices.getDevicesFromHwModule(
+                        mOutputs.valueFor(mPrimaryOutput)->getModuleHandle());
+        }
 
         void updateCallRouting(audio_devices_t rxDevice, int delayMs = 0);
 
-
         uid_t mUidCached;
         AudioPolicyClientInterface *mpClientInterface;  // audio policy client interface
         audio_io_handle_t mPrimaryOutput;              // primary output handle
         // list of descriptors for outputs currently opened
-        DefaultKeyedVector<audio_io_handle_t, sp<AudioOutputDescriptor> > mOutputs;
+        AudioOutputCollection mOutputs;
         // copy of mOutputs before setDeviceConnectionState() opens new outputs
         // reset to mOutputs when updateDevicesAndOutputs() is called.
-        DefaultKeyedVector<audio_io_handle_t, sp<AudioOutputDescriptor> > mPreviousOutputs;
-        DefaultKeyedVector<audio_io_handle_t, sp<AudioInputDescriptor> > mInputs;     // list of input descriptors
+        AudioOutputCollection mPreviousOutputs;
+        AudioInputCollection mInputs;     // list of input descriptors
         DeviceVector  mAvailableOutputDevices; // all available output devices
         DeviceVector  mAvailableInputDevices;  // all available input devices
-        int mPhoneState;                                                    // current phone state
-        audio_policy_forced_cfg_t mForceUse[AUDIO_POLICY_FORCE_USE_CNT];   // current forced use configuration
 
-        StreamDescriptor mStreams[AUDIO_STREAM_CNT];           // stream descriptors for volume control
-        bool    mLimitRingtoneVolume;                                       // limit ringtone volume to music volume if headset connected
+        StreamDescriptorCollection mStreams; // stream descriptors for volume control
+        bool    mLimitRingtoneVolume;        // limit ringtone volume to music volume if headset connected
         audio_devices_t mDeviceForStrategy[NUM_STRATEGIES];
-        float   mLastVoiceVolume;                                           // last voice volume value sent to audio HAL
+        float   mLastVoiceVolume;            // last voice volume value sent to audio HAL
 
-        // Maximum CPU load allocated to audio effects in 0.1 MIPS (ARMv5TE, 0 WS memory) units
-        static const uint32_t MAX_EFFECTS_CPU_LOAD = 1000;
-        // Maximum memory allocated to audio effects in KB
-        static const uint32_t MAX_EFFECTS_MEMORY = 512;
-        uint32_t mTotalEffectsCpuLoad; // current CPU load used by effects
-        uint32_t mTotalEffectsMemory;  // current memory used by effects
-        KeyedVector<int, sp<EffectDescriptor> > mEffects;  // list of registered audio effects
+        EffectDescriptorCollection mEffects;  // list of registered audio effects
         bool    mA2dpSuspended;  // true if A2DP output is suspended
         sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time
         bool mSpeakerDrcEnabled;// true on devices that use DRC on the DEVICE_CATEGORY_SPEAKER path
                                 // to boost soft sounds, used to adjust volume curves accordingly
 
-        Vector < sp<HwModule> > mHwModules;
-        static volatile int32_t mNextUniqueId;
+        HwModuleCollection mHwModules;
+
         volatile int32_t mAudioPortGeneration;
 
-        DefaultKeyedVector<audio_patch_handle_t, sp<AudioPatch> > mAudioPatches;
+        AudioPatchCollection mAudioPatches;
 
-        DefaultKeyedVector<audio_session_t, audio_io_handle_t> mSoundTriggerSessions;
+        SoundTriggerSessionCollection mSoundTriggerSessions;
 
         sp<AudioPatch> mCallTxPatch;
         sp<AudioPatch> mCallRxPatch;
@@ -477,16 +509,7 @@
         uint32_t mBeaconPlayingRefCount;// ref count for the playing beacon streams
         bool mBeaconMuted;              // has STREAM_TTS been muted
 
-        // custom mix entry in mPolicyMixes
-        class AudioPolicyMix : public RefBase {
-        public:
-            AudioPolicyMix() {}
-
-            AudioMix    mMix;                   // Audio policy mix descriptor
-            sp<AudioOutputDescriptor> mOutput;  // Corresponding output stream
-        };
-        DefaultKeyedVector<String8, sp<AudioPolicyMix> > mPolicyMixes; // list of registered mixes
-
+        AudioPolicyMixCollection mPolicyMixes; // list of registered mixes
 
 #ifdef AUDIO_POLICY_TEST
         Mutex   mLock;
@@ -503,14 +526,14 @@
         uint32_t        mTestLatencyMs;
 #endif //AUDIO_POLICY_TEST
 
-        static bool isVirtualInputDevice(audio_devices_t device);
-
         uint32_t nextAudioPortGeneration();
+
+        // Audio Policy Engine Interface.
+        AudioPolicyManagerInterface *mEngine;
 private:
         // updates device caching and output for streams that can influence the
         //    routing of notifications
         void handleNotificationRoutingForStream(audio_stream_type_t stream);
-        static bool deviceDistinguishesOnAddress(audio_devices_t device);
         // find the outputs on a given output descriptor that have the given address.
         // to be called on an AudioOutputDescriptor whose supported devices (as defined
         //   in mProfile->mSupportedDevices) matches the device whose address is to be matched.
@@ -533,8 +556,6 @@
                 const audio_offload_info_t *offloadInfo);
         // internal function to derive a stream type value from audio attributes
         audio_stream_type_t streamTypefromAttributesInt(const audio_attributes_t *attr);
-        // return true if any output is playing anything besides the stream to ignore
-        bool isAnyOutputActive(audio_stream_type_t streamToIgnore);
         // event is one of STARTING_OUTPUT, STARTING_BEACON, STOPPING_OUTPUT, STOPPING_BEACON
         // returns 0 if no mute/unmute event happened, the largest latency of the device where
         //   the mute/unmute happened
@@ -552,9 +573,6 @@
                                                           audio_policy_dev_state_t state,
                                                           const char *device_address,
                                                           const char *device_name);
-        sp<DeviceDescriptor>  getDeviceDescriptor(const audio_devices_t device,
-                                                  const char *device_address,
-                                                  const char *device_name);
 };
 
 };
diff --git a/services/audiopolicy/managerdefault/ConfigParsingUtils.cpp b/services/audiopolicy/managerdefault/ConfigParsingUtils.cpp
deleted file mode 100644
index 1afd487..0000000
--- a/services/audiopolicy/managerdefault/ConfigParsingUtils.cpp
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::ConfigParsingUtils"
-//#define LOG_NDEBUG 0
-
-#include "AudioPolicyManager.h"
-
-namespace android {
-
-//static
-uint32_t ConfigParsingUtils::stringToEnum(const struct StringToEnum *table,
-                                              size_t size,
-                                              const char *name)
-{
-    for (size_t i = 0; i < size; i++) {
-        if (strcmp(table[i].name, name) == 0) {
-            ALOGV("stringToEnum() found %s", table[i].name);
-            return table[i].value;
-        }
-    }
-    return 0;
-}
-
-//static
-const char *ConfigParsingUtils::enumToString(const struct StringToEnum *table,
-                                              size_t size,
-                                              uint32_t value)
-{
-    for (size_t i = 0; i < size; i++) {
-        if (table[i].value == value) {
-            return table[i].name;
-        }
-    }
-    return "";
-}
-
-//static
-bool ConfigParsingUtils::stringToBool(const char *value)
-{
-    return ((strcasecmp("true", value) == 0) || (strcmp("1", value) == 0));
-}
-
-
-// --- audio_policy.conf file parsing
-//static
-uint32_t ConfigParsingUtils::parseOutputFlagNames(char *name)
-{
-    uint32_t flag = 0;
-
-    // it is OK to cast name to non const here as we are not going to use it after
-    // strtok() modifies it
-    char *flagName = strtok(name, "|");
-    while (flagName != NULL) {
-        if (strlen(flagName) != 0) {
-            flag |= ConfigParsingUtils::stringToEnum(sOutputFlagNameToEnumTable,
-                               ARRAY_SIZE(sOutputFlagNameToEnumTable),
-                               flagName);
-        }
-        flagName = strtok(NULL, "|");
-    }
-    //force direct flag if offload flag is set: offloading implies a direct output stream
-    // and all common behaviors are driven by checking only the direct flag
-    // this should normally be set appropriately in the policy configuration file
-    if ((flag & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
-        flag |= AUDIO_OUTPUT_FLAG_DIRECT;
-    }
-
-    return flag;
-}
-
-//static
-uint32_t ConfigParsingUtils::parseInputFlagNames(char *name)
-{
-    uint32_t flag = 0;
-
-    // it is OK to cast name to non const here as we are not going to use it after
-    // strtok() modifies it
-    char *flagName = strtok(name, "|");
-    while (flagName != NULL) {
-        if (strlen(flagName) != 0) {
-            flag |= stringToEnum(sInputFlagNameToEnumTable,
-                               ARRAY_SIZE(sInputFlagNameToEnumTable),
-                               flagName);
-        }
-        flagName = strtok(NULL, "|");
-    }
-    return flag;
-}
-
-//static
-audio_devices_t ConfigParsingUtils::parseDeviceNames(char *name)
-{
-    uint32_t device = 0;
-
-    char *devName = strtok(name, "|");
-    while (devName != NULL) {
-        if (strlen(devName) != 0) {
-            device |= stringToEnum(sDeviceNameToEnumTable,
-                                 ARRAY_SIZE(sDeviceNameToEnumTable),
-                                 devName);
-         }
-        devName = strtok(NULL, "|");
-     }
-    return device;
-}
-
-}; // namespace android
diff --git a/services/audiopolicy/managerdefault/Gains.cpp b/services/audiopolicy/managerdefault/Gains.cpp
deleted file mode 100644
index 4aca26d..0000000
--- a/services/audiopolicy/managerdefault/Gains.cpp
+++ /dev/null
@@ -1,446 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::Gains"
-//#define LOG_NDEBUG 0
-
-//#define VERY_VERBOSE_LOGGING
-#ifdef VERY_VERBOSE_LOGGING
-#define ALOGVV ALOGV
-#else
-#define ALOGVV(a...) do { } while(0)
-#endif
-
-#include "AudioPolicyManager.h"
-
-#include <math.h>
-
-namespace android {
-
-const VolumeCurvePoint
-ApmGains::sDefaultVolumeCurve[ApmGains::VOLCNT] = {
-    {1, -49.5f}, {33, -33.5f}, {66, -17.0f}, {100, 0.0f}
-};
-
-
-const VolumeCurvePoint
-ApmGains::sDefaultMediaVolumeCurve[ApmGains::VOLCNT] = {
-    {1, -58.0f}, {20, -40.0f}, {60, -17.0f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint
-ApmGains::sExtMediaSystemVolumeCurve[ApmGains::VOLCNT] = {
-    {1, -58.0f}, {20, -40.0f}, {60, -21.0f}, {100, -10.0f}
-};
-
-const VolumeCurvePoint
-ApmGains::sSpeakerMediaVolumeCurve[ApmGains::VOLCNT] = {
-    {1, -56.0f}, {20, -34.0f}, {60, -11.0f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint
-ApmGains::sSpeakerMediaVolumeCurveDrc[ApmGains::VOLCNT] = {
-    {1, -55.0f}, {20, -43.0f}, {86, -12.0f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint
-ApmGains::sSpeakerSonificationVolumeCurve[ApmGains::VOLCNT] = {
-    {1, -29.7f}, {33, -20.1f}, {66, -10.2f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint
-ApmGains::sSpeakerSonificationVolumeCurveDrc[ApmGains::VOLCNT] = {
-    {1, -35.7f}, {33, -26.1f}, {66, -13.2f}, {100, 0.0f}
-};
-
-// AUDIO_STREAM_SYSTEM, AUDIO_STREAM_ENFORCED_AUDIBLE and AUDIO_STREAM_DTMF volume tracks
-// AUDIO_STREAM_RING on phones and AUDIO_STREAM_MUSIC on tablets.
-// AUDIO_STREAM_DTMF tracks AUDIO_STREAM_VOICE_CALL while in call (See AudioService.java).
-// The range is constrained between -24dB and -6dB over speaker and -30dB and -18dB over headset.
-
-const VolumeCurvePoint
-ApmGains::sDefaultSystemVolumeCurve[ApmGains::VOLCNT] = {
-    {1, -24.0f}, {33, -18.0f}, {66, -12.0f}, {100, -6.0f}
-};
-
-const VolumeCurvePoint
-ApmGains::sDefaultSystemVolumeCurveDrc[ApmGains::VOLCNT] = {
-    {1, -34.0f}, {33, -24.0f}, {66, -15.0f}, {100, -6.0f}
-};
-
-const VolumeCurvePoint
-ApmGains::sHeadsetSystemVolumeCurve[ApmGains::VOLCNT] = {
-    {1, -30.0f}, {33, -26.0f}, {66, -22.0f}, {100, -18.0f}
-};
-
-const VolumeCurvePoint
-ApmGains::sDefaultVoiceVolumeCurve[ApmGains::VOLCNT] = {
-    {0, -42.0f}, {33, -28.0f}, {66, -14.0f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint
-ApmGains::sSpeakerVoiceVolumeCurve[ApmGains::VOLCNT] = {
-    {0, -24.0f}, {33, -16.0f}, {66, -8.0f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint
-ApmGains::sLinearVolumeCurve[ApmGains::VOLCNT] = {
-    {0, -96.0f}, {33, -68.0f}, {66, -34.0f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint
-ApmGains::sSilentVolumeCurve[ApmGains::VOLCNT] = {
-    {0, -96.0f}, {1, -96.0f}, {2, -96.0f}, {100, -96.0f}
-};
-
-const VolumeCurvePoint
-ApmGains::sFullScaleVolumeCurve[ApmGains::VOLCNT] = {
-    {0, 0.0f}, {1, 0.0f}, {2, 0.0f}, {100, 0.0f}
-};
-
-const VolumeCurvePoint *ApmGains::sVolumeProfiles[AUDIO_STREAM_CNT]
-                                                  [ApmGains::DEVICE_CATEGORY_CNT] = {
-    { // AUDIO_STREAM_VOICE_CALL
-        ApmGains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        ApmGains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        ApmGains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        ApmGains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_SYSTEM
-        ApmGains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        ApmGains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        ApmGains::sDefaultSystemVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        ApmGains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_RING
-        ApmGains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        ApmGains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        ApmGains::sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        ApmGains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_MUSIC
-        ApmGains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        ApmGains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        ApmGains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        ApmGains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_ALARM
-        ApmGains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        ApmGains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        ApmGains::sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        ApmGains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_NOTIFICATION
-        ApmGains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        ApmGains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        ApmGains::sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        ApmGains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_BLUETOOTH_SCO
-        ApmGains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        ApmGains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        ApmGains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        ApmGains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_ENFORCED_AUDIBLE
-        ApmGains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        ApmGains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        ApmGains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        ApmGains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    {  // AUDIO_STREAM_DTMF
-        ApmGains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        ApmGains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        ApmGains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        ApmGains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_TTS
-      // "Transmitted Through Speaker": always silent except on DEVICE_CATEGORY_SPEAKER
-        ApmGains::sSilentVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        ApmGains::sLinearVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        ApmGains::sSilentVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        ApmGains::sSilentVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_ACCESSIBILITY
-        ApmGains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        ApmGains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        ApmGains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        ApmGains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_REROUTING
-        ApmGains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        ApmGains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        ApmGains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        ApmGains::sFullScaleVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_PATCH
-        ApmGains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        ApmGains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        ApmGains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        ApmGains::sFullScaleVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-};
-
-//static
-audio_devices_t ApmGains::getDeviceForVolume(audio_devices_t device)
-{
-    if (device == AUDIO_DEVICE_NONE) {
-        // this happens when forcing a route update and no track is active on an output.
-        // In this case the returned category is not important.
-        device =  AUDIO_DEVICE_OUT_SPEAKER;
-    } else if (popcount(device) > 1) {
-        // Multiple device selection is either:
-        //  - speaker + one other device: give priority to speaker in this case.
-        //  - one A2DP device + another device: happens with duplicated output. In this case
-        // retain the device on the A2DP output as the other must not correspond to an active
-        // selection if not the speaker.
-        //  - HDMI-CEC system audio mode only output: give priority to available item in order.
-        if (device & AUDIO_DEVICE_OUT_SPEAKER) {
-            device = AUDIO_DEVICE_OUT_SPEAKER;
-        } else if (device & AUDIO_DEVICE_OUT_HDMI_ARC) {
-            device = AUDIO_DEVICE_OUT_HDMI_ARC;
-        } else if (device & AUDIO_DEVICE_OUT_AUX_LINE) {
-            device = AUDIO_DEVICE_OUT_AUX_LINE;
-        } else if (device & AUDIO_DEVICE_OUT_SPDIF) {
-            device = AUDIO_DEVICE_OUT_SPDIF;
-        } else {
-            device = (audio_devices_t)(device & AUDIO_DEVICE_OUT_ALL_A2DP);
-        }
-    }
-
-    /*SPEAKER_SAFE is an alias of SPEAKER for purposes of volume control*/
-    if (device == AUDIO_DEVICE_OUT_SPEAKER_SAFE)
-        device = AUDIO_DEVICE_OUT_SPEAKER;
-
-    ALOGW_IF(popcount(device) != 1,
-            "getDeviceForVolume() invalid device combination: %08x",
-            device);
-
-    return device;
-}
-
-//static
-ApmGains::device_category ApmGains::getDeviceCategory(audio_devices_t device)
-{
-    switch(getDeviceForVolume(device)) {
-        case AUDIO_DEVICE_OUT_EARPIECE:
-            return ApmGains::DEVICE_CATEGORY_EARPIECE;
-        case AUDIO_DEVICE_OUT_WIRED_HEADSET:
-        case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
-        case AUDIO_DEVICE_OUT_BLUETOOTH_SCO:
-        case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET:
-        case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP:
-        case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
-            return ApmGains::DEVICE_CATEGORY_HEADSET;
-        case AUDIO_DEVICE_OUT_LINE:
-        case AUDIO_DEVICE_OUT_AUX_DIGITAL:
-        /*USB?  Remote submix?*/
-            return ApmGains::DEVICE_CATEGORY_EXT_MEDIA;
-        case AUDIO_DEVICE_OUT_SPEAKER:
-        case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT:
-        case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER:
-        case AUDIO_DEVICE_OUT_USB_ACCESSORY:
-        case AUDIO_DEVICE_OUT_USB_DEVICE:
-        case AUDIO_DEVICE_OUT_REMOTE_SUBMIX:
-        default:
-            return ApmGains::DEVICE_CATEGORY_SPEAKER;
-    }
-}
-
-//static
-float ApmGains::volIndexToAmpl(audio_devices_t device, const StreamDescriptor& streamDesc,
-        int indexInUi)
-{
-    ApmGains::device_category deviceCategory = ApmGains::getDeviceCategory(device);
-    const VolumeCurvePoint *curve = streamDesc.mVolumeCurve[deviceCategory];
-
-    // the volume index in the UI is relative to the min and max volume indices for this stream type
-    int nbSteps = 1 + curve[ApmGains::VOLMAX].mIndex -
-            curve[ApmGains::VOLMIN].mIndex;
-    int volIdx = (nbSteps * (indexInUi - streamDesc.mIndexMin)) /
-            (streamDesc.mIndexMax - streamDesc.mIndexMin);
-
-    // find what part of the curve this index volume belongs to, or if it's out of bounds
-    int segment = 0;
-    if (volIdx < curve[ApmGains::VOLMIN].mIndex) {         // out of bounds
-        return 0.0f;
-    } else if (volIdx < curve[ApmGains::VOLKNEE1].mIndex) {
-        segment = 0;
-    } else if (volIdx < curve[ApmGains::VOLKNEE2].mIndex) {
-        segment = 1;
-    } else if (volIdx <= curve[ApmGains::VOLMAX].mIndex) {
-        segment = 2;
-    } else {                                                               // out of bounds
-        return 1.0f;
-    }
-
-    // linear interpolation in the attenuation table in dB
-    float decibels = curve[segment].mDBAttenuation +
-            ((float)(volIdx - curve[segment].mIndex)) *
-                ( (curve[segment+1].mDBAttenuation -
-                        curve[segment].mDBAttenuation) /
-                    ((float)(curve[segment+1].mIndex -
-                            curve[segment].mIndex)) );
-
-    float amplification = exp( decibels * 0.115129f); // exp( dB * ln(10) / 20 )
-
-    ALOGVV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f] ampl=%.5f",
-            curve[segment].mIndex, volIdx,
-            curve[segment+1].mIndex,
-            curve[segment].mDBAttenuation,
-            decibels,
-            curve[segment+1].mDBAttenuation,
-            amplification);
-
-    return amplification;
-}
-
-
-
-AudioGain::AudioGain(int index, bool useInChannelMask)
-{
-    mIndex = index;
-    mUseInChannelMask = useInChannelMask;
-    memset(&mGain, 0, sizeof(struct audio_gain));
-}
-
-void AudioGain::getDefaultConfig(struct audio_gain_config *config)
-{
-    config->index = mIndex;
-    config->mode = mGain.mode;
-    config->channel_mask = mGain.channel_mask;
-    if ((mGain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
-        config->values[0] = mGain.default_value;
-    } else {
-        uint32_t numValues;
-        if (mUseInChannelMask) {
-            numValues = audio_channel_count_from_in_mask(mGain.channel_mask);
-        } else {
-            numValues = audio_channel_count_from_out_mask(mGain.channel_mask);
-        }
-        for (size_t i = 0; i < numValues; i++) {
-            config->values[i] = mGain.default_value;
-        }
-    }
-    if ((mGain.mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
-        config->ramp_duration_ms = mGain.min_ramp_ms;
-    }
-}
-
-status_t AudioGain::checkConfig(const struct audio_gain_config *config)
-{
-    if ((config->mode & ~mGain.mode) != 0) {
-        return BAD_VALUE;
-    }
-    if ((config->mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
-        if ((config->values[0] < mGain.min_value) ||
-                    (config->values[0] > mGain.max_value)) {
-            return BAD_VALUE;
-        }
-    } else {
-        if ((config->channel_mask & ~mGain.channel_mask) != 0) {
-            return BAD_VALUE;
-        }
-        uint32_t numValues;
-        if (mUseInChannelMask) {
-            numValues = audio_channel_count_from_in_mask(config->channel_mask);
-        } else {
-            numValues = audio_channel_count_from_out_mask(config->channel_mask);
-        }
-        for (size_t i = 0; i < numValues; i++) {
-            if ((config->values[i] < mGain.min_value) ||
-                    (config->values[i] > mGain.max_value)) {
-                return BAD_VALUE;
-            }
-        }
-    }
-    if ((config->mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
-        if ((config->ramp_duration_ms < mGain.min_ramp_ms) ||
-                    (config->ramp_duration_ms > mGain.max_ramp_ms)) {
-            return BAD_VALUE;
-        }
-    }
-    return NO_ERROR;
-}
-
-void AudioGain::dump(int fd, int spaces, int index) const
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, "%*sGain %d:\n", spaces, "", index+1);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- mode: %08x\n", spaces, "", mGain.mode);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- channel_mask: %08x\n", spaces, "", mGain.channel_mask);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- min_value: %d mB\n", spaces, "", mGain.min_value);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- max_value: %d mB\n", spaces, "", mGain.max_value);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- default_value: %d mB\n", spaces, "", mGain.default_value);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- step_value: %d mB\n", spaces, "", mGain.step_value);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- min_ramp_ms: %d ms\n", spaces, "", mGain.min_ramp_ms);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- max_ramp_ms: %d ms\n", spaces, "", mGain.max_ramp_ms);
-    result.append(buffer);
-
-    write(fd, result.string(), result.size());
-}
-
-
-// --- StreamDescriptor class implementation
-
-StreamDescriptor::StreamDescriptor()
-    :   mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
-{
-    mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT, 0);
-}
-
-int StreamDescriptor::getVolumeIndex(audio_devices_t device)
-{
-    device = ApmGains::getDeviceForVolume(device);
-    // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT
-    if (mIndexCur.indexOfKey(device) < 0) {
-        device = AUDIO_DEVICE_OUT_DEFAULT;
-    }
-    return mIndexCur.valueFor(device);
-}
-
-void StreamDescriptor::dump(int fd)
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, "%s         %02d         %02d         ",
-             mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
-    result.append(buffer);
-    for (size_t i = 0; i < mIndexCur.size(); i++) {
-        snprintf(buffer, SIZE, "%04x : %02d, ",
-                 mIndexCur.keyAt(i),
-                 mIndexCur.valueAt(i));
-        result.append(buffer);
-    }
-    result.append("\n");
-
-    write(fd, result.string(), result.size());
-}
-
-}; // namespace android
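
Editor's note: the curve evaluation deleted above (ApmGains::volIndexToAmpl) linearly interpolates an attenuation in dB between the four curve points, then converts dB to a linear amplitude with exp(dB * ln(10) / 20) ≈ exp(dB * 0.115129). Below is a minimal, standalone sketch of that math, assuming the same 4-point, 0..100-index curve layout; it is not the relocated engine implementation.

#include <cmath>
#include <cstdio>

struct CurvePoint { int index; float dbAttenuation; };

// volIdx is already rescaled to the 0..100 curve range (the deleted code did
// that rescaling from the UI index before this step).
float volIndexToAmpl(const CurvePoint curve[4], int volIdx) {
    if (volIdx < curve[0].index) return 0.0f;   // below the curve: silence
    if (volIdx > curve[3].index) return 1.0f;   // above the curve: full scale
    int seg = (volIdx < curve[1].index) ? 0 : (volIdx < curve[2].index) ? 1 : 2;
    float db = curve[seg].dbAttenuation +
            (float)(volIdx - curve[seg].index) *
            (curve[seg + 1].dbAttenuation - curve[seg].dbAttenuation) /
            (float)(curve[seg + 1].index - curve[seg].index);
    return std::exp(db * 0.115129f);            // exp(dB * ln(10) / 20)
}

int main() {
    // Default media curve values from the table deleted above.
    const CurvePoint media[4] = { {1, -58.0f}, {20, -40.0f}, {60, -17.0f}, {100, 0.0f} };
    std::printf("amplitude at index 60: %.5f\n", volIndexToAmpl(media, 60));
    return 0;
}
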
diff --git a/services/audiopolicy/managerdefault/Gains.h b/services/audiopolicy/managerdefault/Gains.h
deleted file mode 100644
index b4ab129..0000000
--- a/services/audiopolicy/managerdefault/Gains.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-namespace android {
-
-class VolumeCurvePoint
-{
-public:
-    int mIndex;
-    float mDBAttenuation;
-};
-
-class StreamDescriptor;
-
-class ApmGains
-{
-public :
-    // 4 points to define the volume attenuation curve, each characterized by the volume
-    // index (from 0 to 100) at which they apply, and the attenuation in dB at that index.
-    // we use 100 steps to avoid rounding errors when computing the volume in volIndexToAmpl()
-    enum { VOLMIN = 0, VOLKNEE1 = 1, VOLKNEE2 = 2, VOLMAX = 3, VOLCNT = 4};
-
-    // device categories used for volume curve management.
-    enum device_category {
-        DEVICE_CATEGORY_HEADSET,
-        DEVICE_CATEGORY_SPEAKER,
-        DEVICE_CATEGORY_EARPIECE,
-        DEVICE_CATEGORY_EXT_MEDIA,
-        DEVICE_CATEGORY_CNT
-    };
-
-    // returns the category the device belongs to with regard to volume curve management
-    static ApmGains::device_category getDeviceCategory(audio_devices_t device);
-
-    // extract one device relevant for volume control from multiple device selection
-    static audio_devices_t getDeviceForVolume(audio_devices_t device);
-
-    static float volIndexToAmpl(audio_devices_t device, const StreamDescriptor& streamDesc,
-                    int indexInUi);
-
-    // default volume curve
-    static const VolumeCurvePoint sDefaultVolumeCurve[ApmGains::VOLCNT];
-    // default volume curve for media strategy
-    static const VolumeCurvePoint sDefaultMediaVolumeCurve[ApmGains::VOLCNT];
-    // volume curve for non-media audio on ext media outputs (HDMI, Line, etc)
-    static const VolumeCurvePoint sExtMediaSystemVolumeCurve[ApmGains::VOLCNT];
-    // volume curve for media strategy on speakers
-    static const VolumeCurvePoint sSpeakerMediaVolumeCurve[ApmGains::VOLCNT];
-    static const VolumeCurvePoint sSpeakerMediaVolumeCurveDrc[ApmGains::VOLCNT];
-    // volume curve for sonification strategy on speakers
-    static const VolumeCurvePoint sSpeakerSonificationVolumeCurve[ApmGains::VOLCNT];
-    static const VolumeCurvePoint sSpeakerSonificationVolumeCurveDrc[ApmGains::VOLCNT];
-    static const VolumeCurvePoint sDefaultSystemVolumeCurve[ApmGains::VOLCNT];
-    static const VolumeCurvePoint sDefaultSystemVolumeCurveDrc[ApmGains::VOLCNT];
-    static const VolumeCurvePoint sHeadsetSystemVolumeCurve[ApmGains::VOLCNT];
-    static const VolumeCurvePoint sDefaultVoiceVolumeCurve[ApmGains::VOLCNT];
-    static const VolumeCurvePoint sSpeakerVoiceVolumeCurve[ApmGains::VOLCNT];
-    static const VolumeCurvePoint sLinearVolumeCurve[ApmGains::VOLCNT];
-    static const VolumeCurvePoint sSilentVolumeCurve[ApmGains::VOLCNT];
-    static const VolumeCurvePoint sFullScaleVolumeCurve[ApmGains::VOLCNT];
-    // default volume curves per stream and device category. See initializeVolumeCurves()
-    static const VolumeCurvePoint *sVolumeProfiles[AUDIO_STREAM_CNT][ApmGains::DEVICE_CATEGORY_CNT];
-};
-
-
-class AudioGain: public RefBase
-{
-public:
-    AudioGain(int index, bool useInChannelMask);
-    virtual ~AudioGain() {}
-
-    void dump(int fd, int spaces, int index) const;
-
-    void getDefaultConfig(struct audio_gain_config *config);
-    status_t checkConfig(const struct audio_gain_config *config);
-    int               mIndex;
-    struct audio_gain mGain;
-    bool              mUseInChannelMask;
-};
-
-
-// stream descriptor used for volume control
-class StreamDescriptor
-{
-public:
-    StreamDescriptor();
-
-    int getVolumeIndex(audio_devices_t device);
-    void dump(int fd);
-
-    int mIndexMin;      // min volume index
-    int mIndexMax;      // max volume index
-    KeyedVector<audio_devices_t, int> mIndexCur;   // current volume index per device
-    bool mCanBeMuted;   // true if the stream can be muted
-
-    const VolumeCurvePoint *mVolumeCurve[ApmGains::DEVICE_CATEGORY_CNT];
-};
-
-}; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index eb9116d..00f188f 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -532,7 +532,7 @@
         mLock.unlock();
         svc.clear();
         mLock.lock();
-        if (!exitPending() && mAudioCommands.isEmpty()) {
+        if (!exitPending() && (mAudioCommands.isEmpty() || waitTime != INT64_MAX)) {
             // release delayed commands wake lock
             release_wake_lock(mName.string());
             ALOGV("AudioCommandThread() going to sleep");
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 5d6423a..9c60911 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -42,7 +42,6 @@
     api1/client2/CaptureSequencer.cpp \
     api1/client2/ZslProcessor3.cpp \
     api2/CameraDeviceClient.cpp \
-    api_pro/ProCamera2Client.cpp \
     device2/Camera2Device.cpp \
     device3/Camera3Device.cpp \
     device3/Camera3Stream.cpp \
@@ -54,6 +53,7 @@
     device3/StatusTracker.cpp \
     gui/RingBufferConsumer.cpp \
     utils/CameraTraces.cpp \
+    utils/AutoConditionLock.cpp \
 
 LOCAL_SHARED_LIBRARIES:= \
     libui \
diff --git a/services/camera/libcameraservice/CameraDeviceFactory.cpp b/services/camera/libcameraservice/CameraDeviceFactory.cpp
index bfef50e..6589e27 100644
--- a/services/camera/libcameraservice/CameraDeviceFactory.cpp
+++ b/services/camera/libcameraservice/CameraDeviceFactory.cpp
@@ -48,6 +48,7 @@
         case CAMERA_DEVICE_API_VERSION_3_0:
         case CAMERA_DEVICE_API_VERSION_3_1:
         case CAMERA_DEVICE_API_VERSION_3_2:
+        case CAMERA_DEVICE_API_VERSION_3_3:
             device = new Camera3Device(cameraId);
             break;
         default:
diff --git a/services/camera/libcameraservice/CameraFlashlight.cpp b/services/camera/libcameraservice/CameraFlashlight.cpp
index 7a79750..8613ac6 100644
--- a/services/camera/libcameraservice/CameraFlashlight.cpp
+++ b/services/camera/libcameraservice/CameraFlashlight.cpp
@@ -55,8 +55,7 @@
 
     status_t res = OK;
 
-    if (mCameraModule->getRawModule()->module_api_version >=
-            CAMERA_MODULE_API_VERSION_2_4) {
+    if (mCameraModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_4) {
         mFlashControl = new ModuleFlashControl(*mCameraModule, *mCallbacks);
         if (mFlashControl == NULL) {
             ALOGV("%s: cannot create flash control for module api v2.4+",
@@ -66,8 +65,8 @@
     } else {
         uint32_t deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
 
-        if (mCameraModule->getRawModule()->module_api_version >=
-                CAMERA_MODULE_API_VERSION_2_0) {
+        if (mCameraModule->getModuleApiVersion() >=
+                    CAMERA_MODULE_API_VERSION_2_0) {
             camera_info info;
             res = mCameraModule->getCameraInfo(
                     atoi(String8(cameraId).string()), &info);
@@ -110,6 +109,20 @@
     status_t res = OK;
     Mutex::Autolock l(mLock);
 
+    if (mOpenedCameraIds.indexOf(cameraId) != NAME_NOT_FOUND) {
+        // This case is needed to avoid state corruption during the following call sequence:
+        // CameraService::setTorchMode for camera ID 0 begins, does torch status checks
+        // CameraService::connect for camera ID 0 begins, calls prepareDeviceOpen, ends
+        // CameraService::setTorchMode for camera ID 0 continues, calls
+        //        CameraFlashlight::setTorchMode
+
+        // TODO: Move torch status checks and state updates behind this CameraFlashlight lock
+        // to avoid other similar race conditions.
+        ALOGE("%s: Camera device %s is in use, cannot set torch mode.",
+                __FUNCTION__, cameraId.string());
+        return -EBUSY;
+    }
+
     if (mFlashControl == NULL) {
         if (enabled == false) {
             return OK;
@@ -210,8 +223,7 @@
         return NO_INIT;
     }
 
-    if (mCameraModule->getRawModule()->module_api_version <
-            CAMERA_MODULE_API_VERSION_2_4) {
+    if (mCameraModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_4) {
         // framework is going to open a camera device, all flash light control
         // should be closed for backward compatible support.
         mFlashControl.clear();
@@ -260,8 +272,7 @@
     if (mOpenedCameraIds.size() != 0)
         return OK;
 
-    if (mCameraModule->getRawModule()->module_api_version <
-            CAMERA_MODULE_API_VERSION_2_4) {
+    if (mCameraModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_4) {
         // notify torch available for all cameras with a flash
         int numCameras = mCameraModule->getNumberOfCameras();
         for (int i = 0; i < numCameras; i++) {
@@ -389,7 +400,7 @@
         return NO_MEMORY;
     }
     res = device->createStream(mAnw, width, height, format,
-            HAL_DATASPACE_UNKNOWN, &mStreamId);
+            HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0, &mStreamId);
     if (res) {
         return res;
     }
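The guard added to CameraFlashlight::setTorchMode above boils down to the following shape (signature reconstructed from this hunk; mOpenedCameraIds is maintained by the prepareDeviceOpen()/device-close paths elsewhere in this file):

    // Sketch only: refuse torch control while a client owns the camera, so a concurrent
    // CameraService::connect() for the same ID cannot corrupt the flashlight state.
    status_t CameraFlashlight::setTorchMode(const String8& cameraId, bool enabled) {
        Mutex::Autolock l(mLock);
        if (mOpenedCameraIds.indexOf(cameraId) != NAME_NOT_FOUND) {
            return -EBUSY;   // the owning client controls the torch through its own path
        }
        // ... per-HAL-version flash control paths shown in the hunks above ...
        return OK;
    }
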
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 6f37f16..e9c96c6 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -17,9 +17,14 @@
 #define LOG_TAG "CameraService"
 //#define LOG_NDEBUG 0
 
+#include <algorithm>
+#include <climits>
 #include <stdio.h>
-#include <string.h>
+#include <cstring>
+#include <ctime>
+#include <string>
 #include <sys/types.h>
+#include <inttypes.h>
 #include <pthread.h>
 
 #include <binder/AppOpsManager.h>
@@ -27,9 +32,9 @@
 #include <binder/IServiceManager.h>
 #include <binder/MemoryBase.h>
 #include <binder/MemoryHeapBase.h>
+#include <binder/ProcessInfoService.h>
 #include <cutils/atomic.h>
 #include <cutils/properties.h>
-#include <cutils/multiuser.h>
 #include <gui/Surface.h>
 #include <hardware/hardware.h>
 #include <media/AudioSystem.h>
@@ -46,7 +51,6 @@
 #include "CameraService.h"
 #include "api1/CameraClient.h"
 #include "api1/Camera2Client.h"
-#include "api_pro/ProCamera2Client.h"
 #include "api2/CameraDeviceClient.h"
 #include "utils/CameraTraces.h"
 #include "CameraDeviceFactory.h"
@@ -67,25 +71,16 @@
 
 // ----------------------------------------------------------------------------
 
-static int getCallingPid() {
-    return IPCThreadState::self()->getCallingPid();
-}
-
-static int getCallingUid() {
-    return IPCThreadState::self()->getCallingUid();
-}
-
 extern "C" {
 static void camera_device_status_change(
         const struct camera_module_callbacks* callbacks,
         int camera_id,
         int new_status) {
     sp<CameraService> cs = const_cast<CameraService*>(
-                                static_cast<const CameraService*>(callbacks));
+            static_cast<const CameraService*>(callbacks));
 
-    cs->onDeviceStatusChanged(
-        camera_id,
-        new_status);
+    cs->onDeviceStatusChanged(static_cast<camera_device_status_t>(camera_id),
+            static_cast<camera_device_status_t>(new_status));
 }
 
 static void torch_mode_status_change(
@@ -127,24 +122,20 @@
 // should be ok for now.
 static CameraService *gCameraService;
 
-CameraService::CameraService()
-    :mSoundRef(0), mModule(0), mFlashlight(0)
-{
+CameraService::CameraService() : mEventLog(DEFAULT_EVICTION_LOG_LENGTH),
+        mLastUserId(DEFAULT_LAST_USER_ID), mSoundRef(0), mModule(0), mFlashlight(0) {
     ALOGI("CameraService started (pid=%d)", getpid());
     gCameraService = this;
 
-    for (size_t i = 0; i < MAX_CAMERAS; ++i) {
-        mStatusList[i] = ICameraServiceListener::STATUS_PRESENT;
-    }
-
     this->camera_device_status_change = android::camera_device_status_change;
     this->torch_mode_status_change = android::torch_mode_status_change;
 
+    mServiceLockWrapper = std::make_shared<WaitableMutexWrapper>(&mServiceLock);
 }
 
 void CameraService::onFirstRef()
 {
-    LOG1("CameraService::onFirstRef");
+    ALOGI("CameraService process starting");
 
     BnCameraService::onFirstRef();
 
@@ -156,39 +147,64 @@
     }
     else {
         mModule = new CameraModule(rawModule);
-        const hw_module_t *common = mModule->getRawModule();
-        ALOGI("Loaded \"%s\" cameraCa module", common->name);
+        ALOGI("Loaded \"%s\" camera module", mModule->getModuleName());
         mNumberOfCameras = mModule->getNumberOfCameras();
-        if (mNumberOfCameras > MAX_CAMERAS) {
-            ALOGE("Number of cameras(%d) > MAX_CAMERAS(%d).",
-                    mNumberOfCameras, MAX_CAMERAS);
-            mNumberOfCameras = MAX_CAMERAS;
-        }
 
         mFlashlight = new CameraFlashlight(*mModule, *this);
         status_t res = mFlashlight->findFlashUnits();
         if (res) {
             // impossible because we haven't open any camera devices.
-            ALOGE("failed to find flash units.");
+            ALOGE("Failed to find flash units.");
         }
 
         for (int i = 0; i < mNumberOfCameras; i++) {
-            setCameraFree(i);
+            String8 cameraId = String8::format("%d", i);
 
-            String8 cameraName = String8::format("%d", i);
-            if (mFlashlight->hasFlashUnit(cameraName)) {
-                mTorchStatusMap.add(cameraName,
+            // Defaults to use for cost and conflicting devices
+            int cost = 100;
+            char** conflicting_devices = nullptr;
+            size_t conflicting_devices_length = 0;
+
+            // If using post-2.4 module version, query the cost + conflicting devices from the HAL
+            if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_4) {
+                struct camera_info info;
+                status_t rc = mModule->getCameraInfo(i, &info);
+                if (rc == NO_ERROR) {
+                    cost = info.resource_cost;
+                    conflicting_devices = info.conflicting_devices;
+                    conflicting_devices_length = info.conflicting_devices_length;
+                } else {
+                    ALOGE("%s: Received error loading camera info for device %d, cost and"
+                            " conflicting devices fields set to defaults for this device.",
+                            __FUNCTION__, i);
+                }
+            }
+
+            std::set<String8> conflicting;
+            for (size_t i = 0; i < conflicting_devices_length; i++) {
+                conflicting.emplace(String8(conflicting_devices[i]));
+            }
+
+            // Initialize state for each camera device
+            {
+                Mutex::Autolock lock(mCameraStatesLock);
+                mCameraStates.emplace(cameraId, std::make_shared<CameraState>(cameraId, cost,
+                        conflicting));
+            }
+
+            if (mFlashlight->hasFlashUnit(cameraId)) {
+                mTorchStatusMap.add(cameraId,
                         ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF);
             }
         }
 
-        if (common->module_api_version >= CAMERA_MODULE_API_VERSION_2_1) {
+        if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_1) {
             mModule->setCallbacks(this);
         }
 
         VendorTagDescriptor::clearGlobalVendorTagDescriptor();
 
-        if (common->module_api_version >= CAMERA_MODULE_API_VERSION_2_2) {
+        if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_2) {
             setUpVendorTags();
         }
 
@@ -197,81 +213,70 @@
 }
 
 CameraService::~CameraService() {
-    for (int i = 0; i < mNumberOfCameras; i++) {
-        if (mBusy[i]) {
-            ALOGE("camera %d is still in use in destructor!", i);
-        }
-    }
-
     if (mModule) {
         delete mModule;
+        mModule = nullptr;
     }
     VendorTagDescriptor::clearGlobalVendorTagDescriptor();
-    gCameraService = NULL;
+    gCameraService = nullptr;
 }
 
-void CameraService::onDeviceStatusChanged(int cameraId,
-                                          int newStatus)
-{
-    ALOGV("%s: Status changed for cameraId=%d, newStatus=%d", __FUNCTION__,
+void CameraService::onDeviceStatusChanged(camera_device_status_t  cameraId,
+        camera_device_status_t newStatus) {
+    ALOGI("%s: Status changed for cameraId=%d, newStatus=%d", __FUNCTION__,
           cameraId, newStatus);
 
-    if (cameraId < 0 || cameraId >= MAX_CAMERAS) {
+    String8 id = String8::format("%d", cameraId);
+    std::shared_ptr<CameraState> state = getCameraState(id);
+
+    if (state == nullptr) {
         ALOGE("%s: Bad camera ID %d", __FUNCTION__, cameraId);
         return;
     }
 
-    if ((int)getStatus(cameraId) == newStatus) {
-        ALOGE("%s: State transition to the same status 0x%x not allowed",
-              __FUNCTION__, (uint32_t)newStatus);
+    ICameraServiceListener::Status oldStatus = state->getStatus();
+
+    if (oldStatus == static_cast<ICameraServiceListener::Status>(newStatus)) {
+        ALOGE("%s: State transition to the same status %#x not allowed", __FUNCTION__, newStatus);
         return;
     }
 
-    /* don't do this in updateStatus
-       since it is also called from connect and we could get into a deadlock */
     if (newStatus == CAMERA_DEVICE_STATUS_NOT_PRESENT) {
-        Vector<sp<BasicClient> > clientsToDisconnect;
+        sp<BasicClient> clientToDisconnect;
         {
-           Mutex::Autolock al(mServiceLock);
+            // Don't do this in updateStatus to avoid deadlock over mServiceLock
+            Mutex::Autolock lock(mServiceLock);
 
-           /* Remove cached parameters from shim cache */
-           mShimParams.removeItem(cameraId);
+            // Set the device status to NOT_PRESENT, clients will no longer be able to connect
+            // to this device until the status changes
+            updateStatus(ICameraServiceListener::STATUS_NOT_PRESENT, id);
 
-           /* Find all clients that we need to disconnect */
-           sp<BasicClient> client = mClient[cameraId].promote();
-           if (client.get() != NULL) {
-               clientsToDisconnect.push_back(client);
-           }
+            // Remove cached shim parameters
+            state->setShimParams(CameraParameters());
 
-           int i = cameraId;
-           for (size_t j = 0; j < mProClientList[i].size(); ++j) {
-               sp<ProClient> cl = mProClientList[i][j].promote();
-               if (cl != NULL) {
-                   clientsToDisconnect.push_back(cl);
-               }
-           }
+            // Remove the client from the list of active clients
+            clientToDisconnect = removeClientLocked(id);
+
+            // Notify the client of disconnection (skip if no client was connected)
+            if (clientToDisconnect.get() != nullptr) {
+                clientToDisconnect->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+                        CaptureResultExtras{});
+            }
         }
 
-        /* now disconnect them. don't hold the lock
-           or we can get into a deadlock */
+        ALOGI("%s: Client for camera ID %s evicted due to device status change from HAL",
+                __FUNCTION__, id.string());
 
-        for (size_t i = 0; i < clientsToDisconnect.size(); ++i) {
-            sp<BasicClient> client = clientsToDisconnect[i];
-
-            client->disconnect();
-            /**
-             * The remote app will no longer be able to call methods on the
-             * client since the client PID will be reset to 0
-             */
+        // Disconnect client
+        if (clientToDisconnect.get() != nullptr) {
+            // Ensure not in binder RPC so client disconnect PID checks work correctly
+            LOG_ALWAYS_FATAL_IF(getCallingPid() != getpid(),
+                    "onDeviceStatusChanged must be called from the camera service process!");
+            clientToDisconnect->disconnect();
         }
 
-        ALOGV("%s: After unplug, disconnected %zu clients",
-              __FUNCTION__, clientsToDisconnect.size());
+    } else {
+        updateStatus(static_cast<ICameraServiceListener::Status>(newStatus), id);
     }
 
-    updateStatus(
-            static_cast<ICameraServiceListener::Status>(newStatus), cameraId);
-
 }
 
 void CameraService::onTorchStatusChanged(const String8& cameraId,
@@ -304,9 +309,11 @@
         return;
     }
 
-    Vector<sp<ICameraServiceListener> >::const_iterator it;
-    for (it = mListenerList.begin(); it != mListenerList.end(); ++it) {
-        (*it)->onTorchStatusChanged(newStatus, String16(cameraId.string()));
+    {
+        Mutex::Autolock lock(mStatusListenerLock);
+        for (auto& i : mListenerList) {
+            i->onTorchStatusChanged(newStatus, String16{cameraId});
+        }
     }
 }
 
@@ -333,6 +340,15 @@
     return rc;
 }
 
+int CameraService::cameraIdToInt(const String8& cameraId) {
+    errno = 0;
+    size_t pos = 0;
+    int ret = stoi(std::string{cameraId.string()}, &pos);
+    if (errno != 0 || pos != cameraId.size()) {
+        return -1;
+    }
+    return ret;
+}
 
 status_t CameraService::generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo) {
     status_t ret = OK;
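The new cameraIdToInt() helper relies on std::stoi's pos out-parameter to reject strings with trailing characters (e.g. "0x"); note that std::stoi reports a failed conversion by throwing rather than through errno, so an exception-free equivalent in the spirit of the strtol check this patch removes from validCameraIdForSetTorchMode() could look like this sketch (illustrative only, not part of the patch):

    #include <cerrno>
    #include <climits>
    #include <cstdlib>

    // Returns the numeric camera ID, or -1 if the string is empty, has trailing
    // characters, or does not fit in an int.
    static int cameraIdToIntNoExcept(const char* cameraId) {
        if (cameraId == nullptr || *cameraId == '\0') return -1;
        errno = 0;
        char* end = nullptr;
        long id = strtol(cameraId, &end, 10);
        if (errno != 0 || *end != '\0' || id > INT_MAX || id < INT_MIN) {
            return -1;
        }
        return static_cast<int>(id);
    }
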
@@ -438,7 +454,7 @@
 
     int facing;
     status_t ret = OK;
-    if (mModule->getRawModule()->module_api_version < CAMERA_MODULE_API_VERSION_2_0 ||
+    if (mModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_0 ||
             getDeviceVersion(cameraId, &facing) <= CAMERA_DEVICE_API_VERSION_2_1 ) {
         /**
          * Backwards compatibility mode for old HALs:
@@ -466,6 +482,54 @@
     return ret;
 }
 
+int CameraService::getCallingPid() {
+    return IPCThreadState::self()->getCallingPid();
+}
+
+int CameraService::getCallingUid() {
+    return IPCThreadState::self()->getCallingUid();
+}
+
+String8 CameraService::getFormattedCurrentTime() {
+    time_t now = time(nullptr);
+    char formattedTime[64];
+    strftime(formattedTime, sizeof(formattedTime), "%m-%d %H:%M:%S", localtime(&now));
+    return String8(formattedTime);
+}
+
+int CameraService::getCameraPriorityFromProcState(int procState) {
+    // Find the priority for the camera usage based on the process state.  Higher priority clients
+    // win for evictions.
+    // Note: Unlike the ordering for ActivityManager, persistent system processes will always lose
+    //       the camera to the top/foreground applications.
+    switch(procState) {
+        case PROCESS_STATE_TOP: // User visible
+            return 100;
+        case PROCESS_STATE_IMPORTANT_FOREGROUND: // Foreground
+            return 90;
+        case PROCESS_STATE_PERSISTENT: // Persistent system services
+        case PROCESS_STATE_PERSISTENT_UI:
+            return 80;
+        case PROCESS_STATE_IMPORTANT_BACKGROUND: // "Important" background processes
+            return 70;
+        case PROCESS_STATE_BACKUP: // Everything else
+        case PROCESS_STATE_HEAVY_WEIGHT:
+        case PROCESS_STATE_SERVICE:
+        case PROCESS_STATE_RECEIVER:
+        case PROCESS_STATE_HOME:
+        case PROCESS_STATE_LAST_ACTIVITY:
+        case PROCESS_STATE_CACHED_ACTIVITY:
+        case PROCESS_STATE_CACHED_ACTIVITY_CLIENT:
+        case PROCESS_STATE_CACHED_EMPTY:
+            return 1;
+        case PROCESS_STATE_NONEXISTENT:
+            return -1;
+        default:
+            ALOGE("%s: Received unknown process state from ActivityManagerService!", __FUNCTION__);
+            return -1;
+    }
+}
+
 status_t CameraService::getCameraVendorTagDescriptor(/*out*/sp<VendorTagDescriptor>& desc) {
     if (!mModule) {
         ALOGE("%s: camera hardware module doesn't exist", __FUNCTION__);
@@ -483,7 +547,7 @@
     }
 
     int deviceVersion;
-    if (mModule->getRawModule()->module_api_version >= CAMERA_MODULE_API_VERSION_2_0) {
+    if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_0) {
         deviceVersion = info.device_version;
     } else {
         deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
@@ -545,54 +609,90 @@
     return true;
 }
 
-status_t CameraService::initializeShimMetadata(int cameraId) {
-    int pid = getCallingPid();
-    int uid = getCallingUid();
-    status_t ret = validateConnect(cameraId, uid);
-    if (ret != OK) {
-        // Error already logged by callee
-        return ret;
+status_t CameraService::makeClient(const sp<CameraService>& cameraService,
+        const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
+        int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
+        int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
+        /*out*/sp<BasicClient>* client) {
+
+    // TODO: Update CameraClients + HAL interface to use strings for Camera IDs
+    int id = cameraIdToInt(cameraId);
+    if (id == -1) {
+        ALOGE("%s: Invalid camera ID %s, cannot convert to integer.", __FUNCTION__,
+                cameraId.string());
+        return BAD_VALUE;
     }
 
-    bool needsNewClient = false;
-    sp<Client> client;
+    if (halVersion < 0 || halVersion == deviceVersion) {
+        // Default path: HAL version is unspecified by caller, create CameraClient
+        // based on device version reported by the HAL.
+        switch(deviceVersion) {
+          case CAMERA_DEVICE_API_VERSION_1_0:
+            if (effectiveApiLevel == API_1) {  // Camera1 API route
+                sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
+                *client = new CameraClient(cameraService, tmp, packageName, id, facing,
+                        clientPid, clientUid, getpid(), legacyMode);
+            } else { // Camera2 API route
+                ALOGW("Camera using old HAL version: %d", deviceVersion);
+                return -EOPNOTSUPP;
+            }
+            break;
+          case CAMERA_DEVICE_API_VERSION_2_0:
+          case CAMERA_DEVICE_API_VERSION_2_1:
+          case CAMERA_DEVICE_API_VERSION_3_0:
+          case CAMERA_DEVICE_API_VERSION_3_1:
+          case CAMERA_DEVICE_API_VERSION_3_2:
+          case CAMERA_DEVICE_API_VERSION_3_3:
+            if (effectiveApiLevel == API_1) { // Camera1 API route
+                sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
+                *client = new Camera2Client(cameraService, tmp, packageName, id, facing,
+                        clientPid, clientUid, servicePid, legacyMode);
+            } else { // Camera2 API route
+                sp<ICameraDeviceCallbacks> tmp =
+                        static_cast<ICameraDeviceCallbacks*>(cameraCb.get());
+                *client = new CameraDeviceClient(cameraService, tmp, packageName, id,
+                        facing, clientPid, clientUid, servicePid);
+            }
+            break;
+          default:
+            // Should not be reachable
+            ALOGE("Unknown camera device HAL version: %d", deviceVersion);
+            return INVALID_OPERATION;
+        }
+    } else {
+        // A particular HAL version is requested by caller. Create CameraClient
+        // based on the requested HAL version.
+        if (deviceVersion > CAMERA_DEVICE_API_VERSION_1_0 &&
+            halVersion == CAMERA_DEVICE_API_VERSION_1_0) {
+            // Only support higher HAL version device opened as HAL1.0 device.
+            sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
+            *client = new CameraClient(cameraService, tmp, packageName, id, facing,
+                    clientPid, clientUid, servicePid, legacyMode);
+        } else {
+            // Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet.
+            ALOGE("Invalid camera HAL version %x: HAL %x device can only be"
+                    " opened as HAL %x device", halVersion, deviceVersion,
+                    CAMERA_DEVICE_API_VERSION_1_0);
+            return INVALID_OPERATION;
+        }
+    }
+    return NO_ERROR;
+}
+
+status_t CameraService::initializeShimMetadata(int cameraId) {
+    int uid = getCallingUid();
 
     String16 internalPackageName("media");
-    {   // Scope for service lock
-        Mutex::Autolock lock(mServiceLock);
-        if (mClient[cameraId] != NULL) {
-            client = static_cast<Client*>(mClient[cameraId].promote().get());
-        }
-        if (client == NULL) {
-            needsNewClient = true;
-            ret = connectHelperLocked(/*out*/client,
-                                      /*cameraClient*/NULL, // Empty binder callbacks
-                                      cameraId,
-                                      internalPackageName,
-                                      uid,
-                                      pid);
-
-            if (ret != OK) {
-                // Error already logged by callee
-                return ret;
-            }
-        }
-
-        if (client == NULL) {
-            ALOGE("%s: Could not connect to client camera device.", __FUNCTION__);
-            return BAD_VALUE;
-        }
-
-        String8 rawParams = client->getParameters();
-        CameraParameters params(rawParams);
-        mShimParams.add(cameraId, params);
+    String8 id = String8::format("%d", cameraId);
+    status_t ret = NO_ERROR;
+    sp<Client> tmp = nullptr;
+    if ((ret = connectHelper<ICameraClient,Client>(sp<ICameraClient>{nullptr}, id,
+            static_cast<int>(CAMERA_HAL_API_VERSION_UNSPECIFIED), internalPackageName, uid, API_1,
+            false, true, tmp)) != NO_ERROR) {
+        ALOGE("%s: Error %d (%s) initializing shim metadata.", __FUNCTION__, ret, strerror(ret));
+        return ret;
     }
-
-    // Close client if one was opened solely for this call
-    if (needsNewClient) {
-        client->disconnect();
-    }
-    return OK;
+    return NO_ERROR;
 }
 
 status_t CameraService::getLegacyParametersLazy(int cameraId,
@@ -608,42 +708,55 @@
         return BAD_VALUE;
     }
 
-    ssize_t index = -1;
-    {   // Scope for service lock
+    String8 id = String8::format("%d", cameraId);
+
+    // Check if we already have parameters
+    {
+        // Scope for service lock
         Mutex::Autolock lock(mServiceLock);
-        index = mShimParams.indexOfKey(cameraId);
-        // Release service lock so initializeShimMetadata can be called correctly.
-
-        if (index >= 0) {
-            *parameters = mShimParams[index];
+        auto cameraState = getCameraState(id);
+        if (cameraState == nullptr) {
+            ALOGE("%s: Invalid camera ID: %s", __FUNCTION__, id.string());
+            return BAD_VALUE;
+        }
+        CameraParameters p = cameraState->getShimParams();
+        if (!p.isEmpty()) {
+            *parameters = p;
+            return NO_ERROR;
         }
     }
 
-    if (index < 0) {
-        int64_t token = IPCThreadState::self()->clearCallingIdentity();
-        ret = initializeShimMetadata(cameraId);
-        IPCThreadState::self()->restoreCallingIdentity(token);
-        if (ret != OK) {
-            // Error already logged by callee
-            return ret;
+    int64_t token = IPCThreadState::self()->clearCallingIdentity();
+    ret = initializeShimMetadata(cameraId);
+    IPCThreadState::self()->restoreCallingIdentity(token);
+    if (ret != NO_ERROR) {
+        // Error already logged by callee
+        return ret;
+    }
+
+    // Check for parameters again
+    {
+        // Scope for service lock
+        Mutex::Autolock lock(mServiceLock);
+        auto cameraState = getCameraState(id);
+        if (cameraState == nullptr) {
+            ALOGE("%s: Invalid camera ID: %s", __FUNCTION__, id.string());
+            return BAD_VALUE;
         }
-
-        {   // Scope for service lock
-            Mutex::Autolock lock(mServiceLock);
-            index = mShimParams.indexOfKey(cameraId);
-
-            LOG_ALWAYS_FATAL_IF(index < 0, "index should have been initialized");
-
-            *parameters = mShimParams[index];
+        CameraParameters p = cameraState->getShimParams();
+        if (!p.isEmpty()) {
+            *parameters = p;
+            return NO_ERROR;
         }
     }
 
-    return OK;
+    ALOGE("%s: Parameters were not initialized, or were empty.  Device may not be present.",
+            __FUNCTION__);
+    return INVALID_OPERATION;
 }
 
-status_t CameraService::validateConnect(int cameraId,
-                                    /*inout*/
-                                    int& clientUid) const {
+status_t CameraService::validateConnectLocked(const String8& cameraId, /*inout*/int& clientUid)
+        const {
 
     int callingPid = getCallingPid();
 
@@ -652,23 +765,25 @@
     } else {
         // We only trust our own process to forward client UIDs
         if (callingPid != getpid()) {
-            ALOGE("CameraService::connect X (pid %d) rejected (don't trust clientUid)",
+            ALOGE("CameraService::connect X (PID %d) rejected (don't trust clientUid)",
                     callingPid);
             return PERMISSION_DENIED;
         }
     }
 
     if (!mModule) {
-        ALOGE("Camera HAL module not loaded");
+        ALOGE("CameraService::connect X (PID %d) rejected (camera HAL module not loaded)",
+                callingPid);
         return -ENODEV;
     }
 
-    if (cameraId < 0 || cameraId >= mNumberOfCameras) {
-        ALOGE("CameraService::connect X (pid %d) rejected (invalid cameraId %d).",
-            callingPid, cameraId);
+    if (getCameraState(cameraId) == nullptr) {
+        ALOGE("CameraService::connect X (PID %d) rejected (invalid camera ID %s)", callingPid,
+                cameraId.string());
         return -ENODEV;
     }
 
+    // Check device policy for this camera
     char value[PROPERTY_VALUE_MAX];
     char key[PROPERTY_KEY_MAX];
     int clientUserId = multiuser_get_user_id(clientUid);
@@ -676,142 +791,225 @@
     property_get(key, value, "0");
     if (strcmp(value, "1") == 0) {
         // Camera is disabled by DevicePolicyManager.
-        ALOGI("Camera is disabled. connect X (pid %d) rejected", callingPid);
+        ALOGE("CameraService::connect X (PID %d) rejected (camera %s is disabled by device "
+                "policy)", callingPid, cameraId.string());
         return -EACCES;
     }
 
-    ICameraServiceListener::Status currentStatus = getStatus(cameraId);
+    // Only allow clients who are being used by the current foreground device user.
+    if (mLastUserId != clientUserId && mLastUserId != DEFAULT_LAST_USER_ID) {
+        ALOGE("CameraService::connect X (PID %d) rejected (cannot connect from non-foreground "
+                "device user)", callingPid);
+        return PERMISSION_DENIED;
+    }
+
+    return checkIfDeviceIsUsable(cameraId);
+}
+
+status_t CameraService::checkIfDeviceIsUsable(const String8& cameraId) const {
+    auto cameraState = getCameraState(cameraId);
+    int callingPid = getCallingPid();
+    if (cameraState == nullptr) {
+        ALOGE("CameraService::connect X (PID %d) rejected (invalid camera ID %s)", callingPid,
+                cameraId.string());
+        return -ENODEV;
+    }
+
+    ICameraServiceListener::Status currentStatus = cameraState->getStatus();
     if (currentStatus == ICameraServiceListener::STATUS_NOT_PRESENT) {
-        ALOGI("Camera is not plugged in,"
-               " connect X (pid %d) rejected", callingPid);
+        ALOGE("CameraService::connect X (PID %d) rejected (camera %s is not connected)",
+                callingPid, cameraId.string());
         return -ENODEV;
     } else if (currentStatus == ICameraServiceListener::STATUS_ENUMERATING) {
-        ALOGI("Camera is enumerating,"
-               " connect X (pid %d) rejected", callingPid);
+        ALOGE("CameraService::connect X (PID %d) rejected, (camera %s is initializing)",
+                callingPid, cameraId.string());
         return -EBUSY;
     }
-    // Else don't check for STATUS_NOT_AVAILABLE.
-    //  -- It's done implicitly in canConnectUnsafe /w the mBusy array
 
-    return OK;
+    return NO_ERROR;
 }
 
-bool CameraService::canConnectUnsafe(int cameraId,
-                                     const String16& clientPackageName,
-                                     const sp<IBinder>& remoteCallback,
-                                     sp<BasicClient> &client) {
-    String8 clientName8(clientPackageName);
-    int callingPid = getCallingPid();
+void CameraService::finishConnectLocked(const sp<BasicClient>& client,
+        const CameraService::DescriptorPtr& desc) {
 
-    if (mClient[cameraId] != 0) {
-        client = mClient[cameraId].promote();
-        if (client != 0) {
-            if (remoteCallback == client->getRemote()) {
-                LOG1("CameraService::connect X (pid %d) (the same client)",
-                     callingPid);
-                return true;
-            } else {
-                // TODOSC: need to support 1 regular client,
-                // multiple shared clients here
-                ALOGW("CameraService::connect X (pid %d) rejected"
-                      " (existing client).", callingPid);
-                return false;
+    // Make a descriptor for the incoming client
+    auto clientDescriptor = CameraService::CameraClientManager::makeClientDescriptor(client, desc);
+    auto evicted = mActiveClientManager.addAndEvict(clientDescriptor);
+
+    logConnected(desc->getKey(), static_cast<int>(desc->getOwnerId()),
+            String8(client->getPackageName()));
+
+    if (evicted.size() > 0) {
+        // This should never happen - clients should already have been removed in disconnect
+        for (auto& i : evicted) {
+            ALOGE("%s: Invalid state: Client for camera %s was not removed in disconnect",
+                    __FUNCTION__, i->getKey().string());
+        }
+
+        LOG_ALWAYS_FATAL("%s: Invalid state for CameraService, clients not evicted properly",
+                __FUNCTION__);
+    }
+}
+
+status_t CameraService::handleEvictionsLocked(const String8& cameraId, int clientPid,
+        apiLevel effectiveApiLevel, const sp<IBinder>& remoteCallback, const String8& packageName,
+        /*out*/
+        sp<BasicClient>* client,
+        std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>>* partial) {
+
+    status_t ret = NO_ERROR;
+    std::vector<sp<BasicClient>> evictedClients;
+    DescriptorPtr clientDescriptor;
+    {
+        if (effectiveApiLevel == API_1) {
+            // If we are using API1, any existing client for this camera ID with the same remote
+            // should be returned rather than evicted to allow MediaRecorder to work properly.
+
+            auto current = mActiveClientManager.get(cameraId);
+            if (current != nullptr) {
+                auto clientSp = current->getValue();
+                if (clientSp.get() != nullptr) { // should never be needed
+                    if (clientSp->getRemote() == remoteCallback) {
+                        ALOGI("CameraService::connect X (PID %d) (second call from same"
+                                "app binder, returning the same client)", clientPid);
+                        *client = clientSp;
+                        return NO_ERROR;
+                    }
+                }
             }
         }
-        mClient[cameraId].clear();
-    }
 
-    /*
-    mBusy is set to false as the last step of the Client destructor,
-    after which it is guaranteed that the Client destructor has finished (
-    including any inherited destructors)
+        // Return error if the device was unplugged or removed by the HAL for some reason
+        if ((ret = checkIfDeviceIsUsable(cameraId)) != NO_ERROR) {
+            return ret;
+        }
 
-    We only need this for a Client subclasses since we don't allow
-    multiple Clents to be opened concurrently, but multiple BasicClient
-    would be fine
-    */
-    if (mBusy[cameraId]) {
-        ALOGW("CameraService::connect X (pid %d, \"%s\") rejected"
-                " (camera %d is still busy).", callingPid,
-                clientName8.string(), cameraId);
-        return false;
-    }
+        // Get current active client PIDs
+        std::vector<int> ownerPids(mActiveClientManager.getAllOwners());
+        ownerPids.push_back(clientPid);
 
-    return true;
-}
+        // Use the value +PROCESS_STATE_NONEXISTENT, to avoid taking
+        // address of PROCESS_STATE_NONEXISTENT as a reference argument
+        // for the vector constructor. PROCESS_STATE_NONEXISTENT does
+        // not have an out-of-class definition.
+        std::vector<int> priorities(ownerPids.size(), +PROCESS_STATE_NONEXISTENT);
 
-status_t CameraService::connectHelperLocked(
-        /*out*/
-        sp<Client>& client,
-        /*in*/
-        const sp<ICameraClient>& cameraClient,
-        int cameraId,
-        const String16& clientPackageName,
-        int clientUid,
-        int callingPid,
-        int halVersion,
-        bool legacyMode) {
+        // Get priorities of all active PIDs
+        ProcessInfoService::getProcessStatesFromPids(ownerPids.size(), &ownerPids[0],
+                /*out*/&priorities[0]);
 
-    // give flashlight a chance to close devices if necessary.
-    mFlashlight->prepareDeviceOpen(String8::format("%d", cameraId));
+        // Update all active clients' priorities
+        std::map<int,int> pidToPriorityMap;
+        for (size_t i = 0; i < ownerPids.size() - 1; i++) {
+            pidToPriorityMap.emplace(ownerPids[i], getCameraPriorityFromProcState(priorities[i]));
+        }
+        mActiveClientManager.updatePriorities(pidToPriorityMap);
 
-    int facing = -1;
-    int deviceVersion = getDeviceVersion(cameraId, &facing);
-
-    if (halVersion < 0 || halVersion == deviceVersion) {
-        // Default path: HAL version is unspecified by caller, create CameraClient
-        // based on device version reported by the HAL.
-        switch(deviceVersion) {
-          case CAMERA_DEVICE_API_VERSION_1_0:
-            client = new CameraClient(this, cameraClient,
-                    clientPackageName, cameraId,
-                    facing, callingPid, clientUid, getpid(), legacyMode);
-            break;
-          case CAMERA_DEVICE_API_VERSION_2_0:
-          case CAMERA_DEVICE_API_VERSION_2_1:
-          case CAMERA_DEVICE_API_VERSION_3_0:
-          case CAMERA_DEVICE_API_VERSION_3_1:
-          case CAMERA_DEVICE_API_VERSION_3_2:
-            client = new Camera2Client(this, cameraClient,
-                    clientPackageName, cameraId,
-                    facing, callingPid, clientUid, getpid(), legacyMode);
-            break;
-          case -1:
-            ALOGE("Invalid camera id %d", cameraId);
+        // Get state for the given cameraId
+        auto state = getCameraState(cameraId);
+        if (state == nullptr) {
+            ALOGE("CameraService::connect X (PID %d) rejected (no camera device with ID %s)",
+                clientPid, cameraId.string());
             return BAD_VALUE;
-          default:
-            ALOGE("Unknown camera device HAL version: %d", deviceVersion);
-            return INVALID_OPERATION;
         }
-    } else {
-        // A particular HAL version is requested by caller. Create CameraClient
-        // based on the requested HAL version.
-        if (deviceVersion > CAMERA_DEVICE_API_VERSION_1_0 &&
-            halVersion == CAMERA_DEVICE_API_VERSION_1_0) {
-            // Only support higher HAL version device opened as HAL1.0 device.
-            client = new CameraClient(this, cameraClient,
-                    clientPackageName, cameraId,
-                    facing, callingPid, clientUid, getpid(), legacyMode);
-        } else {
-            // Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet.
-            ALOGE("Invalid camera HAL version %x: HAL %x device can only be"
-                    " opened as HAL %x device", halVersion, deviceVersion,
-                    CAMERA_DEVICE_API_VERSION_1_0);
-            return INVALID_OPERATION;
+
+        // Make descriptor for incoming client
+        clientDescriptor = CameraClientManager::makeClientDescriptor(cameraId,
+                sp<BasicClient>{nullptr}, static_cast<int32_t>(state->getCost()),
+                state->getConflicting(),
+                getCameraPriorityFromProcState(priorities[priorities.size() - 1]), clientPid);
+
+        // Find clients that would be evicted
+        auto evicted = mActiveClientManager.wouldEvict(clientDescriptor);
+
+        // If the incoming client was 'evicted,' higher priority clients have the camera in the
+        // background, so we cannot do evictions
+        if (std::find(evicted.begin(), evicted.end(), clientDescriptor) != evicted.end()) {
+            ALOGE("CameraService::connect X (PID %d) rejected (existing client(s) with higher"
+                    " priority).", clientPid);
+
+            sp<BasicClient> clientSp = clientDescriptor->getValue();
+            String8 curTime = getFormattedCurrentTime();
+            auto incompatibleClients =
+                    mActiveClientManager.getIncompatibleClients(clientDescriptor);
+
+            String8 msg = String8::format("%s : DENIED connect device %s client for package %s "
+                    "(PID %d, priority %d)", curTime.string(),
+                    cameraId.string(), packageName.string(), clientPid,
+                    getCameraPriorityFromProcState(priorities[priorities.size() - 1]));
+
+            for (auto& i : incompatibleClients) {
+                msg.appendFormat("\n   - Blocked by existing device %s client for package %s"
+                        "(PID %" PRId32 ", priority %" PRId32 ")", i->getKey().string(),
+                        String8{i->getValue()->getPackageName()}.string(), i->getOwnerId(),
+                        i->getPriority());
+            }
+
+            // Log the client's attempt
+            mEventLog.add(msg);
+
+            return -EBUSY;
+        }
+
+        for (auto& i : evicted) {
+            sp<BasicClient> clientSp = i->getValue();
+            if (clientSp.get() == nullptr) {
+                ALOGE("%s: Invalid state: Null client in active client list.", __FUNCTION__);
+
+                // TODO: Remove this
+                LOG_ALWAYS_FATAL("%s: Invalid state for CameraService, null client in active list",
+                        __FUNCTION__);
+                mActiveClientManager.remove(i);
+                continue;
+            }
+
+            ALOGE("CameraService::connect evicting conflicting client for camera ID %s",
+                    i->getKey().string());
+            evictedClients.push_back(clientSp);
+
+            String8 curTime = getFormattedCurrentTime();
+
+            // Log the clients evicted
+            mEventLog.add(String8::format("%s : EVICT device %s client for package %s (PID %"
+                    PRId32 ", priority %" PRId32 ")\n   - Evicted by device %s client for "
+                    "package %s (PID %d, priority %" PRId32 ")", curTime.string(),
+                    i->getKey().string(), String8{clientSp->getPackageName()}.string(),
+                    i->getOwnerId(), i->getPriority(), cameraId.string(),
+                    packageName.string(), clientPid,
+                    getCameraPriorityFromProcState(priorities[priorities.size() - 1])));
+
+            // Notify the client of disconnection
+            clientSp->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+                    CaptureResultExtras());
         }
     }
 
-    status_t status = connectFinishUnsafe(client, client->getRemote());
-    if (status != OK) {
-        // this is probably not recoverable.. maybe the client can try again
-        return status;
+    // Do not hold mServiceLock while disconnecting clients, but retain the condition blocking
+    // other clients from connecting in mServiceLockWrapper if held
+    mServiceLock.unlock();
+
+    // Clear caller identity temporarily so client disconnect PID checks work correctly
+    int64_t token = IPCThreadState::self()->clearCallingIdentity();
+
+    // Destroy evicted clients
+    for (auto& i : evictedClients) {
+        // Disconnect is blocking, and should only have returned when HAL has cleaned up
+        i->disconnect(); // Clients will remove themselves from the active client list here
+    }
+    evictedClients.clear();
+
+    IPCThreadState::self()->restoreCallingIdentity(token);
+
+    // Once clients have been disconnected, relock
+    mServiceLock.lock();
+
+    // Check again if the device was unplugged or something while we weren't holding mServiceLock
+    if ((ret = checkIfDeviceIsUsable(cameraId)) != NO_ERROR) {
+        return ret;
     }
 
-    mClient[cameraId] = client;
-    LOG1("CameraService::connect X (id %d, this pid is %d)", cameraId,
-         getpid());
-
-    return OK;
+    *partial = clientDescriptor;
+    return NO_ERROR;
 }
 
 status_t CameraService::connect(
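handleEvictionsLocked() above delegates the actual decision to the ClientManager added under utils/ (outside this hunk). The rule it depends on is: an incoming client may evict conflicting active clients only if its priority is at least as high; otherwise the incoming descriptor itself comes back from wouldEvict() and the connect is denied with -EBUSY. A self-contained, hedged sketch of that rule (simplified stand-in types; resource-cost accounting and same-owner handling are omitted):

    #include <cstdint>
    #include <string>
    #include <vector>

    // Illustrative stand-in for resource_policy::ClientDescriptor.
    struct Descriptor {
        std::string key;                      // camera ID
        std::vector<std::string> conflicting; // camera IDs this client conflicts with
        int32_t priority;                     // from getCameraPriorityFromProcState(); higher wins
        int32_t ownerId;                      // owner PID
    };

    static bool conflicts(const Descriptor& a, const Descriptor& b) {
        if (a.key == b.key) return true;
        for (const auto& k : a.conflicting) if (k == b.key) return true;
        for (const auto& k : b.conflicting) if (k == a.key) return true;
        return false;
    }

    // Returns the descriptors that would have to go for 'incoming' to be admitted.
    // If 'incoming' itself appears in the result, the connect must be denied (-EBUSY).
    static std::vector<Descriptor> wouldEvictSketch(const std::vector<Descriptor>& active,
                                                    const Descriptor& incoming) {
        std::vector<Descriptor> evicted;
        for (const auto& cur : active) {
            if (!conflicts(cur, incoming)) continue;
            if (cur.priority > incoming.priority) {
                return {incoming};            // a higher-priority client holds the camera
            }
            evicted.push_back(cur);           // equal or lower priority: evict it
        }
        return evicted;
    }
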
@@ -822,47 +1020,18 @@
         /*out*/
         sp<ICamera>& device) {
 
-    String8 clientName8(clientPackageName);
-    int callingPid = getCallingPid();
+    status_t ret = NO_ERROR;
+    String8 id = String8::format("%d", cameraId);
+    sp<Client> client = nullptr;
+    ret = connectHelper<ICameraClient,Client>(cameraClient, id, CAMERA_HAL_API_VERSION_UNSPECIFIED,
+            clientPackageName, clientUid, API_1, false, false, /*out*/client);
 
-    LOG1("CameraService::connect E (pid %d \"%s\", id %d)", callingPid,
-            clientName8.string(), cameraId);
-
-    status_t status = validateConnect(cameraId, /*inout*/clientUid);
-    if (status != OK) {
-        return status;
+    if(ret != NO_ERROR) {
+        return ret;
     }
 
-
-    sp<Client> client;
-    {
-        Mutex::Autolock lock(mServiceLock);
-        sp<BasicClient> clientTmp;
-        if (!canConnectUnsafe(cameraId, clientPackageName,
-                              IInterface::asBinder(cameraClient),
-                              /*out*/clientTmp)) {
-            return -EBUSY;
-        } else if (client.get() != NULL) {
-            device = static_cast<Client*>(clientTmp.get());
-            return OK;
-        }
-
-        status = connectHelperLocked(/*out*/client,
-                                     cameraClient,
-                                     cameraId,
-                                     clientPackageName,
-                                     clientUid,
-                                     callingPid);
-        if (status != OK) {
-            return status;
-        }
-
-    }
-    // important: release the mutex here so the client can call back
-    //    into the service from its destructor (can be at the end of the call)
-
     device = client;
-    return OK;
+    return NO_ERROR;
 }
 
 status_t CameraService::connectLegacy(
@@ -873,7 +1042,7 @@
         /*out*/
         sp<ICamera>& device) {
 
-    int apiVersion = mModule->getRawModule()->module_api_version;
+    int apiVersion = mModule->getModuleApiVersion();
     if (halVersion != CAMERA_HAL_API_VERSION_UNSPECIFIED &&
             apiVersion < CAMERA_MODULE_API_VERSION_2_3) {
         /*
@@ -887,70 +1056,41 @@
         return INVALID_OPERATION;
     }
 
-    String8 clientName8(clientPackageName);
-    int callingPid = getCallingPid();
+    status_t ret = NO_ERROR;
+    String8 id = String8::format("%d", cameraId);
+    sp<Client> client = nullptr;
+    ret = connectHelper<ICameraClient,Client>(cameraClient, id, halVersion, clientPackageName,
+            clientUid, API_1, true, false, /*out*/client);
 
-    LOG1("CameraService::connect legacy E (pid %d \"%s\", id %d)", callingPid,
-            clientName8.string(), cameraId);
-
-    status_t status = validateConnect(cameraId, /*inout*/clientUid);
-    if (status != OK) {
-        return status;
+    if(ret != NO_ERROR) {
+        return ret;
     }
 
-    sp<Client> client;
-    {
-        Mutex::Autolock lock(mServiceLock);
-        sp<BasicClient> clientTmp;
-        if (!canConnectUnsafe(cameraId, clientPackageName,
-                              IInterface::asBinder(cameraClient),
-                              /*out*/clientTmp)) {
-            return -EBUSY;
-        } else if (client.get() != NULL) {
-            device = static_cast<Client*>(clientTmp.get());
-            return OK;
-        }
-
-        status = connectHelperLocked(/*out*/client,
-                                     cameraClient,
-                                     cameraId,
-                                     clientPackageName,
-                                     clientUid,
-                                     callingPid,
-                                     halVersion,
-                                     /*legacyMode*/true);
-        if (status != OK) {
-            return status;
-        }
-
-    }
-    // important: release the mutex here so the client can call back
-    //    into the service from its destructor (can be at the end of the call)
-
     device = client;
-    return OK;
+    return NO_ERROR;
 }
 
-bool CameraService::validCameraIdForSetTorchMode(const String8& cameraId) {
-    // invalid string for int
-    if (cameraId.string() == NULL) {
-        return false;
-    }
-    errno = 0;
-    char *endptr;
-    long id = strtol(cameraId.string(), &endptr, 10); // base 10
-    if (errno || id > INT_MAX || id < INT_MIN || *endptr != 0) {
-        return false;
+status_t CameraService::connectDevice(
+        const sp<ICameraDeviceCallbacks>& cameraCb,
+        int cameraId,
+        const String16& clientPackageName,
+        int clientUid,
+        /*out*/
+        sp<ICameraDeviceUser>& device) {
+
+    status_t ret = NO_ERROR;
+    String8 id = String8::format("%d", cameraId);
+    sp<CameraDeviceClient> client = nullptr;
+    ret = connectHelper<ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
+            CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName, clientUid, API_2, false, false,
+            /*out*/client);
+
+    if(ret != NO_ERROR) {
+        return ret;
     }
 
-    // id matches one of the plugged-in devices?
-    ICameraServiceListener::Status deviceStatus = getStatus(id);
-    if (deviceStatus != ICameraServiceListener::STATUS_PRESENT &&
-            deviceStatus != ICameraServiceListener::STATUS_NOT_AVAILABLE) {
-        return false;
-    }
-
-    return true;
+    device = client;
+    return NO_ERROR;
 }
 
 status_t CameraService::setTorchMode(const String16& cameraId, bool enabled,
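All of connect(), connectLegacy(), connectDevice() and initializeShimMetadata() now funnel into a single connectHelper<CALLBACK, CLIENT>() template. Its declaration lives in CameraService.h (outside this diff); the shape below is reconstructed from the call sites in this file, and the parameter names are guesses:

    // Reconstructed sketch; the real declaration may differ in detail.
    template<class CALLBACK, class CLIENT>
    status_t connectHelper(const sp<CALLBACK>& cameraCb,
            const String8& cameraId,
            int halVersion,              // CAMERA_HAL_API_VERSION_UNSPECIFIED for the default path
            const String16& clientPackageName,
            int clientUid,
            apiLevel effectiveApiLevel,  // API_1 or API_2
            bool legacyMode,             // true only from connectLegacy()
            bool shimUpdateOnly,         // true only from initializeShimMetadata()
            /*out*/ sp<CLIENT>& device);
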
@@ -963,7 +1103,15 @@
     String8 id = String8(cameraId.string());
 
     // verify id is valid.
-    if (validCameraIdForSetTorchMode(id) == false) {
+    auto state = getCameraState(id);
+    if (state == nullptr) {
+        ALOGE("%s: camera id is invalid %s", id.string());
+        return -EINVAL;
+    }
+
+    ICameraServiceListener::Status cameraStatus = state->getStatus();
+    if (cameraStatus != ICameraServiceListener::STATUS_PRESENT &&
+            cameraStatus != ICameraServiceListener::STATUS_NOT_AVAILABLE) {
         ALOGE("%s: camera id is invalid %s", id.string());
         return -EINVAL;
     }
@@ -979,8 +1127,7 @@
         }
 
         if (status == ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE) {
-            if (getStatus(atoi(id.string())) ==
-                        ICameraServiceListener::STATUS_NOT_AVAILABLE) {
+            if (cameraStatus == ICameraServiceListener::STATUS_NOT_AVAILABLE) {
                 ALOGE("%s: torch mode of camera %s is not available because "
                         "camera is in use", __FUNCTION__, id.string());
                 return -EBUSY;
@@ -1022,174 +1169,21 @@
     return OK;
 }
 
-status_t CameraService::connectFinishUnsafe(const sp<BasicClient>& client,
-                                            const sp<IBinder>& remoteCallback) {
-    status_t status = client->initialize(mModule);
-    if (status != OK) {
-        ALOGE("%s: Could not initialize client from HAL module.", __FUNCTION__);
-        return status;
-    }
-    if (remoteCallback != NULL) {
-        remoteCallback->linkToDeath(this);
-    }
-
-    return OK;
-}
-
-status_t CameraService::connectPro(
-                                        const sp<IProCameraCallbacks>& cameraCb,
-                                        int cameraId,
-                                        const String16& clientPackageName,
-                                        int clientUid,
-                                        /*out*/
-                                        sp<IProCameraUser>& device)
-{
-    if (cameraCb == 0) {
-        ALOGE("%s: Callback must not be null", __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    String8 clientName8(clientPackageName);
-    int callingPid = getCallingPid();
-
-    LOG1("CameraService::connectPro E (pid %d \"%s\", id %d)", callingPid,
-            clientName8.string(), cameraId);
-    status_t status = validateConnect(cameraId, /*inout*/clientUid);
-    if (status != OK) {
-        return status;
-    }
-
-    sp<ProClient> client;
-    {
-        Mutex::Autolock lock(mServiceLock);
-        {
-            sp<BasicClient> client;
-            if (!canConnectUnsafe(cameraId, clientPackageName,
-                                  IInterface::asBinder(cameraCb),
-                                  /*out*/client)) {
-                return -EBUSY;
-            }
-        }
-
-        int facing = -1;
-        int deviceVersion = getDeviceVersion(cameraId, &facing);
-
-        switch(deviceVersion) {
-          case CAMERA_DEVICE_API_VERSION_1_0:
-            ALOGE("Camera id %d uses HALv1, doesn't support ProCamera",
-                  cameraId);
-            return -EOPNOTSUPP;
+void CameraService::notifySystemEvent(int eventId, int arg0) {
+    switch(eventId) {
+        case ICameraService::USER_SWITCHED: {
+            doUserSwitch(/*newUserId*/arg0);
             break;
-          case CAMERA_DEVICE_API_VERSION_2_0:
-          case CAMERA_DEVICE_API_VERSION_2_1:
-          case CAMERA_DEVICE_API_VERSION_3_0:
-          case CAMERA_DEVICE_API_VERSION_3_1:
-          case CAMERA_DEVICE_API_VERSION_3_2:
-            client = new ProCamera2Client(this, cameraCb, clientPackageName,
-                    cameraId, facing, callingPid, clientUid, getpid());
+        }
+        case ICameraService::NO_EVENT:
+        default: {
+            ALOGW("%s: Received invalid system event from system_server: %d", __FUNCTION__,
+                    eventId);
             break;
-          case -1:
-            ALOGE("Invalid camera id %d", cameraId);
-            return BAD_VALUE;
-          default:
-            ALOGE("Unknown camera device HAL version: %d", deviceVersion);
-            return INVALID_OPERATION;
         }
-
-        status_t status = connectFinishUnsafe(client, client->getRemote());
-        if (status != OK) {
-            return status;
-        }
-
-        mProClientList[cameraId].push(client);
-
-        LOG1("CameraService::connectPro X (id %d, this pid is %d)", cameraId,
-                getpid());
     }
-    // important: release the mutex here so the client can call back
-    //    into the service from its destructor (can be at the end of the call)
-    device = client;
-    return OK;
 }
 
-status_t CameraService::connectDevice(
-        const sp<ICameraDeviceCallbacks>& cameraCb,
-        int cameraId,
-        const String16& clientPackageName,
-        int clientUid,
-        /*out*/
-        sp<ICameraDeviceUser>& device)
-{
-
-    String8 clientName8(clientPackageName);
-    int callingPid = getCallingPid();
-
-    LOG1("CameraService::connectDevice E (pid %d \"%s\", id %d)", callingPid,
-            clientName8.string(), cameraId);
-
-    status_t status = validateConnect(cameraId, /*inout*/clientUid);
-    if (status != OK) {
-        return status;
-    }
-
-    sp<CameraDeviceClient> client;
-    {
-        Mutex::Autolock lock(mServiceLock);
-        {
-            sp<BasicClient> client;
-            if (!canConnectUnsafe(cameraId, clientPackageName,
-                                  IInterface::asBinder(cameraCb),
-                                  /*out*/client)) {
-                return -EBUSY;
-            }
-        }
-
-        int facing = -1;
-        int deviceVersion = getDeviceVersion(cameraId, &facing);
-
-        // give flashlight a chance to close devices if necessary.
-        mFlashlight->prepareDeviceOpen(String8::format("%d", cameraId));
-
-        switch(deviceVersion) {
-          case CAMERA_DEVICE_API_VERSION_1_0:
-            ALOGW("Camera using old HAL version: %d", deviceVersion);
-            return -EOPNOTSUPP;
-           // TODO: don't allow 2.0  Only allow 2.1 and higher
-          case CAMERA_DEVICE_API_VERSION_2_0:
-          case CAMERA_DEVICE_API_VERSION_2_1:
-          case CAMERA_DEVICE_API_VERSION_3_0:
-          case CAMERA_DEVICE_API_VERSION_3_1:
-          case CAMERA_DEVICE_API_VERSION_3_2:
-            client = new CameraDeviceClient(this, cameraCb, clientPackageName,
-                    cameraId, facing, callingPid, clientUid, getpid());
-            break;
-          case -1:
-            ALOGE("Invalid camera id %d", cameraId);
-            return BAD_VALUE;
-          default:
-            ALOGE("Unknown camera device HAL version: %d", deviceVersion);
-            return INVALID_OPERATION;
-        }
-
-        status_t status = connectFinishUnsafe(client, client->getRemote());
-        if (status != OK) {
-            // this is probably not recoverable.. maybe the client can try again
-            return status;
-        }
-
-        LOG1("CameraService::connectDevice X (id %d, this pid is %d)", cameraId,
-                getpid());
-
-        mClient[cameraId] = client;
-    }
-    // important: release the mutex here so the client can call back
-    //    into the service from its destructor (can be at the end of the call)
-
-    device = client;
-    return OK;
-}
-
-
 status_t CameraService::addListener(
                                 const sp<ICameraServiceListener>& listener) {
     ALOGV("%s: Add listener %p", __FUNCTION__, listener.get());
@@ -1201,23 +1195,29 @@
 
     Mutex::Autolock lock(mServiceLock);
 
-    Vector<sp<ICameraServiceListener> >::iterator it, end;
-    for (it = mListenerList.begin(); it != mListenerList.end(); ++it) {
-        if (IInterface::asBinder(*it) == IInterface::asBinder(listener)) {
-            ALOGW("%s: Tried to add listener %p which was already subscribed",
-                  __FUNCTION__, listener.get());
-            return ALREADY_EXISTS;
+    {
+        Mutex::Autolock lock(mStatusListenerLock);
+        for (auto& it : mListenerList) {
+            if (IInterface::asBinder(it) == IInterface::asBinder(listener)) {
+                ALOGW("%s: Tried to add listener %p which was already subscribed",
+                      __FUNCTION__, listener.get());
+                return ALREADY_EXISTS;
+            }
         }
+
+        mListenerList.push_back(listener);
     }
 
-    mListenerList.push_back(listener);
 
     /* Immediately signal current status to this listener only */
     {
-        Mutex::Autolock m(mStatusMutex) ;
-        int numCams = getNumberOfCameras();
-        for (int i = 0; i < numCams; ++i) {
-            listener->onStatusChanged(mStatusList[i], i);
+        Mutex::Autolock lock(mCameraStatesLock);
+        for (auto& i : mCameraStates) {
+            // TODO: Update binder to use String16 for camera IDs and remove this.
+            int id = cameraIdToInt(i.first);
+            if (id == -1) continue;
+
+            listener->onStatusChanged(i.second->getStatus(), id);
         }
     }
 
@@ -1228,13 +1228,12 @@
             String16 id = String16(mTorchStatusMap.keyAt(i).string());
             listener->onTorchStatusChanged(mTorchStatusMap.valueAt(i), id);
         }
-
     }
 
     return OK;
 }
-status_t CameraService::removeListener(
-                                const sp<ICameraServiceListener>& listener) {
+
+status_t CameraService::removeListener(const sp<ICameraServiceListener>& listener) {
     ALOGV("%s: Remove listener %p", __FUNCTION__, listener.get());
 
     if (listener == 0) {
@@ -1244,11 +1243,13 @@
 
     Mutex::Autolock lock(mServiceLock);
 
-    Vector<sp<ICameraServiceListener> >::iterator it;
-    for (it = mListenerList.begin(); it != mListenerList.end(); ++it) {
-        if (IInterface::asBinder(*it) == IInterface::asBinder(listener)) {
-            mListenerList.erase(it);
-            return OK;
+    {
+        Mutex::Autolock lock(mStatusListenerLock);
+        for (auto it = mListenerList.begin(); it != mListenerList.end(); it++) {
+            if (IInterface::asBinder(*it) == IInterface::asBinder(listener)) {
+                mListenerList.erase(it);
+                return OK;
+            }
         }
     }
 
@@ -1258,10 +1259,7 @@
     return BAD_VALUE;
 }
 
-status_t CameraService::getLegacyParameters(
-            int cameraId,
-            /*out*/
-            String16* parameters) {
+status_t CameraService::getLegacyParameters(int cameraId, /*out*/String16* parameters) {
     ALOGV("%s: for camera ID = %d", __FUNCTION__, cameraId);
 
     if (parameters == NULL) {
@@ -1316,6 +1314,7 @@
             return OK;
         }
       case CAMERA_DEVICE_API_VERSION_3_2:
+      case CAMERA_DEVICE_API_VERSION_3_3:
         ALOGV("%s: Camera id %d uses HAL3.2 or newer, supports api1/api2 directly",
                 __FUNCTION__, cameraId);
         return OK;
@@ -1330,140 +1329,177 @@
     return OK;
 }
 
-void CameraService::removeClientByRemote(const wp<IBinder>& remoteBinder) {
-    int callingPid = getCallingPid();
-    LOG1("CameraService::removeClientByRemote E (pid %d)", callingPid);
-
-    // Declare this before the lock to make absolutely sure the
-    // destructor won't be called with the lock held.
+void CameraService::removeByClient(const BasicClient* client) {
     Mutex::Autolock lock(mServiceLock);
-
-    int outIndex;
-    sp<BasicClient> client = findClientUnsafe(remoteBinder, outIndex);
-
-    if (client != 0) {
-        // Found our camera, clear and leave.
-        LOG1("removeClient: clear camera %d", outIndex);
-
-        sp<IBinder> remote = client->getRemote();
-        if (remote != NULL) {
-            remote->unlinkToDeath(this);
-        }
-
-        mClient[outIndex].clear();
-    } else {
-
-        sp<ProClient> clientPro = findProClientUnsafe(remoteBinder);
-
-        if (clientPro != NULL) {
-            // Found our camera, clear and leave.
-            LOG1("removeClient: clear pro %p", clientPro.get());
-
-            IInterface::asBinder(clientPro->getRemoteCallback())->unlinkToDeath(this);
+    for (auto& i : mActiveClientManager.getAll()) {
+        auto clientSp = i->getValue();
+        if (clientSp.get() == client) {
+            mActiveClientManager.remove(i);
         }
     }
-
-    LOG1("CameraService::removeClientByRemote X (pid %d)", callingPid);
 }
 
-sp<CameraService::ProClient> CameraService::findProClientUnsafe(
-                        const wp<IBinder>& cameraCallbacksRemote)
-{
-    sp<ProClient> clientPro;
+bool CameraService::evictClientIdByRemote(const wp<IBinder>& remote) {
+    const int callingPid = getCallingPid();
+    const int servicePid = getpid();
+    bool ret = false;
+    {
+        // Acquire mServiceLock and prevent other clients from connecting
+        std::unique_ptr<AutoConditionLock> lock =
+                AutoConditionLock::waitAndAcquire(mServiceLockWrapper);
 
-    for (int i = 0; i < mNumberOfCameras; ++i) {
-        Vector<size_t> removeIdx;
 
-        for (size_t j = 0; j < mProClientList[i].size(); ++j) {
-            wp<ProClient> cl = mProClientList[i][j];
+        std::vector<sp<BasicClient>> evicted;
+        for (auto& i : mActiveClientManager.getAll()) {
+            auto clientSp = i->getValue();
+            if (clientSp.get() == nullptr) {
+                ALOGE("%s: Dead client still in mActiveClientManager.", __FUNCTION__);
+                mActiveClientManager.remove(i);
+                continue;
+            }
+            if (remote == clientSp->getRemote() && (callingPid == servicePid ||
+                    callingPid == clientSp->getClientPid())) {
+                mActiveClientManager.remove(i);
+                evicted.push_back(clientSp);
 
-            sp<ProClient> clStrong = cl.promote();
-            if (clStrong != NULL && clStrong->getRemote() == cameraCallbacksRemote) {
-                clientPro = clStrong;
-                break;
-            } else if (clStrong == NULL) {
-                // mark to clean up dead ptr
-                removeIdx.push(j);
+                // Notify the client of disconnection
+                clientSp->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+                        CaptureResultExtras());
             }
         }
 
-        // remove stale ptrs (in reverse so the indices dont change)
-        for (ssize_t j = (ssize_t)removeIdx.size() - 1; j >= 0; --j) {
-            mProClientList[i].removeAt(removeIdx[j]);
+        // Do not hold mServiceLock while disconnecting clients, but retain the condition blocking
+        // other clients from connecting in mServiceLockWrapper if held
+        mServiceLock.unlock();
+
+        // Do not clear the caller identity; the remote caller should be the client process
+
+        for (auto& i : evicted) {
+            if (i.get() != nullptr) {
+                i->disconnect();
+                ret = true;
+            }
         }
 
-    }
+        // Reacquire mServiceLock
+        mServiceLock.lock();
 
-    return clientPro;
+    } // lock is destroyed, allow further connect calls
+
+    return ret;
 }
 
-sp<CameraService::BasicClient> CameraService::findClientUnsafe(
-                        const wp<IBinder>& cameraClient, int& outIndex) {
-    sp<BasicClient> client;
 
-    for (int i = 0; i < mNumberOfCameras; i++) {
+std::shared_ptr<CameraService::CameraState> CameraService::getCameraState(
+        const String8& cameraId) const {
+    std::shared_ptr<CameraState> state;
+    {
+        Mutex::Autolock lock(mCameraStatesLock);
+        auto iter = mCameraStates.find(cameraId);
+        if (iter != mCameraStates.end()) {
+            state = iter->second;
+        }
+    }
+    return state;
+}
 
-        // This happens when we have already disconnected (or this is
-        // just another unused camera).
-        if (mClient[i] == 0) continue;
+sp<CameraService::BasicClient> CameraService::removeClientLocked(const String8& cameraId) {
+    // Remove from active clients list
+    auto clientDescriptorPtr = mActiveClientManager.remove(cameraId);
+    if (clientDescriptorPtr == nullptr) {
+        ALOGW("%s: Could not evict client, no client for camera ID %s", __FUNCTION__,
+                cameraId.string());
+        return sp<BasicClient>{nullptr};
+    }
 
-        // Promote mClient. It can fail if we are called from this path:
-        // Client::~Client() -> disconnect() -> removeClientByRemote().
-        client = mClient[i].promote();
+    return clientDescriptorPtr->getValue();
+}
 
-        // Clean up stale client entry
-        if (client == NULL) {
-            mClient[i].clear();
+void CameraService::doUserSwitch(int newUserId) {
+    // Acquire mServiceLock and prevent other clients from connecting
+    std::unique_ptr<AutoConditionLock> lock =
+            AutoConditionLock::waitAndAcquire(mServiceLockWrapper);
+
+    if (newUserId <= 0) {
+        ALOGW("%s: Bad user ID %d given during user switch, resetting to default.", __FUNCTION__,
+                newUserId);
+        newUserId = DEFAULT_LAST_USER_ID;
+    }
+
+    mLastUserId = newUserId;
+
+    // Current user has switched, evict all current clients.
+    std::vector<sp<BasicClient>> evicted;
+    for (auto& i : mActiveClientManager.getAll()) {
+        auto clientSp = i->getValue();
+
+        if (clientSp.get() == nullptr) {
+            ALOGE("%s: Dead client still in mActiveClientManager.", __FUNCTION__);
             continue;
         }
 
-        if (cameraClient == client->getRemote()) {
-            // Found our camera
-            outIndex = i;
-            return client;
-        }
+        evicted.push_back(clientSp);
+
+        String8 curTime = getFormattedCurrentTime();
+
+        ALOGE("Evicting conflicting client for camera ID %s due to user change",
+                i->getKey().string());
+        // Log the clients evicted
+        mEventLog.add(String8::format("%s : EVICT device %s client for package %s (PID %"
+                PRId32 ", priority %" PRId32 ")\n   - Evicted due to user switch.",
+                curTime.string(), i->getKey().string(),
+                String8{clientSp->getPackageName()}.string(), i->getOwnerId(),
+                i->getPriority()));
+
     }
 
-    outIndex = -1;
-    return NULL;
+    // Do not hold mServiceLock while disconnecting clients, but retain the condition
+    // blocking other clients from connecting in mServiceLockWrapper if held.
+    mServiceLock.unlock();
+
+    // Clear caller identity temporarily so client disconnect PID checks work correctly
+    int64_t token = IPCThreadState::self()->clearCallingIdentity();
+
+    for (auto& i : evicted) {
+        i->disconnect();
+    }
+
+    IPCThreadState::self()->restoreCallingIdentity(token);
+
+    // Reacquire mServiceLock
+    mServiceLock.lock();
 }
 
-CameraService::BasicClient* CameraService::getClientByIdUnsafe(int cameraId) {
-    if (cameraId < 0 || cameraId >= mNumberOfCameras) return NULL;
-    return mClient[cameraId].unsafe_get();
+void CameraService::logDisconnected(const String8& cameraId, int clientPid,
+        const String8& clientPackage) {
+
+    String8 curTime = getFormattedCurrentTime();
+    // Log the client being disconnected
+    mEventLog.add(String8::format("%s : DISCONNECT device %s client for package %s (PID %d)",
+            curTime.string(), cameraId.string(), clientPackage.string(), clientPid));
 }
 
-Mutex* CameraService::getClientLockById(int cameraId) {
-    if (cameraId < 0 || cameraId >= mNumberOfCameras) return NULL;
-    return &mClientLock[cameraId];
+void CameraService::logConnected(const String8& cameraId, int clientPid,
+        const String8& clientPackage) {
+
+    String8 curTime = getFormattedCurrentTime();
+    // Log the client being connected
+    mEventLog.add(String8::format("%s : CONNECT device %s client for package %s (PID %d)",
+            curTime.string(), cameraId.string(), clientPackage.string(), clientPid));
 }
 
-sp<CameraService::BasicClient> CameraService::getClientByRemote(
-                                const wp<IBinder>& cameraClient) {
+status_t CameraService::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+        uint32_t flags) {
 
-    // Declare this before the lock to make absolutely sure the
-    // destructor won't be called with the lock held.
-    sp<BasicClient> client;
+    const int pid = getCallingPid();
+    const int selfPid = getpid();
 
-    Mutex::Autolock lock(mServiceLock);
-
-    int outIndex;
-    client = findClientUnsafe(cameraClient, outIndex);
-
-    return client;
-}
-
-status_t CameraService::onTransact(
-    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
     // Permission checks
     switch (code) {
         case BnCameraService::CONNECT:
-        case BnCameraService::CONNECT_PRO:
         case BnCameraService::CONNECT_DEVICE:
-        case BnCameraService::CONNECT_LEGACY:
-            const int pid = getCallingPid();
-            const int self_pid = getpid();
-            if (pid != self_pid) {
+        case BnCameraService::CONNECT_LEGACY: {
+            if (pid != selfPid) {
                 // we're called from a different process, do the real check
                 if (!checkCallingPermission(
                         String16("android.permission.CAMERA"))) {
@@ -1474,29 +1510,26 @@
                 }
             }
             break;
+        }
+        case BnCameraService::NOTIFY_SYSTEM_EVENT: {
+            if (pid != selfPid) {
+                // Ensure we're being called by system_server, or similar process with
+                // permissions to notify the camera service about system events
+                if (!checkCallingPermission(
+                        String16("android.permission.CAMERA_SEND_SYSTEM_EVENTS"))) {
+                    const int uid = getCallingUid();
+                    ALOGE("Permission Denial: cannot send updates to camera service about system"
+                            " events from pid=%d, uid=%d", pid, uid);
+                    return PERMISSION_DENIED;
+                }
+            }
+            break;
+        }
     }
 
     return BnCameraService::onTransact(code, data, reply, flags);
 }
 
-// The reason we need this busy bit is a new CameraService::connect() request
-// may come in while the previous Client's destructor has not been run or is
-// still running. If the last strong reference of the previous Client is gone
-// but the destructor has not been finished, we should not allow the new Client
-// to be created because we need to wait for the previous Client to tear down
-// the hardware first.
-void CameraService::setCameraBusy(int cameraId) {
-    android_atomic_write(1, &mBusy[cameraId]);
-
-    ALOGV("setCameraBusy cameraId=%d", cameraId);
-}
-
-void CameraService::setCameraFree(int cameraId) {
-    android_atomic_write(0, &mBusy[cameraId]);
-
-    ALOGV("setCameraFree cameraId=%d", cameraId);
-}
-
 // We share the media players for shutter and recording sound for all clients.
 // A reference count is kept to determine when we will actually release the
 // media players.
@@ -1565,7 +1598,6 @@
 
     mRemoteCallback = cameraClient;
 
-    cameraService->setCameraBusy(cameraId);
     cameraService->loadSound();
 
     LOG1("Client::Client X (pid %d, id %d)", callingPid, cameraId);
@@ -1587,7 +1619,7 @@
         int cameraId, int cameraFacing,
         int clientPid, uid_t clientUid,
         int servicePid):
-        mClientPackageName(clientPackageName)
+        mClientPackageName(clientPackageName), mDisconnected(false)
 {
     mCameraService = cameraService;
     mRemoteBinder = remoteCallback;
@@ -1606,14 +1638,38 @@
 }
 
 void CameraService::BasicClient::disconnect() {
-    ALOGV("BasicClient::disconnect");
-    mCameraService->removeClientByRemote(mRemoteBinder);
+    if (mDisconnected) {
+        ALOGE("%s: Disconnect called on already disconnected client for device %d", __FUNCTION__,
+                mCameraId);
+        return;
+    }
+    mDisconnected = true;
+
+    mCameraService->removeByClient(this);
+    mCameraService->logDisconnected(String8::format("%d", mCameraId), mClientPid,
+            String8(mClientPackageName));
+
+    sp<IBinder> remote = getRemote();
+    if (remote != nullptr) {
+        remote->unlinkToDeath(mCameraService);
+    }
 
     finishCameraOps();
+    ALOGI("%s: Disconnected client for camera %d for PID %d", __FUNCTION__, mCameraId, mClientPid);
+
     // client shouldn't be able to call into us anymore
     mClientPid = 0;
 }
 
+String16 CameraService::BasicClient::getPackageName() const {
+    return mClientPackageName;
+}
+
+int CameraService::BasicClient::getClientPid() const {
+    return mClientPid;
+}
+
 status_t CameraService::BasicClient::startCameraOps() {
     int32_t res;
     // Notify app ops that the camera is not available
@@ -1639,7 +1695,7 @@
 
     // Transition device availability listeners from PRESENT -> NOT_AVAILABLE
     mCameraService->updateStatus(ICameraServiceListener::STATUS_NOT_AVAILABLE,
-            mCameraId);
+            String8::format("%d", mCameraId));
 
     return OK;
 }
@@ -1652,18 +1708,12 @@
                 mClientPackageName);
         mOpsActive = false;
 
-        // Notify device availability listeners that this camera is available
-        // again
+        auto rejected = {ICameraServiceListener::STATUS_NOT_PRESENT,
+                ICameraServiceListener::STATUS_ENUMERATING};
 
-        StatusVector rejectSourceStates;
-        rejectSourceStates.push_back(ICameraServiceListener::STATUS_NOT_PRESENT);
-        rejectSourceStates.push_back(ICameraServiceListener::STATUS_ENUMERATING);
-
-        // Transition to PRESENT if the camera is not in either of above 2
-        // states
+        // Transition to PRESENT if the camera is not in either of the rejected states
         mCameraService->updateStatus(ICameraServiceListener::STATUS_PRESENT,
-                mCameraId,
-                &rejectSourceStates);
+                String8::format("%d", mCameraId), rejected);
 
         // Notify flashlight that a camera device is closed.
         mCameraService->mFlashlight->deviceClosed(
@@ -1710,26 +1760,15 @@
 
 // ----------------------------------------------------------------------------
 
-Mutex* CameraService::Client::getClientLockFromCookie(void* user) {
-    return gCameraService->getClientLockById((int)(intptr_t) user);
-}
-
-// Provide client pointer for callbacks. Client lock returned from getClientLockFromCookie should
-// be acquired for this to be safe
-CameraService::Client* CameraService::Client::getClientFromCookie(void* user) {
-    BasicClient *basicClient = gCameraService->getClientByIdUnsafe((int)(intptr_t) user);
-    // OK: only CameraClient calls this, and they already cast anyway.
-    Client* client = static_cast<Client*>(basicClient);
-
-    // This could happen if the Client is in the process of shutting down (the
-    // last strong reference is gone, but the destructor hasn't finished
-    // stopping the hardware).
-    if (client == NULL) return NULL;
-
-    // destruction already started, so should not be accessed
-    if (client->mDestructionStarted) return NULL;
-
-    return client;
+// Provide client strong pointer for callbacks.
+sp<CameraService::Client> CameraService::Client::getClientFromCookie(void* user) {
+    String8 cameraId = String8::format("%d", (int)(intptr_t) user);
+    auto clientDescriptor = gCameraService->mActiveClientManager.get(cameraId);
+    if (clientDescriptor != nullptr) {
+        return sp<Client>{
+                static_cast<Client*>(clientDescriptor->getValue().get())};
+    }
+    return sp<Client>{nullptr};
 }
 
 void CameraService::Client::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
@@ -1741,7 +1780,6 @@
 void CameraService::Client::disconnect() {
     ALOGV("Client::disconnect");
     BasicClient::disconnect();
-    mCameraService->setCameraFree(mCameraId);
 }
 
 CameraService::Client::OpsCallback::OpsCallback(wp<BasicClient> client):
@@ -1757,30 +1795,101 @@
 }
 
 // ----------------------------------------------------------------------------
-//                  IProCamera
+//                  CameraState
 // ----------------------------------------------------------------------------
 
-CameraService::ProClient::ProClient(const sp<CameraService>& cameraService,
-        const sp<IProCameraCallbacks>& remoteCallback,
-        const String16& clientPackageName,
-        int cameraId,
-        int cameraFacing,
-        int clientPid,
-        uid_t clientUid,
-        int servicePid)
-        : CameraService::BasicClient(cameraService, IInterface::asBinder(remoteCallback),
-                clientPackageName, cameraId, cameraFacing,
-                clientPid,  clientUid, servicePid)
-{
-    mRemoteCallback = remoteCallback;
+CameraService::CameraState::CameraState(const String8& id, int cost,
+        const std::set<String8>& conflicting) : mId(id),
+        mStatus(ICameraServiceListener::STATUS_PRESENT), mCost(cost), mConflicting(conflicting) {}
+
+CameraService::CameraState::~CameraState() {}
+
+ICameraServiceListener::Status CameraService::CameraState::getStatus() const {
+    Mutex::Autolock lock(mStatusLock);
+    return mStatus;
 }
 
-CameraService::ProClient::~ProClient() {
+CameraParameters CameraService::CameraState::getShimParams() const {
+    return mShimParams;
 }
 
-void CameraService::ProClient::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
-        const CaptureResultExtras& resultExtras) {
-    mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0);
+void CameraService::CameraState::setShimParams(const CameraParameters& params) {
+    mShimParams = params;
+}
+
+int CameraService::CameraState::getCost() const {
+    return mCost;
+}
+
+std::set<String8> CameraService::CameraState::getConflicting() const {
+    return mConflicting;
+}
+
+String8 CameraService::CameraState::getId() const {
+    return mId;
+}
+
+// ----------------------------------------------------------------------------
+//                  CameraClientManager
+// ----------------------------------------------------------------------------
+
+CameraService::CameraClientManager::~CameraClientManager() {}
+
+sp<CameraService::BasicClient> CameraService::CameraClientManager::getCameraClient(
+        const String8& id) const {
+    auto descriptor = get(id);
+    if (descriptor == nullptr) {
+        return sp<BasicClient>{nullptr};
+    }
+    return descriptor->getValue();
+}
+
+String8 CameraService::CameraClientManager::toString() const {
+    auto all = getAll();
+    String8 ret("[");
+    bool hasAny = false;
+    for (auto& i : all) {
+        hasAny = true;
+        String8 key = i->getKey();
+        int32_t cost = i->getCost();
+        int32_t pid = i->getOwnerId();
+        int32_t priority = i->getPriority();
+        auto conflicting = i->getConflicting();
+        auto clientSp = i->getValue();
+        String8 packageName;
+        if (clientSp.get() != nullptr) {
+            packageName = String8{clientSp->getPackageName()};
+        }
+        ret.appendFormat("\n(Camera ID: %s, Cost: %" PRId32 ", PID: %" PRId32 ", Priority: %"
+                PRId32 ", ", key.string(), cost, pid, priority);
+
+        if (packageName.size() != 0) {
+            ret.appendFormat("Client Package Name: %s", packageName.string());
+        }
+
+        ret.append(", Conflicting Client Devices: {");
+        for (auto& j : conflicting) {
+            ret.appendFormat("%s, ", j.string());
+        }
+        ret.append("})");
+    }
+    if (hasAny) ret.append("\n");
+    ret.append("]\n");
+    return ret;
+}
+
+CameraService::DescriptorPtr CameraService::CameraClientManager::makeClientDescriptor(
+        const String8& key, const sp<BasicClient>& value, int32_t cost,
+        const std::set<String8>& conflictingKeys, int32_t priority, int32_t ownerId) {
+
+    return std::make_shared<resource_policy::ClientDescriptor<String8, sp<BasicClient>>>(
+            key, value, cost, conflictingKeys, priority, ownerId);
+}
+
+CameraService::DescriptorPtr CameraService::CameraClientManager::makeClientDescriptor(
+        const sp<BasicClient>& value, const CameraService::DescriptorPtr& partial) {
+    return makeClientDescriptor(partial->getKey(), value, partial->getCost(),
+            partial->getConflicting(), partial->getPriority(), partial->getOwnerId());
 }
 
 // ----------------------------------------------------------------------------
@@ -1825,12 +1934,13 @@
             return NO_ERROR;
         }
 
-        const hw_module_t* common = mModule->getRawModule();
-        result = String8::format("Camera module HAL API version: 0x%x\n", common->hal_api_version);
-        result.appendFormat("Camera module API version: 0x%x\n", common->module_api_version);
-        result.appendFormat("Camera module name: %s\n", common->name);
-        result.appendFormat("Camera module author: %s\n", common->author);
-        result.appendFormat("Number of camera devices: %d\n\n", mNumberOfCameras);
+        result = String8::format("Camera module HAL API version: 0x%x\n", mModule->getHalApiVersion());
+        result.appendFormat("Camera module API version: 0x%x\n", mModule->getModuleApiVersion());
+        result.appendFormat("Camera module name: %s\n", mModule->getModuleName());
+        result.appendFormat("Camera module author: %s\n", mModule->getModuleAuthor());
+        result.appendFormat("Number of camera devices: %d\n", mNumberOfCameras);
+        String8 activeClientString = mActiveClientManager.toString();
+        result.appendFormat("Active Camera Clients:\n%s", activeClientString.string());
 
         sp<VendorTagDescriptor> desc = VendorTagDescriptor::getGlobalVendorTagDescriptor();
         if (desc == NULL) {
@@ -1845,11 +1955,31 @@
             desc->dump(fd, /*verbosity*/2, /*indentation*/4);
         }
 
-        for (int i = 0; i < mNumberOfCameras; i++) {
-            result = String8::format("Camera %d static information:\n", i);
+        result = String8("Prior client events (most recent at top):\n");
+
+        for (const auto& msg : mEventLog) {
+            result.appendFormat("%s\n", msg.string());
+        }
+
+        if (mEventLog.size() == DEFAULT_EVICTION_LOG_LENGTH) {
+            result.append("...\n");
+        }
+
+        write(fd, result.string(), result.size());
+
+        bool stateLocked = tryLock(mCameraStatesLock);
+        if (!stateLocked) {
+            result = String8::format("CameraStates in use, may be deadlocked\n");
+            write(fd, result.string(), result.size());
+        }
+
+        for (auto& state : mCameraStates) {
+            String8 cameraId = state.first;
+            result = String8::format("Camera %s information:\n", cameraId.string());
             camera_info info;
 
-            status_t rc = mModule->getCameraInfo(i, &info);
+            // TODO: Change getCameraInfo + HAL to use String cameraIds
+            status_t rc = mModule->getCameraInfo(cameraIdToInt(cameraId), &info);
             if (rc != OK) {
                 result.appendFormat("  Error reading static information!\n");
                 write(fd, result.string(), result.size());
@@ -1858,12 +1988,24 @@
                         info.facing == CAMERA_FACING_BACK ? "BACK" : "FRONT");
                 result.appendFormat("  Orientation: %d\n", info.orientation);
                 int deviceVersion;
-                if (common->module_api_version < CAMERA_MODULE_API_VERSION_2_0) {
+                if (mModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_0) {
                     deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
                 } else {
                     deviceVersion = info.device_version;
                 }
-                result.appendFormat("  Device version: 0x%x\n", deviceVersion);
+
+                auto conflicting = state.second->getConflicting();
+                result.appendFormat("  Resource Cost: %d\n", state.second->getCost());
+                result.appendFormat("  Conflicting Devices:");
+                for (auto& id : conflicting) {
+                    result.appendFormat(" %s", cameraId.string());
+                }
+                if (conflicting.size() == 0) {
+                    result.appendFormat(" NONE");
+                }
+                result.appendFormat("\n");
+
+                result.appendFormat("  Device version: %#x\n", deviceVersion);
                 if (deviceVersion >= CAMERA_DEVICE_API_VERSION_2_0) {
                     result.appendFormat("  Device static metadata:\n");
                     write(fd, result.string(), result.size());
@@ -1872,19 +2014,38 @@
                 } else {
                     write(fd, result.string(), result.size());
                 }
+
+                CameraParameters p = state.second->getShimParams();
+                if (!p.isEmpty()) {
+                    result = String8::format("  Camera1 API shim is using parameters:\n        ");
+                    write(fd, result.string(), result.size());
+                    p.dump(fd, args);
+                }
             }
 
-            sp<BasicClient> client = mClient[i].promote();
-            if (client == 0) {
-                result = String8::format("  Device is closed, no client instance\n");
+            auto clientDescriptor = mActiveClientManager.get(cameraId);
+            if (clientDescriptor == nullptr) {
+                result = String8::format("  Device %s is closed, no client instance\n",
+                        cameraId.string());
                 write(fd, result.string(), result.size());
                 continue;
             }
             hasClient = true;
-            result = String8::format("  Device is open. Client instance dump:\n");
+            result = String8::format("  Device %s is open. Client instance dump:\n\n",
+                    cameraId.string());
+            result.appendFormat("Client priority level: %d\n", clientDescriptor->getPriority());
+            result.appendFormat("Client PID: %d\n", clientDescriptor->getOwnerId());
+
+            auto client = clientDescriptor->getValue();
+            result.appendFormat("Client package: %s\n",
+                    String8(client->getPackageName()).string());
             write(fd, result.string(), result.size());
+
             client->dump(fd, args);
         }
+
+        if (stateLocked) mCameraStatesLock.unlock();
+
         if (!hasClient) {
             result = String8::format("\nNo active camera clients yet.\n");
             write(fd, result.string(), result.size());
@@ -1908,7 +2069,6 @@
                 write(fd, result.string(), result.size());
             }
         }
-
     }
     return NO_ERROR;
 }
@@ -1931,124 +2091,68 @@
     }
 }
 
-/*virtual*/void CameraService::binderDied(
-    const wp<IBinder> &who) {
+/*virtual*/void CameraService::binderDied(const wp<IBinder> &who) {
 
     /**
       * While tempting to promote the wp<IBinder> into a sp,
       * it's actually not supported by the binder driver
       */
 
-    ALOGV("java clients' binder died");
-
     // check torch client
     handleTorchClientBinderDied(who);
 
     // check camera device client
-    sp<BasicClient> cameraClient = getClientByRemote(who);
-
-    if (cameraClient == 0) {
-        ALOGV("java clients' binder death already cleaned up (normal case)");
+    if (!evictClientIdByRemote(who)) {
+        ALOGV("%s: Java client's binder death already cleaned up (normal case)", __FUNCTION__);
         return;
     }
 
-    ALOGW("Disconnecting camera client %p since the binder for it "
-          "died (this pid %d)", cameraClient.get(), getCallingPid());
-
-    cameraClient->disconnect();
-
+    ALOGE("%s: Java client's binder died, removing it from the list of active clients",
+            __FUNCTION__);
 }
 
-void CameraService::updateStatus(ICameraServiceListener::Status status,
-                                 int32_t cameraId,
-                                 const StatusVector *rejectSourceStates) {
-    // do not lock mServiceLock here or can get into a deadlock from
-    //  connect() -> ProClient::disconnect -> updateStatus
-    Mutex::Autolock lock(mStatusMutex);
-
-    ICameraServiceListener::Status oldStatus = mStatusList[cameraId];
-
-    mStatusList[cameraId] = status;
-
-    if (oldStatus != status) {
-        ALOGV("%s: Status has changed for camera ID %d from 0x%x to 0x%x",
-              __FUNCTION__, cameraId, (uint32_t)oldStatus, (uint32_t)status);
-
-        if (oldStatus == ICameraServiceListener::STATUS_NOT_PRESENT &&
-            (status != ICameraServiceListener::STATUS_PRESENT &&
-             status != ICameraServiceListener::STATUS_ENUMERATING)) {
-
-            ALOGW("%s: From NOT_PRESENT can only transition into PRESENT"
-                  " or ENUMERATING", __FUNCTION__);
-            mStatusList[cameraId] = oldStatus;
-            return;
-        }
-
-        if (rejectSourceStates != NULL) {
-            const StatusVector &rejectList = *rejectSourceStates;
-            StatusVector::const_iterator it = rejectList.begin();
-
-            /**
-             * Sometimes we want to conditionally do a transition.
-             * For example if a client disconnects, we want to go to PRESENT
-             * only if we weren't already in NOT_PRESENT or ENUMERATING.
-             */
-            for (; it != rejectList.end(); ++it) {
-                if (oldStatus == *it) {
-                    ALOGV("%s: Rejecting status transition for Camera ID %d, "
-                          " since the source state was was in one of the bad "
-                          " states.", __FUNCTION__, cameraId);
-                    mStatusList[cameraId] = oldStatus;
-                    return;
-                }
-            }
-        }
-
-        /**
-          * ProClients lose their exclusive lock.
-          * - Done before the CameraClient can initialize the HAL device,
-          *   since we want to be able to close it before they get to initialize
-          */
-        if (status == ICameraServiceListener::STATUS_NOT_AVAILABLE) {
-            Vector<wp<ProClient> > proClients(mProClientList[cameraId]);
-            Vector<wp<ProClient> >::const_iterator it;
-
-            for (it = proClients.begin(); it != proClients.end(); ++it) {
-                sp<ProClient> proCl = it->promote();
-                if (proCl.get() != NULL) {
-                    proCl->onExclusiveLockStolen();
-                }
-            }
-        }
-
-        if (status == ICameraServiceListener::STATUS_NOT_PRESENT ||
-            status == ICameraServiceListener::STATUS_NOT_AVAILABLE) {
-            // update torch status to not available when the camera device
-            // becomes not present or not available.
-            onTorchStatusChanged(String8::format("%d", cameraId),
-                    ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE);
-        } else if (status == ICameraServiceListener::STATUS_PRESENT) {
-            // update torch status to available when the camera device becomes
-            // present or available
-            onTorchStatusChanged(String8::format("%d", cameraId),
-                    ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF);
-        }
-
-        Vector<sp<ICameraServiceListener> >::const_iterator it;
-        for (it = mListenerList.begin(); it != mListenerList.end(); ++it) {
-            (*it)->onStatusChanged(status, cameraId);
-        }
-    }
+void CameraService::updateStatus(ICameraServiceListener::Status status, const String8& cameraId) {
+    updateStatus(status, cameraId, {});
 }
 
-ICameraServiceListener::Status CameraService::getStatus(int cameraId) const {
-    if (cameraId < 0 || cameraId >= MAX_CAMERAS) {
-        ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
-        return ICameraServiceListener::STATUS_UNKNOWN;
+void CameraService::updateStatus(ICameraServiceListener::Status status, const String8& cameraId,
+        std::initializer_list<ICameraServiceListener::Status> rejectSourceStates) {
+    // Do not lock mServiceLock here, or we can get into a deadlock from
+    // connect() -> disconnect() -> updateStatus()
+
+    auto state = getCameraState(cameraId);
+
+    if (state == nullptr) {
+        ALOGW("%s: Could not update the status for %s, no such device exists", __FUNCTION__,
+                cameraId.string());
+        return;
     }
 
-    Mutex::Autolock al(mStatusMutex);
-    return mStatusList[cameraId];
+    // Update the status for this camera state, then send the onStatusChanged callbacks to each
+    // of the listeners, with both CameraState::mStatusLock and mStatusListenerLock held
+    state->updateStatus(status, cameraId, rejectSourceStates, [this]
+            (const String8& cameraId, ICameraServiceListener::Status status) {
+
+            // Update torch status
+            if (status == ICameraServiceListener::STATUS_NOT_PRESENT ||
+                status == ICameraServiceListener::STATUS_NOT_AVAILABLE) {
+                // Update torch status to not available when the camera device becomes not present
+                // or not available.
+                onTorchStatusChanged(cameraId, ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE);
+            } else if (status == ICameraServiceListener::STATUS_PRESENT) {
+                // Update torch status to available when the camera device becomes present or
+                // available
+                onTorchStatusChanged(cameraId, ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF);
+            }
+
+            Mutex::Autolock lock(mStatusListenerLock);
+
+            for (auto& listener : mListenerList) {
+                // TODO: Refactor status listeners to use strings for Camera IDs and remove this.
+                int id = cameraIdToInt(cameraId);
+                if (id != -1) listener->onStatusChanged(status, id);
+            }
+        });
 }
 
 status_t CameraService::getTorchStatusLocked(
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 22afc8c..ca1c504 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -17,6 +17,7 @@
 #ifndef ANDROID_SERVERS_CAMERA_CAMERASERVICE_H
 #define ANDROID_SERVERS_CAMERA_CAMERASERVICE_H
 
+#include <cutils/multiuser.h>
 #include <utils/Vector.h>
 #include <utils/KeyedVector.h>
 #include <binder/AppOpsManager.h>
@@ -27,8 +28,6 @@
 
 #include <camera/ICamera.h>
 #include <camera/ICameraClient.h>
-#include <camera/IProCameraUser.h>
-#include <camera/IProCameraCallbacks.h>
 #include <camera/camera2/ICameraDeviceUser.h>
 #include <camera/camera2/ICameraDeviceCallbacks.h>
 #include <camera/VendorTagDescriptor.h>
@@ -38,11 +37,15 @@
 #include <camera/ICameraServiceListener.h>
 #include "CameraFlashlight.h"
 
-
 #include "common/CameraModule.h"
+#include "utils/AutoConditionLock.h"
+#include "utils/ClientManager.h"
+#include "utils/RingBuffer.h"
 
-/* This needs to be increased if we can have more cameras */
-#define MAX_CAMERAS 2
+#include <set>
+#include <string>
+#include <map>
+#include <memory>
 
 namespace android {
 
@@ -62,6 +65,39 @@
     class Client;
     class BasicClient;
 
+    enum apiLevel {
+        API_1 = 1,
+        API_2 = 2
+    };
+
+    // Process States (mirrors frameworks/base/core/java/android/app/ActivityManager.java)
+    static const int PROCESS_STATE_NONEXISTENT = -1;
+    static const int PROCESS_STATE_PERSISTENT = 0;
+    static const int PROCESS_STATE_PERSISTENT_UI = 1;
+    static const int PROCESS_STATE_TOP = 2;
+    static const int PROCESS_STATE_IMPORTANT_FOREGROUND = 3;
+    static const int PROCESS_STATE_IMPORTANT_BACKGROUND = 4;
+    static const int PROCESS_STATE_BACKUP = 5;
+    static const int PROCESS_STATE_HEAVY_WEIGHT = 6;
+    static const int PROCESS_STATE_SERVICE = 7;
+    static const int PROCESS_STATE_RECEIVER = 8;
+    static const int PROCESS_STATE_HOME = 9;
+    static const int PROCESS_STATE_LAST_ACTIVITY = 10;
+    static const int PROCESS_STATE_CACHED_ACTIVITY = 11;
+    static const int PROCESS_STATE_CACHED_ACTIVITY_CLIENT = 12;
+    static const int PROCESS_STATE_CACHED_EMPTY = 13;
+
+    // 3 second busy timeout when other clients are connecting
+    static const nsecs_t DEFAULT_CONNECT_TIMEOUT_NS = 3000000000;
+
+    // Default number of messages to store in eviction log
+    static const size_t DEFAULT_EVICTION_LOG_LENGTH = 50;
+
+    enum {
+        // Default last user id
+        DEFAULT_LAST_USER_ID = 0,
+    };
+
     // Implementation of BinderService<T>
     static char const* getServiceName() { return "media.camera"; }
 
@@ -70,8 +106,8 @@
 
     /////////////////////////////////////////////////////////////////////
     // HAL Callbacks
-    virtual void        onDeviceStatusChanged(int cameraId,
-                                              int newStatus);
+    virtual void        onDeviceStatusChanged(camera_device_status_t cameraId,
+                                              camera_device_status_t newStatus);
     virtual void        onTorchStatusChanged(const String8& cameraId,
                                              ICameraServiceListener::TorchStatus
                                                    newStatus);
@@ -95,11 +131,6 @@
             /*out*/
             sp<ICamera>& device);
 
-    virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb,
-            int cameraId, const String16& clientPackageName, int clientUid,
-            /*out*/
-            sp<IProCameraUser>& device);
-
     virtual status_t connectDevice(
             const sp<ICameraDeviceCallbacks>& cameraCb,
             int cameraId,
@@ -120,6 +151,8 @@
     virtual status_t    setTorchMode(const String16& cameraId, bool enabled,
             const sp<IBinder>& clientBinder);
 
+    virtual void notifySystemEvent(int eventId, int arg0);
+
     // OK = supports api of that version, -EOPNOTSUPP = does not support
     virtual status_t    supportsCameraApi(
             int cameraId, int apiVersion);
@@ -132,7 +165,6 @@
 
     /////////////////////////////////////////////////////////////////////
     // Client functionality
-    virtual void        removeClientByRemote(const wp<IBinder>& remoteBinder);
 
     enum sound_kind {
         SOUND_SHUTTER = 0,
@@ -155,11 +187,6 @@
     /////////////////////////////////////////////////////////////////////
     // CameraClient functionality
 
-    // returns plain pointer of client. Note that mClientLock should be acquired to
-    // prevent the client from destruction. The result can be NULL.
-    virtual BasicClient* getClientByIdUnsafe(int cameraId);
-    virtual Mutex*      getClientLockById(int cameraId);
-
     class BasicClient : public virtual RefBase {
     public:
         virtual status_t    initialize(CameraModule *module) = 0;
@@ -169,13 +196,22 @@
         // virtual inheritance
         virtual sp<IBinder> asBinderWrapper() = 0;
 
-        // Return the remote callback binder object (e.g. IProCameraCallbacks)
+        // Return the remote callback binder object (e.g. ICameraDeviceCallbacks)
         sp<IBinder>         getRemote() {
             return mRemoteBinder;
         }
 
         virtual status_t    dump(int fd, const Vector<String16>& args) = 0;
 
+        // Return the package name for this client
+        virtual String16 getPackageName() const;
+
+        // Notify client about a fatal error
+        virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+                const CaptureResultExtras& resultExtras) = 0;
+
+        // Get the PID of the application client that is using this camera device
+        virtual int getClientPid() const;
     protected:
         BasicClient(const sp<CameraService>& cameraService,
                 const sp<IBinder>& remoteCallback,
@@ -202,6 +238,7 @@
         pid_t                           mClientPid;
         uid_t                           mClientUid;      // immutable after constructor
         pid_t                           mServicePid;     // immutable after constructor
+        bool                            mDisconnected;
 
         // - The app-side Binder interface to receive callbacks from us
         sp<IBinder>                     mRemoteBinder;   // immutable after constructor
@@ -210,10 +247,6 @@
         status_t                        startCameraOps();
         status_t                        finishCameraOps();
 
-        // Notify client about a fatal error
-        virtual void                    notifyError(
-                ICameraDeviceCallbacks::CameraErrorCode errorCode,
-                const CaptureResultExtras& resultExtras) = 0;
     private:
         AppOpsManager                   mAppOpsManager;
 
@@ -285,13 +318,11 @@
             return asBinder(this);
         }
 
-    protected:
-        static Mutex*        getClientLockFromCookie(void* user);
-        // convert client from cookie. Client lock should be acquired before getting Client.
-        static Client*       getClientFromCookie(void* user);
-
         virtual void         notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
                                          const CaptureResultExtras& resultExtras);
+    protected:
+        // Convert client from cookie.
+        static sp<CameraService::Client> getClientFromCookie(void* user);
 
         // Initialized in constructor
 
@@ -300,93 +331,232 @@
 
     }; // class Client
 
-    class ProClient : public BnProCameraUser, public BasicClient {
+    typedef std::shared_ptr<resource_policy::ClientDescriptor<String8,
+            sp<CameraService::BasicClient>>> DescriptorPtr;
+
+    /**
+     * A container class for managing active camera clients that are using HAL devices.  Active
+     * clients are represented by ClientDescriptor objects that contain strong pointers to the
+     * actual BasicClient subclass binder interface implementation.
+     *
+     * This class manages the eviction behavior for the camera clients.  See the parent class
+     * implementation in utils/ClientManager for the specifics of this behavior.
+     */
+    class CameraClientManager :
+            public resource_policy::ClientManager<String8, sp<CameraService::BasicClient>> {
     public:
-        typedef IProCameraCallbacks TCamCallbacks;
+        virtual ~CameraClientManager();
 
-        ProClient(const sp<CameraService>& cameraService,
-                const sp<IProCameraCallbacks>& remoteCallback,
-                const String16& clientPackageName,
-                int cameraId,
-                int cameraFacing,
-                int clientPid,
-                uid_t clientUid,
-                int servicePid);
+        /**
+         * Return a strong pointer to the active BasicClient for this camera ID, or an empty
+         * if none exists.
+         */
+        sp<CameraService::BasicClient> getCameraClient(const String8& id) const;
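+
+        // Usage sketch (illustrative only; assumes the caller is CameraService, which owns the
+        // mActiveClientManager instance, and the camera ID string is a placeholder):
+        //
+        //     sp<CameraService::BasicClient> current =
+        //             mActiveClientManager.getCameraClient(String8("0"));
+        //     if (current != nullptr) {
+        //         // some client currently holds camera "0"
+        //     }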
 
-        virtual ~ProClient();
+        /**
+         * Return a string describing the current state.
+         */
+        String8 toString() const;
 
-        const sp<IProCameraCallbacks>& getRemoteCallback() {
-            return mRemoteCallback;
-        }
+        /**
+         * Make a ClientDescriptor object wrapping the given BasicClient strong pointer.
+         */
+        static DescriptorPtr makeClientDescriptor(const String8& key, const sp<BasicClient>& value,
+                int32_t cost, const std::set<String8>& conflictingKeys, int32_t priority,
+                int32_t ownerId);
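+
+        // Usage sketch (illustrative only; "client", "clientPid", and the cost value are
+        // placeholders rather than values taken from the HAL or this patch):
+        //
+        //     auto descriptor = CameraClientManager::makeClientDescriptor(String8("0"), client,
+        //             /*cost*/ 100, /*conflictingKeys*/ std::set<String8>{},
+        //             /*priority*/ PROCESS_STATE_TOP, /*ownerId*/ clientPid);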
 
-        /***
-            IProCamera implementation
-         ***/
-        virtual status_t      connect(const sp<IProCameraCallbacks>& callbacks)
-                                                                            = 0;
-        virtual status_t      exclusiveTryLock() = 0;
-        virtual status_t      exclusiveLock() = 0;
-        virtual status_t      exclusiveUnlock() = 0;
+        /**
+         * Make a ClientDescriptor object wrapping the given BasicClient strong pointer with
+         * values initialized from a prior ClientDescriptor.
+         */
+        static DescriptorPtr makeClientDescriptor(const sp<BasicClient>& value,
+                const CameraService::DescriptorPtr& partial);
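+
+        // Usage sketch (illustrative only): once the real client object exists, the partial
+        // descriptor produced earlier during eviction handling can be completed with:
+        //
+        //     auto fullDescriptor = CameraClientManager::makeClientDescriptor(client, partial);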
 
-        virtual bool          hasExclusiveLock() = 0;
-
-        // Note that the callee gets a copy of the metadata.
-        virtual int           submitRequest(camera_metadata_t* metadata,
-                                            bool streaming = false) = 0;
-        virtual status_t      cancelRequest(int requestId) = 0;
-
-        // Callbacks from camera service
-        virtual void          onExclusiveLockStolen() = 0;
-
-    protected:
-        virtual void          notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
-                                          const CaptureResultExtras& resultExtras);
-
-        sp<IProCameraCallbacks> mRemoteCallback;
-    }; // class ProClient
+    }; // class CameraClientManager
 
 private:
 
+    /**
+     * Container class for the state of each logical camera device, including: ID, status, and
+     * dependencies on other devices.  The mapping of camera ID -> state saved in mCameraStates
+     * represents the camera devices advertised by the HAL (and any USB devices, when we add
+     * those).
+     *
+     * This container does NOT represent an active camera client.  These are represented using
+     * the ClientDescriptors stored in mActiveClientManager.
+     */
+    class CameraState {
+    public:
+        /**
+         * Make a new CameraState and set the ID, cost, and conflicting devices using the values
+         * returned in the HAL's camera_info struct for each device.
+         */
+        CameraState(const String8& id, int cost, const std::set<String8>& conflicting);
+        virtual ~CameraState();
+
+        /**
+         * Return the status for this device.
+         *
+         * This method acquires mStatusLock.
+         */
+        ICameraServiceListener::Status getStatus() const;
+
+        /**
+         * Update the status for this camera device, unless the current status is in the given
+         * list of rejected source states. If the status changes, the functor passed in as
+         * onStatusUpdatedLocked, with the signature
+         * void(const String8&, ICameraServiceListener::Status), is invoked with the new status.
+         *
+         * This method is idempotent: onStatusUpdatedLocked will not be invoked more than once
+         * for the same arguments.
+         * This method acquires mStatusLock.
+         */
+        template<class Func>
+        void updateStatus(ICameraServiceListener::Status status, const String8& cameraId,
+                std::initializer_list<ICameraServiceListener::Status> rejectSourceStates,
+                Func onStatusUpdatedLocked);
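+
+        // Usage sketch, mirroring how CameraService::updateStatus() drives this method (the
+        // lambda body here is only a placeholder):
+        //
+        //     state->updateStatus(ICameraServiceListener::STATUS_PRESENT, id,
+        //             {ICameraServiceListener::STATUS_NOT_PRESENT,
+        //              ICameraServiceListener::STATUS_ENUMERATING},
+        //             [&](const String8& cameraId, ICameraServiceListener::Status status) {
+        //                 // broadcast the new status for cameraId to the registered listeners
+        //             });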
+
+        /**
+         * Return the last set CameraParameters object generated from the information returned by
+         * the HAL for this device (or an empty CameraParameters object if none has been set).
+         */
+        CameraParameters getShimParams() const;
+
+        /**
+         * Set the CameraParameters for this device.
+         */
+        void setShimParams(const CameraParameters& params);
+
+        /**
+         * Return the resource_cost advertised by the HAL for this device.
+         */
+        int getCost() const;
+
+        /**
+         * Return a set of the IDs of conflicting devices advertised by the HAL for this device.
+         */
+        std::set<String8> getConflicting() const;
+
+        /**
+         * Return the ID of this camera device.
+         */
+        String8 getId() const;
+
+    private:
+        const String8 mId;
+        ICameraServiceListener::Status mStatus; // protected by mStatusLock
+        const int mCost;
+        std::set<String8> mConflicting;
+        mutable Mutex mStatusLock;
+        CameraParameters mShimParams;
+    }; // class CameraState
+
     // Delay-load the Camera HAL module
     virtual void onFirstRef();
 
-    // Step 1. Check if we can connect, before we acquire the service lock.
-    status_t            validateConnect(int cameraId,
-                                        /*inout*/
-                                        int& clientUid) const;
+    // Check if we can connect, before we acquire the service lock.
+    status_t validateConnectLocked(const String8& cameraId, /*inout*/int& clientUid) const;
 
-    // Step 2. Check if we can connect, after we acquire the service lock.
-    bool                canConnectUnsafe(int cameraId,
-                                         const String16& clientPackageName,
-                                         const sp<IBinder>& remoteCallback,
-                                         /*out*/
-                                         sp<BasicClient> &client);
+    // Handle active client evictions, and update service state.
+    // Only call with mServiceLock held.
+    status_t handleEvictionsLocked(const String8& cameraId, int clientPid,
+        apiLevel effectiveApiLevel, const sp<IBinder>& remoteCallback, const String8& packageName,
+        /*out*/
+        sp<BasicClient>* client,
+        std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>>* partial);
 
-    // When connection is successful, initialize client and track its death
-    status_t            connectFinishUnsafe(const sp<BasicClient>& client,
-                                            const sp<IBinder>& remoteCallback);
+    // Single implementation shared between the various connect calls
+    template<class CALLBACK, class CLIENT>
+    status_t connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId, int halVersion,
+            const String16& clientPackageName, int clientUid, apiLevel effectiveApiLevel,
+            bool legacyMode, bool shimUpdateOnly, /*out*/sp<CLIENT>& device);
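+
+    // Delegation sketch (illustrative only): a public connect entry point for the API2 path
+    // might forward to connectHelper roughly as follows; the -1 "unspecified HAL version"
+    // sentinel and the argument values are assumptions, not taken from this patch.
+    //
+    //     sp<CameraDeviceClient> client = nullptr;
+    //     status_t ret = connectHelper<ICameraDeviceCallbacks, CameraDeviceClient>(cameraCb,
+    //             String8::format("%d", cameraId), /*halVersion*/ -1, clientPackageName,
+    //             clientUid, API_2, /*legacyMode*/ false, /*shimUpdateOnly*/ false,
+    //             /*out*/ client);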
 
-    virtual sp<BasicClient>  getClientByRemote(const wp<IBinder>& cameraClient);
 
+    // Lock guarding camera service state
     Mutex               mServiceLock;
-    // either a Client or CameraDeviceClient
-    wp<BasicClient>     mClient[MAX_CAMERAS];  // protected by mServiceLock
-    Mutex               mClientLock[MAX_CAMERAS]; // prevent Client destruction inside callbacks
+
+    // Condition to use with mServiceLock, used to handle simultaneous connect calls from clients
+    std::shared_ptr<WaitableMutexWrapper> mServiceLockWrapper;
+
+    // Return NO_ERROR if the device with the given ID can be connected to
+    status_t checkIfDeviceIsUsable(const String8& cameraId) const;
+
+    // Container for managing currently active application-layer clients
+    CameraClientManager mActiveClientManager;
+
+    // Mapping from camera ID -> state for each device, map is protected by mCameraStatesLock
+    std::map<String8, std::shared_ptr<CameraState>> mCameraStates;
+
+    // Mutex guarding mCameraStates map
+    mutable Mutex mCameraStatesLock;
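+
+    // Sketch of how an entry might be added during device enumeration (illustrative only; the
+    // ID and cost values are placeholders, and the real enumeration code is not part of this
+    // hunk):
+    //
+    //     Mutex::Autolock lock(mCameraStatesLock);
+    //     mCameraStates.emplace(String8("0"),
+    //             std::make_shared<CameraState>(String8("0"), /*cost*/ 100,
+    //                     /*conflicting*/ std::set<String8>{}));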
+
+    // Circular buffer storing event log messages for dumps
+    RingBuffer<String8> mEventLog;
+
+    // ID of the last device user.
+    int mLastUserId;
+
+    /**
+     * Get the camera state for a given camera id.
+     *
+     * This acquires mCameraStatesLock.
+     */
+    std::shared_ptr<CameraService::CameraState> getCameraState(const String8& cameraId) const;
+
+    /**
+     * Evict the client whose remote binder has died.  Returns true if this client was in the active
+     * list and was disconnected.
+     *
+     * This method acquires mServiceLock.
+     */
+    bool evictClientIdByRemote(const wp<IBinder>& cameraClient);
+
+    /**
+     * Remove the given client from the active clients list; does not disconnect the client.
+     *
+     * This method acquires mServiceLock.
+     */
+    void removeByClient(const BasicClient* client);
+
+    /**
+     * Add a new client to the active clients list after conflicting clients have disconnected,
+     * using the values set in the partial descriptor passed in to construct the actual client
+     * descriptor. This is typically called at the end of a connect call.
+     *
+     * This method must be called with mServiceLock held.
+     */
+    void finishConnectLocked(const sp<BasicClient>& client, const DescriptorPtr& desc);
+
+    /**
+     * Returns the integer corresponding to the given camera ID string, or -1 on failure.
+     */
+    static int cameraIdToInt(const String8& cameraId);
+
+    /**
+     * Remove a single client corresponding to the given camera id from the list of active clients.
+     * If none exists, return an empty strong pointer.
+     *
+     * This method must be called with mServiceLock held.
+     */
+    sp<CameraService::BasicClient> removeClientLocked(const String8& cameraId);
+
+    /**
+     * Handle a notification that the current device user has changed.
+     */
+    void doUserSwitch(int newUserId);
+
+    /**
+     * Add an event log message that a client has been disconnected.
+     */
+    void logDisconnected(const String8& cameraId, int clientPid, const String8& clientPackage);
+
+    /**
+     * Add an event log message that a client has been connected.
+     */
+    void logConnected(const String8& cameraId, int clientPid, const String8& clientPackage);
+
     int                 mNumberOfCameras;
 
-    typedef wp<ProClient> weak_pro_client_ptr;
-    Vector<weak_pro_client_ptr> mProClientList[MAX_CAMERAS];
-
-    // needs to be called with mServiceLock held
-    sp<BasicClient>     findClientUnsafe(const wp<IBinder>& cameraClient, int& outIndex);
-    sp<ProClient>       findProClientUnsafe(
-                                     const wp<IBinder>& cameraCallbacksRemote);
-
-    // atomics to record whether the hardware is allocated to some client.
-    volatile int32_t    mBusy[MAX_CAMERAS];
-    void                setCameraBusy(int cameraId);
-    void                setCameraFree(int cameraId);
-
     // sounds
     MediaPlayer*        newMediaPlayer(const char *file);
 
@@ -396,24 +566,21 @@
 
     CameraModule*     mModule;
 
-    Vector<sp<ICameraServiceListener> >
-                        mListenerList;
+    // Guarded by mStatusListenerLock
+    std::vector<sp<ICameraServiceListener>> mListenerList;
+    Mutex       mStatusListenerLock;
 
-    // guard only mStatusList and the broadcasting of ICameraServiceListener
-    mutable Mutex       mStatusMutex;
-    ICameraServiceListener::Status
-                        mStatusList[MAX_CAMERAS];
-
-    // Read the current status (locks mStatusMutex)
-    ICameraServiceListener::Status
-                        getStatus(int cameraId) const;
-
-    typedef Vector<ICameraServiceListener::Status> StatusVector;
-    // Broadcast the new status if it changed (locks the service mutex)
-    void                updateStatus(
-                            ICameraServiceListener::Status status,
-                            int32_t cameraId,
-                            const StatusVector *rejectSourceStates = NULL);
+    /**
+     * Update the status for the given camera id (if that device exists), and broadcast the
+     * status update to all current ICameraServiceListeners if the status has changed.  Any
+     * statuses in rejectedSourceStates will be ignored.
+     *
+     * This method must be idempotent.
+     * This method acquires mStatusLock and mStatusListenerLock.
+     */
+    void updateStatus(ICameraServiceListener::Status status, const String8& cameraId,
+            std::initializer_list<ICameraServiceListener::Status> rejectedSourceStates);
+    void updateStatus(ICameraServiceListener::Status status, const String8& cameraId);
 
     // flashlight control
     sp<CameraFlashlight> mFlashlight;
@@ -435,9 +602,6 @@
     void onTorchStatusChangedLocked(const String8& cameraId,
             ICameraServiceListener::TorchStatus newStatus);
 
-    // validate the camera id for use of setting a torch mode.
-    bool validCameraIdForSetTorchMode(const String8& cameraId);
-
     // get a camera's torch status. mTorchStatusMutex should be locked.
     status_t getTorchStatusLocked(const String8 &cameraId,
             ICameraServiceListener::TorchStatus *status) const;
@@ -451,19 +615,9 @@
 
     // Helpers
 
-    bool                isValidCameraId(int cameraId);
-
     bool                setUpVendorTags();
 
     /**
-     * A mapping of camera ids to CameraParameters returned by that camera device.
-     *
-     * This cache is used to generate CameraCharacteristic metadata when using
-     * the HAL1 shim.
-     */
-    KeyedVector<int, CameraParameters>    mShimParams;
-
-    /**
      * Initialize and cache the metadata used by the HAL1 shim for a given cameraId.
      *
      * Returns OK on success, or a negative error code.
@@ -486,25 +640,192 @@
      */
     status_t            generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo);
 
+    static int getCallingPid();
+
+    static int getCallingUid();
+
     /**
-     * Connect a new camera client.  This should only be used while holding the
-     * mutex for mServiceLock.
-     *
-     * Returns OK on success, or a negative error code.
+     * Get the current system time as a formatted string.
      */
-    status_t            connectHelperLocked(
-            /*out*/
-            sp<Client>& client,
-            /*in*/
-            const sp<ICameraClient>& cameraClient,
-            int cameraId,
-            const String16& clientPackageName,
-            int clientUid,
-            int callingPid,
-            int halVersion = CAMERA_HAL_API_VERSION_UNSPECIFIED,
-            bool legacyMode = false);
+    static String8 getFormattedCurrentTime();
+
+    /**
+     * Get the camera eviction priority from the current process state given by ActivityManager.
+     */
+    static int getCameraPriorityFromProcState(int procState);
+
+    static status_t makeClient(const sp<CameraService>& cameraService,
+            const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
+            int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
+            int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
+            /*out*/sp<BasicClient>* client);
 };
 
+template<class Func>
+void CameraService::CameraState::updateStatus(ICameraServiceListener::Status status,
+        const String8& cameraId,
+        std::initializer_list<ICameraServiceListener::Status> rejectSourceStates,
+        Func onStatusUpdatedLocked) {
+    Mutex::Autolock lock(mStatusLock);
+    ICameraServiceListener::Status oldStatus = mStatus;
+    mStatus = status;
+
+    if (oldStatus == status) {
+        return;
+    }
+
+    ALOGV("%s: Status has changed for camera ID %s from %#x to %#x", __FUNCTION__,
+            cameraId.string(), oldStatus, status);
+
+    if (oldStatus == ICameraServiceListener::STATUS_NOT_PRESENT &&
+        (status != ICameraServiceListener::STATUS_PRESENT &&
+         status != ICameraServiceListener::STATUS_ENUMERATING)) {
+
+        ALOGW("%s: From NOT_PRESENT can only transition into PRESENT or ENUMERATING",
+                __FUNCTION__);
+        mStatus = oldStatus;
+        return;
+    }
+
+    /**
+     * Sometimes we want to conditionally do a transition.
+     * For example if a client disconnects, we want to go to PRESENT
+     * only if we weren't already in NOT_PRESENT or ENUMERATING.
+     */
+    for (auto& rejectStatus : rejectSourceStates) {
+        if (oldStatus == rejectStatus) {
+            ALOGV("%s: Rejecting status transition for Camera ID %s,  since the source "
+                    "state was was in one of the bad states.", __FUNCTION__, cameraId.string());
+            mStatus = oldStatus;
+            return;
+        }
+    }
+
+    onStatusUpdatedLocked(cameraId, status);
+}
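A note on the guarded transition above: rejectSourceStates lets a caller say "apply this transition unless the device was already in one of these states", which is how a disconnect can move a camera back to PRESENT without clobbering NOT_PRESENT or ENUMERATING. The following standalone sketch is not part of the patch; all names are hypothetical, and it uses only the standard library to show the same pattern.

#include <cstdio>
#include <initializer_list>
#include <mutex>

enum class Status { NOT_PRESENT, PRESENT, ENUMERATING, NOT_AVAILABLE };

class DeviceState {
public:
    // Apply the transition unless the previous status is listed in rejectSourceStates.
    // Returns true if the new status was applied.
    bool updateStatus(Status status, std::initializer_list<Status> rejectSourceStates) {
        std::lock_guard<std::mutex> lock(mLock);
        Status oldStatus = mStatus;
        if (oldStatus == status) {
            return true;                       // no-op transition
        }
        for (Status reject : rejectSourceStates) {
            if (oldStatus == reject) {
                return false;                  // keep the old status
            }
        }
        mStatus = status;
        return true;
    }

private:
    std::mutex mLock;
    Status mStatus = Status::NOT_PRESENT;
};

int main() {
    DeviceState s;
    s.updateStatus(Status::PRESENT, {});           // device appeared
    s.updateStatus(Status::NOT_AVAILABLE, {});     // a client opened it
    // On disconnect, restore PRESENT -- but only if the device did not go away or
    // start enumerating while the client held it:
    bool applied = s.updateStatus(Status::PRESENT,
                                  {Status::NOT_PRESENT, Status::ENUMERATING});
    std::printf("transition %s\n", applied ? "applied" : "rejected");
    return 0;
}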
+
+
+template<class CALLBACK, class CLIENT>
+status_t CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
+        int halVersion, const String16& clientPackageName, int clientUid,
+        apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
+        /*out*/sp<CLIENT>& device) {
+    status_t ret = NO_ERROR;
+    String8 clientName8(clientPackageName);
+    int clientPid = getCallingPid();
+
+    ALOGI("CameraService::connect call E (PID %d \"%s\", camera ID %s) for HAL version %d and "
+            "Camera API version %d", clientPid, clientName8.string(), cameraId.string(),
+            halVersion, static_cast<int>(effectiveApiLevel));
+
+    sp<CLIENT> client = nullptr;
+    {
+        // Acquire mServiceLock and prevent other clients from connecting
+        std::unique_ptr<AutoConditionLock> lock =
+                AutoConditionLock::waitAndAcquire(mServiceLockWrapper, DEFAULT_CONNECT_TIMEOUT_NS);
+
+        if (lock == nullptr) {
+            ALOGE("CameraService::connect X (PID %d) rejected (too many other clients connecting)."
+                    , clientPid);
+            return -EBUSY;
+        }
+
+        // Enforce client permissions and do basic sanity checks
+        if((ret = validateConnectLocked(cameraId, /*inout*/clientUid)) != NO_ERROR) {
+            return ret;
+        }
+        mLastUserId = multiuser_get_user_id(clientUid);
+
+        // Check the shim parameters after acquiring the lock; if they have already been updated
+        // and we were only doing a shim update, return immediately
+        if (shimUpdateOnly) {
+            auto cameraState = getCameraState(cameraId);
+            if (cameraState != nullptr) {
+                if (!cameraState->getShimParams().isEmpty()) return NO_ERROR;
+            }
+        }
+
+        sp<BasicClient> clientTmp = nullptr;
+        std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>> partial;
+        if ((ret = handleEvictionsLocked(cameraId, clientPid, effectiveApiLevel,
+                IInterface::asBinder(cameraCb), clientName8, /*out*/&clientTmp,
+                /*out*/&partial)) != NO_ERROR) {
+            return ret;
+        }
+
+        if (clientTmp.get() != nullptr) {
+            // Handle special case for API1 MediaRecorder where the existing client is returned
+            device = static_cast<CLIENT*>(clientTmp.get());
+            return NO_ERROR;
+        }
+
+        // give flashlight a chance to close devices if necessary.
+        mFlashlight->prepareDeviceOpen(cameraId);
+
+        // TODO: Update getDeviceVersion + HAL interface to use strings for Camera IDs
+        int id = cameraIdToInt(cameraId);
+        if (id == -1) {
+            ALOGE("%s: Invalid camera ID %s, cannot get device version from HAL.", __FUNCTION__,
+                    cameraId.string());
+            return BAD_VALUE;
+        }
+
+        int facing = -1;
+        int deviceVersion = getDeviceVersion(id, /*out*/&facing);
+        sp<BasicClient> tmp = nullptr;
+        if((ret = makeClient(this, cameraCb, clientPackageName, cameraId, facing, clientPid,
+                clientUid, getpid(), legacyMode, halVersion, deviceVersion, effectiveApiLevel,
+                /*out*/&tmp)) != NO_ERROR) {
+            return ret;
+        }
+        client = static_cast<CLIENT*>(tmp.get());
+
+        LOG_ALWAYS_FATAL_IF(client.get() == nullptr, "%s: CameraService in invalid state",
+                __FUNCTION__);
+
+        if ((ret = client->initialize(mModule)) != OK) {
+            ALOGE("%s: Could not initialize client from HAL module.", __FUNCTION__);
+            return ret;
+        }
+
+        sp<IBinder> remoteCallback = client->getRemote();
+        if (remoteCallback != nullptr) {
+            remoteCallback->linkToDeath(this);
+        }
+
+        // Update shim parameters for legacy clients
+        if (effectiveApiLevel == API_1) {
+            // Assume we have always received a Client subclass for API1
+            sp<Client> shimClient = reinterpret_cast<Client*>(client.get());
+            String8 rawParams = shimClient->getParameters();
+            CameraParameters params(rawParams);
+
+            auto cameraState = getCameraState(cameraId);
+            if (cameraState != nullptr) {
+                cameraState->setShimParams(params);
+            } else {
+                ALOGE("%s: Cannot update shim parameters for camera %s, no such device exists.",
+                        __FUNCTION__, cameraId.string());
+            }
+        }
+
+        if (shimUpdateOnly) {
+            // If only updating legacy shim parameters, immediately disconnect client
+            mServiceLock.unlock();
+            client->disconnect();
+            mServiceLock.lock();
+        } else {
+            // Otherwise, add client to active clients list
+            finishConnectLocked(client, partial);
+        }
+    } // lock is destroyed, allow further connect calls
+
+    // Important: the service lock is released above (by the end of the scope) so the client can
+    // call back into the service from its destructor (which can run at the end of this call)
+    device = client;
+    return NO_ERROR;
+}
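One subtlety in connectHelper worth calling out: the service lock is scoped so that it is released before client->disconnect() runs and before `device` (and with it the caller's reference) can destroy the client, since both paths call back into the service and would otherwise self-deadlock. Below is a minimal standalone sketch of that hazard and the fix; the names are hypothetical and std::mutex stands in for the service lock.

#include <iostream>
#include <mutex>

std::mutex gServiceLock;   // stands in for the service-wide lock

// Called from the client teardown path; takes the service lock, like removeByClient().
void removeFromActiveList() {
    std::lock_guard<std::mutex> lock(gServiceLock);
    std::cout << "client removed from active list\n";
}

struct Client {
    ~Client() { removeFromActiveList(); }   // destructor re-enters the "service"
};

void connectThenTearDown() {
    Client* client = new Client();
    {
        std::lock_guard<std::mutex> lock(gServiceLock);
        // ... evictions, client creation and initialization happen under the lock ...
    }   // the lock must be released here ...
    delete client;   // ... because tearing the client down re-acquires it
}

int main() {
    connectThenTearDown();
    return 0;
}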
+
 } // namespace android
 
 #endif
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 5dbdeb2..6f44aee 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -163,11 +163,9 @@
 
 status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
     String8 result;
-    result.appendFormat("Client2[%d] (%p) Client: %s PID: %d, dump:\n",
-            mCameraId,
+    result.appendFormat("Client2[%d] (%p) PID: %d, dump:\n", mCameraId,
             (getRemoteCallback() != NULL ?
                     (IInterface::asBinder(getRemoteCallback()).get()) : NULL),
-            String8(mClientPackageName).string(),
             mClientPid);
     result.append("  State: ");
 #define CASE_APPEND_ENUM(x) case x: result.append(#x "\n"); break;
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index 6bea3b6..e552633 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -99,12 +99,7 @@
 
 // tear down the client
 CameraClient::~CameraClient() {
-    // this lock should never be NULL
-    Mutex* lock = mCameraService->getClientLockById(mCameraId);
-    lock->lock();
     mDestructionStarted = true;
-    // client will not be accessed from callback. should unlock to prevent dead-lock in disconnect
-    lock->unlock();
     int callingPid = getCallingPid();
     LOG1("CameraClient::~CameraClient E (pid %d, this %p)", callingPid, this);
 
@@ -116,11 +111,11 @@
     const size_t SIZE = 256;
     char buffer[SIZE];
 
-    size_t len = snprintf(buffer, SIZE, "Client[%d] (%p) PID: %d\n",
+    size_t len = snprintf(buffer, SIZE, "Client[%d] (%p) with UID %d\n",
             mCameraId,
             (getRemoteCallback() != NULL ?
                     IInterface::asBinder(getRemoteCallback()).get() : NULL),
-            mClientPid);
+            mClientUid);
     len = (len > SIZE - 1) ? SIZE - 1 : len;
     write(fd, buffer, len);
 
@@ -677,6 +672,13 @@
                 LOG1("lockIfMessageWanted(%d): waited for %d ms",
                     msgType, sleepCount * CHECK_MESSAGE_INTERVAL);
             }
+
+            // If messages are no longer enabled after acquiring lock, release and drop message
+            if ((mMsgEnabled & msgType) == 0) {
+                mLock.unlock();
+                break;
+            }
+
             return true;
         }
         if (sleepCount++ == 0) {
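The added re-check of mMsgEnabled above closes a window where a message type is disabled while lockIfMessageWanted() is still waiting for the lock; without it, a message disabled in the meantime could still be delivered. A standalone sketch of the pattern follows (hypothetical names, plain std::mutex, not the CameraClient code itself).

#include <cstdint>
#include <cstdio>
#include <mutex>

std::mutex gLock;
int32_t gMsgEnabled = 0;   // bitmask of enabled message types, guarded by gLock

bool lockIfMessageWanted(int32_t msgType) {
    gLock.lock();
    if ((gMsgEnabled & msgType) == 0) {
        // The message may have been disabled while we were waiting for the lock:
        // release it and tell the caller to drop the message.
        gLock.unlock();
        return false;
    }
    return true;   // caller now owns gLock and may deliver the message
}

int main() {
    gMsgEnabled = 0x1;
    if (lockIfMessageWanted(0x1)) {
        std::printf("delivering message\n");
        gLock.unlock();
    }
    if (!lockIfMessageWanted(0x2)) {
        std::printf("dropping message\n");
    }
    return 0;
}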
@@ -702,26 +704,13 @@
 //      (others)                        c->dataCallback
 // dataCallbackTimestamp
 //      (others)                        c->dataCallbackTimestamp
-//
-// NOTE: the *Callback functions grab mLock of the client before passing
-// control to handle* functions. So the handle* functions must release the
-// lock before calling the ICameraClient's callbacks, so those callbacks can
-// invoke methods in the Client class again (For example, the preview frame
-// callback may want to releaseRecordingFrame). The handle* functions must
-// release the lock after all accesses to member variables, so it must be
-// handled very carefully.
 
 void CameraClient::notifyCallback(int32_t msgType, int32_t ext1,
         int32_t ext2, void* user) {
     LOG2("notifyCallback(%d)", msgType);
 
-    Mutex* lock = getClientLockFromCookie(user);
-    if (lock == NULL) return;
-    Mutex::Autolock alock(*lock);
-
-    CameraClient* client =
-            static_cast<CameraClient*>(getClientFromCookie(user));
-    if (client == NULL) return;
+    sp<CameraClient> client = static_cast<CameraClient*>(getClientFromCookie(user).get());
+    if (client.get() == nullptr) return;
 
     if (!client->lockIfMessageWanted(msgType)) return;
 
@@ -740,13 +729,8 @@
         const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata, void* user) {
     LOG2("dataCallback(%d)", msgType);
 
-    Mutex* lock = getClientLockFromCookie(user);
-    if (lock == NULL) return;
-    Mutex::Autolock alock(*lock);
-
-    CameraClient* client =
-            static_cast<CameraClient*>(getClientFromCookie(user));
-    if (client == NULL) return;
+    sp<CameraClient> client = static_cast<CameraClient*>(getClientFromCookie(user).get());
+    if (client.get() == nullptr) return;
 
     if (!client->lockIfMessageWanted(msgType)) return;
     if (dataPtr == 0 && metadata == NULL) {
@@ -778,13 +762,8 @@
         int32_t msgType, const sp<IMemory>& dataPtr, void* user) {
     LOG2("dataCallbackTimestamp(%d)", msgType);
 
-    Mutex* lock = getClientLockFromCookie(user);
-    if (lock == NULL) return;
-    Mutex::Autolock alock(*lock);
-
-    CameraClient* client =
-            static_cast<CameraClient*>(getClientFromCookie(user));
-    if (client == NULL) return;
+    sp<CameraClient> client = static_cast<CameraClient*>(getClientFromCookie(user).get());
+    if (client.get() == nullptr) return;
 
     if (!client->lockIfMessageWanted(msgType)) return;
 
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
index fd4e714..5c8f750 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -154,8 +154,8 @@
                 params.previewWidth, params.previewHeight,
                 callbackFormat, params.previewFormat);
         res = device->createStream(mCallbackWindow,
-                params.previewWidth, params.previewHeight,
-                callbackFormat, HAL_DATASPACE_JFIF, &mCallbackStreamId);
+                params.previewWidth, params.previewHeight, callbackFormat,
+                HAL_DATASPACE_JFIF, CAMERA3_STREAM_ROTATION_0, &mCallbackStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create output stream for callbacks: "
                     "%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index 5b387f9..34798bf 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -146,7 +146,7 @@
         res = device->createStream(mCaptureWindow,
                 params.pictureWidth, params.pictureHeight,
                 HAL_PIXEL_FORMAT_BLOB, HAL_DATASPACE_JFIF,
-                &mCaptureStreamId);
+                CAMERA3_STREAM_ROTATION_0, &mCaptureStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create output stream for capture: "
                     "%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 87e0132..6b0f8b5 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -65,15 +65,29 @@
     const Size MAX_PREVIEW_SIZE = { MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT };
     // Treat the H.264 max size as the max supported video size.
     MediaProfiles *videoEncoderProfiles = MediaProfiles::getInstance();
-    int32_t maxVideoWidth = videoEncoderProfiles->getVideoEncoderParamByName(
-                            "enc.vid.width.max", VIDEO_ENCODER_H264);
-    int32_t maxVideoHeight = videoEncoderProfiles->getVideoEncoderParamByName(
-                            "enc.vid.height.max", VIDEO_ENCODER_H264);
-    const Size MAX_VIDEO_SIZE = {maxVideoWidth, maxVideoHeight};
+    Vector<video_encoder> encoders = videoEncoderProfiles->getVideoEncoders();
+    int32_t maxVideoWidth = 0;
+    int32_t maxVideoHeight = 0;
+    for (size_t i = 0; i < encoders.size(); i++) {
+        int width = videoEncoderProfiles->getVideoEncoderParamByName(
+                "enc.vid.width.max", encoders[i]);
+        int height = videoEncoderProfiles->getVideoEncoderParamByName(
+                "enc.vid.height.max", encoders[i]);
+        // Treat width/height separately here to handle the case where different
+        // profiles might report max sizes with different aspect ratios
+        if (width > maxVideoWidth) {
+            maxVideoWidth = width;
+        }
+        if (height > maxVideoHeight) {
+            maxVideoHeight = height;
+        }
+    }
+    // This is just an upper bound and may not be an actually valid video size
+    const Size VIDEO_SIZE_UPPER_BOUND = {maxVideoWidth, maxVideoHeight};
 
     res = getFilteredSizes(MAX_PREVIEW_SIZE, &availablePreviewSizes);
     if (res != OK) return res;
-    res = getFilteredSizes(MAX_VIDEO_SIZE, &availableVideoSizes);
+    res = getFilteredSizes(VIDEO_SIZE_UPPER_BOUND, &availableVideoSizes);
     if (res != OK) return res;
 
     // Select initial preview and video size that's under the initial bound and
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index a5a2fec..b6071f6 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -183,7 +183,7 @@
         res = device->createStream(mPreviewWindow,
                 params.previewWidth, params.previewHeight,
                 CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, HAL_DATASPACE_UNKNOWN,
-                &mPreviewStreamId);
+                CAMERA3_STREAM_ROTATION_0, &mPreviewStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)",
                     __FUNCTION__, mId, strerror(-res), res);
@@ -426,8 +426,8 @@
         // TODO: Wire this in from encoder side
         res = device->createStream(mRecordingWindow,
                 params.videoWidth, params.videoHeight,
-                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE,
-                HAL_DATASPACE_BT709, &mRecordingStreamId);
+                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, HAL_DATASPACE_BT709,
+                CAMERA3_STREAM_ROTATION_0, &mRecordingStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create output stream for recording: "
                     "%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 68aca2d..a03f9c7 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -185,8 +185,8 @@
                 (int)CAMERA2_HAL_PIXEL_FORMAT_ZSL :
                 (int)HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
         res = device->createStream(mZslWindow,
-                params.fastInfo.arrayWidth, params.fastInfo.arrayHeight,
-                streamType, HAL_DATASPACE_UNKNOWN, &mZslStreamId);
+                params.fastInfo.arrayWidth, params.fastInfo.arrayHeight, streamType,
+                HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0, &mZslStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create output stream for ZSL: "
                     "%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index dde1779..8587e0e 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -314,8 +314,7 @@
     return res;
 }
 
-status_t CameraDeviceClient::createStream(
-                      const sp<IGraphicBufferProducer>& bufferProducer)
+status_t CameraDeviceClient::createStream(const OutputConfiguration &outputConfiguration)
 {
     ATRACE_CALL();
 
@@ -324,6 +323,8 @@
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
+
+    sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
     if (bufferProducer == NULL) {
         ALOGE("%s: bufferProducer must not be null", __FUNCTION__);
         return BAD_VALUE;
@@ -413,7 +414,9 @@
 
     int streamId = -1;
     res = mDevice->createStream(anw, width, height, format, dataSpace,
-            &streamId);
+                                static_cast<camera3_stream_rotation_t>
+                                        (outputConfiguration.getRotation()),
+                                &streamId);
 
     if (res == OK) {
         mStreamMap.add(binder, streamId);
@@ -595,9 +598,7 @@
             mCameraId,
             (getRemoteCallback() != NULL ?
                     IInterface::asBinder(getRemoteCallback()).get() : NULL) );
-    result.appendFormat("  Current client: %s (PID %d, UID %u)\n",
-            String8(mClientPackageName).string(),
-            mClientPid, mClientUid);
+    result.appendFormat("  Current client UID %u\n", mClientUid);
 
     result.append("  State:\n");
     result.appendFormat("    Request ID counter: %d\n", mRequestIdCounter);
@@ -644,9 +645,6 @@
     }
 }
 
-// TODO: refactor the code below this with IProCameraUser.
-// it's 100% copy-pasted, so lets not change it right now to make it easier.
-
 void CameraDeviceClient::detachDevice() {
     if (mDevice == 0) return;
 
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index c89c269..a3dbb90 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -19,6 +19,7 @@
 
 #include <camera/camera2/ICameraDeviceUser.h>
 #include <camera/camera2/ICameraDeviceCallbacks.h>
+#include <camera/camera2/OutputConfiguration.h>
 
 #include "CameraService.h"
 #include "common/FrameProcessorBase.h"
@@ -83,8 +84,7 @@
     // Returns -EBUSY if device is not idle
     virtual status_t      deleteStream(int streamId);
 
-    virtual status_t      createStream(
-            const sp<IGraphicBufferProducer>& bufferProducer);
+    virtual status_t      createStream(const OutputConfiguration &outputConfiguration);
 
     // Create a request object from a template.
     virtual status_t      createDefaultRequest(int templateId,
diff --git a/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
deleted file mode 100644
index ba93554..0000000
--- a/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
+++ /dev/null
@@ -1,444 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "ProCamera2Client"
-#define ATRACE_TAG ATRACE_TAG_CAMERA
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-#include <utils/Trace.h>
-
-#include <cutils/properties.h>
-#include <gui/Surface.h>
-#include <gui/Surface.h>
-
-#include "api_pro/ProCamera2Client.h"
-#include "common/CameraDeviceBase.h"
-
-namespace android {
-using namespace camera2;
-
-// Interface used by CameraService
-
-ProCamera2Client::ProCamera2Client(const sp<CameraService>& cameraService,
-                                   const sp<IProCameraCallbacks>& remoteCallback,
-                                   const String16& clientPackageName,
-                                   int cameraId,
-                                   int cameraFacing,
-                                   int clientPid,
-                                   uid_t clientUid,
-                                   int servicePid) :
-    Camera2ClientBase(cameraService, remoteCallback, clientPackageName,
-                cameraId, cameraFacing, clientPid, clientUid, servicePid)
-{
-    ATRACE_CALL();
-    ALOGI("ProCamera %d: Opened", cameraId);
-
-    mExclusiveLock = false;
-}
-
-status_t ProCamera2Client::initialize(CameraModule *module)
-{
-    ATRACE_CALL();
-    status_t res;
-
-    res = Camera2ClientBase::initialize(module);
-    if (res != OK) {
-        return res;
-    }
-
-    String8 threadName;
-    mFrameProcessor = new FrameProcessorBase(mDevice);
-    threadName = String8::format("PC2-%d-FrameProc", mCameraId);
-    mFrameProcessor->run(threadName.string());
-
-    mFrameProcessor->registerListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
-                                      FRAME_PROCESSOR_LISTENER_MAX_ID,
-                                      /*listener*/this);
-
-    return OK;
-}
-
-ProCamera2Client::~ProCamera2Client() {
-}
-
-status_t ProCamera2Client::exclusiveTryLock() {
-    ATRACE_CALL();
-    ALOGV("%s", __FUNCTION__);
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-    SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
-
-    if (!mDevice.get()) return PERMISSION_DENIED;
-
-    if (!mExclusiveLock) {
-        mExclusiveLock = true;
-
-        if (mRemoteCallback != NULL) {
-            mRemoteCallback->onLockStatusChanged(
-                              IProCameraCallbacks::LOCK_ACQUIRED);
-        }
-
-        ALOGV("%s: exclusive lock acquired", __FUNCTION__);
-
-        return OK;
-    }
-
-    // TODO: have a PERMISSION_DENIED case for when someone else owns the lock
-
-    // don't allow recursive locking
-    ALOGW("%s: exclusive lock already exists - recursive locking is not"
-          "allowed", __FUNCTION__);
-
-    return ALREADY_EXISTS;
-}
-
-status_t ProCamera2Client::exclusiveLock() {
-    ATRACE_CALL();
-    ALOGV("%s", __FUNCTION__);
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-    SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
-
-    if (!mDevice.get()) return PERMISSION_DENIED;
-
-    /**
-     * TODO: this should asynchronously 'wait' until the lock becomes available
-     * if another client already has an exclusive lock.
-     *
-     * once we have proper sharing support this will need to do
-     * more than just return immediately
-     */
-    if (!mExclusiveLock) {
-        mExclusiveLock = true;
-
-        if (mRemoteCallback != NULL) {
-            mRemoteCallback->onLockStatusChanged(IProCameraCallbacks::LOCK_ACQUIRED);
-        }
-
-        ALOGV("%s: exclusive lock acquired", __FUNCTION__);
-
-        return OK;
-    }
-
-    // don't allow recursive locking
-    ALOGW("%s: exclusive lock already exists - recursive locking is not allowed"
-                                                                , __FUNCTION__);
-    return ALREADY_EXISTS;
-}
-
-status_t ProCamera2Client::exclusiveUnlock() {
-    ATRACE_CALL();
-    ALOGV("%s", __FUNCTION__);
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-    SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
-
-    // don't allow unlocking if we have no lock
-    if (!mExclusiveLock) {
-        ALOGW("%s: cannot unlock, no lock was held in the first place",
-              __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    mExclusiveLock = false;
-    if (mRemoteCallback != NULL ) {
-        mRemoteCallback->onLockStatusChanged(
-                                       IProCameraCallbacks::LOCK_RELEASED);
-    }
-    ALOGV("%s: exclusive lock released", __FUNCTION__);
-
-    return OK;
-}
-
-bool ProCamera2Client::hasExclusiveLock() {
-    Mutex::Autolock icl(mBinderSerializationLock);
-    return mExclusiveLock;
-}
-
-void ProCamera2Client::onExclusiveLockStolen() {
-    ALOGV("%s: ProClient lost exclusivity (id %d)",
-          __FUNCTION__, mCameraId);
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-    SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
-
-    if (mExclusiveLock && mRemoteCallback.get() != NULL) {
-        mRemoteCallback->onLockStatusChanged(
-                                       IProCameraCallbacks::LOCK_STOLEN);
-    }
-
-    mExclusiveLock = false;
-
-    //TODO: we should not need to detach the device, merely reset it.
-    detachDevice();
-}
-
-status_t ProCamera2Client::submitRequest(camera_metadata_t* request,
-                                         bool streaming) {
-    ATRACE_CALL();
-    ALOGV("%s", __FUNCTION__);
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-
-    if (!mDevice.get()) return DEAD_OBJECT;
-
-    if (!mExclusiveLock) {
-        return PERMISSION_DENIED;
-    }
-
-    CameraMetadata metadata(request);
-
-    if (!enforceRequestPermissions(metadata)) {
-        return PERMISSION_DENIED;
-    }
-
-    if (streaming) {
-        return mDevice->setStreamingRequest(metadata);
-    } else {
-        return mDevice->capture(metadata);
-    }
-
-    // unreachable. thx gcc for a useless warning
-    return OK;
-}
-
-status_t ProCamera2Client::cancelRequest(int requestId) {
-    (void)requestId;
-    ATRACE_CALL();
-    ALOGV("%s", __FUNCTION__);
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-
-    if (!mDevice.get()) return DEAD_OBJECT;
-
-    if (!mExclusiveLock) {
-        return PERMISSION_DENIED;
-    }
-
-    // TODO: implement
-    ALOGE("%s: not fully implemented yet", __FUNCTION__);
-    return INVALID_OPERATION;
-}
-
-status_t ProCamera2Client::deleteStream(int streamId) {
-    ATRACE_CALL();
-    ALOGV("%s (streamId = 0x%x)", __FUNCTION__, streamId);
-
-    status_t res;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-
-    if (!mDevice.get()) return DEAD_OBJECT;
-    mDevice->clearStreamingRequest();
-
-    status_t code;
-    if ((code = mDevice->waitUntilDrained()) != OK) {
-        ALOGE("%s: waitUntilDrained failed with code 0x%x", __FUNCTION__, code);
-    }
-
-    return mDevice->deleteStream(streamId);
-}
-
-status_t ProCamera2Client::createStream(int width, int height, int format,
-                      const sp<IGraphicBufferProducer>& bufferProducer,
-                      /*out*/
-                      int* streamId)
-{
-    if (streamId) {
-        *streamId = -1;
-    }
-
-    ATRACE_CALL();
-    ALOGV("%s (w = %d, h = %d, f = 0x%x)", __FUNCTION__, width, height, format);
-
-    status_t res;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-
-    if (!mDevice.get()) return DEAD_OBJECT;
-
-    sp<IBinder> binder;
-    sp<ANativeWindow> window;
-    if (bufferProducer != 0) {
-        binder = IInterface::asBinder(bufferProducer);
-        window = new Surface(bufferProducer);
-    }
-
-    return mDevice->createStream(window, width, height, format,
-                                 HAL_DATASPACE_UNKNOWN, streamId);
-}
-
-// Create a request object from a template.
-// -- Caller owns the newly allocated metadata
-status_t ProCamera2Client::createDefaultRequest(int templateId,
-                             /*out*/
-                              camera_metadata** request)
-{
-    ATRACE_CALL();
-    ALOGV("%s (templateId = 0x%x)", __FUNCTION__, templateId);
-
-    if (request) {
-        *request = NULL;
-    }
-
-    status_t res;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-
-    if (!mDevice.get()) return DEAD_OBJECT;
-
-    CameraMetadata metadata;
-    if ( (res = mDevice->createDefaultRequest(templateId, &metadata) ) == OK) {
-        *request = metadata.release();
-    }
-
-    return res;
-}
-
-status_t ProCamera2Client::getCameraInfo(int cameraId,
-                                         /*out*/
-                                         camera_metadata** info)
-{
-    if (cameraId != mCameraId) {
-        return INVALID_OPERATION;
-    }
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-
-    if (!mDevice.get()) return DEAD_OBJECT;
-
-    CameraMetadata deviceInfo = mDevice->info();
-    *info = deviceInfo.release();
-
-    return OK;
-}
-
-status_t ProCamera2Client::dump(int fd, const Vector<String16>& args) {
-    String8 result;
-    result.appendFormat("ProCamera2Client[%d] (%p) PID: %d, dump:\n",
-            mCameraId,
-            (getRemoteCallback() != NULL ?
-                    IInterface::asBinder(getRemoteCallback()).get() : NULL),
-            mClientPid);
-    result.append("  State:\n");
-    write(fd, result.string(), result.size());
-
-    // TODO: print dynamic/request section from most recent requests
-    mFrameProcessor->dump(fd, args);
-    return dumpDevice(fd, args);
-}
-
-// IProCameraUser interface
-
-void ProCamera2Client::detachDevice() {
-    if (mDevice == 0) return;
-
-    ALOGV("Camera %d: Stopping processors", mCameraId);
-
-    mFrameProcessor->removeListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
-                                    FRAME_PROCESSOR_LISTENER_MAX_ID,
-                                    /*listener*/this);
-    mFrameProcessor->requestExit();
-    ALOGV("Camera %d: Waiting for threads", mCameraId);
-    mFrameProcessor->join();
-    ALOGV("Camera %d: Disconnecting device", mCameraId);
-
-    // WORKAROUND: HAL refuses to disconnect while there's streams in flight
-    {
-        mDevice->clearStreamingRequest();
-
-        status_t code;
-        if ((code = mDevice->waitUntilDrained()) != OK) {
-            ALOGE("%s: waitUntilDrained failed with code 0x%x", __FUNCTION__,
-                  code);
-        }
-    }
-
-    Camera2ClientBase::detachDevice();
-}
-
-void ProCamera2Client::onResultAvailable(const CaptureResult& result) {
-    ATRACE_CALL();
-    ALOGV("%s", __FUNCTION__);
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-    SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
-
-    if (mRemoteCallback != NULL) {
-        CameraMetadata tmp(result.mMetadata);
-        camera_metadata_t* meta = tmp.release();
-        ALOGV("%s: meta = %p ", __FUNCTION__, meta);
-        mRemoteCallback->onResultReceived(result.mResultExtras.requestId, meta);
-        tmp.acquire(meta);
-    }
-}
-
-bool ProCamera2Client::enforceRequestPermissions(CameraMetadata& metadata) {
-
-    const int pid = IPCThreadState::self()->getCallingPid();
-    const int selfPid = getpid();
-    camera_metadata_entry_t entry;
-
-    /**
-     * Mixin default important security values
-     * - android.led.transmit = defaulted ON
-     */
-    CameraMetadata staticInfo = mDevice->info();
-    entry = staticInfo.find(ANDROID_LED_AVAILABLE_LEDS);
-    for(size_t i = 0; i < entry.count; ++i) {
-        uint8_t led = entry.data.u8[i];
-
-        switch(led) {
-            case ANDROID_LED_AVAILABLE_LEDS_TRANSMIT: {
-                uint8_t transmitDefault = ANDROID_LED_TRANSMIT_ON;
-                if (!metadata.exists(ANDROID_LED_TRANSMIT)) {
-                    metadata.update(ANDROID_LED_TRANSMIT,
-                                    &transmitDefault, 1);
-                }
-                break;
-            }
-        }
-    }
-
-    // We can do anything!
-    if (pid == selfPid) {
-        return true;
-    }
-
-    /**
-     * Permission check special fields in the request
-     * - android.led.transmit = android.permission.CAMERA_DISABLE_TRANSMIT
-     */
-    entry = metadata.find(ANDROID_LED_TRANSMIT);
-    if (entry.count > 0 && entry.data.u8[0] != ANDROID_LED_TRANSMIT_ON) {
-        String16 permissionString =
-            String16("android.permission.CAMERA_DISABLE_TRANSMIT_LED");
-        if (!checkCallingPermission(permissionString)) {
-            const int uid = IPCThreadState::self()->getCallingUid();
-            ALOGE("Permission Denial: "
-                  "can't disable transmit LED pid=%d, uid=%d", pid, uid);
-            return false;
-        }
-    }
-
-    return true;
-}
-
-} // namespace android
diff --git a/services/camera/libcameraservice/api_pro/ProCamera2Client.h b/services/camera/libcameraservice/api_pro/ProCamera2Client.h
deleted file mode 100644
index 7f5f6ac..0000000
--- a/services/camera/libcameraservice/api_pro/ProCamera2Client.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA_PROCAMERA2CLIENT_H
-#define ANDROID_SERVERS_CAMERA_PROCAMERA2CLIENT_H
-
-#include "CameraService.h"
-#include "common/FrameProcessorBase.h"
-#include "common/Camera2ClientBase.h"
-#include "device2/Camera2Device.h"
-#include "camera/CaptureResult.h"
-
-namespace android {
-
-class IMemory;
-/**
- * Implements the binder IProCameraUser API,
- * meant for HAL2-level private API access.
- */
-class ProCamera2Client :
-        public Camera2ClientBase<CameraService::ProClient>,
-        public camera2::FrameProcessorBase::FilteredListener
-{
-public:
-    /**
-     * IProCameraUser interface (see IProCameraUser for details)
-     */
-    virtual status_t      exclusiveTryLock();
-    virtual status_t      exclusiveLock();
-    virtual status_t      exclusiveUnlock();
-
-    virtual bool          hasExclusiveLock();
-
-    // Note that the callee gets a copy of the metadata.
-    virtual int           submitRequest(camera_metadata_t* metadata,
-                                        bool streaming = false);
-    virtual status_t      cancelRequest(int requestId);
-
-    virtual status_t      deleteStream(int streamId);
-
-    virtual status_t      createStream(
-            int width,
-            int height,
-            int format,
-            const sp<IGraphicBufferProducer>& bufferProducer,
-            /*out*/
-            int* streamId);
-
-    // Create a request object from a template.
-    // -- Caller owns the newly allocated metadata
-    virtual status_t      createDefaultRequest(int templateId,
-                                               /*out*/
-                                               camera_metadata** request);
-
-    // Get the static metadata for the camera
-    // -- Caller owns the newly allocated metadata
-    virtual status_t      getCameraInfo(int cameraId,
-                                        /*out*/
-                                        camera_metadata** info);
-
-    /**
-     * Interface used by CameraService
-     */
-
-    ProCamera2Client(const sp<CameraService>& cameraService,
-            const sp<IProCameraCallbacks>& remoteCallback,
-            const String16& clientPackageName,
-            int cameraId,
-            int cameraFacing,
-            int clientPid,
-            uid_t clientUid,
-            int servicePid);
-    virtual ~ProCamera2Client();
-
-    virtual status_t      initialize(CameraModule *module);
-
-    virtual status_t      dump(int fd, const Vector<String16>& args);
-
-    // Callbacks from camera service
-    virtual void onExclusiveLockStolen();
-
-    /**
-     * Interface used by independent components of ProCamera2Client.
-     */
-
-protected:
-    /** FilteredListener implementation **/
-    virtual void onResultAvailable(const CaptureResult& result);
-
-    virtual void          detachDevice();
-
-private:
-    /** IProCameraUser interface-related private members */
-
-    /** Preview callback related members */
-    sp<camera2::FrameProcessorBase> mFrameProcessor;
-    static const int32_t FRAME_PROCESSOR_LISTENER_MIN_ID = 0;
-    static const int32_t FRAME_PROCESSOR_LISTENER_MAX_ID = 0x7fffffffL;
-
-    /** Utility members */
-    bool enforceRequestPermissions(CameraMetadata& metadata);
-
-    // Whether or not we have an exclusive lock on the device
-    // - if no we can't modify the request queue.
-    // note that creating/deleting streams we own is still OK
-    bool mExclusiveLock;
-};
-
-}; // namespace android
-
-#endif
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 0415d67..c0c2314 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -337,7 +337,6 @@
     mRemoteCallback.clear();
 }
 
-template class Camera2ClientBase<CameraService::ProClient>;
 template class Camera2ClientBase<CameraService::Client>;
 template class Camera2ClientBase<CameraDeviceClientBase>;
 
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index eb21d55..168ea0a 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -36,7 +36,7 @@
     typedef typename TClientBase::TCamCallbacks TCamCallbacks;
 
     /**
-     * Base binder interface (see ICamera/IProCameraUser for details)
+     * Base binder interface (see ICamera/ICameraDeviceUser for details)
      */
     virtual status_t      connect(const sp<TCamCallbacks>& callbacks);
     virtual void          disconnect();
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 8764504..fe55b9e 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -100,14 +100,14 @@
             nsecs_t timeout) = 0;
 
     /**
-     * Create an output stream of the requested size, format, and dataspace
+     * Create an output stream of the requested size, format, rotation and dataspace
      *
      * For HAL_PIXEL_FORMAT_BLOB formats, the width and height should be the
      * logical dimensions of the buffer, not the number of bytes.
      */
     virtual status_t createStream(sp<ANativeWindow> consumer,
             uint32_t width, uint32_t height, int format,
-            android_dataspace dataSpace, int *id) = 0;
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id) = 0;
 
     /**
      * Create an input reprocess stream that uses buffers from an existing
diff --git a/services/camera/libcameraservice/common/CameraModule.cpp b/services/camera/libcameraservice/common/CameraModule.cpp
index 5f767ad..e5b12ae 100644
--- a/services/camera/libcameraservice/common/CameraModule.cpp
+++ b/services/camera/libcameraservice/common/CameraModule.cpp
@@ -54,14 +54,12 @@
     }
 
     mModule = module;
-    for (int i = 0; i < MAX_CAMERAS_PER_MODULE; i++) {
-        mCameraInfoCached[i] = false;
-    }
+    mCameraInfoMap.setCapacity(getNumberOfCameras());
 }
 
 int CameraModule::getCameraInfo(int cameraId, struct camera_info *info) {
     Mutex::Autolock lock(mCameraInfoLock);
-    if (cameraId < 0 || cameraId >= MAX_CAMERAS_PER_MODULE) {
+    if (cameraId < 0) {
         ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
         return -EINVAL;
     }
@@ -72,21 +70,28 @@
         return mModule->get_camera_info(cameraId, info);
     }
 
-    camera_info &wrappedInfo = mCameraInfo[cameraId];
-    if (!mCameraInfoCached[cameraId]) {
-        camera_info rawInfo;
+    ssize_t index = mCameraInfoMap.indexOfKey(cameraId);
+    if (index == NAME_NOT_FOUND) {
+        // Get camera info from raw module and cache it
+        camera_info rawInfo, cameraInfo;
         int ret = mModule->get_camera_info(cameraId, &rawInfo);
         if (ret != 0) {
             return ret;
         }
-        CameraMetadata &m = mCameraCharacteristics[cameraId];
+        CameraMetadata m;
         m = rawInfo.static_camera_characteristics;
         deriveCameraCharacteristicsKeys(rawInfo.device_version, m);
-        wrappedInfo = rawInfo;
-        wrappedInfo.static_camera_characteristics = m.getAndLock();
-        mCameraInfoCached[cameraId] = true;
+        mCameraCharacteristicsMap.add(cameraId, m);
+        cameraInfo = rawInfo;
+        cameraInfo.static_camera_characteristics =
+                mCameraCharacteristicsMap.valueFor(cameraId).getAndLock();
+        mCameraInfoMap.add(cameraId, cameraInfo);
+        index = mCameraInfoMap.indexOfKey(cameraId);
     }
-    *info = wrappedInfo;
+
+    assert(index != NAME_NOT_FOUND);
+    // return the cached camera info
+    *info = mCameraInfoMap[index];
     return 0;
 }
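The KeyedVector-based rewrite above replaces the fixed MAX_CAMERAS_PER_MODULE arrays with a lazily populated per-camera cache: the wrapped camera_info is derived once on first lookup and served from the map afterwards. A standalone sketch of that shape follows; the types are hypothetical and std::map stands in for KeyedVector.

#include <iostream>
#include <map>
#include <mutex>
#include <string>

struct Info { std::string characteristics; };

class InfoCache {
public:
    Info get(int cameraId) {
        std::lock_guard<std::mutex> lock(mLock);
        auto it = mCache.find(cameraId);
        if (it == mCache.end()) {
            // First lookup: build the derived info once and cache it.
            Info derived{ "derived-characteristics-for-" + std::to_string(cameraId) };
            it = mCache.emplace(cameraId, derived).first;
        }
        return it->second;
    }

private:
    std::mutex mLock;
    std::map<int, Info> mCache;   // no fixed per-module camera count bound
};

int main() {
    InfoCache cache;
    std::cout << cache.get(0).characteristics << "\n";   // built and cached
    std::cout << cache.get(0).characteristics << "\n";   // served from cache
    return 0;
}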
 
@@ -99,10 +104,6 @@
     return mModule->open_legacy(&mModule->common, id, halVersion, device);
 }
 
-const hw_module_t* CameraModule::getRawModule() {
-    return &mModule->common;
-}
-
 int CameraModule::getNumberOfCameras() {
     return mModule->get_number_of_cameras();
 }
@@ -125,7 +126,6 @@
     return mModule->set_torch_mode(camera_id, enable);
 }
 
-
 status_t CameraModule::filterOpenErrorCode(status_t err) {
     switch(err) {
         case NO_ERROR:
@@ -139,6 +139,25 @@
     return -ENODEV;
 }
 
+uint16_t CameraModule::getModuleApiVersion() {
+    return mModule->common.module_api_version;
+}
+
+const char* CameraModule::getModuleName() {
+    return mModule->common.name;
+}
+
+uint16_t CameraModule::getHalApiVersion() {
+    return mModule->common.hal_api_version;
+}
+
+const char* CameraModule::getModuleAuthor() {
+    return mModule->common.author;
+}
+
+void* CameraModule::getDso() {
+    return mModule->common.dso;
+}
 
 }; // namespace android
 
diff --git a/services/camera/libcameraservice/common/CameraModule.h b/services/camera/libcameraservice/common/CameraModule.h
index 16207aa..e285b21 100644
--- a/services/camera/libcameraservice/common/CameraModule.h
+++ b/services/camera/libcameraservice/common/CameraModule.h
@@ -20,10 +20,7 @@
 #include <hardware/camera.h>
 #include <camera/CameraMetadata.h>
 #include <utils/Mutex.h>
-
-/* This needs to be increased if we can have more cameras */
-#define MAX_CAMERAS_PER_MODULE 2
-
+#include <utils/KeyedVector.h>
 
 namespace android {
 /**
@@ -37,7 +34,6 @@
 public:
     CameraModule(camera_module_t *module);
 
-    const hw_module_t* getRawModule();
     int getCameraInfo(int cameraId, struct camera_info *info);
     int getNumberOfCameras(void);
     int open(const char* id, struct hw_device_t** device);
@@ -46,6 +42,12 @@
     bool isVendorTagDefined();
     void getVendorTagOps(vendor_tag_ops_t* ops);
     int setTorchMode(const char* camera_id, bool enable);
+    uint16_t getModuleApiVersion();
+    const char* getModuleName();
+    uint16_t getHalApiVersion();
+    const char* getModuleAuthor();
+    // Only used by CameraModuleFixture native test. Do NOT use elsewhere.
+    void *getDso();
 
 private:
     // Derive camera characteristics keys defined after HAL device version
@@ -53,9 +55,8 @@
     status_t filterOpenErrorCode(status_t err);
 
     camera_module_t *mModule;
-    CameraMetadata mCameraCharacteristics[MAX_CAMERAS_PER_MODULE];
-    camera_info mCameraInfo[MAX_CAMERAS_PER_MODULE];
-    bool mCameraInfoCached[MAX_CAMERAS_PER_MODULE];
+    KeyedVector<int, camera_info> mCameraInfoMap;
+    KeyedVector<int, CameraMetadata> mCameraCharacteristicsMap;
     Mutex mCameraInfoLock;
 };
 
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
index f5ebbf8..7f14cd4 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
@@ -97,7 +97,7 @@
         if (res != OK) return res;
 
         int rc = OK;
-        if (module->getRawModule()->module_api_version >= CAMERA_MODULE_API_VERSION_2_3 &&
+        if (module->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_3 &&
             info.device_version > CAMERA_DEVICE_API_VERSION_1_0) {
             // Open higher version camera device as HAL1.0 device.
             rc = module->openLegacy(mName.string(),
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
index ee862a2..878986b 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ b/services/camera/libcameraservice/device2/Camera2Device.cpp
@@ -242,7 +242,7 @@
 
 status_t Camera2Device::createStream(sp<ANativeWindow> consumer,
         uint32_t width, uint32_t height, int format,
-        android_dataspace /*dataSpace*/, int *id) {
+        android_dataspace /*dataSpace*/, camera3_stream_rotation_t rotation, int *id) {
     ATRACE_CALL();
     status_t res;
     ALOGV("%s: E", __FUNCTION__);
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
index e4c2856..9b32fa6 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -58,7 +58,7 @@
     virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout);
     virtual status_t createStream(sp<ANativeWindow> consumer,
             uint32_t width, uint32_t height, int format,
-            android_dataspace dataSpace, int *id);
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id);
     virtual status_t createReprocessStreamFromStream(int outputId, int *id);
     virtual status_t getStreamInfo(int id,
             uint32_t *width, uint32_t *height, uint32_t *format);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 529d249..8236788 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -802,12 +802,12 @@
 
 status_t Camera3Device::createStream(sp<ANativeWindow> consumer,
         uint32_t width, uint32_t height, int format, android_dataspace dataSpace,
-        int *id) {
+        camera3_stream_rotation_t rotation, int *id) {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
-    ALOGV("Camera %d: Creating new stream %d: %d x %d, format %d, dataspace %d",
-            mId, mNextStreamId, width, height, format, dataSpace);
+    ALOGV("Camera %d: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d",
+            mId, mNextStreamId, width, height, format, dataSpace, rotation);
 
     status_t res;
     bool wasActive = false;
@@ -847,10 +847,10 @@
         }
 
         newStream = new Camera3OutputStream(mNextStreamId, consumer,
-                width, height, jpegBufferSize, format, dataSpace);
+                width, height, jpegBufferSize, format, dataSpace, rotation);
     } else {
         newStream = new Camera3OutputStream(mNextStreamId, consumer,
-                width, height, format, dataSpace);
+                width, height, format, dataSpace, rotation);
     }
     newStream->setStatusTracker(mStatusTracker);
 
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index e2ad1fa..a77548d 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -96,7 +96,7 @@
     // stream, reconfiguring device, and unpausing.
     virtual status_t createStream(sp<ANativeWindow> consumer,
             uint32_t width, uint32_t height, int format,
-            android_dataspace dataSpace, int *id);
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id);
     virtual status_t createInputStream(
             uint32_t width, uint32_t height, int format,
             int *id);
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index 6201484..01edfff 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -28,7 +28,7 @@
 
 Camera3DummyStream::Camera3DummyStream(int id) :
         Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, DUMMY_WIDTH, DUMMY_HEIGHT,
-                /*maxSize*/0, DUMMY_FORMAT, DUMMY_DATASPACE) {
+                /*maxSize*/0, DUMMY_FORMAT, DUMMY_DATASPACE, DUMMY_ROTATION) {
 
 }
 
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 7f52d84..d023c57 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -76,6 +76,7 @@
     static const int DUMMY_HEIGHT = 240;
     static const int DUMMY_FORMAT = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
     static const android_dataspace DUMMY_DATASPACE = HAL_DATASPACE_UNKNOWN;
+    static const camera3_stream_rotation_t DUMMY_ROTATION = CAMERA3_STREAM_ROTATION_0;
     static const uint32_t DUMMY_USAGE = GRALLOC_USAGE_HW_COMPOSER;
 
     /**
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index ff0acbb..8696413 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -31,9 +31,9 @@
 
 Camera3IOStreamBase::Camera3IOStreamBase(int id, camera3_stream_type_t type,
         uint32_t width, uint32_t height, size_t maxSize, int format,
-        android_dataspace dataSpace) :
+        android_dataspace dataSpace, camera3_stream_rotation_t rotation) :
         Camera3Stream(id, type,
-                width, height, maxSize, format, dataSpace),
+                width, height, maxSize, format, dataSpace, rotation),
         mTotalBufferCount(0),
         mHandoutTotalBufferCount(0),
         mHandoutOutputBufferCount(0),
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index 83d4350..abcf2b1 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -34,7 +34,7 @@
   protected:
     Camera3IOStreamBase(int id, camera3_stream_type_t type,
             uint32_t width, uint32_t height, size_t maxSize, int format,
-            android_dataspace dataSpace);
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation);
 
   public:
 
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 87907bf..6bf671e 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -29,8 +29,8 @@
 
 Camera3InputStream::Camera3InputStream(int id,
         uint32_t width, uint32_t height, int format) :
-        Camera3IOStreamBase(id, CAMERA3_STREAM_INPUT, width, height,
-                            /*maxSize*/0, format, HAL_DATASPACE_UNKNOWN) {
+        Camera3IOStreamBase(id, CAMERA3_STREAM_INPUT, width, height, /*maxSize*/0,
+                            format, HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0) {
 
     if (format == HAL_PIXEL_FORMAT_BLOB) {
         ALOGE("%s: Bad format, BLOB not supported", __FUNCTION__);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 103d90b..0c739e9 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -34,9 +34,9 @@
 Camera3OutputStream::Camera3OutputStream(int id,
         sp<ANativeWindow> consumer,
         uint32_t width, uint32_t height, int format,
-        android_dataspace dataSpace) :
+        android_dataspace dataSpace, camera3_stream_rotation_t rotation) :
         Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height,
-                            /*maxSize*/0, format, dataSpace),
+                            /*maxSize*/0, format, dataSpace, rotation),
         mConsumer(consumer),
         mTransform(0),
         mTraceFirstBuffer(true) {
@@ -50,9 +50,9 @@
 Camera3OutputStream::Camera3OutputStream(int id,
         sp<ANativeWindow> consumer,
         uint32_t width, uint32_t height, size_t maxSize, int format,
-        android_dataspace dataSpace) :
+        android_dataspace dataSpace, camera3_stream_rotation_t rotation) :
         Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height, maxSize,
-                            format, dataSpace),
+                            format, dataSpace, rotation),
         mConsumer(consumer),
         mTransform(0),
         mTraceFirstBuffer(true) {
@@ -72,10 +72,11 @@
 Camera3OutputStream::Camera3OutputStream(int id, camera3_stream_type_t type,
                                          uint32_t width, uint32_t height,
                                          int format,
-                                         android_dataspace dataSpace) :
+                                         android_dataspace dataSpace,
+                                         camera3_stream_rotation_t rotation) :
         Camera3IOStreamBase(id, type, width, height,
                             /*maxSize*/0,
-                            format, dataSpace),
+                            format, dataSpace, rotation),
         mTransform(0) {
 
     // Subclasses expected to initialize mConsumer themselves
@@ -156,33 +157,9 @@
     ALOG_ASSERT(output, "Expected output to be true");
 
     status_t res;
-    sp<Fence> releaseFence;
 
-    /**
-     * Fence management - calculate Release Fence
-     */
-    if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
-        if (buffer.release_fence != -1) {
-            ALOGE("%s: Stream %d: HAL should not set release_fence(%d) when "
-                  "there is an error", __FUNCTION__, mId, buffer.release_fence);
-            close(buffer.release_fence);
-        }
-
-        /**
-         * Reassign release fence as the acquire fence in case of error
-         */
-        releaseFence = new Fence(buffer.acquire_fence);
-    } else {
-        res = native_window_set_buffers_timestamp(mConsumer.get(), timestamp);
-        if (res != OK) {
-            ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
-                  __FUNCTION__, mId, strerror(-res), res);
-            return res;
-        }
-
-        releaseFence = new Fence(buffer.release_fence);
-    }
-
+    // Fence management - always honor release fence from HAL
+    sp<Fence> releaseFence = new Fence(buffer.release_fence);
     int anwReleaseFence = releaseFence->dup();
 
     /**
@@ -216,6 +193,13 @@
             mTraceFirstBuffer = false;
         }
 
+        res = native_window_set_buffers_timestamp(mConsumer.get(), timestamp);
+        if (res != OK) {
+            ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
+                  __FUNCTION__, mId, strerror(-res), res);
+            return res;
+        }
+
         res = currentConsumer->queueBuffer(currentConsumer.get(),
                 container_of(buffer.buffer, ANativeWindowBuffer, handle),
                 anwReleaseFence);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index f016d60..12b2ebb 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -40,7 +40,7 @@
      */
     Camera3OutputStream(int id, sp<ANativeWindow> consumer,
             uint32_t width, uint32_t height, int format,
-            android_dataspace dataSpace);
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation);
 
     /**
      * Set up a stream for formats that have a variable buffer size for the same
@@ -48,7 +48,7 @@
      */
     Camera3OutputStream(int id, sp<ANativeWindow> consumer,
             uint32_t width, uint32_t height, size_t maxSize, int format,
-            android_dataspace dataSpace);
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation);
 
     virtual ~Camera3OutputStream();
 
@@ -67,7 +67,7 @@
   protected:
     Camera3OutputStream(int id, camera3_stream_type_t type,
             uint32_t width, uint32_t height, int format,
-            android_dataspace dataSpace);
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation);
 
     /**
      * Note that we release the lock briefly in this function
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index f829741..4acbce3 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -47,7 +47,7 @@
 Camera3Stream::Camera3Stream(int id,
         camera3_stream_type type,
         uint32_t width, uint32_t height, size_t maxSize, int format,
-        android_dataspace dataSpace) :
+        android_dataspace dataSpace, camera3_stream_rotation_t rotation) :
     camera3_stream(),
     mId(id),
     mName(String8::format("Camera3Stream[%d]", id)),
@@ -60,6 +60,7 @@
     camera3_stream::height = height;
     camera3_stream::format = format;
     camera3_stream::data_space = dataSpace;
+    camera3_stream::rotation = rotation;
     camera3_stream::usage = 0;
     camera3_stream::max_buffers = 0;
     camera3_stream::priv = NULL;
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 72f3ee9..aba27fe 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -266,7 +266,7 @@
 
     Camera3Stream(int id, camera3_stream_type type,
             uint32_t width, uint32_t height, size_t maxSize, int format,
-            android_dataspace dataSpace);
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation);
 
     /**
      * Interface to be implemented by derived classes
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
index 5bf7a4c..10d7f2e 100644
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
@@ -115,7 +115,7 @@
         Camera3OutputStream(id, CAMERA3_STREAM_BIDIRECTIONAL,
                             width, height,
                             HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
-                            HAL_DATASPACE_UNKNOWN),
+                            HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0),
         mDepth(bufferCount) {
 
     sp<IGraphicBufferProducer> producer;
diff --git a/services/camera/libcameraservice/utils/AutoConditionLock.cpp b/services/camera/libcameraservice/utils/AutoConditionLock.cpp
new file mode 100644
index 0000000..c8ee965
--- /dev/null
+++ b/services/camera/libcameraservice/utils/AutoConditionLock.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AutoConditionLock.h"
+
+namespace android {
+
+WaitableMutexWrapper::WaitableMutexWrapper(Mutex* mutex) : mMutex{mutex}, mState{false} {}
+
+WaitableMutexWrapper::~WaitableMutexWrapper() {}
+
+// Locks manager-owned mutex
+AutoConditionLock::AutoConditionLock(const std::shared_ptr<WaitableMutexWrapper>& manager) :
+        mManager{manager}, mAutoLock{manager->mMutex} {}
+
+// Unlocks manager-owned mutex
+AutoConditionLock::~AutoConditionLock() {
+    // Unset the condition and wake everyone up before releasing lock
+    mManager->mState = false;
+    mManager->mCondition.broadcast();
+}
+
+std::unique_ptr<AutoConditionLock> AutoConditionLock::waitAndAcquire(
+        const std::shared_ptr<WaitableMutexWrapper>& manager, nsecs_t waitTime) {
+
+    if (manager == nullptr || manager->mMutex == nullptr) {
+        // Bad input, return null
+        return std::unique_ptr<AutoConditionLock>{nullptr};
+    }
+
+    // Acquire scoped lock
+    std::unique_ptr<AutoConditionLock> scopedLock(new AutoConditionLock(manager));
+
+    // Figure out what time in the future we should hit the timeout
+    nsecs_t failTime = systemTime(SYSTEM_TIME_MONOTONIC) + waitTime;
+
+    // Wait until we timeout, or success
+    while (manager->mState) {
+        status_t ret = manager->mCondition.waitRelative(*(manager->mMutex), waitTime);
+        if (ret != NO_ERROR) {
+            // Timed out or the wait failed, return null
+            return std::unique_ptr<AutoConditionLock>{nullptr};
+        }
+        waitTime = failTime - systemTime(SYSTEM_TIME_MONOTONIC);
+    }
+
+    // Set the condition and return
+    manager->mState = true;
+    return scopedLock;
+}
+
+std::unique_ptr<AutoConditionLock> AutoConditionLock::waitAndAcquire(
+        const std::shared_ptr<WaitableMutexWrapper>& manager) {
+
+    if (manager == nullptr || manager->mMutex == nullptr) {
+        // Bad input, return null
+        return std::unique_ptr<AutoConditionLock>{nullptr};
+    }
+
+    // Acquire scoped lock
+    std::unique_ptr<AutoConditionLock> scopedLock(new AutoConditionLock(manager));
+
+    // Wait until we timeout, or success
+    while (manager->mState) {
+        status_t ret = manager->mCondition.wait(*(manager->mMutex));
+        if (ret != NO_ERROR) {
+            // The wait failed, return null
+            return std::unique_ptr<AutoConditionLock>{nullptr};
+        }
+    }
+
+    // Set the condition and return
+    manager->mState = true;
+    return scopedLock;
+}
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/utils/AutoConditionLock.h b/services/camera/libcameraservice/utils/AutoConditionLock.h
new file mode 100644
index 0000000..9a3eafc
--- /dev/null
+++ b/services/camera/libcameraservice/utils/AutoConditionLock.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_SERVICE_UTILS_SCOPED_CONDITION_H
+#define ANDROID_SERVICE_UTILS_SCOPED_CONDITION_H
+
+#include <utils/Timers.h>
+#include <utils/Condition.h>
+#include <utils/Errors.h>
+#include <utils/Mutex.h>
+
+#include <memory>
+
+namespace android {
+
+/**
+ * WaitableMutexWrapper can be used with AutoConditionLock to construct scoped locks for the
+ * wrapped Mutex with timeouts for lock acquisition.
+ */
+class WaitableMutexWrapper {
+    friend class AutoConditionLock;
+public:
+    /**
+     * Construct the WaitableMutexWrapper with the given Mutex.
+     */
+    WaitableMutexWrapper(Mutex* mutex);
+
+    virtual ~WaitableMutexWrapper();
+private:
+    Mutex* mMutex;
+    bool mState;
+    Condition mCondition;
+};
+
+/**
+ * AutoConditionLock is a scoped lock similar to Mutex::Autolock, but allows timeouts to be
+ * specified for lock acquisition.
+ *
+ * AutoConditionLock is used with a WaitableMutexWrapper to lock/unlock the WaitableMutexWrapper's
+ * wrapped Mutex, and wait/set/signal the WaitableMutexWrapper's wrapped condition. To use this,
+ * call AutoConditionLock::waitAndAcquire to get an instance.  This will:
+ *      - Lock the given WaitableMutexWrapper's mutex.
+ *      - Wait for the WaitableMutexWrapper's condition to become false, or timeout.
+ *      - Set the WaitableMutexWrapper's condition to true.
+ *
+ * When the AutoConditionLock goes out of scope and is destroyed, this will:
+ *      - Set the WaitableMutexWrapper's condition to false.
+ *      - Signal threads waiting on this condition to wakeup.
+ *      - Release WaitableMutexWrapper's mutex.
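+ *
+ * A minimal usage sketch (illustrative only, not part of this change; the mutex and timeout
+ * names/values are assumptions):
+ *
+ *     Mutex stateLock;
+ *     auto wrapper = std::make_shared<WaitableMutexWrapper>(&stateLock);
+ *     {
+ *         auto lock = AutoConditionLock::waitAndAcquire(wrapper, /*waitTime*/ 500000000);
+ *         if (lock == nullptr) {
+ *             // Timed out or bad input; the mutex is not held
+ *         } else {
+ *             // The mutex is held and the condition is set until 'lock' goes out of scope
+ *         }
+ *     }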
+ */
+class AutoConditionLock final {
+public:
+    AutoConditionLock() = delete;
+    AutoConditionLock(const AutoConditionLock& other) = delete;
+    AutoConditionLock & operator=(const AutoConditionLock&) = delete;
+
+    ~AutoConditionLock();
+
+    /**
+     * Make a new AutoConditionLock from a given WaitableMutexWrapper, waiting up to waitTime
+     * nanoseconds to acquire the WaitableMutexWrapper's wrapped lock.
+     *
+     * Return an empty unique_ptr if this fails, or a timeout occurs.
+     */
+    static std::unique_ptr<AutoConditionLock> waitAndAcquire(
+            const std::shared_ptr<WaitableMutexWrapper>& manager, nsecs_t waitTime);
+
+    /**
+     * Make a new AutoConditionLock from a given WaitableMutexWrapper, waiting indefinitely to
+     * acquire the WaitableMutexWrapper's wrapped lock.
+     *
+     * Return an empty unique_ptr if this fails.
+     */
+    static std::unique_ptr<AutoConditionLock> waitAndAcquire(
+            const std::shared_ptr<WaitableMutexWrapper>& manager);
+private:
+    AutoConditionLock(const std::shared_ptr<WaitableMutexWrapper>& manager);
+
+    std::shared_ptr<WaitableMutexWrapper> mManager;
+    Mutex::Autolock mAutoLock;
+};
+
+}; // namespace android
+
+#endif // ANDROID_SERVICE_UTILS_SCOPED_CONDITION_H
diff --git a/services/camera/libcameraservice/utils/ClientManager.h b/services/camera/libcameraservice/utils/ClientManager.h
new file mode 100644
index 0000000..ad5486d
--- /dev/null
+++ b/services/camera/libcameraservice/utils/ClientManager.h
@@ -0,0 +1,539 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVICE_UTILS_EVICTION_POLICY_MANAGER_H
+#define ANDROID_SERVICE_UTILS_EVICTION_POLICY_MANAGER_H
+
+#include <utils/Mutex.h>
+
+#include <algorithm>
+#include <utility>
+#include <vector>
+#include <set>
+#include <map>
+#include <memory>
+
+namespace android {
+namespace resource_policy {
+
+// --------------------------------------------------------------------------------
+
+/**
+ * The ClientDescriptor class is a container for a given key/value pair identifying a shared
+ * resource, and the corresponding cost, priority, owner ID, and conflicting keys list used
+ * in determining eviction behavior.
+ *
+ * Aside from the priority, these values are immutable once the ClientDescriptor has been
+ * constructed.
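+ *
+ * A construction sketch (illustrative only, not part of this change; the key/value types and
+ * numbers are arbitrary):
+ *
+ *     auto desc = std::make_shared<ClientDescriptor<int, int>>(
+ *             /*key*/ 0, /*value*/ 0, /*cost*/ 50, std::set<int>(), /*priority*/ 1,
+ *             /*ownerId*/ 1000);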
+ */
+template<class KEY, class VALUE>
+class ClientDescriptor final {
+public:
+    ClientDescriptor(const KEY& key, const VALUE& value, int32_t cost,
+            const std::set<KEY>& conflictingKeys, int32_t priority, int32_t ownerId);
+    ClientDescriptor(KEY&& key, VALUE&& value, int32_t cost, std::set<KEY>&& conflictingKeys,
+            int32_t priority, int32_t ownerId);
+
+    ~ClientDescriptor();
+
+    /**
+     * Return the key for this descriptor.
+     */
+    const KEY& getKey() const;
+
+    /**
+     * Return the value for this descriptor.
+     */
+    const VALUE& getValue() const;
+
+    /**
+     * Return the cost for this descriptor.
+     */
+    int32_t getCost() const;
+
+    /**
+     * Return the priority for this descriptor.
+     */
+    int32_t getPriority() const;
+
+    /**
+     * Return the owner ID for this descriptor.
+     */
+    int32_t getOwnerId() const;
+
+    /**
+     * Return true if the given key is in this descriptor's conflicting keys list.
+     */
+    bool isConflicting(const KEY& key) const;
+
+    /**
+     * Return the set of all conflicting keys for this descriptor.
+     */
+    std::set<KEY> getConflicting() const;
+
+    /**
+     * Set the priority for this descriptor.
+     */
+    void setPriority(int32_t priority);
+
+    // This class is ordered by key
+    template<class K, class V>
+    friend bool operator < (const ClientDescriptor<K, V>& a, const ClientDescriptor<K, V>& b);
+
+private:
+    KEY mKey;
+    VALUE mValue;
+    int32_t mCost;
+    std::set<KEY> mConflicting;
+    int32_t mPriority;
+    int32_t mOwnerId;
+}; // class ClientDescriptor
+
+template<class K, class V>
+bool operator < (const ClientDescriptor<K, V>& a, const ClientDescriptor<K, V>& b) {
+    return a.mKey < b.mKey;
+}
+
+template<class KEY, class VALUE>
+ClientDescriptor<KEY, VALUE>::ClientDescriptor(const KEY& key, const VALUE& value, int32_t cost,
+        const std::set<KEY>& conflictingKeys, int32_t priority, int32_t ownerId) : mKey{key},
+        mValue{value}, mCost{cost}, mConflicting{conflictingKeys}, mPriority{priority},
+        mOwnerId{ownerId} {}
+
+template<class KEY, class VALUE>
+ClientDescriptor<KEY, VALUE>::ClientDescriptor(KEY&& key, VALUE&& value, int32_t cost,
+        std::set<KEY>&& conflictingKeys, int32_t priority, int32_t ownerId) :
+        mKey{std::forward<KEY>(key)}, mValue{std::forward<VALUE>(value)}, mCost{cost},
+        mConflicting{std::forward<std::set<KEY>>(conflictingKeys)}, mPriority{priority},
+        mOwnerId{ownerId} {}
+
+template<class KEY, class VALUE>
+ClientDescriptor<KEY, VALUE>::~ClientDescriptor() {}
+
+template<class KEY, class VALUE>
+const KEY& ClientDescriptor<KEY, VALUE>::getKey() const {
+    return mKey;
+}
+
+template<class KEY, class VALUE>
+const VALUE& ClientDescriptor<KEY, VALUE>::getValue() const {
+    return mValue;
+}
+
+template<class KEY, class VALUE>
+int32_t ClientDescriptor<KEY, VALUE>::getCost() const {
+    return mCost;
+}
+
+template<class KEY, class VALUE>
+int32_t ClientDescriptor<KEY, VALUE>::getPriority() const {
+    return mPriority;
+}
+
+template<class KEY, class VALUE>
+int32_t ClientDescriptor<KEY, VALUE>::getOwnerId() const {
+    return mOwnerId;
+}
+
+template<class KEY, class VALUE>
+bool ClientDescriptor<KEY, VALUE>::isConflicting(const KEY& key) const {
+    if (key == mKey) return true;
+    for (const auto& x : mConflicting) {
+        if (key == x) return true;
+    }
+    return false;
+}
+
+template<class KEY, class VALUE>
+std::set<KEY> ClientDescriptor<KEY, VALUE>::getConflicting() const {
+    return mConflicting;
+}
+
+template<class KEY, class VALUE>
+void ClientDescriptor<KEY, VALUE>::setPriority(int32_t priority) {
+    mPriority = priority;
+}
+
+// --------------------------------------------------------------------------------
+
+/**
+ * The ClientManager class wraps an LRU-ordered list of active clients and implements eviction
+ * behavior for handling shared resource access.
+ *
+ * When adding a new descriptor, eviction behavior is as follows:
+ *   - Keys are unique; adding a descriptor with the same key as an existing descriptor will
+ *     result in the lower-priority of the two being removed.  Priority ties result in the
+ *     LRU descriptor being evicted (this means the incoming descriptor will be added in this
+ *     case).
+ *   - Any descriptors with keys that are in the incoming descriptor's 'conflicting keys' list
+ *     will be removed if they have an equal or lower priority than the incoming descriptor;
+ *     if any have a higher priority, the incoming descriptor is removed instead.
+ *   - If the sum of all descriptors' costs, including the incoming descriptor's, is more than
+ *     the max cost allowed for this ClientManager, descriptors with non-zero cost, equal or lower
+ *     priority, and a different owner will be evicted in LRU order until either the cost is less
+ *     than the max cost, or all descriptors meeting this criteria have been evicted and the
+ *     incoming descriptor has the highest priority.  Otherwise, the incoming descriptor is
+ *     removed instead.
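+ *
+ * A usage sketch (illustrative only, not part of this change; descA and descB are shared_ptrs
+ * built as in the ClientDescriptor example above, and ClientManager is subclassed here because
+ * its destructor is protected):
+ *
+ *     struct MyClientManager : public ClientManager<int, int> {};
+ *
+ *     MyClientManager manager;                     // maximum cost defaults to 100
+ *     manager.addAndEvict(descA);                  // e.g. cost 60
+ *     auto evicted = manager.addAndEvict(descB);   // e.g. cost 60, higher priority, other owner
+ *     // 'evicted' now holds descA, removed to keep the total cost within the maximum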
+ */
+template<class KEY, class VALUE>
+class ClientManager {
+public:
+    // The default maximum "cost" allowed before evicting
+    static constexpr int32_t DEFAULT_MAX_COST = 100;
+
+    ClientManager();
+    ClientManager(int32_t totalCost);
+
+    /**
+     * Add a given ClientDescriptor to the managed list.  ClientDescriptors for clients that
+     * are evicted by this action are returned in a vector.
+     *
+     * This may return the ClientDescriptor passed in if it would be evicted.
+     */
+    std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> addAndEvict(
+            const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client);
+
+    /**
+     * Given a map containing owner (pid) -> priority mappings, update the priority of each
+     * ClientDescriptor with an owner in this mapping.
+     */
+    void updatePriorities(const std::map<int32_t,int32_t>& ownerPriorityList);
+
+    /**
+     * Remove all ClientDescriptors.
+     */
+    void removeAll();
+
+    /**
+     * Remove and return the ClientDescriptor with a given key.
+     */
+    std::shared_ptr<ClientDescriptor<KEY, VALUE>> remove(const KEY& key);
+
+    /**
+     * Remove the given ClientDescriptor.
+     */
+    void remove(const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& value);
+
+    /**
+     * Return a vector of the ClientDescriptors that would be evicted by adding the given
+     * ClientDescriptor.
+     *
+     * This may return the ClientDescriptor passed in if it would be evicted.
+     */
+    std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> wouldEvict(
+            const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client) const;
+
+    /**
+     * Return a vector of active ClientDescriptors that prevent this client from being added.
+     */
+    std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> getIncompatibleClients(
+            const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client) const;
+
+    /**
+     * Return a vector containing all currently active ClientDescriptors.
+     */
+    std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> getAll() const;
+
+    /**
+     * Return a vector containing all keys of currently active ClientDescriptors.
+     */
+    std::vector<KEY> getAllKeys() const;
+
+    /**
+     * Return a vector of the owner tags of all currently active ClientDescriptors (duplicates
+     * will be removed).
+     */
+    std::vector<int32_t> getAllOwners() const;
+
+    /**
+     * Return the ClientDescriptor corresponding to the given key, or an empty shared pointer
+     * if none exists.
+     */
+    std::shared_ptr<ClientDescriptor<KEY, VALUE>> get(const KEY& key) const;
+
+protected:
+    ~ClientManager();
+
+private:
+
+    /**
+     * Return a vector of the ClientDescriptors that would be evicted by adding the given
+     * ClientDescriptor.  If returnIncompatibleClients is set to true, instead, return the
+     * vector of ClientDescriptors that are higher priority than the incoming client and
+     * either conflict with this client, or contribute to the resource cost if that would
+     * prevent the incoming client from being added.
+     *
+     * This may return the ClientDescriptor passed in.
+     */
+    std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> wouldEvictLocked(
+            const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client,
+            bool returnIncompatibleClients = false) const;
+
+    int64_t getCurrentCostLocked() const;
+
+    mutable Mutex mLock;
+    int32_t mMaxCost;
+    // LRU ordered, most recent at end
+    std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> mClients;
+}; // class ClientManager
+
+template<class KEY, class VALUE>
+ClientManager<KEY, VALUE>::ClientManager() :
+        ClientManager(DEFAULT_MAX_COST) {}
+
+template<class KEY, class VALUE>
+ClientManager<KEY, VALUE>::ClientManager(int32_t totalCost) : mMaxCost(totalCost) {}
+
+template<class KEY, class VALUE>
+ClientManager<KEY, VALUE>::~ClientManager() {}
+
+template<class KEY, class VALUE>
+std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> ClientManager<KEY, VALUE>::wouldEvict(
+        const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client) const {
+    Mutex::Autolock lock(mLock);
+    return wouldEvictLocked(client);
+}
+
+template<class KEY, class VALUE>
+std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>>
+ClientManager<KEY, VALUE>::getIncompatibleClients(
+        const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client) const {
+    Mutex::Autolock lock(mLock);
+    return wouldEvictLocked(client, /*returnIncompatibleClients*/true);
+}
+
+template<class KEY, class VALUE>
+std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>>
+ClientManager<KEY, VALUE>::wouldEvictLocked(
+        const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client,
+        bool returnIncompatibleClients) const {
+
+    std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> evictList;
+
+    // Disallow null clients, return input
+    if (client == nullptr) {
+        evictList.push_back(client);
+        return evictList;
+    }
+
+    const KEY& key = client->getKey();
+    int32_t cost = client->getCost();
+    int32_t priority = client->getPriority();
+    int32_t owner = client->getOwnerId();
+
+    int64_t totalCost = getCurrentCostLocked() + cost;
+
+    // Determine the MRU of the owners tied for having the highest priority
+    int32_t highestPriorityOwner = owner;
+    int32_t highestPriority = priority;
+    for (const auto& i : mClients) {
+        int32_t curPriority = i->getPriority();
+        if (curPriority >= highestPriority) {
+            highestPriority = curPriority;
+            highestPriorityOwner = i->getOwnerId();
+        }
+    }
+
+    if (highestPriority == priority) {
+        // Switch back owner if the incoming client has the highest priority, as it is MRU
+        highestPriorityOwner = owner;
+    }
+
+    // Build eviction list of clients to remove
+    for (const auto& i : mClients) {
+        const KEY& curKey = i->getKey();
+        int32_t curCost = i->getCost();
+        int32_t curPriority = i->getPriority();
+        int32_t curOwner = i->getOwnerId();
+
+        bool conflicting = (curKey == key || i->isConflicting(key) ||
+                client->isConflicting(curKey));
+
+        if (!returnIncompatibleClients) {
+            // Find evicted clients
+
+            if (conflicting && curPriority > priority) {
+                // Pre-existing conflicting client with higher priority exists
+                evictList.clear();
+                evictList.push_back(client);
+                return evictList;
+            } else if (conflicting || ((totalCost > mMaxCost && curCost > 0) &&
+                    (curPriority <= priority) &&
+                    !(highestPriorityOwner == owner && owner == curOwner))) {
+                // Add a pre-existing client to the eviction list if:
+                // - We are adding a client with equal or higher priority that conflicts
+                //   with this one.
+                // - The total cost including the incoming client's is more than the allowable
+                //   maximum, and the client has a non-zero cost, equal or lower priority, and
+                //   a different owner than the incoming client when the incoming client has
+                //   the highest priority.
+                evictList.push_back(i);
+                totalCost -= curCost;
+            }
+        } else {
+            // Find clients preventing the incoming client from being added
+
+            if (curPriority > priority && (conflicting || (totalCost > mMaxCost && curCost > 0))) {
+                // A pre-existing client with higher priority either conflicts with the
+                // incoming client, or contributes to a total cost that is over the maximum
+                evictList.push_back(i);
+            }
+        }
+    }
+
+    // Immediately return the incompatible clients if we are calculating these instead
+    if (returnIncompatibleClients) {
+        return evictList;
+    }
+
+    // If the total cost is too high, return the input unless the input has the highest priority
+    if (totalCost > mMaxCost && highestPriorityOwner != owner) {
+        evictList.clear();
+        evictList.push_back(client);
+        return evictList;
+    }
+
+    return evictList;
+
+}
+
+template<class KEY, class VALUE>
+std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> ClientManager<KEY, VALUE>::addAndEvict(
+        const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client) {
+    Mutex::Autolock lock(mLock);
+    auto evicted = wouldEvictLocked(client);
+    auto it = evicted.begin();
+    if (it != evicted.end() && *it == client) {
+        return evicted;
+    }
+
+    auto iter = evicted.cbegin();
+
+    // Remove evicted clients from list
+    mClients.erase(std::remove_if(mClients.begin(), mClients.end(),
+        [&iter, &evicted] (std::shared_ptr<ClientDescriptor<KEY, VALUE>>& curClientPtr) {
+            // Guard against walking past the end of the eviction list
+            if (iter != evicted.cend() && curClientPtr->getKey() == (*iter)->getKey()) {
+                iter++;
+                return true;
+            }
+            return false;
+        }), mClients.end());
+
+    mClients.push_back(client);
+
+    return evicted;
+}
+
+template<class KEY, class VALUE>
+std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>>
+ClientManager<KEY, VALUE>::getAll() const {
+    Mutex::Autolock lock(mLock);
+    return mClients;
+}
+
+template<class KEY, class VALUE>
+std::vector<KEY> ClientManager<KEY, VALUE>::getAllKeys() const {
+    Mutex::Autolock lock(mLock);
+    std::vector<KEY> keys;
+    keys.reserve(mClients.size());
+    for (const auto& i : mClients) {
+        keys.push_back(i->getKey());
+    }
+    return keys;
+}
+
+template<class KEY, class VALUE>
+std::vector<int32_t> ClientManager<KEY, VALUE>::getAllOwners() const {
+    Mutex::Autolock lock(mLock);
+    std::set<int32_t> owners;
+    for (const auto& i : mClients) {
+        owners.emplace(i->getOwnerId());
+    }
+    return std::vector<int32_t>(owners.begin(), owners.end());
+}
+
+template<class KEY, class VALUE>
+void ClientManager<KEY, VALUE>::updatePriorities(
+        const std::map<int32_t,int32_t>& ownerPriorityList) {
+    Mutex::Autolock lock(mLock);
+    for (auto& i : mClients) {
+        auto j = ownerPriorityList.find(i->getOwnerId());
+        if (j != ownerPriorityList.end()) {
+            i->setPriority(j->second);
+        }
+    }
+}
+
+template<class KEY, class VALUE>
+std::shared_ptr<ClientDescriptor<KEY, VALUE>> ClientManager<KEY, VALUE>::get(
+        const KEY& key) const {
+    Mutex::Autolock lock(mLock);
+    for (const auto& i : mClients) {
+        if (i->getKey() == key) return i;
+    }
+    return std::shared_ptr<ClientDescriptor<KEY, VALUE>>(nullptr);
+}
+
+template<class KEY, class VALUE>
+void ClientManager<KEY, VALUE>::removeAll() {
+    Mutex::Autolock lock(mLock);
+    mClients.clear();
+}
+
+template<class KEY, class VALUE>
+std::shared_ptr<ClientDescriptor<KEY, VALUE>> ClientManager<KEY, VALUE>::remove(const KEY& key) {
+    Mutex::Autolock lock(mLock);
+
+    std::shared_ptr<ClientDescriptor<KEY, VALUE>> ret;
+
+    // Remove evicted clients from list
+    mClients.erase(std::remove_if(mClients.begin(), mClients.end(),
+        [&key, &ret] (std::shared_ptr<ClientDescriptor<KEY, VALUE>>& curClientPtr) {
+            if (curClientPtr->getKey() == key) {
+                ret = curClientPtr;
+                return true;
+            }
+            return false;
+        }), mClients.end());
+
+    return ret;
+}
+
+template<class KEY, class VALUE>
+void ClientManager<KEY, VALUE>::remove(
+        const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& value) {
+    Mutex::Autolock lock(mLock);
+    // Remove evicted clients from list
+    mClients.erase(std::remove_if(mClients.begin(), mClients.end(),
+        [&value] (std::shared_ptr<ClientDescriptor<KEY, VALUE>>& curClientPtr) {
+            if (curClientPtr == value) {
+                return true;
+            }
+            return false;
+        }), mClients.end());
+}
+
+template<class KEY, class VALUE>
+int64_t ClientManager<KEY, VALUE>::getCurrentCostLocked() const {
+    int64_t totalCost = 0;
+    for (const auto& x : mClients) {
+        totalCost += x->getCost();
+    }
+    return totalCost;
+}
+
+// --------------------------------------------------------------------------------
+
+}; // namespace resource_policy
+}; // namespace android
+
+#endif // ANDROID_SERVICE_UTILS_EVICTION_POLICY_MANAGER_H
diff --git a/services/camera/libcameraservice/utils/RingBuffer.h b/services/camera/libcameraservice/utils/RingBuffer.h
new file mode 100644
index 0000000..df7c00e
--- /dev/null
+++ b/services/camera/libcameraservice/utils/RingBuffer.h
@@ -0,0 +1,361 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_SERVICE_UTILS_RING_BUFFER_H
+#define ANDROID_SERVICE_UTILS_RING_BUFFER_H
+
+#include <utils/Log.h>
+#include <cutils/compiler.h>
+
+#include <iterator>
+#include <utility>
+#include <vector>
+
+namespace android {
+
+/**
+ * A RingBuffer class that maintains an array of objects that can grow up to a certain size.
+ * Elements added to the RingBuffer are inserted in the logical front of the buffer, and
+ * invalidate all current iterators for that RingBuffer object.
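+ *
+ * A minimal usage sketch (illustrative only, not part of this change):
+ *
+ *     RingBuffer<int> buffer(3);   // grows up to 3 elements
+ *     buffer.add(1);
+ *     buffer.add(2);
+ *     buffer.add(3);
+ *     buffer.add(4);               // replaces the oldest element (1)
+ *     int newest = buffer[0];      // 4: index 0 is the logical front
+ *     for (const auto& item : buffer) {
+ *         // iterates newest to oldest: 4, 3, 2
+ *     }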
+ */
+template <class T>
+class RingBuffer final {
+public:
+
+    /**
+     * Construct a RingBuffer that can grow up to the given length.
+     */
+    RingBuffer(size_t length);
+
+    /**
+     * Forward iterator to this class.  Implements a std::forward_iterator.
+     */
+    class iterator : public std::iterator<std::forward_iterator_tag, T> {
+    public:
+        iterator(T* ptr, size_t size, size_t pos, size_t ctr);
+
+        iterator& operator++();
+
+        iterator operator++(int);
+
+        bool operator==(const iterator& rhs);
+
+        bool operator!=(const iterator& rhs);
+
+        T& operator*();
+
+        T* operator->();
+
+    private:
+        T* mPtr;
+        size_t mSize;
+        size_t mPos;
+        size_t mCtr;
+    };
+
+    /**
+     * Constant forward iterator to this class.  Implements a std::forward_iterator.
+     */
+    class const_iterator : public std::iterator<std::forward_iterator_tag, T> {
+    public:
+        const_iterator(const T* ptr, size_t size, size_t pos, size_t ctr);
+
+        const_iterator& operator++();
+
+        const_iterator operator++(int);
+
+        bool operator==(const const_iterator& rhs);
+
+        bool operator!=(const const_iterator& rhs);
+
+        const T& operator*();
+
+        const T* operator->();
+
+    private:
+        const T* mPtr;
+        size_t mSize;
+        size_t mPos;
+        size_t mCtr;
+    };
+
+    /**
+     * Adds item to the front of this RingBuffer.  If the RingBuffer is at its maximum length,
+     * this will result in the last element being replaced (this is done using the element's
+     * assignment operator).
+     *
+     * All current iterators are invalidated.
+     */
+    void add(const T& item);
+
+    /**
+     * Moves item to the front of this RingBuffer.  Following a call to this, item should no
+     * longer be used.  If the RingBuffer is at its maximum length, this will result in the
+     * last element being replaced (this is done using the element's assignment operator).
+     *
+     * All current iterators are invalidated.
+     */
+    void add(T&& item);
+
+    /**
+     * Construct item in-place in the front of this RingBuffer using the given arguments.  If
+     * the RingBuffer is at its maximum length, this will result in the last element being
+     * replaced (this is done using the element's assignment operator).
+     *
+     * All current iterators are invalidated.
+     */
+    template <class... Args>
+    void emplace(Args&&... args);
+
+    /**
+     * Get an iterator to the front of this RingBuffer.
+     */
+    iterator begin();
+
+    /**
+     * Get an iterator to the end of this RingBuffer.
+     */
+    iterator end();
+
+    /**
+     * Get a const_iterator to the front of this RingBuffer.
+     */
+    const_iterator begin() const;
+
+    /**
+     * Get a const_iterator to the end of this RingBuffer.
+     */
+    const_iterator end() const;
+
+    /**
+     * Return a reference to the element at a given index.  If the index is outside the valid
+     * range [0, size) for this RingBuffer, the behavior is undefined.
+     */
+    T& operator[](size_t index);
+
+    /**
+     * Return a const reference to the element at a given index.  If the index is outside the
+     * valid range [0, size) for this RingBuffer, the behavior is undefined.
+     */
+    const T& operator[](size_t index) const;
+
+    /**
+     * Return the current size of this RingBuffer.
+     */
+    size_t size() const;
+
+    /**
+     * Remove all elements from this RingBuffer and set the size to 0.
+     */
+    void clear();
+
+private:
+    size_t mFrontIdx;
+    size_t mMaxBufferSize;
+    std::vector<T> mBuffer;
+}; // class RingBuffer
+
+
+template <class T>
+RingBuffer<T>::RingBuffer(size_t length) : mFrontIdx{0}, mMaxBufferSize{length} {}
+
+template <class T>
+RingBuffer<T>::iterator::iterator(T* ptr, size_t size, size_t pos, size_t ctr) :
+        mPtr{ptr}, mSize{size}, mPos{pos}, mCtr{ctr} {}
+
+template <class T>
+typename RingBuffer<T>::iterator& RingBuffer<T>::iterator::operator++() {
+    ++mCtr;
+
+    if (CC_UNLIKELY(mCtr == mSize)) {
+        mPos = mSize;
+        return *this;
+    }
+
+    mPos = ((CC_UNLIKELY(mPos == 0)) ? mSize - 1 : mPos - 1);
+    return *this;
+}
+
+template <class T>
+typename RingBuffer<T>::iterator RingBuffer<T>::iterator::operator++(int) {
+    iterator tmp{mPtr, mSize, mPos, mCtr};
+    ++(*this);
+    return tmp;
+}
+
+template <class T>
+bool RingBuffer<T>::iterator::operator==(const iterator& rhs) {
+    return (mPtr + mPos) == (rhs.mPtr + rhs.mPos);
+}
+
+template <class T>
+bool RingBuffer<T>::iterator::operator!=(const iterator& rhs) {
+    return (mPtr + mPos) != (rhs.mPtr + rhs.mPos);
+}
+
+template <class T>
+T& RingBuffer<T>::iterator::operator*() {
+    return *(mPtr + mPos);
+}
+
+template <class T>
+T* RingBuffer<T>::iterator::operator->() {
+    return mPtr + mPos;
+}
+
+template <class T>
+RingBuffer<T>::const_iterator::const_iterator(const T* ptr, size_t size, size_t pos, size_t ctr) :
+        mPtr{ptr}, mSize{size}, mPos{pos}, mCtr{ctr} {}
+
+template <class T>
+typename RingBuffer<T>::const_iterator& RingBuffer<T>::const_iterator::operator++() {
+    ++mCtr;
+
+    if (CC_UNLIKELY(mCtr == mSize)) {
+        mPos = mSize;
+        return *this;
+    }
+
+    mPos = ((CC_UNLIKELY(mPos == 0)) ? mSize - 1 : mPos - 1);
+    return *this;
+}
+
+template <class T>
+typename RingBuffer<T>::const_iterator RingBuffer<T>::const_iterator::operator++(int) {
+    const_iterator tmp{mPtr, mSize, mPos, mCtr};
+    ++(*this);
+    return tmp;
+}
+
+template <class T>
+bool RingBuffer<T>::const_iterator::operator==(const const_iterator& rhs) {
+    return (mPtr + mPos) == (rhs.mPtr + rhs.mPos);
+}
+
+template <class T>
+bool RingBuffer<T>::const_iterator::operator!=(const const_iterator& rhs) {
+    return (mPtr + mPos) != (rhs.mPtr + rhs.mPos);
+}
+
+template <class T>
+const T& RingBuffer<T>::const_iterator::operator*() {
+    return *(mPtr + mPos);
+}
+
+template <class T>
+const T* RingBuffer<T>::const_iterator::operator->() {
+    return mPtr + mPos;
+}
+
+template <class T>
+void RingBuffer<T>::add(const T& item) {
+    if (mBuffer.size() < mMaxBufferSize) {
+        mBuffer.push_back(item);
+        mFrontIdx = ((mFrontIdx + 1) % mMaxBufferSize);
+        return;
+    }
+
+    mBuffer[mFrontIdx] = item;
+    mFrontIdx = ((mFrontIdx + 1) % mMaxBufferSize);
+}
+
+template <class T>
+void RingBuffer<T>::add(T&& item) {
+    if (mBuffer.size() != mMaxBufferSize) {
+        mBuffer.push_back(std::forward<T>(item));
+        mFrontIdx = ((mFrontIdx + 1) % mMaxBufferSize);
+        return;
+    }
+
+    // Only works for types with move assignment operator
+    mBuffer[mFrontIdx] = std::forward<T>(item);
+    mFrontIdx = ((mFrontIdx + 1) % mMaxBufferSize);
+}
+
+template <class T>
+template <class... Args>
+void RingBuffer<T>::emplace(Args&&... args) {
+    if (mBuffer.size() != mMaxBufferSize) {
+        mBuffer.emplace_back(std::forward<Args>(args)...);
+        mFrontIdx = ((mFrontIdx + 1) % mMaxBufferSize);
+        return;
+    }
+
+    // Only works for types with move assignment operator
+    mBuffer[mFrontIdx] = T(std::forward<Args>(args)...);
+    mFrontIdx = ((mFrontIdx + 1) % mMaxBufferSize);
+}
+
+template <class T>
+typename RingBuffer<T>::iterator RingBuffer<T>::begin() {
+    size_t tmp = (mBuffer.size() == 0) ? 0 : mBuffer.size() - 1;
+    return iterator(mBuffer.data(), mBuffer.size(), (mFrontIdx == 0) ? tmp : mFrontIdx - 1, 0);
+}
+
+template <class T>
+typename RingBuffer<T>::iterator RingBuffer<T>::end() {
+    size_t s = mBuffer.size();
+    return iterator(mBuffer.data(), s, s, s);
+}
+
+template <class T>
+typename RingBuffer<T>::const_iterator RingBuffer<T>::begin() const {
+    size_t tmp = (mBuffer.size() == 0) ? 0 : mBuffer.size() - 1;
+    return const_iterator(mBuffer.data(), mBuffer.size(),
+            (mFrontIdx == 0) ? tmp : mFrontIdx - 1, 0);
+}
+
+template <class T>
+typename RingBuffer<T>::const_iterator RingBuffer<T>::end() const {
+    size_t s = mBuffer.size();
+    return const_iterator(mBuffer.data(), s, s, s);
+}
+
+template <class T>
+T& RingBuffer<T>::operator[](size_t index) {
+    LOG_ALWAYS_FATAL_IF(index >= mBuffer.size(), "Index %zu out of bounds, size is %zu.",
+            index, mBuffer.size());
+    size_t pos = (index >= mFrontIdx) ?
+            mBuffer.size() - 1 - (index - mFrontIdx) : mFrontIdx - 1 - index;
+    return mBuffer[pos];
+}
+
+template <class T>
+const T& RingBuffer<T>::operator[](size_t index) const {
+    LOG_ALWAYS_FATAL_IF(index >= mBuffer.size(), "Index %zu out of bounds, size is %zu.",
+            index, mBuffer.size());
+    size_t pos = (index >= mFrontIdx) ?
+            mBuffer.size() - 1 - (index - mFrontIdx) : mFrontIdx - 1 - index;
+    return mBuffer[pos];
+}
+
+template <class T>
+size_t RingBuffer<T>::size() const {
+    return mBuffer.size();
+}
+
+template <class T>
+void RingBuffer<T>::clear() {
+    mBuffer.clear();
+    mFrontIdx = 0;
+}
+
+}; // namespace android
+
+#endif // ANDROID_SERVICE_UTILS_RING_BUFFER_H
+
+
diff --git a/services/mediaresourcemanager/Android.mk b/services/mediaresourcemanager/Android.mk
new file mode 100644
index 0000000..84218cf
--- /dev/null
+++ b/services/mediaresourcemanager/Android.mk
@@ -0,0 +1,18 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := ResourceManagerService.cpp
+
+LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
+
+LOCAL_MODULE:= libresourcemanagerservice
+
+LOCAL_32_BIT_ONLY := true
+
+LOCAL_C_INCLUDES += \
+    $(TOPDIR)frameworks/av/include
+
+include $(BUILD_SHARED_LIBRARY)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
new file mode 100644
index 0000000..7296d47
--- /dev/null
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -0,0 +1,345 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ResourceManagerService"
+#include <utils/Log.h>
+
+#include <binder/IServiceManager.h>
+#include <dirent.h>
+#include <media/stagefright/ProcessInfo.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "ResourceManagerService.h"
+
+namespace android {
+
+template <typename T>
+static String8 getString(const Vector<T> &items) {
+    String8 itemsStr;
+    for (size_t i = 0; i < items.size(); ++i) {
+        itemsStr.appendFormat("%s ", items[i].toString().string());
+    }
+    return itemsStr;
+}
+
+static bool hasResourceType(String8 type, Vector<MediaResource> resources) {
+    for (size_t i = 0; i < resources.size(); ++i) {
+        if (resources[i].mType == type) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static bool hasResourceType(String8 type, ResourceInfos infos) {
+    for (size_t i = 0; i < infos.size(); ++i) {
+        if (hasResourceType(type, infos[i].resources)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static ResourceInfos& getResourceInfosForEdit(
+        int pid,
+        PidResourceInfosMap& map) {
+    ssize_t index = map.indexOfKey(pid);
+    if (index < 0) {
+        // new pid
+        ResourceInfos infosForPid;
+        map.add(pid, infosForPid);
+    }
+
+    return map.editValueFor(pid);
+}
+
+static ResourceInfo& getResourceInfoForEdit(
+        int64_t clientId,
+        const sp<IResourceManagerClient> client,
+        ResourceInfos& infos) {
+    for (size_t i = 0; i < infos.size(); ++i) {
+        if (infos[i].clientId == clientId) {
+            return infos.editItemAt(i);
+        }
+    }
+    ResourceInfo info;
+    info.clientId = clientId;
+    info.client = client;
+    infos.push_back(info);
+    return infos.editItemAt(infos.size() - 1);
+}
+
+ResourceManagerService::ResourceManagerService()
+    : mProcessInfo(new ProcessInfo()),
+      mSupportsMultipleSecureCodecs(true),
+      mSupportsSecureWithNonSecureCodec(true) {}
+
+ResourceManagerService::ResourceManagerService(sp<ProcessInfoInterface> processInfo)
+    : mProcessInfo(processInfo),
+      mSupportsMultipleSecureCodecs(true),
+      mSupportsSecureWithNonSecureCodec(true) {}
+
+ResourceManagerService::~ResourceManagerService() {}
+
+void ResourceManagerService::config(const Vector<MediaResourcePolicy> &policies) {
+    ALOGV("config(%s)", getString(policies).string());
+
+    Mutex::Autolock lock(mLock);
+    for (size_t i = 0; i < policies.size(); ++i) {
+        String8 type = policies[i].mType;
+        uint64_t value = policies[i].mValue;
+        if (type == kPolicySupportsMultipleSecureCodecs) {
+            mSupportsMultipleSecureCodecs = (value != 0);
+        } else if (type == kPolicySupportsSecureWithNonSecureCodec) {
+            mSupportsSecureWithNonSecureCodec = (value != 0);
+        }
+    }
+}
+
+void ResourceManagerService::addResource(
+        int pid,
+        int64_t clientId,
+        const sp<IResourceManagerClient> client,
+        const Vector<MediaResource> &resources) {
+    ALOGV("addResource(pid %d, clientId %lld, resources %s)",
+            pid, (long long) clientId, getString(resources).string());
+
+    Mutex::Autolock lock(mLock);
+    ResourceInfos& infos = getResourceInfosForEdit(pid, mMap);
+    ResourceInfo& info = getResourceInfoForEdit(clientId, client, infos);
+    info.resources.appendVector(resources);
+}
+
+void ResourceManagerService::removeResource(int64_t clientId) {
+    ALOGV("removeResource(%lld)", (long long) clientId);
+
+    Mutex::Autolock lock(mLock);
+    bool found = false;
+    for (size_t i = 0; i < mMap.size(); ++i) {
+        ResourceInfos &infos = mMap.editValueAt(i);
+        for (size_t j = 0; j < infos.size();) {
+            if (infos[j].clientId == clientId) {
+                j = infos.removeAt(j);
+                found = true;
+            } else {
+                ++j;
+            }
+        }
+        if (found) {
+            break;
+        }
+    }
+    if (!found) {
+        ALOGV("didn't find client");
+    }
+}
+
+bool ResourceManagerService::reclaimResource(
+        int callingPid, const Vector<MediaResource> &resources) {
+    ALOGV("reclaimResource(callingPid %d, resources %s)",
+            callingPid, getString(resources).string());
+
+    Vector<sp<IResourceManagerClient>> clients;
+    {
+        Mutex::Autolock lock(mLock);
+        // first pass to handle secure/non-secure codec conflict
+        for (size_t i = 0; i < resources.size(); ++i) {
+            String8 type = resources[i].mType;
+            if (type == kResourceSecureCodec) {
+                if (!mSupportsMultipleSecureCodecs) {
+                    if (!getAllClients_l(callingPid, String8(kResourceSecureCodec), &clients)) {
+                        return false;
+                    }
+                }
+                if (!mSupportsSecureWithNonSecureCodec) {
+                    if (!getAllClients_l(callingPid, String8(kResourceNonSecureCodec), &clients)) {
+                        return false;
+                    }
+                }
+            } else if (type == kResourceNonSecureCodec) {
+                if (!mSupportsSecureWithNonSecureCodec) {
+                    if (!getAllClients_l(callingPid, String8(kResourceSecureCodec), &clients)) {
+                        return false;
+                    }
+                }
+            }
+        }
+
+        if (clients.size() == 0) {
+            // if no secure/non-secure codec conflict, run second pass to handle other resources.
+            for (size_t i = 0; i < resources.size(); ++i) {
+                String8 type = resources[i].mType;
+                if (type == kResourceGraphicMemory) {
+                    sp<IResourceManagerClient> client;
+                    if (!getLowestPriorityBiggestClient_l(callingPid, type, &client)) {
+                        return false;
+                    }
+                    clients.push_back(client);
+                }
+            }
+        }
+    }
+
+    if (clients.size() == 0) {
+        return false;
+    }
+
+    for (size_t i = 0; i < clients.size(); ++i) {
+        ALOGV("reclaimResource from client %p", clients[i].get());
+        if (!clients[i]->reclaimResource()) {
+            return false;
+        }
+    }
+    return true;
+}
+
+bool ResourceManagerService::getAllClients_l(
+        int callingPid, const String8 &type, Vector<sp<IResourceManagerClient>> *clients) {
+    Vector<sp<IResourceManagerClient>> temp;
+    for (size_t i = 0; i < mMap.size(); ++i) {
+        ResourceInfos &infos = mMap.editValueAt(i);
+        for (size_t j = 0; j < infos.size(); ++j) {
+            if (hasResourceType(type, infos[j].resources)) {
+                if (!isCallingPriorityHigher_l(callingPid, mMap.keyAt(i))) {
+                    // some higher/equal priority process owns the resource,
+                    // this request can't be fulfilled.
+                    ALOGE("getAllClients_l: can't reclaim resource %s from pid %d",
+                            type.string(), mMap.keyAt(i));
+                    return false;
+                }
+                temp.push_back(infos[j].client);
+            }
+        }
+    }
+    if (temp.size() == 0) {
+        ALOGV("getAllClients_l: didn't find any resource %s", type.string());
+        return true;
+    }
+    clients->appendVector(temp);
+    return true;
+}
+
+bool ResourceManagerService::getLowestPriorityBiggestClient_l(
+        int callingPid, const String8 &type, sp<IResourceManagerClient> *client) {
+    int lowestPriorityPid;
+    int lowestPriority;
+    int callingPriority;
+    if (!mProcessInfo->getPriority(callingPid, &callingPriority)) {
+        ALOGE("getLowestPriorityBiggestClient_l: can't get process priority for pid %d",
+                callingPid);
+        return false;
+    }
+    if (!getLowestPriorityPid_l(type, &lowestPriorityPid, &lowestPriority)) {
+        return false;
+    }
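+    // Lower values mean higher priority: only reclaim if the caller strictly outranks that pid.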
+    if (lowestPriority <= callingPriority) {
+        ALOGE("getLowestPriorityBiggestClient_l: lowest priority %d vs caller priority %d",
+                lowestPriority, callingPriority);
+        return false;
+    }
+
+    if (!getBiggestClient_l(lowestPriorityPid, type, client)) {
+        return false;
+    }
+    return true;
+}
+
+bool ResourceManagerService::getLowestPriorityPid_l(
+        const String8 &type, int *lowestPriorityPid, int *lowestPriority) {
+    int pid = -1;
+    int priority = -1;
+    for (size_t i = 0; i < mMap.size(); ++i) {
+        if (mMap.valueAt(i).size() == 0) {
+            // no client on this process.
+            continue;
+        }
+        if (!hasResourceType(type, mMap.valueAt(i))) {
+            // doesn't have the requested resource type
+            continue;
+        }
+        int tempPid = mMap.keyAt(i);
+        int tempPriority;
+        if (!mProcessInfo->getPriority(tempPid, &tempPriority)) {
+            ALOGV("getLowestPriorityPid_l: can't get priority of pid %d, skipped", tempPid);
+            // TODO: remove this pid from mMap?
+            continue;
+        }
+        if (pid == -1 || tempPriority > priority) {
+            // first candidate, or a lower-priority (larger value) process found
+            pid = tempPid;
+            priority = tempPriority;
+        }
+    }
+    if (pid != -1) {
+        *lowestPriorityPid = pid;
+        *lowestPriority = priority;
+    }
+    return (pid != -1);
+}
+
+bool ResourceManagerService::isCallingPriorityHigher_l(int callingPid, int pid) {
+    int callingPidPriority;
+    if (!mProcessInfo->getPriority(callingPid, &callingPidPriority)) {
+        return false;
+    }
+
+    int priority;
+    if (!mProcessInfo->getPriority(pid, &priority)) {
+        return false;
+    }
+
+    return (callingPidPriority < priority);
+}
+
+bool ResourceManagerService::getBiggestClient_l(
+        int pid, const String8 &type, sp<IResourceManagerClient> *client) {
+    ssize_t index = mMap.indexOfKey(pid);
+    if (index < 0) {
+        ALOGE("getBiggestClient_l: can't find resource info for pid %d", pid);
+        return false;
+    }
+
+    sp<IResourceManagerClient> clientTemp;
+    uint64_t largestValue = 0;
+    const ResourceInfos &infos = mMap.valueAt(index);
+    for (size_t i = 0; i < infos.size(); ++i) {
+        Vector<MediaResource> resources = infos[i].resources;
+        for (size_t j = 0; j < resources.size(); ++j) {
+            if (resources[j].mType == type) {
+                if (resources[j].mValue > largestValue) {
+                    largestValue = resources[j].mValue;
+                    clientTemp = infos[i].client;
+                }
+            }
+        }
+    }
+
+    if (clientTemp == NULL) {
+        ALOGE("getBiggestClient_l: can't find resource type %s for pid %d", type.string(), pid);
+        return false;
+    }
+
+    *client = clientTemp;
+    return true;
+}
+
+} // namespace android
diff --git a/services/mediaresourcemanager/ResourceManagerService.h b/services/mediaresourcemanager/ResourceManagerService.h
new file mode 100644
index 0000000..2ed9bf8
--- /dev/null
+++ b/services/mediaresourcemanager/ResourceManagerService.h
@@ -0,0 +1,106 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_RESOURCEMANAGERSERVICE_H
+#define ANDROID_RESOURCEMANAGERSERVICE_H
+
+#include <arpa/inet.h>
+#include <binder/BinderService.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/threads.h>
+#include <utils/Vector.h>
+
+#include <media/IResourceManagerService.h>
+
+namespace android {
+
+struct ProcessInfoInterface;
+
+struct ResourceInfo {
+    int64_t clientId;
+    sp<IResourceManagerClient> client;
+    Vector<MediaResource> resources;
+};
+
+typedef Vector<ResourceInfo> ResourceInfos;
+typedef KeyedVector<int, ResourceInfos> PidResourceInfosMap;
+
+class ResourceManagerService
+    : public BinderService<ResourceManagerService>,
+      public BnResourceManagerService
+{
+public:
+    static char const *getServiceName() { return "media.resource_manager"; }
+
+    ResourceManagerService();
+    ResourceManagerService(sp<ProcessInfoInterface> processInfo);
+
+    // IResourceManagerService interface
+    virtual void config(const Vector<MediaResourcePolicy> &policies);
+
+    virtual void addResource(
+            int pid,
+            int64_t clientId,
+            const sp<IResourceManagerClient> client,
+            const Vector<MediaResource> &resources);
+
+    virtual void removeResource(int64_t clientId);
+
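+    // Tries to free the given resource types on behalf of callingPid by asking lower priority
+    // clients to release them. Returns true only if every selected client reclaims successfully.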
+    virtual bool reclaimResource(int callingPid, const Vector<MediaResource> &resources);
+
+protected:
+    virtual ~ResourceManagerService();
+
+private:
+    friend class ResourceManagerServiceTest;
+
+    // Gets the list of all the clients who own the specified resource type.
+    // Returns false if any client belongs to a process with higher priority than the
+    // calling process. The list of clients is left unchanged if false is returned.
+    bool getAllClients_l(int callingPid, const String8 &type,
+            Vector<sp<IResourceManagerClient>> *clients);
+
+    // Gets the client that owns the specified resource type from the lowest priority process.
+    // Returns false if the calling process's priority is not higher than the lowest process
+    // priority. The client is left unchanged if false is returned.
+    bool getLowestPriorityBiggestClient_l(int callingPid, const String8 &type,
+            sp<IResourceManagerClient> *client);
+
+    // Gets lowest priority process that has the specified resource type.
+    // Returns false if failed. The output parameters will remain unchanged if failed.
+    bool getLowestPriorityPid_l(const String8 &type, int *pid, int *priority);
+
+    // Gets the client that owns the biggest piece of the specified resource type from pid.
+    // Returns false if failed. The client will remain unchanged if failed.
+    bool getBiggestClient_l(int pid, const String8 &type, sp<IResourceManagerClient> *client);
+
+    bool isCallingPriorityHigher_l(int callingPid, int pid);
+
+    mutable Mutex mLock;  // protects mMap
+    sp<ProcessInfoInterface> mProcessInfo;  // process priority lookups
+    PidResourceInfosMap mMap;  // per-pid list of registered clients and their resources
+    bool mSupportsMultipleSecureCodecs;  // policy: allow more than one secure codec at a time
+    bool mSupportsSecureWithNonSecureCodec;  // policy: secure + non-secure codecs may coexist
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_RESOURCEMANAGERSERVICE_H
diff --git a/services/mediaresourcemanager/test/Android.mk b/services/mediaresourcemanager/test/Android.mk
new file mode 100644
index 0000000..228b62a
--- /dev/null
+++ b/services/mediaresourcemanager/test/Android.mk
@@ -0,0 +1,25 @@
+# Build the unit tests.
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := ResourceManagerService_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+  ResourceManagerService_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+  libbinder \
+  liblog \
+  libmedia \
+  libresourcemanagerservice \
+  libutils \
+
+LOCAL_C_INCLUDES := \
+  frameworks/av/include \
+  frameworks/av/services/mediaresourcemanager \
+
+LOCAL_32_BIT_ONLY := true
+
+include $(BUILD_NATIVE_TEST)
diff --git a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
new file mode 100644
index 0000000..b73e1bc
--- /dev/null
+++ b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ResourceManagerService_test"
+#include <utils/Log.h>
+
+#include <gtest/gtest.h>
+
+#include "ResourceManagerService.h"
+#include <media/IResourceManagerService.h>
+#include <media/MediaResource.h>
+#include <media/MediaResourcePolicy.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/ProcessInfoInterface.h>
+
+namespace android {
+
+struct TestProcessInfo : public ProcessInfoInterface {
+    TestProcessInfo() {}
+    virtual ~TestProcessInfo() {}
+
+    virtual bool getPriority(int pid, int *priority) {
+        // For testing, use the pid as the priority:
+        // the lower the value, the higher the priority.
+        *priority = pid;
+        return true;
+    }
+
+private:
+    DISALLOW_EVIL_CONSTRUCTORS(TestProcessInfo);
+};
+
+struct TestClient : public BnResourceManagerClient {
+    TestClient(sp<ResourceManagerService> service)
+        : mReclaimed(false), mService(service) {}
+
+    virtual bool reclaimResource() {
+        sp<IResourceManagerClient> client(this);
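+        // The tests register resources using the raw client pointer value as the clientId.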
+        mService->removeResource((int64_t) client.get());
+        mReclaimed = true;
+        return true;
+    }
+
+    bool reclaimed() const {
+        return mReclaimed;
+    }
+
+    void reset() {
+        mReclaimed = false;
+    }
+
+protected:
+    virtual ~TestClient() {}
+
+private:
+    bool mReclaimed;
+    sp<ResourceManagerService> mService;
+    DISALLOW_EVIL_CONSTRUCTORS(TestClient);
+};
+
+static const int kTestPid1 = 30;
+static const int kTestPid2 = 20;
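+// With TestProcessInfo, priority == pid, so kTestPid2 (20) outranks kTestPid1 (30).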
+
+class ResourceManagerServiceTest : public ::testing::Test {
+public:
+    ResourceManagerServiceTest()
+        : mService(new ResourceManagerService(new TestProcessInfo)),
+          mTestClient1(new TestClient(mService)),
+          mTestClient2(new TestClient(mService)),
+          mTestClient3(new TestClient(mService)) {
+    }
+
+protected:
+    static bool isEqualResources(const Vector<MediaResource> &resources1,
+            const Vector<MediaResource> &resources2) {
+        if (resources1.size() != resources2.size()) {
+            return false;
+        }
+        for (size_t i = 0; i < resources1.size(); ++i) {
+            if (resources1[i] != resources2[i]) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    static void expectEqResourceInfo(const ResourceInfo &info, sp<IResourceManagerClient> client,
+            const Vector<MediaResource> &resources) {
+        EXPECT_EQ(client, info.client);
+        EXPECT_TRUE(isEqualResources(resources, info.resources));
+    }
+
+    void verifyClients(bool c1, bool c2, bool c3) {
+        TestClient *client1 = static_cast<TestClient*>(mTestClient1.get());
+        TestClient *client2 = static_cast<TestClient*>(mTestClient2.get());
+        TestClient *client3 = static_cast<TestClient*>(mTestClient3.get());
+
+        EXPECT_EQ(c1, client1->reclaimed());
+        EXPECT_EQ(c2, client2->reclaimed());
+        EXPECT_EQ(c3, client3->reclaimed());
+
+        client1->reset();
+        client2->reset();
+        client3->reset();
+    }
+
+    void addResource() {
+        // kTestPid1 mTestClient1
+        Vector<MediaResource> resources1;
+        resources1.push_back(MediaResource(String8(kResourceSecureCodec), 1));
+        mService->addResource(kTestPid1, (int64_t) mTestClient1.get(), mTestClient1, resources1);
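+        // A second addResource call appends; resources1 tracks the expected merged list.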
+        resources1.push_back(MediaResource(String8(kResourceGraphicMemory), 200));
+        Vector<MediaResource> resources11;
+        resources11.push_back(MediaResource(String8(kResourceGraphicMemory), 200));
+        mService->addResource(kTestPid1, (int64_t) mTestClient1.get(), mTestClient1, resources11);
+
+        // kTestPid2 mTestClient2
+        Vector<MediaResource> resources2;
+        resources2.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
+        resources2.push_back(MediaResource(String8(kResourceGraphicMemory), 300));
+        mService->addResource(kTestPid2, (int64_t) mTestClient2.get(), mTestClient2, resources2);
+
+        // kTestPid2 mTestClient3
+        Vector<MediaResource> resources3;
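+        // Register mTestClient3 with an empty list first, then add its real resources below.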
+        mService->addResource(kTestPid2, (int64_t) mTestClient3.get(), mTestClient3, resources3);
+        resources3.push_back(MediaResource(String8(kResourceSecureCodec), 1));
+        resources3.push_back(MediaResource(String8(kResourceGraphicMemory), 100));
+        mService->addResource(kTestPid2, (int64_t) mTestClient3.get(), mTestClient3, resources3);
+
+        const PidResourceInfosMap &map = mService->mMap;
+        EXPECT_EQ(2u, map.size());
+        ssize_t index1 = map.indexOfKey(kTestPid1);
+        ASSERT_GE(index1, 0);
+        const ResourceInfos &infos1 = map[index1];
+        EXPECT_EQ(1u, infos1.size());
+        expectEqResourceInfo(infos1[0], mTestClient1, resources1);
+
+        ssize_t index2 = map.indexOfKey(kTestPid2);
+        ASSERT_GE(index2, 0);
+        const ResourceInfos &infos2 = map[index2];
+        EXPECT_EQ(2u, infos2.size());
+        expectEqResourceInfo(infos2[0], mTestClient2, resources2);
+        expectEqResourceInfo(infos2[1], mTestClient3, resources3);
+    }
+
+    void testConfig() {
+        EXPECT_TRUE(mService->mSupportsMultipleSecureCodecs);
+        EXPECT_TRUE(mService->mSupportsSecureWithNonSecureCodec);
+
+        Vector<MediaResourcePolicy> policies1;
+        policies1.push_back(MediaResourcePolicy(String8(kPolicySupportsMultipleSecureCodecs), 1));
+        policies1.push_back(
+                MediaResourcePolicy(String8(kPolicySupportsSecureWithNonSecureCodec), 0));
+        mService->config(policies1);
+        EXPECT_TRUE(mService->mSupportsMultipleSecureCodecs);
+        EXPECT_FALSE(mService->mSupportsSecureWithNonSecureCodec);
+
+        Vector<MediaResourcePolicy> policies2;
+        policies2.push_back(MediaResourcePolicy(String8(kPolicySupportsMultipleSecureCodecs), 0));
+        policies2.push_back(
+                MediaResourcePolicy(String8(kPolicySupportsSecureWithNonSecureCodec), 1));
+        mService->config(policies2);
+        EXPECT_FALSE(mService->mSupportsMultipleSecureCodecs);
+        EXPECT_TRUE(mService->mSupportsSecureWithNonSecureCodec);
+    }
+
+    void testRemoveResource() {
+        addResource();
+
+        mService->removeResource((int64_t) mTestClient2.get());
+
+        const PidResourceInfosMap &map = mService->mMap;
+        EXPECT_EQ(2u, map.size());
+        const ResourceInfos &infos1 = map.valueFor(kTestPid1);
+        const ResourceInfos &infos2 = map.valueFor(kTestPid2);
+        EXPECT_EQ(1u, infos1.size());
+        EXPECT_EQ(1u, infos2.size());
+        // mTestClient2 has been removed.
+        EXPECT_EQ(mTestClient3, infos2[0].client);
+    }
+
+    void testGetAllClients() {
+        addResource();
+
+        String8 type = String8(kResourceSecureCodec);
+        String8 unknownType = String8("unknownType");
+        Vector<sp<IResourceManagerClient> > clients;
+        int lowPriorityPid = 100;
+        EXPECT_FALSE(mService->getAllClients_l(lowPriorityPid, type, &clients));
+        int midPriorityPid = 25;
+        EXPECT_FALSE(mService->getAllClients_l(midPriorityPid, type, &clients));
+        int highPriorityPid = 10;
+        EXPECT_TRUE(mService->getAllClients_l(highPriorityPid, unknownType, &clients));
+        EXPECT_TRUE(mService->getAllClients_l(highPriorityPid, type, &clients));
+
+        EXPECT_EQ(2u, clients.size());
+        EXPECT_EQ(mTestClient3, clients[0]);
+        EXPECT_EQ(mTestClient1, clients[1]);
+    }
+
+    void testReclaimResourceSecure() {
+        Vector<MediaResource> resources;
+        resources.push_back(MediaResource(String8(kResourceSecureCodec), 1));
+        resources.push_back(MediaResource(String8(kResourceGraphicMemory), 150));
+
+        // ### secure codec can't coexist and secure codec can coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsMultipleSecureCodecs = false;
+            mService->mSupportsSecureWithNonSecureCodec = true;
+
+            // priority too low
+            EXPECT_FALSE(mService->reclaimResource(40, resources));
+            EXPECT_FALSE(mService->reclaimResource(25, resources));
+
+            // reclaim all secure codecs
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            verifyClients(true, false, true);
+
+            // call again should reclaim one largest graphic memory from lowest process
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            verifyClients(false, true, false);
+
+            // nothing left
+            EXPECT_FALSE(mService->reclaimResource(10, resources));
+        }
+
+        // ### secure codecs can't coexist and secure codec can't coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsMultipleSecureCodecs = false;
+            mService->mSupportsSecureWithNonSecureCodec = false;
+
+            // priority too low
+            EXPECT_FALSE(mService->reclaimResource(40, resources));
+            EXPECT_FALSE(mService->reclaimResource(25, resources));
+
+            // reclaim all secure and non-secure codecs
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            verifyClients(true, true, true);
+
+            // nothing left
+            EXPECT_FALSE(mService->reclaimResource(10, resources));
+        }
+
+
+        // ### secure codecs can coexist but secure codec can't coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsMultipleSecureCodecs = true;
+            mService->mSupportsSecureWithNonSecureCodec = false;
+
+            // priority too low
+            EXPECT_FALSE(mService->reclaimResource(40, resources));
+            EXPECT_FALSE(mService->reclaimResource(25, resources));
+
+            // reclaim all non-secure codecs
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            verifyClients(false, true, false);
+
+            // call again should reclaim one largest graphic memory from lowest process
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            verifyClients(true, false, false);
+
+            // call again should reclaim another largest graphic memory from lowest process
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            verifyClients(false, false, true);
+
+            // nothing left
+            EXPECT_FALSE(mService->reclaimResource(10, resources));
+        }
+
+        // ### secure codecs can coexist and secure codec can coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsMultipleSecureCodecs = true;
+            mService->mSupportsSecureWithNonSecureCodec = true;
+
+            // priority too low
+            EXPECT_FALSE(mService->reclaimResource(40, resources));
+
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            // one largest graphic memory from lowest process got reclaimed
+            verifyClients(true, false, false);
+
+            // call again should reclaim another graphic memory from lowest process
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            verifyClients(false, true, false);
+
+            // call again should reclaim another graphic memory from lowest process
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            verifyClients(false, false, true);
+
+            // nothing left
+            EXPECT_FALSE(mService->reclaimResource(10, resources));
+        }
+    }
+
+    void testReclaimResourceNonSecure() {
+        Vector<MediaResource> resources;
+        resources.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
+        resources.push_back(MediaResource(String8(kResourceGraphicMemory), 150));
+
+        // ### secure codec can't coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsSecureWithNonSecureCodec = false;
+
+            // priority too low
+            EXPECT_FALSE(mService->reclaimResource(40, resources));
+            EXPECT_FALSE(mService->reclaimResource(25, resources));
+
+            // reclaim all secure codecs
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            verifyClients(true, false, true);
+
+            // call again should reclaim one graphic memory from lowest process
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            verifyClients(false, true, false);
+
+            // nothing left
+            EXPECT_FALSE(mService->reclaimResource(10, resources));
+        }
+
+
+        // ### secure codec can coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsSecureWithNonSecureCodec = true;
+
+            // priority too low
+            EXPECT_FALSE(mService->reclaimResource(40, resources));
+
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            // one largest graphic memory from lowest process got reclaimed
+            verifyClients(true, false, false);
+
+            // call again should reclaim another graphic memory from lowest process
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            verifyClients(false, true, false);
+
+            // call again should reclaim another graphic memory from lowest process
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            verifyClients(false, false, true);
+
+            // nothing left
+            EXPECT_FALSE(mService->reclaimResource(10, resources));
+        }
+    }
+
+    void testGetLowestPriorityBiggestClient() {
+        String8 type = String8(kResourceGraphicMemory);
+        sp<IResourceManagerClient> client;
+        EXPECT_FALSE(mService->getLowestPriorityBiggestClient_l(10, type, &client));
+
+        addResource();
+
+        EXPECT_FALSE(mService->getLowestPriorityBiggestClient_l(100, type, &client));
+        EXPECT_TRUE(mService->getLowestPriorityBiggestClient_l(10, type, &client));
+
+        // kTestPid1 is the lowest priority process with kResourceGraphicMemory.
+        // mTestClient1 has the largest kResourceGraphicMemory within kTestPid1.
+        EXPECT_EQ(mTestClient1, client);
+    }
+
+    void testGetLowestPriorityPid() {
+        int pid;
+        int priority;
+        TestProcessInfo processInfo;
+
+        String8 type = String8(kResourceGraphicMemory);
+        EXPECT_FALSE(mService->getLowestPriorityPid_l(type, &pid, &priority));
+
+        addResource();
+
+        EXPECT_TRUE(mService->getLowestPriorityPid_l(type, &pid, &priority));
+        EXPECT_EQ(kTestPid1, pid);
+        int priority1;
+        processInfo.getPriority(kTestPid1, &priority1);
+        EXPECT_EQ(priority1, priority);
+
+        type = String8(kResourceNonSecureCodec);
+        EXPECT_TRUE(mService->getLowestPriorityPid_l(type, &pid, &priority));
+        EXPECT_EQ(kTestPid2, pid);
+        int priority2;
+        processInfo.getPriority(kTestPid2, &priority2);
+        EXPECT_EQ(priority2, priority);
+    }
+
+    void testGetBiggestClient() {
+        String8 type = String8(kResourceGraphicMemory);
+        sp<IResourceManagerClient> client;
+        EXPECT_FALSE(mService->getBiggestClient_l(kTestPid2, type, &client));
+
+        addResource();
+
+        EXPECT_TRUE(mService->getBiggestClient_l(kTestPid2, type, &client));
+        EXPECT_EQ(mTestClient2, client);
+    }
+
+    void testIsCallingPriorityHigher() {
+        EXPECT_FALSE(mService->isCallingPriorityHigher_l(101, 100));
+        EXPECT_FALSE(mService->isCallingPriorityHigher_l(100, 100));
+        EXPECT_TRUE(mService->isCallingPriorityHigher_l(99, 100));
+    }
+
+    sp<ResourceManagerService> mService;
+    sp<IResourceManagerClient> mTestClient1;
+    sp<IResourceManagerClient> mTestClient2;
+    sp<IResourceManagerClient> mTestClient3;
+};
+
+TEST_F(ResourceManagerServiceTest, config) {
+    testConfig();
+}
+
+TEST_F(ResourceManagerServiceTest, addResource) {
+    addResource();
+}
+
+TEST_F(ResourceManagerServiceTest, removeResource) {
+    testRemoveResource();
+}
+
+TEST_F(ResourceManagerServiceTest, reclaimResource) {
+    testReclaimResourceSecure();
+    testReclaimResourceNonSecure();
+}
+
+TEST_F(ResourceManagerServiceTest, getAllClients_l) {
+    testGetAllClients();
+}
+
+TEST_F(ResourceManagerServiceTest, getLowestPriorityBiggestClient_l) {
+    testGetLowestPriorityBiggestClient();
+}
+
+TEST_F(ResourceManagerServiceTest, getLowestPriorityPid_l) {
+    testGetLowestPriorityPid();
+}
+
+TEST_F(ResourceManagerServiceTest, getBiggestClient_l) {
+    testGetBiggestClient();
+}
+
+TEST_F(ResourceManagerServiceTest, isCallingPriorityHigher_l) {
+    testIsCallingPriorityHigher();
+}
+
+} // namespace android
diff --git a/services/radio/Android.mk b/services/radio/Android.mk
index 5e89b22..9ee5666 100644
--- a/services/radio/Android.mk
+++ b/services/radio/Android.mk
@@ -26,6 +26,7 @@
     libutils \
     libbinder \
     libcutils \
+    libmedia \
     libhardware \
     libradio \
     libradio_metadata
diff --git a/services/radio/RadioService.cpp b/services/radio/RadioService.cpp
index 152619b..a6c2bdf 100644
--- a/services/radio/RadioService.cpp
+++ b/services/radio/RadioService.cpp
@@ -22,6 +22,8 @@
 #include <sys/types.h>
 #include <pthread.h>
 
+#include <system/audio.h>
+#include <system/audio_policy.h>
 #include <system/radio.h>
 #include <system/radio_metadata.h>
 #include <cutils/atomic.h>
@@ -33,11 +35,13 @@
 #include <binder/MemoryBase.h>
 #include <binder/MemoryHeapBase.h>
 #include <hardware/radio.h>
+#include <media/AudioSystem.h>
 #include "RadioService.h"
 #include "RadioRegions.h"
 
 namespace android {
 
+static const char kRadioTunerAudioDeviceName[] = "Radio tuner source";
 
 RadioService::RadioService()
     : BnRadioService(), mNextUniqueId(1)
@@ -84,7 +88,7 @@
     ALOGI("loaded default module %s, handle %d", properties.product, properties.handle);
 
     convertProperties(&properties, &halProperties);
-    sp<Module> module = new Module(this, dev, properties);
+    sp<Module> module = new Module(dev, properties);
     mModules.add(properties.handle, module);
 }
 
@@ -380,10 +384,8 @@
 #undef LOG_TAG
 #define LOG_TAG "RadioService::Module"
 
-RadioService::Module::Module(const sp<RadioService>& service,
-                                      radio_hw_device* hwDevice,
-                                      radio_properties properties)
- : mService(service), mHwDevice(hwDevice), mProperties(properties), mMute(true)
+RadioService::Module::Module(radio_hw_device* hwDevice, radio_properties properties)
+ : mHwDevice(hwDevice), mProperties(properties), mMute(true)
 {
 }
 
@@ -416,6 +418,31 @@
     struct radio_hal_band_config halConfig;
     halConfig = config->band;
 
+    // Tuner preemption logic:
+    // There are a limited number of tuners and a limited number of radio audio sources per module.
+    // The minimum is one tuner and one audio source.
+    // The numbers of tuners and sources are indicated in the module properties.
+    // NOTE: current framework implementation only supports one radio audio source.
+    // It is possible to open more than one tuner at a time but only one tuner can be connected
+    // to the radio audio source (AUDIO_DEVICE_IN_FM_TUNER).
+    // The base rule is that a newly connected client always wins, i.e. it always gets a tuner
+    // and can use the audio source if requested.
+    // If another client is preempted, it is notified by a callback with RADIO_EVENT_CONTROL
+    // indicating loss of control.
+    // - If the newly connected client requests the audio source (audio == true):
+    //    - if an audio source is available
+    //          no problem
+    //    - if not:
+    //          the oldest client in the list using audio is preempted.
+    // - If the newly connected client does not request the audio source (audio == false):
+    //    - if a tuner is available
+    //          no problem
+    //    - if not:
+    //          The oldest client not using audio is preempted first and, if none is found,
+    //          the oldest client using audio is preempted.
+    // Each time a tuner using the audio source is opened or closed, the audio policy manager is
+    // notified of the connection or disconnection of AUDIO_DEVICE_IN_FM_TUNER.
+
     sp<ModuleClient> oldestTuner;
     sp<ModuleClient> oldestAudio;
     size_t allocatedTuners = 0;
@@ -437,28 +464,31 @@
     }
 
     const struct radio_tuner *halTuner;
+    sp<ModuleClient> preemptedClient;
     if (audio) {
         if (allocatedAudio >= mProperties.num_audio_sources) {
             ALOG_ASSERT(oldestAudio != 0, "addClient() allocatedAudio/oldestAudio mismatch");
-            halTuner = oldestAudio->getTuner();
-            oldestAudio->setTuner(NULL);
-            mHwDevice->close_tuner(mHwDevice, halTuner);
+            preemptedClient = oldestAudio;
         }
     } else {
         if (allocatedAudio + allocatedTuners >= mProperties.num_tuners) {
             if (allocatedTuners != 0) {
                 ALOG_ASSERT(oldestTuner != 0, "addClient() allocatedTuners/oldestTuner mismatch");
-                halTuner = oldestTuner->getTuner();
-                oldestTuner->setTuner(NULL);
-                mHwDevice->close_tuner(mHwDevice, halTuner);
+                preemptedClient = oldestTuner;
             } else {
                 ALOG_ASSERT(oldestAudio != 0, "addClient() allocatedAudio/oldestAudio mismatch");
-                halTuner = oldestAudio->getTuner();
-                oldestAudio->setTuner(NULL);
-                mHwDevice->close_tuner(mHwDevice, halTuner);
+                preemptedClient = oldestAudio;
             }
         }
     }
+    if (preemptedClient != 0) {
+        halTuner = preemptedClient->getTuner();
+        preemptedClient->setTuner(NULL);
+        mHwDevice->close_tuner(mHwDevice, halTuner);
+        if (preemptedClient->audio()) {
+            notifyDeviceConnection(false, "");
+        }
+    }
 
     ret = mHwDevice->open_tuner(mHwDevice, &halConfig, audio,
                                 RadioService::callback, moduleClient->callbackThread().get(),
@@ -467,11 +497,13 @@
         ALOGV("addClient() setTuner %p", halTuner);
         moduleClient->setTuner(halTuner);
         mModuleClients.add(moduleClient);
+        if (audio) {
+            notifyDeviceConnection(true, "");
+        }
     } else {
         moduleClient.clear();
     }
 
-    //TODO notify audio device connection to audio policy manager if audio is on
 
     ALOGV("addClient() DONE moduleClient %p", moduleClient.get());
 
@@ -501,19 +533,32 @@
     }
 
     mHwDevice->close_tuner(mHwDevice, halTuner);
+    if (moduleClient->audio()) {
+        notifyDeviceConnection(false, "");
+    }
 
-    //TODO notify audio device disconnection to audio policy manager if audio was on
     mMute = true;
 
     if (mModuleClients.isEmpty()) {
         return;
     }
 
+    // Tuner reallocation logic:
+    // When a client is removed and was controlling a tuner, this tuner will be allocated to a
+    // previously preempted client. This client will be notified by a callback with
+    // RADIO_EVENT_CONTROL indicating gain of control.
+    // - If a preempted client is waiting for an audio source and one becomes available:
+    //    Allocate the tuner to the most recently added client waiting for an audio source
+    // - If not:
+    //    Allocate the tuner to the most recently added client.
+    // Each time a tuner using the audio source is opened or closed, the audio policy manager is
+    // notified of the connection or disconnection of AUDIO_DEVICE_IN_FM_TUNER.
+
     sp<ModuleClient> youngestClient;
     sp<ModuleClient> youngestClientAudio;
     size_t allocatedTuners = 0;
     size_t allocatedAudio = 0;
-    for (ssize_t i = mModuleClients.size(); i >= 0; i--) {
+    for (ssize_t i = mModuleClients.size() - 1; i >= 0; i--) {
         if (mModuleClients[i]->getTuner() == NULL) {
             if (mModuleClients[i]->audio()) {
                 if (youngestClientAudio == 0) {
@@ -550,10 +595,11 @@
                                 RadioService::callback, moduleClient->callbackThread().get(),
                                 &halTuner);
 
-    //TODO notify audio device connection to audio policy manager if audio is on
-
     if (ret == 0) {
         youngestClient->setTuner(halTuner);
+        if (youngestClient->audio()) {
+            notifyDeviceConnection(true, "");
+        }
     }
 }
 
@@ -583,6 +629,16 @@
     return &mProperties.bands[0];
 }
 
+void RadioService::Module::notifyDeviceConnection(bool connected,
+                                                  const char *address) {
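+    // Perform the audio policy call with the radio service's own identity rather than the
+    // binder caller's.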
+    int64_t token = IPCThreadState::self()->clearCallingIdentity();
+    AudioSystem::setDeviceConnectionState(AUDIO_DEVICE_IN_FM_TUNER,
+                                          connected ? AUDIO_POLICY_DEVICE_STATE_AVAILABLE :
+                                                  AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+                                          address, kRadioTunerAudioDeviceName);
+    IPCThreadState::self()->restoreCallingIdentity(token);
+}
+
 #undef LOG_TAG
 #define LOG_TAG "RadioService::ModuleClient"
 
diff --git a/services/radio/RadioService.h b/services/radio/RadioService.h
index 9ede020..49feda6 100644
--- a/services/radio/RadioService.h
+++ b/services/radio/RadioService.h
@@ -66,8 +66,7 @@
     class Module : public virtual RefBase {
     public:
 
-       Module(const sp<RadioService>& service,
-              radio_hw_device* hwDevice,
+       Module(radio_hw_device* hwDevice,
               struct radio_properties properties);
 
        virtual ~Module();
@@ -88,16 +87,17 @@
        const struct radio_properties properties() const { return mProperties; }
        const struct radio_band_config *getDefaultConfig() const ;
 
-       wp<RadioService> service() const { return mService; }
-
     private:
 
-        Mutex                         mLock;
-        wp<RadioService>              mService;
-        const struct radio_hw_device        *mHwDevice;
-        const struct radio_properties       mProperties;
-        Vector< sp<ModuleClient> >    mModuleClients;
-        bool                          mMute;
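+       // Informs the audio policy manager that the FM tuner input device was (dis)connected.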
+       void notifyDeviceConnection(bool connected, const char *address);
+
+        Mutex                         mLock;          // protects mModuleClients
+        const struct radio_hw_device  *mHwDevice;     // HAL hardware device
+        const struct radio_properties mProperties;    // cached hardware module properties
+        Vector< sp<ModuleClient> >    mModuleClients; // list of attached clients
+        bool                          mMute;          // radio audio source state
+                                                      // when unmuted, audio is routed to the
+                                                      // output device selected for media use case.
     }; // class Module
 
     class CallbackThread : public Thread {
@@ -120,11 +120,11 @@
                 sp<IMemory> prepareEvent(radio_hal_event_t *halEvent);
 
     private:
-        wp<ModuleClient>      mModuleClient;
-        Condition             mCallbackCond;
-        Mutex                 mCallbackLock;
-        Vector< sp<IMemory> > mEventQueue;
-        sp<MemoryDealer>      mMemoryDealer;
+        wp<ModuleClient>      mModuleClient;    // client module the thread belongs to
+        Condition             mCallbackCond;    // condition signaled when a new event is posted
+        Mutex                 mCallbackLock;    // protects mEventQueue
+        Vector< sp<IMemory> > mEventQueue;      // pending callback events
+        sp<MemoryDealer>      mMemoryDealer;    // shared memory for callback event
     }; // class CallbackThread
 
     class ModuleClient : public BnRadio,
@@ -181,13 +181,15 @@
 
     private:
 
-        mutable Mutex               mLock;
-        wp<Module>                  mModule;
-        sp<IRadioClient>            mClient;
-        radio_band_config_t         mConfig;
-        sp<CallbackThread>          mCallbackThread;
+        mutable Mutex               mLock;           // protects mClient, mConfig and mTuner
+        wp<Module>                  mModule;         // The module this client is attached to
+        sp<IRadioClient>            mClient;         // event callback binder interface
+        radio_band_config_t         mConfig;         // current band configuration
+        sp<CallbackThread>          mCallbackThread; // event callback thread
         const bool                  mAudio;
-        const struct radio_tuner    *mTuner;
+        const struct radio_tuner    *mTuner;        // HAL tuner interface. NULL indicates that
+                                                    // this client does not currently control
+                                                    // any tuner.
     }; // class ModuleClient
 
 
@@ -199,8 +201,8 @@
 
     static void convertProperties(radio_properties_t *properties,
                                   const radio_hal_properties_t *halProperties);
-    Mutex               mServiceLock;
-    volatile int32_t    mNextUniqueId;
+    Mutex               mServiceLock;   // protects mModules
+    volatile int32_t    mNextUniqueId;  // for module ID allocation
     DefaultKeyedVector< radio_handle_t, sp<Module> > mModules;
 };