Merge "gralloc: add IAllocator/IMapper 4.0 to codec2"
diff --git a/apex/manifest.json b/apex/manifest.json
index 3011ee8..ddd642e 100644
--- a/apex/manifest.json
+++ b/apex/manifest.json
@@ -1,4 +1,4 @@
 {
   "name": "com.android.media",
-  "version": 290000000
+  "version": 300000000
 }
diff --git a/apex/manifest_codec.json b/apex/manifest_codec.json
index 83a5178..2320fd7 100644
--- a/apex/manifest_codec.json
+++ b/apex/manifest_codec.json
@@ -1,4 +1,4 @@
 {
   "name": "com.android.media.swcodec",
-  "version": 290000000
+  "version": 300000000
 }
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index c6c35ef..84d1d93 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -347,6 +347,20 @@
     return c->setPreviewCallbackTarget(callbackProducer);
 }
 
+status_t Camera::setAudioRestriction(int32_t mode)
+{
+    sp <::android::hardware::ICamera> c = mCamera;
+    if (c == 0) return NO_INIT;
+    return c->setAudioRestriction(mode);
+}
+
+int32_t Camera::getGlobalAudioRestriction()
+{
+    sp <::android::hardware::ICamera> c = mCamera;
+    if (c == 0) return NO_INIT;
+    return c->getGlobalAudioRestriction();
+}
+
 // callback from camera service
 void Camera::notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2)
 {
diff --git a/camera/ICamera.cpp b/camera/ICamera.cpp
index f0945c7..b83edf7 100644
--- a/camera/ICamera.cpp
+++ b/camera/ICamera.cpp
@@ -56,6 +56,8 @@
     SET_VIDEO_BUFFER_TARGET,
     RELEASE_RECORDING_FRAME_HANDLE,
     RELEASE_RECORDING_FRAME_HANDLE_BATCH,
+    SET_AUDIO_RESTRICTION,
+    GET_GLOBAL_AUDIO_RESTRICTION,
 };
 
 class BpCamera: public BpInterface<ICamera>
@@ -191,6 +193,21 @@
         }
     }
 
+    status_t setAudioRestriction(int32_t mode) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
+        data.writeInt32(mode);
+        remote()->transact(SET_AUDIO_RESTRICTION, data, &reply);
+        return reply.readInt32();
+    }
+
+    int32_t getGlobalAudioRestriction() {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
+        remote()->transact(GET_GLOBAL_AUDIO_RESTRICTION, data, &reply);
+        return reply.readInt32();
+    }
+
     status_t setVideoBufferMode(int32_t videoBufferMode)
     {
         ALOGV("setVideoBufferMode: %d", videoBufferMode);
@@ -494,6 +511,17 @@
             reply->writeInt32(setVideoTarget(st));
             return NO_ERROR;
         } break;
+        case SET_AUDIO_RESTRICTION: {
+            CHECK_INTERFACE(ICamera, data, reply);
+            int32_t mode = data.readInt32();
+            reply->writeInt32(setAudioRestriction(mode));
+            return NO_ERROR;
+        } break;
+        case GET_GLOBAL_AUDIO_RESTRICTION: {
+            CHECK_INTERFACE(ICamera, data, reply);
+            reply->writeInt32(getGlobalAudioRestriction());
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
index 49dfde8..93549e0 100644
--- a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
@@ -155,4 +155,26 @@
     void updateOutputConfiguration(int streamId, in OutputConfiguration outputConfiguration);
 
     void finalizeOutputConfigurations(int streamId, in OutputConfiguration outputConfiguration);
+
+
+    // Keep in sync with public API in
+    // frameworks/base/core/java/android/hardware/camera2/CameraDevice.java
+    const int AUDIO_RESTRICTION_NONE = 0;
+    const int AUDIO_RESTRICTION_VIBRATION = 1;
+    const int AUDIO_RESTRICTION_VIBRATION_SOUND = 3;
+
+    /**
+      * Set audio restriction mode for this camera device.
+      *
+      * @param mode the audio restriction mode ID as above
+      *
+      */
+    void setCameraAudioRestriction(int mode);
+
+    /**
+      * Get global audio restriction mode for all camera clients.
+      *
+      * @return the currently applied system-wide audio restriction mode
+      */
+    int getGlobalAudioRestriction();
 }
diff --git a/camera/cameraserver/Android.bp b/camera/cameraserver/Android.bp
index ecaba3a..334f879 100644
--- a/camera/cameraserver/Android.bp
+++ b/camera/cameraserver/Android.bp
@@ -25,7 +25,6 @@
         "libgui",
         "libbinder",
         "libhidlbase",
-        "libhidltransport",
         "android.hardware.camera.common@1.0",
         "android.hardware.camera.provider@2.4",
         "android.hardware.camera.provider@2.5",
diff --git a/camera/cameraserver/main_cameraserver.cpp b/camera/cameraserver/main_cameraserver.cpp
index 53b3d84..cef8ef5 100644
--- a/camera/cameraserver/main_cameraserver.cpp
+++ b/camera/cameraserver/main_cameraserver.cpp
@@ -34,6 +34,7 @@
     sp<IServiceManager> sm = defaultServiceManager();
     ALOGI("ServiceManager: %p", sm.get());
     CameraService::instantiate();
+    ALOGI("ServiceManager: %p done instantiate", sm.get());
     ProcessState::self()->startThreadPool();
     IPCThreadState::self()->joinThreadPool();
 }
diff --git a/camera/include/camera/Camera.h b/camera/include/camera/Camera.h
index 430aa1c..2cdb617 100644
--- a/camera/include/camera/Camera.h
+++ b/camera/include/camera/Camera.h
@@ -167,6 +167,9 @@
 
             sp<ICameraRecordingProxy> getRecordingProxy();
 
+            status_t     setAudioRestriction(int32_t mode);
+            int32_t      getGlobalAudioRestriction();
+
     // ICameraClient interface
     virtual void        notifyCallback(int32_t msgType, int32_t ext, int32_t ext2);
     virtual void        dataCallback(int32_t msgType, const sp<IMemory>& dataPtr,
diff --git a/camera/include/camera/android/hardware/ICamera.h b/camera/include/camera/android/hardware/ICamera.h
index 80823d6..ec19e5d 100644
--- a/camera/include/camera/android/hardware/ICamera.h
+++ b/camera/include/camera/android/hardware/ICamera.h
@@ -140,6 +140,12 @@
     // Set the video buffer producer for camera to use in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
     virtual status_t        setVideoTarget(
             const sp<IGraphicBufferProducer>& bufferProducer) = 0;
+
+    // Set the audio restriction mode
+    virtual status_t        setAudioRestriction(int32_t mode) = 0;
+
+    // Get the global audio restriction mode
+    virtual int32_t         getGlobalAudioRestriction() = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/camera/ndk/Android.bp b/camera/ndk/Android.bp
index a2ee65d..d8220eb 100644
--- a/camera/ndk/Android.bp
+++ b/camera/ndk/Android.bp
@@ -107,7 +107,6 @@
     ],
 
     shared_libs: [
-        "libhwbinder",
         "libfmq",
         "libhidlbase",
         "libhardware",
@@ -143,7 +142,6 @@
     vendor: true,
     srcs: ["ndk_vendor/tests/AImageReaderVendorTest.cpp"],
     shared_libs: [
-        "libhwbinder",
         "libcamera2ndk_vendor",
         "libcamera_metadata",
         "libmediandk",
diff --git a/camera/ndk/impl/ACameraCaptureSession.cpp b/camera/ndk/impl/ACameraCaptureSession.cpp
index d6f1412..68db233 100644
--- a/camera/ndk/impl/ACameraCaptureSession.cpp
+++ b/camera/ndk/impl/ACameraCaptureSession.cpp
@@ -33,7 +33,9 @@
         dev->unlockDevice();
     }
     // Fire onClosed callback
-    (*mUserSessionCallback.onClosed)(mUserSessionCallback.context, this);
+    if (mUserSessionCallback.onClosed != nullptr) {
+        (*mUserSessionCallback.onClosed)(mUserSessionCallback.context, this);
+    }
     ALOGV("~ACameraCaptureSession: %p is deleted", this);
 }
 
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 4a801a7..68fe045 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -3543,11 +3543,19 @@
      * output capture result.</p>
      * <p>This control is only effective if ACAMERA_CONTROL_AE_MODE or ACAMERA_CONTROL_MODE is set to
      * OFF; otherwise the auto-exposure algorithm will override this value.</p>
+     * <p>Note that for devices supporting postRawSensitivityBoost, the total sensitivity applied
+     * to the final processed image is the combination of ACAMERA_SENSOR_SENSITIVITY and
+     * ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST. In case the application uses the sensor
+     * sensitivity from the last capture result of an auto request for a manual request, in order
+     * to achieve the same brightness in the output image, the application should also
+     * set postRawSensitivityBoost.</p>
      *
      * @see ACAMERA_CONTROL_AE_MODE
      * @see ACAMERA_CONTROL_MODE
+     * @see ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST
      * @see ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE
      * @see ACAMERA_SENSOR_MAX_ANALOG_SENSITIVITY
+     * @see ACAMERA_SENSOR_SENSITIVITY
      */
     ACAMERA_SENSOR_SENSITIVITY =                                // int32
             ACAMERA_SENSOR_START + 2,
@@ -7758,6 +7766,13 @@
      */
     ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_SECURE_IMAGE_DATA         = 13,
 
+    /**
+     * <p>The camera device is only accessible by Android's system components and privileged
+     * applications. Processes need to have the android.permission.SYSTEM_CAMERA permission
+     * in addition to android.permission.CAMERA in order to connect to this camera device.</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_SYSTEM_CAMERA             = 14,
+
 } acamera_metadata_enum_android_request_available_capabilities_t;
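
To illustrate the ACAMERA_SENSOR_SENSITIVITY note above from the NDK side: when a manual request reuses the sensitivity reported in an auto-exposure result, copying the post-RAW sensitivity boost as well keeps the output brightness unchanged. The helper below is a sketch, not code from this patch; 'result' and 'request' are assumed to come from the usual capture-result callback and ACameraDevice_createCaptureRequest() flow.

#include <camera/NdkCameraMetadata.h>
#include <camera/NdkCaptureRequest.h>

// Copy ISO sensitivity and the post-RAW boost from the last auto-exposure
// result into a manual request so the output brightness stays the same.
static void copyAutoExposureSensitivity(const ACameraMetadata* result,
                                        ACaptureRequest* request) {
    ACameraMetadata_const_entry entry;
    if (ACameraMetadata_getConstEntry(result, ACAMERA_SENSOR_SENSITIVITY, &entry) == ACAMERA_OK) {
        ACaptureRequest_setEntry_i32(request, ACAMERA_SENSOR_SENSITIVITY, 1, entry.data.i32);
    }
    if (ACameraMetadata_getConstEntry(
            result, ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST, &entry) == ACAMERA_OK) {
        ACaptureRequest_setEntry_i32(
                request, ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST, 1, entry.data.i32);
    }
}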
 
 
diff --git a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
index 7ab0124..938b5f5 100644
--- a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
+++ b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
@@ -253,21 +253,9 @@
         return true;
     }
 
-    static void onDeviceDisconnected(void* /*obj*/, ACameraDevice* /*device*/) {}
-
-    static void onDeviceError(void* /*obj*/, ACameraDevice* /*device*/, int /*errorCode*/) {}
-
-    static void onSessionClosed(void* /*obj*/, ACameraCaptureSession* /*session*/) {}
-
-    static void onSessionReady(void* /*obj*/, ACameraCaptureSession* /*session*/) {}
-
-    static void onSessionActive(void* /*obj*/, ACameraCaptureSession* /*session*/) {}
-
    private:
-    ACameraDevice_StateCallbacks mDeviceCb{this, onDeviceDisconnected,
-                                           onDeviceError};
-    ACameraCaptureSession_stateCallbacks mSessionCb{
-        this, onSessionClosed, onSessionReady, onSessionActive};
+    ACameraDevice_StateCallbacks mDeviceCb{this, nullptr, nullptr};
+    ACameraCaptureSession_stateCallbacks mSessionCb{this, nullptr, nullptr, nullptr};
 
     native_handle_t* mImgReaderAnw = nullptr;  // not owned by us.
 
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 7aa655f..e39f885 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -37,6 +37,7 @@
 
 #include <binder/IPCThreadState.h>
 #include <utils/Errors.h>
+#include <utils/SystemClock.h>
 #include <utils/Timers.h>
 #include <utils/Trace.h>
 
@@ -95,6 +96,8 @@
 static const uint32_t kFallbackWidth = 1280;        // 720p
 static const uint32_t kFallbackHeight = 720;
 static const char* kMimeTypeAvc = "video/avc";
+static const char* kMimeTypeApplicationOctetstream = "application/octet-stream";
+static const char* kWinscopeMagicString = "#VV1NSC0PET1ME!#";
 
 // Command-line parameters.
 static bool gVerbose = false;           // chatty on stdout
@@ -350,6 +353,50 @@
 }
 
 /*
+ * Writes an unsigned integer byte-by-byte in little endian order regardless
+ * of the platform endianness.
+ */
+template <typename UINT>
+static void writeValueLE(UINT value, uint8_t* buffer) {
+    for (int i = 0; i < sizeof(UINT); ++i) {
+        buffer[i] = static_cast<uint8_t>(value);
+        value >>= 8;
+    }
+}
+
+/*
+ * Saves each frame's presentation time, relative to the elapsed realtime clock and expressed
+ * in microseconds, to a metadata track, preceded by a Winscope magic string and a frame count.
+ * This metadata is used by the Winscope tool to sync video with SurfaceFlinger
+ * and WindowManager traces.
+ *
+ * The metadata is written as a binary array as follows:
+ * - winscope magic string (kWinscopeMagicString constant), without trailing null char,
+ * - the number of recorded frames (as little endian uint32),
+ * - for every frame its presentation time relative to the elapsed realtime clock in microseconds
+ *   (as little endian uint64).
+ */
+static status_t writeWinscopeMetadata(const Vector<int64_t>& timestamps,
+        const ssize_t metaTrackIdx, const sp<MediaMuxer>& muxer) {
+    ALOGV("Writing metadata");
+    int64_t systemTimeToElapsedTimeOffsetMicros = (android::elapsedRealtimeNano()
+        - systemTime(SYSTEM_TIME_MONOTONIC)) / 1000;
+    sp<ABuffer> buffer = new ABuffer(timestamps.size() * sizeof(int64_t)
+        + sizeof(uint32_t) + strlen(kWinscopeMagicString));
+    uint8_t* pos = buffer->data();
+    strcpy(reinterpret_cast<char*>(pos), kWinscopeMagicString);
+    pos += strlen(kWinscopeMagicString);
+    writeValueLE<uint32_t>(timestamps.size(), pos);
+    pos += sizeof(uint32_t);
+    for (size_t idx = 0; idx < timestamps.size(); ++idx) {
+        writeValueLE<uint64_t>(static_cast<uint64_t>(timestamps[idx]
+            + systemTimeToElapsedTimeOffsetMicros), pos);
+        pos += sizeof(uint64_t);
+    }
+    return muxer->writeSampleData(buffer, metaTrackIdx, timestamps[0], 0);
+}
+
+/*
  * Runs the MediaCodec encoder, sending the output to the MediaMuxer.  The
  * input frames are coming from the virtual display as fast as SurfaceFlinger
  * wants to send them.
@@ -364,10 +411,13 @@
     static int kTimeout = 250000;   // be responsive on signal
     status_t err;
     ssize_t trackIdx = -1;
+    ssize_t metaTrackIdx = -1;
     uint32_t debugNumFrames = 0;
     int64_t startWhenNsec = systemTime(CLOCK_MONOTONIC);
     int64_t endWhenNsec = startWhenNsec + seconds_to_nanoseconds(gTimeLimitSec);
     DisplayInfo mainDpyInfo;
+    Vector<int64_t> timestamps;
+    bool firstFrame = true;
 
     assert((rawFp == NULL && muxer != NULL) || (rawFp != NULL && muxer == NULL));
 
@@ -384,6 +434,11 @@
         int64_t ptsUsec;
         uint32_t flags;
 
+        if (firstFrame) {
+            ATRACE_NAME("first_frame");
+            firstFrame = false;
+        }
+
         if (systemTime(CLOCK_MONOTONIC) > endWhenNsec) {
             if (gVerbose) {
                 printf("Time limit reached\n");
@@ -465,6 +520,9 @@
                             "Failed writing data to muxer (err=%d)\n", err);
                         return err;
                     }
+                    if (gOutputFormat == FORMAT_MP4) {
+                        timestamps.add(ptsUsec);
+                    }
                 }
                 debugNumFrames++;
             }
@@ -491,6 +549,11 @@
                 encoder->getOutputFormat(&newFormat);
                 if (muxer != NULL) {
                     trackIdx = muxer->addTrack(newFormat);
+                    if (gOutputFormat == FORMAT_MP4) {
+                        sp<AMessage> metaFormat = new AMessage;
+                        metaFormat->setString(KEY_MIME, kMimeTypeApplicationOctetstream);
+                        metaTrackIdx = muxer->addTrack(metaFormat);
+                    }
                     ALOGV("Starting muxer");
                     err = muxer->start();
                     if (err != NO_ERROR) {
@@ -527,6 +590,13 @@
                         systemTime(CLOCK_MONOTONIC) - startWhenNsec));
         fflush(stdout);
     }
+    if (metaTrackIdx >= 0 && !timestamps.isEmpty()) {
+        err = writeWinscopeMetadata(timestamps, metaTrackIdx, muxer);
+        if (err != NO_ERROR) {
+            fprintf(stderr, "Failed writing metadata to muxer (err=%d)\n", err);
+            return err;
+        }
+    }
     return NO_ERROR;
 }
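
For reference, a minimal sketch of how a consumer could read back the metadata track written above, assuming only the layout documented for writeWinscopeMetadata() (magic string without trailing NUL, little-endian uint32 frame count, then one little-endian uint64 timestamp per frame). The parser below is illustrative, not code from this patch.

#include <cstdint>
#include <cstring>
#include <vector>

// Parses a Winscope metadata sample into per-frame presentation times
// (microseconds, elapsed-realtime clock). Returns false on a malformed payload.
static bool parseWinscopeMetadata(const uint8_t* data, size_t size,
                                  std::vector<uint64_t>* timestampsUs) {
    static const char kMagic[] = "#VV1NSC0PET1ME!#";
    const size_t magicLen = std::strlen(kMagic);
    if (size < magicLen + sizeof(uint32_t) || std::memcmp(data, kMagic, magicLen) != 0) {
        return false;
    }
    const uint8_t* pos = data + magicLen;
    uint32_t count = 0;
    for (size_t i = 0; i < sizeof(uint32_t); ++i) {      // little-endian uint32
        count |= static_cast<uint32_t>(pos[i]) << (8 * i);
    }
    pos += sizeof(uint32_t);
    if (static_cast<uint64_t>(size - magicLen - sizeof(uint32_t)) <
            static_cast<uint64_t>(count) * sizeof(uint64_t)) {
        return false;
    }
    for (uint32_t n = 0; n < count; ++n) {
        uint64_t ts = 0;
        for (size_t i = 0; i < sizeof(uint64_t); ++i) {  // little-endian uint64
            ts |= static_cast<uint64_t>(pos[i]) << (8 * i);
        }
        pos += sizeof(uint64_t);
        timestampsUs->push_back(ts);
    }
    return true;
}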
 
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index 38fd34a..0c8d44a 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -153,9 +153,9 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-        filters/argbtorgba.rs \
-        filters/nightvision.rs \
-        filters/saturation.rs \
+        filters/argbtorgba.rscript \
+        filters/nightvision.rscript \
+        filters/saturation.rscript \
         mediafilter.cpp \
 
 LOCAL_SHARED_LIBRARIES := \
diff --git a/cmds/stagefright/filters/argbtorgba.rs b/cmds/stagefright/filters/argbtorgba.rscript
similarity index 100%
rename from cmds/stagefright/filters/argbtorgba.rs
rename to cmds/stagefright/filters/argbtorgba.rscript
diff --git a/cmds/stagefright/filters/nightvision.rs b/cmds/stagefright/filters/nightvision.rscript
similarity index 100%
rename from cmds/stagefright/filters/nightvision.rs
rename to cmds/stagefright/filters/nightvision.rscript
diff --git a/cmds/stagefright/filters/saturation.rs b/cmds/stagefright/filters/saturation.rscript
similarity index 100%
rename from cmds/stagefright/filters/saturation.rs
rename to cmds/stagefright/filters/saturation.rscript
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index bf36be0..cf9efe0 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -989,7 +989,7 @@
                 failed = false;
                 printf("getFrameAtTime(%s) => OK\n", filename);
 
-                VideoFrame *frame = (VideoFrame *)mem->pointer();
+                VideoFrame *frame = (VideoFrame *)mem->unsecurePointer();
 
                 CHECK_EQ(writeJpegFile("/sdcard/out.jpg",
                             frame->getFlattenedData(),
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index 35bdbc0..59b5f9f 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -116,7 +116,7 @@
 
     sp<IMemory> mem = mBuffers.itemAt(index);
 
-    ssize_t n = read(mFd, mem->pointer(), mem->size());
+    ssize_t n = read(mFd, mem->unsecurePointer(), mem->size());
     if (n <= 0) {
         mListener->issueCommand(IStreamListener::EOS, false /* synchronous */);
     } else {
@@ -238,7 +238,7 @@
             copy = mem->size() - mCurrentBufferOffset;
         }
 
-        memcpy((uint8_t *)mem->pointer() + mCurrentBufferOffset, data, copy);
+        memcpy((uint8_t *)mem->unsecurePointer() + mCurrentBufferOffset, data, copy);
         mCurrentBufferOffset += copy;
 
         if (mCurrentBufferOffset == mem->size()) {
diff --git a/drm/drmserver/Android.bp b/drm/drmserver/Android.bp
index c25a0a1..fd71837 100644
--- a/drm/drmserver/Android.bp
+++ b/drm/drmserver/Android.bp
@@ -25,6 +25,7 @@
 
     shared_libs: [
         "libmedia",
+        "libmediametrics",
         "libutils",
         "liblog",
         "libbinder",
diff --git a/drm/drmserver/DrmManager.cpp b/drm/drmserver/DrmManager.cpp
index afbcb39..66610b3 100644
--- a/drm/drmserver/DrmManager.cpp
+++ b/drm/drmserver/DrmManager.cpp
@@ -19,7 +19,10 @@
 #include "utils/Log.h"
 
 #include <utils/String8.h>
+
+#include <binder/IPCThreadState.h>
 #include <drm/DrmInfo.h>
+
 #include <drm/DrmInfoEvent.h>
 #include <drm/DrmRights.h>
 #include <drm/DrmConstraints.h>
@@ -28,6 +31,7 @@
 #include <drm/DrmInfoRequest.h>
 #include <drm/DrmSupportInfo.h>
 #include <drm/DrmConvertedStatus.h>
+#include <media/MediaAnalyticsItem.h>
 #include <IDrmEngine.h>
 
 #include "DrmManager.h"
@@ -50,6 +54,40 @@
 
 }
 
+void DrmManager::reportEngineMetrics(
+        const char func[], const String8& plugInId, const String8& mimeType) {
+    IDrmEngine& engine = mPlugInManager.getPlugIn(plugInId);
+
+    std::unique_ptr<MediaAnalyticsItem> item(MediaAnalyticsItem::create("drmmanager"));
+    item->generateSessionID();
+    item->setUid(IPCThreadState::self()->getCallingUid());
+    item->setCString("function_name", func);
+    item->setCString("plugin_id", plugInId.getPathLeaf().getBasePath().c_str());
+
+    std::unique_ptr<DrmSupportInfo> info(engine.getSupportInfo(0));
+    if (NULL != info) {
+        item->setCString("description", info->getDescription().c_str());
+    }
+
+    if (!mimeType.isEmpty()) {
+        item->setCString("mime_types", mimeType.c_str());
+    } else if (NULL != info) {
+        DrmSupportInfo::MimeTypeIterator mimeIter = info->getMimeTypeIterator();
+        String8 mimes;
+        while (mimeIter.hasNext()) {
+            mimes += mimeIter.next();
+            if (mimeIter.hasNext()) {
+                mimes += ",";
+            }
+        }
+        item->setCString("mime_types", mimes.c_str());
+    }
+
+    if (!item->selfrecord()) {
+        ALOGE("Failed to record metrics");
+    }
+}
+
 int DrmManager::addUniqueId(bool isNative) {
     Mutex::Autolock _l(mLock);
 
@@ -147,27 +185,36 @@
     for (size_t index = 0; index < plugInIdList.size(); index++) {
         IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInIdList.itemAt(index));
         rDrmEngine.terminate(uniqueId);
+        reportEngineMetrics(__func__, plugInIdList[index]);
     }
 }
 
 DrmConstraints* DrmManager::getConstraints(int uniqueId, const String8* path, const int action) {
     Mutex::Autolock _l(mLock);
+    DrmConstraints *constraints = NULL;
     const String8 plugInId = getSupportedPlugInIdFromPath(uniqueId, *path);
     if (EMPTY_STRING != plugInId) {
         IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
-        return rDrmEngine.getConstraints(uniqueId, path, action);
+        constraints = rDrmEngine.getConstraints(uniqueId, path, action);
     }
-    return NULL;
+    if (NULL != constraints) {
+        reportEngineMetrics(__func__, plugInId);
+    }
+    return constraints;
 }
 
 DrmMetadata* DrmManager::getMetadata(int uniqueId, const String8* path) {
     Mutex::Autolock _l(mLock);
+    DrmMetadata *meta = NULL;
     const String8 plugInId = getSupportedPlugInIdFromPath(uniqueId, *path);
     if (EMPTY_STRING != plugInId) {
         IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
-        return rDrmEngine.getMetadata(uniqueId, path);
+        meta = rDrmEngine.getMetadata(uniqueId, path);
     }
-    return NULL;
+    if (NULL != meta) {
+        reportEngineMetrics(__func__, plugInId);
+    }
+    return meta;
 }
 
 bool DrmManager::canHandle(int uniqueId, const String8& path, const String8& mimeType) {
@@ -175,6 +222,10 @@
     const String8 plugInId = getSupportedPlugInId(mimeType);
     bool result = (EMPTY_STRING != plugInId) ? true : false;
 
+    if (result) {
+        reportEngineMetrics(__func__, plugInId, mimeType);
+    }
+
     if (0 < path.length()) {
         if (result) {
             IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
@@ -191,12 +242,17 @@
 
 DrmInfoStatus* DrmManager::processDrmInfo(int uniqueId, const DrmInfo* drmInfo) {
     Mutex::Autolock _l(mLock);
-    const String8 plugInId = getSupportedPlugInId(drmInfo->getMimeType());
+    DrmInfoStatus *infoStatus = NULL;
+    const String8 mimeType = drmInfo->getMimeType();
+    const String8 plugInId = getSupportedPlugInId(mimeType);
     if (EMPTY_STRING != plugInId) {
         IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
-        return rDrmEngine.processDrmInfo(uniqueId, drmInfo);
+        infoStatus = rDrmEngine.processDrmInfo(uniqueId, drmInfo);
     }
-    return NULL;
+    if (NULL != infoStatus) {
+        reportEngineMetrics(__func__, plugInId, mimeType);
+    }
+    return infoStatus;
 }
 
 bool DrmManager::canHandle(int uniqueId, const String8& path) {
@@ -208,6 +264,7 @@
         result = rDrmEngine.canHandle(uniqueId, path);
 
         if (result) {
+            reportEngineMetrics(__func__, plugInPathList[i]);
             break;
         }
     }
@@ -216,54 +273,75 @@
 
 DrmInfo* DrmManager::acquireDrmInfo(int uniqueId, const DrmInfoRequest* drmInfoRequest) {
     Mutex::Autolock _l(mLock);
-    const String8 plugInId = getSupportedPlugInId(drmInfoRequest->getMimeType());
+    DrmInfo *info = NULL;
+    const String8 mimeType = drmInfoRequest->getMimeType();
+    const String8 plugInId = getSupportedPlugInId(mimeType);
     if (EMPTY_STRING != plugInId) {
         IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
-        return rDrmEngine.acquireDrmInfo(uniqueId, drmInfoRequest);
+        info = rDrmEngine.acquireDrmInfo(uniqueId, drmInfoRequest);
     }
-    return NULL;
+    if (NULL != info) {
+        reportEngineMetrics(__func__, plugInId, mimeType);
+    }
+    return info;
 }
 
 status_t DrmManager::saveRights(int uniqueId, const DrmRights& drmRights,
             const String8& rightsPath, const String8& contentPath) {
     Mutex::Autolock _l(mLock);
-    const String8 plugInId = getSupportedPlugInId(drmRights.getMimeType());
+    const String8 mimeType = drmRights.getMimeType();
+    const String8 plugInId = getSupportedPlugInId(mimeType);
     status_t result = DRM_ERROR_UNKNOWN;
     if (EMPTY_STRING != plugInId) {
         IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
         result = rDrmEngine.saveRights(uniqueId, drmRights, rightsPath, contentPath);
     }
+    if (DRM_NO_ERROR == result) {
+        reportEngineMetrics(__func__, plugInId, mimeType);
+    }
     return result;
 }
 
 String8 DrmManager::getOriginalMimeType(int uniqueId, const String8& path, int fd) {
     Mutex::Autolock _l(mLock);
+    String8 mimeType(EMPTY_STRING);
     const String8 plugInId = getSupportedPlugInIdFromPath(uniqueId, path);
     if (EMPTY_STRING != plugInId) {
         IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
-        return rDrmEngine.getOriginalMimeType(uniqueId, path, fd);
+        mimeType = rDrmEngine.getOriginalMimeType(uniqueId, path, fd);
     }
-    return EMPTY_STRING;
+    if (!mimeType.isEmpty()) {
+        reportEngineMetrics(__func__, plugInId, mimeType);
+    }
+    return mimeType;
 }
 
 int DrmManager::getDrmObjectType(int uniqueId, const String8& path, const String8& mimeType) {
     Mutex::Autolock _l(mLock);
+    int type = DrmObjectType::UNKNOWN;
     const String8 plugInId = getSupportedPlugInId(uniqueId, path, mimeType);
     if (EMPTY_STRING != plugInId) {
         IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
-        return rDrmEngine.getDrmObjectType(uniqueId, path, mimeType);
+        type = rDrmEngine.getDrmObjectType(uniqueId, path, mimeType);
     }
-    return DrmObjectType::UNKNOWN;
+    if (DrmObjectType::UNKNOWN != type) {
+        reportEngineMetrics(__func__, plugInId, mimeType);
+    }
+    return type;
 }
 
 int DrmManager::checkRightsStatus(int uniqueId, const String8& path, int action) {
     Mutex::Autolock _l(mLock);
+    int rightsStatus = RightsStatus::RIGHTS_INVALID;
     const String8 plugInId = getSupportedPlugInIdFromPath(uniqueId, path);
     if (EMPTY_STRING != plugInId) {
         IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
-        return rDrmEngine.checkRightsStatus(uniqueId, path, action);
+        rightsStatus = rDrmEngine.checkRightsStatus(uniqueId, path, action);
     }
-    return RightsStatus::RIGHTS_INVALID;
+    if (RightsStatus::RIGHTS_INVALID != rightsStatus) {
+        reportEngineMetrics(__func__, plugInId);
+    }
+    return rightsStatus;
 }
 
 status_t DrmManager::consumeRights(
@@ -307,6 +385,9 @@
         IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
         result = rDrmEngine.removeRights(uniqueId, path);
     }
+    if (DRM_NO_ERROR == result) {
+        reportEngineMetrics(__func__, plugInId);
+    }
     return result;
 }
 
@@ -319,6 +400,7 @@
         if (DRM_NO_ERROR != result) {
             break;
         }
+        reportEngineMetrics(__func__, plugInIdList[index]);
     }
     return result;
 }
@@ -335,6 +417,7 @@
             ++mConvertId;
             convertId = mConvertId;
             mConvertSessionMap.add(convertId, &rDrmEngine);
+            reportEngineMetrics(__func__, plugInId, mimeType);
         }
     }
     return convertId;
@@ -415,6 +498,7 @@
             if (DRM_NO_ERROR == result) {
                 ++mDecryptSessionId;
                 mDecryptSessionMap.add(mDecryptSessionId, &rDrmEngine);
+                reportEngineMetrics(__func__, plugInId, String8(mime));
                 break;
             }
         }
@@ -443,6 +527,7 @@
             if (DRM_NO_ERROR == result) {
                 ++mDecryptSessionId;
                 mDecryptSessionMap.add(mDecryptSessionId, &rDrmEngine);
+                reportEngineMetrics(__func__, plugInId, String8(mime));
                 break;
             }
         }
@@ -472,6 +557,7 @@
             if (DRM_NO_ERROR == result) {
                 ++mDecryptSessionId;
                 mDecryptSessionMap.add(mDecryptSessionId, &rDrmEngine);
+                reportEngineMetrics(__func__, plugInId, mimeType);
                 break;
             }
         }
diff --git a/drm/drmserver/DrmManager.h b/drm/drmserver/DrmManager.h
index 26222bc..75fc1a8 100644
--- a/drm/drmserver/DrmManager.h
+++ b/drm/drmserver/DrmManager.h
@@ -143,6 +143,9 @@
 
     bool canHandle(int uniqueId, const String8& path);
 
+    void reportEngineMetrics(const char func[],
+            const String8& plugInId, const String8& mimeType = String8(""));
+
 private:
     enum {
         kMaxNumUniqueIds = 0x1000,
diff --git a/drm/libmediadrm/Android.bp b/drm/libmediadrm/Android.bp
index d6db1d4..6dce472 100644
--- a/drm/libmediadrm/Android.bp
+++ b/drm/libmediadrm/Android.bp
@@ -34,7 +34,6 @@
         "android.hardware.drm@1.2",
         "libhidlallocatorutils",
         "libhidlbase",
-        "libhidltransport",
     ],
 
     cflags: [
diff --git a/drm/libmediadrm/CryptoHal.cpp b/drm/libmediadrm/CryptoHal.cpp
index 954608f..58110d4 100644
--- a/drm/libmediadrm/CryptoHal.cpp
+++ b/drm/libmediadrm/CryptoHal.cpp
@@ -321,7 +321,11 @@
      }
 
     // memory must be within the address space of the heap
-    if (memory->pointer() != static_cast<uint8_t *>(heap->getBase()) + memory->offset()  ||
+    // TODO: Using unsecurePointer() has some associated security pitfalls
+    //       (see declaration for details).
+    //       Either document why it is safe in this case or address the
+    //       issue (e.g. by copying).
+    if (memory->unsecurePointer() != static_cast<uint8_t *>(heap->getBase()) + memory->offset()  ||
             heap->getSize() < memory->offset() + memory->size() ||
             SIZE_MAX - memory->offset() < memory->size()) {
         android_errorWriteLog(0x534e4554, "76221123");
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
index eaa3390..cb69f91 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
+++ b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
@@ -89,7 +89,7 @@
     // asset_id change. If it sends an EcmContainer with 2 Ecms with different
     // asset_ids (old and new) then it might be best to prefetch the Emm.
     if ((asset_.id() != 0) && (*asset_id != asset_.id())) {
-        ALOGW("Asset_id change from %llu to %" PRIu64, asset_.id(), *asset_id);
+        ALOGW("Asset_id change from %" PRIu64 " to %" PRIu64, asset_.id(), *asset_id);
         asset_.Clear();
     }
 
diff --git a/drm/mediacas/plugins/clearkey/ecm.cpp b/drm/mediacas/plugins/clearkey/ecm.cpp
index 9fde13a..b3b5218 100644
--- a/drm/mediacas/plugins/clearkey/ecm.cpp
+++ b/drm/mediacas/plugins/clearkey/ecm.cpp
@@ -17,6 +17,8 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "ecm"
 
+#include <inttypes.h>
+
 #include "ecm.h"
 #include "ecm_generator.h"
 #include "protos/license_protos.pb.h"
@@ -76,7 +78,7 @@
         return status;
     }
     if (asset.id() != asset_from_emm.id()) {
-        ALOGE("Asset_id from Emm (%llu) does not match asset_id from Ecm (%llu).",
+        ALOGE("Asset_id from Emm (%" PRIu64 ") does not match asset_id from Ecm (%" PRIu64 ").",
                 asset_from_emm.id(), asset.id());
         return CLEARKEY_STATUS_INVALID_PARAMETER;
     }
diff --git a/drm/mediadrm/plugins/clearkey/hidl/Android.bp b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
index e91e918..a153ce2 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
@@ -48,7 +48,6 @@
         "libcrypto",
         "libhidlbase",
         "libhidlmemory",
-        "libhidltransport",
         "liblog",
         "libprotobuf-cpp-lite",
         "libutils",
diff --git a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
index 23a35e5..f164f28 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
@@ -62,10 +62,8 @@
       secure, keyId, iv, mode, pattern, subSamples, source, offset, destination,
       [&](Status_V1_2 hStatus, uint32_t hBytesWritten, hidl_string hDetailedError) {
         status = toStatus_1_0(hStatus);
-        if (status == Status::OK) {
-          bytesWritten = hBytesWritten;
-          detailedError = hDetailedError;
-        }
+        bytesWritten = hBytesWritten;
+        detailedError = hDetailedError;
       }
     );
 
@@ -109,6 +107,10 @@
                  "destination decrypt buffer base not set");
         return Void();
       }
+    } else {
+        _hidl_cb(Status_V1_2::ERROR_DRM_CANNOT_HANDLE, 0,
+                 "destination type not supported");
+        return Void();
     }
 
     sp<IMemory> sourceBase = mSharedBufferMap[source.bufferId];
@@ -126,38 +128,45 @@
             (static_cast<void *>(sourceBase->getPointer()));
     uint8_t* srcPtr = static_cast<uint8_t *>(base + source.offset + offset);
     void* destPtr = NULL;
-    if (destination.type == BufferType::SHARED_MEMORY) {
-        const SharedBuffer& destBuffer = destination.nonsecureMemory;
-        sp<IMemory> destBase = mSharedBufferMap[destBuffer.bufferId];
-        if (destBase == nullptr) {
-            _hidl_cb(Status_V1_2::ERROR_DRM_CANNOT_HANDLE, 0, "destination is a nullptr");
-            return Void();
-        }
-
-        if (destBuffer.offset + destBuffer.size > destBase->getSize()) {
-            _hidl_cb(Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE, 0, "invalid buffer size");
-            return Void();
-        }
-        destPtr = static_cast<void *>(base + destination.nonsecureMemory.offset);
-    } else if (destination.type == BufferType::NATIVE_HANDLE) {
-        native_handle_t *handle = const_cast<native_handle_t *>(
-        destination.secureMemory.getNativeHandle());
-        destPtr = static_cast<void *>(handle);
+    // destination.type == BufferType::SHARED_MEMORY
+    const SharedBuffer& destBuffer = destination.nonsecureMemory;
+    sp<IMemory> destBase = mSharedBufferMap[destBuffer.bufferId];
+    if (destBase == nullptr) {
+        _hidl_cb(Status_V1_2::ERROR_DRM_CANNOT_HANDLE, 0, "destination is a nullptr");
+        return Void();
     }
 
+    if (destBuffer.offset + destBuffer.size > destBase->getSize()) {
+        _hidl_cb(Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE, 0, "invalid buffer size");
+        return Void();
+    }
+    destPtr = static_cast<void *>(base + destination.nonsecureMemory.offset);
+
+
     // Calculate the output buffer size and determine if any subsamples are
     // encrypted.
     size_t destSize = 0;
     bool haveEncryptedSubsamples = false;
     for (size_t i = 0; i < subSamples.size(); i++) {
         const SubSample &subSample = subSamples[i];
-        destSize += subSample.numBytesOfClearData;
-        destSize += subSample.numBytesOfEncryptedData;
+        if (__builtin_add_overflow(destSize, subSample.numBytesOfClearData, &destSize)) {
+            _hidl_cb(Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE, 0, "subsample clear size overflow");
+            return Void();
+        }
+        if (__builtin_add_overflow(destSize, subSample.numBytesOfEncryptedData, &destSize)) {
+            _hidl_cb(Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE, 0, "subsample encrypted size overflow");
+            return Void();
+        }
         if (subSample.numBytesOfEncryptedData > 0) {
         haveEncryptedSubsamples = true;
         }
     }
 
+    if (destSize > destBuffer.size) {
+        _hidl_cb(Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE, 0, "subsample sum too large");
+        return Void();
+    }
+
     if (mode == Mode::UNENCRYPTED) {
         if (haveEncryptedSubsamples) {
             _hidl_cb(Status_V1_2::ERROR_DRM_CANNOT_HANDLE, 0,
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
index d74bc53..0c74a6c 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -797,7 +797,8 @@
 }
 
 Return<Status> DrmPlugin::releaseSecureStops(const SecureStopRelease& ssRelease) {
-    if (ssRelease.opaqueData.size() == 0) {
+    // minimum opaqueData contains the uint32_t count, see comment below
+    if (ssRelease.opaqueData.size() < sizeof(uint32_t)) {
         return Status::BAD_VALUE;
     }
 
diff --git a/drm/mediadrm/plugins/clearkey/hidl/serviceLazy.cpp b/drm/mediadrm/plugins/clearkey/hidl/serviceLazy.cpp
index 99fd883..a510487 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/serviceLazy.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/serviceLazy.cpp
@@ -38,7 +38,7 @@
     configureRpcThreadpool(8, true /* callerWillJoin */);
 
     // Setup hwbinder service
-    LazyServiceRegistrar serviceRegistrar;
+    auto serviceRegistrar = LazyServiceRegistrar::getInstance();
 
     // Setup hwbinder service
     CHECK_EQ(serviceRegistrar.registerService(drmFactory, "clearkey"), android::NO_ERROR)
diff --git a/include/media/AudioMixer.h b/include/media/AudioMixer.h
index de839c6..85ee950 120000
--- a/include/media/AudioMixer.h
+++ b/include/media/AudioMixer.h
@@ -1 +1 @@
-../../media/libaudioclient/include/media/AudioMixer.h
\ No newline at end of file
+../../media/libaudioprocessing/include/media/AudioMixer.h
\ No newline at end of file
diff --git a/include/media/BufferProviders.h b/include/media/BufferProviders.h
index 779bb15..778e1d8 120000
--- a/include/media/BufferProviders.h
+++ b/include/media/BufferProviders.h
@@ -1 +1 @@
-../../media/libmedia/include/media/BufferProviders.h
\ No newline at end of file
+../../media/libaudioprocessing/include/media/BufferProviders.h
\ No newline at end of file
diff --git a/include/media/ExtendedAudioBufferProvider.h b/include/media/ExtendedAudioBufferProvider.h
index d653cc3..99d3c13 120000
--- a/include/media/ExtendedAudioBufferProvider.h
+++ b/include/media/ExtendedAudioBufferProvider.h
@@ -1 +1 @@
-../../media/libmedia/include/media/ExtendedAudioBufferProvider.h
\ No newline at end of file
+../../media/libaudioclient/include/media/ExtendedAudioBufferProvider.h
\ No newline at end of file
diff --git a/include/media/SingleStateQueue.h b/include/media/SingleStateQueue.h
deleted file mode 120000
index 619f6ee..0000000
--- a/include/media/SingleStateQueue.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/SingleStateQueue.h
\ No newline at end of file
diff --git a/include/media/nbaio/SingleStateQueue.h b/include/media/nbaio/SingleStateQueue.h
new file mode 120000
index 0000000..d3e0553
--- /dev/null
+++ b/include/media/nbaio/SingleStateQueue.h
@@ -0,0 +1 @@
+../../../media/libnbaio/include_mono/media/nbaio/SingleStateQueue.h
\ No newline at end of file
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 5f19f74..1b1f149 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -28,7 +28,7 @@
 #include <media/AudioResamplerPublic.h>
 #include <media/AudioTimestamp.h>
 #include <media/Modulo.h>
-#include <media/SingleStateQueue.h>
+#include <media/nbaio/SingleStateQueue.h>
 
 namespace android {
 
diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h
index 712f118..16e794a 100644
--- a/include/private/media/VideoFrame.h
+++ b/include/private/media/VideoFrame.h
@@ -41,7 +41,7 @@
             uint32_t angle, uint32_t bpp, bool hasData, size_t iccSize):
         mWidth(width), mHeight(height),
         mDisplayWidth(displayWidth), mDisplayHeight(displayHeight),
-        mTileWidth(tileWidth), mTileHeight(tileHeight),
+        mTileWidth(tileWidth), mTileHeight(tileHeight), mDurationUs(0),
         mRotationAngle(angle), mBytesPerPixel(bpp), mRowBytes(bpp * width),
         mSize(hasData ? (bpp * width * height) : 0),
         mIccSize(iccSize), mReserved(0) {
@@ -78,6 +78,7 @@
     uint32_t mDisplayHeight;   // Display height before rotation
     uint32_t mTileWidth;       // Tile width (0 if image doesn't have grid)
     uint32_t mTileHeight;      // Tile height (0 if image doesn't have grid)
+    int64_t  mDurationUs;      // Frame duration in microseconds
     int32_t  mRotationAngle;   // Rotation angle, clockwise, should be multiple of 90
     uint32_t mBytesPerPixel;   // Number of bytes per pixel
     uint32_t mRowBytes;        // Number of bytes per row before rotation
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index 969f2ee..6697cb5 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -9,12 +9,11 @@
 	libaaudioservice \
 	libaudioflinger \
 	libaudiopolicyservice \
+	libaudioprocessing \
 	libbinder \
 	libcutils \
 	liblog \
 	libhidlbase \
-	libhidltransport \
-	libhwbinder \
 	libmedia \
 	libmedialogservice \
 	libmediautils \
@@ -39,7 +38,6 @@
 	frameworks/av/media/libaaudio/src \
 	frameworks/av/media/libaaudio/src/binding \
 	frameworks/av/media/libmedia \
-	$(call include-path-for, audio-utils) \
 	external/sonic \
 
 # If AUDIOSERVER_MULTILIB in device.mk is non-empty then it is used to control
diff --git a/media/bufferpool/1.0/AccessorImpl.cpp b/media/bufferpool/1.0/AccessorImpl.cpp
index fa17f15..09006ca 100644
--- a/media/bufferpool/1.0/AccessorImpl.cpp
+++ b/media/bufferpool/1.0/AccessorImpl.cpp
@@ -151,6 +151,7 @@
                 newConnection->initialize(accessor, id);
                 *connection = newConnection;
                 *pConnectionId = id;
+                mBufferPool.mConnectionIds.insert(id);
                 ++sSeqId;
             }
         }
@@ -247,7 +248,7 @@
     ALOGD("Destruction - bufferpool %p "
           "cached: %zu/%zuM, %zu/%d%% in use; "
           "allocs: %zu, %d%% recycled; "
-          "transfers: %zu, %d%% unfetced",
+          "transfers: %zu, %d%% unfetched",
           this, mStats.mBuffersCached, mStats.mSizeCached >> 20,
           mStats.mBuffersInUse, percentage(mStats.mBuffersInUse, mStats.mBuffersCached),
           mStats.mTotalAllocations, percentage(mStats.mTotalRecycles, mStats.mTotalAllocations),
@@ -305,7 +306,12 @@
         found->second->mSenderValidated = true;
         return true;
     }
-    // TODO: verify there is target connection Id
+    if (mConnectionIds.find(message.targetConnectionId) == mConnectionIds.end()) {
+        // N.B.: the id could be fake, or the receiving connection may already be closed.
+        ALOGD("bufferpool %p receiver connection %lld is no longer valid",
+              this, (long long)message.targetConnectionId);
+        return false;
+    }
     mStats.onBufferSent();
     mTransactions.insert(std::make_pair(
             message.transactionId,
@@ -450,6 +456,7 @@
             }
         }
     }
+    mConnectionIds.erase(connectionId);
     return true;
 }
 
diff --git a/media/bufferpool/1.0/AccessorImpl.h b/media/bufferpool/1.0/AccessorImpl.h
index c04dbf3..84cb685 100644
--- a/media/bufferpool/1.0/AccessorImpl.h
+++ b/media/bufferpool/1.0/AccessorImpl.h
@@ -94,6 +94,7 @@
 
         std::map<BufferId, std::unique_ptr<InternalBuffer>> mBuffers;
         std::set<BufferId> mFreeBuffers;
+        std::set<ConnectionId> mConnectionIds;
 
         /// Buffer pool statistics which tracks allocation and transfer statistics.
         struct Stats {
diff --git a/media/bufferpool/1.0/Android.bp b/media/bufferpool/1.0/Android.bp
index c7ea70f..f817c76 100644
--- a/media/bufferpool/1.0/Android.bp
+++ b/media/bufferpool/1.0/Android.bp
@@ -16,8 +16,6 @@
         "libcutils",
         "libfmq",
         "libhidlbase",
-        "libhwbinder",
-        "libhidltransport",
         "liblog",
         "libutils",
         "android.hardware.media.bufferpool@1.0",
diff --git a/media/bufferpool/2.0/AccessorImpl.cpp b/media/bufferpool/2.0/AccessorImpl.cpp
index 94cf006..929a20e 100644
--- a/media/bufferpool/2.0/AccessorImpl.cpp
+++ b/media/bufferpool/2.0/AccessorImpl.cpp
@@ -163,6 +163,7 @@
                 *connection = newConnection;
                 *pConnectionId = id;
                 *pMsgId = mBufferPool.mInvalidation.mInvalidationId;
+                mBufferPool.mConnectionIds.insert(id);
                 mBufferPool.mInvalidationChannel.getDesc(invDescPtr);
                 mBufferPool.mInvalidation.onConnect(id, observer);
                 ++sSeqId;
@@ -303,7 +304,7 @@
     ALOGD("Destruction - bufferpool2 %p "
           "cached: %zu/%zuM, %zu/%d%% in use; "
           "allocs: %zu, %d%% recycled; "
-          "transfers: %zu, %d%% unfetced",
+          "transfers: %zu, %d%% unfetched",
           this, mStats.mBuffersCached, mStats.mSizeCached >> 20,
           mStats.mBuffersInUse, percentage(mStats.mBuffersInUse, mStats.mBuffersCached),
           mStats.mTotalAllocations, percentage(mStats.mTotalRecycles, mStats.mTotalAllocations),
@@ -474,7 +475,12 @@
         found->second->mSenderValidated = true;
         return true;
     }
-    // TODO: verify there is target connection Id
+    if (mConnectionIds.find(message.targetConnectionId) == mConnectionIds.end()) {
+        // N.B.: the id could be fake, or the receiving connection may already be closed.
+        ALOGD("bufferpool2 %p receiver connection %lld is no longer valid",
+              this, (long long)message.targetConnectionId);
+        return false;
+    }
     mStats.onBufferSent();
     mTransactions.insert(std::make_pair(
             message.transactionId,
@@ -644,6 +650,7 @@
             }
         }
     }
+    mConnectionIds.erase(connectionId);
     return true;
 }
 
@@ -774,11 +781,19 @@
             std::mutex &mutex,
             std::condition_variable &cv,
             bool &ready) {
+    constexpr uint32_t NUM_SPIN_TO_INCREASE_SLEEP = 1024;
+    constexpr uint32_t NUM_SPIN_TO_LOG = 1024*8;
+    constexpr useconds_t MAX_SLEEP_US = 10000;
+    uint32_t numSpin = 0;
+    useconds_t sleepUs = 1;
+
     while(true) {
         std::map<uint32_t, const std::weak_ptr<Accessor::Impl>> copied;
         {
             std::unique_lock<std::mutex> lock(mutex);
             if (!ready) {
+                numSpin = 0;
+                sleepUs = 1;
                 cv.wait(lock);
             }
             copied.insert(accessors.begin(), accessors.end());
@@ -800,9 +815,20 @@
             if (accessors.size() == 0) {
                 ready = false;
             } else {
-                // prevent draining cpu.
+                // TODO Use an efficient way to wait over FMQ.
+                // N.B. Since there is not an efficient way to wait over FMQ,
+                // polling over the FMQ is the current way to prevent draining
+                // CPU.
                 lock.unlock();
-                std::this_thread::yield();
+                ++numSpin;
+                if (numSpin % NUM_SPIN_TO_INCREASE_SLEEP == 0 &&
+                    sleepUs < MAX_SLEEP_US) {
+                    sleepUs *= 10;
+                }
+                if (numSpin % NUM_SPIN_TO_LOG == 0) {
+                    ALOGW("invalidator thread spinning");
+                }
+                ::usleep(sleepUs);
             }
         }
     }
diff --git a/media/bufferpool/2.0/AccessorImpl.h b/media/bufferpool/2.0/AccessorImpl.h
index eea72b9..807e0f1 100644
--- a/media/bufferpool/2.0/AccessorImpl.h
+++ b/media/bufferpool/2.0/AccessorImpl.h
@@ -111,6 +111,7 @@
 
         std::map<BufferId, std::unique_ptr<InternalBuffer>> mBuffers;
         std::set<BufferId> mFreeBuffers;
+        std::set<ConnectionId> mConnectionIds;
 
         struct Invalidation {
             static std::atomic<std::uint32_t> sInvSeqId;
diff --git a/media/bufferpool/2.0/Android.bp b/media/bufferpool/2.0/Android.bp
index c71ac17..97f114a 100644
--- a/media/bufferpool/2.0/Android.bp
+++ b/media/bufferpool/2.0/Android.bp
@@ -1,9 +1,5 @@
-cc_library {
-    name: "libstagefright_bufferpool@2.0",
-    vendor_available: true,
-    vndk: {
-        enabled: true,
-    },
+cc_defaults {
+    name: "libstagefright_bufferpool@2.0-default",
     srcs: [
         "Accessor.cpp",
         "AccessorImpl.cpp",
@@ -20,8 +16,6 @@
         "libcutils",
         "libfmq",
         "libhidlbase",
-        "libhwbinder",
-        "libhidltransport",
         "liblog",
         "libutils",
         "android.hardware.media.bufferpool@2.0",
@@ -31,3 +25,23 @@
         "android.hardware.media.bufferpool@2.0",
     ],
 }
+
+cc_library {
+    name: "libstagefright_bufferpool@2.0.1",
+    defaults: ["libstagefright_bufferpool@2.0-default"],
+    vendor_available: true,
+    cflags: [
+        "-DBUFFERPOOL_CLONE_HANDLES",
+    ],
+}
+
+// Deprecated. Do not use. Use libstagefright_bufferpool@2.0.1 instead.
+cc_library {
+    name: "libstagefright_bufferpool@2.0",
+    defaults: ["libstagefright_bufferpool@2.0-default"],
+    vendor_available: true,
+    vndk: {
+        enabled: true,
+    },
+}
+
diff --git a/media/bufferpool/2.0/ClientManager.cpp b/media/bufferpool/2.0/ClientManager.cpp
index c31d313..87ee4e8 100644
--- a/media/bufferpool/2.0/ClientManager.cpp
+++ b/media/bufferpool/2.0/ClientManager.cpp
@@ -351,7 +351,21 @@
         }
         client = it->second;
     }
+#ifdef BUFFERPOOL_CLONE_HANDLES
+    native_handle_t *origHandle;
+    ResultStatus res = client->allocate(params, &origHandle, buffer);
+    if (res != ResultStatus::OK) {
+        return res;
+    }
+    *handle = native_handle_clone(origHandle);
+    if (*handle == NULL) {
+        buffer->reset();
+        return ResultStatus::NO_MEMORY;
+    }
+    return ResultStatus::OK;
+#else
     return client->allocate(params, handle, buffer);
+#endif
 }
 
 ResultStatus ClientManager::Impl::receive(
@@ -367,7 +381,22 @@
         }
         client = it->second;
     }
+#ifdef BUFFERPOOL_CLONE_HANDLES
+    native_handle_t *origHandle;
+    ResultStatus res = client->receive(
+            transactionId, bufferId, timestampUs, &origHandle, buffer);
+    if (res != ResultStatus::OK) {
+        return res;
+    }
+    *handle = native_handle_clone(origHandle);
+    if (*handle == NULL) {
+        buffer->reset();
+        return ResultStatus::NO_MEMORY;
+    }
+    return ResultStatus::OK;
+#else
     return client->receive(transactionId, bufferId, timestampUs, handle, buffer);
+#endif
 }
 
 ResultStatus ClientManager::Impl::postSend(
diff --git a/media/bufferpool/2.0/include/bufferpool/ClientManager.h b/media/bufferpool/2.0/include/bufferpool/ClientManager.h
index 953c304..24b61f4 100644
--- a/media/bufferpool/2.0/include/bufferpool/ClientManager.h
+++ b/media/bufferpool/2.0/include/bufferpool/ClientManager.h
@@ -104,7 +104,9 @@
     ResultStatus flush(ConnectionId connectionId);
 
     /**
-     * Allocates a buffer from the specified connection.
+     * Allocates a buffer from the specified connection. The output parameter
+     * handle is cloned from the internal handle. So it is safe to use directly,
+     * and it should be deleted and destroyed after use.
      *
      * @param connectionId  The id of the connection.
      * @param params        The allocation parameters.
@@ -123,7 +125,9 @@
                           std::shared_ptr<BufferPoolData> *buffer);
 
     /**
-     * Receives a buffer for the transaction.
+     * Receives a buffer for the transaction. The output parameter handle is
+     * cloned from the internal handle. So it is safe to use directly, and it
+     * should be deleted and destroyed after use.
      *
      * @param connectionId  The id of the receiving connection.
      * @param transactionId The id for the transaction.
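
A caller-side sketch of the contract spelled out above: since allocate()/receive() hand back a handle cloned from the internal one, the caller owns it and releases it with the libcutils helpers when done. The include path, omitted namespaces, and the allocateOnce() wrapper below are assumptions for illustration, not part of this patch.

#include <cutils/native_handle.h>

#include <bufferpool/ClientManager.h>

// Allocate through an existing bufferpool connection, use the buffer, then
// release the cloned handle (close its fds, then free the struct).
void allocateOnce(const android::sp<ClientManager>& manager,
                  ConnectionId connectionId,
                  const std::vector<uint8_t>& params) {
    native_handle_t* handle = nullptr;
    std::shared_ptr<BufferPoolData> buffer;
    if (manager->allocate(connectionId, params, &handle, &buffer) != ResultStatus::OK) {
        return;
    }
    // ... map and use the buffer through 'handle' ...
    if (handle != nullptr) {
        native_handle_close(handle);   // close the duplicated fds
        native_handle_delete(handle);  // free the cloned native_handle_t itself
    }
}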
diff --git a/media/codec2/components/aac/C2SoftAacEnc.cpp b/media/codec2/components/aac/C2SoftAacEnc.cpp
index 8e3852c..a8f39d5 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.cpp
+++ b/media/codec2/components/aac/C2SoftAacEnc.cpp
@@ -157,9 +157,10 @@
       mSentCodecSpecificData(false),
       mInputTimeSet(false),
       mInputSize(0),
-      mInputTimeUs(0),
+      mNextFrameTimestampUs(0),
       mSignalledError(false),
-      mOutIndex(0u) {
+      mOutIndex(0u),
+      mRemainderLen(0u) {
 }
 
 C2SoftAacEnc::~C2SoftAacEnc() {
@@ -183,8 +184,9 @@
     mSentCodecSpecificData = false;
     mInputTimeSet = false;
     mInputSize = 0u;
-    mInputTimeUs = 0;
+    mNextFrameTimestampUs = 0;
     mSignalledError = false;
+    mRemainderLen = 0;
     return C2_OK;
 }
 
@@ -201,7 +203,7 @@
     mSentCodecSpecificData = false;
     mInputTimeSet = false;
     mInputSize = 0u;
-    mInputTimeUs = 0;
+    mNextFrameTimestampUs = 0;
     return C2_OK;
 }
 
@@ -365,21 +367,25 @@
         capacity = view.capacity();
     }
     if (!mInputTimeSet && capacity > 0) {
-        mInputTimeUs = work->input.ordinal.timestamp;
+        mNextFrameTimestampUs = work->input.ordinal.timestamp;
         mInputTimeSet = true;
     }
 
-    size_t numFrames = (capacity + mInputSize + (eos ? mNumBytesPerInputFrame - 1 : 0))
-            / mNumBytesPerInputFrame;
-    ALOGV("capacity = %zu; mInputSize = %zu; numFrames = %zu mNumBytesPerInputFrame = %u",
-          capacity, mInputSize, numFrames, mNumBytesPerInputFrame);
+    size_t numFrames =
+        (mRemainderLen + capacity + mInputSize + (eos ? mNumBytesPerInputFrame - 1 : 0))
+        / mNumBytesPerInputFrame;
+    ALOGV("capacity = %zu; mInputSize = %zu; numFrames = %zu "
+          "mNumBytesPerInputFrame = %u inputTS = %lld remaining = %zu",
+          capacity, mInputSize, numFrames,
+          mNumBytesPerInputFrame, work->input.ordinal.timestamp.peekll(),
+          mRemainderLen);
 
     std::shared_ptr<C2LinearBlock> block;
-    std::shared_ptr<C2Buffer> buffer;
     std::unique_ptr<C2WriteView> wView;
     uint8_t *outPtr = temp;
     size_t outAvailable = 0u;
     uint64_t inputIndex = work->input.ordinal.frameIndex.peeku();
+    size_t bytesPerSample = channelCount * sizeof(int16_t);
 
     AACENC_InArgs inargs;
     AACENC_OutArgs outargs;
@@ -442,9 +448,31 @@
         const std::shared_ptr<C2Buffer> mBuffer;
     };
 
-    C2WorkOrdinalStruct outOrdinal = work->input.ordinal;
+    struct OutputBuffer {
+        std::shared_ptr<C2Buffer> buffer;
+        c2_cntr64_t timestampUs;
+    };
+    std::list<OutputBuffer> outputBuffers;
 
-    while (encoderErr == AACENC_OK && inargs.numInSamples > 0) {
+    if (mRemainderLen > 0) {
+        size_t offset = 0;
+        for (; mRemainderLen < bytesPerSample && offset < capacity; ++offset) {
+            mRemainder[mRemainderLen++] = data[offset];
+        }
+        data += offset;
+        capacity -= offset;
+        if (mRemainderLen == bytesPerSample) {
+            inBuffer[0] = mRemainder;
+            inBufferSize[0] = bytesPerSample;
+            inargs.numInSamples = channelCount;
+            mRemainderLen = 0;
+            ALOGV("Processing remainder");
+        } else {
+            // We have exhausted the input already
+            inargs.numInSamples = 0;
+        }
+    }
+    while (encoderErr == AACENC_OK && inargs.numInSamples >= channelCount) {
         if (numFrames && !block) {
             C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
             // TODO: error handling, proper usage, etc.
@@ -473,29 +501,22 @@
                                   &outargs);
 
         if (encoderErr == AACENC_OK) {
-            if (buffer) {
-                outOrdinal.frameIndex = mOutIndex++;
-                outOrdinal.timestamp = mInputTimeUs;
-                cloneAndSend(
-                        inputIndex,
-                        work,
-                        FillWork(C2FrameData::FLAG_INCOMPLETE, outOrdinal, buffer));
-                buffer.reset();
-            }
-
             if (outargs.numOutBytes > 0) {
                 mInputSize = 0;
                 int consumed = (capacity / sizeof(int16_t)) - inargs.numInSamples
                         + outargs.numInSamples;
-                mInputTimeUs = work->input.ordinal.timestamp
+                c2_cntr64_t currentFrameTimestampUs = mNextFrameTimestampUs;
+                mNextFrameTimestampUs = work->input.ordinal.timestamp
                         + (consumed * 1000000ll / channelCount / sampleRate);
-                buffer = createLinearBuffer(block, 0, outargs.numOutBytes);
-#if defined(LOG_NDEBUG) && !LOG_NDEBUG
+                std::shared_ptr<C2Buffer> buffer = createLinearBuffer(block, 0, outargs.numOutBytes);
+#if 0
                 hexdump(outPtr, std::min(outargs.numOutBytes, 256));
 #endif
                 outPtr = temp;
                 outAvailable = 0;
                 block.reset();
+
+                outputBuffers.push_back({buffer, currentFrameTimestampUs});
             } else {
                 mInputSize += outargs.numInSamples * sizeof(int16_t);
             }
@@ -505,11 +526,17 @@
                 inBufferSize[0] -= outargs.numInSamples * sizeof(int16_t);
                 inargs.numInSamples -= outargs.numInSamples;
             }
-        }
-        ALOGV("encoderErr = %d mInputSize = %zu inargs.numInSamples = %d, mInputTimeUs = %lld",
-              encoderErr, mInputSize, inargs.numInSamples, mInputTimeUs.peekll());
-    }
 
+            if (inBuffer[0] == mRemainder) {
+                inBuffer[0] = const_cast<uint8_t *>(data);
+                inBufferSize[0] = capacity;
+                inargs.numInSamples = capacity / sizeof(int16_t);
+            }
+        }
+        ALOGV("encoderErr = %d mInputSize = %zu "
+              "inargs.numInSamples = %d, mNextFrameTimestampUs = %lld",
+              encoderErr, mInputSize, inargs.numInSamples, mNextFrameTimestampUs.peekll());
+    }
     if (eos && inBufferSize[0] > 0) {
         if (numFrames && !block) {
             C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
@@ -540,12 +567,37 @@
                            &outBufDesc,
                            &inargs,
                            &outargs);
+        inBufferSize[0] = 0;
     }
 
-    outOrdinal.frameIndex = mOutIndex++;
-    outOrdinal.timestamp = mInputTimeUs;
+    if (inBufferSize[0] > 0) {
+        for (size_t i = 0; i < inBufferSize[0]; ++i) {
+            mRemainder[i] = static_cast<uint8_t *>(inBuffer[0])[i];
+        }
+        mRemainderLen = inBufferSize[0];
+    }
+
+    while (outputBuffers.size() > 1) {
+        const OutputBuffer& front = outputBuffers.front();
+        C2WorkOrdinalStruct ordinal = work->input.ordinal;
+        ordinal.frameIndex = mOutIndex++;
+        ordinal.timestamp = front.timestampUs;
+        cloneAndSend(
+                inputIndex,
+                work,
+                FillWork(C2FrameData::FLAG_INCOMPLETE, ordinal, front.buffer));
+        outputBuffers.pop_front();
+    }
+    std::shared_ptr<C2Buffer> buffer;
+    C2WorkOrdinalStruct ordinal = work->input.ordinal;
+    ordinal.frameIndex = mOutIndex++;
+    if (!outputBuffers.empty()) {
+        ordinal.timestamp = outputBuffers.front().timestampUs;
+        buffer = outputBuffers.front().buffer;
+    }
+    // Mark the end of the frame
     FillWork((C2FrameData::flags_t)(eos ? C2FrameData::FLAG_END_OF_STREAM : 0),
-             outOrdinal, buffer)(work);
+             ordinal, buffer)(work);
 }
 
 c2_status_t C2SoftAacEnc::drain(
@@ -569,7 +621,7 @@
     mSentCodecSpecificData = false;
     mInputTimeSet = false;
     mInputSize = 0u;
-    mInputTimeUs = 0;
+    mNextFrameTimestampUs = 0;
 
     // TODO: we don't have any pending work at this time to drain.
     return C2_OK;
diff --git a/media/codec2/components/aac/C2SoftAacEnc.h b/media/codec2/components/aac/C2SoftAacEnc.h
index a38be19..6ecfbdd 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.h
+++ b/media/codec2/components/aac/C2SoftAacEnc.h
@@ -56,11 +56,15 @@
     bool mSentCodecSpecificData;
     bool mInputTimeSet;
     size_t mInputSize;
-    c2_cntr64_t mInputTimeUs;
+    c2_cntr64_t mNextFrameTimestampUs;
 
     bool mSignalledError;
     std::atomic_uint64_t mOutIndex;
 
+    // We support max 6 channels
+    uint8_t mRemainder[6 * sizeof(int16_t)];
+    size_t mRemainderLen;
+
     status_t initEncoder();
 
     status_t setAudioParams();
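
The encoder change above carries input bytes that do not form a whole PCM sample (channelCount * sizeof(int16_t) bytes) over to the next process() call via mRemainder/mRemainderLen, and feeds them to the encoder first. A minimal standalone sketch of that splitting idea, with hypothetical names (not part of this change):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Sketch only: split an incoming byte stream into whole samples plus a
    // carried-over tail, mirroring the mRemainder/mRemainderLen bookkeeping.
    struct SampleSplitter {
        std::size_t bytesPerSample;     // e.g. channelCount * sizeof(int16_t)
        std::vector<uint8_t> remainder; // leftover bytes from the previous call

        // Returns the bytes that form whole samples; keeps the tail for later.
        std::vector<uint8_t> push(const uint8_t* data, std::size_t size) {
            std::vector<uint8_t> whole(remainder.begin(), remainder.end());
            whole.insert(whole.end(), data, data + size);
            std::size_t usable = (whole.size() / bytesPerSample) * bytesPerSample;
            remainder.assign(whole.begin() + usable, whole.end());
            whole.resize(usable);
            return whole;
        }
    };
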
diff --git a/media/codec2/components/aom/Android.bp b/media/codec2/components/aom/Android.bp
index 0fabf5c..61dbd4c 100644
--- a/media/codec2/components/aom/Android.bp
+++ b/media/codec2/components/aom/Android.bp
@@ -1,10 +1,16 @@
 cc_library_shared {
-    name: "libcodec2_soft_av1dec",
+    name: "libcodec2_soft_av1dec_aom",
     defaults: [
         "libcodec2_soft-defaults",
         "libcodec2_soft_sanitize_all-defaults",
     ],
 
+    // coordinated with frameworks/av/media/codec2/components/gav1/Android.bp
+    // so that only one of them has the official c2.android.av1.decoder name
+    cflags: [
+        "-DCODECNAME=\"c2.android.av1-aom.decoder\"",
+    ],
+
     srcs: ["C2SoftAomDec.cpp"],
     static_libs: ["libaom"],
 
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index 0cf277f..c7046cb 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -29,7 +29,10 @@
 
 namespace android {
 
-constexpr char COMPONENT_NAME[] = "c2.android.av1.decoder";
+constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
+
+// CODECNAME is set and passed in as a compile flag from Android.bp
+constexpr char COMPONENT_NAME[] = CODECNAME;
 
 class C2SoftAomDec::IntfImpl : public SimpleInterface<void>::BaseParams {
   public:
@@ -111,7 +114,7 @@
         addParameter(
             DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
                 .withDefault(
-                    new C2StreamMaxBufferSizeInfo::input(0u, 320 * 240 * 3 / 4))
+                    new C2StreamMaxBufferSizeInfo::input(0u, kMinInputBufferSize))
                 .withFields({
                     C2F(mMaxInputSize, value).any(),
                 })
@@ -191,8 +194,8 @@
         const C2P<C2StreamMaxPictureSizeTuning::output>& maxSize) {
         (void)mayBlock;
         // assume compression ratio of 2
-        me.set().value = (((maxSize.v.width + 63) / 64) *
-                          ((maxSize.v.height + 63) / 64) * 3072);
+        me.set().value = c2_max((((maxSize.v.width + 63) / 64)
+                * ((maxSize.v.height + 63) / 64) * 3072), kMinInputBufferSize);
         return C2R::Ok();
     }
     static C2R DefaultColorAspectsSetter(bool mayBlock, C2P<C2StreamColorAspectsTuning::output> &me) {
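
The aom- and gav1-based AV1 decoders are now built as separate libraries, and a -DCODECNAME cflag decides which one registers the official c2.android.av1.decoder name. A minimal sketch of the compile-flag pattern, using a hypothetical flag value:

    // Sketch only: Android.bp passes something like
    //   cflags: ["-DCODECNAME=\"c2.example.decoder\""]
    // and the source consumes it as a compile-time constant.
    #ifndef CODECNAME
    #define CODECNAME "c2.example.fallback.decoder"  // illustrative fallback only
    #endif

    constexpr char COMPONENT_NAME[] = CODECNAME;
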
diff --git a/media/codec2/components/avc/C2SoftAvcDec.cpp b/media/codec2/components/avc/C2SoftAvcDec.cpp
index 3f015b4..2662f0f 100644
--- a/media/codec2/components/avc/C2SoftAvcDec.cpp
+++ b/media/codec2/components/avc/C2SoftAvcDec.cpp
@@ -31,9 +31,10 @@
 namespace android {
 
 namespace {
-
+constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
 constexpr char COMPONENT_NAME[] = "c2.android.avc.decoder";
-
+constexpr uint32_t kDefaultOutputDelay = 8;
+constexpr uint32_t kMaxOutputDelay = 16;
 }  // namespace
 
 class C2SoftAvcDec::IntfImpl : public SimpleInterface<void>::BaseParams {
@@ -54,7 +55,9 @@
         // TODO: Proper support for reorder depth.
         addParameter(
                 DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
-                .withConstValue(new C2PortActualDelayTuning::output(8u))
+                .withDefault(new C2PortActualDelayTuning::output(kDefaultOutputDelay))
+                .withFields({C2F(mActualOutputDelay, value).inRange(0, kMaxOutputDelay)})
+                .withSetter(Setter<decltype(*mActualOutputDelay)>::StrictValueWithNoDeps)
                 .build());
 
         // TODO: output latency and reordering
@@ -111,7 +114,7 @@
 
         addParameter(
                 DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
-                .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, 320 * 240 * 3 / 4))
+                .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, kMinInputBufferSize))
                 .withFields({
                     C2F(mMaxInputSize, value).any(),
                 })
@@ -196,7 +199,6 @@
                                      0u, HAL_PIXEL_FORMAT_YCBCR_420_888))
                 .build());
     }
-
     static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
                           C2P<C2StreamPictureSizeInfo::output> &me) {
         (void)mayBlock;
@@ -225,7 +227,8 @@
                                   const C2P<C2StreamMaxPictureSizeTuning::output> &maxSize) {
         (void)mayBlock;
         // assume compression ratio of 2
-        me.set().value = (((maxSize.v.width + 15) / 16) * ((maxSize.v.height + 15) / 16) * 192);
+        me.set().value = c2_max((((maxSize.v.width + 15) / 16)
+                * ((maxSize.v.height + 15) / 16) * 192), kMinInputBufferSize);
         return C2R::Ok();
     }
 
@@ -333,6 +336,7 @@
       mDecHandle(nullptr),
       mOutBufferFlush(nullptr),
       mIvColorFormat(IV_YUV_420P),
+      mOutputDelay(kDefaultOutputDelay),
       mWidth(320),
       mHeight(240),
       mHeaderDecoded(false),
@@ -882,6 +886,26 @@
             work->result = C2_CORRUPTED;
             return;
         }
+        if (s_decode_op.i4_reorder_depth >= 0 && mOutputDelay != s_decode_op.i4_reorder_depth) {
+            mOutputDelay = s_decode_op.i4_reorder_depth;
+            ALOGV("New Output delay %d ", mOutputDelay);
+
+            C2PortActualDelayTuning::output outputDelay(mOutputDelay);
+            std::vector<std::unique_ptr<C2SettingResult>> failures;
+            c2_status_t err =
+                mIntf->config({&outputDelay}, C2_MAY_BLOCK, &failures);
+            if (err == OK) {
+                work->worklets.front()->output.configUpdate.push_back(
+                    C2Param::Copy(outputDelay));
+            } else {
+                ALOGE("Cannot set output delay");
+                mSignalledError = true;
+                work->workletsProcessed = 1u;
+                work->result = C2_CORRUPTED;
+                return;
+            }
+            continue;
+        }
         if (0 < s_decode_op.u4_pic_wd && 0 < s_decode_op.u4_pic_ht) {
             if (mHeaderDecoded == false) {
                 mHeaderDecoded = true;
diff --git a/media/codec2/components/avc/C2SoftAvcDec.h b/media/codec2/components/avc/C2SoftAvcDec.h
index 72ee583..4414a26 100644
--- a/media/codec2/components/avc/C2SoftAvcDec.h
+++ b/media/codec2/components/avc/C2SoftAvcDec.h
@@ -157,7 +157,7 @@
 
     size_t mNumCores;
     IV_COLOR_FORMAT_T mIvColorFormat;
-
+    uint32_t mOutputDelay;
     uint32_t mWidth;
     uint32_t mHeight;
     uint32_t mStride;
diff --git a/media/codec2/components/flac/Android.bp b/media/codec2/components/flac/Android.bp
index e5eb51d..48cc51b 100644
--- a/media/codec2/components/flac/Android.bp
+++ b/media/codec2/components/flac/Android.bp
@@ -23,8 +23,11 @@
 
     srcs: ["C2SoftFlacEnc.cpp"],
 
-    static_libs: [
+    shared_libs: [
         "libaudioutils",
+    ],
+
+    static_libs: [
         "libFLAC",
     ],
 }
diff --git a/media/codec2/components/g711/C2SoftG711Dec.cpp b/media/codec2/components/g711/C2SoftG711Dec.cpp
index 43b843a..b6cc32e 100644
--- a/media/codec2/components/g711/C2SoftG711Dec.cpp
+++ b/media/codec2/components/g711/C2SoftG711Dec.cpp
@@ -73,7 +73,7 @@
 
         addParameter(
                 DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
-                .withDefault(new C2StreamChannelCountInfo::output(0u, 1))
+                .withDefault(new C2StreamChannelCountInfo::output(0u, 6))
                 .withFields({C2F(mChannelCount, value).equalTo(1)})
                 .withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
                 .build());
diff --git a/media/codec2/components/gav1/Android.bp b/media/codec2/components/gav1/Android.bp
index 0a0545d..5c4abb7 100644
--- a/media/codec2/components/gav1/Android.bp
+++ b/media/codec2/components/gav1/Android.bp
@@ -1,10 +1,16 @@
 cc_library_shared {
-    name: "libcodec2_soft_gav1dec",
+    name: "libcodec2_soft_av1dec_gav1",
     defaults: [
         "libcodec2_soft-defaults",
         "libcodec2_soft_sanitize_all-defaults",
     ],
 
+    // coordinated with frameworks/av/media/codec2/components/aom/Android.bp
+    // so that only one of them has the official c2.android.av1.decoder name
+    cflags: [
+        "-DCODECNAME=\"c2.android.av1.decoder\"",
+    ],
+
     srcs: ["C2SoftGav1Dec.cpp"],
     static_libs: ["libgav1"],
 
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index f5321ba..ec5f549 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -27,7 +27,8 @@
 
 namespace android {
 
-constexpr char COMPONENT_NAME[] = "c2.android.gav1.decoder";
+// CODECNAME is set and passed in as a compile flag from Android.bp
+constexpr char COMPONENT_NAME[] = CODECNAME;
 
 class C2SoftGav1Dec::IntfImpl : public SimpleInterface<void>::BaseParams {
  public:
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.cpp b/media/codec2/components/hevc/C2SoftHevcDec.cpp
index 7232572..df677c2 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcDec.cpp
@@ -33,7 +33,8 @@
 namespace {
 
 constexpr char COMPONENT_NAME[] = "c2.android.hevc.decoder";
-
+constexpr uint32_t kDefaultOutputDelay = 8;
+constexpr uint32_t kMaxOutputDelay = 16;
 }  // namespace
 
 class C2SoftHevcDec::IntfImpl : public SimpleInterface<void>::BaseParams {
@@ -54,7 +55,9 @@
         // TODO: Proper support for reorder depth.
         addParameter(
                 DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
-                .withConstValue(new C2PortActualDelayTuning::output(8u))
+                .withDefault(new C2PortActualDelayTuning::output(kDefaultOutputDelay))
+                .withFields({C2F(mActualOutputDelay, value).inRange(0, kMaxOutputDelay)})
+                .withSetter(Setter<decltype(*mActualOutputDelay)>::StrictValueWithNoDeps)
                 .build());
 
         addParameter(
@@ -327,6 +330,7 @@
         mDecHandle(nullptr),
         mOutBufferFlush(nullptr),
         mIvColorformat(IV_YUV_420P),
+        mOutputDelay(kDefaultOutputDelay),
         mWidth(320),
         mHeight(240),
         mHeaderDecoded(false),
@@ -877,6 +881,26 @@
             work->result = C2_CORRUPTED;
             return;
         }
+        if (s_decode_op.i4_reorder_depth >= 0 && mOutputDelay != s_decode_op.i4_reorder_depth) {
+            mOutputDelay = s_decode_op.i4_reorder_depth;
+            ALOGV("New Output delay %d ", mOutputDelay);
+
+            C2PortActualDelayTuning::output outputDelay(mOutputDelay);
+            std::vector<std::unique_ptr<C2SettingResult>> failures;
+            c2_status_t err =
+                mIntf->config({&outputDelay}, C2_MAY_BLOCK, &failures);
+            if (err == OK) {
+                work->worklets.front()->output.configUpdate.push_back(
+                    C2Param::Copy(outputDelay));
+            } else {
+                ALOGE("Cannot set output delay");
+                mSignalledError = true;
+                work->workletsProcessed = 1u;
+                work->result = C2_CORRUPTED;
+                return;
+            }
+            continue;
+        }
         if (0 < s_decode_op.u4_pic_wd && 0 < s_decode_op.u4_pic_ht) {
             if (mHeaderDecoded == false) {
                 mHeaderDecoded = true;
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.h b/media/codec2/components/hevc/C2SoftHevcDec.h
index b7664e6..ce63a6c 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.h
+++ b/media/codec2/components/hevc/C2SoftHevcDec.h
@@ -115,7 +115,7 @@
 
     size_t mNumCores;
     IV_COLOR_FORMAT_T mIvColorformat;
-
+    uint32_t mOutputDelay;
     uint32_t mWidth;
     uint32_t mHeight;
     uint32_t mStride;
diff --git a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
index df7b403..e0365fc 100644
--- a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
+++ b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
@@ -29,7 +29,7 @@
 #include "impeg2d.h"
 
 namespace android {
-
+constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
 constexpr char COMPONENT_NAME[] = "c2.android.mpeg2.decoder";
 
 class C2SoftMpeg2Dec::IntfImpl : public SimpleInterface<void>::BaseParams {
@@ -47,6 +47,12 @@
         noInputLatency();
         noTimeStretch();
 
+        // TODO: Proper support for reorder depth.
+        addParameter(
+                DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
+                .withConstValue(new C2PortActualDelayTuning::output(3u))
+                .build());
+
         // TODO: output latency and reordering
 
         addParameter(
@@ -93,7 +99,7 @@
 
         addParameter(
                 DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
-                .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, 320 * 240 * 3 / 2))
+                .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, kMinInputBufferSize))
                 .withFields({
                     C2F(mMaxInputSize, value).any(),
                 })
@@ -207,7 +213,8 @@
                                   const C2P<C2StreamMaxPictureSizeTuning::output> &maxSize) {
         (void)mayBlock;
         // assume compression ratio of 1
-        me.set().value = (((maxSize.v.width + 15) / 16) * ((maxSize.v.height + 15) / 16) * 384);
+        me.set().value = c2_max((((maxSize.v.width + 15) / 16)
+                * ((maxSize.v.height + 15) / 16) * 384), kMinInputBufferSize);
         return C2R::Ok();
     }
 
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
index 7e6685e..61b286c 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
@@ -33,7 +33,7 @@
 #include "mp4dec_api.h"
 
 namespace android {
-
+constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
 #ifdef MPEG4
 constexpr char COMPONENT_NAME[] = "c2.android.mpeg4.decoder";
 #else
@@ -149,11 +149,7 @@
 
         addParameter(
                 DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
-#ifdef MPEG4
-                .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, 1920 * 1088 * 3 / 2))
-#else
-                .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, 352 * 288 * 3 / 2))
-#endif
+                .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, kMinInputBufferSize))
                 .withFields({
                     C2F(mMaxInputSize, value).any(),
                 })
@@ -218,7 +214,8 @@
                                   const C2P<C2StreamMaxPictureSizeTuning::output> &maxSize) {
         (void)mayBlock;
         // assume compression ratio of 1
-        me.set().value = (((maxSize.v.width + 15) / 16) * ((maxSize.v.height + 15) / 16) * 384);
+        me.set().value = c2_max((((maxSize.v.width + 15) / 16)
+                * ((maxSize.v.height + 15) / 16) * 384), kMinInputBufferSize);
         return C2R::Ok();
     }
 
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
index 36053f6..54c8c47 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
@@ -517,9 +517,11 @@
             if (layout.planes[layout.PLANE_Y].colInc == 1
                     && layout.planes[layout.PLANE_U].colInc == 1
                     && layout.planes[layout.PLANE_V].colInc == 1
+                    && yStride == align(width, 16)
                     && uStride == vStride
                     && yStride == 2 * vStride) {
-                // I420 compatible - planes are already set up above
+                // I420-compatible with yStride equal to the aligned width;
+                // planes are already set up above
                 break;
             }
 
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index a52ca15..62076f8 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -30,7 +30,7 @@
 #include "C2SoftVpxDec.h"
 
 namespace android {
-
+constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
 #ifdef VP9
 constexpr char COMPONENT_NAME[] = "c2.android.vp9.decoder";
 #else
@@ -166,7 +166,7 @@
 
         addParameter(
                 DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
-                .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, 320 * 240 * 3 / 4))
+                .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, kMinInputBufferSize))
                 .withFields({
                     C2F(mMaxInputSize, value).any(),
                 })
@@ -244,7 +244,8 @@
                                   const C2P<C2StreamMaxPictureSizeTuning::output> &maxSize) {
         (void)mayBlock;
         // assume compression ratio of 2
-        me.set().value = (((maxSize.v.width + 63) / 64) * ((maxSize.v.height + 63) / 64) * 3072);
+        me.set().value = c2_max((((maxSize.v.width + 63) / 64)
+                * ((maxSize.v.height + 63) / 64) * 3072), kMinInputBufferSize);
         return C2R::Ok();
     }
 
diff --git a/media/codec2/hidl/1.0/utils/Android.bp b/media/codec2/hidl/1.0/utils/Android.bp
index 63fe36b..a2930a6 100644
--- a/media/codec2/hidl/1.0/utils/Android.bp
+++ b/media/codec2/hidl/1.0/utils/Android.bp
@@ -24,7 +24,7 @@
         "libgui",
         "libhidlbase",
         "liblog",
-        "libstagefright_bufferpool@2.0",
+        "libstagefright_bufferpool@2.0.1",
         "libui",
         "libutils",
     ],
@@ -37,7 +37,7 @@
         "android.hardware.media.c2@1.0",
         "libcodec2",
         "libgui",
-        "libstagefright_bufferpool@2.0",
+        "libstagefright_bufferpool@2.0.1",
         "libui",
     ],
 }
@@ -63,6 +63,7 @@
     ],
 
     header_libs: [
+        "libbinder_headers",
         "libsystem_headers",
         "libcodec2_internal", // private
     ],
@@ -80,10 +81,8 @@
         "libcodec2_vndk",
         "libcutils",
         "libhidlbase",
-        "libhidltransport",
-        "libhwbinder",
         "liblog",
-        "libstagefright_bufferpool@2.0",
+        "libstagefright_bufferpool@2.0.1",
         "libstagefright_bufferqueue_helper",
         "libui",
         "libutils",
@@ -98,7 +97,7 @@
         "libcodec2",
         "libcodec2_vndk",
         "libhidlbase",
-        "libstagefright_bufferpool@2.0",
+        "libstagefright_bufferpool@2.0.1",
         "libui",
     ],
 }
diff --git a/media/codec2/hidl/1.0/utils/InputBufferManager.cpp b/media/codec2/hidl/1.0/utils/InputBufferManager.cpp
index a023a05..8c0d0a4 100644
--- a/media/codec2/hidl/1.0/utils/InputBufferManager.cpp
+++ b/media/codec2/hidl/1.0/utils/InputBufferManager.cpp
@@ -70,7 +70,7 @@
                  << ".";
     std::lock_guard<std::mutex> lock(mMutex);
 
-    std::set<TrackedBuffer> &bufferIds =
+    std::set<TrackedBuffer*> &bufferIds =
             mTrackedBuffersMap[listener][frameIndex];
 
     for (size_t i = 0; i < input.buffers.size(); ++i) {
@@ -79,13 +79,14 @@
                          << "Input buffer at index " << i << " is null.";
             continue;
         }
-        const TrackedBuffer &bufferId =
-                *bufferIds.emplace(listener, frameIndex, i, input.buffers[i]).
-                first;
+        TrackedBuffer *bufferId =
+            new TrackedBuffer(listener, frameIndex, i, input.buffers[i]);
+        mTrackedBufferCache.emplace(bufferId);
+        bufferIds.emplace(bufferId);
 
         c2_status_t status = input.buffers[i]->registerOnDestroyNotify(
                 onBufferDestroyed,
-                const_cast<void*>(reinterpret_cast<const void*>(&bufferId)));
+                reinterpret_cast<void*>(bufferId));
         if (status != C2_OK) {
             LOG(DEBUG) << "InputBufferManager::_registerFrameData -- "
                        << "registerOnDestroyNotify() failed "
@@ -119,31 +120,32 @@
 
     auto findListener = mTrackedBuffersMap.find(listener);
     if (findListener != mTrackedBuffersMap.end()) {
-        std::map<uint64_t, std::set<TrackedBuffer>> &frameIndex2BufferIds
+        std::map<uint64_t, std::set<TrackedBuffer*>> &frameIndex2BufferIds
                 = findListener->second;
         auto findFrameIndex = frameIndex2BufferIds.find(frameIndex);
         if (findFrameIndex != frameIndex2BufferIds.end()) {
-            std::set<TrackedBuffer> &bufferIds = findFrameIndex->second;
-            for (const TrackedBuffer& bufferId : bufferIds) {
-                std::shared_ptr<C2Buffer> buffer = bufferId.buffer.lock();
+            std::set<TrackedBuffer*> &bufferIds = findFrameIndex->second;
+            for (TrackedBuffer* bufferId : bufferIds) {
+                std::shared_ptr<C2Buffer> buffer = bufferId->buffer.lock();
                 if (buffer) {
                     c2_status_t status = buffer->unregisterOnDestroyNotify(
                             onBufferDestroyed,
-                            const_cast<void*>(
-                            reinterpret_cast<const void*>(&bufferId)));
+                            reinterpret_cast<void*>(bufferId));
                     if (status != C2_OK) {
                         LOG(DEBUG) << "InputBufferManager::_unregisterFrameData "
                                    << "-- unregisterOnDestroyNotify() failed "
                                    << "(listener @ 0x"
                                         << std::hex
-                                        << bufferId.listener.unsafe_get()
+                                        << bufferId->listener.unsafe_get()
                                    << ", frameIndex = "
-                                        << std::dec << bufferId.frameIndex
-                                   << ", bufferIndex = " << bufferId.bufferIndex
+                                        << std::dec << bufferId->frameIndex
+                                   << ", bufferIndex = " << bufferId->bufferIndex
                                    << ") => status = " << status
                                    << ".";
                     }
                 }
+                mTrackedBufferCache.erase(bufferId);
+                delete bufferId;
             }
 
             frameIndex2BufferIds.erase(findFrameIndex);
@@ -179,31 +181,32 @@
 
     auto findListener = mTrackedBuffersMap.find(listener);
     if (findListener != mTrackedBuffersMap.end()) {
-        std::map<uint64_t, std::set<TrackedBuffer>> &frameIndex2BufferIds =
+        std::map<uint64_t, std::set<TrackedBuffer*>> &frameIndex2BufferIds =
                 findListener->second;
         for (auto findFrameIndex = frameIndex2BufferIds.begin();
                 findFrameIndex != frameIndex2BufferIds.end();
                 ++findFrameIndex) {
-            std::set<TrackedBuffer> &bufferIds = findFrameIndex->second;
-            for (const TrackedBuffer& bufferId : bufferIds) {
-                std::shared_ptr<C2Buffer> buffer = bufferId.buffer.lock();
+            std::set<TrackedBuffer*> &bufferIds = findFrameIndex->second;
+            for (TrackedBuffer* bufferId : bufferIds) {
+                std::shared_ptr<C2Buffer> buffer = bufferId->buffer.lock();
                 if (buffer) {
                     c2_status_t status = buffer->unregisterOnDestroyNotify(
                             onBufferDestroyed,
-                            const_cast<void*>(
-                            reinterpret_cast<const void*>(&bufferId)));
+                            reinterpret_cast<void*>(bufferId));
                     if (status != C2_OK) {
                         LOG(DEBUG) << "InputBufferManager::_unregisterFrameData "
                                    << "-- unregisterOnDestroyNotify() failed "
                                    << "(listener @ 0x"
                                         << std::hex
-                                        << bufferId.listener.unsafe_get()
+                                        << bufferId->listener.unsafe_get()
                                    << ", frameIndex = "
-                                        << std::dec << bufferId.frameIndex
-                                   << ", bufferIndex = " << bufferId.bufferIndex
+                                        << std::dec << bufferId->frameIndex
+                                   << ", bufferIndex = " << bufferId->bufferIndex
                                    << ") => status = " << status
                                    << ".";
                     }
+                    mTrackedBufferCache.erase(bufferId);
+                    delete bufferId;
                 }
             }
         }
@@ -236,50 +239,59 @@
                      << std::dec << ".";
         return;
     }
-    TrackedBuffer id(*reinterpret_cast<TrackedBuffer*>(arg));
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    TrackedBuffer *bufferId = reinterpret_cast<TrackedBuffer*>(arg);
+
+    if (mTrackedBufferCache.find(bufferId) == mTrackedBufferCache.end()) {
+        LOG(VERBOSE) << "InputBufferManager::_onBufferDestroyed -- called with "
+                     << "unregistered buffer: "
+                     << "buf @ 0x" << std::hex << buf
+                     << ", arg @ 0x" << std::hex << arg
+                     << std::dec << ".";
+        return;
+    }
+
     LOG(VERBOSE) << "InputBufferManager::_onBufferDestroyed -- called with "
                  << "buf @ 0x" << std::hex << buf
                  << ", arg @ 0x" << std::hex << arg
                  << std::dec << " -- "
-                 << "listener @ 0x" << std::hex << id.listener.unsafe_get()
-                 << ", frameIndex = " << std::dec << id.frameIndex
-                 << ", bufferIndex = " << id.bufferIndex
+                 << "listener @ 0x" << std::hex << bufferId->listener.unsafe_get()
+                 << ", frameIndex = " << std::dec << bufferId->frameIndex
+                 << ", bufferIndex = " << bufferId->bufferIndex
                  << ".";
-
-    std::lock_guard<std::mutex> lock(mMutex);
-
-    auto findListener = mTrackedBuffersMap.find(id.listener);
+    auto findListener = mTrackedBuffersMap.find(bufferId->listener);
     if (findListener == mTrackedBuffersMap.end()) {
-        LOG(DEBUG) << "InputBufferManager::_onBufferDestroyed -- "
-                   << "received invalid listener: "
-                   << "listener @ 0x" << std::hex << id.listener.unsafe_get()
-                   << " (frameIndex = " << std::dec << id.frameIndex
-                   << ", bufferIndex = " << id.bufferIndex
-                   << ").";
+        LOG(VERBOSE) << "InputBufferManager::_onBufferDestroyed -- "
+                     << "received invalid listener: "
+                     << "listener @ 0x" << std::hex << bufferId->listener.unsafe_get()
+                     << " (frameIndex = " << std::dec << bufferId->frameIndex
+                     << ", bufferIndex = " << bufferId->bufferIndex
+                     << ").";
         return;
     }
 
-    std::map<uint64_t, std::set<TrackedBuffer>> &frameIndex2BufferIds
+    std::map<uint64_t, std::set<TrackedBuffer*>> &frameIndex2BufferIds
             = findListener->second;
-    auto findFrameIndex = frameIndex2BufferIds.find(id.frameIndex);
+    auto findFrameIndex = frameIndex2BufferIds.find(bufferId->frameIndex);
     if (findFrameIndex == frameIndex2BufferIds.end()) {
         LOG(DEBUG) << "InputBufferManager::_onBufferDestroyed -- "
                    << "received invalid frame index: "
-                   << "frameIndex = " << id.frameIndex
-                   << " (listener @ 0x" << std::hex << id.listener.unsafe_get()
-                   << ", bufferIndex = " << std::dec << id.bufferIndex
+                   << "frameIndex = " << bufferId->frameIndex
+                   << " (listener @ 0x" << std::hex << bufferId->listener.unsafe_get()
+                   << ", bufferIndex = " << std::dec << bufferId->bufferIndex
                    << ").";
         return;
     }
 
-    std::set<TrackedBuffer> &bufferIds = findFrameIndex->second;
-    auto findBufferId = bufferIds.find(id);
+    std::set<TrackedBuffer*> &bufferIds = findFrameIndex->second;
+    auto findBufferId = bufferIds.find(bufferId);
     if (findBufferId == bufferIds.end()) {
         LOG(DEBUG) << "InputBufferManager::_onBufferDestroyed -- "
                    << "received invalid buffer index: "
-                   << "bufferIndex = " << id.bufferIndex
-                   << " (frameIndex = " << id.frameIndex
-                   << ", listener @ 0x" << std::hex << id.listener.unsafe_get()
+                   << "bufferIndex = " << bufferId->bufferIndex
+                   << " (frameIndex = " << bufferId->frameIndex
+                   << ", listener @ 0x" << std::hex << bufferId->listener.unsafe_get()
                    << std::dec << ").";
         return;
     }
@@ -292,10 +304,13 @@
         }
     }
 
-    DeathNotifications &deathNotifications = mDeathNotifications[id.listener];
-    deathNotifications.indices[id.frameIndex].emplace_back(id.bufferIndex);
+    DeathNotifications &deathNotifications = mDeathNotifications[bufferId->listener];
+    deathNotifications.indices[bufferId->frameIndex].emplace_back(bufferId->bufferIndex);
     ++deathNotifications.count;
     mOnBufferDestroyed.notify_one();
+
+    mTrackedBufferCache.erase(bufferId);
+    delete bufferId;
 }
 
 // Notify the clients about buffer destructions.
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputBufferManager.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputBufferManager.h
index b6857d5..42fa557 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputBufferManager.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputBufferManager.h
@@ -196,13 +196,9 @@
                 frameIndex(frameIndex),
                 bufferIndex(bufferIndex),
                 buffer(buffer) {}
-        TrackedBuffer(const TrackedBuffer&) = default;
-        bool operator<(const TrackedBuffer& other) const {
-            return bufferIndex < other.bufferIndex;
-        }
     };
 
-    // Map: listener -> frameIndex -> set<TrackedBuffer>.
+    // Map: listener -> frameIndex -> set<TrackedBuffer*>.
     // Essentially, this is used to store triples (listener, frameIndex,
     // bufferIndex) that's searchable by listener and (listener, frameIndex).
     // However, the value of the innermost map is TrackedBuffer, which also
@@ -210,7 +206,7 @@
     // because onBufferDestroyed() needs to know listener and frameIndex too.
     typedef std::map<wp<IComponentListener>,
                      std::map<uint64_t,
-                              std::set<TrackedBuffer>>> TrackedBuffersMap;
+                              std::set<TrackedBuffer*>>> TrackedBuffersMap;
 
     // Storage for pending (unsent) death notifications for one listener.
     // Each pair in member named "indices" are (frameIndex, bufferIndex) from
@@ -247,6 +243,16 @@
     // Mutex for the management of all input buffers.
     std::mutex mMutex;
 
+    // Cache for all TrackedBuffers.
+    //
+    // Whenever registerOnDestroyNotify() is called, an argument of type
+    // TrackedBuffer is created and stored into this cache.
+    // Whenever unregisterOnDestroyNotify() or onBufferDestroyed() is called,
+    // the TrackedBuffer is removed from this cache.
+    //
+    // mTrackedBuffersMap stores references to TrackedBuffers inside this cache.
+    std::set<TrackedBuffer*> mTrackedBufferCache;
+
     // Tracked input buffers.
     TrackedBuffersMap mTrackedBuffersMap;
 
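
Because raw TrackedBuffer pointers are now handed to registerOnDestroyNotify() as opaque cookies, onBufferDestroyed() must be able to tell a live pointer from one that has already been freed; mTrackedBufferCache is the set used for that membership check. A simplified sketch of the same validate-before-use ownership pattern, with hypothetical names:

    #include <mutex>
    #include <set>
    #include <utility>

    // Sketch only: a registry that owns raw pointers used as callback cookies.
    // A late callback first checks membership before dereferencing, mirroring
    // the mTrackedBufferCache check in InputBufferManager.
    template <typename T>
    class CookieRegistry {
    public:
        T* create(T value) {
            std::lock_guard<std::mutex> lock(mMutex);
            T* cookie = new T(std::move(value));
            mLive.insert(cookie);
            return cookie;
        }
        // Returns false if the cookie is unknown or already destroyed.
        bool consume(T* cookie) {
            std::lock_guard<std::mutex> lock(mMutex);
            if (mLive.erase(cookie) == 0) {
                return false;  // stale cookie: ignore the callback
            }
            delete cookie;
            return true;
        }
    private:
        std::mutex mMutex;
        std::set<T*> mLive;
    };
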
diff --git a/media/codec2/hidl/1.0/utils/types.cpp b/media/codec2/hidl/1.0/utils/types.cpp
index 07dbf67..04fa59c 100644
--- a/media/codec2/hidl/1.0/utils/types.cpp
+++ b/media/codec2/hidl/1.0/utils/types.cpp
@@ -1434,6 +1434,11 @@
                 d->type = C2BaseBlock::GRAPHIC;
                 return true;
             }
+            if (cHandle) {
+                // Though we got a cloned handle, creating the block failed.
+                native_handle_close(cHandle);
+                native_handle_delete(cHandle);
+            }
 
             LOG(ERROR) << "Unknown handle type in BaseBlock::pooledBlock.";
             return false;
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
index 6469735..a8a552c 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
@@ -547,6 +547,10 @@
     if (mCompName == raw) {
         bitStreamInfo[0] = 8000;
         bitStreamInfo[1] = 1;
+    } else if (mCompName == g711alaw || mCompName == g711mlaw) {
+        // g711 test data is all 1-channel and has no embedded config info.
+        bitStreamInfo[0] = 8000;
+        bitStreamInfo[1] = 1;
     } else {
         ASSERT_NO_FATAL_FAILURE(
             getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
diff --git a/media/codec2/hidl/1.0/vts/functional/common/README.md b/media/codec2/hidl/1.0/vts/functional/common/README.md
index 50e8356..f2f579c 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/README.md
+++ b/media/codec2/hidl/1.0/vts/functional/common/README.md
@@ -1,31 +1,36 @@
-## Codec2 VTS Hal @ 1.0 tests ##
----
-#### master :
+# Codec2 VTS Hal @ 1.0 tests #
+
+## master :
 Functionality of master is to enumerate all the Codec2 components available in C2 media service.
 
-usage: VtsHalMediaC2V1\_0TargetMasterTest -I default
+usage: `VtsHalMediaC2V1_0TargetMasterTest -I default`
 
-#### component :
+## component :
 Functionality of component test is to validate common functionality across all the Codec2 components available in C2 media service. For a standard C2 component, these tests are expected to pass.
 
-usage: VtsHalMediaC2V1\_0TargetComponentTest -I software -C <comp name>
-example: VtsHalMediaC2V1\_0TargetComponentTest -I software -C c2.android.vorbis.decoder
+usage: `VtsHalMediaC2V1_0TargetComponentTest -I software -C <comp name>`
 
-#### audio :
-Functionality of audio test is to validate audio specific functionality Codec2 components. The resource files for this test are taken from media/codec2/hidl/1.0/vts/functional/res. The path to these files on the device is required to be given for bitstream tests.
+example: `VtsHalMediaC2V1_0TargetComponentTest -I software -C c2.android.vorbis.decoder`
 
-usage: VtsHalMediaC2V1\_0TargetAudioDecTest -I default -C <comp name> -P /sdcard/media/
-usage: VtsHalMediaC2V1\_0TargetAudioEncTest -I software -C <comp name> -P /sdcard/media/
+## audio :
+Functionality of audio test is to validate audio-specific functionality of Codec2 components. The resource files for this test are taken from `frameworks/av/media/codec2/hidl/1.0/vts/functional/res`. The path to these files on the device can be specified with `-P`. (If the device path is omitted, `/data/local/tmp/media/` is the default value.)
 
-example: VtsHalMediaC2V1\_0TargetAudioDecTest -I software -C c2.android.flac.decoder -P /sdcard/media/
-example: VtsHalMediaC2V1\_0TargetAudioEncTest -I software -C c2.android.opus.encoder -P /sdcard/media/
+usage: `VtsHalMediaC2V1_0TargetAudioDecTest -I default -C <comp name> -P <path to resource files>`
 
-#### video :
-Functionality of video test is to validate video specific functionality Codec2 components. The resource files for this test are taken from media/codec2/hidl/1.0/vts/functional/res. The path to these files on the device is required to be given for bitstream tests.
+usage: `VtsHalMediaC2V1_0TargetAudioEncTest -I software -C <comp name> -P <path to resource files>`
 
-usage: VtsHalMediaC2V1\_0TargetVideoDecTest -I default -C <comp name> -P /sdcard/media/
-usage: VtsHalMediaC2V1\_0TargetVideoEncTest -I software -C <comp name> -P /sdcard/media/
+example: `VtsHalMediaC2V1_0TargetAudioDecTest -I software -C c2.android.flac.decoder -P /data/local/tmp/media/`
 
-example: VtsHalMediaC2V1\_0TargetVideoDecTest -I software -C c2.android.avc.decoder -P /sdcard/media/
-example: VtsHalMediaC2V1\_0TargetVideoEncTest -I software -C c2.android.vp9.encoder -P /sdcard/media/
+example: `VtsHalMediaC2V1_0TargetAudioEncTest -I software -C c2.android.opus.encoder -P /data/local/tmp/media/`
+
+## video :
+Functionality of video test is to validate video-specific functionality of Codec2 components. The resource files for this test are taken from `frameworks/av/media/codec2/hidl/1.0/vts/functional/res`. The path to these files on the device can be specified with `-P`. (If the device path is omitted, `/data/local/tmp/media/` is the default value.)
+
+usage: `VtsHalMediaC2V1_0TargetVideoDecTest -I default -C <comp name> -P <path to resource files>`
+
+usage: `VtsHalMediaC2V1_0TargetVideoEncTest -I software -C <comp name> -P <path to resource files>`
+
+example: `VtsHalMediaC2V1_0TargetVideoDecTest -I software -C c2.android.avc.decoder -P /data/local/tmp/media/`
+
+example: `VtsHalMediaC2V1_0TargetVideoEncTest -I software -C c2.android.vp9.encoder -P /data/local/tmp/media/`
 
diff --git a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
index c577dac..db59e54 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
@@ -118,7 +118,7 @@
         registerTestService<IComponentStore>();
     }
 
-    ComponentTestEnvironment() : res("/sdcard/media/") {}
+    ComponentTestEnvironment() : res("/data/local/tmp/media/") {}
 
     void setComponent(const char* _component) { component = _component; }
 
diff --git a/media/codec2/hidl/client/Android.bp b/media/codec2/hidl/client/Android.bp
index 6038a40..89c1c4a 100644
--- a/media/codec2/hidl/client/Android.bp
+++ b/media/codec2/hidl/client/Android.bp
@@ -17,9 +17,8 @@
         "libcutils",
         "libgui",
         "libhidlbase",
-        "libhidltransport",
         "liblog",
-        "libstagefright_bufferpool@2.0",
+        "libstagefright_bufferpool@2.0.1",
         "libui",
         "libutils",
     ],
diff --git a/media/codec2/hidl/client/client.cpp b/media/codec2/hidl/client/client.cpp
index 2b417a6..c620bad 100644
--- a/media/codec2/hidl/client/client.cpp
+++ b/media/codec2/hidl/client/client.cpp
@@ -883,66 +883,72 @@
         Codec2Client::CreateComponentByName(
         const char* componentName,
         const std::shared_ptr<Listener>& listener,
-        std::shared_ptr<Codec2Client>* owner) {
-    std::shared_ptr<Component> component;
-    c2_status_t status = ForAllServices(
-            componentName,
-            [owner, &component, componentName, &listener](
-                    const std::shared_ptr<Codec2Client> &client)
-                        -> c2_status_t {
-                c2_status_t status = client->createComponent(componentName,
-                                                             listener,
-                                                             &component);
-                if (status == C2_OK) {
-                    if (owner) {
-                        *owner = client;
+        std::shared_ptr<Codec2Client>* owner,
+        size_t numberOfAttempts) {
+    while (true) {
+        std::shared_ptr<Component> component;
+        c2_status_t status = ForAllServices(
+                componentName,
+                [owner, &component, componentName, &listener](
+                        const std::shared_ptr<Codec2Client> &client)
+                            -> c2_status_t {
+                    c2_status_t status = client->createComponent(componentName,
+                                                                 listener,
+                                                                 &component);
+                    if (status == C2_OK) {
+                        if (owner) {
+                            *owner = client;
+                        }
+                    } else if (status != C2_NOT_FOUND) {
+                        LOG(DEBUG) << "IComponentStore("
+                                       << client->getServiceName()
+                                   << ")::createComponent(\"" << componentName
+                                   << "\") returned status = "
+                                   << status << ".";
                     }
-                } else if (status != C2_NOT_FOUND) {
-                    LOG(DEBUG) << "IComponentStore("
-                                   << client->getServiceName()
-                               << ")::createComponent(\"" << componentName
-                               << "\") returned status = "
-                               << status << ".";
-                }
-                return status;
-            });
-    if (status != C2_OK) {
-        LOG(DEBUG) << "Could not create component \"" << componentName << "\". "
-                      "Status = " << status << ".";
+                    return status;
+                });
+        if (numberOfAttempts > 0 && status == C2_TRANSACTION_FAILED) {
+            --numberOfAttempts;
+            continue;
+        }
+        return component;
     }
-    return component;
 }
 
 std::shared_ptr<Codec2Client::Interface>
         Codec2Client::CreateInterfaceByName(
         const char* interfaceName,
-        std::shared_ptr<Codec2Client>* owner) {
-    std::shared_ptr<Interface> interface;
-    c2_status_t status = ForAllServices(
-            interfaceName,
-            [owner, &interface, interfaceName](
-                    const std::shared_ptr<Codec2Client> &client)
-                        -> c2_status_t {
-                c2_status_t status = client->createInterface(interfaceName,
-                                                             &interface);
-                if (status == C2_OK) {
-                    if (owner) {
-                        *owner = client;
+        std::shared_ptr<Codec2Client>* owner,
+        size_t numberOfAttempts) {
+    while (true) {
+        std::shared_ptr<Interface> interface;
+        c2_status_t status = ForAllServices(
+                interfaceName,
+                [owner, &interface, interfaceName](
+                        const std::shared_ptr<Codec2Client> &client)
+                            -> c2_status_t {
+                    c2_status_t status = client->createInterface(interfaceName,
+                                                                 &interface);
+                    if (status == C2_OK) {
+                        if (owner) {
+                            *owner = client;
+                        }
+                    } else if (status != C2_NOT_FOUND) {
+                        LOG(DEBUG) << "IComponentStore("
+                                       << client->getServiceName()
+                                   << ")::createInterface(\"" << interfaceName
+                                   << "\") returned status = "
+                                   << status << ".";
                     }
-                } else if (status != C2_NOT_FOUND) {
-                    LOG(DEBUG) << "IComponentStore("
-                                   << client->getServiceName()
-                               << ")::createInterface(\"" << interfaceName
-                               << "\") returned status = "
-                               << status << ".";
-                }
-                return status;
-            });
-    if (status != C2_OK) {
-        LOG(DEBUG) << "Could not create interface \"" << interfaceName << "\". "
-                      "Status = " << status << ".";
+                    return status;
+                });
+        if (numberOfAttempts > 0 && status == C2_TRANSACTION_FAILED) {
+            --numberOfAttempts;
+            continue;
+        }
+        return interface;
     }
-    return interface;
 }
 
 std::vector<C2Component::Traits> const& Codec2Client::ListComponents() {
@@ -959,9 +965,9 @@
 
 std::shared_ptr<Codec2Client::InputSurface> Codec2Client::CreateInputSurface(
         char const* serviceName) {
-    uint32_t inputSurfaceSetting = ::android::base::GetUintProperty(
-            "debug.stagefright.c2inputsurface", uint32_t(0));
-    if (inputSurfaceSetting == 0) {
+    int32_t inputSurfaceSetting = ::android::base::GetIntProperty(
+            "debug.stagefright.c2inputsurface", int32_t(0));
+    if (inputSurfaceSetting <= 0) {
         return nullptr;
     }
     size_t index = GetServiceNames().size();
diff --git a/media/codec2/hidl/client/include/codec2/hidl/client.h b/media/codec2/hidl/client/include/codec2/hidl/client.h
index b8a7fb5..848901d 100644
--- a/media/codec2/hidl/client/include/codec2/hidl/client.h
+++ b/media/codec2/hidl/client/include/codec2/hidl/client.h
@@ -179,17 +179,21 @@
     static std::vector<std::shared_ptr<Codec2Client>> CreateFromAllServices();
 
     // Try to create a component with a given name from all known
-    // IComponentStore services.
+    // IComponentStore services. numberOfAttempts determines the number of times
+    // to retry the HIDL call if the transaction fails.
     static std::shared_ptr<Component> CreateComponentByName(
             char const* componentName,
             std::shared_ptr<Listener> const& listener,
-            std::shared_ptr<Codec2Client>* owner = nullptr);
+            std::shared_ptr<Codec2Client>* owner = nullptr,
+            size_t numberOfAttempts = 10);
 
     // Try to create a component interface with a given name from all known
-    // IComponentStore services.
+    // IComponentStore services. numberOfAttempts determines the number of times
+    // to retry the HIDL call if the transaction fails.
     static std::shared_ptr<Interface> CreateInterfaceByName(
             char const* interfaceName,
-            std::shared_ptr<Codec2Client>* owner = nullptr);
+            std::shared_ptr<Codec2Client>* owner = nullptr,
+            size_t numberOfAttempts = 10);
 
     // List traits from all known IComponentStore services.
     static std::vector<C2Component::Traits> const& ListComponents();
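
CreateComponentByName() and CreateInterfaceByName() now retry the whole lookup when the transport reports C2_TRANSACTION_FAILED, up to numberOfAttempts times (10 by default), which covers the window where the codec service is restarting. A minimal sketch of that retry loop, with a hypothetical callable standing in for ForAllServices():

    #include <cstddef>
    #include <functional>

    // Sketch only: status values mirror the c2_status_t cases used above.
    enum Status { OK = 0, NOT_FOUND = 1, TRANSACTION_FAILED = 2 };

    Status callWithRetries(const std::function<Status()>& tryOnce,
                           std::size_t numberOfAttempts = 10) {
        while (true) {
            Status status = tryOnce();
            if (status == TRANSACTION_FAILED && numberOfAttempts > 0) {
                --numberOfAttempts;  // transient transport failure: try again
                continue;
            }
            return status;  // success, not found, or attempts exhausted
        }
    }
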
diff --git a/media/codec2/hidl/services/Android.bp b/media/codec2/hidl/services/Android.bp
index 216525e..0403a1f 100644
--- a/media/codec2/hidl/services/Android.bp
+++ b/media/codec2/hidl/services/Android.bp
@@ -17,8 +17,6 @@
         "libcodec2_hidl@1.0",
         "libcodec2_vndk",
         "libhidlbase",
-        "libhidltransport",
-        "libhwbinder",
         "liblog",
         "libstagefright_omx",
         "libstagefright_xmlparser",
diff --git a/media/codec2/sfplugin/Android.bp b/media/codec2/sfplugin/Android.bp
index 8ae80ee..0358d25 100644
--- a/media/codec2/sfplugin/Android.bp
+++ b/media/codec2/sfplugin/Android.bp
@@ -9,6 +9,7 @@
         "CCodecConfig.cpp",
         "Codec2Buffer.cpp",
         "Codec2InfoBuilder.cpp",
+        "Omx2IGraphicBufferSource.cpp",
         "PipelineWatcher.cpp",
         "ReflectedParamUpdater.cpp",
         "SkipCutBuffer.cpp",
@@ -21,16 +22,15 @@
 
     header_libs: [
         "libcodec2_internal",
+        "media_ndk_headers",
     ],
 
     shared_libs: [
         "android.hardware.cas.native@1.0",
-        "android.hardware.graphics.bufferqueue@1.0",
         "android.hardware.media.c2@1.0",
         "android.hardware.media.omx@1.0",
         "libbase",
         "libbinder",
-        "libcodec2",
         "libcodec2_client",
         "libcodec2_vndk",
         "libcutils",
@@ -38,12 +38,12 @@
         "libhidlallocatorutils",
         "libhidlbase",
         "liblog",
-        "libmedia",
         "libmedia_omx",
         "libsfplugin_ccodec_utils",
+        "libstagefright_bufferqueue_helper",
         "libstagefright_codecbase",
         "libstagefright_foundation",
-        "libstagefright_omx_utils",
+        "libstagefright_omx",
         "libstagefright_xmlparser",
         "libui",
         "libutils",
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index aa7189c..4a31953 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -45,6 +45,7 @@
 #include "CCodec.h"
 #include "CCodecBufferChannel.h"
 #include "InputSurfaceWrapper.h"
+#include "Omx2IGraphicBufferSource.h"
 
 extern "C" android::PersistentSurface *CreateInputSurface();
 
@@ -374,7 +375,11 @@
 
         // consumer usage is queried earlier.
 
-        ALOGD("ISConfig%s", status.str().c_str());
+        if (status.str().empty()) {
+            ALOGD("ISConfig not changed");
+        } else {
+            ALOGD("ISConfig%s", status.str().c_str());
+        }
         return err;
     }
 
@@ -809,9 +814,17 @@
             }
 
             {
-                double value;
-                if (msg->findDouble("time-lapse-fps", &value)) {
-                    config->mISConfig->mCaptureFps = value;
+                bool captureFpsFound = false;
+                double timeLapseFps;
+                float captureRate;
+                if (msg->findDouble("time-lapse-fps", &timeLapseFps)) {
+                    config->mISConfig->mCaptureFps = timeLapseFps;
+                    captureFpsFound = true;
+                } else if (msg->findAsFloat(KEY_CAPTURE_RATE, &captureRate)) {
+                    config->mISConfig->mCaptureFps = captureRate;
+                    captureFpsFound = true;
+                }
+                if (captureFpsFound) {
                     (void)msg->findAsFloat(KEY_FRAME_RATE, &config->mISConfig->mCodedFps);
                 }
             }
@@ -1067,6 +1080,7 @@
     OmxStatus s;
     android::sp<HGraphicBufferProducer> gbp;
     android::sp<HGraphicBufferSource> gbs;
+
     using ::android::hardware::Return;
     Return<void> transStatus = omx->createInputSurface(
             [&s, &gbp, &gbs](
@@ -1852,15 +1866,30 @@
 
 // Create Codec 2.0 input surface
 extern "C" android::PersistentSurface *CreateInputSurface() {
+    using namespace android;
     // Attempt to create a Codec2's input surface.
-    std::shared_ptr<android::Codec2Client::InputSurface> inputSurface =
-            android::Codec2Client::CreateInputSurface();
+    std::shared_ptr<Codec2Client::InputSurface> inputSurface =
+            Codec2Client::CreateInputSurface();
     if (!inputSurface) {
-        return nullptr;
+        if (property_get_int32("debug.stagefright.c2inputsurface", 0) == -1) {
+            sp<IGraphicBufferProducer> gbp;
+            sp<OmxGraphicBufferSource> gbs = new OmxGraphicBufferSource();
+            status_t err = gbs->initCheck();
+            if (err != OK) {
+                ALOGE("Failed to create persistent input surface: error %d", err);
+                return nullptr;
+            }
+            return new PersistentSurface(
+                    gbs->getIGraphicBufferProducer(),
+                    sp<IGraphicBufferSource>(
+                        new Omx2IGraphicBufferSource(gbs)));
+        } else {
+            return nullptr;
+        }
     }
-    return new android::PersistentSurface(
+    return new PersistentSurface(
             inputSurface->getGraphicBufferProducer(),
-            static_cast<android::sp<android::hidl::base::V1_0::IBase>>(
+            static_cast<sp<android::hidl::base::V1_0::IBase>>(
             inputSurface->getHalInterface()));
 }
 
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 0b4c2d7..d61b751 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -29,6 +29,7 @@
 #include <android/hardware/cas/native/1.0/IDescrambler.h>
 #include <android-base/stringprintf.h>
 #include <binder/MemoryDealer.h>
+#include <cutils/properties.h>
 #include <gui/Surface.h>
 #include <media/openmax/OMX_Core.h>
 #include <media/stagefright/foundation/ABuffer.h>
@@ -896,6 +897,9 @@
                 input->buffers.reset(new DummyInputBuffers(mName));
             } else if (mMetaMode == MODE_ANW) {
                 input->buffers.reset(new GraphicMetadataInputBuffers(mName));
+                // This is to ensure buffers do not get released prematurely.
+                // TODO: handle this without going into array mode
+                forceArrayMode = true;
             } else {
                 input->buffers.reset(new GraphicInputBuffers(numInputSlots, mName));
             }
@@ -945,7 +949,11 @@
         uint32_t outputGeneration;
         {
             Mutexed<OutputSurface>::Locked output(mOutputSurface);
-            output->maxDequeueBuffers = numOutputSlots + reorderDepth.value + kRenderingDepth;
+            output->maxDequeueBuffers = numOutputSlots +
+                    reorderDepth.value + kRenderingDepth;
+            if (!secure) {
+                output->maxDequeueBuffers += numInputSlots;
+            }
             outputSurface = output->surface ?
                     output->surface->getIGraphicBufferProducer() : nullptr;
             if (outputSurface) {
@@ -1325,13 +1333,18 @@
             case C2PortReorderBufferDepthTuning::CORE_INDEX: {
                 C2PortReorderBufferDepthTuning::output reorderDepth;
                 if (reorderDepth.updateFrom(*param)) {
+                    bool secure = mComponent->getName().find(".secure") != std::string::npos;
                     mReorderStash.lock()->setDepth(reorderDepth.value);
                     ALOGV("[%s] onWorkDone: updated reorder depth to %u",
                           mName, reorderDepth.value);
                     size_t numOutputSlots = mOutput.lock()->numSlots;
+                    size_t numInputSlots = mInput.lock()->numSlots;
                     Mutexed<OutputSurface>::Locked output(mOutputSurface);
-                    output->maxDequeueBuffers =
-                        numOutputSlots + reorderDepth.value + kRenderingDepth;
+                    output->maxDequeueBuffers = numOutputSlots +
+                            reorderDepth.value + kRenderingDepth;
+                    if (!secure) {
+                        output->maxDequeueBuffers += numInputSlots;
+                    }
                     if (output->surface) {
                         output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
                     }
@@ -1375,10 +1388,12 @@
                     if (outputDelay.updateFrom(*param)) {
                         ALOGV("[%s] onWorkDone: updating output delay %u",
                               mName, outputDelay.value);
+                        bool secure = mComponent->getName().find(".secure") != std::string::npos;
                         (void)mPipelineWatcher.lock()->outputDelay(outputDelay.value);
 
                         bool outputBuffersChanged = false;
                         size_t numOutputSlots = 0;
+                        size_t numInputSlots = mInput.lock()->numSlots;
                         {
                             Mutexed<Output>::Locked output(mOutput);
                             output->outputDelay = outputDelay.value;
@@ -1404,6 +1419,9 @@
                         uint32_t depth = mReorderStash.lock()->depth();
                         Mutexed<OutputSurface>::Locked output(mOutputSurface);
                         output->maxDequeueBuffers = numOutputSlots + depth + kRenderingDepth;
+                        if (!secure) {
+                            output->maxDequeueBuffers += numInputSlots;
+                        }
                         if (output->surface) {
                             output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
                         }
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 1cfdc19..5adcd94 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -235,7 +235,10 @@
     const std::vector<ConfigMapper> &getConfigMappersForSdkKey(std::string key) const {
         auto it = mConfigMappers.find(key);
         if (it == mConfigMappers.end()) {
-            ALOGD("no c2 equivalents for %s", key.c_str());
+            if (mComplained.count(key) == 0) {
+                ALOGD("no c2 equivalents for %s", key.c_str());
+                mComplained.insert(key);
+            }
             return NO_MAPPERS;
         }
         ALOGV("found %zu eqs for %s", it->second.size(), key.c_str());
@@ -304,6 +307,7 @@
 
 private:
     std::map<SdkKey, std::vector<ConfigMapper>> mConfigMappers;
+    mutable std::set<std::string> mComplained;
 };
 
 const std::vector<ConfigMapper> StandardParams::NO_MAPPERS;
@@ -508,7 +512,8 @@
                .limitTo(D::ENCODER & D::VIDEO));
     // convert to timestamp base
     add(ConfigMapper(KEY_I_FRAME_INTERVAL, C2_PARAMKEY_SYNC_FRAME_INTERVAL, "value")
-        .withMappers([](C2Value v) -> C2Value {
+        .limitTo(D::VIDEO & D::ENCODER & D::CONFIG)
+        .withMapper([](C2Value v) -> C2Value {
             // convert from i32 to float
             int32_t i32Value;
             float fpValue;
@@ -518,12 +523,6 @@
                 return int64_t(c2_min(1000000 * fpValue + 0.5, (double)INT64_MAX));
             }
             return C2Value();
-        }, [](C2Value v) -> C2Value {
-            int64_t i64;
-            if (v.get(&i64)) {
-                return float(i64) / 1000000;
-            }
-            return C2Value();
         }));
     // remove when codecs switch to proper coding.gop (add support for calculating gop)
     deprecated(ConfigMapper("i-frame-period", "coding.gop", "intra-period")
@@ -1033,7 +1032,25 @@
     }
 
     ReflectedParamUpdater::Dict reflected = mParamUpdater->getParams(paramPointers);
-    ALOGD("c2 config is %s", reflected.debugString().c_str());
+    std::string config = reflected.debugString();
+    std::set<std::string> configLines;
+    std::string diff;
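+    // Compare against the previous config dump and collect only the lines that changed.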
+    for (size_t start = 0; start != std::string::npos; ) {
+        size_t end = config.find('\n', start);
+        size_t count = (end == std::string::npos)
+                ? std::string::npos
+                : end - start + 1;
+        std::string line = config.substr(start, count);
+        configLines.insert(line);
+        if (mLastConfig.count(line) == 0) {
+            diff.append(line);
+        }
+        start = (end == std::string::npos) ? std::string::npos : end + 1;
+    }
+    if (!diff.empty()) {
+        ALOGD("c2 config diff is %s", diff.c_str());
+    }
+    mLastConfig.swap(configLines);
 
     bool changed = false;
     if (domain & mInputDomain) {
diff --git a/media/codec2/sfplugin/CCodecConfig.h b/media/codec2/sfplugin/CCodecConfig.h
index 3bafe3f..a61c8b7 100644
--- a/media/codec2/sfplugin/CCodecConfig.h
+++ b/media/codec2/sfplugin/CCodecConfig.h
@@ -134,6 +134,8 @@
     /// For now support a validation function.
     std::map<C2Param::Index, LocalParamValidator> mLocalParams;
 
+    std::set<std::string> mLastConfig;
+
     CCodecConfig();
 
     /// initializes the members required to manage the format: descriptors, reflector,
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 5c8ad56..b339a92 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -764,7 +764,11 @@
         const std::shared_ptr<C2LinearBlock> &block,
         const sp<IMemory> &memory,
         int32_t heapSeqNum)
-    : Codec2Buffer(format, new ABuffer(memory->pointer(), memory->size())),
+    // TODO: Using unsecurePointer() has some associated security pitfalls
+    //       (see declaration for details).
+    //       Either document why it is safe in this case or address the
+    //       issue (e.g. by copying).
+    : Codec2Buffer(format, new ABuffer(memory->unsecurePointer(), memory->size())),
       mBlock(block),
       mMemory(memory),
       mHeapSeqNum(heapSeqNum) {
@@ -800,7 +804,7 @@
     if (view.size() < length) {
         return false;
     }
-    memcpy(view.data(), decrypted->pointer(), length);
+    memcpy(view.data(), decrypted->unsecurePointer(), length);
     return true;
 }
 
diff --git a/media/codec2/sfplugin/Codec2InfoBuilder.cpp b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
index 6b75eba..745d701 100644
--- a/media/codec2/sfplugin/Codec2InfoBuilder.cpp
+++ b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
@@ -117,8 +117,9 @@
         }
     }
 
-    // For VP9, the static info is always propagated by framework.
+    // For VP9/AV1, the static info is always propagated by the framework.
     supportsHdr |= (mediaType == MIMETYPE_VIDEO_VP9);
+    supportsHdr |= (mediaType == MIMETYPE_VIDEO_AV1);
 
     for (C2Value::Primitive profile : profileQuery[0].values.values) {
         pl.profile = (C2Config::profile_t)profile.ref<uint32_t>();
diff --git a/media/codec2/sfplugin/Omx2IGraphicBufferSource.cpp b/media/codec2/sfplugin/Omx2IGraphicBufferSource.cpp
new file mode 100644
index 0000000..764fa00
--- /dev/null
+++ b/media/codec2/sfplugin/Omx2IGraphicBufferSource.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef __LP64__
+#define OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
+#endif
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Omx2IGraphicBufferSource"
+#include <android-base/logging.h>
+
+#include "Omx2IGraphicBufferSource.h"
+
+#include <android/BnOMXBufferSource.h>
+#include <media/OMXBuffer.h>
+#include <media/stagefright/omx/OMXUtils.h>
+
+#include <OMX_Component.h>
+#include <OMX_Index.h>
+#include <OMX_IndexExt.h>
+
+namespace android {
+
+namespace /* unnamed */ {
+
+// OmxGraphicBufferSource -> IOMXBufferSource
+
+struct OmxGbs2IOmxBs : public BnOMXBufferSource {
+    sp<OmxGraphicBufferSource> mBase;
+    OmxGbs2IOmxBs(sp<OmxGraphicBufferSource> const& base) : mBase{base} {}
+    BnStatus onOmxExecuting() override {
+        return mBase->onOmxExecuting();
+    }
+    BnStatus onOmxIdle() override {
+        return mBase->onOmxIdle();
+    }
+    BnStatus onOmxLoaded() override {
+        return mBase->onOmxLoaded();
+    }
+    BnStatus onInputBufferAdded(int32_t bufferId) override {
+        return mBase->onInputBufferAdded(bufferId);
+    }
+    BnStatus onInputBufferEmptied(
+            int32_t bufferId,
+            OMXFenceParcelable const& fenceParcel) override {
+        return mBase->onInputBufferEmptied(bufferId, fenceParcel.get());
+    }
+};
+
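+// IOMXNode -> IOmxNodeWrapper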
+struct OmxNodeWrapper : public IOmxNodeWrapper {
+    sp<IOMXNode> mBase;
+    OmxNodeWrapper(sp<IOMXNode> const& base) : mBase{base} {}
+    status_t emptyBuffer(
+            int32_t bufferId, uint32_t flags,
+            const sp<GraphicBuffer> &buffer,
+            int64_t timestamp, int fenceFd) override {
+        return mBase->emptyBuffer(bufferId, buffer, flags, timestamp, fenceFd);
+    }
+    void dispatchDataSpaceChanged(
+            int32_t dataSpace, int32_t aspects, int32_t pixelFormat) override {
+        omx_message msg{};
+        msg.type = omx_message::EVENT;
+        msg.fenceFd = -1;
+        msg.u.event_data.event = OMX_EventDataSpaceChanged;
+        msg.u.event_data.data1 = dataSpace;
+        msg.u.event_data.data2 = aspects;
+        msg.u.event_data.data3 = pixelFormat;
+        mBase->dispatchMessage(msg);
+    }
+};
+
+} // unnamed namespace
+
+// Omx2IGraphicBufferSource
+Omx2IGraphicBufferSource::Omx2IGraphicBufferSource(
+        sp<OmxGraphicBufferSource> const& base)
+      : mBase{base},
+        mOMXBufferSource{new OmxGbs2IOmxBs(base)} {
+}
+
+BnStatus Omx2IGraphicBufferSource::setSuspend(
+        bool suspend, int64_t timeUs) {
+    return BnStatus::fromStatusT(mBase->setSuspend(suspend, timeUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::setRepeatPreviousFrameDelayUs(
+        int64_t repeatAfterUs) {
+    return BnStatus::fromStatusT(mBase->setRepeatPreviousFrameDelayUs(repeatAfterUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::setMaxFps(float maxFps) {
+    return BnStatus::fromStatusT(mBase->setMaxFps(maxFps));
+}
+
+BnStatus Omx2IGraphicBufferSource::setTimeLapseConfig(
+        double fps, double captureFps) {
+    return BnStatus::fromStatusT(mBase->setTimeLapseConfig(fps, captureFps));
+}
+
+BnStatus Omx2IGraphicBufferSource::setStartTimeUs(
+        int64_t startTimeUs) {
+    return BnStatus::fromStatusT(mBase->setStartTimeUs(startTimeUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::setStopTimeUs(
+        int64_t stopTimeUs) {
+    return BnStatus::fromStatusT(mBase->setStopTimeUs(stopTimeUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::getStopTimeOffsetUs(
+        int64_t *stopTimeOffsetUs) {
+    return BnStatus::fromStatusT(mBase->getStopTimeOffsetUs(stopTimeOffsetUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::setColorAspects(
+        int32_t aspects) {
+    return BnStatus::fromStatusT(mBase->setColorAspects(aspects));
+}
+
+BnStatus Omx2IGraphicBufferSource::setTimeOffsetUs(
+        int64_t timeOffsetsUs) {
+    return BnStatus::fromStatusT(mBase->setTimeOffsetUs(timeOffsetsUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::signalEndOfInputStream() {
+    return BnStatus::fromStatusT(mBase->signalEndOfInputStream());
+}
+
+BnStatus Omx2IGraphicBufferSource::configure(
+        const sp<IOMXNode>& omxNode, int32_t dataSpace) {
+    if (omxNode == NULL) {
+        return BnStatus::fromServiceSpecificError(BAD_VALUE);
+    }
+
+    // Do setInputSurface() first; the node will try to enable metadata
+    // mode on input and do the necessary error checking. If this fails,
+    // we can't use this input surface on the node.
+    status_t err = omxNode->setInputSurface(mOMXBufferSource);
+    if (err != NO_ERROR) {
+        ALOGE("Unable to set input surface: %d", err);
+        return BnStatus::fromServiceSpecificError(err);
+    }
+
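+    // Fall back to zero usage bits if the component does not report consumer usage.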
+    uint32_t consumerUsage;
+    if (omxNode->getParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
+            &consumerUsage, sizeof(consumerUsage)) != OK) {
+        consumerUsage = 0;
+    }
+
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+    def.nPortIndex = 0; // kPortIndexInput
+
+    err = omxNode->getParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
+    if (err != NO_ERROR) {
+        ALOGE("Failed to get port definition: %d", err);
+        return BnStatus::fromServiceSpecificError(UNKNOWN_ERROR);
+    }
+
+    return BnStatus::fromStatusT(mBase->configure(
+            new OmxNodeWrapper(omxNode),
+            dataSpace,
+            def.nBufferCountActual,
+            def.format.video.nFrameWidth,
+            def.format.video.nFrameHeight,
+            consumerUsage));
+}
+
+} // namespace android
+
diff --git a/media/codec2/sfplugin/Omx2IGraphicBufferSource.h b/media/codec2/sfplugin/Omx2IGraphicBufferSource.h
new file mode 100644
index 0000000..20fd1ec
--- /dev/null
+++ b/media/codec2/sfplugin/Omx2IGraphicBufferSource.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OMX_2_IGRAPHICBUFFERSOURCE_H_
+#define OMX_2_IGRAPHICBUFFERSOURCE_H_
+
+#include <android/BnGraphicBufferSource.h>
+#include <media/stagefright/omx/OmxGraphicBufferSource.h>
+
+namespace android {
+
+using BnStatus = ::android::binder::Status;
+
+struct Omx2IGraphicBufferSource : public BnGraphicBufferSource {
+    sp<OmxGraphicBufferSource> mBase;
+    sp<IOMXBufferSource> mOMXBufferSource;
+    Omx2IGraphicBufferSource(sp<OmxGraphicBufferSource> const& base);
+    BnStatus configure(const sp<IOMXNode>& omxNode, int32_t dataSpace) override;
+    BnStatus setSuspend(bool suspend, int64_t timeUs) override;
+    BnStatus setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) override;
+    BnStatus setMaxFps(float maxFps) override;
+    BnStatus setTimeLapseConfig(double fps, double captureFps) override;
+    BnStatus setStartTimeUs(int64_t startTimeUs) override;
+    BnStatus setStopTimeUs(int64_t stopTimeUs) override;
+    BnStatus getStopTimeOffsetUs(int64_t *stopTimeOffsetUs) override;
+    BnStatus setColorAspects(int32_t aspects) override;
+    BnStatus setTimeOffsetUs(int64_t timeOffsetsUs) override;
+    BnStatus signalEndOfInputStream() override;
+};
+
+} // namespace android
+
+#endif // OMX_2_IGRAPHICBUFFERSOURCE_H_
+
diff --git a/media/codec2/sfplugin/PipelineWatcher.cpp b/media/codec2/sfplugin/PipelineWatcher.cpp
index 74d14e8..0ee9056 100644
--- a/media/codec2/sfplugin/PipelineWatcher.cpp
+++ b/media/codec2/sfplugin/PipelineWatcher.cpp
@@ -146,7 +146,7 @@
               std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count());
         durations.push_back(elapsed);
     }
-    std::nth_element(durations.begin(), durations.end(), durations.begin() + n,
+    std::nth_element(durations.begin(), durations.begin() + n, durations.end(),
                      std::greater<Clock::duration>());
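+    // std::nth_element expects (first, nth, last); with std::greater<>, durations[n]
+    // ends up holding the value a descending sort would place at index n.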
     return durations[n];
 }
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index 7334834..ef6af48 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -382,10 +382,11 @@
     // TODO: will need to disambiguate between Main8 and Main10
     { C2Config::PROFILE_AV1_0, AV1ProfileMain8 },
     { C2Config::PROFILE_AV1_0, AV1ProfileMain10 },
+    { C2Config::PROFILE_AV1_0, AV1ProfileMain10HDR10 },
+    { C2Config::PROFILE_AV1_0, AV1ProfileMain10HDR10Plus },
 };
 
 ALookup<C2Config::profile_t, int32_t> sAv1HdrProfiles = {
-    { C2Config::PROFILE_AV1_0, AV1ProfileMain10 },
     { C2Config::PROFILE_AV1_0, AV1ProfileMain10HDR10 },
 };
 
@@ -662,6 +663,8 @@
         return std::make_shared<HevcProfileLevelMapper>(true, isHdr10Plus);
     } else if (mediaType == MIMETYPE_VIDEO_VP9) {
         return std::make_shared<Vp9ProfileLevelMapper>(true, isHdr10Plus);
+    } else if (mediaType == MIMETYPE_VIDEO_AV1) {
+        return std::make_shared<Av1ProfileLevelMapper>(true, isHdr10Plus);
     }
     return nullptr;
 }
diff --git a/media/codec2/vndk/Android.bp b/media/codec2/vndk/Android.bp
index 60f9d20..4c529a6 100644
--- a/media/codec2/vndk/Android.bp
+++ b/media/codec2/vndk/Android.bp
@@ -58,7 +58,6 @@
         "android.hardware.graphics.mapper@4.0",
         "android.hardware.media.bufferpool@2.0",
         "libbase",
-        "libbinder",
         "libcutils",
         "libdl",
         "libhardware",
@@ -68,7 +67,7 @@
         "liblog",
         "libnativewindow",
         "libstagefright_foundation",
-        "libstagefright_bufferpool@2.0",
+        "libstagefright_bufferpool@2.0.1",
         "libui",
         "libutils",
     ],
diff --git a/media/codec2/vndk/C2Buffer.cpp b/media/codec2/vndk/C2Buffer.cpp
index 710b536..2d99b53 100644
--- a/media/codec2/vndk/C2Buffer.cpp
+++ b/media/codec2/vndk/C2Buffer.cpp
@@ -413,17 +413,14 @@
 
     std::shared_ptr<C2LinearAllocation> alloc;
     if (C2AllocatorIon::isValid(cHandle)) {
-        native_handle_t *handle = native_handle_clone(cHandle);
-        if (handle) {
-            c2_status_t err = sAllocator->priorLinearAllocation(handle, &alloc);
-            const std::shared_ptr<C2PooledBlockPoolData> poolData =
-                    std::make_shared<C2PooledBlockPoolData>(data);
-            if (err == C2_OK && poolData) {
-                // TODO: config params?
-                std::shared_ptr<C2LinearBlock> block =
-                        _C2BlockFactory::CreateLinearBlock(alloc, poolData);
-                return block;
-            }
+        c2_status_t err = sAllocator->priorLinearAllocation(cHandle, &alloc);
+        const std::shared_ptr<C2PooledBlockPoolData> poolData =
+                std::make_shared<C2PooledBlockPoolData>(data);
+        if (err == C2_OK && poolData) {
+            // TODO: config params?
+            std::shared_ptr<C2LinearBlock> block =
+                    _C2BlockFactory::CreateLinearBlock(alloc, poolData);
+            return block;
         }
     }
     return nullptr;
@@ -674,17 +671,14 @@
         ResultStatus status = mBufferPoolManager->allocate(
                 mConnectionId, params, &cHandle, &bufferPoolData);
         if (status == ResultStatus::OK) {
-            native_handle_t *handle = native_handle_clone(cHandle);
-            if (handle) {
-                std::shared_ptr<C2LinearAllocation> alloc;
-                std::shared_ptr<C2PooledBlockPoolData> poolData =
-                        std::make_shared<C2PooledBlockPoolData>(bufferPoolData);
-                c2_status_t err = mAllocator->priorLinearAllocation(handle, &alloc);
-                if (err == C2_OK && poolData && alloc) {
-                    *block = _C2BlockFactory::CreateLinearBlock(alloc, poolData, 0, capacity);
-                    if (*block) {
-                        return C2_OK;
-                    }
+            std::shared_ptr<C2LinearAllocation> alloc;
+            std::shared_ptr<C2PooledBlockPoolData> poolData =
+                    std::make_shared<C2PooledBlockPoolData>(bufferPoolData);
+            c2_status_t err = mAllocator->priorLinearAllocation(cHandle, &alloc);
+            if (err == C2_OK && poolData && alloc) {
+                *block = _C2BlockFactory::CreateLinearBlock(alloc, poolData, 0, capacity);
+                if (*block) {
+                    return C2_OK;
                 }
             }
             return C2_NO_MEMORY;
@@ -710,19 +704,16 @@
         ResultStatus status = mBufferPoolManager->allocate(
                 mConnectionId, params, &cHandle, &bufferPoolData);
         if (status == ResultStatus::OK) {
-            native_handle_t *handle = native_handle_clone(cHandle);
-            if (handle) {
-                std::shared_ptr<C2GraphicAllocation> alloc;
-                std::shared_ptr<C2PooledBlockPoolData> poolData =
-                    std::make_shared<C2PooledBlockPoolData>(bufferPoolData);
-                c2_status_t err = mAllocator->priorGraphicAllocation(
-                        handle, &alloc);
-                if (err == C2_OK && poolData && alloc) {
-                    *block = _C2BlockFactory::CreateGraphicBlock(
-                            alloc, poolData, C2Rect(width, height));
-                    if (*block) {
-                        return C2_OK;
-                    }
+            std::shared_ptr<C2GraphicAllocation> alloc;
+            std::shared_ptr<C2PooledBlockPoolData> poolData =
+                std::make_shared<C2PooledBlockPoolData>(bufferPoolData);
+            c2_status_t err = mAllocator->priorGraphicAllocation(
+                    cHandle, &alloc);
+            if (err == C2_OK && poolData && alloc) {
+                *block = _C2BlockFactory::CreateGraphicBlock(
+                        alloc, poolData, C2Rect(width, height));
+                if (*block) {
+                    return C2_OK;
                 }
             }
             return C2_NO_MEMORY;
@@ -1117,17 +1108,14 @@
 
     std::shared_ptr<C2GraphicAllocation> alloc;
     if (C2AllocatorGralloc::isValid(cHandle)) {
-        native_handle_t *handle = native_handle_clone(cHandle);
-        if (handle) {
-            c2_status_t err = sAllocator->priorGraphicAllocation(handle, &alloc);
-            const std::shared_ptr<C2PooledBlockPoolData> poolData =
-                    std::make_shared<C2PooledBlockPoolData>(data);
-            if (err == C2_OK && poolData) {
-                // TODO: config setup?
-                std::shared_ptr<C2GraphicBlock> block =
-                        _C2BlockFactory::CreateGraphicBlock(alloc, poolData);
-                return block;
-            }
+        c2_status_t err = sAllocator->priorGraphicAllocation(cHandle, &alloc);
+        const std::shared_ptr<C2PooledBlockPoolData> poolData =
+                std::make_shared<C2PooledBlockPoolData>(data);
+        if (err == C2_OK && poolData) {
+            // TODO: config setup?
+            std::shared_ptr<C2GraphicBlock> block =
+                    _C2BlockFactory::CreateGraphicBlock(alloc, poolData);
+            return block;
         }
     }
     return nullptr;
diff --git a/media/codec2/vndk/C2Store.cpp b/media/codec2/vndk/C2Store.cpp
index 6b4ed35..5b2bd7b 100644
--- a/media/codec2/vndk/C2Store.cpp
+++ b/media/codec2/vndk/C2Store.cpp
@@ -848,8 +848,8 @@
     emplace("libcodec2_soft_amrnbenc.so");
     emplace("libcodec2_soft_amrwbdec.so");
     emplace("libcodec2_soft_amrwbenc.so");
-    emplace("libcodec2_soft_av1dec.so");
-    emplace("libcodec2_soft_gav1dec.so");
+    //emplace("libcodec2_soft_av1dec_aom.so"); // deprecated for the gav1 implementation
+    emplace("libcodec2_soft_av1dec_gav1.so");
     emplace("libcodec2_soft_avcdec.so");
     emplace("libcodec2_soft_avcenc.so");
     emplace("libcodec2_soft_flacdec.so");
diff --git a/media/extractors/amr/AMRExtractor.cpp b/media/extractors/amr/AMRExtractor.cpp
index ffeff42..26431a4 100644
--- a/media/extractors/amr/AMRExtractor.cpp
+++ b/media/extractors/amr/AMRExtractor.cpp
@@ -144,6 +144,7 @@
 
 AMRExtractor::AMRExtractor(DataSourceHelper *source)
     : mDataSource(source),
+      mMeta(NULL),
       mInitCheck(NO_INIT),
       mOffsetTableLength(0) {
     float confidence;
@@ -191,7 +192,9 @@
 
 AMRExtractor::~AMRExtractor() {
     delete mDataSource;
-    AMediaFormat_delete(mMeta);
+    if (mMeta) {
+        AMediaFormat_delete(mMeta);
+    }
 }
 
 media_status_t AMRExtractor::getMetaData(AMediaFormat *meta) {
diff --git a/media/extractors/mkv/Android.bp b/media/extractors/mkv/Android.bp
index 1744d3d..38821fd 100644
--- a/media/extractors/mkv/Android.bp
+++ b/media/extractors/mkv/Android.bp
@@ -12,10 +12,10 @@
     shared_libs: [
         "liblog",
         "libmediandk",
+        "libstagefright_flacdec",
     ],
 
     static_libs: [
-        "libstagefright_flacdec",
         "libstagefright_foundation",
         "libstagefright_metadatautils",
         "libwebm",
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 36cab1d..b91d16f 100755
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -5820,11 +5820,11 @@
                       meta, AMEDIAFORMAT_KEY_TIME_US, ((long double)cts * 1000000) / mTimescale);
                 AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
 
-                int32_t byteOrder;
-                AMediaFormat_getInt32(mFormat,
+                int32_t byteOrder = 0;
+                bool isGetBigEndian = AMediaFormat_getInt32(mFormat,
                         AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN, &byteOrder);
 
-                if (byteOrder == 1) {
+                if (isGetBigEndian && byteOrder == 1) {
                     // Big-endian -> little-endian
                     uint16_t *dstData = (uint16_t *)buf;
                     uint16_t *srcData = (uint16_t *)buf;
diff --git a/media/extractors/mp4/SampleTable.cpp b/media/extractors/mp4/SampleTable.cpp
index e7e8901..59c8200 100644
--- a/media/extractors/mp4/SampleTable.cpp
+++ b/media/extractors/mp4/SampleTable.cpp
@@ -652,6 +652,7 @@
     }
 
     mSampleTimeEntries = new (std::nothrow) SampleTimeEntry[mNumSampleSizes];
+    if (mSampleTimeEntries != nullptr) {
+        memset(mSampleTimeEntries, 0, sizeof(SampleTimeEntry) * mNumSampleSizes);
+    }
     if (!mSampleTimeEntries) {
         ALOGE("Cannot allocate sample entry table with %llu entries.",
                 (unsigned long long)mNumSampleSizes);
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
index 72b94bb..4012ece 100644
--- a/media/extractors/ogg/OggExtractor.cpp
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -1062,8 +1062,15 @@
     size_t size = buffer->range_length();
 
     if (size < kOpusHeaderSize
-            || memcmp(data, "OpusHead", 8)
-            || /* version = */ data[8] != 1) {
+            || memcmp(data, "OpusHead", 8)) {
+        return AMEDIA_ERROR_MALFORMED;
+    }
+    // Allow both version 0 and 1. Per the Opus specification, an earlier draft
+    // of the specification described a version 0, but the only difference
+    // between version 1 and version 0 is that version 0 did not specify the
+    // semantics for handling the version field.
+    if ( /* version = */ data[8] > 1) {
+        ALOGW("no support for opus version %d", data[8]);
         return AMEDIA_ERROR_MALFORMED;
     }
 
@@ -1384,7 +1391,7 @@
         return NULL;
     }
 
-    *confidence = 0.2f;
+    *confidence = 0.5f;
 
     return CreateExtractor;
 }
diff --git a/media/libaaudio/Android.bp b/media/libaaudio/Android.bp
index 16958f9..140052f 100644
--- a/media/libaaudio/Android.bp
+++ b/media/libaaudio/Android.bp
@@ -24,7 +24,7 @@
 ndk_library {
     name: "libaaudio",
     // deliberately includes symbols from AAudioTesting.h
-    symbol_file: "libaaudio.map.txt",
+    symbol_file: "src/libaaudio.map.txt",
     first_version: "26",
     unversioned_until: "current",
 }
@@ -32,6 +32,5 @@
 cc_library_headers {
     name: "libaaudio_headers",
     export_include_dirs: ["include"],
-    version_script: "libaaudio.map.txt",
 }
 
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index 4090286..56c0170 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -10,14 +10,71 @@
         "legacy",
         "utility",
     ],
+    header_libs: ["libaaudio_headers"],
+    export_header_lib_headers: ["libaaudio_headers"],
+    version_script: "libaaudio.map.txt",
+
+    srcs: [
+        "core/AAudioAudio.cpp",
+    ],
+
+    cflags: [
+        "-Wno-unused-parameter",
+        "-Wall",
+        "-Werror",
+
+        // By default, all symbols are hidden.
+        // "-fvisibility=hidden",
+        // AAUDIO_API is used to explicitly export a function or a variable as a visible symbol.
+        "-DAAUDIO_API=__attribute__((visibility(\"default\")))",
+    ],
+
+    shared_libs: [
+        "libaaudio_internal",
+        "libaudioclient",
+        "libaudioutils",
+        "liblog",
+        "libcutils",
+        "libutils",
+        "libbinder",
+    ],
+}
+
+cc_library {
+    name: "libaaudio_internal",
+
+    local_include_dirs: [
+        "binding",
+        "client",
+        "core",
+        "fifo",
+        "legacy",
+        "utility",
+    ],
+
     export_include_dirs: ["."],
     header_libs: ["libaaudio_headers"],
     export_header_lib_headers: ["libaaudio_headers"],
 
+    shared_libs: [
+        "libaudioclient",
+        "libaudioutils",
+        "liblog",
+        "libcutils",
+        "libutils",
+        "libbinder",
+    ],
+
+    cflags: [
+        "-Wno-unused-parameter",
+        "-Wall",
+        "-Werror",
+    ],
+
     srcs: [
+        "core/AudioGlobal.cpp",
         "core/AudioStream.cpp",
         "core/AudioStreamBuilder.cpp",
-        "core/AAudioAudio.cpp",
         "core/AAudioStreamParameters.cpp",
         "legacy/AudioStreamLegacy.cpp",
         "legacy/AudioStreamRecord.cpp",
@@ -54,25 +111,4 @@
         "flowgraph/SourceI16.cpp",
         "flowgraph/SourceI24.cpp",
     ],
-
-    cflags: [
-        "-Wno-unused-parameter",
-        "-Wall",
-        "-Werror",
-
-        // By default, all symbols are hidden.
-        // "-fvisibility=hidden",
-        // AAUDIO_API is used to explicitly export a function or a variable as a visible symbol.
-        "-DAAUDIO_API=__attribute__((visibility(\"default\")))",
-    ],
-
-    shared_libs: [
-        "libaudioclient",
-        "libaudioutils",
-        "liblog",
-        "libcutils",
-        "libutils",
-        "libbinder",
-        "libaudiomanager",
-    ],
 }
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 44d5122..8040e6a 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -27,6 +27,7 @@
 #include <aaudio/AAudioTesting.h>
 
 #include "AudioClock.h"
+#include "AudioGlobal.h"
 #include "AudioStreamBuilder.h"
 #include "AudioStream.h"
 #include "binding/AAudioCommon.h"
@@ -45,63 +46,14 @@
         return AAUDIO_ERROR_NULL; \
     }
 
-#define AAUDIO_CASE_ENUM(name) case name: return #name
-
 AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode) {
-    switch (returnCode) {
-        AAUDIO_CASE_ENUM(AAUDIO_OK);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_DISCONNECTED);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_ILLEGAL_ARGUMENT);
-        // reserved
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INTERNAL);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_STATE);
-        // reserved
-        // reserved
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_HANDLE);
-         // reserved
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNIMPLEMENTED);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNAVAILABLE);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_FREE_HANDLES);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_MEMORY);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NULL);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_TIMEOUT);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_WOULD_BLOCK);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_FORMAT);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_OUT_OF_RANGE);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_SERVICE);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_RATE);
-    }
-    return "Unrecognized AAudio error.";
+    return AudioGlobal_convertResultToText(returnCode);
 }
 
 AAUDIO_API const char * AAudio_convertStreamStateToText(aaudio_stream_state_t state) {
-    switch (state) {
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNINITIALIZED);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNKNOWN);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_OPEN);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTING);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTED);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_PAUSING);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_PAUSED);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHING);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHED);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPING);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPED);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_DISCONNECTED);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSING);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSED);
-    }
-    return "Unrecognized AAudio state.";
+    return AudioGlobal_convertStreamStateToText(state);
 }
 
-#undef AAUDIO_CASE_ENUM
-
-
-/******************************************
- * Static globals.
- */
-static aaudio_policy_t s_MMapPolicy = AAUDIO_UNSPECIFIED;
-
 static AudioStream *convertAAudioStreamToAudioStream(AAudioStream* stream)
 {
     return (AudioStream*) stream;
@@ -543,23 +495,11 @@
 }
 
 AAUDIO_API aaudio_policy_t AAudio_getMMapPolicy() {
-    return s_MMapPolicy;
+    return AudioGlobal_getMMapPolicy();
 }
 
 AAUDIO_API aaudio_result_t AAudio_setMMapPolicy(aaudio_policy_t policy) {
-    aaudio_result_t result = AAUDIO_OK;
-    switch(policy) {
-        case AAUDIO_UNSPECIFIED:
-        case AAUDIO_POLICY_NEVER:
-        case AAUDIO_POLICY_AUTO:
-        case AAUDIO_POLICY_ALWAYS:
-            s_MMapPolicy = policy;
-            break;
-        default:
-            result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
-            break;
-    }
-    return result;
+    return AudioGlobal_setMMapPolicy(policy);
 }
 
 AAUDIO_API bool AAudioStream_isMMapUsed(AAudioStream* stream)
diff --git a/media/libaaudio/src/core/AudioGlobal.cpp b/media/libaaudio/src/core/AudioGlobal.cpp
new file mode 100644
index 0000000..e6d9a0d
--- /dev/null
+++ b/media/libaaudio/src/core/AudioGlobal.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
+
+#include "AudioGlobal.h"
+
+/******************************************
+ * Static globals.
+ */
+namespace aaudio {
+
+static aaudio_policy_t g_MMapPolicy = AAUDIO_UNSPECIFIED;
+
+aaudio_policy_t AudioGlobal_getMMapPolicy() {
+    return g_MMapPolicy;
+}
+
+aaudio_result_t AudioGlobal_setMMapPolicy(aaudio_policy_t policy) {
+    aaudio_result_t result = AAUDIO_OK;
+    switch(policy) {
+        case AAUDIO_UNSPECIFIED:
+        case AAUDIO_POLICY_NEVER:
+        case AAUDIO_POLICY_AUTO:
+        case AAUDIO_POLICY_ALWAYS:
+            g_MMapPolicy = policy;
+            break;
+        default:
+            result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+            break;
+    }
+    return result;
+}
+
+#define AAUDIO_CASE_ENUM(name) case name: return #name
+
+const char* AudioGlobal_convertResultToText(aaudio_result_t returnCode) {
+    switch (returnCode) {
+        AAUDIO_CASE_ENUM(AAUDIO_OK);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_DISCONNECTED);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_ILLEGAL_ARGUMENT);
+        // reserved
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INTERNAL);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_STATE);
+        // reserved
+        // reserved
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_HANDLE);
+        // reserved
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNIMPLEMENTED);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNAVAILABLE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_FREE_HANDLES);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_MEMORY);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NULL);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_TIMEOUT);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_WOULD_BLOCK);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_FORMAT);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_OUT_OF_RANGE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_SERVICE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_RATE);
+    }
+    return "Unrecognized AAudio error.";
+}
+
+const char* AudioGlobal_convertStreamStateToText(aaudio_stream_state_t state) {
+    switch (state) {
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNINITIALIZED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNKNOWN);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_OPEN);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTING);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_PAUSING);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_PAUSED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHING);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPING);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_DISCONNECTED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSING);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSED);
+    }
+    return "Unrecognized AAudio state.";
+}
+
+#undef AAUDIO_CASE_ENUM
+
+}  // namespace aaudio
diff --git a/media/libaaudio/src/core/AudioGlobal.h b/media/libaaudio/src/core/AudioGlobal.h
new file mode 100644
index 0000000..312cef2
--- /dev/null
+++ b/media/libaaudio/src/core/AudioGlobal.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef AAUDIO_AUDIOGLOBAL_H
+#define AAUDIO_AUDIOGLOBAL_H
+
+#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
+
+
+namespace aaudio {
+
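+// Process-wide MMap policy state and enum-to-string helpers shared by the
+// public entry points and internal AAudio code.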
+aaudio_policy_t AudioGlobal_getMMapPolicy();
+aaudio_result_t AudioGlobal_setMMapPolicy(aaudio_policy_t policy);
+
+const char* AudioGlobal_convertResultToText(aaudio_result_t returnCode);
+const char* AudioGlobal_convertStreamStateToText(aaudio_stream_state_t state);
+
+}  // namespace aaudio
+
+#endif  // AAUDIO_AUDIOGLOBAL_H
+
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 9b77223..5303631 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -25,8 +25,9 @@
 #include "AudioStreamBuilder.h"
 #include "AudioStream.h"
 #include "AudioClock.h"
+#include "AudioGlobal.h"
 
-using namespace aaudio;
+namespace aaudio {
 
 
 // Sequential number assigned to streams solely for debugging purposes.
@@ -51,7 +52,7 @@
                           || getState() == AAUDIO_STREAM_STATE_UNINITIALIZED
                           || getState() == AAUDIO_STREAM_STATE_DISCONNECTED),
                         "~AudioStream() - still in use, state = %s",
-                        AAudio_convertStreamStateToText(getState()));
+                        AudioGlobal_convertStreamStateToText(getState()));
 
     mPlayerBase->clearParentReference(); // remove reference to this AudioStream
 }
@@ -155,7 +156,7 @@
         case AAUDIO_STREAM_STATE_CLOSED:
         default:
             ALOGW("safePause() stream not running, state = %s",
-                  AAudio_convertStreamStateToText(getState()));
+                  AudioGlobal_convertStreamStateToText(getState()));
             return AAUDIO_ERROR_INVALID_STATE;
     }
 
@@ -240,7 +241,7 @@
         case AAUDIO_STREAM_STATE_CLOSED:
         default:
             ALOGW("%s() stream not running, state = %s", __func__,
-                  AAudio_convertStreamStateToText(getState()));
+                  AudioGlobal_convertStreamStateToText(getState()));
             return AAUDIO_ERROR_INVALID_STATE;
     }
 
@@ -488,3 +489,5 @@
 void AudioStream::MyPlayerBase::destroy() {
     unregisterWithAudioManager();
 }
+
+}  // namespace aaudio
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 08f4958..44f45b3 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -27,6 +27,7 @@
 #include "binding/AAudioBinderClient.h"
 #include "client/AudioStreamInternalCapture.h"
 #include "client/AudioStreamInternalPlay.h"
+#include "core/AudioGlobal.h"
 #include "core/AudioStream.h"
 #include "core/AudioStreamBuilder.h"
 #include "legacy/AudioStreamRecord.h"
@@ -112,7 +113,7 @@
     }
 
     // The API setting is the highest priority.
-    aaudio_policy_t mmapPolicy = AAudio_getMMapPolicy();
+    aaudio_policy_t mmapPolicy = AudioGlobal_getMMapPolicy();
     // If not specified then get from a system property.
     if (mmapPolicy == AAUDIO_UNSPECIFIED) {
         mmapPolicy = AAudioProperty_getMMapPolicy();
diff --git a/media/libaaudio/libaaudio.map.txt b/media/libaaudio/src/libaaudio.map.txt
similarity index 100%
rename from media/libaaudio/libaaudio.map.txt
rename to media/libaaudio/src/libaaudio.map.txt
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 96ed56a..cdd02c0 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -24,6 +24,7 @@
 #include <utils/Errors.h>
 
 #include "aaudio/AAudio.h"
+#include "core/AudioGlobal.h"
 #include <aaudio/AAudioTesting.h>
 #include <math.h>
 #include <system/audio-base.h>
@@ -355,7 +356,7 @@
         case AAUDIO_STREAM_STATE_DISCONNECTED:
         default:
             ALOGE("can only flush stream when PAUSED, OPEN or STOPPED, state = %s",
-                  AAudio_convertStreamStateToText(state));
+                  aaudio::AudioGlobal_convertStreamStateToText(state));
             result =  AAUDIO_ERROR_INVALID_STATE;
             break;
     }
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 6101e99..19cd0a0 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -11,7 +11,7 @@
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_marshalling.cpp"],
     shared_libs: [
-        "libaaudio",
+        "libaaudio_internal",
         "libbinder",
         "libcutils",
         "libutils",
@@ -23,7 +23,7 @@
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_clock_model.cpp"],
     shared_libs: [
-        "libaaudio",
+        "libaaudio_internal",
         "libaudioutils",
         "libcutils",
         "libutils",
@@ -34,7 +34,7 @@
     name: "test_block_adapter",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_block_adapter.cpp"],
-    shared_libs: ["libaaudio"],
+    shared_libs: ["libaaudio_internal"],
 }
 
 cc_test {
@@ -170,7 +170,7 @@
     name: "test_atomic_fifo",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_atomic_fifo.cpp"],
-    shared_libs: ["libaaudio"],
+    shared_libs: ["libaaudio_internal"],
 }
 
 cc_test {
@@ -178,7 +178,7 @@
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_flowgraph.cpp"],
     shared_libs: [
-        "libaaudio",
+        "libaaudio_internal",
         "libbinder",
         "libcutils",
         "libutils",
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index cf11936..1cc5fe6 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -159,7 +159,11 @@
 
     mIEffect = iEffect;
     mCblkMemory = cblk;
-    mCblk = static_cast<effect_param_cblk_t*>(cblk->pointer());
+    // TODO: Using unsecurePointer() has some associated security pitfalls
+    //       (see declaration for details).
+    //       Either document why it is safe in this case or address the
+    //       issue (e.g. by copying).
+    mCblk = static_cast<effect_param_cblk_t*>(cblk->unsecurePointer());
     int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int);
     mCblk->buffer = (uint8_t *)mCblk + bufOffset;
 
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index a1b04ca..0f2d48e 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -759,7 +759,11 @@
         status = NO_INIT;
         goto exit;
     }
-    iMemPointer = output.cblk ->pointer();
+    // TODO: Using unsecurePointer() has some associated security pitfalls
+    //       (see declaration for details).
+    //       Either document why it is safe in this case or address the
+    //       issue (e.g. by copying).
+    iMemPointer = output.cblk ->unsecurePointer();
     if (iMemPointer == NULL) {
         ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
         status = NO_INIT;
@@ -774,7 +778,11 @@
     if (output.buffers == 0) {
         buffers = cblk + 1;
     } else {
-        buffers = output.buffers->pointer();
+        // TODO: Using unsecurePointer() has some associated security pitfalls
+        //       (see declaration for details).
+        //       Either document why it is safe in this case or address the
+        //       issue (e.g. by copying).
+        buffers = output.buffers->unsecurePointer();
         if (buffers == NULL) {
             ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
             status = NO_INIT;
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 4a80cd3..e8d7b60 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -406,7 +406,7 @@
     mDoNotReconnect = doNotReconnect;
 
     ALOGV_IF(sharedBuffer != 0, "%s(): sharedBuffer: %p, size: %zu",
-            __func__, sharedBuffer->pointer(), sharedBuffer->size());
+            __func__, sharedBuffer->unsecurePointer(), sharedBuffer->size());
 
     ALOGV("%s(): streamType %d frameCount %zu flags %04x",
             __func__, streamType, frameCount, flags);
@@ -1508,7 +1508,11 @@
         status = NO_INIT;
         goto exit;
     }
-    void *iMemPointer = iMem->pointer();
+    // TODO: Using unsecurePointer() has some associated security pitfalls
+    //       (see declaration for details).
+    //       Either document why it is safe in this case or address the
+    //       issue (e.g. by copying).
+    void *iMemPointer = iMem->unsecurePointer();
     if (iMemPointer == NULL) {
         ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
         status = NO_INIT;
@@ -1563,7 +1567,11 @@
     if (mSharedBuffer == 0) {
         buffers = cblk + 1;
     } else {
-        buffers = mSharedBuffer->pointer();
+        // TODO: Using unsecurePointer() has some associated security pitfalls
+        //       (see declaration for details).
+        //       Either document why it is safe in this case or address the
+        //       issue (e.g. by copying).
+        buffers = mSharedBuffer->unsecurePointer();
         if (buffers == NULL) {
             ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
             status = NO_INIT;
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index dd95e34..750fc21 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -340,11 +340,11 @@
         return reply.readInt32();
     }
 
-    virtual void setRecordSilenced(uid_t uid, bool silenced)
+    virtual void setRecordSilenced(audio_port_handle_t portId, bool silenced)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(uid);
+        data.writeInt32(portId);
         data.writeInt32(silenced ? 1 : 0);
         remote()->transact(SET_RECORD_SILENCED, data, &reply);
     }
@@ -1156,11 +1156,9 @@
         } break;
         case SET_RECORD_SILENCED: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            uid_t uid = data.readInt32();
-            audio_source_t source;
-            data.read(&source, sizeof(audio_source_t));
+            audio_port_handle_t portId = data.readInt32();
             bool silenced = data.readInt32() == 1;
-            setRecordSilenced(uid, silenced);
+            setRecordSilenced(portId, silenced);
             return NO_ERROR;
         } break;
         case SET_PARAMETERS: {
@@ -1339,10 +1337,14 @@
         }
         case GET_EFFECT_DESCRIPTOR: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            effect_uuid_t uuid;
-            data.read(&uuid, sizeof(effect_uuid_t));
-            effect_uuid_t type;
-            data.read(&type, sizeof(effect_uuid_t));
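+            // Zero-initialize and check each read so a short parcel cannot leave
+            // the UUIDs uninitialized.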
+            effect_uuid_t uuid = {};
+            if (data.read(&uuid, sizeof(effect_uuid_t)) != NO_ERROR) {
+                android_errorWriteLog(0x534e4554, "139417189");
+            }
+            effect_uuid_t type = {};
+            if (data.read(&type, sizeof(effect_uuid_t)) != NO_ERROR) {
+                android_errorWriteLog(0x534e4554, "139417189");
+            }
             uint32_t preferredTypeFlag = data.readUint32();
             effect_descriptor_t desc = {};
             status_t status = getEffectDescriptor(&uuid, &type, preferredTypeFlag, &desc);
diff --git a/media/libaudioclient/IAudioTrack.cpp b/media/libaudioclient/IAudioTrack.cpp
index 83a568a..6219e7a 100644
--- a/media/libaudioclient/IAudioTrack.cpp
+++ b/media/libaudioclient/IAudioTrack.cpp
@@ -62,7 +62,7 @@
         status_t status = remote()->transact(GET_CBLK, data, &reply);
         if (status == NO_ERROR) {
             cblk = interface_cast<IMemory>(reply.readStrongBinder());
-            if (cblk != 0 && cblk->pointer() == NULL) {
+            if (cblk != 0 && cblk->unsecurePointer() == NULL) {
                 cblk.clear();
             }
         }
diff --git a/media/libaudioclient/IEffect.cpp b/media/libaudioclient/IEffect.cpp
index ce72dae..5d47dff 100644
--- a/media/libaudioclient/IEffect.cpp
+++ b/media/libaudioclient/IEffect.cpp
@@ -122,7 +122,7 @@
         status_t status = remote()->transact(GET_CBLK, data, &reply);
         if (status == NO_ERROR) {
             cblk = interface_cast<IMemory>(reply.readStrongBinder());
-            if (cblk != 0 && cblk->pointer() == NULL) {
+            if (cblk != 0 && cblk->unsecurePointer() == NULL) {
                 cblk.clear();
             }
         }
diff --git a/media/libaudioclient/include/media/AudioMixer.h b/media/libaudioclient/include/media/AudioMixer.h
deleted file mode 100644
index 783eef3..0000000
--- a/media/libaudioclient/include/media/AudioMixer.h
+++ /dev/null
@@ -1,519 +0,0 @@
-/*
-**
-** Copyright 2007, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#ifndef ANDROID_AUDIO_MIXER_H
-#define ANDROID_AUDIO_MIXER_H
-
-#include <map>
-#include <pthread.h>
-#include <sstream>
-#include <stdint.h>
-#include <sys/types.h>
-#include <unordered_map>
-#include <vector>
-
-#include <android/os/IExternalVibratorService.h>
-#include <media/AudioBufferProvider.h>
-#include <media/AudioResampler.h>
-#include <media/AudioResamplerPublic.h>
-#include <media/BufferProviders.h>
-#include <system/audio.h>
-#include <utils/Compat.h>
-#include <utils/threads.h>
-
-// FIXME This is actually unity gain, which might not be max in future, expressed in U.12
-#define MAX_GAIN_INT AudioMixer::UNITY_GAIN_INT
-
-// This must match frameworks/av/services/audioflinger/Configuration.h
-#define FLOAT_AUX
-
-namespace android {
-
-namespace NBLog {
-class Writer;
-}   // namespace NBLog
-
-// ----------------------------------------------------------------------------
-
-class AudioMixer
-{
-public:
-    // Do not change these unless underlying code changes.
-    // This mixer has a hard-coded upper limit of 8 channels for output.
-    static constexpr uint32_t MAX_NUM_CHANNELS = FCC_8;
-    static constexpr uint32_t MAX_NUM_VOLUMES = FCC_2; // stereo volume only
-    // maximum number of channels supported for the content
-    static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = AUDIO_CHANNEL_COUNT_MAX;
-
-    static const uint16_t UNITY_GAIN_INT = 0x1000;
-    static const CONSTEXPR float UNITY_GAIN_FLOAT = 1.0f;
-
-    enum { // names
-        // setParameter targets
-        TRACK           = 0x3000,
-        RESAMPLE        = 0x3001,
-        RAMP_VOLUME     = 0x3002, // ramp to new volume
-        VOLUME          = 0x3003, // don't ramp
-        TIMESTRETCH     = 0x3004,
-
-        // set Parameter names
-        // for target TRACK
-        CHANNEL_MASK    = 0x4000,
-        FORMAT          = 0x4001,
-        MAIN_BUFFER     = 0x4002,
-        AUX_BUFFER      = 0x4003,
-        DOWNMIX_TYPE    = 0X4004,
-        MIXER_FORMAT    = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
-        MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output
-        // for haptic
-        HAPTIC_ENABLED  = 0x4007, // Set haptic data from this track should be played or not.
-        HAPTIC_INTENSITY = 0x4008, // Set the intensity to play haptic data.
-        // for target RESAMPLE
-        SAMPLE_RATE     = 0x4100, // Configure sample rate conversion on this track name;
-                                  // parameter 'value' is the new sample rate in Hz.
-                                  // Only creates a sample rate converter the first time that
-                                  // the track sample rate is different from the mix sample rate.
-                                  // If the new sample rate is the same as the mix sample rate,
-                                  // and a sample rate converter already exists,
-                                  // then the sample rate converter remains present but is a no-op.
-        RESET           = 0x4101, // Reset sample rate converter without changing sample rate.
-                                  // This clears out the resampler's input buffer.
-        REMOVE          = 0x4102, // Remove the sample rate converter on this track name;
-                                  // the track is restored to the mix sample rate.
-        // for target RAMP_VOLUME and VOLUME (8 channels max)
-        // FIXME use float for these 3 to improve the dynamic range
-        VOLUME0         = 0x4200,
-        VOLUME1         = 0x4201,
-        AUXLEVEL        = 0x4210,
-        // for target TIMESTRETCH
-        PLAYBACK_RATE   = 0x4300, // Configure timestretch on this track name;
-                                  // parameter 'value' is a pointer to the new playback rate.
-    };
-
-    typedef enum { // Haptic intensity, should keep consistent with VibratorService
-        HAPTIC_SCALE_MUTE = os::IExternalVibratorService::SCALE_MUTE,
-        HAPTIC_SCALE_VERY_LOW = os::IExternalVibratorService::SCALE_VERY_LOW,
-        HAPTIC_SCALE_LOW = os::IExternalVibratorService::SCALE_LOW,
-        HAPTIC_SCALE_NONE = os::IExternalVibratorService::SCALE_NONE,
-        HAPTIC_SCALE_HIGH = os::IExternalVibratorService::SCALE_HIGH,
-        HAPTIC_SCALE_VERY_HIGH = os::IExternalVibratorService::SCALE_VERY_HIGH,
-    } haptic_intensity_t;
-    static constexpr float HAPTIC_SCALE_VERY_LOW_RATIO = 2.0f / 3.0f;
-    static constexpr float HAPTIC_SCALE_LOW_RATIO = 3.0f / 4.0f;
-    static const constexpr float HAPTIC_MAX_AMPLITUDE_FLOAT = 1.0f;
-
-    static inline bool isValidHapticIntensity(haptic_intensity_t hapticIntensity) {
-        switch (hapticIntensity) {
-        case HAPTIC_SCALE_MUTE:
-        case HAPTIC_SCALE_VERY_LOW:
-        case HAPTIC_SCALE_LOW:
-        case HAPTIC_SCALE_NONE:
-        case HAPTIC_SCALE_HIGH:
-        case HAPTIC_SCALE_VERY_HIGH:
-            return true;
-        default:
-            return false;
-        }
-    }
-
-    AudioMixer(size_t frameCount, uint32_t sampleRate)
-        : mSampleRate(sampleRate)
-        , mFrameCount(frameCount) {
-        pthread_once(&sOnceControl, &sInitRoutine);
-    }
-
-    // Create a new track in the mixer.
-    //
-    // \param name        a unique user-provided integer associated with the track.
-    //                    If name already exists, the function will abort.
-    // \param channelMask output channel mask.
-    // \param format      PCM format
-    // \param sessionId   Session id for the track. Tracks with the same
-    //                    session id will be submixed together.
-    //
-    // \return OK        on success.
-    //         BAD_VALUE if the format does not satisfy isValidFormat()
-    //                   or the channelMask does not satisfy isValidChannelMask().
-    status_t    create(
-            int name, audio_channel_mask_t channelMask, audio_format_t format, int sessionId);
-
-    bool        exists(int name) const {
-        return mTracks.count(name) > 0;
-    }
-
-    // Free an allocated track by name.
-    void        destroy(int name);
-
-    // Enable or disable an allocated track by name
-    void        enable(int name);
-    void        disable(int name);
-
-    void        setParameter(int name, int target, int param, void *value);
-
-    void        setBufferProvider(int name, AudioBufferProvider* bufferProvider);
-
-    void        process() {
-        for (const auto &pair : mTracks) {
-            // Clear contracted buffer before processing if contracted channels are saved
-            const std::shared_ptr<Track> &t = pair.second;
-            if (t->mKeepContractedChannels) {
-                t->clearContractedBuffer();
-            }
-        }
-        (this->*mHook)();
-        processHapticData();
-    }
-
-    size_t      getUnreleasedFrames(int name) const;
-
-    std::string trackNames() const {
-        std::stringstream ss;
-        for (const auto &pair : mTracks) {
-            ss << pair.first << " ";
-        }
-        return ss.str();
-    }
-
-    void        setNBLogWriter(NBLog::Writer *logWriter) {
-        mNBLogWriter = logWriter;
-    }
-
-    static inline bool isValidFormat(audio_format_t format) {
-        switch (format) {
-        case AUDIO_FORMAT_PCM_8_BIT:
-        case AUDIO_FORMAT_PCM_16_BIT:
-        case AUDIO_FORMAT_PCM_24_BIT_PACKED:
-        case AUDIO_FORMAT_PCM_32_BIT:
-        case AUDIO_FORMAT_PCM_FLOAT:
-            return true;
-        default:
-            return false;
-        }
-    }
-
-    static inline bool isValidChannelMask(audio_channel_mask_t channelMask) {
-        return audio_channel_mask_is_valid(channelMask); // the RemixBufferProvider is flexible.
-    }
-
-private:
-
-    /* For multi-format functions (calls template functions
-     * in AudioMixerOps.h).  The template parameters are as follows:
-     *
-     *   MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
-     *   USEFLOATVOL (set to true if float volume is used)
-     *   ADJUSTVOL   (set to true if volume ramp parameters needs adjustment afterwards)
-     *   TO: int32_t (Q4.27) or float
-     *   TI: int32_t (Q4.27) or int16_t (Q0.15) or float
-     *   TA: int32_t (Q4.27)
-     */
-
-    enum {
-        // FIXME this representation permits up to 8 channels
-        NEEDS_CHANNEL_COUNT__MASK   = 0x00000007,
-    };
-
-    enum {
-        NEEDS_CHANNEL_1             = 0x00000000,   // mono
-        NEEDS_CHANNEL_2             = 0x00000001,   // stereo
-
-        // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT
-
-        NEEDS_MUTE                  = 0x00000100,
-        NEEDS_RESAMPLE              = 0x00001000,
-        NEEDS_AUX                   = 0x00010000,
-    };
-
-    // hook types
-    enum {
-        PROCESSTYPE_NORESAMPLEONETRACK, // others set elsewhere
-    };
-
-    enum {
-        TRACKTYPE_NOP,
-        TRACKTYPE_RESAMPLE,
-        TRACKTYPE_NORESAMPLE,
-        TRACKTYPE_NORESAMPLEMONO,
-    };
-
-    // process hook functionality
-    using process_hook_t = void(AudioMixer::*)();
-
-    struct Track;
-    using hook_t = void(Track::*)(int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux);
-
-    struct Track {
-        Track()
-            : bufferProvider(nullptr)
-        {
-            // TODO: move additional initialization here.
-        }
-
-        ~Track()
-        {
-            // bufferProvider, mInputBufferProvider need not be deleted.
-            mResampler.reset(nullptr);
-            // Ensure the order of destruction of buffer providers as they
-            // release the upstream provider in the destructor.
-            mTimestretchBufferProvider.reset(nullptr);
-            mPostDownmixReformatBufferProvider.reset(nullptr);
-            mDownmixerBufferProvider.reset(nullptr);
-            mReformatBufferProvider.reset(nullptr);
-            mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
-            mAdjustChannelsBufferProvider.reset(nullptr);
-        }
-
-        bool        needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
-        bool        setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
-        bool        doesResample() const { return mResampler.get() != nullptr; }
-        void        resetResampler() { if (mResampler.get() != nullptr) mResampler->reset(); }
-        void        adjustVolumeRamp(bool aux, bool useFloat = false);
-        size_t      getUnreleasedFrames() const { return mResampler.get() != nullptr ?
-                                                    mResampler->getUnreleasedFrames() : 0; };
-
-        status_t    prepareForDownmix();
-        void        unprepareForDownmix();
-        status_t    prepareForReformat();
-        void        unprepareForReformat();
-        status_t    prepareForAdjustChannels();
-        void        unprepareForAdjustChannels();
-        status_t    prepareForAdjustChannelsNonDestructive(size_t frames);
-        void        unprepareForAdjustChannelsNonDestructive();
-        void        clearContractedBuffer();
-        bool        setPlaybackRate(const AudioPlaybackRate &playbackRate);
-        void        reconfigureBufferProviders();
-
-        static hook_t getTrackHook(int trackType, uint32_t channelCount,
-                audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
-
-        void track__nop(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
-
-        template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
-            typename TO, typename TI, typename TA>
-        void volumeMix(TO *out, size_t outFrames, const TI *in, TA *aux, bool ramp);
-
-        uint32_t    needs;
-
-        // TODO: Eventually remove legacy integer volume settings
-        union {
-        int16_t     volume[MAX_NUM_VOLUMES]; // U4.12 fixed point (top bit should be zero)
-        int32_t     volumeRL;
-        };
-
-        int32_t     prevVolume[MAX_NUM_VOLUMES];
-        int32_t     volumeInc[MAX_NUM_VOLUMES];
-        int32_t     auxInc;
-        int32_t     prevAuxLevel;
-        int16_t     auxLevel;       // 0 <= auxLevel <= MAX_GAIN_INT, but signed for mul performance
-
-        uint16_t    frameCount;
-
-        uint8_t     channelCount;   // 1 or 2, redundant with (needs & NEEDS_CHANNEL_COUNT__MASK)
-        uint8_t     unused_padding; // formerly format, was always 16
-        uint16_t    enabled;        // actually bool
-        audio_channel_mask_t channelMask;
-
-        // actual buffer provider used by the track hooks, see DownmixerBufferProvider below
-        //  for how the Track buffer provider is wrapped by another one when downmixing is required
-        AudioBufferProvider*                bufferProvider;
-
-        mutable AudioBufferProvider::Buffer buffer; // 8 bytes
-
-        hook_t      hook;
-        const void  *mIn;             // current location in buffer
-
-        std::unique_ptr<AudioResampler> mResampler;
-        uint32_t            sampleRate;
-        int32_t*           mainBuffer;
-        int32_t*           auxBuffer;
-
-        /* Buffer providers are constructed to translate the track input data as needed.
-         *
-         * TODO: perhaps make a single PlaybackConverterProvider class to move
-         * all pre-mixer track buffer conversions outside the AudioMixer class.
-         *
-         * 1) mInputBufferProvider: The AudioTrack buffer provider.
-         * 2) mAdjustChannelsBufferProvider: Expands or contracts sample data from one interleaved
-         *    channel format to another. Expanded channels are filled with zeros and put at the end
-         *    of each audio frame. Contracted channels are copied to the end of the buffer.
-         * 3) mContractChannelsNonDestructiveBufferProvider: Non-destructively contract sample data.
-         *    This is currently used in audio-haptic coupled playback to separate audio and haptic
-         *    data. Contracted channels may be written to a given buffer.
-         * 4) mReformatBufferProvider: If not NULL, performs the audio reformat to
-         *    match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
-         *    requires reformat. For example, it may convert floating point input to
-         *    PCM_16_bit if that's required by the downmixer.
-         * 5) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
-         *    the number of channels required by the mixer sink.
-         * 6) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
-         *    the downmixer requirements to the mixer engine input requirements.
-         * 7) mTimestretchBufferProvider: Adds timestretching for playback rate
-         */
-        AudioBufferProvider*     mInputBufferProvider;    // externally provided buffer provider.
-        // TODO: combine mAdjustChannelsBufferProvider and
-        // mContractChannelsNonDestructiveBufferProvider
-        std::unique_ptr<PassthruBufferProvider> mAdjustChannelsBufferProvider;
-        std::unique_ptr<PassthruBufferProvider> mContractChannelsNonDestructiveBufferProvider;
-        std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
-        std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
-        std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
-        std::unique_ptr<PassthruBufferProvider> mTimestretchBufferProvider;
-
-        int32_t     sessionId;
-
-        audio_format_t mMixerFormat;     // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
-        audio_format_t mFormat;          // input track format
-        audio_format_t mMixerInFormat;   // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
-                                         // each track must be converted to this format.
-        audio_format_t mDownmixRequiresFormat;  // required downmixer format
-                                                // AUDIO_FORMAT_PCM_16_BIT if 16 bit necessary
-                                                // AUDIO_FORMAT_INVALID if no required format
-
-        float          mVolume[MAX_NUM_VOLUMES];     // floating point set volume
-        float          mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume
-        float          mVolumeInc[MAX_NUM_VOLUMES];  // floating point volume increment
-
-        float          mAuxLevel;                     // floating point set aux level
-        float          mPrevAuxLevel;                 // floating point prev aux level
-        float          mAuxInc;                       // floating point aux increment
-
-        audio_channel_mask_t mMixerChannelMask;
-        uint32_t             mMixerChannelCount;
-
-        AudioPlaybackRate    mPlaybackRate;
-
-        // Haptic
-        bool                 mHapticPlaybackEnabled;
-        haptic_intensity_t   mHapticIntensity;
-        audio_channel_mask_t mHapticChannelMask;
-        uint32_t             mHapticChannelCount;
-        audio_channel_mask_t mMixerHapticChannelMask;
-        uint32_t             mMixerHapticChannelCount;
-        uint32_t             mAdjustInChannelCount;
-        uint32_t             mAdjustOutChannelCount;
-        uint32_t             mAdjustNonDestructiveInChannelCount;
-        uint32_t             mAdjustNonDestructiveOutChannelCount;
-        bool                 mKeepContractedChannels;
-
-        float getHapticScaleGamma() const {
-        // Need to keep consistent with the value in VibratorService.
-        switch (mHapticIntensity) {
-        case HAPTIC_SCALE_VERY_LOW:
-            return 2.0f;
-        case HAPTIC_SCALE_LOW:
-            return 1.5f;
-        case HAPTIC_SCALE_HIGH:
-            return 0.5f;
-        case HAPTIC_SCALE_VERY_HIGH:
-            return 0.25f;
-        default:
-            return 1.0f;
-        }
-        }
-
-        float getHapticMaxAmplitudeRatio() const {
-        // Need to keep consistent with the value in VibratorService.
-        switch (mHapticIntensity) {
-        case HAPTIC_SCALE_VERY_LOW:
-            return HAPTIC_SCALE_VERY_LOW_RATIO;
-        case HAPTIC_SCALE_LOW:
-            return HAPTIC_SCALE_LOW_RATIO;
-        case HAPTIC_SCALE_NONE:
-        case HAPTIC_SCALE_HIGH:
-        case HAPTIC_SCALE_VERY_HIGH:
-            return 1.0f;
-        default:
-            return 0.0f;
-        }
-        }
-
-    private:
-        // hooks
-        void track__genericResample(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
-        void track__16BitsStereo(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
-        void track__16BitsMono(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
-
-        void volumeRampStereo(int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
-        void volumeStereo(int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
-
-        // multi-format track hooks
-        template <int MIXTYPE, typename TO, typename TI, typename TA>
-        void track__Resample(TO* out, size_t frameCount, TO* temp __unused, TA* aux);
-        template <int MIXTYPE, typename TO, typename TI, typename TA>
-        void track__NoResample(TO* out, size_t frameCount, TO* temp __unused, TA* aux);
-    };
-
-    // TODO: remove BLOCKSIZE unit of processing - it isn't needed anymore.
-    static constexpr int BLOCKSIZE = 16;
-
-    bool setChannelMasks(int name,
-            audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask);
-
-    // Called when track info changes and a new process hook should be determined.
-    void invalidate() {
-        mHook = &AudioMixer::process__validate;
-    }
-
-    void process__validate();
-    void process__nop();
-    void process__genericNoResampling();
-    void process__genericResampling();
-    void process__oneTrack16BitsStereoNoResampling();
-
-    template <int MIXTYPE, typename TO, typename TI, typename TA>
-    void process__noResampleOneTrack();
-
-    void processHapticData();
-
-    static process_hook_t getProcessHook(int processType, uint32_t channelCount,
-            audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
-
-    static void convertMixerFormat(void *out, audio_format_t mixerOutFormat,
-            void *in, audio_format_t mixerInFormat, size_t sampleCount);
-
-    static void sInitRoutine();
-
-    // initialization constants
-    const uint32_t mSampleRate;
-    const size_t mFrameCount;
-
-    NBLog::Writer *mNBLogWriter = nullptr;   // associated NBLog::Writer
-
-    process_hook_t mHook = &AudioMixer::process__nop;   // one of process__*, never nullptr
-
-    // the size of the type (int32_t) should be the largest of all types supported
-    // by the mixer.
-    std::unique_ptr<int32_t[]> mOutputTemp;
-    std::unique_ptr<int32_t[]> mResampleTemp;
-
-    // track names grouped by main buffer, in no particular order of main buffer.
-    // however names for a particular main buffer are in order (by construction).
-    std::unordered_map<void * /* mainBuffer */, std::vector<int /* name */>> mGroups;
-
-    // track names that are enabled, in increasing order (by construction).
-    std::vector<int /* name */> mEnabled;
-
-    // track smart pointers, by name, in increasing order of name.
-    std::map<int /* name */, std::shared_ptr<Track>> mTracks;
-
-    static pthread_once_t sOnceControl; // initialized in constructor by first new
-};
-
-// ----------------------------------------------------------------------------
-} // namespace android
-
-#endif // ANDROID_AUDIO_MIXER_H
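The header deleted above was the public face of AudioFlinger's mixer. As an illustration only (not part of this change), a minimal sketch of how that API was typically driven, assuming the caller already owns an AudioBufferProvider and an output buffer; mixOneTrack and kName are hypothetical names, and the include path reflects the location before this move:

#include <media/AudioMixer.h>   // path prior to this change (assumption)

static void mixOneTrack(android::AudioMixer& mixer,
                        android::AudioBufferProvider* source,
                        int32_t* mainBuffer) {
    using AM = android::AudioMixer;
    constexpr int kName = 0;                         // caller-chosen unique track name
    mixer.create(kName, AUDIO_CHANNEL_OUT_STEREO, AUDIO_FORMAT_PCM_FLOAT,
                 AUDIO_SESSION_OUTPUT_MIX);
    mixer.setBufferProvider(kName, source);
    mixer.setParameter(kName, AM::TRACK, AM::MAIN_BUFFER, mainBuffer);
    float unity = AM::UNITY_GAIN_FLOAT;
    mixer.setParameter(kName, AM::VOLUME, AM::VOLUME0, &unity);
    mixer.setParameter(kName, AM::VOLUME, AM::VOLUME1, &unity);
    mixer.enable(kName);
    mixer.process();                                 // dispatches through the selected hook
    mixer.destroy(kName);
}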
diff --git a/media/libaudioclient/include/media/AudioParameter.h b/media/libaudioclient/include/media/AudioParameter.h
index 7469976..3c190f2 100644
--- a/media/libaudioclient/include/media/AudioParameter.h
+++ b/media/libaudioclient/include/media/AudioParameter.h
@@ -70,6 +70,9 @@
     //  keyDeviceConnect / Disconnect: value is an int in audio_devices_t
     static const char * const keyDeviceConnect;
     static const char * const keyDeviceDisconnect;
+    //  Need to be here because vendors still use them.
+    static const char * const keyStreamConnect;  // Deprecated: DO NOT USE.
+    static const char * const keyStreamDisconnect;  // Deprecated: DO NOT USE.
 
     // For querying stream capabilities. All the returned values are lists.
     //   keyStreamSupportedFormats: audio_format_t
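keyStreamConnect/keyStreamDisconnect are kept only for legacy vendor code; new code should keep using keyDeviceConnect/keyDeviceDisconnect. A hedged sketch of how such a key/value string is usually assembled; makeConnectParam is a hypothetical helper and the exact key string comes from AudioParameter itself:

#include <media/AudioParameter.h>

// Hypothetical helper: builds the key/value string later forwarded to
// setParameters() when a device connects.
static android::String8 makeConnectParam(audio_devices_t device) {
    android::AudioParameter param;
    param.addInt(android::String8(android::AudioParameter::keyDeviceConnect),
                 static_cast<int>(device));
    return param.toString();
}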
diff --git a/media/libmedia/include/media/ExtendedAudioBufferProvider.h b/media/libaudioclient/include/media/ExtendedAudioBufferProvider.h
similarity index 100%
rename from media/libmedia/include/media/ExtendedAudioBufferProvider.h
rename to media/libaudioclient/include/media/ExtendedAudioBufferProvider.h
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 8ec8931..b580a88 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -70,8 +70,12 @@
                 return DEAD_OBJECT;
             }
             if (parcel->readInt32() != 0) {
+                // TODO: Using unsecurePointer() has some associated security
+                //       pitfalls (see declaration for details).
+                //       Either document why it is safe in this case or address
+                //       the issue (e.g. by copying).
                 sharedBuffer = interface_cast<IMemory>(parcel->readStrongBinder());
-                if (sharedBuffer == 0 || sharedBuffer->pointer() == NULL) {
+                if (sharedBuffer == 0 || sharedBuffer->unsecurePointer() == NULL) {
                     return BAD_VALUE;
                 }
             }
@@ -269,13 +273,21 @@
             (void)parcel->read(&inputId, sizeof(audio_io_handle_t));
             if (parcel->readInt32() != 0) {
                 cblk = interface_cast<IMemory>(parcel->readStrongBinder());
-                if (cblk == 0 || cblk->pointer() == NULL) {
+                // TODO: Using unsecurePointer() has some associated security
+                //       pitfalls (see declaration for details).
+                //       Either document why it is safe in this case or address
+                //       the issue (e.g. by copying).
+                if (cblk == 0 || cblk->unsecurePointer() == NULL) {
                     return BAD_VALUE;
                 }
             }
             if (parcel->readInt32() != 0) {
                 buffers = interface_cast<IMemory>(parcel->readStrongBinder());
-                if (buffers == 0 || buffers->pointer() == NULL) {
+                // TODO: Using unsecurePointer() has some associated security
+                //       pitfalls (see declaration for details).
+                //       Either document why it is safe in this case or address
+                //       the issue (e.g. by copying).
+                if (buffers == 0 || buffers->unsecurePointer() == NULL) {
                     return BAD_VALUE;
                 }
             }
@@ -384,7 +396,7 @@
     // mic mute/state
     virtual     status_t    setMicMute(bool state) = 0;
     virtual     bool        getMicMute() const = 0;
-    virtual     void        setRecordSilenced(uid_t uid, bool silenced) = 0;
+    virtual     void        setRecordSilenced(audio_port_handle_t portId, bool silenced) = 0;
 
     virtual     status_t    setParameters(audio_io_handle_t ioHandle,
                                     const String8& keyValuePairs) = 0;
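The TODOs above flag every unsecurePointer() call site introduced here. A hypothetical helper, not part of this patch, showing the shape of the validation these hunks apply to optional IMemory fields read from a Parcel:

#include <binder/IMemory.h>
#include <binder/Parcel.h>

static bool readOptionalSharedMemory(const android::Parcel* parcel,
                                     android::sp<android::IMemory>* out) {
    using namespace android;
    if (parcel->readInt32() == 0) return true;       // optional field absent
    *out = interface_cast<IMemory>(parcel->readStrongBinder());
    // unsecurePointer() is still unsafe against a malicious sender; callers
    // should document why that is acceptable or copy the data out.
    return *out != nullptr && (*out)->unsecurePointer() != nullptr;
}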
diff --git a/media/libaudiofoundation/Android.bp b/media/libaudiofoundation/Android.bp
new file mode 100644
index 0000000..9b9f12f
--- /dev/null
+++ b/media/libaudiofoundation/Android.bp
@@ -0,0 +1,39 @@
+cc_library_headers {
+    name: "libaudiofoundation_headers",
+    vendor_available: true,
+    export_include_dirs: ["include"],
+}
+
+cc_library_shared {
+    name: "libaudiofoundation",
+    vendor_available: true,
+
+    srcs: [
+        "AudioGain.cpp",
+        "AudioPort.cpp",
+        "AudioProfile.cpp",
+        "DeviceDescriptorBase.cpp",
+    ],
+
+    shared_libs: [
+        "libaudioutils",
+        "libbase",
+        "libbinder",
+        "liblog",
+        "libmedia_helper",
+        "libutils",
+    ],
+
+    header_libs: [
+        "libaudio_system_headers",
+        "libaudioclient_headers",
+        "libaudiofoundation_headers",
+    ],
+
+    export_header_lib_headers: ["libaudiofoundation_headers"],
+
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+}
diff --git a/media/libaudiofoundation/AudioGain.cpp b/media/libaudiofoundation/AudioGain.cpp
new file mode 100644
index 0000000..9d1d6db
--- /dev/null
+++ b/media/libaudiofoundation/AudioGain.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioGain"
+//#define LOG_NDEBUG 0
+
+//#define VERY_VERBOSE_LOGGING
+#ifdef VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+#include <android-base/stringprintf.h>
+#include <media/AudioGain.h>
+#include <utils/Log.h>
+
+#include <math.h>
+
+namespace android {
+
+AudioGain::AudioGain(int index, bool useInChannelMask)
+{
+    mIndex = index;
+    mUseInChannelMask = useInChannelMask;
+    memset(&mGain, 0, sizeof(struct audio_gain));
+}
+
+void AudioGain::getDefaultConfig(struct audio_gain_config *config)
+{
+    config->index = mIndex;
+    config->mode = mGain.mode;
+    config->channel_mask = mGain.channel_mask;
+    if ((mGain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
+        config->values[0] = mGain.default_value;
+    } else {
+        uint32_t numValues;
+        if (mUseInChannelMask) {
+            numValues = audio_channel_count_from_in_mask(mGain.channel_mask);
+        } else {
+            numValues = audio_channel_count_from_out_mask(mGain.channel_mask);
+        }
+        for (size_t i = 0; i < numValues; i++) {
+            config->values[i] = mGain.default_value;
+        }
+    }
+    if ((mGain.mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
+        config->ramp_duration_ms = mGain.min_ramp_ms;
+    }
+}
+
+status_t AudioGain::checkConfig(const struct audio_gain_config *config)
+{
+    if ((config->mode & ~mGain.mode) != 0) {
+        return BAD_VALUE;
+    }
+    if ((config->mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
+        if ((config->values[0] < mGain.min_value) ||
+                    (config->values[0] > mGain.max_value)) {
+            return BAD_VALUE;
+        }
+    } else {
+        if ((config->channel_mask & ~mGain.channel_mask) != 0) {
+            return BAD_VALUE;
+        }
+        uint32_t numValues;
+        if (mUseInChannelMask) {
+            numValues = audio_channel_count_from_in_mask(config->channel_mask);
+        } else {
+            numValues = audio_channel_count_from_out_mask(config->channel_mask);
+        }
+        for (size_t i = 0; i < numValues; i++) {
+            if ((config->values[i] < mGain.min_value) ||
+                    (config->values[i] > mGain.max_value)) {
+                return BAD_VALUE;
+            }
+        }
+    }
+    if ((config->mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
+        if ((config->ramp_duration_ms < mGain.min_ramp_ms) ||
+                    (config->ramp_duration_ms > mGain.max_ramp_ms)) {
+            return BAD_VALUE;
+        }
+    }
+    return NO_ERROR;
+}
+
+void AudioGain::dump(std::string *dst, int spaces, int index) const
+{
+    dst->append(base::StringPrintf("%*sGain %d:\n", spaces, "", index+1));
+    dst->append(base::StringPrintf("%*s- mode: %08x\n", spaces, "", mGain.mode));
+    dst->append(base::StringPrintf("%*s- channel_mask: %08x\n", spaces, "", mGain.channel_mask));
+    dst->append(base::StringPrintf("%*s- min_value: %d mB\n", spaces, "", mGain.min_value));
+    dst->append(base::StringPrintf("%*s- max_value: %d mB\n", spaces, "", mGain.max_value));
+    dst->append(base::StringPrintf("%*s- default_value: %d mB\n", spaces, "", mGain.default_value));
+    dst->append(base::StringPrintf("%*s- step_value: %d mB\n", spaces, "", mGain.step_value));
+    dst->append(base::StringPrintf("%*s- min_ramp_ms: %d ms\n", spaces, "", mGain.min_ramp_ms));
+    dst->append(base::StringPrintf("%*s- max_ramp_ms: %d ms\n", spaces, "", mGain.max_ramp_ms));
+}
+
+status_t AudioGain::writeToParcel(android::Parcel *parcel) const
+{
+    status_t status = NO_ERROR;
+    if ((status = parcel->writeInt32(mIndex)) != NO_ERROR) return status;
+    if ((status = parcel->writeBool(mUseInChannelMask)) != NO_ERROR) return status;
+    if ((status = parcel->writeBool(mUseForVolume)) != NO_ERROR) return status;
+    if ((status = parcel->writeUint32(mGain.mode)) != NO_ERROR) return status;
+    if ((status = parcel->writeUint32(mGain.channel_mask)) != NO_ERROR) return status;
+    if ((status = parcel->writeInt32(mGain.min_value)) != NO_ERROR) return status;
+    if ((status = parcel->writeInt32(mGain.max_value)) != NO_ERROR) return status;
+    if ((status = parcel->writeInt32(mGain.default_value)) != NO_ERROR) return status;
+    if ((status = parcel->writeUint32(mGain.step_value)) != NO_ERROR) return status;
+    if ((status = parcel->writeUint32(mGain.min_ramp_ms)) != NO_ERROR) return status;
+    status = parcel->writeUint32(mGain.max_ramp_ms);
+    return status;
+}
+
+status_t AudioGain::readFromParcel(const android::Parcel *parcel)
+{
+    status_t status = NO_ERROR;
+    if ((status = parcel->readInt32(&mIndex)) != NO_ERROR) return status;
+    if ((status = parcel->readBool(&mUseInChannelMask)) != NO_ERROR) return status;
+    if ((status = parcel->readBool(&mUseForVolume)) != NO_ERROR) return status;
+    if ((status = parcel->readUint32(&mGain.mode)) != NO_ERROR) return status;
+    if ((status = parcel->readUint32(&mGain.channel_mask)) != NO_ERROR) return status;
+    if ((status = parcel->readInt32(&mGain.min_value)) != NO_ERROR) return status;
+    if ((status = parcel->readInt32(&mGain.max_value)) != NO_ERROR) return status;
+    if ((status = parcel->readInt32(&mGain.default_value)) != NO_ERROR) return status;
+    if ((status = parcel->readUint32(&mGain.step_value)) != NO_ERROR) return status;
+    if ((status = parcel->readUint32(&mGain.min_ramp_ms)) != NO_ERROR) return status;
+    status = parcel->readUint32(&mGain.max_ramp_ms);
+    return status;
+}
+
+status_t AudioGains::writeToParcel(android::Parcel *parcel) const {
+    status_t status = NO_ERROR;
+    if ((status = parcel->writeUint64(this->size())) != NO_ERROR) return status;
+    for (const auto &audioGain : *this) {
+        if ((status = parcel->writeParcelable(*audioGain)) != NO_ERROR) {
+            break;
+        }
+    }
+    return status;
+}
+
+status_t AudioGains::readFromParcel(const android::Parcel *parcel) {
+    status_t status = NO_ERROR;
+    uint64_t count;
+    if ((status = parcel->readUint64(&count)) != NO_ERROR) return status;
+    for (uint64_t i = 0; i < count; i++) {
+        sp<AudioGain> audioGain = new AudioGain(0, false);
+        if ((status = parcel->readParcelable(audioGain.get())) != NO_ERROR) {
+            this->clear();
+            break;
+        }
+        this->push_back(audioGain);
+    }
+    return status;
+}
+
+} // namespace android
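A brief, hypothetical illustration of the checkConfig()/getDefaultConfig() pair defined above; applyDefaultGain is an invented name, and a real gain would normally be populated from the HAL's audio_port information:

#include <media/AudioGain.h>

static android::status_t applyDefaultGain(const android::sp<android::AudioGain>& gain) {
    struct audio_gain_config config;
    gain->getDefaultConfig(&config);     // index, mode, channel mask, default values
    return gain->checkConfig(&config);   // the defaults should pass the range checks
}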
diff --git a/media/libaudiofoundation/AudioPort.cpp b/media/libaudiofoundation/AudioPort.cpp
new file mode 100644
index 0000000..cde31e4
--- /dev/null
+++ b/media/libaudiofoundation/AudioPort.cpp
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "AudioPort"
+
+#include <algorithm>
+
+#include <android-base/stringprintf.h>
+#include <media/AudioPort.h>
+#include <utils/Log.h>
+
+namespace android {
+
+void AudioPort::importAudioPort(const sp<AudioPort>& port, bool force __unused)
+{
+    for (const auto& profileToImport : port->mProfiles) {
+        // Import only valid profiles, i.e. valid format, non-empty rates and channel masks
+        if (!profileToImport->isValid()) {
+            continue;
+        }
+        if (std::find_if(mProfiles.begin(), mProfiles.end(),
+                [profileToImport](const auto &profile) {
+                        return *profile == *profileToImport; }) == mProfiles.end()) {
+            addAudioProfile(profileToImport);
+        }
+    }
+}
+
+void AudioPort::toAudioPort(struct audio_port *port) const {
+    // TODO: update this function once audio_port structure reflects the new profile definition.
+    // For compatibility reasons: flattening the AudioProfiles into the audio_port structure.
+    FormatSet flatenedFormats;
+    SampleRateSet flatenedRates;
+    ChannelMaskSet flatenedChannels;
+    for (const auto& profile : mProfiles) {
+        if (profile->isValid()) {
+            audio_format_t formatToExport = profile->getFormat();
+            const SampleRateSet &ratesToExport = profile->getSampleRates();
+            const ChannelMaskSet &channelsToExport = profile->getChannels();
+
+            flatenedFormats.insert(formatToExport);
+            flatenedRates.insert(ratesToExport.begin(), ratesToExport.end());
+            flatenedChannels.insert(channelsToExport.begin(), channelsToExport.end());
+
+            if (flatenedRates.size() > AUDIO_PORT_MAX_SAMPLING_RATES ||
+                    flatenedChannels.size() > AUDIO_PORT_MAX_CHANNEL_MASKS ||
+                    flatenedFormats.size() > AUDIO_PORT_MAX_FORMATS) {
+                ALOGE("%s: bailing out: cannot export profiles to port config", __func__);
+                return;
+            }
+        }
+    }
+    port->role = mRole;
+    port->type = mType;
+    strlcpy(port->name, mName.c_str(), AUDIO_PORT_MAX_NAME_LEN);
+    port->num_sample_rates = flatenedRates.size();
+    port->num_channel_masks = flatenedChannels.size();
+    port->num_formats = flatenedFormats.size();
+    std::copy(flatenedRates.begin(), flatenedRates.end(), port->sample_rates);
+    std::copy(flatenedChannels.begin(), flatenedChannels.end(), port->channel_masks);
+    std::copy(flatenedFormats.begin(), flatenedFormats.end(), port->formats);
+
+    ALOGV("AudioPort::toAudioPort() num gains %zu", mGains.size());
+
+    port->num_gains = std::min(mGains.size(), (size_t) AUDIO_PORT_MAX_GAINS);
+    for (size_t i = 0; i < port->num_gains; i++) {
+        port->gains[i] = mGains[i]->getGain();
+    }
+}
+
+void AudioPort::dump(std::string *dst, int spaces, bool verbose) const {
+    if (!mName.empty()) {
+        dst->append(base::StringPrintf("%*s- name: %s\n", spaces, "", mName.c_str()));
+    }
+    if (verbose) {
+        std::string profilesStr;
+        mProfiles.dump(&profilesStr, spaces);
+        dst->append(profilesStr);
+
+        if (mGains.size() != 0) {
+            dst->append(base::StringPrintf("%*s- gains:\n", spaces, ""));
+            for (size_t i = 0; i < mGains.size(); i++) {
+                std::string gainStr;
+                mGains[i]->dump(&gainStr, spaces + 2, i);
+                dst->append(gainStr);
+            }
+        }
+    }
+}
+
+void AudioPort::log(const char* indent) const
+{
+    ALOGI("%s Port[nm:%s, type:%d, role:%d]", indent, mName.c_str(), mType, mRole);
+}
+
+// --- AudioPortConfig class implementation
+
+status_t AudioPortConfig::applyAudioPortConfig(
+        const struct audio_port_config *config,
+        struct audio_port_config *backupConfig __unused)
+{
+    if (config->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
+        mSamplingRate = config->sample_rate;
+    }
+    if (config->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
+        mChannelMask = config->channel_mask;
+    }
+    if (config->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
+        mFormat = config->format;
+    }
+    if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
+        mGain = config->gain;
+    }
+
+    return NO_ERROR;
+}
+
+namespace {
+
+template<typename T>
+void updateField(
+        const T& portConfigField, T audio_port_config::*port_config_field,
+        struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig,
+        unsigned int configMask, T defaultValue)
+{
+    if (dstConfig->config_mask & configMask) {
+        if ((srcConfig != nullptr) && (srcConfig->config_mask & configMask)) {
+            dstConfig->*port_config_field = srcConfig->*port_config_field;
+        } else {
+            dstConfig->*port_config_field = portConfigField;
+        }
+    } else {
+        dstConfig->*port_config_field = defaultValue;
+    }
+}
+
+} // namespace
+
+void AudioPortConfig::toAudioPortConfig(
+        struct audio_port_config *dstConfig,
+        const struct audio_port_config *srcConfig) const
+{
+    updateField(mSamplingRate, &audio_port_config::sample_rate,
+            dstConfig, srcConfig, AUDIO_PORT_CONFIG_SAMPLE_RATE, 0u);
+    updateField(mChannelMask, &audio_port_config::channel_mask,
+            dstConfig, srcConfig, AUDIO_PORT_CONFIG_CHANNEL_MASK,
+            (audio_channel_mask_t)AUDIO_CHANNEL_NONE);
+    updateField(mFormat, &audio_port_config::format,
+            dstConfig, srcConfig, AUDIO_PORT_CONFIG_FORMAT, AUDIO_FORMAT_INVALID);
+    dstConfig->id = mId;
+
+    sp<AudioPort> audioport = getAudioPort();
+    if ((dstConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) && audioport != NULL) {
+        dstConfig->gain = mGain;
+        if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_GAIN)
+                && audioport->checkGain(&srcConfig->gain, srcConfig->gain.index) == OK) {
+            dstConfig->gain = srcConfig->gain;
+        }
+    } else {
+        dstConfig->gain.index = -1;
+    }
+    if (dstConfig->gain.index != -1) {
+        dstConfig->config_mask |= AUDIO_PORT_CONFIG_GAIN;
+    } else {
+        dstConfig->config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
+    }
+}
+
+bool AudioPortConfig::hasGainController(bool canUseForVolume) const
+{
+    sp<AudioPort> audioport = getAudioPort();
+    if (!audioport) {
+        return false;
+    }
+    return canUseForVolume ? audioport->getGains().canUseForVolume()
+                           : audioport->getGains().size() > 0;
+}
+
+}
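The anonymous-namespace updateField() helper above encodes the precedence between the destination request mask, the source config, and the stored members. A sketch of that precedence, assuming portConfig is an instance of some concrete subclass that keeps the base toAudioPortConfig():

#include <media/AudioPort.h>

static void queryRateOnly(const android::sp<android::AudioPortConfig>& portConfig) {
    struct audio_port_config dst = {};
    dst.config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE;   // request only the sample rate
    portConfig->toAudioPortConfig(&dst, nullptr /*srcConfig*/);
    // dst.sample_rate now holds the stored member value; channel_mask and format
    // were reset to AUDIO_CHANNEL_NONE / AUDIO_FORMAT_INVALID because their bits
    // were absent from dst.config_mask. A non-null srcConfig that sets those
    // bits would have taken precedence over the stored members.
}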
diff --git a/media/libaudiofoundation/AudioProfile.cpp b/media/libaudiofoundation/AudioProfile.cpp
new file mode 100644
index 0000000..1ae18c5
--- /dev/null
+++ b/media/libaudiofoundation/AudioProfile.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <set>
+
+#define LOG_TAG "AudioProfile"
+//#define LOG_NDEBUG 0
+
+#include <android-base/stringprintf.h>
+#include <media/AudioContainers.h>
+#include <media/AudioProfile.h>
+#include <media/TypeConverter.h>
+#include <utils/Errors.h>
+
+namespace android {
+
+bool operator == (const AudioProfile &left, const AudioProfile &right)
+{
+    return (left.getFormat() == right.getFormat()) &&
+            (left.getChannels() == right.getChannels()) &&
+            (left.getSampleRates() == right.getSampleRates());
+}
+
+// static
+sp<AudioProfile> AudioProfile::createFullDynamic(audio_format_t dynamicFormat)
+{
+    AudioProfile* dynamicProfile = new AudioProfile(dynamicFormat,
+            ChannelMaskSet(), SampleRateSet());
+    dynamicProfile->setDynamicFormat(true);
+    dynamicProfile->setDynamicChannels(true);
+    dynamicProfile->setDynamicRate(true);
+    return dynamicProfile;
+}
+
+AudioProfile::AudioProfile(audio_format_t format,
+                           audio_channel_mask_t channelMasks,
+                           uint32_t samplingRate) :
+        mName(""),
+        mFormat(format)
+{
+    mChannelMasks.insert(channelMasks);
+    mSamplingRates.insert(samplingRate);
+}
+
+AudioProfile::AudioProfile(audio_format_t format,
+                           const ChannelMaskSet &channelMasks,
+                           const SampleRateSet &samplingRateCollection) :
+        mName(""),
+        mFormat(format),
+        mChannelMasks(channelMasks),
+        mSamplingRates(samplingRateCollection) {}
+
+void AudioProfile::setChannels(const ChannelMaskSet &channelMasks)
+{
+    if (mIsDynamicChannels) {
+        mChannelMasks = channelMasks;
+    }
+}
+
+void AudioProfile::setSampleRates(const SampleRateSet &sampleRates)
+{
+    if (mIsDynamicRate) {
+        mSamplingRates = sampleRates;
+    }
+}
+
+void AudioProfile::clear()
+{
+    if (mIsDynamicChannels) {
+        mChannelMasks.clear();
+    }
+    if (mIsDynamicRate) {
+        mSamplingRates.clear();
+    }
+}
+
+void AudioProfile::dump(std::string *dst, int spaces) const
+{
+    dst->append(base::StringPrintf("%s%s%s\n", mIsDynamicFormat ? "[dynamic format]" : "",
+             mIsDynamicChannels ? "[dynamic channels]" : "",
+             mIsDynamicRate ? "[dynamic rates]" : ""));
+    if (mName.length() != 0) {
+        dst->append(base::StringPrintf("%*s- name: %s\n", spaces, "", mName.c_str()));
+    }
+    std::string formatLiteral;
+    if (FormatConverter::toString(mFormat, formatLiteral)) {
+        dst->append(base::StringPrintf("%*s- format: %s\n", spaces, "", formatLiteral.c_str()));
+    }
+    if (!mSamplingRates.empty()) {
+        dst->append(base::StringPrintf("%*s- sampling rates:", spaces, ""));
+        for (auto it = mSamplingRates.begin(); it != mSamplingRates.end();) {
+            dst->append(base::StringPrintf("%d", *it));
+            dst->append(++it == mSamplingRates.end() ? "" : ", ");
+        }
+        dst->append("\n");
+    }
+
+    if (!mChannelMasks.empty()) {
+        dst->append(base::StringPrintf("%*s- channel masks:", spaces, ""));
+        for (auto it = mChannelMasks.begin(); it != mChannelMasks.end();) {
+            dst->append(base::StringPrintf("0x%04x", *it));
+            dst->append(++it == mChannelMasks.end() ? "" : ", ");
+        }
+        dst->append("\n");
+    }
+}
+
+ssize_t AudioProfileVector::add(const sp<AudioProfile> &profile)
+{
+    ssize_t index = size();
+    push_back(profile);
+    return index;
+}
+
+void AudioProfileVector::clearProfiles()
+{
+    for (auto it = begin(); it != end();) {
+        if ((*it)->isDynamicFormat() && (*it)->hasValidFormat()) {
+            it = erase(it);
+        } else {
+            (*it)->clear();
+            ++it;
+        }
+    }
+}
+
+sp<AudioProfile> AudioProfileVector::getFirstValidProfile() const
+{
+    for (const auto &profile : *this) {
+        if (profile->isValid()) {
+            return profile;
+        }
+    }
+    return nullptr;
+}
+
+sp<AudioProfile> AudioProfileVector::getFirstValidProfileFor(audio_format_t format) const
+{
+    for (const auto &profile : *this) {
+        if (profile->isValid() && profile->getFormat() == format) {
+            return profile;
+        }
+    }
+    return nullptr;
+}
+
+FormatVector AudioProfileVector::getSupportedFormats() const
+{
+    FormatVector supportedFormats;
+    for (const auto &profile : *this) {
+        if (profile->hasValidFormat()) {
+            supportedFormats.push_back(profile->getFormat());
+        }
+    }
+    return supportedFormats;
+}
+
+bool AudioProfileVector::hasDynamicChannelsFor(audio_format_t format) const
+{
+    for (const auto &profile : *this) {
+        if (profile->getFormat() == format && profile->isDynamicChannels()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool AudioProfileVector::hasDynamicFormat() const
+{
+    for (const auto &profile : *this) {
+        if (profile->isDynamicFormat()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool AudioProfileVector::hasDynamicProfile() const
+{
+    for (const auto &profile : *this) {
+        if (profile->isDynamic()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool AudioProfileVector::hasDynamicRateFor(audio_format_t format) const
+{
+    for (const auto &profile : *this) {
+        if (profile->getFormat() == format && profile->isDynamicRate()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+void AudioProfileVector::dump(std::string *dst, int spaces) const
+{
+    dst->append(base::StringPrintf("%*s- Profiles:\n", spaces, ""));
+    for (size_t i = 0; i < size(); i++) {
+        dst->append(base::StringPrintf("%*sProfile %zu:", spaces + 4, "", i));
+        std::string profileStr;
+        at(i)->dump(&profileStr, spaces + 8);
+        dst->append(profileStr);
+    }
+}
+
+} // namespace android
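A short sketch, with arbitrary values, of the queries AudioProfileVector offers:

#include <media/AudioProfile.h>

static void inspectProfiles() {
    using namespace android;
    AudioProfileVector profiles;
    profiles.add(new AudioProfile(AUDIO_FORMAT_PCM_16_BIT,
                                  AUDIO_CHANNEL_OUT_STEREO, 48000));
    profiles.add(AudioProfile::createFullDynamic());        // fully dynamic placeholder
    const bool anyValid   = profiles.getFirstValidProfile() != nullptr;  // true
    const bool dynFormat  = profiles.hasDynamicFormat();                 // true
    const size_t nFormats = profiles.getSupportedFormats().size();       // 1
    (void)anyValid; (void)dynFormat; (void)nFormats;
}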
diff --git a/media/libaudiofoundation/DeviceDescriptorBase.cpp b/media/libaudiofoundation/DeviceDescriptorBase.cpp
new file mode 100644
index 0000000..f92c05d
--- /dev/null
+++ b/media/libaudiofoundation/DeviceDescriptorBase.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DeviceDescriptorBase"
+//#define LOG_NDEBUG 0
+
+#include <android-base/stringprintf.h>
+#include <audio_utils/string.h>
+#include <media/DeviceDescriptorBase.h>
+#include <media/TypeConverter.h>
+
+namespace android {
+
+DeviceDescriptorBase::DeviceDescriptorBase(audio_devices_t type) :
+    AudioPort("", AUDIO_PORT_TYPE_DEVICE,
+              audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK :
+                                             AUDIO_PORT_ROLE_SOURCE),
+    mDeviceType(type)
+{
+    if (audio_is_remote_submix_device(type)) {
+        mAddress = "0";
+    }
+}
+
+void DeviceDescriptorBase::toAudioPortConfig(struct audio_port_config *dstConfig,
+                                             const struct audio_port_config *srcConfig) const
+{
+    dstConfig->config_mask = AUDIO_PORT_CONFIG_GAIN;
+    if (mSamplingRate != 0) {
+        dstConfig->config_mask |= AUDIO_PORT_CONFIG_SAMPLE_RATE;
+    }
+    if (mChannelMask != AUDIO_CHANNEL_NONE) {
+        dstConfig->config_mask |= AUDIO_PORT_CONFIG_CHANNEL_MASK;
+    }
+    if (mFormat != AUDIO_FORMAT_INVALID) {
+        dstConfig->config_mask |= AUDIO_PORT_CONFIG_FORMAT;
+    }
+
+    if (srcConfig != NULL) {
+        dstConfig->config_mask |= srcConfig->config_mask;
+    }
+
+    AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
+
+    dstConfig->role = audio_is_output_device(mDeviceType) ?
+                        AUDIO_PORT_ROLE_SINK : AUDIO_PORT_ROLE_SOURCE;
+    dstConfig->type = AUDIO_PORT_TYPE_DEVICE;
+    dstConfig->ext.device.type = mDeviceType;
+
+    (void)audio_utils_strlcpy_zerofill(dstConfig->ext.device.address, mAddress.c_str());
+}
+
+void DeviceDescriptorBase::toAudioPort(struct audio_port *port) const
+{
+    ALOGV("DeviceDescriptorBase::toAudioPort() handle %d type %08x", mId, mDeviceType);
+    AudioPort::toAudioPort(port);
+    toAudioPortConfig(&port->active_config);
+    port->id = mId;
+    port->ext.device.type = mDeviceType;
+    (void)audio_utils_strlcpy_zerofill(port->ext.device.address, mAddress.c_str());
+}
+
+void DeviceDescriptorBase::dump(std::string *dst, int spaces, int index,
+                                const char* extraInfo, bool verbose) const
+{
+    dst->append(base::StringPrintf("%*sDevice %d:\n", spaces, "", index + 1));
+    if (mId != 0) {
+        dst->append(base::StringPrintf("%*s- id: %2d\n", spaces, "", mId));
+    }
+
+    if (extraInfo != nullptr) {
+        dst->append(extraInfo);
+    }
+
+    dst->append(base::StringPrintf("%*s- type: %-48s\n",
+            spaces, "", ::android::toString(mDeviceType).c_str()));
+
+    if (mAddress.size() != 0) {
+        dst->append(base::StringPrintf("%*s- address: %-32s\n", spaces, "", mAddress.c_str()));
+    }
+    AudioPort::dump(dst, spaces, verbose);
+}
+
+std::string DeviceDescriptorBase::toString() const
+{
+    std::stringstream sstream;
+    sstream << "type:0x" << std::hex << type() << ",@:" << mAddress;
+    return sstream.str();
+}
+
+void DeviceDescriptorBase::log() const
+{
+    ALOGI("Device id:%d type:0x%08X:%s, addr:%s", mId,  mDeviceType,
+          ::android::toString(mDeviceType).c_str(),
+          mAddress.c_str());
+
+    AudioPort::log("  ");
+}
+
+} // namespace android
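A sketch, with assumed values, of constructing the new base class and flattening it back into an audio_port:

#include <media/DeviceDescriptorBase.h>

static void describeSpeaker() {
    using namespace android;
    sp<DeviceDescriptorBase> speaker = new DeviceDescriptorBase(AUDIO_DEVICE_OUT_SPEAKER);
    speaker->addAudioProfile(new AudioProfile(AUDIO_FORMAT_PCM_16_BIT,
                                              AUDIO_CHANNEL_OUT_STEREO, 48000));
    struct audio_port port;
    speaker->toAudioPort(&port);                  // role, type and profiles flattened as above
    const std::string summary = speaker->toString();   // e.g. "type:0x2,@:"
    (void)summary;
}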
diff --git a/media/libaudiofoundation/include/media/AudioContainers.h b/media/libaudiofoundation/include/media/AudioContainers.h
new file mode 100644
index 0000000..3313224
--- /dev/null
+++ b/media/libaudiofoundation/include/media/AudioContainers.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <set>
+#include <vector>
+
+#include <system/audio.h>
+
+namespace android {
+
+using ChannelMaskSet = std::set<audio_channel_mask_t>;
+using FormatSet = std::set<audio_format_t>;
+using SampleRateSet = std::set<uint32_t>;
+
+using FormatVector = std::vector<audio_format_t>;
+
+static inline ChannelMaskSet asInMask(const ChannelMaskSet& channelMasks) {
+    ChannelMaskSet inMaskSet;
+    for (const auto &channel : channelMasks) {
+        if (audio_channel_mask_out_to_in(channel) != AUDIO_CHANNEL_INVALID) {
+            inMaskSet.insert(audio_channel_mask_out_to_in(channel));
+        }
+    }
+    return inMaskSet;
+}
+
+static inline ChannelMaskSet asOutMask(const ChannelMaskSet& channelMasks) {
+    ChannelMaskSet outMaskSet;
+    for (const auto &channel : channelMasks) {
+        if (audio_channel_mask_in_to_out(channel) != AUDIO_CHANNEL_INVALID) {
+            outMaskSet.insert(audio_channel_mask_in_to_out(channel));
+        }
+    }
+    return outMaskSet;
+}
+
+} // namespace android
\ No newline at end of file
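A small sketch of the asInMask()/asOutMask() conversions declared above; masks with no counterpart in the other direction are dropped from the result:

#include <media/AudioContainers.h>

static void convertMasks() {
    using namespace android;
    const ChannelMaskSet outMasks = { AUDIO_CHANNEL_OUT_MONO, AUDIO_CHANNEL_OUT_STEREO };
    const ChannelMaskSet inMasks = asInMask(outMasks);
    // inMasks == { AUDIO_CHANNEL_IN_MONO, AUDIO_CHANNEL_IN_STEREO }
    (void)inMasks;
}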
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h b/media/libaudiofoundation/include/media/AudioGain.h
similarity index 84%
rename from services/audiopolicy/common/managerdefinitions/include/AudioGain.h
rename to media/libaudiofoundation/include/media/AudioGain.h
index 4af93e1..6a7fb55 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
+++ b/media/libaudiofoundation/include/media/AudioGain.h
@@ -16,15 +16,17 @@
 
 #pragma once
 
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
-#include <utils/String8.h>
 #include <system/audio.h>
+#include <string>
 #include <vector>
 
 namespace android {
 
-class AudioGain: public RefBase
+class AudioGain: public RefBase, public Parcelable
 {
 public:
     AudioGain(int index, bool useInChannelMask);
@@ -55,7 +57,7 @@
     int getMaxRampInMs() const { return mGain.max_ramp_ms; }
 
     // TODO: remove dump from here (split serialization)
-    void dump(String8 *dst, int spaces, int index) const;
+    void dump(std::string *dst, int spaces, int index) const;
 
     void getDefaultConfig(struct audio_gain_config *config);
     status_t checkConfig(const struct audio_gain_config *config);
@@ -65,6 +67,9 @@
 
     const struct audio_gain &getGain() const { return mGain; }
 
+    status_t writeToParcel(Parcel* parcel) const override;
+    status_t readFromParcel(const Parcel* parcel) override;
+
 private:
     int               mIndex;
     struct audio_gain mGain;
@@ -72,7 +77,7 @@
     bool              mUseForVolume = false;
 };
 
-class AudioGains : public std::vector<sp<AudioGain> >
+class AudioGains : public std::vector<sp<AudioGain> >, public Parcelable
 {
 public:
     bool canUseForVolume() const
@@ -90,6 +95,9 @@
         push_back(gain);
         return 0;
     }
+
+    status_t writeToParcel(Parcel* parcel) const override;
+    status_t readFromParcel(const Parcel* parcel) override;
 };
 
 } // namespace android
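With AudioGains now Parcelable, the whole collection can round-trip through a Parcel; roundTripGains below is a hypothetical illustration of that:

#include <binder/Parcel.h>
#include <media/AudioGain.h>

static void roundTripGains() {
    using namespace android;
    AudioGains gains;
    gains.push_back(new AudioGain(0 /*index*/, false /*useInChannelMask*/));
    Parcel p;
    gains.writeToParcel(&p);          // element count followed by each AudioGain
    p.setDataPosition(0);
    AudioGains restored;
    restored.readFromParcel(&p);      // restored.size() == gains.size()
}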
diff --git a/media/libaudiofoundation/include/media/AudioPort.h b/media/libaudiofoundation/include/media/AudioPort.h
new file mode 100644
index 0000000..b8d54de
--- /dev/null
+++ b/media/libaudiofoundation/include/media/AudioPort.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <string>
+
+#include <media/AudioGain.h>
+#include <media/AudioProfile.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <system/audio.h>
+#include <cutils/config_utils.h>
+
+namespace android {
+
+class AudioPort : public virtual RefBase
+{
+public:
+    AudioPort(const std::string& name, audio_port_type_t type,  audio_port_role_t role) :
+            mName(name), mType(type), mRole(role) {}
+
+    virtual ~AudioPort() = default;
+
+    void setName(const std::string &name) { mName = name; }
+    const std::string &getName() const { return mName; }
+
+    audio_port_type_t getType() const { return mType; }
+    audio_port_role_t getRole() const { return mRole; }
+
+    void setGains(const AudioGains &gains) { mGains = gains; }
+    const AudioGains &getGains() const { return mGains; }
+
+    virtual void toAudioPort(struct audio_port *port) const;
+
+    virtual void addAudioProfile(const sp<AudioProfile> &profile) {
+        mProfiles.add(profile);
+    }
+    virtual void clearAudioProfiles() {
+        mProfiles.clearProfiles();
+    }
+
+    bool hasValidAudioProfile() const { return mProfiles.hasValidProfile(); }
+
+    bool hasDynamicAudioProfile() const { return mProfiles.hasDynamicProfile(); }
+
+    void setAudioProfiles(const AudioProfileVector &profiles) { mProfiles = profiles; }
+    AudioProfileVector &getAudioProfiles() { return mProfiles; }
+
+    virtual void importAudioPort(const sp<AudioPort>& port, bool force = false);
+
+    status_t checkGain(const struct audio_gain_config *gainConfig, int index) const {
+        if (index < 0 || (size_t)index >= mGains.size()) {
+            return BAD_VALUE;
+        }
+        return mGains[index]->checkConfig(gainConfig);
+    }
+
+    bool useInputChannelMask() const
+    {
+        return ((mType == AUDIO_PORT_TYPE_DEVICE) && (mRole == AUDIO_PORT_ROLE_SOURCE)) ||
+                ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SINK));
+    }
+
+    void dump(std::string *dst, int spaces, bool verbose = true) const;
+
+    void log(const char* indent) const;
+
+    AudioGains mGains; // gain controllers
+protected:
+    std::string  mName;
+    audio_port_type_t mType;
+    audio_port_role_t mRole;
+    AudioProfileVector mProfiles; // AudioProfiles supported by this port (format, rates, channels)
+};
+
+
+class AudioPortConfig : public virtual RefBase
+{
+public:
+    virtual ~AudioPortConfig() = default;
+
+    virtual sp<AudioPort> getAudioPort() const = 0;
+
+    virtual status_t applyAudioPortConfig(const struct audio_port_config *config,
+                                          struct audio_port_config *backupConfig = NULL);
+
+    virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
+                                   const struct audio_port_config *srcConfig = NULL) const;
+
+    unsigned int getSamplingRate() const { return mSamplingRate; }
+    audio_format_t getFormat() const { return mFormat; }
+    audio_channel_mask_t getChannelMask() const { return mChannelMask; }
+    audio_port_handle_t getId() const { return mId; }
+
+    bool hasGainController(bool canUseForVolume = false) const;
+
+protected:
+    unsigned int mSamplingRate = 0u;
+    audio_format_t mFormat = AUDIO_FORMAT_INVALID;
+    audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
+    audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
+    struct audio_gain_config mGain = { .index = -1 };
+};
+
+} // namespace android
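A minimal usage sketch of the AudioPort API declared above (the port name and the
fully dynamic profile are illustrative; AudioProfile is introduced later in this change):

    #include <media/AudioPort.h>
    #include <media/AudioProfile.h>

    using namespace android;

    // Hypothetical sink device port that advertises a fully dynamic profile,
    // i.e. format/rates/channels are resolved at runtime.
    sp<AudioPort> makeSpeakerPort() {
        sp<AudioPort> port = new AudioPort(
                "speaker", AUDIO_PORT_TYPE_DEVICE, AUDIO_PORT_ROLE_SINK);
        port->addAudioProfile(AudioProfile::createFullDynamic());
        // Expected: hasDynamicAudioProfile() is true, while hasValidAudioProfile()
        // stays false until the dynamic profile is filled with concrete values.
        return port;
    }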
diff --git a/media/libaudiofoundation/include/media/AudioProfile.h b/media/libaudiofoundation/include/media/AudioProfile.h
new file mode 100644
index 0000000..f4c2e12
--- /dev/null
+++ b/media/libaudiofoundation/include/media/AudioProfile.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include <media/AudioContainers.h>
+#include <system/audio.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class AudioProfile final : public RefBase
+{
+public:
+    static sp<AudioProfile> createFullDynamic(audio_format_t dynamicFormat = AUDIO_FORMAT_DEFAULT);
+
+    AudioProfile(audio_format_t format, audio_channel_mask_t channelMasks, uint32_t samplingRate);
+    AudioProfile(audio_format_t format,
+                 const ChannelMaskSet &channelMasks,
+                 const SampleRateSet &samplingRateCollection);
+
+    audio_format_t getFormat() const { return mFormat; }
+    const ChannelMaskSet &getChannels() const { return mChannelMasks; }
+    const SampleRateSet &getSampleRates() const { return mSamplingRates; }
+    void setChannels(const ChannelMaskSet &channelMasks);
+    void setSampleRates(const SampleRateSet &sampleRates);
+
+    void clear();
+    bool isValid() const { return hasValidFormat() && hasValidRates() && hasValidChannels(); }
+    bool supportsChannels(audio_channel_mask_t channels) const
+    {
+        return mChannelMasks.count(channels) != 0;
+    }
+    bool supportsRate(uint32_t rate) const { return mSamplingRates.count(rate) != 0; }
+
+    bool hasValidFormat() const { return mFormat != AUDIO_FORMAT_DEFAULT; }
+    bool hasValidRates() const { return !mSamplingRates.empty(); }
+    bool hasValidChannels() const { return !mChannelMasks.empty(); }
+
+    void setDynamicChannels(bool dynamic) { mIsDynamicChannels = dynamic; }
+    bool isDynamicChannels() const { return mIsDynamicChannels; }
+
+    void setDynamicRate(bool dynamic) { mIsDynamicRate = dynamic; }
+    bool isDynamicRate() const { return mIsDynamicRate; }
+
+    void setDynamicFormat(bool dynamic) { mIsDynamicFormat = dynamic; }
+    bool isDynamicFormat() const { return mIsDynamicFormat; }
+
+    bool isDynamic() { return mIsDynamicFormat || mIsDynamicChannels || mIsDynamicRate; }
+
+    void dump(std::string *dst, int spaces) const;
+
+private:
+    std::string  mName;
+    audio_format_t mFormat; // The format of an audio profile is only set at initialization time.
+    ChannelMaskSet mChannelMasks;
+    SampleRateSet mSamplingRates;
+
+    bool mIsDynamicFormat = false;
+    bool mIsDynamicChannels = false;
+    bool mIsDynamicRate = false;
+};
+
+class AudioProfileVector : public std::vector<sp<AudioProfile>>
+{
+public:
+    virtual ~AudioProfileVector() = default;
+
+    virtual ssize_t add(const sp<AudioProfile> &profile);
+
+    // If a profile has a dynamic format and that format is currently valid, the profile is
+    // removed by clearProfiles(). Otherwise, AudioProfile::clear() is called on it.
+    virtual void clearProfiles();
+
+    sp<AudioProfile> getFirstValidProfile() const;
+    sp<AudioProfile> getFirstValidProfileFor(audio_format_t format) const;
+    bool hasValidProfile() const { return getFirstValidProfile() != 0; }
+
+    FormatVector getSupportedFormats() const;
+    bool hasDynamicChannelsFor(audio_format_t format) const;
+    bool hasDynamicFormat() const;
+    bool hasDynamicProfile() const;
+    bool hasDynamicRateFor(audio_format_t format) const;
+
+    virtual void dump(std::string *dst, int spaces) const;
+};
+
+bool operator == (const AudioProfile &left, const AudioProfile &right);
+
+} // namespace android
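A short sketch exercising the AudioProfile API above (the sample rate and masks are
illustrative only):

    #include <media/AudioProfile.h>

    using namespace android;

    void audioProfileExample() {
        // Fixed 48 kHz stereo float-PCM profile.
        sp<AudioProfile> pcm = new AudioProfile(
                AUDIO_FORMAT_PCM_FLOAT, AUDIO_CHANNEL_OUT_STEREO, 48000 /*samplingRate*/);
        // Expected: pcm->isValid() is true; pcm->supportsRate(44100) is false,
        // since only the rates passed in are advertised.

        AudioProfileVector profiles;
        profiles.add(pcm);
        profiles.add(AudioProfile::createFullDynamic()); // placeholder, filled at runtime
        // Expected: profiles.hasValidProfile() and profiles.hasDynamicFormat() are true.
    }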
diff --git a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
new file mode 100644
index 0000000..bbe0517
--- /dev/null
+++ b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <media/AudioPort.h>
+#include <utils/Errors.h>
+#include <cutils/config_utils.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
+
+namespace android {
+
+class DeviceDescriptorBase : public AudioPort, public AudioPortConfig
+{
+public:
+    // Note that an empty name refers, by convention, to a generic device.
+    explicit DeviceDescriptorBase(audio_devices_t type);
+
+    virtual ~DeviceDescriptorBase() {}
+
+    audio_devices_t type() const { return mDeviceType; }
+    std::string address() const { return mAddress; }
+    void setAddress(const std::string &address) { mAddress = address; }
+
+    // AudioPortConfig
+    virtual sp<AudioPort> getAudioPort() const {
+        return static_cast<AudioPort*>(const_cast<DeviceDescriptorBase*>(this));
+    }
+    virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
+            const struct audio_port_config *srcConfig = NULL) const;
+
+    // AudioPort
+    virtual void toAudioPort(struct audio_port *port) const;
+
+    void dump(std::string *dst, int spaces, int index,
+              const char* extraInfo = nullptr, bool verbose = true) const;
+    void log() const;
+    std::string toString() const;
+
+protected:
+    std::string mAddress{""};
+    audio_devices_t     mDeviceType;
+};
+
+} // namespace android
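As with the other new headers in libaudiofoundation, a brief illustrative sketch
(device type and address are made up):

    #include <media/DeviceDescriptorBase.h>

    using namespace android;

    void deviceDescriptorExample() {
        sp<DeviceDescriptorBase> device =
                new DeviceDescriptorBase(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP);
        device->setAddress("00:11:22:33:44:55"); // illustrative BT address
        // DeviceDescriptorBase is both an AudioPort and an AudioPortConfig,
        // so it can populate either framework representation:
        struct audio_port port = {};
        device->toAudioPort(&port);
    }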
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index 584c2c0..5837fcf 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -13,12 +13,6 @@
     ],
 
     shared_libs: [
-        "android.hardware.audio.effect@2.0",
-        "android.hardware.audio.effect@4.0",
-        "android.hardware.audio.effect@5.0",
-        "android.hardware.audio@2.0",
-        "android.hardware.audio@4.0",
-        "android.hardware.audio@5.0",
         "libaudiohal@2.0",
         "libaudiohal@4.0",
         "libaudiohal@5.0",
@@ -26,7 +20,8 @@
     ],
 
     header_libs: [
-        "libaudiohal_headers"
+        "libaudiohal_headers",
+        "libbase_headers",
     ]
 }
 
@@ -57,4 +52,10 @@
     name: "libaudiohal_headers",
 
     export_include_dirs: ["include"],
+
+    // This is needed because the stream interface includes media/MicrophoneInfo.h
+    // which is not in any library but has a dependency on headers from libbinder.
+    header_libs: ["libbinder_headers"],
+
+    export_header_lib_headers: ["libbinder_headers"],
 }
diff --git a/media/libaudiohal/DevicesFactoryHalInterface.cpp b/media/libaudiohal/DevicesFactoryHalInterface.cpp
index f86009c..d5336fa 100644
--- a/media/libaudiohal/DevicesFactoryHalInterface.cpp
+++ b/media/libaudiohal/DevicesFactoryHalInterface.cpp
@@ -14,26 +14,16 @@
  * limitations under the License.
  */
 
-#include <android/hardware/audio/2.0/IDevicesFactory.h>
-#include <android/hardware/audio/4.0/IDevicesFactory.h>
-#include <android/hardware/audio/5.0/IDevicesFactory.h>
-
 #include <libaudiohal/FactoryHalHidl.h>
 
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+
 namespace android {
 
 // static
 sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
-    if (hardware::audio::V5_0::IDevicesFactory::getService() != nullptr) {
-        return V5_0::createDevicesFactoryHal();
-    }
-    if (hardware::audio::V4_0::IDevicesFactory::getService() != nullptr) {
-        return V4_0::createDevicesFactoryHal();
-    }
-    if (hardware::audio::V2_0::IDevicesFactory::getService() != nullptr) {
-        return V2_0::createDevicesFactoryHal();
-    }
-    return nullptr;
+    return createPreferedImpl<DevicesFactoryHalInterface>();
 }
 
 } // namespace android
+
diff --git a/media/libaudiohal/EffectsFactoryHalInterface.cpp b/media/libaudiohal/EffectsFactoryHalInterface.cpp
index bd3ef61..d15b14e 100644
--- a/media/libaudiohal/EffectsFactoryHalInterface.cpp
+++ b/media/libaudiohal/EffectsFactoryHalInterface.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,26 +14,15 @@
  * limitations under the License.
  */
 
-#include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
-#include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
-#include <android/hardware/audio/effect/5.0/IEffectsFactory.h>
-
 #include <libaudiohal/FactoryHalHidl.h>
 
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+
 namespace android {
 
 // static
 sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
-    if (hardware::audio::effect::V5_0::IEffectsFactory::getService() != nullptr) {
-        return effect::V5_0::createEffectsFactoryHal();
-    }
-    if (hardware::audio::effect::V4_0::IEffectsFactory::getService() != nullptr) {
-        return effect::V4_0::createEffectsFactoryHal();
-    }
-    if (hardware::audio::effect::V2_0::IEffectsFactory::getService() != nullptr) {
-        return effect::V2_0::createEffectsFactoryHal();
-    }
-    return nullptr;
+    return createPreferedImpl<EffectsFactoryHalInterface>();
 }
 
 // static
diff --git a/media/libaudiohal/impl/Android.bp b/media/libaudiohal/impl/Android.bp
index 88533da..d4a4f41 100644
--- a/media/libaudiohal/impl/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -36,8 +36,6 @@
         "libhardware",
         "libhidlbase",
         "libhidlmemory",
-        "libhidltransport",
-        "libhwbinder",
         "liblog",
         "libmedia_helper",
         "libmediautils",
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
index 5e01e42..1335a0c 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
@@ -35,13 +35,10 @@
 namespace android {
 namespace CPP_VERSION {
 
-DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
-    sp<IDevicesFactory> defaultFactory{IDevicesFactory::getService()};
-    if (!defaultFactory) {
-        ALOGE("Failed to obtain IDevicesFactory/default service, terminating process.");
-        exit(1);
-    }
-    mDeviceFactories.push_back(defaultFactory);
+DevicesFactoryHalHidl::DevicesFactoryHalHidl(sp<IDevicesFactory> devicesFactory) {
+    ALOG_ASSERT(devicesFactory != nullptr, "Provided IDevicesFactory service is NULL");
+
+    mDeviceFactories.push_back(devicesFactory);
     if (MAJOR_VERSION >= 4) {
         // The MSD factory is optional and only available starting at HAL 4.0
         sp<IDevicesFactory> msdFactory{IDevicesFactory::getService(AUDIO_HAL_SERVICE_NAME_MSD)};
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.h b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
index 27e0649..8775e7b 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
@@ -32,18 +32,14 @@
 class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
 {
   public:
+    DevicesFactoryHalHidl(sp<IDevicesFactory> devicesFactory);
+
     // Opens a device with the specified name. To close the device, it is
     // necessary to release references to the returned object.
     virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
-
   private:
-    friend class DevicesFactoryHalHybrid;
-
     std::vector<sp<IDevicesFactory>> mDeviceFactories;
 
-    // Can not be constructed directly by clients.
-    DevicesFactoryHalHidl();
-
     virtual ~DevicesFactoryHalHidl() = default;
 };
 
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
index f337a8b..0e1f1bb 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
@@ -17,16 +17,17 @@
 #define LOG_TAG "DevicesFactoryHalHybrid"
 //#define LOG_NDEBUG 0
 
+#include "DevicesFactoryHalHidl.h"
 #include "DevicesFactoryHalHybrid.h"
 #include "DevicesFactoryHalLocal.h"
-#include "DevicesFactoryHalHidl.h"
+#include <libaudiohal/FactoryHalHidl.h>
 
 namespace android {
 namespace CPP_VERSION {
 
-DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
+DevicesFactoryHalHybrid::DevicesFactoryHalHybrid(sp<IDevicesFactory> hidlFactory)
         : mLocalFactory(new DevicesFactoryHalLocal()),
-          mHidlFactory(new DevicesFactoryHalHidl()) {
+          mHidlFactory(new DevicesFactoryHalHidl(hidlFactory)) {
 }
 
 status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
@@ -36,6 +37,12 @@
     }
     return mLocalFactory->openDevice(name, device);
 }
-
 } // namespace CPP_VERSION
+
+template <>
+sp<DevicesFactoryHalInterface> createFactoryHal<AudioHALVersion::CPP_VERSION>() {
+    auto service = hardware::audio::CPP_VERSION::IDevicesFactory::getService();
+    return service ? new CPP_VERSION::DevicesFactoryHalHybrid(service) : nullptr;
+}
+
 } // namespace android
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
index 5ac0d0d..545bb70 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
@@ -17,17 +17,20 @@
 #ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_H
 #define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_H
 
+#include PATH(android/hardware/audio/FILE_VERSION/IDevicesFactory.h)
 #include <media/audiohal/DevicesFactoryHalInterface.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 
+using ::android::hardware::audio::CPP_VERSION::IDevicesFactory;
+
 namespace android {
 namespace CPP_VERSION {
 
 class DevicesFactoryHalHybrid : public DevicesFactoryHalInterface
 {
   public:
-    DevicesFactoryHalHybrid();
+    DevicesFactoryHalHybrid(sp<IDevicesFactory> hidlFactory);
 
     // Opens a device with the specified name. To close the device, it is
     // necessary to release references to the returned object.
@@ -38,10 +41,6 @@
     sp<DevicesFactoryHalInterface> mHidlFactory;
 };
 
-sp<DevicesFactoryHalInterface> createDevicesFactoryHal() {
-    return new DevicesFactoryHalHybrid();
-}
-
 } // namespace CPP_VERSION
 } // namespace android
 
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index 7fd6bde..ba7b195 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -19,11 +19,12 @@
 
 #include <cutils/native_handle.h>
 
-#include "EffectsFactoryHalHidl.h"
 #include "ConversionHelperHidl.h"
 #include "EffectBufferHalHidl.h"
 #include "EffectHalHidl.h"
+#include "EffectsFactoryHalHidl.h"
 #include "HidlUtils.h"
+#include <libaudiohal/FactoryHalHidl.h>
 
 using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
 using ::android::hardware::Return;
@@ -35,12 +36,10 @@
 using namespace ::android::hardware::audio::common::CPP_VERSION;
 using namespace ::android::hardware::audio::effect::CPP_VERSION;
 
-EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
-    mEffectsFactory = IEffectsFactory::getService();
-    if (mEffectsFactory == 0) {
-        ALOGE("Failed to obtain IEffectsFactory service, terminating process.");
-        exit(1);
-    }
+EffectsFactoryHalHidl::EffectsFactoryHalHidl(sp<IEffectsFactory> effectsFactory)
+        : ConversionHelperHidl("EffectsFactory") {
+    ALOG_ASSERT(effectsFactory != nullptr, "Provided IEffectsFactory service is NULL");
+    mEffectsFactory = effectsFactory;
 }
 
 status_t EffectsFactoryHalHidl::queryAllDescriptors() {
@@ -147,4 +146,11 @@
 
 } // namespace CPP_VERSION
 } // namespace effect
+
+template<>
+sp<EffectsFactoryHalInterface> createFactoryHal<AudioHALVersion::CPP_VERSION>() {
+    auto service = hardware::audio::effect::CPP_VERSION::IEffectsFactory::getService();
+    return service ? new effect::CPP_VERSION::EffectsFactoryHalHidl(service) : nullptr;
+}
+
 } // namespace android
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.h b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
index 01178ff..2828513 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
@@ -18,7 +18,6 @@
 #define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
 
 #include PATH(android/hardware/audio/effect/FILE_VERSION/IEffectsFactory.h)
-#include PATH(android/hardware/audio/effect/FILE_VERSION/types.h)
 #include <media/audiohal/EffectsFactoryHalInterface.h>
 
 #include "ConversionHelperHidl.h"
@@ -34,7 +33,7 @@
 class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
 {
   public:
-    EffectsFactoryHalHidl();
+    EffectsFactoryHalHidl(sp<IEffectsFactory> effectsFactory);
 
     // Returns the number of different effects in all loaded libraries.
     virtual status_t queryNumberEffects(uint32_t *pNumEffects);
@@ -66,10 +65,6 @@
     status_t queryAllDescriptors();
 };
 
-sp<EffectsFactoryHalInterface> createEffectsFactoryHal() {
-    return new EffectsFactoryHalHidl();
-}
-
 } // namespace CPP_VERSION
 } // namespace effect
 } // namespace android
diff --git a/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
index c7319d0..829f99c 100644
--- a/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
+++ b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
@@ -23,33 +23,42 @@
 #include <media/audiohal/EffectsFactoryHalInterface.h>
 #include <utils/StrongPointer.h>
 
+#include <array>
+#include <utility>
+
 namespace android {
 
-namespace effect {
-namespace V2_0 {
-sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
-} // namespace V2_0
+/** Supported HAL versions, in order of preference.
+ * Implementations should specialize `create*FactoryHal` for their version.
+ * Clients should use `createPreferedImpl<*FactoryHal>()` to instantiate
+ * the preferred available implementation.
+ */
+enum class AudioHALVersion {
+    V5_0,
+    V4_0,
+    V2_0,
+    end, // used for iterating over supported versions
+};
 
-namespace V4_0 {
-sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
-} // namespace V4_0
+/** Template function to be fully specialized for each version and each Interface. */
+template <AudioHALVersion, class Interface>
+sp<Interface> createFactoryHal();
 
-namespace V5_0 {
-sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
-} // namespace V5_0
-} // namespace effect
+/** @return the preferred available implementation, or nullptr if none is available. */
+template <class Interface, AudioHALVersion version = AudioHALVersion{}>
+static sp<Interface> createPreferedImpl() {
+    if constexpr (version == AudioHALVersion::end) {
+        return nullptr; // tried all versions, all returned nullptr
+    } else {
+        if (auto created = createFactoryHal<version, Interface>(); created != nullptr) {
+           return created;
+        }
 
-namespace V2_0 {
-sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
-} // namespace V2_0
+        using Raw = std::underlying_type_t<AudioHALVersion>; // cast needed, as enum classes do not support ++
+        return createPreferedImpl<Interface, AudioHALVersion(Raw(version) + 1)>();
+    }
+}
 
-namespace V4_0 {
-sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
-} // namespace V4_0
-
-namespace V5_0 {
-sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
-} // namespace V5_0
 
 } // namespace android
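A hypothetical sketch of how a versioned factory plugs into this mechanism (the
"Widget" interface and HIDL service are invented for illustration; the real wiring for
DevicesFactory and EffectsFactory is in the .cpp changes above):

    // In the version-specific implementation library (built once per CPP_VERSION,
    // shown here for 4.0):
    template <>
    sp<WidgetFactoryHalInterface> createFactoryHal<AudioHALVersion::V4_0>() {
        auto service = hardware::audio::V4_0::IWidgetFactory::getService(); // hypothetical
        return service ? new V4_0::WidgetFactoryHalHidl(service) : nullptr;
    }

    // In the version-independent entry point: try V5_0, then V4_0, then V2_0,
    // and return the first implementation whose HIDL service is registered.
    sp<WidgetFactoryHalInterface> WidgetFactoryHalInterface::create() {
        return createPreferedImpl<WidgetFactoryHalInterface>();
    }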
 
diff --git a/media/libaudioprocessing/Android.bp b/media/libaudioprocessing/Android.bp
index cb78063..e8aa700 100644
--- a/media/libaudioprocessing/Android.bp
+++ b/media/libaudioprocessing/Android.bp
@@ -3,20 +3,13 @@
 
     export_include_dirs: ["include"],
 
+    header_libs: ["libaudioclient_headers"],
+
     shared_libs: [
-        "libaudiohal",
         "libaudioutils",
         "libcutils",
         "liblog",
-        "libnbaio",
-        "libnblog",
-        "libsonic",
         "libutils",
-        "libvibrator",
-    ],
-
-    header_libs: [
-        "libbase_headers",
     ],
 
     cflags: [
@@ -33,18 +26,31 @@
     defaults: ["libaudioprocessing_defaults"],
 
     srcs: [
+        "AudioMixer.cpp",
         "BufferProviders.cpp",
         "RecordBufferConverter.cpp",
     ],
-    whole_static_libs: ["libaudioprocessing_arm"],
+
+    header_libs: [
+        "libbase_headers",
+    ],
+
+    shared_libs: [
+        "libaudiohal",
+        "libsonic",
+        "libvibrator",
+    ],
+
+    whole_static_libs: ["libaudioprocessing_base"],
 }
 
 cc_library_static {
-    name: "libaudioprocessing_arm",
+    name: "libaudioprocessing_base",
     defaults: ["libaudioprocessing_defaults"],
+    vendor_available: true,
 
     srcs: [
-        "AudioMixer.cpp",
+        "AudioMixerBase.cpp",
         "AudioResampler.cpp",
         "AudioResamplerCubic.cpp",
         "AudioResamplerSinc.cpp",
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index f7cc096..c0b11a4 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "AudioMixer"
 //#define LOG_NDEBUG 0
 
+#include <sstream>
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
@@ -27,9 +28,6 @@
 #include <utils/Errors.h>
 #include <utils/Log.h>
 
-#include <cutils/compiler.h>
-#include <utils/Debug.h>
-
 #include <system/audio.h>
 
 #include <audio_utils/primitives.h>
@@ -58,138 +56,15 @@
 #define ALOGVV(a...) do { } while (0)
 #endif
 
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
-#endif
-
-// Set kUseNewMixer to true to use the new mixer engine always. Otherwise the
-// original code will be used for stereo sinks, the new mixer for multichannel.
-static constexpr bool kUseNewMixer = true;
-
-// Set kUseFloat to true to allow floating input into the mixer engine.
-// If kUseNewMixer is false, this is ignored or may be overridden internally
-// because of downmix/upmix support.
-static constexpr bool kUseFloat = true;
-
-#ifdef FLOAT_AUX
-using TYPE_AUX = float;
-static_assert(kUseNewMixer && kUseFloat,
-        "kUseNewMixer and kUseFloat must be true for FLOAT_AUX option");
-#else
-using TYPE_AUX = int32_t; // q4.27
-#endif
-
 // Set to default copy buffer size in frames for input processing.
-static const size_t kCopyBufferFrameCount = 256;
+static constexpr size_t kCopyBufferFrameCount = 256;
 
 namespace android {
 
 // ----------------------------------------------------------------------------
 
-static inline audio_format_t selectMixerInFormat(audio_format_t inputFormat __unused) {
-    return kUseFloat && kUseNewMixer ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
-}
-
-status_t AudioMixer::create(
-        int name, audio_channel_mask_t channelMask, audio_format_t format, int sessionId)
-{
-    LOG_ALWAYS_FATAL_IF(exists(name), "name %d already exists", name);
-
-    if (!isValidChannelMask(channelMask)) {
-        ALOGE("%s invalid channelMask: %#x", __func__, channelMask);
-        return BAD_VALUE;
-    }
-    if (!isValidFormat(format)) {
-        ALOGE("%s invalid format: %#x", __func__, format);
-        return BAD_VALUE;
-    }
-
-    auto t = std::make_shared<Track>();
-    {
-        // TODO: move initialization to the Track constructor.
-        // assume default parameters for the track, except where noted below
-        t->needs = 0;
-
-        // Integer volume.
-        // Currently integer volume is kept for the legacy integer mixer.
-        // Will be removed when the legacy mixer path is removed.
-        t->volume[0] = 0;
-        t->volume[1] = 0;
-        t->prevVolume[0] = 0 << 16;
-        t->prevVolume[1] = 0 << 16;
-        t->volumeInc[0] = 0;
-        t->volumeInc[1] = 0;
-        t->auxLevel = 0;
-        t->auxInc = 0;
-        t->prevAuxLevel = 0;
-
-        // Floating point volume.
-        t->mVolume[0] = 0.f;
-        t->mVolume[1] = 0.f;
-        t->mPrevVolume[0] = 0.f;
-        t->mPrevVolume[1] = 0.f;
-        t->mVolumeInc[0] = 0.;
-        t->mVolumeInc[1] = 0.;
-        t->mAuxLevel = 0.;
-        t->mAuxInc = 0.;
-        t->mPrevAuxLevel = 0.;
-
-        // no initialization needed
-        // t->frameCount
-        t->mHapticChannelMask = channelMask & AUDIO_CHANNEL_HAPTIC_ALL;
-        t->mHapticChannelCount = audio_channel_count_from_out_mask(t->mHapticChannelMask);
-        channelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
-        t->channelCount = audio_channel_count_from_out_mask(channelMask);
-        t->enabled = false;
-        ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
-                "Non-stereo channel mask: %d\n", channelMask);
-        t->channelMask = channelMask;
-        t->sessionId = sessionId;
-        // setBufferProvider(name, AudioBufferProvider *) is required before enable(name)
-        t->bufferProvider = NULL;
-        t->buffer.raw = NULL;
-        // no initialization needed
-        // t->buffer.frameCount
-        t->hook = NULL;
-        t->mIn = NULL;
-        t->sampleRate = mSampleRate;
-        // setParameter(name, TRACK, MAIN_BUFFER, mixBuffer) is required before enable(name)
-        t->mainBuffer = NULL;
-        t->auxBuffer = NULL;
-        t->mInputBufferProvider = NULL;
-        t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
-        t->mFormat = format;
-        t->mMixerInFormat = selectMixerInFormat(format);
-        t->mDownmixRequiresFormat = AUDIO_FORMAT_INVALID; // no format required
-        t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits(
-                AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
-        t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
-        t->mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
-        // haptic
-        t->mHapticPlaybackEnabled = false;
-        t->mHapticIntensity = HAPTIC_SCALE_NONE;
-        t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
-        t->mMixerHapticChannelCount = 0;
-        t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
-        t->mAdjustOutChannelCount = t->channelCount + t->mMixerHapticChannelCount;
-        t->mAdjustNonDestructiveInChannelCount = t->mAdjustOutChannelCount;
-        t->mAdjustNonDestructiveOutChannelCount = t->channelCount;
-        t->mKeepContractedChannels = false;
-        // Check the downmixing (or upmixing) requirements.
-        status_t status = t->prepareForDownmix();
-        if (status != OK) {
-            ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask);
-            return BAD_VALUE;
-        }
-        // prepareForDownmix() may change mDownmixRequiresFormat
-        ALOGVV("mMixerFormat:%#x  mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
-        t->prepareForReformat();
-        t->prepareForAdjustChannelsNonDestructive(mFrameCount);
-        t->prepareForAdjustChannels();
-
-        mTracks[name] = t;
-        return OK;
-    }
+bool AudioMixer::isValidChannelMask(audio_channel_mask_t channelMask) const {
+    return audio_channel_mask_is_valid(channelMask); // the RemixBufferProvider is flexible.
 }
 
 // Called when channel masks have changed for a track name
@@ -198,7 +73,7 @@
 bool AudioMixer::setChannelMasks(int name,
         audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) {
     LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
-    const std::shared_ptr<Track> &track = mTracks[name];
+    const std::shared_ptr<Track> &track = getTrack(name);
 
     if (trackChannelMask == (track->channelMask | track->mHapticChannelMask)
             && mixerChannelMask == (track->mMixerChannelMask | track->mMixerHapticChannelMask)) {
@@ -255,14 +130,8 @@
     track->prepareForAdjustChannelsNonDestructive(mFrameCount);
     track->prepareForAdjustChannels();
 
-    if (track->mResampler.get() != nullptr) {
-        // resampler channels may have changed.
-        const uint32_t resetToSampleRate = track->sampleRate;
-        track->mResampler.reset(nullptr);
-        track->sampleRate = mSampleRate; // without resampler, track rate is device sample rate.
-        // recreate the resampler with updated format, channels, saved sampleRate.
-        track->setResampler(resetToSampleRate /*trackSampleRate*/, mSampleRate /*devSampleRate*/);
-    }
+    // Resampler channels may have changed.
+    track->recreateResampler(mSampleRate);
     return true;
 }
 
@@ -477,171 +346,10 @@
     }
 }
 
-void AudioMixer::destroy(int name)
-{
-    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
-    ALOGV("deleteTrackName(%d)", name);
-
-    if (mTracks[name]->enabled) {
-        invalidate();
-    }
-    mTracks.erase(name); // deallocate track
-}
-
-void AudioMixer::enable(int name)
-{
-    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
-    const std::shared_ptr<Track> &track = mTracks[name];
-
-    if (!track->enabled) {
-        track->enabled = true;
-        ALOGV("enable(%d)", name);
-        invalidate();
-    }
-}
-
-void AudioMixer::disable(int name)
-{
-    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
-    const std::shared_ptr<Track> &track = mTracks[name];
-
-    if (track->enabled) {
-        track->enabled = false;
-        ALOGV("disable(%d)", name);
-        invalidate();
-    }
-}
-
-/* Sets the volume ramp variables for the AudioMixer.
- *
- * The volume ramp variables are used to transition from the previous
- * volume to the set volume.  ramp controls the duration of the transition.
- * Its value is typically one state framecount period, but may also be 0,
- * meaning "immediate."
- *
- * FIXME: 1) Volume ramp is enabled only if there is a nonzero integer increment
- * even if there is a nonzero floating point increment (in that case, the volume
- * change is immediate).  This restriction should be changed when the legacy mixer
- * is removed (see #2).
- * FIXME: 2) Integer volume variables are used for Legacy mixing and should be removed
- * when no longer needed.
- *
- * @param newVolume set volume target in floating point [0.0, 1.0].
- * @param ramp number of frames to increment over. if ramp is 0, the volume
- * should be set immediately.  Currently ramp should not exceed 65535 (frames).
- * @param pIntSetVolume pointer to the U4.12 integer target volume, set on return.
- * @param pIntPrevVolume pointer to the U4.28 integer previous volume, set on return.
- * @param pIntVolumeInc pointer to the U4.28 increment per output audio frame, set on return.
- * @param pSetVolume pointer to the float target volume, set on return.
- * @param pPrevVolume pointer to the float previous volume, set on return.
- * @param pVolumeInc pointer to the float increment per output audio frame, set on return.
- * @return true if the volume has changed, false if volume is same.
- */
-static inline bool setVolumeRampVariables(float newVolume, int32_t ramp,
-        int16_t *pIntSetVolume, int32_t *pIntPrevVolume, int32_t *pIntVolumeInc,
-        float *pSetVolume, float *pPrevVolume, float *pVolumeInc) {
-    // check floating point volume to see if it is identical to the previously
-    // set volume.
-    // We do not use a tolerance here (and reject changes too small)
-    // as it may be confusing to use a different value than the one set.
-    // If the resulting volume is too small to ramp, it is a direct set of the volume.
-    if (newVolume == *pSetVolume) {
-        return false;
-    }
-    if (newVolume < 0) {
-        newVolume = 0; // should not have negative volumes
-    } else {
-        switch (fpclassify(newVolume)) {
-        case FP_SUBNORMAL:
-        case FP_NAN:
-            newVolume = 0;
-            break;
-        case FP_ZERO:
-            break; // zero volume is fine
-        case FP_INFINITE:
-            // Infinite volume could be handled consistently since
-            // floating point math saturates at infinities,
-            // but we limit volume to unity gain float.
-            // ramp = 0; break;
-            //
-            newVolume = AudioMixer::UNITY_GAIN_FLOAT;
-            break;
-        case FP_NORMAL:
-        default:
-            // Floating point does not have problems with overflow wrap
-            // that integer has.  However, we limit the volume to
-            // unity gain here.
-            // TODO: Revisit the volume limitation and perhaps parameterize.
-            if (newVolume > AudioMixer::UNITY_GAIN_FLOAT) {
-                newVolume = AudioMixer::UNITY_GAIN_FLOAT;
-            }
-            break;
-        }
-    }
-
-    // set floating point volume ramp
-    if (ramp != 0) {
-        // when the ramp completes, *pPrevVolume is set to *pSetVolume, so there
-        // is no computational mismatch; hence equality is checked here.
-        ALOGD_IF(*pPrevVolume != *pSetVolume, "previous float ramp hasn't finished,"
-                " prev:%f  set_to:%f", *pPrevVolume, *pSetVolume);
-        const float inc = (newVolume - *pPrevVolume) / ramp; // could be inf, nan, subnormal
-        // could be inf, cannot be nan, subnormal
-        const float maxv = std::max(newVolume, *pPrevVolume);
-
-        if (isnormal(inc) // inc must be a normal number (no subnormals, infinite, nan)
-                && maxv + inc != maxv) { // inc must make forward progress
-            *pVolumeInc = inc;
-            // ramp is set now.
-            // Note: if newVolume is 0, then near the end of the ramp,
-            // it may be possible that the ramped volume may be subnormal or
-            // temporarily negative by a small amount or subnormal due to floating
-            // point inaccuracies.
-        } else {
-            ramp = 0; // ramp not allowed
-        }
-    }
-
-    // compute and check integer volume, no need to check negative values
-    // The integer volume is limited to "unity_gain" to avoid wrapping and other
-    // audio artifacts, so it never reaches the range limit of U4.28.
-    // We safely use signed 16 and 32 bit integers here.
-    const float scaledVolume = newVolume * AudioMixer::UNITY_GAIN_INT; // not neg, subnormal, nan
-    const int32_t intVolume = (scaledVolume >= (float)AudioMixer::UNITY_GAIN_INT) ?
-            AudioMixer::UNITY_GAIN_INT : (int32_t)scaledVolume;
-
-    // set integer volume ramp
-    if (ramp != 0) {
-        // integer volume is U4.12 (to use 16 bit multiplies), but ramping uses U4.28.
-        // when the ramp completes, *pIntPrevVolume is set to *pIntSetVolume << 16, so there
-        // is no computational mismatch; hence equality is checked here.
-        ALOGD_IF(*pIntPrevVolume != *pIntSetVolume << 16, "previous int ramp hasn't finished,"
-                " prev:%d  set_to:%d", *pIntPrevVolume, *pIntSetVolume << 16);
-        const int32_t inc = ((intVolume << 16) - *pIntPrevVolume) / ramp;
-
-        if (inc != 0) { // inc must make forward progress
-            *pIntVolumeInc = inc;
-        } else {
-            ramp = 0; // ramp not allowed
-        }
-    }
-
-    // if no ramp, or ramp not allowed, then clear float and integer increments
-    if (ramp == 0) {
-        *pVolumeInc = 0;
-        *pPrevVolume = newVolume;
-        *pIntVolumeInc = 0;
-        *pIntPrevVolume = intVolume << 16;
-    }
-    *pSetVolume = newVolume;
-    *pIntSetVolume = intVolume;
-    return true;
-}
-
 void AudioMixer::setParameter(int name, int target, int param, void *value)
 {
     LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
-    const std::shared_ptr<Track> &track = mTracks[name];
+    const std::shared_ptr<Track> &track = getTrack(name);
 
     int valueInt = static_cast<int>(reinterpret_cast<uintptr_t>(value));
     int32_t *valueBuf = reinterpret_cast<int32_t*>(value);
@@ -670,11 +378,7 @@
             }
             break;
         case AUX_BUFFER:
-            if (track->auxBuffer != valueBuf) {
-                track->auxBuffer = valueBuf;
-                ALOGV("setParameter(TRACK, AUX_BUFFER, %p)", valueBuf);
-                invalidate();
-            }
+            AudioMixerBase::setParameter(name, target, param, value);
             break;
         case FORMAT: {
             audio_format_t format = static_cast<audio_format_t>(valueInt);
@@ -730,127 +434,38 @@
         break;
 
     case RESAMPLE:
-        switch (param) {
-        case SAMPLE_RATE:
-            ALOG_ASSERT(valueInt > 0, "bad sample rate %d", valueInt);
-            if (track->setResampler(uint32_t(valueInt), mSampleRate)) {
-                ALOGV("setParameter(RESAMPLE, SAMPLE_RATE, %u)",
-                        uint32_t(valueInt));
-                invalidate();
-            }
-            break;
-        case RESET:
-            track->resetResampler();
-            invalidate();
-            break;
-        case REMOVE:
-            track->mResampler.reset(nullptr);
-            track->sampleRate = mSampleRate;
-            invalidate();
-            break;
-        default:
-            LOG_ALWAYS_FATAL("setParameter resample: bad param %d", param);
-        }
-        break;
-
     case RAMP_VOLUME:
     case VOLUME:
+        AudioMixerBase::setParameter(name, target, param, value);
+        break;
+    case TIMESTRETCH:
         switch (param) {
-        case AUXLEVEL:
-            if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
-                    target == RAMP_VOLUME ? mFrameCount : 0,
-                    &track->auxLevel, &track->prevAuxLevel, &track->auxInc,
-                    &track->mAuxLevel, &track->mPrevAuxLevel, &track->mAuxInc)) {
-                ALOGV("setParameter(%s, AUXLEVEL: %04x)",
-                        target == VOLUME ? "VOLUME" : "RAMP_VOLUME", track->auxLevel);
-                invalidate();
+        case PLAYBACK_RATE: {
+            const AudioPlaybackRate *playbackRate =
+                    reinterpret_cast<AudioPlaybackRate*>(value);
+            ALOGW_IF(!isAudioPlaybackRateValid(*playbackRate),
+                    "bad parameters speed %f, pitch %f",
+                    playbackRate->mSpeed, playbackRate->mPitch);
+            if (track->setPlaybackRate(*playbackRate)) {
+                ALOGV("setParameter(TIMESTRETCH, PLAYBACK_RATE, STRETCH_MODE, FALLBACK_MODE "
+                        "%f %f %d %d",
+                        playbackRate->mSpeed,
+                        playbackRate->mPitch,
+                        playbackRate->mStretchMode,
+                        playbackRate->mFallbackMode);
+                // invalidate();  (should not require reconfigure)
             }
-            break;
+        } break;
         default:
-            if ((unsigned)param >= VOLUME0 && (unsigned)param < VOLUME0 + MAX_NUM_VOLUMES) {
-                if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
-                        target == RAMP_VOLUME ? mFrameCount : 0,
-                        &track->volume[param - VOLUME0],
-                        &track->prevVolume[param - VOLUME0],
-                        &track->volumeInc[param - VOLUME0],
-                        &track->mVolume[param - VOLUME0],
-                        &track->mPrevVolume[param - VOLUME0],
-                        &track->mVolumeInc[param - VOLUME0])) {
-                    ALOGV("setParameter(%s, VOLUME%d: %04x)",
-                            target == VOLUME ? "VOLUME" : "RAMP_VOLUME", param - VOLUME0,
-                                    track->volume[param - VOLUME0]);
-                    invalidate();
-                }
-            } else {
-                LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
-            }
+            LOG_ALWAYS_FATAL("setParameter timestretch: bad param %d", param);
         }
         break;
-        case TIMESTRETCH:
-            switch (param) {
-            case PLAYBACK_RATE: {
-                const AudioPlaybackRate *playbackRate =
-                        reinterpret_cast<AudioPlaybackRate*>(value);
-                ALOGW_IF(!isAudioPlaybackRateValid(*playbackRate),
-                        "bad parameters speed %f, pitch %f",
-                        playbackRate->mSpeed, playbackRate->mPitch);
-                if (track->setPlaybackRate(*playbackRate)) {
-                    ALOGV("setParameter(TIMESTRETCH, PLAYBACK_RATE, STRETCH_MODE, FALLBACK_MODE "
-                            "%f %f %d %d",
-                            playbackRate->mSpeed,
-                            playbackRate->mPitch,
-                            playbackRate->mStretchMode,
-                            playbackRate->mFallbackMode);
-                    // invalidate();  (should not require reconfigure)
-                }
-            } break;
-            default:
-                LOG_ALWAYS_FATAL("setParameter timestretch: bad param %d", param);
-            }
-            break;
 
     default:
         LOG_ALWAYS_FATAL("setParameter: bad target %d", target);
     }
 }
 
-bool AudioMixer::Track::setResampler(uint32_t trackSampleRate, uint32_t devSampleRate)
-{
-    if (trackSampleRate != devSampleRate || mResampler.get() != nullptr) {
-        if (sampleRate != trackSampleRate) {
-            sampleRate = trackSampleRate;
-            if (mResampler.get() == nullptr) {
-                ALOGV("Creating resampler from track %d Hz to device %d Hz",
-                        trackSampleRate, devSampleRate);
-                AudioResampler::src_quality quality;
-                // force lowest quality level resampler if use case isn't music or video
-                // FIXME this is flawed for dynamic sample rates, as we choose the resampler
-                // quality level based on the initial ratio, but that could change later.
-                // Should have a way to distinguish tracks with static ratios vs. dynamic ratios.
-                if (isMusicRate(trackSampleRate)) {
-                    quality = AudioResampler::DEFAULT_QUALITY;
-                } else {
-                    quality = AudioResampler::DYN_LOW_QUALITY;
-                }
-
-                // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
-                // but if none exists, it is the channel count (1 for mono).
-                const int resamplerChannelCount = mDownmixerBufferProvider.get() != nullptr
-                        ? mMixerChannelCount : channelCount;
-                ALOGVV("Creating resampler:"
-                        " format(%#x) channels(%d) devSampleRate(%u) quality(%d)\n",
-                        mMixerInFormat, resamplerChannelCount, devSampleRate, quality);
-                mResampler.reset(AudioResampler::create(
-                        mMixerInFormat,
-                        resamplerChannelCount,
-                        devSampleRate, quality));
-            }
-            return true;
-        }
-    }
-    return false;
-}
-
 bool AudioMixer::Track::setPlaybackRate(const AudioPlaybackRate &playbackRate)
 {
     if ((mTimestretchBufferProvider.get() == nullptr &&
@@ -863,8 +478,7 @@
     if (mTimestretchBufferProvider.get() == nullptr) {
         // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
         // but if none exists, it is the channel count (1 for mono).
-        const int timestretchChannelCount = mDownmixerBufferProvider.get() != nullptr
-                ? mMixerChannelCount : channelCount;
+        const int timestretchChannelCount = getOutputChannelCount();
         mTimestretchBufferProvider.reset(new TimestretchBufferProvider(timestretchChannelCount,
                 mMixerInFormat, sampleRate, playbackRate));
         reconfigureBufferProviders();
@@ -875,84 +489,10 @@
     return true;
 }
 
-/* Checks to see if the volume ramp has completed and clears the increment
- * variables appropriately.
- *
- * FIXME: There is code to handle int/float ramp variable switchover should it not
- * complete within a mixer buffer processing call, but it is preferred to avoid switchover
- * due to precision issues.  The switchover code is included for legacy code purposes
- * and can be removed once the integer volume is removed.
- *
- * It is not sufficient to clear only the volumeInc integer variable because
- * if one channel requires ramping, all channels are ramped.
- *
- * There is a bit of duplicated code here, but it keeps backward compatibility.
- */
-inline void AudioMixer::Track::adjustVolumeRamp(bool aux, bool useFloat)
-{
-    if (useFloat) {
-        for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
-            if ((mVolumeInc[i] > 0 && mPrevVolume[i] + mVolumeInc[i] >= mVolume[i]) ||
-                     (mVolumeInc[i] < 0 && mPrevVolume[i] + mVolumeInc[i] <= mVolume[i])) {
-                volumeInc[i] = 0;
-                prevVolume[i] = volume[i] << 16;
-                mVolumeInc[i] = 0.;
-                mPrevVolume[i] = mVolume[i];
-            } else {
-                //ALOGV("ramp: %f %f %f", mVolume[i], mPrevVolume[i], mVolumeInc[i]);
-                prevVolume[i] = u4_28_from_float(mPrevVolume[i]);
-            }
-        }
-    } else {
-        for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
-            if (((volumeInc[i]>0) && (((prevVolume[i]+volumeInc[i])>>16) >= volume[i])) ||
-                    ((volumeInc[i]<0) && (((prevVolume[i]+volumeInc[i])>>16) <= volume[i]))) {
-                volumeInc[i] = 0;
-                prevVolume[i] = volume[i] << 16;
-                mVolumeInc[i] = 0.;
-                mPrevVolume[i] = mVolume[i];
-            } else {
-                //ALOGV("ramp: %d %d %d", volume[i] << 16, prevVolume[i], volumeInc[i]);
-                mPrevVolume[i]  = float_from_u4_28(prevVolume[i]);
-            }
-        }
-    }
-
-    if (aux) {
-#ifdef FLOAT_AUX
-        if (useFloat) {
-            if ((mAuxInc > 0.f && mPrevAuxLevel + mAuxInc >= mAuxLevel) ||
-                    (mAuxInc < 0.f && mPrevAuxLevel + mAuxInc <= mAuxLevel)) {
-                auxInc = 0;
-                prevAuxLevel = auxLevel << 16;
-                mAuxInc = 0.f;
-                mPrevAuxLevel = mAuxLevel;
-            }
-        } else
-#endif
-        if ((auxInc > 0 && ((prevAuxLevel + auxInc) >> 16) >= auxLevel) ||
-                (auxInc < 0 && ((prevAuxLevel + auxInc) >> 16) <= auxLevel)) {
-            auxInc = 0;
-            prevAuxLevel = auxLevel << 16;
-            mAuxInc = 0.f;
-            mPrevAuxLevel = mAuxLevel;
-        }
-    }
-}
-
-size_t AudioMixer::getUnreleasedFrames(int name) const
-{
-    const auto it = mTracks.find(name);
-    if (it != mTracks.end()) {
-        return it->second->getUnreleasedFrames();
-    }
-    return 0;
-}
-
 void AudioMixer::setBufferProvider(int name, AudioBufferProvider* bufferProvider)
 {
     LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
-    const std::shared_ptr<Track> &track = mTracks[name];
+    const std::shared_ptr<Track> &track = getTrack(name);
 
     if (track->mInputBufferProvider == bufferProvider) {
         return; // don't reset any buffer providers if identical.
@@ -976,679 +516,6 @@
     track->reconfigureBufferProviders();
 }
 
-void AudioMixer::process__validate()
-{
-    // TODO: fix all16BitsStereNoResample logic to
-    // either properly handle muted tracks (it should ignore them)
-    // or remove altogether as an obsolete optimization.
-    bool all16BitsStereoNoResample = true;
-    bool resampling = false;
-    bool volumeRamp = false;
-
-    mEnabled.clear();
-    mGroups.clear();
-    for (const auto &pair : mTracks) {
-        const int name = pair.first;
-        const std::shared_ptr<Track> &t = pair.second;
-        if (!t->enabled) continue;
-
-        mEnabled.emplace_back(name);  // we add to mEnabled in order of name.
-        mGroups[t->mainBuffer].emplace_back(name); // mGroups also in order of name.
-
-        uint32_t n = 0;
-        // FIXME can overflow (mask is only 3 bits)
-        n |= NEEDS_CHANNEL_1 + t->channelCount - 1;
-        if (t->doesResample()) {
-            n |= NEEDS_RESAMPLE;
-        }
-        if (t->auxLevel != 0 && t->auxBuffer != NULL) {
-            n |= NEEDS_AUX;
-        }
-
-        if (t->volumeInc[0]|t->volumeInc[1]) {
-            volumeRamp = true;
-        } else if (!t->doesResample() && t->volumeRL == 0) {
-            n |= NEEDS_MUTE;
-        }
-        t->needs = n;
-
-        if (n & NEEDS_MUTE) {
-            t->hook = &Track::track__nop;
-        } else {
-            if (n & NEEDS_AUX) {
-                all16BitsStereoNoResample = false;
-            }
-            if (n & NEEDS_RESAMPLE) {
-                all16BitsStereoNoResample = false;
-                resampling = true;
-                t->hook = Track::getTrackHook(TRACKTYPE_RESAMPLE, t->mMixerChannelCount,
-                        t->mMixerInFormat, t->mMixerFormat);
-                ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
-                        "Track %d needs downmix + resample", name);
-            } else {
-                if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
-                    t->hook = Track::getTrackHook(
-                            (t->mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO  // TODO: MONO_HACK
-                                    && t->channelMask == AUDIO_CHANNEL_OUT_MONO)
-                                ? TRACKTYPE_NORESAMPLEMONO : TRACKTYPE_NORESAMPLE,
-                            t->mMixerChannelCount,
-                            t->mMixerInFormat, t->mMixerFormat);
-                    all16BitsStereoNoResample = false;
-                }
-                if ((n & NEEDS_CHANNEL_COUNT__MASK) >= NEEDS_CHANNEL_2){
-                    t->hook = Track::getTrackHook(TRACKTYPE_NORESAMPLE, t->mMixerChannelCount,
-                            t->mMixerInFormat, t->mMixerFormat);
-                    ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
-                            "Track %d needs downmix", name);
-                }
-            }
-        }
-    }
-
-    // select the processing hooks
-    mHook = &AudioMixer::process__nop;
-    if (mEnabled.size() > 0) {
-        if (resampling) {
-            if (mOutputTemp.get() == nullptr) {
-                mOutputTemp.reset(new int32_t[MAX_NUM_CHANNELS * mFrameCount]);
-            }
-            if (mResampleTemp.get() == nullptr) {
-                mResampleTemp.reset(new int32_t[MAX_NUM_CHANNELS * mFrameCount]);
-            }
-            mHook = &AudioMixer::process__genericResampling;
-        } else {
-            // we keep temp arrays around.
-            mHook = &AudioMixer::process__genericNoResampling;
-            if (all16BitsStereoNoResample && !volumeRamp) {
-                if (mEnabled.size() == 1) {
-                    const std::shared_ptr<Track> &t = mTracks[mEnabled[0]];
-                    if ((t->needs & NEEDS_MUTE) == 0) {
-                        // The check prevents a muted track from acquiring a process hook.
-                        //
-                        // This is dangerous if the track is MONO as that requires
-                        // special case handling due to implicit channel duplication.
-                        // Stereo or Multichannel should actually be fine here.
-                        mHook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
-                                t->mMixerChannelCount, t->mMixerInFormat, t->mMixerFormat);
-                    }
-                }
-            }
-        }
-    }
-
-    ALOGV("mixer configuration change: %zu "
-        "all16BitsStereoNoResample=%d, resampling=%d, volumeRamp=%d",
-        mEnabled.size(), all16BitsStereoNoResample, resampling, volumeRamp);
-
-   process();
-
-    // Now that the volume ramp has been done, set optimal state and
-    // track hooks for subsequent mixer process
-    if (mEnabled.size() > 0) {
-        bool allMuted = true;
-
-        for (const int name : mEnabled) {
-            const std::shared_ptr<Track> &t = mTracks[name];
-            if (!t->doesResample() && t->volumeRL == 0) {
-                t->needs |= NEEDS_MUTE;
-                t->hook = &Track::track__nop;
-            } else {
-                allMuted = false;
-            }
-        }
-        if (allMuted) {
-            mHook = &AudioMixer::process__nop;
-        } else if (all16BitsStereoNoResample) {
-            if (mEnabled.size() == 1) {
-                //const int i = 31 - __builtin_clz(enabledTracks);
-                const std::shared_ptr<Track> &t = mTracks[mEnabled[0]];
-                // Muted single tracks handled by allMuted above.
-                mHook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
-                        t->mMixerChannelCount, t->mMixerInFormat, t->mMixerFormat);
-            }
-        }
-    }
-}
-
-void AudioMixer::Track::track__genericResample(
-        int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux)
-{
-    ALOGVV("track__genericResample\n");
-    mResampler->setSampleRate(sampleRate);
-
-    // ramp gain - resample to temp buffer and scale/mix in 2nd step
-    if (aux != NULL) {
-        // always resample with unity gain when sending to auxiliary buffer to be able
-        // to apply send level after resampling
-        mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
-        memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(int32_t));
-        mResampler->resample(temp, outFrameCount, bufferProvider);
-        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
-            volumeRampStereo(out, outFrameCount, temp, aux);
-        } else {
-            volumeStereo(out, outFrameCount, temp, aux);
-        }
-    } else {
-        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
-            mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
-            memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t));
-            mResampler->resample(temp, outFrameCount, bufferProvider);
-            volumeRampStereo(out, outFrameCount, temp, aux);
-        }
-
-        // constant gain
-        else {
-            mResampler->setVolume(mVolume[0], mVolume[1]);
-            mResampler->resample(out, outFrameCount, bufferProvider);
-        }
-    }
-}
-
-void AudioMixer::Track::track__nop(int32_t* out __unused,
-        size_t outFrameCount __unused, int32_t* temp __unused, int32_t* aux __unused)
-{
-}
-
-void AudioMixer::Track::volumeRampStereo(
-        int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
-{
-    int32_t vl = prevVolume[0];
-    int32_t vr = prevVolume[1];
-    const int32_t vlInc = volumeInc[0];
-    const int32_t vrInc = volumeInc[1];
-
-    //ALOGD("[0] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
-    //        t, vlInc/65536.0f, vl/65536.0f, volume[0],
-    //       (vl + vlInc*frameCount)/65536.0f, frameCount);
-
-    // ramp volume
-    if (CC_UNLIKELY(aux != NULL)) {
-        int32_t va = prevAuxLevel;
-        const int32_t vaInc = auxInc;
-        int32_t l;
-        int32_t r;
-
-        do {
-            l = (*temp++ >> 12);
-            r = (*temp++ >> 12);
-            *out++ += (vl >> 16) * l;
-            *out++ += (vr >> 16) * r;
-            *aux++ += (va >> 17) * (l + r);
-            vl += vlInc;
-            vr += vrInc;
-            va += vaInc;
-        } while (--frameCount);
-        prevAuxLevel = va;
-    } else {
-        do {
-            *out++ += (vl >> 16) * (*temp++ >> 12);
-            *out++ += (vr >> 16) * (*temp++ >> 12);
-            vl += vlInc;
-            vr += vrInc;
-        } while (--frameCount);
-    }
-    prevVolume[0] = vl;
-    prevVolume[1] = vr;
-    adjustVolumeRamp(aux != NULL);
-}
-
-void AudioMixer::Track::volumeStereo(
-        int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
-{
-    const int16_t vl = volume[0];
-    const int16_t vr = volume[1];
-
-    if (CC_UNLIKELY(aux != NULL)) {
-        const int16_t va = auxLevel;
-        do {
-            int16_t l = (int16_t)(*temp++ >> 12);
-            int16_t r = (int16_t)(*temp++ >> 12);
-            out[0] = mulAdd(l, vl, out[0]);
-            int16_t a = (int16_t)(((int32_t)l + r) >> 1);
-            out[1] = mulAdd(r, vr, out[1]);
-            out += 2;
-            aux[0] = mulAdd(a, va, aux[0]);
-            aux++;
-        } while (--frameCount);
-    } else {
-        do {
-            int16_t l = (int16_t)(*temp++ >> 12);
-            int16_t r = (int16_t)(*temp++ >> 12);
-            out[0] = mulAdd(l, vl, out[0]);
-            out[1] = mulAdd(r, vr, out[1]);
-            out += 2;
-        } while (--frameCount);
-    }
-}
-
-void AudioMixer::Track::track__16BitsStereo(
-        int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
-{
-    ALOGVV("track__16BitsStereo\n");
-    const int16_t *in = static_cast<const int16_t *>(mIn);
-
-    if (CC_UNLIKELY(aux != NULL)) {
-        int32_t l;
-        int32_t r;
-        // ramp gain
-        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
-            int32_t vl = prevVolume[0];
-            int32_t vr = prevVolume[1];
-            int32_t va = prevAuxLevel;
-            const int32_t vlInc = volumeInc[0];
-            const int32_t vrInc = volumeInc[1];
-            const int32_t vaInc = auxInc;
-            // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
-            //        t, vlInc/65536.0f, vl/65536.0f, volume[0],
-            //        (vl + vlInc*frameCount)/65536.0f, frameCount);
-
-            do {
-                l = (int32_t)*in++;
-                r = (int32_t)*in++;
-                *out++ += (vl >> 16) * l;
-                *out++ += (vr >> 16) * r;
-                *aux++ += (va >> 17) * (l + r);
-                vl += vlInc;
-                vr += vrInc;
-                va += vaInc;
-            } while (--frameCount);
-
-            prevVolume[0] = vl;
-            prevVolume[1] = vr;
-            prevAuxLevel = va;
-            adjustVolumeRamp(true);
-        }
-
-        // constant gain
-        else {
-            const uint32_t vrl = volumeRL;
-            const int16_t va = (int16_t)auxLevel;
-            do {
-                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
-                int16_t a = (int16_t)(((int32_t)in[0] + in[1]) >> 1);
-                in += 2;
-                out[0] = mulAddRL(1, rl, vrl, out[0]);
-                out[1] = mulAddRL(0, rl, vrl, out[1]);
-                out += 2;
-                aux[0] = mulAdd(a, va, aux[0]);
-                aux++;
-            } while (--frameCount);
-        }
-    } else {
-        // ramp gain
-        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
-            int32_t vl = prevVolume[0];
-            int32_t vr = prevVolume[1];
-            const int32_t vlInc = volumeInc[0];
-            const int32_t vrInc = volumeInc[1];
-
-            // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
-            //        t, vlInc/65536.0f, vl/65536.0f, volume[0],
-            //        (vl + vlInc*frameCount)/65536.0f, frameCount);
-
-            do {
-                *out++ += (vl >> 16) * (int32_t) *in++;
-                *out++ += (vr >> 16) * (int32_t) *in++;
-                vl += vlInc;
-                vr += vrInc;
-            } while (--frameCount);
-
-            prevVolume[0] = vl;
-            prevVolume[1] = vr;
-            adjustVolumeRamp(false);
-        }
-
-        // constant gain
-        else {
-            const uint32_t vrl = volumeRL;
-            do {
-                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
-                in += 2;
-                out[0] = mulAddRL(1, rl, vrl, out[0]);
-                out[1] = mulAddRL(0, rl, vrl, out[1]);
-                out += 2;
-            } while (--frameCount);
-        }
-    }
-    mIn = in;
-}
-
-void AudioMixer::Track::track__16BitsMono(
-        int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
-{
-    ALOGVV("track__16BitsMono\n");
-    const int16_t *in = static_cast<int16_t const *>(mIn);
-
-    if (CC_UNLIKELY(aux != NULL)) {
-        // ramp gain
-        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
-            int32_t vl = prevVolume[0];
-            int32_t vr = prevVolume[1];
-            int32_t va = prevAuxLevel;
-            const int32_t vlInc = volumeInc[0];
-            const int32_t vrInc = volumeInc[1];
-            const int32_t vaInc = auxInc;
-
-            // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
-            //         t, vlInc/65536.0f, vl/65536.0f, volume[0],
-            //         (vl + vlInc*frameCount)/65536.0f, frameCount);
-
-            do {
-                int32_t l = *in++;
-                *out++ += (vl >> 16) * l;
-                *out++ += (vr >> 16) * l;
-                *aux++ += (va >> 16) * l;
-                vl += vlInc;
-                vr += vrInc;
-                va += vaInc;
-            } while (--frameCount);
-
-            prevVolume[0] = vl;
-            prevVolume[1] = vr;
-            prevAuxLevel = va;
-            adjustVolumeRamp(true);
-        }
-        // constant gain
-        else {
-            const int16_t vl = volume[0];
-            const int16_t vr = volume[1];
-            const int16_t va = (int16_t)auxLevel;
-            do {
-                int16_t l = *in++;
-                out[0] = mulAdd(l, vl, out[0]);
-                out[1] = mulAdd(l, vr, out[1]);
-                out += 2;
-                aux[0] = mulAdd(l, va, aux[0]);
-                aux++;
-            } while (--frameCount);
-        }
-    } else {
-        // ramp gain
-        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
-            int32_t vl = prevVolume[0];
-            int32_t vr = prevVolume[1];
-            const int32_t vlInc = volumeInc[0];
-            const int32_t vrInc = volumeInc[1];
-
-            // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
-            //         t, vlInc/65536.0f, vl/65536.0f, volume[0],
-            //         (vl + vlInc*frameCount)/65536.0f, frameCount);
-
-            do {
-                int32_t l = *in++;
-                *out++ += (vl >> 16) * l;
-                *out++ += (vr >> 16) * l;
-                vl += vlInc;
-                vr += vrInc;
-            } while (--frameCount);
-
-            prevVolume[0] = vl;
-            prevVolume[1] = vr;
-            adjustVolumeRamp(false);
-        }
-        // constant gain
-        else {
-            const int16_t vl = volume[0];
-            const int16_t vr = volume[1];
-            do {
-                int16_t l = *in++;
-                out[0] = mulAdd(l, vl, out[0]);
-                out[1] = mulAdd(l, vr, out[1]);
-                out += 2;
-            } while (--frameCount);
-        }
-    }
-    mIn = in;
-}
-
-// no-op case
-void AudioMixer::process__nop()
-{
-    ALOGVV("process__nop\n");
-
-    for (const auto &pair : mGroups) {
-        // process by group of tracks with same output buffer to
-        // avoid multiple memset() on same buffer
-        const auto &group = pair.second;
-
-        const std::shared_ptr<Track> &t = mTracks[group[0]];
-        memset(t->mainBuffer, 0,
-                mFrameCount * audio_bytes_per_frame(
-                        t->mMixerChannelCount + t->mMixerHapticChannelCount, t->mMixerFormat));
-
-        // now consume data
-        for (const int name : group) {
-            const std::shared_ptr<Track> &t = mTracks[name];
-            size_t outFrames = mFrameCount;
-            while (outFrames) {
-                t->buffer.frameCount = outFrames;
-                t->bufferProvider->getNextBuffer(&t->buffer);
-                if (t->buffer.raw == NULL) break;
-                outFrames -= t->buffer.frameCount;
-                t->bufferProvider->releaseBuffer(&t->buffer);
-            }
-        }
-    }
-}
-
-// generic code without resampling
-void AudioMixer::process__genericNoResampling()
-{
-    ALOGVV("process__genericNoResampling\n");
-    int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));
-
-    for (const auto &pair : mGroups) {
-        // process by group of tracks with same output main buffer to
-        // avoid multiple memset() on same buffer
-        const auto &group = pair.second;
-
-        // acquire buffer
-        for (const int name : group) {
-            const std::shared_ptr<Track> &t = mTracks[name];
-            t->buffer.frameCount = mFrameCount;
-            t->bufferProvider->getNextBuffer(&t->buffer);
-            t->frameCount = t->buffer.frameCount;
-            t->mIn = t->buffer.raw;
-        }
-
-        int32_t *out = (int *)pair.first;
-        size_t numFrames = 0;
-        do {
-            const size_t frameCount = std::min((size_t)BLOCKSIZE, mFrameCount - numFrames);
-            memset(outTemp, 0, sizeof(outTemp));
-            for (const int name : group) {
-                const std::shared_ptr<Track> &t = mTracks[name];
-                int32_t *aux = NULL;
-                if (CC_UNLIKELY(t->needs & NEEDS_AUX)) {
-                    aux = t->auxBuffer + numFrames;
-                }
-                for (int outFrames = frameCount; outFrames > 0; ) {
-                    // t->in == nullptr can happen if the track was flushed just after having
-                    // been enabled for mixing.
-                    if (t->mIn == nullptr) {
-                        break;
-                    }
-                    size_t inFrames = (t->frameCount > outFrames)?outFrames:t->frameCount;
-                    if (inFrames > 0) {
-                        (t.get()->*t->hook)(
-                                outTemp + (frameCount - outFrames) * t->mMixerChannelCount,
-                                inFrames, mResampleTemp.get() /* naked ptr */, aux);
-                        t->frameCount -= inFrames;
-                        outFrames -= inFrames;
-                        if (CC_UNLIKELY(aux != NULL)) {
-                            aux += inFrames;
-                        }
-                    }
-                    if (t->frameCount == 0 && outFrames) {
-                        t->bufferProvider->releaseBuffer(&t->buffer);
-                        t->buffer.frameCount = (mFrameCount - numFrames) -
-                                (frameCount - outFrames);
-                        t->bufferProvider->getNextBuffer(&t->buffer);
-                        t->mIn = t->buffer.raw;
-                        if (t->mIn == nullptr) {
-                            break;
-                        }
-                        t->frameCount = t->buffer.frameCount;
-                    }
-                }
-            }
-
-            const std::shared_ptr<Track> &t1 = mTracks[group[0]];
-            convertMixerFormat(out, t1->mMixerFormat, outTemp, t1->mMixerInFormat,
-                    frameCount * t1->mMixerChannelCount);
-            // TODO: fix ugly casting due to choice of out pointer type
-            out = reinterpret_cast<int32_t*>((uint8_t*)out
-                    + frameCount * t1->mMixerChannelCount
-                    * audio_bytes_per_sample(t1->mMixerFormat));
-            numFrames += frameCount;
-        } while (numFrames < mFrameCount);
-
-        // release each track's buffer
-        for (const int name : group) {
-            const std::shared_ptr<Track> &t = mTracks[name];
-            t->bufferProvider->releaseBuffer(&t->buffer);
-        }
-    }
-}
-
-// generic code with resampling
-void AudioMixer::process__genericResampling()
-{
-    ALOGVV("process__genericResampling\n");
-    int32_t * const outTemp = mOutputTemp.get(); // naked ptr
-    size_t numFrames = mFrameCount;
-
-    for (const auto &pair : mGroups) {
-        const auto &group = pair.second;
-        const std::shared_ptr<Track> &t1 = mTracks[group[0]];
-
-        // clear temp buffer
-        memset(outTemp, 0, sizeof(*outTemp) * t1->mMixerChannelCount * mFrameCount);
-        for (const int name : group) {
-            const std::shared_ptr<Track> &t = mTracks[name];
-            int32_t *aux = NULL;
-            if (CC_UNLIKELY(t->needs & NEEDS_AUX)) {
-                aux = t->auxBuffer;
-            }
-
-            // this is a little goofy, on the resampling case we don't
-            // acquire/release the buffers because it's done by
-            // the resampler.
-            if (t->needs & NEEDS_RESAMPLE) {
-                (t.get()->*t->hook)(outTemp, numFrames, mResampleTemp.get() /* naked ptr */, aux);
-            } else {
-
-                size_t outFrames = 0;
-
-                while (outFrames < numFrames) {
-                    t->buffer.frameCount = numFrames - outFrames;
-                    t->bufferProvider->getNextBuffer(&t->buffer);
-                    t->mIn = t->buffer.raw;
-                    // t->mIn == nullptr can happen if the track was flushed just after having
-                    // been enabled for mixing.
-                    if (t->mIn == nullptr) break;
-
-                    (t.get()->*t->hook)(
-                            outTemp + outFrames * t->mMixerChannelCount, t->buffer.frameCount,
-                            mResampleTemp.get() /* naked ptr */,
-                            aux != nullptr ? aux + outFrames : nullptr);
-                    outFrames += t->buffer.frameCount;
-
-                    t->bufferProvider->releaseBuffer(&t->buffer);
-                }
-            }
-        }
-        convertMixerFormat(t1->mainBuffer, t1->mMixerFormat,
-                outTemp, t1->mMixerInFormat, numFrames * t1->mMixerChannelCount);
-    }
-}
-
-// one track, 16 bits stereo without resampling is the most common case
-void AudioMixer::process__oneTrack16BitsStereoNoResampling()
-{
-    ALOGVV("process__oneTrack16BitsStereoNoResampling\n");
-    LOG_ALWAYS_FATAL_IF(mEnabled.size() != 1,
-            "%zu != 1 tracks enabled", mEnabled.size());
-    const int name = mEnabled[0];
-    const std::shared_ptr<Track> &t = mTracks[name];
-
-    AudioBufferProvider::Buffer& b(t->buffer);
-
-    int32_t* out = t->mainBuffer;
-    float *fout = reinterpret_cast<float*>(out);
-    size_t numFrames = mFrameCount;
-
-    const int16_t vl = t->volume[0];
-    const int16_t vr = t->volume[1];
-    const uint32_t vrl = t->volumeRL;
-    while (numFrames) {
-        b.frameCount = numFrames;
-        t->bufferProvider->getNextBuffer(&b);
-        const int16_t *in = b.i16;
-
-        // in == NULL can happen if the track was flushed just after having
-        // been enabled for mixing.
-        if (in == NULL || (((uintptr_t)in) & 3)) {
-            if ( AUDIO_FORMAT_PCM_FLOAT == t->mMixerFormat ) {
-                 memset((char*)fout, 0, numFrames
-                         * t->mMixerChannelCount * audio_bytes_per_sample(t->mMixerFormat));
-            } else {
-                 memset((char*)out, 0, numFrames
-                         * t->mMixerChannelCount * audio_bytes_per_sample(t->mMixerFormat));
-            }
-            ALOGE_IF((((uintptr_t)in) & 3),
-                    "process__oneTrack16BitsStereoNoResampling: misaligned buffer"
-                    " %p track %d, channels %d, needs %08x, volume %08x vfl %f vfr %f",
-                    in, name, t->channelCount, t->needs, vrl, t->mVolume[0], t->mVolume[1]);
-            return;
-        }
-        size_t outFrames = b.frameCount;
-
-        switch (t->mMixerFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            do {
-                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
-                in += 2;
-                int32_t l = mulRL(1, rl, vrl);
-                int32_t r = mulRL(0, rl, vrl);
-                *fout++ = float_from_q4_27(l);
-                *fout++ = float_from_q4_27(r);
-                // Note: In case of later int16_t sink output,
-                // conversion and clamping is done by memcpy_to_i16_from_float().
-            } while (--outFrames);
-            break;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN_INT || uint32_t(vr) > UNITY_GAIN_INT)) {
-                // volume is boosted, so we might need to clamp even though
-                // we process only one track.
-                do {
-                    uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
-                    in += 2;
-                    int32_t l = mulRL(1, rl, vrl) >> 12;
-                    int32_t r = mulRL(0, rl, vrl) >> 12;
-                    // clamping...
-                    l = clamp16(l);
-                    r = clamp16(r);
-                    *out++ = (r<<16) | (l & 0xFFFF);
-                } while (--outFrames);
-            } else {
-                do {
-                    uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
-                    in += 2;
-                    int32_t l = mulRL(1, rl, vrl) >> 12;
-                    int32_t r = mulRL(0, rl, vrl) >> 12;
-                    *out++ = (r<<16) | (l & 0xFFFF);
-                } while (--outFrames);
-            }
-            break;
-        default:
-            LOG_ALWAYS_FATAL("bad mixer format: %d", t->mMixerFormat);
-        }
-        numFrames -= b.frameCount;
-        t->bufferProvider->releaseBuffer(&b);
-    }
-}
-
 /*static*/ pthread_once_t AudioMixer::sOnceControl = PTHREAD_ONCE_INIT;
 
 /*static*/ void AudioMixer::sInitRoutine()
@@ -1656,211 +523,71 @@
     DownmixerBufferProvider::init(); // for the downmixer
 }
 
-/* TODO: consider whether this level of optimization is necessary.
- * Perhaps just stick with a single for loop.
- */
-
-// Needs to derive a compile time constant (constexpr).  Could be targeted to go
-// to a MONOVOL mixtype based on MAX_NUM_VOLUMES, but that's an unnecessary complication.
-#define MIXTYPE_MONOVOL(mixtype) ((mixtype) == MIXTYPE_MULTI ? MIXTYPE_MULTI_MONOVOL : \
-        (mixtype) == MIXTYPE_MULTI_SAVEONLY ? MIXTYPE_MULTI_SAVEONLY_MONOVOL : (mixtype))
-
-/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27) or float
- */
-template <int MIXTYPE,
-        typename TO, typename TI, typename TV, typename TA, typename TAV>
-static void volumeRampMulti(uint32_t channels, TO* out, size_t frameCount,
-        const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc)
+std::shared_ptr<AudioMixerBase::TrackBase> AudioMixer::preCreateTrack()
 {
-    switch (channels) {
-    case 1:
-        volumeRampMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 2:
-        volumeRampMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 3:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 4:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 5:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 6:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 7:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 8:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    }
+    return std::make_shared<Track>();
 }
 
-/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27) or float
- */
-template <int MIXTYPE,
-        typename TO, typename TI, typename TV, typename TA, typename TAV>
-static void volumeMulti(uint32_t channels, TO* out, size_t frameCount,
-        const TI* in, TA* aux, const TV *vol, TAV vola)
+status_t AudioMixer::postCreateTrack(TrackBase *track)
 {
-    switch (channels) {
-    case 1:
-        volumeMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 2:
-        volumeMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 3:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 4:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 5:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 6:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 7:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 8:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out, frameCount, in, aux, vol, vola);
-        break;
+    Track* t = static_cast<Track*>(track);
+
+    audio_channel_mask_t channelMask = t->channelMask;
+    t->mHapticChannelMask = channelMask & AUDIO_CHANNEL_HAPTIC_ALL;
+    t->mHapticChannelCount = audio_channel_count_from_out_mask(t->mHapticChannelMask);
+    channelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
+    t->channelCount = audio_channel_count_from_out_mask(channelMask);
+    ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
+            "Non-stereo channel mask: %d\n", channelMask);
+    t->channelMask = channelMask;
+    t->mInputBufferProvider = NULL;
+    t->mDownmixRequiresFormat = AUDIO_FORMAT_INVALID; // no format required
+    t->mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
+    // haptic
+    t->mHapticPlaybackEnabled = false;
+    t->mHapticIntensity = HAPTIC_SCALE_NONE;
+    t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
+    t->mMixerHapticChannelCount = 0;
+    t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
+    t->mAdjustOutChannelCount = t->channelCount + t->mMixerHapticChannelCount;
+    t->mAdjustNonDestructiveInChannelCount = t->mAdjustOutChannelCount;
+    t->mAdjustNonDestructiveOutChannelCount = t->channelCount;
+    t->mKeepContractedChannels = false;
+    // Check the downmixing (or upmixing) requirements.
+    status_t status = t->prepareForDownmix();
+    if (status != OK) {
+        ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask);
+        return BAD_VALUE;
     }
+    // prepareForDownmix() may change mDownmixRequiresFormat
+    ALOGVV("mMixerFormat:%#x  mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
+    t->prepareForReformat();
+    t->prepareForAdjustChannelsNonDestructive(mFrameCount);
+    t->prepareForAdjustChannels();
+    return OK;
 }
 
-/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
- * USEFLOATVOL (set to true if float volume is used)
- * ADJUSTVOL   (set to true if volume ramp parameters needs adjustment afterwards)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27) or float
- */
-template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
-    typename TO, typename TI, typename TA>
-void AudioMixer::Track::volumeMix(TO *out, size_t outFrames,
-        const TI *in, TA *aux, bool ramp)
+void AudioMixer::preProcess()
 {
-    if (USEFLOATVOL) {
-        if (ramp) {
-            volumeRampMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
-                    mPrevVolume, mVolumeInc,
-#ifdef FLOAT_AUX
-                    &mPrevAuxLevel, mAuxInc
-#else
-                    &prevAuxLevel, auxInc
-#endif
-                );
-            if (ADJUSTVOL) {
-                adjustVolumeRamp(aux != NULL, true);
-            }
-        } else {
-            volumeMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
-                    mVolume,
-#ifdef FLOAT_AUX
-                    mAuxLevel
-#else
-                    auxLevel
-#endif
-            );
-        }
-    } else {
-        if (ramp) {
-            volumeRampMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
-                    prevVolume, volumeInc, &prevAuxLevel, auxInc);
-            if (ADJUSTVOL) {
-                adjustVolumeRamp(aux != NULL);
-            }
-        } else {
-            volumeMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
-                    volume, auxLevel);
+    for (const auto &pair : mTracks) {
+        // Clear contracted buffer before processing if contracted channels are saved
+        const std::shared_ptr<TrackBase> &tb = pair.second;
+        Track *t = static_cast<Track*>(tb.get());
+        if (t->mKeepContractedChannels) {
+            t->clearContractedBuffer();
         }
     }
 }
 
-/* This process hook is called when there is a single track without
- * aux buffer, volume ramp, or resampling.
- * TODO: Update the hook selection: this can properly handle aux and ramp.
- *
- * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
- */
-template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::process__noResampleOneTrack()
+void AudioMixer::postProcess()
 {
-    ALOGVV("process__noResampleOneTrack\n");
-    LOG_ALWAYS_FATAL_IF(mEnabled.size() != 1,
-            "%zu != 1 tracks enabled", mEnabled.size());
-    const std::shared_ptr<Track> &t = mTracks[mEnabled[0]];
-    const uint32_t channels = t->mMixerChannelCount;
-    TO* out = reinterpret_cast<TO*>(t->mainBuffer);
-    TA* aux = reinterpret_cast<TA*>(t->auxBuffer);
-    const bool ramp = t->needsRamp();
-
-    for (size_t numFrames = mFrameCount; numFrames > 0; ) {
-        AudioBufferProvider::Buffer& b(t->buffer);
-        // get input buffer
-        b.frameCount = numFrames;
-        t->bufferProvider->getNextBuffer(&b);
-        const TI *in = reinterpret_cast<TI*>(b.raw);
-
-        // in == NULL can happen if the track was flushed just after having
-        // been enabled for mixing.
-        if (in == NULL || (((uintptr_t)in) & 3)) {
-            memset(out, 0, numFrames
-                    * channels * audio_bytes_per_sample(t->mMixerFormat));
-            ALOGE_IF((((uintptr_t)in) & 3), "process__noResampleOneTrack: bus error: "
-                    "buffer %p track %p, channels %d, needs %#x",
-                    in, &t, t->channelCount, t->needs);
-            return;
-        }
-
-        const size_t outFrames = b.frameCount;
-        t->volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, false /* ADJUSTVOL */> (
-                out, outFrames, in, aux, ramp);
-
-        out += outFrames * channels;
-        if (aux != NULL) {
-            aux += outFrames;
-        }
-        numFrames -= b.frameCount;
-
-        // release buffer
-        t->bufferProvider->releaseBuffer(&b);
-    }
-    if (ramp) {
-        t->adjustVolumeRamp(aux != NULL, is_same<TI, float>::value);
-    }
-}
-
-void AudioMixer::processHapticData()
-{
+    // Process haptic data.
     // Need to keep consistent with VibrationEffect.scale(int, float, int)
     for (const auto &pair : mGroups) {
         // process by group of tracks with same output main buffer.
         const auto &group = pair.second;
         for (const int name : group) {
-            const std::shared_ptr<Track> &t = mTracks[name];
+            const std::shared_ptr<Track> &t = getTrack(name);
             if (t->mHapticPlaybackEnabled) {
                 size_t sampleCount = mFrameCount * t->mMixerHapticChannelCount;
                 float gamma = t->getHapticScaleGamma();
@@ -1887,225 +614,5 @@
     }
 }
 
-/* This track hook is called to do resampling then mixing,
- * pulling from the track's upstream AudioBufferProvider.
- *
- * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27) or float
- */
-template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::Track::track__Resample(TO* out, size_t outFrameCount, TO* temp, TA* aux)
-{
-    ALOGVV("track__Resample\n");
-    mResampler->setSampleRate(sampleRate);
-    const bool ramp = needsRamp();
-    if (ramp || aux != NULL) {
-        // if ramp:        resample with unity gain to temp buffer and scale/mix in 2nd step.
-        // if aux != NULL: resample with unity gain to temp buffer then apply send level.
-
-        mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
-        memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(TO));
-        mResampler->resample((int32_t*)temp, outFrameCount, bufferProvider);
-
-        volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, true /* ADJUSTVOL */>(
-                out, outFrameCount, temp, aux, ramp);
-
-    } else { // constant volume gain
-        mResampler->setVolume(mVolume[0], mVolume[1]);
-        mResampler->resample((int32_t*)out, outFrameCount, bufferProvider);
-    }
-}
-
-/* This track hook is called to mix a track, when no resampling is required.
- * The input buffer should be present in in.
- *
- * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27) or float
- */
-template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::Track::track__NoResample(TO* out, size_t frameCount, TO* temp __unused, TA* aux)
-{
-    ALOGVV("track__NoResample\n");
-    const TI *in = static_cast<const TI *>(mIn);
-
-    volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, true /* ADJUSTVOL */>(
-            out, frameCount, in, aux, needsRamp());
-
-    // MIXTYPE_MONOEXPAND reads a single input channel and expands to NCHAN output channels.
-    // MIXTYPE_MULTI reads NCHAN input channels and places to NCHAN output channels.
-    in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * mMixerChannelCount;
-    mIn = in;
-}
-
-/* The Mixer engine generates either int32_t (Q4_27) or float data.
- * We use this function to convert the engine buffers
- * to the desired mixer output format, either int16_t (Q.15) or float.
- */
-/* static */
-void AudioMixer::convertMixerFormat(void *out, audio_format_t mixerOutFormat,
-        void *in, audio_format_t mixerInFormat, size_t sampleCount)
-{
-    switch (mixerInFormat) {
-    case AUDIO_FORMAT_PCM_FLOAT:
-        switch (mixerOutFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            memcpy(out, in, sampleCount * sizeof(float)); // MEMCPY. TODO optimize out
-            break;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            memcpy_to_i16_from_float((int16_t*)out, (float*)in, sampleCount);
-            break;
-        default:
-            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
-            break;
-        }
-        break;
-    case AUDIO_FORMAT_PCM_16_BIT:
-        switch (mixerOutFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            memcpy_to_float_from_q4_27((float*)out, (const int32_t*)in, sampleCount);
-            break;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            memcpy_to_i16_from_q4_27((int16_t*)out, (const int32_t*)in, sampleCount);
-            break;
-        default:
-            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
-            break;
-        }
-        break;
-    default:
-        LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
-        break;
-    }
-}
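
Editor's note: the Q4.27 format named in the convertMixerFormat() comment above is an int32_t sample with 27 fractional bits, so unity gain is 1 << 27; that is why the int16_t (Q0.15) conversion drops 12 fractional bits and clamps. A standalone illustration follows (the helper names are made up for this note and are not the audio_utils primitives):

#include <algorithm>
#include <cstdint>
#include <cstdio>

static float floatFromQ4_27(int32_t x) { return x / static_cast<float>(1 << 27); }

static int16_t i16FromQ4_27(int32_t x) {
    // Drop 12 fractional bits (27 -> 15) and clamp to the int16_t range.
    return static_cast<int16_t>(
            std::min<int32_t>(32767, std::max<int32_t>(-32768, x >> 12)));
}

int main() {
    const int32_t half = 1 << 26;   // 0.5 in Q4.27
    std::printf("0.5 -> float %f, int16 %d\n", floatFromQ4_27(half), i16FromQ4_27(half));
    const int32_t loud = 3 << 27;   // 3.0: within Q4.27 headroom, clamps when narrowed
    std::printf("3.0 -> float %f, int16 %d\n", floatFromQ4_27(loud), i16FromQ4_27(loud));
    return 0;
}

The four integer bits of headroom let several tracks accumulate above unity before the final clamp, which is the reason the engine mixes in Q4.27 (or float) and only narrows at the output stage.
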
-
-/* Returns the proper track hook to use for mixing the track into the output buffer.
- */
-/* static */
-AudioMixer::hook_t AudioMixer::Track::getTrackHook(int trackType, uint32_t channelCount,
-        audio_format_t mixerInFormat, audio_format_t mixerOutFormat __unused)
-{
-    if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
-        switch (trackType) {
-        case TRACKTYPE_NOP:
-            return &Track::track__nop;
-        case TRACKTYPE_RESAMPLE:
-            return &Track::track__genericResample;
-        case TRACKTYPE_NORESAMPLEMONO:
-            return &Track::track__16BitsMono;
-        case TRACKTYPE_NORESAMPLE:
-            return &Track::track__16BitsStereo;
-        default:
-            LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
-            break;
-        }
-    }
-    LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
-    switch (trackType) {
-    case TRACKTYPE_NOP:
-        return &Track::track__nop;
-    case TRACKTYPE_RESAMPLE:
-        switch (mixerInFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            return (AudioMixer::hook_t) &Track::track__Resample<
-                    MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            return (AudioMixer::hook_t) &Track::track__Resample<
-                    MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
-        default:
-            LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
-            break;
-        }
-        break;
-    case TRACKTYPE_NORESAMPLEMONO:
-        switch (mixerInFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            return (AudioMixer::hook_t) &Track::track__NoResample<
-                            MIXTYPE_MONOEXPAND, float /*TO*/, float /*TI*/, TYPE_AUX>;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            return (AudioMixer::hook_t) &Track::track__NoResample<
-                            MIXTYPE_MONOEXPAND, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
-        default:
-            LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
-            break;
-        }
-        break;
-    case TRACKTYPE_NORESAMPLE:
-        switch (mixerInFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            return (AudioMixer::hook_t) &Track::track__NoResample<
-                    MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            return (AudioMixer::hook_t) &Track::track__NoResample<
-                    MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
-        default:
-            LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
-            break;
-        }
-        break;
-    default:
-        LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
-        break;
-    }
-    return NULL;
-}
-
-/* Returns the proper process hook for mixing tracks. Currently works only for
- * PROCESSTYPE_NORESAMPLEONETRACK, a mix involving one track, no resampling.
- *
- * TODO: Due to the special mixing considerations of duplicating to
- * a stereo output track, the input track cannot be MONO.  This should be
- * prevented by the caller.
- */
-/* static */
-AudioMixer::process_hook_t AudioMixer::getProcessHook(
-        int processType, uint32_t channelCount,
-        audio_format_t mixerInFormat, audio_format_t mixerOutFormat)
-{
-    if (processType != PROCESSTYPE_NORESAMPLEONETRACK) { // Only NORESAMPLEONETRACK
-        LOG_ALWAYS_FATAL("bad processType: %d", processType);
-        return NULL;
-    }
-    if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
-        return &AudioMixer::process__oneTrack16BitsStereoNoResampling;
-    }
-    LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
-    switch (mixerInFormat) {
-    case AUDIO_FORMAT_PCM_FLOAT:
-        switch (mixerOutFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            return &AudioMixer::process__noResampleOneTrack<
-                    MIXTYPE_MULTI_SAVEONLY, float /*TO*/, float /*TI*/, TYPE_AUX>;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            return &AudioMixer::process__noResampleOneTrack<
-                    MIXTYPE_MULTI_SAVEONLY, int16_t /*TO*/, float /*TI*/, TYPE_AUX>;
-        default:
-            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
-            break;
-        }
-        break;
-    case AUDIO_FORMAT_PCM_16_BIT:
-        switch (mixerOutFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            return &AudioMixer::process__noResampleOneTrack<
-                    MIXTYPE_MULTI_SAVEONLY, float /*TO*/, int16_t /*TI*/, TYPE_AUX>;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            return &AudioMixer::process__noResampleOneTrack<
-                    MIXTYPE_MULTI_SAVEONLY, int16_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
-        default:
-            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
-            break;
-        }
-        break;
-    default:
-        LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
-        break;
-    }
-    return NULL;
-}
-
 // ----------------------------------------------------------------------------
 } // namespace android
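
Editor's note: the hunks above strip AudioMixer.cpp down to the format- and haptic-specific pieces (preCreateTrack/postCreateTrack, preProcess/postProcess), while the generic engine moves into the new AudioMixerBase.cpp below. A minimal, self-contained sketch of that hook pattern, using placeholder names rather than the real AOSP classes, looks like this:

#include <cstdio>
#include <map>
#include <memory>

// Placeholder types; only the shape of the preCreateTrack()/postCreateTrack()
// hooks mirrors this refactor, not the real AOSP classes.
struct BaseTrack {
    virtual ~BaseTrack() = default;
    int channelCount = 2;                    // generic state owned by the base class
};

class BaseMixer {
public:
    virtual ~BaseMixer() = default;
    bool create(int name) {
        auto t = preCreateTrack();           // derived class chooses the track type
        t->channelCount = 2;                 // generic initialization
        if (!postCreateTrack(t.get())) return false;   // derived-only setup
        mTracks[name] = t;
        return true;
    }
protected:
    virtual std::shared_ptr<BaseTrack> preCreateTrack() { return std::make_shared<BaseTrack>(); }
    virtual bool postCreateTrack(BaseTrack*) { return true; }
    std::map<int, std::shared_ptr<BaseTrack>> mTracks;
};

class DerivedMixer : public BaseMixer {
    struct Track : BaseTrack { bool hapticPlaybackEnabled = false; };
protected:
    std::shared_ptr<BaseTrack> preCreateTrack() override { return std::make_shared<Track>(); }
    bool postCreateTrack(BaseTrack* t) override {
        static_cast<Track*>(t)->hapticPlaybackEnabled = false;  // derived-specific fields
        return true;
    }
};

int main() {
    DerivedMixer mixer;
    std::printf("create(0) -> %s\n", mixer.create(0) ? "OK" : "error");
    return 0;
}

The real classes follow the same shape: preCreateTrack() lets AudioMixer allocate its richer Track, and postCreateTrack() performs the downmix, reformat, and haptic setup after AudioMixerBase has filled in the generic state.
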
diff --git a/media/libaudioprocessing/AudioMixerBase.cpp b/media/libaudioprocessing/AudioMixerBase.cpp
new file mode 100644
index 0000000..75c077d
--- /dev/null
+++ b/media/libaudioprocessing/AudioMixerBase.cpp
@@ -0,0 +1,1692 @@
+/*
+**
+** Copyright 2019, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "AudioMixer"
+//#define LOG_NDEBUG 0
+
+#include <sstream>
+#include <string.h>
+
+#include <audio_utils/primitives.h>
+#include <cutils/compiler.h>
+#include <media/AudioMixerBase.h>
+#include <utils/Log.h>
+
+#include "AudioMixerOps.h"
+
+// The FCC_2 macro refers to the Fixed Channel Count of 2 for the legacy integer mixer.
+#ifndef FCC_2
+#define FCC_2 2
+#endif
+
+// Look for MONO_HACK for any Mono hack involving legacy mono channel to
+// stereo channel conversion.
+
+/* VERY_VERY_VERBOSE_LOGGING will show exactly which process hook and track hook is
+ * being used. This is a considerable amount of log spam, so don't enable unless you
+ * are verifying the hook based code.
+ */
+//#define VERY_VERY_VERBOSE_LOGGING
+#ifdef VERY_VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+//define ALOGVV printf  // for test-mixer.cpp
+#else
+#define ALOGVV(a...) do { } while (0)
+#endif
+
+// TODO: remove BLOCKSIZE unit of processing - it isn't needed anymore.
+static constexpr int BLOCKSIZE = 16;
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+bool AudioMixerBase::isValidFormat(audio_format_t format) const
+{
+    switch (format) {
+    case AUDIO_FORMAT_PCM_8_BIT:
+    case AUDIO_FORMAT_PCM_16_BIT:
+    case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+    case AUDIO_FORMAT_PCM_32_BIT:
+    case AUDIO_FORMAT_PCM_FLOAT:
+        return true;
+    default:
+        return false;
+    }
+}
+
+bool AudioMixerBase::isValidChannelMask(audio_channel_mask_t channelMask) const
+{
+    return audio_channel_count_from_out_mask(channelMask) <= MAX_NUM_CHANNELS;
+}
+
+std::shared_ptr<AudioMixerBase::TrackBase> AudioMixerBase::preCreateTrack()
+{
+    return std::make_shared<TrackBase>();
+}
+
+status_t AudioMixerBase::create(
+        int name, audio_channel_mask_t channelMask, audio_format_t format, int sessionId)
+{
+    LOG_ALWAYS_FATAL_IF(exists(name), "name %d already exists", name);
+
+    if (!isValidChannelMask(channelMask)) {
+        ALOGE("%s invalid channelMask: %#x", __func__, channelMask);
+        return BAD_VALUE;
+    }
+    if (!isValidFormat(format)) {
+        ALOGE("%s invalid format: %#x", __func__, format);
+        return BAD_VALUE;
+    }
+
+    auto t = preCreateTrack();
+    {
+        // TODO: move initialization to the Track constructor.
+        // assume default parameters for the track, except where noted below
+        t->needs = 0;
+
+        // Integer volume.
+        // Currently integer volume is kept for the legacy integer mixer.
+        // Will be removed when the legacy mixer path is removed.
+        t->volume[0] = 0;
+        t->volume[1] = 0;
+        t->prevVolume[0] = 0 << 16;
+        t->prevVolume[1] = 0 << 16;
+        t->volumeInc[0] = 0;
+        t->volumeInc[1] = 0;
+        t->auxLevel = 0;
+        t->auxInc = 0;
+        t->prevAuxLevel = 0;
+
+        // Floating point volume.
+        t->mVolume[0] = 0.f;
+        t->mVolume[1] = 0.f;
+        t->mPrevVolume[0] = 0.f;
+        t->mPrevVolume[1] = 0.f;
+        t->mVolumeInc[0] = 0.;
+        t->mVolumeInc[1] = 0.;
+        t->mAuxLevel = 0.;
+        t->mAuxInc = 0.;
+        t->mPrevAuxLevel = 0.;
+
+        // no initialization needed
+        // t->frameCount
+        t->channelCount = audio_channel_count_from_out_mask(channelMask);
+        t->enabled = false;
+        ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
+                "Non-stereo channel mask: %d\n", channelMask);
+        t->channelMask = channelMask;
+        t->sessionId = sessionId;
+        // setBufferProvider(name, AudioBufferProvider *) is required before enable(name)
+        t->bufferProvider = NULL;
+        t->buffer.raw = NULL;
+        // no initialization needed
+        // t->buffer.frameCount
+        t->hook = NULL;
+        t->mIn = NULL;
+        t->sampleRate = mSampleRate;
+        // setParameter(name, TRACK, MAIN_BUFFER, mixBuffer) is required before enable(name)
+        t->mainBuffer = NULL;
+        t->auxBuffer = NULL;
+        t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
+        t->mFormat = format;
+        t->mMixerInFormat = kUseFloat && kUseNewMixer ?
+                AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+        t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits(
+                AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
+        t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
+        status_t status = postCreateTrack(t.get());
+        if (status != OK) return status;
+        mTracks[name] = t;
+        return OK;
+    }
+}
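
Editor's note: as the in-code comments above state, setBufferProvider(name, ...) and setParameter(name, TRACK, MAIN_BUFFER, ...) must be called before enable(name). A caller-side sketch of that order follows; it is illustrative only, assumes the libaudioprocessing headers from an AOSP checkout, and mixer, provider, and mixBuffer are placeholders rather than names from this change:

// Illustrative call order only; everything except the AudioMixerBase methods
// and enum values visible in this file is a placeholder.
constexpr int kTrackName = 0;                                   // caller-chosen track name
mixer.create(kTrackName, AUDIO_CHANNEL_OUT_STEREO,
             AUDIO_FORMAT_PCM_FLOAT, AUDIO_SESSION_NONE);
mixer.setBufferProvider(kTrackName, provider);                  // required before enable(name)
mixer.setParameter(kTrackName, AudioMixerBase::TRACK,
                   AudioMixerBase::MAIN_BUFFER, mixBuffer);     // required before enable(name)
mixer.enable(kTrackName);
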
+
+// Called when channel masks have changed for a track name
+bool AudioMixerBase::setChannelMasks(int name,
+        audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask)
+{
+    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+    const std::shared_ptr<TrackBase> &track = mTracks[name];
+
+    if (trackChannelMask == track->channelMask && mixerChannelMask == track->mMixerChannelMask) {
+        return false;  // no need to change
+    }
+    // always recompute for both channel masks even if only one has changed.
+    const uint32_t trackChannelCount = audio_channel_count_from_out_mask(trackChannelMask);
+    const uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mixerChannelMask);
+
+    ALOG_ASSERT(trackChannelCount && mixerChannelCount);
+    track->channelMask = trackChannelMask;
+    track->channelCount = trackChannelCount;
+    track->mMixerChannelMask = mixerChannelMask;
+    track->mMixerChannelCount = mixerChannelCount;
+
+    // Resampler channels may have changed.
+    track->recreateResampler(mSampleRate);
+    return true;
+}
+
+void AudioMixerBase::destroy(int name)
+{
+    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+    ALOGV("deleteTrackName(%d)", name);
+
+    if (mTracks[name]->enabled) {
+        invalidate();
+    }
+    mTracks.erase(name); // deallocate track
+}
+
+void AudioMixerBase::enable(int name)
+{
+    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+    const std::shared_ptr<TrackBase> &track = mTracks[name];
+
+    if (!track->enabled) {
+        track->enabled = true;
+        ALOGV("enable(%d)", name);
+        invalidate();
+    }
+}
+
+void AudioMixerBase::disable(int name)
+{
+    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+    const std::shared_ptr<TrackBase> &track = mTracks[name];
+
+    if (track->enabled) {
+        track->enabled = false;
+        ALOGV("disable(%d)", name);
+        invalidate();
+    }
+}
+
+/* Sets the volume ramp variables for the AudioMixer.
+ *
+ * The volume ramp variables are used to transition from the previous
+ * volume to the set volume.  ramp controls the duration of the transition.
+ * Its value is typically one state framecount period, but may also be 0,
+ * meaning "immediate."
+ *
+ * FIXME: 1) Volume ramp is enabled only if there is a nonzero integer increment
+ * even if there is a nonzero floating point increment (in that case, the volume
+ * change is immediate).  This restriction should be changed when the legacy mixer
+ * is removed (see #2).
+ * FIXME: 2) Integer volume variables are used for Legacy mixing and should be removed
+ * when no longer needed.
+ *
+ * @param newVolume set volume target in floating point [0.0, 1.0].
+ * @param ramp number of frames to increment over. if ramp is 0, the volume
+ * should be set immediately.  Currently ramp should not exceed 65535 (frames).
+ * @param pIntSetVolume pointer to the U4.12 integer target volume, set on return.
+ * @param pIntPrevVolume pointer to the U4.28 integer previous volume, set on return.
+ * @param pIntVolumeInc pointer to the U4.28 increment per output audio frame, set on return.
+ * @param pSetVolume pointer to the float target volume, set on return.
+ * @param pPrevVolume pointer to the float previous volume, set on return.
+ * @param pVolumeInc pointer to the float increment per output audio frame, set on return.
+ * @return true if the volume has changed, false if volume is same.
+ */
+static inline bool setVolumeRampVariables(float newVolume, int32_t ramp,
+        int16_t *pIntSetVolume, int32_t *pIntPrevVolume, int32_t *pIntVolumeInc,
+        float *pSetVolume, float *pPrevVolume, float *pVolumeInc) {
+    // check floating point volume to see if it is identical to the previously
+    // set volume.
+    // We do not use a tolerance here (and reject changes too small)
+    // as it may be confusing to use a different value than the one set.
+    // If the resulting volume is too small to ramp, it is a direct set of the volume.
+    if (newVolume == *pSetVolume) {
+        return false;
+    }
+    if (newVolume < 0) {
+        newVolume = 0; // should not have negative volumes
+    } else {
+        switch (fpclassify(newVolume)) {
+        case FP_SUBNORMAL:
+        case FP_NAN:
+            newVolume = 0;
+            break;
+        case FP_ZERO:
+            break; // zero volume is fine
+        case FP_INFINITE:
+            // Infinite volume could be handled consistently since
+            // floating point math saturates at infinities,
+            // but we limit volume to unity gain float.
+            // ramp = 0; break;
+            //
+            newVolume = AudioMixerBase::UNITY_GAIN_FLOAT;
+            break;
+        case FP_NORMAL:
+        default:
+            // Floating point does not have problems with overflow wrap
+            // that integer has.  However, we limit the volume to
+            // unity gain here.
+            // TODO: Revisit the volume limitation and perhaps parameterize.
+            if (newVolume > AudioMixerBase::UNITY_GAIN_FLOAT) {
+                newVolume = AudioMixerBase::UNITY_GAIN_FLOAT;
+            }
+            break;
+        }
+    }
+
+    // set floating point volume ramp
+    if (ramp != 0) {
+        // when the ramp completes, *pPrevVolume is set to *pSetVolume, so there
+        // is no computational mismatch; hence equality is checked here.
+        ALOGD_IF(*pPrevVolume != *pSetVolume, "previous float ramp hasn't finished,"
+                " prev:%f  set_to:%f", *pPrevVolume, *pSetVolume);
+        const float inc = (newVolume - *pPrevVolume) / ramp; // could be inf, nan, subnormal
+        // could be inf, cannot be nan, subnormal
+        const float maxv = std::max(newVolume, *pPrevVolume);
+
+        if (isnormal(inc) // inc must be a normal number (no subnormals, infinite, nan)
+                && maxv + inc != maxv) { // inc must make forward progress
+            *pVolumeInc = inc;
+            // ramp is set now.
+            // Note: if newVolume is 0, then near the end of the ramp,
+            // it may be possible that the ramped volume may be subnormal or
+            // temporarily negative by a small amount or subnormal due to floating
+            // point inaccuracies.
+        } else {
+            ramp = 0; // ramp not allowed
+        }
+    }
+
+    // compute and check integer volume, no need to check negative values
+    // The integer volume is limited to "unity_gain" to avoid wrapping and other
+    // audio artifacts, so it never reaches the range limit of U4.28.
+    // We safely use signed 16 and 32 bit integers here.
+    const float scaledVolume = newVolume * AudioMixerBase::UNITY_GAIN_INT; // not neg, subnormal, nan
+    const int32_t intVolume = (scaledVolume >= (float)AudioMixerBase::UNITY_GAIN_INT) ?
+            AudioMixerBase::UNITY_GAIN_INT : (int32_t)scaledVolume;
+
+    // set integer volume ramp
+    if (ramp != 0) {
+        // integer volume is U4.12 (to use 16 bit multiplies), but ramping uses U4.28.
+        // when the ramp completes, *pIntPrevVolume is set to *pIntSetVolume << 16, so there
+        // is no computational mismatch; hence equality is checked here.
+        ALOGD_IF(*pIntPrevVolume != *pIntSetVolume << 16, "previous int ramp hasn't finished,"
+                " prev:%d  set_to:%d", *pIntPrevVolume, *pIntSetVolume << 16);
+        const int32_t inc = ((intVolume << 16) - *pIntPrevVolume) / ramp;
+
+        if (inc != 0) { // inc must make forward progress
+            *pIntVolumeInc = inc;
+        } else {
+            ramp = 0; // ramp not allowed
+        }
+    }
+
+    // if no ramp, or ramp not allowed, then clear float and integer increments
+    if (ramp == 0) {
+        *pVolumeInc = 0;
+        *pPrevVolume = newVolume;
+        *pIntVolumeInc = 0;
+        *pIntPrevVolume = intVolume << 16;
+    }
+    *pSetVolume = newVolume;
+    *pIntSetVolume = intVolume;
+    return true;
+}
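
Editor's note: to make the U4.12/U4.28 bookkeeping above concrete, here is a standalone numeric illustration; the 0.25 to 1.0 ramp over 960 frames is an assumed example, not a value taken from this change:

#include <cstdint>
#include <cstdio>

int main() {
    constexpr int32_t kUnityGainInt = 1 << 12;                  // U4.12 unity, as in the mixer
    const float prevVolume = 0.25f;                             // previous float volume
    const int32_t intPrevVolume = int32_t(prevVolume * kUnityGainInt) << 16;  // U4.28
    const float newVolume = 1.0f;                               // target, already <= unity
    const int32_t ramp = 960;                                   // frames in one mix pass

    // Same arithmetic as setVolumeRampVariables(): per-frame increments.
    const float volumeInc = (newVolume - prevVolume) / ramp;
    const int32_t intVolume = int32_t(newVolume * kUnityGainInt);        // 4096
    const int32_t intVolumeInc = ((intVolume << 16) - intPrevVolume) / ramp;

    std::printf("float inc/frame = %f\n", volumeInc);                    // ~0.000781
    std::printf("U4.28 inc/frame = %d (~%f)\n", intVolumeInc,
            intVolumeInc / float(kUnityGainInt << 16));                  // ~0.000781
    // After 960 frames the ramp completes and adjustVolumeRamp() clears both increments.
    return 0;
}
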
+
+void AudioMixerBase::setParameter(int name, int target, int param, void *value)
+{
+    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+    const std::shared_ptr<TrackBase> &track = mTracks[name];
+
+    int valueInt = static_cast<int>(reinterpret_cast<uintptr_t>(value));
+    int32_t *valueBuf = reinterpret_cast<int32_t*>(value);
+
+    switch (target) {
+
+    case TRACK:
+        switch (param) {
+        case CHANNEL_MASK: {
+            const audio_channel_mask_t trackChannelMask =
+                static_cast<audio_channel_mask_t>(valueInt);
+            if (setChannelMasks(name, trackChannelMask, track->mMixerChannelMask)) {
+                ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", trackChannelMask);
+                invalidate();
+            }
+            } break;
+        case MAIN_BUFFER:
+            if (track->mainBuffer != valueBuf) {
+                track->mainBuffer = valueBuf;
+                ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
+                invalidate();
+            }
+            break;
+        case AUX_BUFFER:
+            if (track->auxBuffer != valueBuf) {
+                track->auxBuffer = valueBuf;
+                ALOGV("setParameter(TRACK, AUX_BUFFER, %p)", valueBuf);
+                invalidate();
+            }
+            break;
+        case FORMAT: {
+            audio_format_t format = static_cast<audio_format_t>(valueInt);
+            if (track->mFormat != format) {
+                ALOG_ASSERT(audio_is_linear_pcm(format), "Invalid format %#x", format);
+                track->mFormat = format;
+                ALOGV("setParameter(TRACK, FORMAT, %#x)", format);
+                invalidate();
+            }
+            } break;
+        case MIXER_FORMAT: {
+            audio_format_t format = static_cast<audio_format_t>(valueInt);
+            if (track->mMixerFormat != format) {
+                track->mMixerFormat = format;
+                ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
+            }
+            } break;
+        case MIXER_CHANNEL_MASK: {
+            const audio_channel_mask_t mixerChannelMask =
+                    static_cast<audio_channel_mask_t>(valueInt);
+            if (setChannelMasks(name, track->channelMask, mixerChannelMask)) {
+                ALOGV("setParameter(TRACK, MIXER_CHANNEL_MASK, %#x)", mixerChannelMask);
+                invalidate();
+            }
+            } break;
+        default:
+            LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
+        }
+        break;
+
+    case RESAMPLE:
+        switch (param) {
+        case SAMPLE_RATE:
+            ALOG_ASSERT(valueInt > 0, "bad sample rate %d", valueInt);
+            if (track->setResampler(uint32_t(valueInt), mSampleRate)) {
+                ALOGV("setParameter(RESAMPLE, SAMPLE_RATE, %u)",
+                        uint32_t(valueInt));
+                invalidate();
+            }
+            break;
+        case RESET:
+            track->resetResampler();
+            invalidate();
+            break;
+        case REMOVE:
+            track->mResampler.reset(nullptr);
+            track->sampleRate = mSampleRate;
+            invalidate();
+            break;
+        default:
+            LOG_ALWAYS_FATAL("setParameter resample: bad param %d", param);
+        }
+        break;
+
+    case RAMP_VOLUME:
+    case VOLUME:
+        switch (param) {
+        case AUXLEVEL:
+            if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
+                    target == RAMP_VOLUME ? mFrameCount : 0,
+                    &track->auxLevel, &track->prevAuxLevel, &track->auxInc,
+                    &track->mAuxLevel, &track->mPrevAuxLevel, &track->mAuxInc)) {
+                ALOGV("setParameter(%s, AUXLEVEL: %04x)",
+                        target == VOLUME ? "VOLUME" : "RAMP_VOLUME", track->auxLevel);
+                invalidate();
+            }
+            break;
+        default:
+            if ((unsigned)param >= VOLUME0 && (unsigned)param < VOLUME0 + MAX_NUM_VOLUMES) {
+                if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
+                        target == RAMP_VOLUME ? mFrameCount : 0,
+                        &track->volume[param - VOLUME0],
+                        &track->prevVolume[param - VOLUME0],
+                        &track->volumeInc[param - VOLUME0],
+                        &track->mVolume[param - VOLUME0],
+                        &track->mPrevVolume[param - VOLUME0],
+                        &track->mVolumeInc[param - VOLUME0])) {
+                    ALOGV("setParameter(%s, VOLUME%d: %04x)",
+                            target == VOLUME ? "VOLUME" : "RAMP_VOLUME", param - VOLUME0,
+                                    track->volume[param - VOLUME0]);
+                    invalidate();
+                }
+            } else {
+                LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
+            }
+        }
+        break;
+
+    default:
+        LOG_ALWAYS_FATAL("setParameter: bad target %d", target);
+    }
+}
+
+bool AudioMixerBase::TrackBase::setResampler(uint32_t trackSampleRate, uint32_t devSampleRate)
+{
+    if (trackSampleRate != devSampleRate || mResampler.get() != nullptr) {
+        if (sampleRate != trackSampleRate) {
+            sampleRate = trackSampleRate;
+            if (mResampler.get() == nullptr) {
+                ALOGV("Creating resampler from track %d Hz to device %d Hz",
+                        trackSampleRate, devSampleRate);
+                AudioResampler::src_quality quality;
+                // force lowest quality level resampler if use case isn't music or video
+                // FIXME this is flawed for dynamic sample rates, as we choose the resampler
+                // quality level based on the initial ratio, but that could change later.
+                // Should have a way to distinguish tracks with static ratios vs. dynamic ratios.
+                if (isMusicRate(trackSampleRate)) {
+                    quality = AudioResampler::DEFAULT_QUALITY;
+                } else {
+                    quality = AudioResampler::DYN_LOW_QUALITY;
+                }
+
+                // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
+                // but if none exists, it is the channel count (1 for mono).
+                const int resamplerChannelCount = getOutputChannelCount();
+                ALOGVV("Creating resampler:"
+                        " format(%#x) channels(%d) devSampleRate(%u) quality(%d)\n",
+                        mMixerInFormat, resamplerChannelCount, devSampleRate, quality);
+                mResampler.reset(AudioResampler::create(
+                        mMixerInFormat,
+                        resamplerChannelCount,
+                        devSampleRate, quality));
+            }
+            return true;
+        }
+    }
+    return false;
+}
+
+/* Checks to see if the volume ramp has completed and clears the increment
+ * variables appropriately.
+ *
+ * FIXME: There is code to handle int/float ramp variable switchover should it not
+ * complete within a mixer buffer processing call, but it is preferred to avoid switchover
+ * due to precision issues.  The switchover code is included for legacy code purposes
+ * and can be removed once the integer volume is removed.
+ *
+ * It is not sufficient to clear only the volumeInc integer variable because
+ * if one channel requires ramping, all channels are ramped.
+ *
+ * There is a bit of duplicated code here, but it keeps backward compatibility.
+ */
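+/* For reference (see setVolumeRampVariables and the code below): the legacy integer
+ * path keeps the target volume[] / auxLevel in U4.12 (UNITY_GAIN_INT == 0x1000),
+ * while prevVolume[] / prevAuxLevel and the per-frame increments are kept in U4.28
+ * (hence the "<< 16" and u4_28_from_float conversions); the float path keeps
+ * mVolume[], mPrevVolume[] and mVolumeInc[] directly in floating point.
+ */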
+void AudioMixerBase::TrackBase::adjustVolumeRamp(bool aux, bool useFloat)
+{
+    if (useFloat) {
+        for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
+            if ((mVolumeInc[i] > 0 && mPrevVolume[i] + mVolumeInc[i] >= mVolume[i]) ||
+                     (mVolumeInc[i] < 0 && mPrevVolume[i] + mVolumeInc[i] <= mVolume[i])) {
+                volumeInc[i] = 0;
+                prevVolume[i] = volume[i] << 16;
+                mVolumeInc[i] = 0.;
+                mPrevVolume[i] = mVolume[i];
+            } else {
+                //ALOGV("ramp: %f %f %f", mVolume[i], mPrevVolume[i], mVolumeInc[i]);
+                prevVolume[i] = u4_28_from_float(mPrevVolume[i]);
+            }
+        }
+    } else {
+        for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
+            if (((volumeInc[i]>0) && (((prevVolume[i]+volumeInc[i])>>16) >= volume[i])) ||
+                    ((volumeInc[i]<0) && (((prevVolume[i]+volumeInc[i])>>16) <= volume[i]))) {
+                volumeInc[i] = 0;
+                prevVolume[i] = volume[i] << 16;
+                mVolumeInc[i] = 0.;
+                mPrevVolume[i] = mVolume[i];
+            } else {
+                //ALOGV("ramp: %d %d %d", volume[i] << 16, prevVolume[i], volumeInc[i]);
+                mPrevVolume[i]  = float_from_u4_28(prevVolume[i]);
+            }
+        }
+    }
+
+    if (aux) {
+#ifdef FLOAT_AUX
+        if (useFloat) {
+            if ((mAuxInc > 0.f && mPrevAuxLevel + mAuxInc >= mAuxLevel) ||
+                    (mAuxInc < 0.f && mPrevAuxLevel + mAuxInc <= mAuxLevel)) {
+                auxInc = 0;
+                prevAuxLevel = auxLevel << 16;
+                mAuxInc = 0.f;
+                mPrevAuxLevel = mAuxLevel;
+            }
+        } else
+#endif
+        if ((auxInc > 0 && ((prevAuxLevel + auxInc) >> 16) >= auxLevel) ||
+                (auxInc < 0 && ((prevAuxLevel + auxInc) >> 16) <= auxLevel)) {
+            auxInc = 0;
+            prevAuxLevel = auxLevel << 16;
+            mAuxInc = 0.f;
+            mPrevAuxLevel = mAuxLevel;
+        }
+    }
+}
+
+void AudioMixerBase::TrackBase::recreateResampler(uint32_t devSampleRate)
+{
+    if (mResampler.get() != nullptr) {
+        const uint32_t resetToSampleRate = sampleRate;
+        mResampler.reset(nullptr);
+        sampleRate = devSampleRate; // without resampler, track rate is device sample rate.
+        // recreate the resampler with updated format, channels, saved sampleRate.
+        setResampler(resetToSampleRate /*trackSampleRate*/, devSampleRate);
+    }
+}
+
+size_t AudioMixerBase::getUnreleasedFrames(int name) const
+{
+    const auto it = mTracks.find(name);
+    if (it != mTracks.end()) {
+        return it->second->getUnreleasedFrames();
+    }
+    return 0;
+}
+
+std::string AudioMixerBase::trackNames() const
+{
+    std::stringstream ss;
+    for (const auto &pair : mTracks) {
+        ss << pair.first << " ";
+    }
+    return ss.str();
+}
+
+void AudioMixerBase::process__validate()
+{
+    // TODO: fix all16BitsStereoNoResample logic to
+    // either properly handle muted tracks (it should ignore them)
+    // or remove altogether as an obsolete optimization.
+    bool all16BitsStereoNoResample = true;
+    bool resampling = false;
+    bool volumeRamp = false;
+
+    mEnabled.clear();
+    mGroups.clear();
+    for (const auto &pair : mTracks) {
+        const int name = pair.first;
+        const std::shared_ptr<TrackBase> &t = pair.second;
+        if (!t->enabled) continue;
+
+        mEnabled.emplace_back(name);  // we add to mEnabled in order of name.
+        mGroups[t->mainBuffer].emplace_back(name); // mGroups also in order of name.
+
+        uint32_t n = 0;
+        // FIXME can overflow (mask is only 3 bits)
+        n |= NEEDS_CHANNEL_1 + t->channelCount - 1;
+        if (t->doesResample()) {
+            n |= NEEDS_RESAMPLE;
+        }
+        if (t->auxLevel != 0 && t->auxBuffer != NULL) {
+            n |= NEEDS_AUX;
+        }
+
+        if (t->volumeInc[0]|t->volumeInc[1]) {
+            volumeRamp = true;
+        } else if (!t->doesResample() && t->volumeRL == 0) {
+            n |= NEEDS_MUTE;
+        }
+        t->needs = n;
+
+        if (n & NEEDS_MUTE) {
+            t->hook = &TrackBase::track__nop;
+        } else {
+            if (n & NEEDS_AUX) {
+                all16BitsStereoNoResample = false;
+            }
+            if (n & NEEDS_RESAMPLE) {
+                all16BitsStereoNoResample = false;
+                resampling = true;
+                t->hook = TrackBase::getTrackHook(TRACKTYPE_RESAMPLE, t->mMixerChannelCount,
+                        t->mMixerInFormat, t->mMixerFormat);
+                ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
+                        "Track %d needs downmix + resample", name);
+            } else {
+                if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
+                    t->hook = TrackBase::getTrackHook(
+                            (t->mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO  // TODO: MONO_HACK
+                                    && t->channelMask == AUDIO_CHANNEL_OUT_MONO)
+                                ? TRACKTYPE_NORESAMPLEMONO : TRACKTYPE_NORESAMPLE,
+                            t->mMixerChannelCount,
+                            t->mMixerInFormat, t->mMixerFormat);
+                    all16BitsStereoNoResample = false;
+                }
+                if ((n & NEEDS_CHANNEL_COUNT__MASK) >= NEEDS_CHANNEL_2){
+                    t->hook = TrackBase::getTrackHook(TRACKTYPE_NORESAMPLE, t->mMixerChannelCount,
+                            t->mMixerInFormat, t->mMixerFormat);
+                    ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
+                            "Track %d needs downmix", name);
+                }
+            }
+        }
+    }
+
+    // select the processing hooks
+    mHook = &AudioMixerBase::process__nop;
+    if (mEnabled.size() > 0) {
+        if (resampling) {
+            if (mOutputTemp.get() == nullptr) {
+                mOutputTemp.reset(new int32_t[MAX_NUM_CHANNELS * mFrameCount]);
+            }
+            if (mResampleTemp.get() == nullptr) {
+                mResampleTemp.reset(new int32_t[MAX_NUM_CHANNELS * mFrameCount]);
+            }
+            mHook = &AudioMixerBase::process__genericResampling;
+        } else {
+            // we keep temp arrays around.
+            mHook = &AudioMixerBase::process__genericNoResampling;
+            if (all16BitsStereoNoResample && !volumeRamp) {
+                if (mEnabled.size() == 1) {
+                    const std::shared_ptr<TrackBase> &t = mTracks[mEnabled[0]];
+                    if ((t->needs & NEEDS_MUTE) == 0) {
+                        // The check prevents a muted track from acquiring a process hook.
+                        //
+                        // This is dangerous if the track is MONO as that requires
+                        // special case handling due to implicit channel duplication.
+                        // Stereo or Multichannel should actually be fine here.
+                        mHook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+                                t->mMixerChannelCount, t->mMixerInFormat, t->mMixerFormat);
+                    }
+                }
+            }
+        }
+    }
+
+    ALOGV("mixer configuration change: %zu "
+        "all16BitsStereoNoResample=%d, resampling=%d, volumeRamp=%d",
+        mEnabled.size(), all16BitsStereoNoResample, resampling, volumeRamp);
+
+    process();
+
+    // Now that the volume ramp has been done, set optimal state and
+    // track hooks for subsequent mixer process
+    if (mEnabled.size() > 0) {
+        bool allMuted = true;
+
+        for (const int name : mEnabled) {
+            const std::shared_ptr<TrackBase> &t = mTracks[name];
+            if (!t->doesResample() && t->volumeRL == 0) {
+                t->needs |= NEEDS_MUTE;
+                t->hook = &TrackBase::track__nop;
+            } else {
+                allMuted = false;
+            }
+        }
+        if (allMuted) {
+            mHook = &AudioMixerBase::process__nop;
+        } else if (all16BitsStereoNoResample) {
+            if (mEnabled.size() == 1) {
+                //const int i = 31 - __builtin_clz(enabledTracks);
+                const std::shared_ptr<TrackBase> &t = mTracks[mEnabled[0]];
+                // Muted single tracks handled by allMuted above.
+                mHook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+                        t->mMixerChannelCount, t->mMixerInFormat, t->mMixerFormat);
+            }
+        }
+    }
+}
+
+void AudioMixerBase::TrackBase::track__genericResample(
+        int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux)
+{
+    ALOGVV("track__genericResample\n");
+    mResampler->setSampleRate(sampleRate);
+
+    // ramp gain - resample to temp buffer and scale/mix in 2nd step
+    if (aux != NULL) {
+        // always resample with unity gain when sending to auxiliary buffer to be able
+        // to apply send level after resampling
+        mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
+        memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(int32_t));
+        mResampler->resample(temp, outFrameCount, bufferProvider);
+        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
+            volumeRampStereo(out, outFrameCount, temp, aux);
+        } else {
+            volumeStereo(out, outFrameCount, temp, aux);
+        }
+    } else {
+        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
+            mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
+            memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t));
+            mResampler->resample(temp, outFrameCount, bufferProvider);
+            volumeRampStereo(out, outFrameCount, temp, aux);
+        }
+
+        // constant gain
+        else {
+            mResampler->setVolume(mVolume[0], mVolume[1]);
+            mResampler->resample(out, outFrameCount, bufferProvider);
+        }
+    }
+}
+
+void AudioMixerBase::TrackBase::track__nop(int32_t* out __unused,
+        size_t outFrameCount __unused, int32_t* temp __unused, int32_t* aux __unused)
+{
+}
+
+void AudioMixerBase::TrackBase::volumeRampStereo(
+        int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
+{
+    int32_t vl = prevVolume[0];
+    int32_t vr = prevVolume[1];
+    const int32_t vlInc = volumeInc[0];
+    const int32_t vrInc = volumeInc[1];
+
+    //ALOGD("[0] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+    //        t, vlInc/65536.0f, vl/65536.0f, volume[0],
+    //       (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+    // ramp volume
+    if (CC_UNLIKELY(aux != NULL)) {
+        int32_t va = prevAuxLevel;
+        const int32_t vaInc = auxInc;
+        int32_t l;
+        int32_t r;
+
+        do {
+            l = (*temp++ >> 12);
+            r = (*temp++ >> 12);
+            *out++ += (vl >> 16) * l;
+            *out++ += (vr >> 16) * r;
+            *aux++ += (va >> 17) * (l + r);
+            vl += vlInc;
+            vr += vrInc;
+            va += vaInc;
+        } while (--frameCount);
+        prevAuxLevel = va;
+    } else {
+        do {
+            *out++ += (vl >> 16) * (*temp++ >> 12);
+            *out++ += (vr >> 16) * (*temp++ >> 12);
+            vl += vlInc;
+            vr += vrInc;
+        } while (--frameCount);
+    }
+    prevVolume[0] = vl;
+    prevVolume[1] = vr;
+    adjustVolumeRamp(aux != NULL);
+}
+
+void AudioMixerBase::TrackBase::volumeStereo(
+        int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
+{
+    const int16_t vl = volume[0];
+    const int16_t vr = volume[1];
+
+    if (CC_UNLIKELY(aux != NULL)) {
+        const int16_t va = auxLevel;
+        do {
+            int16_t l = (int16_t)(*temp++ >> 12);
+            int16_t r = (int16_t)(*temp++ >> 12);
+            out[0] = mulAdd(l, vl, out[0]);
+            int16_t a = (int16_t)(((int32_t)l + r) >> 1);
+            out[1] = mulAdd(r, vr, out[1]);
+            out += 2;
+            aux[0] = mulAdd(a, va, aux[0]);
+            aux++;
+        } while (--frameCount);
+    } else {
+        do {
+            int16_t l = (int16_t)(*temp++ >> 12);
+            int16_t r = (int16_t)(*temp++ >> 12);
+            out[0] = mulAdd(l, vl, out[0]);
+            out[1] = mulAdd(r, vr, out[1]);
+            out += 2;
+        } while (--frameCount);
+    }
+}
+
+void AudioMixerBase::TrackBase::track__16BitsStereo(
+        int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
+{
+    ALOGVV("track__16BitsStereo\n");
+    const int16_t *in = static_cast<const int16_t *>(mIn);
+
+    if (CC_UNLIKELY(aux != NULL)) {
+        int32_t l;
+        int32_t r;
+        // ramp gain
+        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
+            int32_t vl = prevVolume[0];
+            int32_t vr = prevVolume[1];
+            int32_t va = prevAuxLevel;
+            const int32_t vlInc = volumeInc[0];
+            const int32_t vrInc = volumeInc[1];
+            const int32_t vaInc = auxInc;
+            // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+            //        t, vlInc/65536.0f, vl/65536.0f, volume[0],
+            //        (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+            do {
+                l = (int32_t)*in++;
+                r = (int32_t)*in++;
+                *out++ += (vl >> 16) * l;
+                *out++ += (vr >> 16) * r;
+                *aux++ += (va >> 17) * (l + r);
+                vl += vlInc;
+                vr += vrInc;
+                va += vaInc;
+            } while (--frameCount);
+
+            prevVolume[0] = vl;
+            prevVolume[1] = vr;
+            prevAuxLevel = va;
+            adjustVolumeRamp(true);
+        }
+
+        // constant gain
+        else {
+            const uint32_t vrl = volumeRL;
+            const int16_t va = (int16_t)auxLevel;
+            do {
+                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+                int16_t a = (int16_t)(((int32_t)in[0] + in[1]) >> 1);
+                in += 2;
+                out[0] = mulAddRL(1, rl, vrl, out[0]);
+                out[1] = mulAddRL(0, rl, vrl, out[1]);
+                out += 2;
+                aux[0] = mulAdd(a, va, aux[0]);
+                aux++;
+            } while (--frameCount);
+        }
+    } else {
+        // ramp gain
+        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
+            int32_t vl = prevVolume[0];
+            int32_t vr = prevVolume[1];
+            const int32_t vlInc = volumeInc[0];
+            const int32_t vrInc = volumeInc[1];
+
+            // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+            //        t, vlInc/65536.0f, vl/65536.0f, volume[0],
+            //        (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+            do {
+                *out++ += (vl >> 16) * (int32_t) *in++;
+                *out++ += (vr >> 16) * (int32_t) *in++;
+                vl += vlInc;
+                vr += vrInc;
+            } while (--frameCount);
+
+            prevVolume[0] = vl;
+            prevVolume[1] = vr;
+            adjustVolumeRamp(false);
+        }
+
+        // constant gain
+        else {
+            const uint32_t vrl = volumeRL;
+            do {
+                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+                in += 2;
+                out[0] = mulAddRL(1, rl, vrl, out[0]);
+                out[1] = mulAddRL(0, rl, vrl, out[1]);
+                out += 2;
+            } while (--frameCount);
+        }
+    }
+    mIn = in;
+}
+
+void AudioMixerBase::TrackBase::track__16BitsMono(
+        int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
+{
+    ALOGVV("track__16BitsMono\n");
+    const int16_t *in = static_cast<int16_t const *>(mIn);
+
+    if (CC_UNLIKELY(aux != NULL)) {
+        // ramp gain
+        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
+            int32_t vl = prevVolume[0];
+            int32_t vr = prevVolume[1];
+            int32_t va = prevAuxLevel;
+            const int32_t vlInc = volumeInc[0];
+            const int32_t vrInc = volumeInc[1];
+            const int32_t vaInc = auxInc;
+
+            // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+            //         t, vlInc/65536.0f, vl/65536.0f, volume[0],
+            //         (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+            do {
+                int32_t l = *in++;
+                *out++ += (vl >> 16) * l;
+                *out++ += (vr >> 16) * l;
+                *aux++ += (va >> 16) * l;
+                vl += vlInc;
+                vr += vrInc;
+                va += vaInc;
+            } while (--frameCount);
+
+            prevVolume[0] = vl;
+            prevVolume[1] = vr;
+            prevAuxLevel = va;
+            adjustVolumeRamp(true);
+        }
+        // constant gain
+        else {
+            const int16_t vl = volume[0];
+            const int16_t vr = volume[1];
+            const int16_t va = (int16_t)auxLevel;
+            do {
+                int16_t l = *in++;
+                out[0] = mulAdd(l, vl, out[0]);
+                out[1] = mulAdd(l, vr, out[1]);
+                out += 2;
+                aux[0] = mulAdd(l, va, aux[0]);
+                aux++;
+            } while (--frameCount);
+        }
+    } else {
+        // ramp gain
+        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
+            int32_t vl = prevVolume[0];
+            int32_t vr = prevVolume[1];
+            const int32_t vlInc = volumeInc[0];
+            const int32_t vrInc = volumeInc[1];
+
+            // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+            //         t, vlInc/65536.0f, vl/65536.0f, volume[0],
+            //         (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+            do {
+                int32_t l = *in++;
+                *out++ += (vl >> 16) * l;
+                *out++ += (vr >> 16) * l;
+                vl += vlInc;
+                vr += vrInc;
+            } while (--frameCount);
+
+            prevVolume[0] = vl;
+            prevVolume[1] = vr;
+            adjustVolumeRamp(false);
+        }
+        // constant gain
+        else {
+            const int16_t vl = volume[0];
+            const int16_t vr = volume[1];
+            do {
+                int16_t l = *in++;
+                out[0] = mulAdd(l, vl, out[0]);
+                out[1] = mulAdd(l, vr, out[1]);
+                out += 2;
+            } while (--frameCount);
+        }
+    }
+    mIn = in;
+}
+
+// no-op case
+void AudioMixerBase::process__nop()
+{
+    ALOGVV("process__nop\n");
+
+    for (const auto &pair : mGroups) {
+        // process by group of tracks with same output buffer to
+        // avoid multiple memset() on same buffer
+        const auto &group = pair.second;
+
+        const std::shared_ptr<TrackBase> &t = mTracks[group[0]];
+        memset(t->mainBuffer, 0,
+                mFrameCount * audio_bytes_per_frame(t->getMixerChannelCount(), t->mMixerFormat));
+
+        // now consume data
+        for (const int name : group) {
+            const std::shared_ptr<TrackBase> &t = mTracks[name];
+            size_t outFrames = mFrameCount;
+            while (outFrames) {
+                t->buffer.frameCount = outFrames;
+                t->bufferProvider->getNextBuffer(&t->buffer);
+                if (t->buffer.raw == NULL) break;
+                outFrames -= t->buffer.frameCount;
+                t->bufferProvider->releaseBuffer(&t->buffer);
+            }
+        }
+    }
+}
+
+// generic code without resampling
+void AudioMixerBase::process__genericNoResampling()
+{
+    ALOGVV("process__genericNoResampling\n");
+    int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));
+
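+    // Roughly: for each group (tracks sharing a main buffer), pull input in chunks of at
+    // most BLOCKSIZE frames, let each enabled track's hook accumulate into outTemp (Q4.27
+    // or float, per mMixerInFormat), then convert outTemp into the group's output buffer
+    // via convertMixerFormat().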
+    for (const auto &pair : mGroups) {
+        // process by group of tracks with same output main buffer to
+        // avoid multiple memset() on same buffer
+        const auto &group = pair.second;
+
+        // acquire buffer
+        for (const int name : group) {
+            const std::shared_ptr<TrackBase> &t = mTracks[name];
+            t->buffer.frameCount = mFrameCount;
+            t->bufferProvider->getNextBuffer(&t->buffer);
+            t->frameCount = t->buffer.frameCount;
+            t->mIn = t->buffer.raw;
+        }
+
+        int32_t *out = (int *)pair.first;
+        size_t numFrames = 0;
+        do {
+            const size_t frameCount = std::min((size_t)BLOCKSIZE, mFrameCount - numFrames);
+            memset(outTemp, 0, sizeof(outTemp));
+            for (const int name : group) {
+                const std::shared_ptr<TrackBase> &t = mTracks[name];
+                int32_t *aux = NULL;
+                if (CC_UNLIKELY(t->needs & NEEDS_AUX)) {
+                    aux = t->auxBuffer + numFrames;
+                }
+                for (int outFrames = frameCount; outFrames > 0; ) {
+                    // t->mIn == nullptr can happen if the track was flushed just after having
+                    // been enabled for mixing.
+                    if (t->mIn == nullptr) {
+                        break;
+                    }
+                    size_t inFrames = (t->frameCount > outFrames)?outFrames:t->frameCount;
+                    if (inFrames > 0) {
+                        (t.get()->*t->hook)(
+                                outTemp + (frameCount - outFrames) * t->mMixerChannelCount,
+                                inFrames, mResampleTemp.get() /* naked ptr */, aux);
+                        t->frameCount -= inFrames;
+                        outFrames -= inFrames;
+                        if (CC_UNLIKELY(aux != NULL)) {
+                            aux += inFrames;
+                        }
+                    }
+                    if (t->frameCount == 0 && outFrames) {
+                        t->bufferProvider->releaseBuffer(&t->buffer);
+                        t->buffer.frameCount = (mFrameCount - numFrames) -
+                                (frameCount - outFrames);
+                        t->bufferProvider->getNextBuffer(&t->buffer);
+                        t->mIn = t->buffer.raw;
+                        if (t->mIn == nullptr) {
+                            break;
+                        }
+                        t->frameCount = t->buffer.frameCount;
+                    }
+                }
+            }
+
+            const std::shared_ptr<TrackBase> &t1 = mTracks[group[0]];
+            convertMixerFormat(out, t1->mMixerFormat, outTemp, t1->mMixerInFormat,
+                    frameCount * t1->mMixerChannelCount);
+            // TODO: fix ugly casting due to choice of out pointer type
+            out = reinterpret_cast<int32_t*>((uint8_t*)out
+                    + frameCount * t1->mMixerChannelCount
+                    * audio_bytes_per_sample(t1->mMixerFormat));
+            numFrames += frameCount;
+        } while (numFrames < mFrameCount);
+
+        // release each track's buffer
+        for (const int name : group) {
+            const std::shared_ptr<TrackBase> &t = mTracks[name];
+            t->bufferProvider->releaseBuffer(&t->buffer);
+        }
+    }
+}
+
+// generic code with resampling
+void AudioMixerBase::process__genericResampling()
+{
+    ALOGVV("process__genericResampling\n");
+    int32_t * const outTemp = mOutputTemp.get(); // naked ptr
+    size_t numFrames = mFrameCount;
+
+    for (const auto &pair : mGroups) {
+        const auto &group = pair.second;
+        const std::shared_ptr<TrackBase> &t1 = mTracks[group[0]];
+
+        // clear temp buffer
+        memset(outTemp, 0, sizeof(*outTemp) * t1->mMixerChannelCount * mFrameCount);
+        for (const int name : group) {
+            const std::shared_ptr<TrackBase> &t = mTracks[name];
+            int32_t *aux = NULL;
+            if (CC_UNLIKELY(t->needs & NEEDS_AUX)) {
+                aux = t->auxBuffer;
+            }
+
+            // This is a little goofy: in the resampling case we don't
+            // acquire/release the buffers because that is done by
+            // the resampler.
+            if (t->needs & NEEDS_RESAMPLE) {
+                (t.get()->*t->hook)(outTemp, numFrames, mResampleTemp.get() /* naked ptr */, aux);
+            } else {
+
+                size_t outFrames = 0;
+
+                while (outFrames < numFrames) {
+                    t->buffer.frameCount = numFrames - outFrames;
+                    t->bufferProvider->getNextBuffer(&t->buffer);
+                    t->mIn = t->buffer.raw;
+                    // t->mIn == nullptr can happen if the track was flushed just after having
+                    // been enabled for mixing.
+                    if (t->mIn == nullptr) break;
+
+                    (t.get()->*t->hook)(
+                            outTemp + outFrames * t->mMixerChannelCount, t->buffer.frameCount,
+                            mResampleTemp.get() /* naked ptr */,
+                            aux != nullptr ? aux + outFrames : nullptr);
+                    outFrames += t->buffer.frameCount;
+
+                    t->bufferProvider->releaseBuffer(&t->buffer);
+                }
+            }
+        }
+        convertMixerFormat(t1->mainBuffer, t1->mMixerFormat,
+                outTemp, t1->mMixerInFormat, numFrames * t1->mMixerChannelCount);
+    }
+}
+
+// one track, 16 bits stereo without resampling is the most common case
+void AudioMixerBase::process__oneTrack16BitsStereoNoResampling()
+{
+    ALOGVV("process__oneTrack16BitsStereoNoResampling\n");
+    LOG_ALWAYS_FATAL_IF(mEnabled.size() != 1,
+            "%zu != 1 tracks enabled", mEnabled.size());
+    const int name = mEnabled[0];
+    const std::shared_ptr<TrackBase> &t = mTracks[name];
+
+    AudioBufferProvider::Buffer& b(t->buffer);
+
+    int32_t* out = t->mainBuffer;
+    float *fout = reinterpret_cast<float*>(out);
+    size_t numFrames = mFrameCount;
+
+    const int16_t vl = t->volume[0];
+    const int16_t vr = t->volume[1];
+    const uint32_t vrl = t->volumeRL;
+    while (numFrames) {
+        b.frameCount = numFrames;
+        t->bufferProvider->getNextBuffer(&b);
+        const int16_t *in = b.i16;
+
+        // in == NULL can happen if the track was flushed just after having
+        // been enabled for mixing.
+        if (in == NULL || (((uintptr_t)in) & 3)) {
+            if (t->mMixerFormat == AUDIO_FORMAT_PCM_FLOAT) {
+                memset((char*)fout, 0, numFrames
+                        * t->mMixerChannelCount * audio_bytes_per_sample(t->mMixerFormat));
+            } else {
+                memset((char*)out, 0, numFrames
+                        * t->mMixerChannelCount * audio_bytes_per_sample(t->mMixerFormat));
+            }
+            ALOGE_IF((((uintptr_t)in) & 3),
+                    "process__oneTrack16BitsStereoNoResampling: misaligned buffer"
+                    " %p track %d, channels %d, needs %08x, volume %08x vfl %f vfr %f",
+                    in, name, t->channelCount, t->needs, vrl, t->mVolume[0], t->mVolume[1]);
+            return;
+        }
+        size_t outFrames = b.frameCount;
+
+        switch (t->mMixerFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            do {
+                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+                in += 2;
+                int32_t l = mulRL(1, rl, vrl);
+                int32_t r = mulRL(0, rl, vrl);
+                *fout++ = float_from_q4_27(l);
+                *fout++ = float_from_q4_27(r);
+                // Note: In case of later int16_t sink output,
+                // conversion and clamping is done by memcpy_to_i16_from_float().
+            } while (--outFrames);
+            break;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN_INT || uint32_t(vr) > UNITY_GAIN_INT)) {
+                // volume is boosted, so we might need to clamp even though
+                // we process only one track.
+                do {
+                    uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+                    in += 2;
+                    int32_t l = mulRL(1, rl, vrl) >> 12;
+                    int32_t r = mulRL(0, rl, vrl) >> 12;
+                    // clamping...
+                    l = clamp16(l);
+                    r = clamp16(r);
+                    *out++ = (r<<16) | (l & 0xFFFF);
+                } while (--outFrames);
+            } else {
+                do {
+                    uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+                    in += 2;
+                    int32_t l = mulRL(1, rl, vrl) >> 12;
+                    int32_t r = mulRL(0, rl, vrl) >> 12;
+                    *out++ = (r<<16) | (l & 0xFFFF);
+                } while (--outFrames);
+            }
+            break;
+        default:
+            LOG_ALWAYS_FATAL("bad mixer format: %d", t->mMixerFormat);
+        }
+        numFrames -= b.frameCount;
+        t->bufferProvider->releaseBuffer(&b);
+    }
+}
+
+/* TODO: consider whether this level of optimization is necessary.
+ * Perhaps just stick with a single for loop.
+ */
+
+// Needs to derive a compile time constant (constexpr).  Could be targeted to go
+// to a MONOVOL mixtype based on MAX_NUM_VOLUMES, but that's an unnecessary complication.
+#define MIXTYPE_MONOVOL(mixtype) ((mixtype) == MIXTYPE_MULTI ? MIXTYPE_MULTI_MONOVOL : \
+        (mixtype) == MIXTYPE_MULTI_SAVEONLY ? MIXTYPE_MULTI_SAVEONLY_MONOVOL : (mixtype))
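+// For example, MIXTYPE_MONOVOL(MIXTYPE_MULTI) expands to MIXTYPE_MULTI_MONOVOL and
+// MIXTYPE_MONOVOL(MIXTYPE_MULTI_SAVEONLY) expands to MIXTYPE_MULTI_SAVEONLY_MONOVOL;
+// any other mixtype is passed through unchanged.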
+
+/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
+ */
+template <int MIXTYPE,
+        typename TO, typename TI, typename TV, typename TA, typename TAV>
+static void volumeRampMulti(uint32_t channels, TO* out, size_t frameCount,
+        const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc)
+{
+    switch (channels) {
+    case 1:
+        volumeRampMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 2:
+        volumeRampMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 3:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 4:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 5:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 6:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 7:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 8:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    }
+}
+
+/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
+ */
+template <int MIXTYPE,
+        typename TO, typename TI, typename TV, typename TA, typename TAV>
+static void volumeMulti(uint32_t channels, TO* out, size_t frameCount,
+        const TI* in, TA* aux, const TV *vol, TAV vola)
+{
+    switch (channels) {
+    case 1:
+        volumeMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 2:
+        volumeMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 3:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 4:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 5:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 6:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 7:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 8:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out, frameCount, in, aux, vol, vola);
+        break;
+    }
+}
+
+/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * USEFLOATVOL (set to true if float volume is used)
+ * ADJUSTVOL   (set to true if volume ramp parameters needs adjustment afterwards)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
+ */
+template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
+    typename TO, typename TI, typename TA>
+void AudioMixerBase::TrackBase::volumeMix(TO *out, size_t outFrames,
+        const TI *in, TA *aux, bool ramp)
+{
+    if (USEFLOATVOL) {
+        if (ramp) {
+            volumeRampMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+                    mPrevVolume, mVolumeInc,
+#ifdef FLOAT_AUX
+                    &mPrevAuxLevel, mAuxInc
+#else
+                    &prevAuxLevel, auxInc
+#endif
+                );
+            if (ADJUSTVOL) {
+                adjustVolumeRamp(aux != NULL, true);
+            }
+        } else {
+            volumeMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+                    mVolume,
+#ifdef FLOAT_AUX
+                    mAuxLevel
+#else
+                    auxLevel
+#endif
+            );
+        }
+    } else {
+        if (ramp) {
+            volumeRampMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+                    prevVolume, volumeInc, &prevAuxLevel, auxInc);
+            if (ADJUSTVOL) {
+                adjustVolumeRamp(aux != NULL);
+            }
+        } else {
+            volumeMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+                    volume, auxLevel);
+        }
+    }
+}
+
+/* This process hook is called when there is a single track without
+ * aux buffer, volume ramp, or resampling.
+ * TODO: Update the hook selection: this can properly handle aux and ramp.
+ *
+ * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+template <int MIXTYPE, typename TO, typename TI, typename TA>
+void AudioMixerBase::process__noResampleOneTrack()
+{
+    ALOGVV("process__noResampleOneTrack\n");
+    LOG_ALWAYS_FATAL_IF(mEnabled.size() != 1,
+            "%zu != 1 tracks enabled", mEnabled.size());
+    const std::shared_ptr<TrackBase> &t = mTracks[mEnabled[0]];
+    const uint32_t channels = t->mMixerChannelCount;
+    TO* out = reinterpret_cast<TO*>(t->mainBuffer);
+    TA* aux = reinterpret_cast<TA*>(t->auxBuffer);
+    const bool ramp = t->needsRamp();
+
+    for (size_t numFrames = mFrameCount; numFrames > 0; ) {
+        AudioBufferProvider::Buffer& b(t->buffer);
+        // get input buffer
+        b.frameCount = numFrames;
+        t->bufferProvider->getNextBuffer(&b);
+        const TI *in = reinterpret_cast<TI*>(b.raw);
+
+        // in == NULL can happen if the track was flushed just after having
+        // been enabled for mixing.
+        if (in == NULL || (((uintptr_t)in) & 3)) {
+            memset(out, 0, numFrames
+                    * channels * audio_bytes_per_sample(t->mMixerFormat));
+            ALOGE_IF((((uintptr_t)in) & 3), "process__noResampleOneTrack: bus error: "
+                    "buffer %p track %p, channels %d, needs %#x",
+                    in, &t, t->channelCount, t->needs);
+            return;
+        }
+
+        const size_t outFrames = b.frameCount;
+        t->volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, false /* ADJUSTVOL */> (
+                out, outFrames, in, aux, ramp);
+
+        out += outFrames * channels;
+        if (aux != NULL) {
+            aux += outFrames;
+        }
+        numFrames -= b.frameCount;
+
+        // release buffer
+        t->bufferProvider->releaseBuffer(&b);
+    }
+    if (ramp) {
+        t->adjustVolumeRamp(aux != NULL, is_same<TI, float>::value);
+    }
+}
+
+/* This track hook is called to do resampling then mixing,
+ * pulling from the track's upstream AudioBufferProvider.
+ *
+ * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
+ */
+template <int MIXTYPE, typename TO, typename TI, typename TA>
+void AudioMixerBase::TrackBase::track__Resample(TO* out, size_t outFrameCount, TO* temp, TA* aux)
+{
+    ALOGVV("track__Resample\n");
+    mResampler->setSampleRate(sampleRate);
+    const bool ramp = needsRamp();
+    if (ramp || aux != NULL) {
+        // if ramp:        resample with unity gain to temp buffer and scale/mix in 2nd step.
+        // if aux != NULL: resample with unity gain to temp buffer then apply send level.
+
+        mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
+        memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(TO));
+        mResampler->resample((int32_t*)temp, outFrameCount, bufferProvider);
+
+        volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, true /* ADJUSTVOL */>(
+                out, outFrameCount, temp, aux, ramp);
+
+    } else { // constant volume gain
+        mResampler->setVolume(mVolume[0], mVolume[1]);
+        mResampler->resample((int32_t*)out, outFrameCount, bufferProvider);
+    }
+}
+
+/* This track hook is called to mix a track, when no resampling is required.
+ * The input buffer should be present in mIn.
+ *
+ * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
+ */
+template <int MIXTYPE, typename TO, typename TI, typename TA>
+void AudioMixerBase::TrackBase::track__NoResample(
+        TO* out, size_t frameCount, TO* temp __unused, TA* aux)
+{
+    ALOGVV("track__NoResample\n");
+    const TI *in = static_cast<const TI *>(mIn);
+
+    volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, true /* ADJUSTVOL */>(
+            out, frameCount, in, aux, needsRamp());
+
+    // MIXTYPE_MONOEXPAND reads a single input channel and expands to NCHAN output channels.
+    // MIXTYPE_MULTI reads NCHAN input channels and places to NCHAN output channels.
+    in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * mMixerChannelCount;
+    mIn = in;
+}
+
+/* The Mixer engine generates either int32_t (Q4_27) or float data.
+ * We use this function to convert the engine buffers
+ * to the desired mixer output format, either int16_t (Q.15) or float.
+ */
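+/* For reference: Q4.27 here is a signed 32-bit value with 27 fractional bits, so unity
+ * gain is 1 << 27.  Converting a Q4.27 sample to Q0.15 (int16_t) is a right shift by 12
+ * with saturation, e.g.:
+ *
+ *   int32_t q4_27 = 1 << 27;               // 1.0 in Q4.27
+ *   int16_t q0_15 = clamp16(q4_27 >> 12);  // saturates to 0x7fff (just under 1.0)
+ */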
+/* static */
+void AudioMixerBase::convertMixerFormat(void *out, audio_format_t mixerOutFormat,
+        void *in, audio_format_t mixerInFormat, size_t sampleCount)
+{
+    switch (mixerInFormat) {
+    case AUDIO_FORMAT_PCM_FLOAT:
+        switch (mixerOutFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            memcpy(out, in, sampleCount * sizeof(float)); // MEMCPY. TODO optimize out
+            break;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            memcpy_to_i16_from_float((int16_t*)out, (float*)in, sampleCount);
+            break;
+        default:
+            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+            break;
+        }
+        break;
+    case AUDIO_FORMAT_PCM_16_BIT:
+        switch (mixerOutFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            memcpy_to_float_from_q4_27((float*)out, (const int32_t*)in, sampleCount);
+            break;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            memcpy_to_i16_from_q4_27((int16_t*)out, (const int32_t*)in, sampleCount);
+            break;
+        default:
+            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+            break;
+        }
+        break;
+    default:
+        LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+        break;
+    }
+}
+
+/* Returns the proper track hook to use for mixing the track into the output buffer.
+ */
+/* static */
+AudioMixerBase::hook_t AudioMixerBase::TrackBase::getTrackHook(int trackType, uint32_t channelCount,
+        audio_format_t mixerInFormat, audio_format_t mixerOutFormat __unused)
+{
+    if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
+        switch (trackType) {
+        case TRACKTYPE_NOP:
+            return &TrackBase::track__nop;
+        case TRACKTYPE_RESAMPLE:
+            return &TrackBase::track__genericResample;
+        case TRACKTYPE_NORESAMPLEMONO:
+            return &TrackBase::track__16BitsMono;
+        case TRACKTYPE_NORESAMPLE:
+            return &TrackBase::track__16BitsStereo;
+        default:
+            LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
+            break;
+        }
+    }
+    LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
+    switch (trackType) {
+    case TRACKTYPE_NOP:
+        return &TrackBase::track__nop;
+    case TRACKTYPE_RESAMPLE:
+        switch (mixerInFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            return (AudioMixerBase::hook_t) &TrackBase::track__Resample<
+                    MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            return (AudioMixerBase::hook_t) &TrackBase::track__Resample<
+                    MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
+        default:
+            LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+            break;
+        }
+        break;
+    case TRACKTYPE_NORESAMPLEMONO:
+        switch (mixerInFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
+                            MIXTYPE_MONOEXPAND, float /*TO*/, float /*TI*/, TYPE_AUX>;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
+                            MIXTYPE_MONOEXPAND, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
+        default:
+            LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+            break;
+        }
+        break;
+    case TRACKTYPE_NORESAMPLE:
+        switch (mixerInFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
+                    MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
+                    MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
+        default:
+            LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+            break;
+        }
+        break;
+    default:
+        LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
+        break;
+    }
+    return NULL;
+}
+
+/* Returns the proper process hook for mixing tracks. Currently works only for
+ * PROCESSTYPE_NORESAMPLEONETRACK, a mix involving one track, no resampling.
+ *
+ * TODO: Due to the special mixing considerations of duplicating to
+ * a stereo output track, the input track cannot be MONO.  This should be
+ * prevented by the caller.
+ */
+/* static */
+AudioMixerBase::process_hook_t AudioMixerBase::getProcessHook(
+        int processType, uint32_t channelCount,
+        audio_format_t mixerInFormat, audio_format_t mixerOutFormat)
+{
+    if (processType != PROCESSTYPE_NORESAMPLEONETRACK) { // Only NORESAMPLEONETRACK
+        LOG_ALWAYS_FATAL("bad processType: %d", processType);
+        return NULL;
+    }
+    if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
+        return &AudioMixerBase::process__oneTrack16BitsStereoNoResampling;
+    }
+    LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
+    switch (mixerInFormat) {
+    case AUDIO_FORMAT_PCM_FLOAT:
+        switch (mixerOutFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            return &AudioMixerBase::process__noResampleOneTrack<
+                    MIXTYPE_MULTI_SAVEONLY, float /*TO*/, float /*TI*/, TYPE_AUX>;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            return &AudioMixerBase::process__noResampleOneTrack<
+                    MIXTYPE_MULTI_SAVEONLY, int16_t /*TO*/, float /*TI*/, TYPE_AUX>;
+        default:
+            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+            break;
+        }
+        break;
+    case AUDIO_FORMAT_PCM_16_BIT:
+        switch (mixerOutFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            return &AudioMixerBase::process__noResampleOneTrack<
+                    MIXTYPE_MULTI_SAVEONLY, float /*TO*/, int16_t /*TI*/, TYPE_AUX>;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            return &AudioMixerBase::process__noResampleOneTrack<
+                    MIXTYPE_MULTI_SAVEONLY, int16_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
+        default:
+            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+            break;
+        }
+        break;
+    default:
+        LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+        break;
+    }
+    return NULL;
+}
+
+// ----------------------------------------------------------------------------
+} // namespace android
diff --git a/media/libaudioprocessing/include/media/AudioMixer.h b/media/libaudioprocessing/include/media/AudioMixer.h
new file mode 100644
index 0000000..3f7cd48
--- /dev/null
+++ b/media/libaudioprocessing/include/media/AudioMixer.h
@@ -0,0 +1,238 @@
+/*
+**
+** Copyright 2007, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_AUDIO_MIXER_H
+#define ANDROID_AUDIO_MIXER_H
+
+#include <pthread.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <android/os/IExternalVibratorService.h>
+#include <media/AudioMixerBase.h>
+#include <media/BufferProviders.h>
+#include <utils/threads.h>
+
+// FIXME This is actually unity gain, which might not be max in future, expressed in U.12
+#define MAX_GAIN_INT AudioMixerBase::UNITY_GAIN_INT
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+// AudioMixer extends AudioMixerBase by adding support for down- and up-mixing
+// and time stretch, which are implemented via the Effects HAL, and by adding
+// support for haptic channels, which depends on the Vibrator service. This is
+// the version that is used by AudioFlinger.
+
+class AudioMixer : public AudioMixerBase
+{
+public:
+    // maximum number of channels supported for the content
+    static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = AUDIO_CHANNEL_COUNT_MAX;
+
+    enum { // extension of AudioMixerBase parameters
+        DOWNMIX_TYPE    = 0x4004,
+        // for haptic
+        HAPTIC_ENABLED  = 0x4007, // Set whether haptic data from this track should be played.
+        HAPTIC_INTENSITY = 0x4008, // Set the intensity to play haptic data.
+        // for target TIMESTRETCH
+        PLAYBACK_RATE   = 0x4300, // Configure timestretch on this track name;
+                                  // parameter 'value' is a pointer to the new playback rate.
+    };
+
+    typedef enum { // Haptic intensity, should keep consistent with VibratorService
+        HAPTIC_SCALE_MUTE = os::IExternalVibratorService::SCALE_MUTE,
+        HAPTIC_SCALE_VERY_LOW = os::IExternalVibratorService::SCALE_VERY_LOW,
+        HAPTIC_SCALE_LOW = os::IExternalVibratorService::SCALE_LOW,
+        HAPTIC_SCALE_NONE = os::IExternalVibratorService::SCALE_NONE,
+        HAPTIC_SCALE_HIGH = os::IExternalVibratorService::SCALE_HIGH,
+        HAPTIC_SCALE_VERY_HIGH = os::IExternalVibratorService::SCALE_VERY_HIGH,
+    } haptic_intensity_t;
+    static constexpr float HAPTIC_SCALE_VERY_LOW_RATIO = 2.0f / 3.0f;
+    static constexpr float HAPTIC_SCALE_LOW_RATIO = 3.0f / 4.0f;
+    static constexpr float HAPTIC_MAX_AMPLITUDE_FLOAT = 1.0f;
+
+    static inline bool isValidHapticIntensity(haptic_intensity_t hapticIntensity) {
+        switch (hapticIntensity) {
+        case HAPTIC_SCALE_MUTE:
+        case HAPTIC_SCALE_VERY_LOW:
+        case HAPTIC_SCALE_LOW:
+        case HAPTIC_SCALE_NONE:
+        case HAPTIC_SCALE_HIGH:
+        case HAPTIC_SCALE_VERY_HIGH:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    AudioMixer(size_t frameCount, uint32_t sampleRate)
+            : AudioMixerBase(frameCount, sampleRate) {
+        pthread_once(&sOnceControl, &sInitRoutine);
+    }
+
+    bool isValidChannelMask(audio_channel_mask_t channelMask) const override;
+
+    void setParameter(int name, int target, int param, void *value) override;
+    void setBufferProvider(int name, AudioBufferProvider* bufferProvider);
+
+private:
+
+    struct Track : public TrackBase {
+        Track() : TrackBase() {}
+
+        ~Track()
+        {
+            // mInputBufferProvider need not be deleted.
+            // Ensure the order of destruction of buffer providers as they
+            // release the upstream provider in the destructor.
+            mTimestretchBufferProvider.reset(nullptr);
+            mPostDownmixReformatBufferProvider.reset(nullptr);
+            mDownmixerBufferProvider.reset(nullptr);
+            mReformatBufferProvider.reset(nullptr);
+            mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
+            mAdjustChannelsBufferProvider.reset(nullptr);
+        }
+
+        uint32_t getOutputChannelCount() override {
+            return mDownmixerBufferProvider.get() != nullptr ? mMixerChannelCount : channelCount;
+        }
+        uint32_t getMixerChannelCount() override {
+            return mMixerChannelCount + mMixerHapticChannelCount;
+        }
+
+        status_t    prepareForDownmix();
+        void        unprepareForDownmix();
+        status_t    prepareForReformat();
+        void        unprepareForReformat();
+        status_t    prepareForAdjustChannels();
+        void        unprepareForAdjustChannels();
+        status_t    prepareForAdjustChannelsNonDestructive(size_t frames);
+        void        unprepareForAdjustChannelsNonDestructive();
+        void        clearContractedBuffer();
+        bool        setPlaybackRate(const AudioPlaybackRate &playbackRate);
+        void        reconfigureBufferProviders();
+
+        /* Buffer providers are constructed to translate the track input data as needed.
+         * See DownmixerBufferProvider below for how the Track buffer provider
+         * is wrapped by another one when downmixing is required.
+         *
+         * TODO: perhaps make a single PlaybackConverterProvider class to move
+         * all pre-mixer track buffer conversions outside the AudioMixer class.
+         *
+         * 1) mInputBufferProvider: The AudioTrack buffer provider.
+         * 2) mAdjustChannelsBufferProvider: Expands or contracts sample data from one interleaved
+         *    channel format to another. Expanded channels are filled with zeros and put at the end
+         *    of each audio frame. Contracted channels are copied to the end of the buffer.
+         * 3) mContractChannelsNonDestructiveBufferProvider: Non-destructively contract sample data.
+         *    This is currently used in audio-haptic coupled playback to separate audio and haptic
+         *    data. Contracted channels can be written to a given buffer.
+         * 4) mReformatBufferProvider: If not NULL, performs the audio reformat to
+         *    match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
+         *    requires reformat. For example, it may convert floating point input to
+         *    PCM_16_bit if that's required by the downmixer.
+         * 5) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
+         *    the number of channels required by the mixer sink.
+         * 6) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
+         *    the downmixer requirements to the mixer engine input requirements.
+         * 7) mTimestretchBufferProvider: Adds timestretching for playback rate
+         */
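+        // In short: the providers above are chained in the listed order (1 feeds 2, 2 feeds
+        // 3, ...), skipping any that are not needed; reconfigureBufferProviders() (declared
+        // above) rebuilds this chain when the track setup changes.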
+        AudioBufferProvider* mInputBufferProvider;    // externally provided buffer provider.
+        // TODO: combine mAdjustChannelsBufferProvider and
+        // mContractChannelsNonDestructiveBufferProvider
+        std::unique_ptr<PassthruBufferProvider> mAdjustChannelsBufferProvider;
+        std::unique_ptr<PassthruBufferProvider> mContractChannelsNonDestructiveBufferProvider;
+        std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
+        std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
+        std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
+        std::unique_ptr<PassthruBufferProvider> mTimestretchBufferProvider;
+
+        audio_format_t mDownmixRequiresFormat;  // required downmixer format
+                                                // AUDIO_FORMAT_PCM_16_BIT if 16 bit necessary
+                                                // AUDIO_FORMAT_INVALID if no required format
+
+        AudioPlaybackRate    mPlaybackRate;
+
+        // Haptic
+        bool                 mHapticPlaybackEnabled;
+        haptic_intensity_t   mHapticIntensity;
+        audio_channel_mask_t mHapticChannelMask;
+        uint32_t             mHapticChannelCount;
+        audio_channel_mask_t mMixerHapticChannelMask;
+        uint32_t             mMixerHapticChannelCount;
+        uint32_t             mAdjustInChannelCount;
+        uint32_t             mAdjustOutChannelCount;
+        uint32_t             mAdjustNonDestructiveInChannelCount;
+        uint32_t             mAdjustNonDestructiveOutChannelCount;
+        bool                 mKeepContractedChannels;
+
+        float getHapticScaleGamma() const {
+            // Need to keep consistent with the value in VibratorService.
+            switch (mHapticIntensity) {
+            case HAPTIC_SCALE_VERY_LOW:
+                return 2.0f;
+            case HAPTIC_SCALE_LOW:
+                return 1.5f;
+            case HAPTIC_SCALE_HIGH:
+                return 0.5f;
+            case HAPTIC_SCALE_VERY_HIGH:
+                return 0.25f;
+            default:
+                return 1.0f;
+            }
+        }
+
+        float getHapticMaxAmplitudeRatio() const {
+            // Need to keep consistent with the value in VibratorService.
+            switch (mHapticIntensity) {
+            case HAPTIC_SCALE_VERY_LOW:
+                return HAPTIC_SCALE_VERY_LOW_RATIO;
+            case HAPTIC_SCALE_LOW:
+                return HAPTIC_SCALE_LOW_RATIO;
+            case HAPTIC_SCALE_NONE:
+            case HAPTIC_SCALE_HIGH:
+            case HAPTIC_SCALE_VERY_HIGH:
+                return 1.0f;
+            default:
+                return 0.0f;
+            }
+        }
+    };
+
+    inline std::shared_ptr<Track> getTrack(int name) {
+        return std::static_pointer_cast<Track>(mTracks[name]);
+    }
+
+    std::shared_ptr<TrackBase> preCreateTrack() override;
+    status_t postCreateTrack(TrackBase *track) override;
+
+    void preProcess() override;
+    void postProcess() override;
+
+    bool setChannelMasks(int name,
+            audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) override;
+
+    static void sInitRoutine();
+
+    static pthread_once_t sOnceControl; // initialized in constructor by first new
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif // ANDROID_AUDIO_MIXER_H
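
The buffer-provider chain described in the Track comment above is essentially a series of decorators over the track's externally supplied AudioBufferProvider: each stage pulls frames from the stage before it, transforms them, and hands them downstream, and the track ends up pointing at the last stage in the chain. The sketch below is a minimal, self-contained illustration of that pattern only; the Provider/GainStage types are hypothetical stand-ins, not the real AudioBufferProvider/PassthruBufferProvider interfaces.

```cpp
// Minimal sketch of the "chain of buffer providers" idea; illustrative types only.
#include <cstddef>
#include <iostream>
#include <vector>

struct Provider {
    virtual ~Provider() = default;
    // Fill |out| with |frames| mono float frames.
    virtual void getFrames(std::vector<float>& out, size_t frames) = 0;
};

// Source: stands in for the track's externally provided input provider.
struct Source : Provider {
    void getFrames(std::vector<float>& out, size_t frames) override {
        out.resize(frames);
        for (size_t i = 0; i < frames; ++i) out[i] = static_cast<float>(i);
    }
};

// Pass-through stage: wraps an upstream provider and transforms its output
// (stands in for the reformat/downmix/timestretch stages, each wrapping the previous one).
struct GainStage : Provider {
    Provider* upstream;
    float gain;
    GainStage(Provider* up, float g) : upstream(up), gain(g) {}
    void getFrames(std::vector<float>& out, size_t frames) override {
        upstream->getFrames(out, frames);   // pull from the previous stage
        for (float& s : out) s *= gain;     // transform in place
    }
};

int main() {
    Source src;
    GainStage stage1(&src, 0.5f);     // e.g. a "reformat" stage
    GainStage stage2(&stage1, 2.0f);  // e.g. a "downmix" stage
    // The track would point its bufferProvider at the last stage of the chain.
    std::vector<float> buf;
    stage2.getFrames(buf, 4);
    for (float s : buf) std::cout << s << ' ';
    std::cout << '\n';                // prints: 0 1 2 3
}
```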
diff --git a/media/libaudioprocessing/include/media/AudioMixerBase.h b/media/libaudioprocessing/include/media/AudioMixerBase.h
new file mode 100644
index 0000000..805b6d0
--- /dev/null
+++ b/media/libaudioprocessing/include/media/AudioMixerBase.h
@@ -0,0 +1,359 @@
+/*
+**
+** Copyright 2019, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_AUDIO_MIXER_BASE_H
+#define ANDROID_AUDIO_MIXER_BASE_H
+
+#include <map>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <media/AudioBufferProvider.h>
+#include <media/AudioResampler.h>
+#include <media/AudioResamplerPublic.h>
+#include <system/audio.h>
+#include <utils/Compat.h>
+
+// This must match frameworks/av/services/audioflinger/Configuration.h
+// when used with the Audio Framework.
+#define FLOAT_AUX
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+// AudioMixerBase is functional on its own if only mixing and resampling
+// is needed.
+
+class AudioMixerBase
+{
+public:
+    // Do not change these unless underlying code changes.
+    // This mixer has a hard-coded upper limit of 8 channels for output.
+    static constexpr uint32_t MAX_NUM_CHANNELS = FCC_8;
+    static constexpr uint32_t MAX_NUM_VOLUMES = FCC_2; // stereo volume only
+
+    static const uint16_t UNITY_GAIN_INT = 0x1000;
+    static const CONSTEXPR float UNITY_GAIN_FLOAT = 1.0f;
+
+    enum { // names
+        // setParameter targets
+        TRACK           = 0x3000,
+        RESAMPLE        = 0x3001,
+        RAMP_VOLUME     = 0x3002, // ramp to new volume
+        VOLUME          = 0x3003, // don't ramp
+        TIMESTRETCH     = 0x3004,
+
+        // set Parameter names
+        // for target TRACK
+        CHANNEL_MASK    = 0x4000,
+        FORMAT          = 0x4001,
+        MAIN_BUFFER     = 0x4002,
+        AUX_BUFFER      = 0x4003,
+        // 0x4004 reserved
+        MIXER_FORMAT    = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+        MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output
+        // for target RESAMPLE
+        SAMPLE_RATE     = 0x4100, // Configure sample rate conversion on this track name;
+                                  // parameter 'value' is the new sample rate in Hz.
+                                  // Only creates a sample rate converter the first time that
+                                  // the track sample rate is different from the mix sample rate.
+                                  // If the new sample rate is the same as the mix sample rate,
+                                  // and a sample rate converter already exists,
+                                  // then the sample rate converter remains present but is a no-op.
+        RESET           = 0x4101, // Reset sample rate converter without changing sample rate.
+                                  // This clears out the resampler's input buffer.
+        REMOVE          = 0x4102, // Remove the sample rate converter on this track name;
+                                  // the track is restored to the mix sample rate.
+        // for target RAMP_VOLUME and VOLUME (8 channels max)
+        // FIXME use float for these 3 to improve the dynamic range
+        VOLUME0         = 0x4200,
+        VOLUME1         = 0x4201,
+        AUXLEVEL        = 0x4210,
+    };
+
+    AudioMixerBase(size_t frameCount, uint32_t sampleRate)
+        : mSampleRate(sampleRate)
+        , mFrameCount(frameCount) {
+    }
+
+    virtual ~AudioMixerBase() {}
+
+    virtual bool isValidFormat(audio_format_t format) const;
+    virtual bool isValidChannelMask(audio_channel_mask_t channelMask) const;
+
+    // Create a new track in the mixer.
+    //
+    // \param name        a unique user-provided integer associated with the track.
+    //                    If name already exists, the function will abort.
+    // \param channelMask output channel mask.
+    // \param format      PCM format
+    // \param sessionId   Session id for the track. Tracks with the same
+    //                    session id will be submixed together.
+    //
+    // \return OK        on success.
+    //         BAD_VALUE if the format does not satisfy isValidFormat()
+    //                   or the channelMask does not satisfy isValidChannelMask().
+    status_t    create(
+            int name, audio_channel_mask_t channelMask, audio_format_t format, int sessionId);
+
+    bool        exists(int name) const {
+        return mTracks.count(name) > 0;
+    }
+
+    // Free an allocated track by name.
+    void        destroy(int name);
+
+    // Enable or disable an allocated track by name
+    void        enable(int name);
+    void        disable(int name);
+
+    virtual void setParameter(int name, int target, int param, void *value);
+
+    void        process() {
+        preProcess();
+        (this->*mHook)();
+        postProcess();
+    }
+
+    size_t      getUnreleasedFrames(int name) const;
+
+    std::string trackNames() const;
+
+  protected:
+    // Set kUseNewMixer to true to use the new mixer engine always. Otherwise the
+    // original code will be used for stereo sinks, the new mixer for everything else.
+    static constexpr bool kUseNewMixer = true;
+
+    // Set kUseFloat to true to allow floating input into the mixer engine.
+    // If kUseNewMixer is false, this is ignored or may be overridden internally
+    static constexpr bool kUseFloat = true;
+
+#ifdef FLOAT_AUX
+    using TYPE_AUX = float;
+    static_assert(kUseNewMixer && kUseFloat,
+            "kUseNewMixer and kUseFloat must be true for FLOAT_AUX option");
+#else
+    using TYPE_AUX = int32_t; // q4.27
+#endif
+
+    /* For multi-format functions (calls template functions
+     * in AudioMixerOps.h).  The template parameters are as follows:
+     *
+     *   MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+     *   USEFLOATVOL (set to true if float volume is used)
+     *   ADJUSTVOL   (set to true if volume ramp parameters need adjustment afterwards)
+     *   TO: int32_t (Q4.27) or float
+     *   TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+     *   TA: int32_t (Q4.27)
+     */
+
+    enum {
+        // FIXME this representation permits up to 8 channels
+        NEEDS_CHANNEL_COUNT__MASK   = 0x00000007,
+    };
+
+    enum {
+        NEEDS_CHANNEL_1             = 0x00000000,   // mono
+        NEEDS_CHANNEL_2             = 0x00000001,   // stereo
+
+        // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT
+
+        NEEDS_MUTE                  = 0x00000100,
+        NEEDS_RESAMPLE              = 0x00001000,
+        NEEDS_AUX                   = 0x00010000,
+    };
+
+    // hook types
+    enum {
+        PROCESSTYPE_NORESAMPLEONETRACK, // others set elsewhere
+    };
+
+    enum {
+        TRACKTYPE_NOP,
+        TRACKTYPE_RESAMPLE,
+        TRACKTYPE_NORESAMPLE,
+        TRACKTYPE_NORESAMPLEMONO,
+    };
+
+    // process hook functionality
+    using process_hook_t = void(AudioMixerBase::*)();
+
+    struct TrackBase;
+    using hook_t = void(TrackBase::*)(
+            int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux);
+
+    struct TrackBase {
+        TrackBase()
+            : bufferProvider(nullptr)
+        {
+            // TODO: move additional initialization here.
+        }
+        virtual ~TrackBase() {}
+
+        virtual uint32_t getOutputChannelCount() { return channelCount; }
+        virtual uint32_t getMixerChannelCount() { return mMixerChannelCount; }
+
+        bool        needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
+        bool        setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
+        bool        doesResample() const { return mResampler.get() != nullptr; }
+        void        recreateResampler(uint32_t devSampleRate);
+        void        resetResampler() { if (mResampler.get() != nullptr) mResampler->reset(); }
+        void        adjustVolumeRamp(bool aux, bool useFloat = false);
+        size_t      getUnreleasedFrames() const { return mResampler.get() != nullptr ?
+                                                    mResampler->getUnreleasedFrames() : 0; };
+
+        static hook_t getTrackHook(int trackType, uint32_t channelCount,
+                audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
+
+        void track__nop(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+
+        template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
+            typename TO, typename TI, typename TA>
+        void volumeMix(TO *out, size_t outFrames, const TI *in, TA *aux, bool ramp);
+
+        uint32_t    needs;
+
+        // TODO: Eventually remove legacy integer volume settings
+        union {
+        int16_t     volume[MAX_NUM_VOLUMES]; // U4.12 fixed point (top bit should be zero)
+        int32_t     volumeRL;
+        };
+
+        int32_t     prevVolume[MAX_NUM_VOLUMES];
+        int32_t     volumeInc[MAX_NUM_VOLUMES];
+        int32_t     auxInc;
+        int32_t     prevAuxLevel;
+        int16_t     auxLevel;       // 0 <= auxLevel <= MAX_GAIN_INT, but signed for mul performance
+
+        uint16_t    frameCount;
+
+        uint8_t     channelCount;   // 1 or 2, redundant with (needs & NEEDS_CHANNEL_COUNT__MASK)
+        uint8_t     unused_padding; // formerly format, was always 16
+        uint16_t    enabled;        // actually bool
+        audio_channel_mask_t channelMask;
+
+        // actual buffer provider used by the track hooks
+        AudioBufferProvider*                bufferProvider;
+
+        mutable AudioBufferProvider::Buffer buffer; // 8 bytes
+
+        hook_t      hook;
+        const void  *mIn;             // current location in buffer
+
+        std::unique_ptr<AudioResampler> mResampler;
+        uint32_t    sampleRate;
+        int32_t*    mainBuffer;
+        int32_t*    auxBuffer;
+
+        int32_t     sessionId;
+
+        audio_format_t mMixerFormat;     // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+        audio_format_t mFormat;          // input track format
+        audio_format_t mMixerInFormat;   // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+                                         // each track must be converted to this format.
+
+        float          mVolume[MAX_NUM_VOLUMES];     // floating point set volume
+        float          mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume
+        float          mVolumeInc[MAX_NUM_VOLUMES];  // floating point volume increment
+
+        float          mAuxLevel;                     // floating point set aux level
+        float          mPrevAuxLevel;                 // floating point prev aux level
+        float          mAuxInc;                       // floating point aux increment
+
+        audio_channel_mask_t mMixerChannelMask;
+        uint32_t             mMixerChannelCount;
+
+      protected:
+
+        // hooks
+        void track__genericResample(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+        void track__16BitsStereo(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+        void track__16BitsMono(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+
+        void volumeRampStereo(int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
+        void volumeStereo(int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
+
+        // multi-format track hooks
+        template <int MIXTYPE, typename TO, typename TI, typename TA>
+        void track__Resample(TO* out, size_t frameCount, TO* temp __unused, TA* aux);
+        template <int MIXTYPE, typename TO, typename TI, typename TA>
+        void track__NoResample(TO* out, size_t frameCount, TO* temp __unused, TA* aux);
+    };
+
+    // preCreateTrack must create an instance of a proper TrackBase descendant.
+    // postCreateTrack is called after filling out fields of TrackBase. It can
+    // abort track creation by returning non-OK status. See the implementation
+    // of create() for details.
+    virtual std::shared_ptr<TrackBase> preCreateTrack();
+    virtual status_t postCreateTrack(TrackBase *track __unused) { return OK; }
+
+    // preProcess is called before the process hook, postProcess after,
+    // see the implementation of process() method.
+    virtual void preProcess() {}
+    virtual void postProcess() {}
+
+    virtual bool setChannelMasks(int name,
+            audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask);
+
+    // Called when track info changes and a new process hook should be determined.
+    void invalidate() {
+        mHook = &AudioMixerBase::process__validate;
+    }
+
+    void process__validate();
+    void process__nop();
+    void process__genericNoResampling();
+    void process__genericResampling();
+    void process__oneTrack16BitsStereoNoResampling();
+
+    template <int MIXTYPE, typename TO, typename TI, typename TA>
+    void process__noResampleOneTrack();
+
+    static process_hook_t getProcessHook(int processType, uint32_t channelCount,
+            audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
+
+    static void convertMixerFormat(void *out, audio_format_t mixerOutFormat,
+            void *in, audio_format_t mixerInFormat, size_t sampleCount);
+
+    // initialization constants
+    const uint32_t mSampleRate;
+    const size_t mFrameCount;
+
+    process_hook_t mHook = &AudioMixerBase::process__nop;   // one of process__*, never nullptr
+
+    // the size of the type (int32_t) should be the largest of all types supported
+    // by the mixer.
+    std::unique_ptr<int32_t[]> mOutputTemp;
+    std::unique_ptr<int32_t[]> mResampleTemp;
+
+    // Track names grouped by main buffer, in no particular order of main buffer;
+    // however, names for a particular main buffer are in order (by construction).
+    std::unordered_map<void * /* mainBuffer */, std::vector<int /* name */>> mGroups;
+
+    // track names that are enabled, in increasing order (by construction).
+    std::vector<int /* name */> mEnabled;
+
+    // track smart pointers, by name, in increasing order of name.
+    std::map<int /* name */, std::shared_ptr<TrackBase>> mTracks;
+};
+
+}  // namespace android
+
+#endif  // ANDROID_AUDIO_MIXER_BASE_H
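
One detail of this header worth calling out is the process-hook pattern: process() dispatches through a pointer-to-member function (mHook), and invalidate() simply points the hook back at process__validate so the next process() call re-selects an optimized hook for the current track configuration. Below is a standalone sketch of that dispatch pattern under hypothetical names (SimpleMixer and friends); it is an illustration of the technique, not the real class.

```cpp
// Standalone sketch of the pointer-to-member "process hook" pattern used by
// AudioMixerBase. All names here are illustrative only.
#include <iostream>

class SimpleMixer {
public:
    void process() {
        (this->*mHook)();   // dispatch through the currently selected hook
    }
    void setStereo(bool stereo) {
        mStereo = stereo;
        invalidate();       // configuration changed: force hook re-selection
    }
private:
    using hook_t = void (SimpleMixer::*)();

    void invalidate() { mHook = &SimpleMixer::process__validate; }

    // Inspects the current state, installs the specialized hook, then runs it once.
    void process__validate() {
        mHook = mStereo ? &SimpleMixer::process__stereo : &SimpleMixer::process__mono;
        (this->*mHook)();
    }
    void process__mono()   { std::cout << "mono mix\n"; }
    void process__stereo() { std::cout << "stereo mix\n"; }

    bool   mStereo = false;
    hook_t mHook = &SimpleMixer::process__validate;
};

int main() {
    SimpleMixer m;
    m.process();          // validate selects the mono hook, then mixes
    m.process();          // subsequent calls go straight to the selected hook
    m.setStereo(true);
    m.process();          // re-validates and switches to the stereo hook
}
```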
diff --git a/media/libmedia/include/media/BufferProviders.h b/media/libaudioprocessing/include/media/BufferProviders.h
similarity index 100%
rename from media/libmedia/include/media/BufferProviders.h
rename to media/libaudioprocessing/include/media/BufferProviders.h
diff --git a/media/libeffects/downmix/Android.bp b/media/libeffects/downmix/Android.bp
index 9c82b1d..2a2f36e 100644
--- a/media/libeffects/downmix/Android.bp
+++ b/media/libeffects/downmix/Android.bp
@@ -6,6 +6,7 @@
     srcs: ["EffectDownmix.c"],
 
     shared_libs: [
+        "libaudioutils",
         "libcutils",
         "liblog",
     ],
@@ -23,5 +24,4 @@
         "libaudioeffects",
         "libhardware_headers",
     ],
-    static_libs: ["libaudioutils" ],
 }
diff --git a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
index 7468a90..10eedd9 100644
--- a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
+++ b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
@@ -53,6 +53,7 @@
                                  LVM_INT16 NrFrames,
                                  LVM_INT32 NrChannels);
 void Copy_Float_Stereo_Mc(       const LVM_FLOAT *src,
+                                 LVM_FLOAT *StereoOut,
                                  LVM_FLOAT *dst,
                                  LVM_INT16 NrFrames,
                                  LVM_INT32 NrChannels);
diff --git a/media/libeffects/lvm/lib/Common/src/Copy_16.c b/media/libeffects/lvm/lib/Common/src/Copy_16.c
index 3858450..3eb3c14 100644
--- a/media/libeffects/lvm/lib/Common/src/Copy_16.c
+++ b/media/libeffects/lvm/lib/Common/src/Copy_16.c
@@ -117,30 +117,31 @@
     }
 }
 
-// Merge a multichannel source with stereo contained in dst, to dst.
+// Merge a multichannel source with stereo contained in StereoOut, to dst.
 void Copy_Float_Stereo_Mc(const LVM_FLOAT *src,
+                 LVM_FLOAT *StereoOut,
                  LVM_FLOAT *dst,
                  LVM_INT16 NrFrames, /* Number of frames*/
                  LVM_INT32 NrChannels)
 {
     LVM_INT16 ii, jj;
-    LVM_FLOAT *src_st = dst + 2 * (NrFrames - 1);
 
-    // repack dst which carries stereo information
+    // pack dst with stereo information of StereoOut
     // together with the upper channels of src.
+    StereoOut += 2 * (NrFrames - 1);
     dst += NrChannels * (NrFrames - 1);
     src += NrChannels * (NrFrames - 1);
     for (ii = NrFrames; ii != 0; ii--)
     {
-        dst[1] = src_st[1];
-        dst[0] = src_st[0]; // copy 1 before 0 is required for NrChannels == 3.
+        dst[1] = StereoOut[1];
+        dst[0] = StereoOut[0]; // copy 1 before 0 is required for NrChannels == 3.
         for (jj = 2; jj < NrChannels; jj++)
         {
             dst[jj] = src[jj];
         }
         dst    -= NrChannels;
         src    -= NrChannels;
-        src_st -= 2;
+        StereoOut -= 2;
     }
 }
 #endif
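
The reworked Copy_Float_Stereo_Mc merges a processed stereo pair (StereoOut) back into a multichannel frame layout, taking the upper channels from src. The backward iteration in the library function exists so the copy stays correct when dst aliases src; the forward re-implementation below is only a sketch of the resulting data layout, with made-up sample values.

```cpp
// Sketch of what Copy_Float_Stereo_Mc produces: per frame, channels 0..1 come from
// the stereo buffer and channels 2..N-1 come from the multichannel source. This is
// a forward re-implementation for clarity; the library version walks backward so it
// also works in place (dst aliasing src).
#include <cstdio>

static void mergeStereoMc(const float* src, const float* stereo, float* dst,
                          int frames, int channels) {
    for (int f = 0; f < frames; ++f) {
        dst[f * channels + 0] = stereo[f * 2 + 0];
        dst[f * channels + 1] = stereo[f * 2 + 1];
        for (int ch = 2; ch < channels; ++ch) {
            dst[f * channels + ch] = src[f * channels + ch];
        }
    }
}

int main() {
    const int frames = 2, channels = 4;
    const float src[frames * channels] = {10, 11, 12, 13,  20, 21, 22, 23};
    const float stereo[frames * 2]     = {0.1f, 0.2f,      0.3f, 0.4f};
    float dst[frames * channels];
    mergeStereoMc(src, stereo, dst, frames, channels);
    for (float v : dst) std::printf("%g ", v);   // 0.1 0.2 12 13 0.3 0.4 22 23
    std::printf("\n");
}
```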
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
index ab8ccd1..c8df8e4 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
@@ -60,7 +60,11 @@
 #define LVCS_COMPGAINFRAME          64          /* Compressor gain update interval */
 
 /* Memory */
+#ifdef SUPPORT_MC
+#define LVCS_SCRATCHBUFFERS              8      /* Number of buffers required for inplace processing */
+#else
 #define LVCS_SCRATCHBUFFERS              6      /* Number of buffers required for inplace processing */
+#endif
 #ifdef SUPPORT_MC
 /*
  * The Concert Surround module applies processing only on the first two
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c
index ef1d9eb..56fb04f 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c
@@ -106,7 +106,7 @@
      * The Concert Surround module carries out processing only on L, R.
      */
     pInput = pScratch + (2 * NrFrames);
-    pStIn  = pScratch + (LVCS_SCRATCHBUFFERS * NrFrames);
+    pStIn  = pScratch + ((LVCS_SCRATCHBUFFERS - 2) * NrFrames);
     /* The first two channel data is extracted from the input data and
      * copied into pInput buffer
      */
@@ -303,13 +303,45 @@
      */
     if (pInstance->Params.OperatingMode != LVCS_OFF)
     {
+#ifdef SUPPORT_MC
+        LVM_FLOAT *pStereoOut;
+        /*
+         * LVCS_Process_CS uses the output buffer to store the intermediate outputs of the
+         * StereoEnhancer, Equalizer, ReverbGenerator and BypassMixer.
+         * So, when the input and output buffers are the same, use the scratch buffer to store
+         * the intermediate outputs and avoid overlapping the i/o data.
+         */
+        if (pOutData == pInData)
+        {
+            /*
+             * Scratch memory is used in 4 chunks of (2 * NrFrames) size each.
+             * The first chunk is used by LVCS_StereoEnhancer and LVCS_ReverbGenerator, and the
+             * second and fourth are used as input buffers by pInput and pStIn in LVCS_Process_CS.
+             * Hence, pStereoOut points to the otherwise unused third chunk of scratch memory.
+             */
+            pStereoOut = (LVM_FLOAT *) \
+                          pInstance->MemoryTable. \
+                          Region[LVCS_MEMREGION_TEMPORARY_FAST].pBaseAddress +
+                          ((LVCS_SCRATCHBUFFERS - 4) * NrFrames);
+        }
+        else
+        {
+            pStereoOut = pOutData;
+        }
+
         /*
          * Call CS process function
          */
             err = LVCS_Process_CS(hInstance,
                                   pInData,
+                                  pStereoOut,
+                                  NrFrames);
+#else
+            err = LVCS_Process_CS(hInstance,
+                                  pInData,
                                   pOutData,
                                   NumSamples);
+#endif
 
 
         /*
@@ -329,10 +361,17 @@
 
             if(NumSamples < LVCS_COMPGAINFRAME)
             {
+#ifdef SUPPORT_MC
+                NonLinComp_Float(Gain,                    /* Compressor gain setting */
+                                 pStereoOut,
+                                 pStereoOut,
+                                 (LVM_INT32)(2 * NrFrames));
+#else
                 NonLinComp_Float(Gain,                    /* Compressor gain setting */
                                  pOutData,
                                  pOutData,
                                  (LVM_INT32)(2 * NumSamples));
+#endif
             }
             else
             {
@@ -361,7 +400,11 @@
 
                 FinalGain = Gain;
                 Gain = pInstance->CompressGain;
+#ifdef SUPPORT_MC
+                pOutPtr = pStereoOut;
+#else
                 pOutPtr = pOutData;
+#endif
 
                 while(SampleToProcess > 0)
                 {
@@ -428,6 +471,7 @@
         }
 #ifdef SUPPORT_MC
         Copy_Float_Stereo_Mc(pInData,
+                             pStereoOut,
                              pOutData,
                              NrFrames,
                              channels);
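
To make the scratch-buffer bookkeeping above concrete: with SUPPORT_MC the scratch region now holds LVCS_SCRATCHBUFFERS (= 8) buffers of NrFrames floats, i.e. four stereo-sized chunks of 2 * NrFrames. The snippet below reproduces the offset arithmetic from LVCS_Process for a hypothetical NrFrames; it is an illustration of the layout only, not library code.

```cpp
// Offset arithmetic for the LVCS scratch region under SUPPORT_MC.
// Chunk 0: StereoEnhancer/ReverbGenerator working space
// Chunk 1: pInput     (first two channels extracted from the input)
// Chunk 2: pStereoOut (intermediate stereo output when in == out buffer)
// Chunk 3: pStIn      (stereo input to LVCS_Process_CS)
#include <cstdio>

int main() {
    const int LVCS_SCRATCHBUFFERS = 8;   // value under SUPPORT_MC
    const int NrFrames = 192;            // hypothetical block size

    const int pInputOffset     = 2 * NrFrames;                          // chunk 1
    const int pStereoOutOffset = (LVCS_SCRATCHBUFFERS - 4) * NrFrames;  // chunk 2
    const int pStInOffset      = (LVCS_SCRATCHBUFFERS - 2) * NrFrames;  // chunk 3

    std::printf("pInput     at +%d floats\n", pInputOffset);      // +384
    std::printf("pStereoOut at +%d floats\n", pStereoOutOffset);  // +768
    std::printf("pStIn      at +%d floats\n", pStInOffset);       // +1152
    std::printf("scratch total %d floats\n",
                LVCS_SCRATCHBUFFERS * NrFrames);                  // 1536
}
```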
diff --git a/media/libheif/HeifDecoderImpl.cpp b/media/libheif/HeifDecoderImpl.cpp
index a977300..33ea1ca 100644
--- a/media/libheif/HeifDecoderImpl.cpp
+++ b/media/libheif/HeifDecoderImpl.cpp
@@ -31,6 +31,7 @@
 #include <private/media/VideoFrame.h>
 #include <utils/Log.h>
 #include <utils/RefBase.h>
+#include <vector>
 
 HeifDecoder* createHeifDecoder() {
     return new android::HeifDecoderImpl();
@@ -38,6 +39,22 @@
 
 namespace android {
 
+void initFrameInfo(HeifFrameInfo *info, const VideoFrame *videoFrame) {
+    info->mWidth = videoFrame->mWidth;
+    info->mHeight = videoFrame->mHeight;
+    info->mRotationAngle = videoFrame->mRotationAngle;
+    info->mBytesPerPixel = videoFrame->mBytesPerPixel;
+    info->mDurationUs = videoFrame->mDurationUs;
+    if (videoFrame->mIccSize > 0) {
+        info->mIccData.assign(
+                videoFrame->getFlattenedIccData(),
+                videoFrame->getFlattenedIccData() + videoFrame->mIccSize);
+    } else {
+        // clear old Icc data if there is no Icc data.
+        info->mIccData.clear();
+    }
+}
+
 /*
  * HeifDataSource
  *
@@ -156,7 +173,7 @@
 
     // copy from cache if the request falls entirely in cache
     if (offset + size <= mCachedOffset + mCachedSize) {
-        memcpy(mMemory->pointer(), mCache.get() + offset - mCachedOffset, size);
+        memcpy(mMemory->unsecurePointer(), mCache.get() + offset - mCachedOffset, size);
         return size;
     }
 
@@ -254,7 +271,7 @@
     if (bytesAvailable < (int64_t)size) {
         size = bytesAvailable;
     }
-    memcpy(mMemory->pointer(), mCache.get() + offset - mCachedOffset, size);
+    memcpy(mMemory->unsecurePointer(), mCache.get() + offset - mCachedOffset, size);
     return size;
 }
 
@@ -293,11 +310,11 @@
     // it's not, default to HAL_PIXEL_FORMAT_RGB_565.
     mOutputColor(HAL_PIXEL_FORMAT_RGB_565),
     mCurScanline(0),
-    mWidth(0),
-    mHeight(0),
+    mTotalScanline(0),
     mFrameDecoded(false),
     mHasImage(false),
     mHasVideo(false),
+    mSequenceLength(0),
     mAvailableLines(0),
     mNumSlices(1),
     mSliceHeight(0),
@@ -336,48 +353,103 @@
 
     mHasImage = hasImage && !strcasecmp(hasImage, "yes");
     mHasVideo = hasVideo && !strcasecmp(hasVideo, "yes");
-    sp<IMemory> sharedMem;
+
+    HeifFrameInfo* defaultInfo = nullptr;
     if (mHasImage) {
         // image index < 0 to retrieve primary image
-        sharedMem = mRetriever->getImageAtIndex(
+        sp<IMemory> sharedMem = mRetriever->getImageAtIndex(
                 -1, mOutputColor, true /*metaOnly*/);
-    } else if (mHasVideo) {
-        sharedMem = mRetriever->getFrameAtTime(0,
-                MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC,
-                mOutputColor, true /*metaOnly*/);
+
+        if (sharedMem == nullptr || sharedMem->unsecurePointer() == nullptr) {
+            ALOGE("init: videoFrame is a nullptr");
+            return false;
+        }
+
+        // TODO: Using unsecurePointer() has some associated security pitfalls
+        //       (see declaration for details).
+        //       Either document why it is safe in this case or address the
+        //       issue (e.g. by copying).
+        VideoFrame* videoFrame = static_cast<VideoFrame*>(sharedMem->unsecurePointer());
+
+        ALOGV("Image dimension %dx%d, display %dx%d, angle %d, iccSize %d",
+                videoFrame->mWidth,
+                videoFrame->mHeight,
+                videoFrame->mDisplayWidth,
+                videoFrame->mDisplayHeight,
+                videoFrame->mRotationAngle,
+                videoFrame->mIccSize);
+
+        initFrameInfo(&mImageInfo, videoFrame);
+
+        if (videoFrame->mTileHeight >= 512) {
+            // Try decoding in slices only if the image has tiles and is big enough.
+            mSliceHeight = videoFrame->mTileHeight;
+            ALOGV("mSliceHeight %u", mSliceHeight);
+        }
+
+        defaultInfo = &mImageInfo;
     }
 
-    if (sharedMem == nullptr || sharedMem->pointer() == nullptr) {
-        ALOGE("getFrameAtTime: videoFrame is a nullptr");
+    if (mHasVideo) {
+        sp<IMemory> sharedMem = mRetriever->getFrameAtTime(0,
+                MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC,
+                mOutputColor, true /*metaOnly*/);
+
+        if (sharedMem == nullptr || sharedMem->unsecurePointer() == nullptr) {
+            ALOGE("init: videoFrame is a nullptr");
+            return false;
+        }
+
+        // TODO: Using unsecurePointer() has some associated security pitfalls
+        //       (see declaration for details).
+        //       Either document why it is safe in this case or address the
+        //       issue (e.g. by copying).
+        VideoFrame* videoFrame = static_cast<VideoFrame*>(
+            sharedMem->unsecurePointer());
+
+        ALOGV("Sequence dimension %dx%d, display %dx%d, angle %d, iccSize %d",
+                videoFrame->mWidth,
+                videoFrame->mHeight,
+                videoFrame->mDisplayWidth,
+                videoFrame->mDisplayHeight,
+                videoFrame->mRotationAngle,
+                videoFrame->mIccSize);
+
+        initFrameInfo(&mSequenceInfo, videoFrame);
+
+        mSequenceLength = atoi(mRetriever->extractMetadata(METADATA_KEY_VIDEO_FRAME_COUNT));
+
+        if (defaultInfo == nullptr) {
+            defaultInfo = &mSequenceInfo;
+        }
+    }
+
+    if (defaultInfo == nullptr) {
+        ALOGD("No valid image or sequence available");
         return false;
     }
 
-    VideoFrame* videoFrame = static_cast<VideoFrame*>(sharedMem->pointer());
-
-    ALOGV("Meta dimension %dx%d, display %dx%d, angle %d, iccSize %d",
-            videoFrame->mWidth,
-            videoFrame->mHeight,
-            videoFrame->mDisplayWidth,
-            videoFrame->mDisplayHeight,
-            videoFrame->mRotationAngle,
-            videoFrame->mIccSize);
-
     if (frameInfo != nullptr) {
-        frameInfo->set(
-                videoFrame->mWidth,
-                videoFrame->mHeight,
-                videoFrame->mRotationAngle,
-                videoFrame->mBytesPerPixel,
-                videoFrame->mIccSize,
-                videoFrame->getFlattenedIccData());
+        *frameInfo = *defaultInfo;
     }
-    mWidth = videoFrame->mWidth;
-    mHeight = videoFrame->mHeight;
-    if (mHasImage && videoFrame->mTileHeight >= 512 && mWidth >= 3000 && mHeight >= 2000 ) {
-        // Try decoding in slices only if the image has tiles and is big enough.
-        mSliceHeight = videoFrame->mTileHeight;
-        mNumSlices = (videoFrame->mHeight + mSliceHeight - 1) / mSliceHeight;
-        ALOGV("mSliceHeight %u, mNumSlices %zu", mSliceHeight, mNumSlices);
+
+    // Default total scanline count; this might change if decodeSequence() is used.
+    mTotalScanline = defaultInfo->mHeight;
+
+    return true;
+}
+
+bool HeifDecoderImpl::getSequenceInfo(
+        HeifFrameInfo* frameInfo, size_t *frameCount) {
+    ALOGV("%s", __FUNCTION__);
+    if (!mHasVideo) {
+        return false;
+    }
+    if (frameInfo != nullptr) {
+        *frameInfo = mSequenceInfo;
+    }
+    if (frameCount != nullptr) {
+        *frameCount = mSequenceLength;
     }
     return true;
 }
@@ -416,15 +488,15 @@
         ALOGV("decodeAsync(): decoding slice %zu", i);
         size_t top = i * mSliceHeight;
         size_t bottom = (i + 1) * mSliceHeight;
-        if (bottom > mHeight) {
-            bottom = mHeight;
+        if (bottom > mImageInfo.mHeight) {
+            bottom = mImageInfo.mHeight;
         }
         sp<IMemory> frameMemory = mRetriever->getImageRectAtIndex(
-                -1, mOutputColor, 0, top, mWidth, bottom);
+                -1, mOutputColor, 0, top, mImageInfo.mWidth, bottom);
         {
             Mutex::Autolock autolock(mLock);
 
-            if (frameMemory == nullptr || frameMemory->pointer() == nullptr) {
+            if (frameMemory == nullptr || frameMemory->unsecurePointer() == nullptr) {
                 mAsyncDecodeDone = true;
                 mScanlineReady.signal();
                 break;
@@ -452,42 +524,48 @@
     // See if we want to decode in slices to allow client to start
     // scanline processing in parallel with decode. If this fails
     // we fallback to decoding the full frame.
-    if (mHasImage && mNumSlices > 1) {
-        // get first slice and metadata
-        sp<IMemory> frameMemory = mRetriever->getImageRectAtIndex(
-                -1, mOutputColor, 0, 0, mWidth, mSliceHeight);
-
-        if (frameMemory == nullptr || frameMemory->pointer() == nullptr) {
-            ALOGE("decode: metadata is a nullptr");
-            return false;
+    if (mHasImage) {
+        if (mSliceHeight >= 512 &&
+                mImageInfo.mWidth >= 3000 &&
+                mImageInfo.mHeight >= 2000 ) {
+            // Try decoding in slices only if the image has tiles and is big enough.
+            mNumSlices = (mImageInfo.mHeight + mSliceHeight - 1) / mSliceHeight;
+            ALOGV("mSliceHeight %u, mNumSlices %zu", mSliceHeight, mNumSlices);
         }
 
-        VideoFrame* videoFrame = static_cast<VideoFrame*>(frameMemory->pointer());
+        if (mNumSlices > 1) {
+            // get first slice and metadata
+            sp<IMemory> frameMemory = mRetriever->getImageRectAtIndex(
+                    -1, mOutputColor, 0, 0, mImageInfo.mWidth, mSliceHeight);
 
-        if (frameInfo != nullptr) {
-            frameInfo->set(
-                    videoFrame->mWidth,
-                    videoFrame->mHeight,
-                    videoFrame->mRotationAngle,
-                    videoFrame->mBytesPerPixel,
-                    videoFrame->mIccSize,
-                    videoFrame->getFlattenedIccData());
+            if (frameMemory == nullptr || frameMemory->unsecurePointer() == nullptr) {
+                ALOGE("decode: metadata is a nullptr");
+                return false;
+            }
+
+            // TODO: Using unsecurePointer() has some associated security pitfalls
+            //       (see declaration for details).
+            //       Either document why it is safe in this case or address the
+            //       issue (e.g. by copying).
+            VideoFrame* videoFrame = static_cast<VideoFrame*>(frameMemory->unsecurePointer());
+
+            if (frameInfo != nullptr) {
+                initFrameInfo(frameInfo, videoFrame);
+            }
+            mFrameMemory = frameMemory;
+            mAvailableLines = mSliceHeight;
+            mThread = new DecodeThread(this);
+            if (mThread->run("HeifDecode", ANDROID_PRIORITY_FOREGROUND) == OK) {
+                mFrameDecoded = true;
+                return true;
+            }
+            // Fallback to decode without slicing
+            mThread.clear();
+            mNumSlices = 1;
+            mSliceHeight = 0;
+            mAvailableLines = 0;
+            mFrameMemory.clear();
         }
-
-        mFrameMemory = frameMemory;
-        mAvailableLines = mSliceHeight;
-        mThread = new DecodeThread(this);
-        if (mThread->run("HeifDecode", ANDROID_PRIORITY_FOREGROUND) == OK) {
-            mFrameDecoded = true;
-            return true;
-        }
-
-        // Fallback to decode without slicing
-        mThread.clear();
-        mNumSlices = 1;
-        mSliceHeight = 0;
-        mAvailableLines = 0;
-        mFrameMemory.clear();
     }
 
     if (mHasImage) {
@@ -498,12 +576,16 @@
                 MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC, mOutputColor);
     }
 
-    if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
+    if (mFrameMemory == nullptr || mFrameMemory->unsecurePointer() == nullptr) {
         ALOGE("decode: videoFrame is a nullptr");
         return false;
     }
 
-    VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
+    // TODO: Using unsecurePointer() has some associated security pitfalls
+    //       (see declaration for details).
+    //       Either document why it is safe in this case or address the
+    //       issue (e.g. by copying).
+    VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->unsecurePointer());
     if (videoFrame->mSize == 0 ||
             mFrameMemory->size() < videoFrame->getFlattenedSize()) {
         ALOGE("decode: videoFrame size is invalid");
@@ -520,13 +602,8 @@
             videoFrame->mSize);
 
     if (frameInfo != nullptr) {
-        frameInfo->set(
-                videoFrame->mWidth,
-                videoFrame->mHeight,
-                videoFrame->mRotationAngle,
-                videoFrame->mBytesPerPixel,
-                videoFrame->mIccSize,
-                videoFrame->getFlattenedIccData());
+        initFrameInfo(frameInfo, videoFrame);
+
     }
     mFrameDecoded = true;
 
@@ -536,18 +613,70 @@
     return true;
 }
 
-bool HeifDecoderImpl::getScanlineInner(uint8_t* dst) {
-    if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
+bool HeifDecoderImpl::decodeSequence(int frameIndex, HeifFrameInfo* frameInfo) {
+    ALOGV("%s: frame index %d", __FUNCTION__, frameIndex);
+    if (!mHasVideo) {
         return false;
     }
-    VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
+
+    if (frameIndex < 0 || frameIndex >= mSequenceLength) {
+        ALOGE("invalid frame index: %d, total frames %zu", frameIndex, mSequenceLength);
+        return false;
+    }
+
+    mCurScanline = 0;
+
+    // set total scanline to sequence height now
+    mTotalScanline = mSequenceInfo.mHeight;
+
+    mFrameMemory = mRetriever->getFrameAtIndex(frameIndex, mOutputColor);
+    if (mFrameMemory == nullptr || mFrameMemory->unsecurePointer() == nullptr) {
+        ALOGE("decode: videoFrame is a nullptr");
+        return false;
+    }
+
+    // TODO: Using unsecurePointer() has some associated security pitfalls
+    //       (see declaration for details).
+    //       Either document why it is safe in this case or address the
+    //       issue (e.g. by copying).
+    VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->unsecurePointer());
+    if (videoFrame->mSize == 0 ||
+            mFrameMemory->size() < videoFrame->getFlattenedSize()) {
+        ALOGE("decode: videoFrame size is invalid");
+        return false;
+    }
+
+    ALOGV("Decoded dimension %dx%d, display %dx%d, angle %d, rowbytes %d, size %d",
+            videoFrame->mWidth,
+            videoFrame->mHeight,
+            videoFrame->mDisplayWidth,
+            videoFrame->mDisplayHeight,
+            videoFrame->mRotationAngle,
+            videoFrame->mRowBytes,
+            videoFrame->mSize);
+
+    if (frameInfo != nullptr) {
+        initFrameInfo(frameInfo, videoFrame);
+    }
+    return true;
+}
+
+bool HeifDecoderImpl::getScanlineInner(uint8_t* dst) {
+    if (mFrameMemory == nullptr || mFrameMemory->unsecurePointer() == nullptr) {
+        return false;
+    }
+    // TODO: Using unsecurePointer() has some associated security pitfalls
+    //       (see declaration for details).
+    //       Either document why it is safe in this case or address the
+    //       issue (e.g. by copying).
+    VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->unsecurePointer());
     uint8_t* src = videoFrame->getFlattenedData() + videoFrame->mRowBytes * mCurScanline++;
     memcpy(dst, src, videoFrame->mBytesPerPixel * videoFrame->mWidth);
     return true;
 }
 
 bool HeifDecoderImpl::getScanline(uint8_t* dst) {
-    if (mCurScanline >= mHeight) {
+    if (mCurScanline >= mTotalScanline) {
         ALOGE("no more scanline available");
         return false;
     }
@@ -567,8 +696,8 @@
 size_t HeifDecoderImpl::skipScanlines(size_t count) {
     uint32_t oldScanline = mCurScanline;
     mCurScanline += count;
-    if (mCurScanline > mHeight) {
-        mCurScanline = mHeight;
+    if (mCurScanline > mTotalScanline) {
+        mCurScanline = mTotalScanline;
     }
     return (mCurScanline > oldScanline) ? (mCurScanline - oldScanline) : 0;
 }
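
The slice-decoding path decodes a large tiled image in tile-height slices so the client can start consuming scanlines while the decoder keeps working in the background. The slice count is a plain ceiling division, and each slice covers rows [i * sliceHeight, min((i + 1) * sliceHeight, height)); the snippet below walks that arithmetic for a hypothetical 4000x3000 image with a 512-row tile height.

```cpp
// Slice bookkeeping used by the sliced HEIF decode path (illustrative values).
#include <algorithm>
#include <cstdio>

int main() {
    const unsigned width = 4000, height = 3000;
    const unsigned sliceHeight = 512;   // tile height reported by the frame

    // Slicing is only attempted for tiled, sufficiently large images.
    const bool useSlices = sliceHeight >= 512 && width >= 3000 && height >= 2000;
    if (!useSlices) return 0;

    const unsigned numSlices = (height + sliceHeight - 1) / sliceHeight;  // ceil division
    std::printf("numSlices = %u\n", numSlices);                           // (3000 + 511) / 512 = 6

    for (unsigned i = 0; i < numSlices; ++i) {
        const unsigned top = i * sliceHeight;
        const unsigned bottom = std::min((i + 1) * sliceHeight, height);
        std::printf("slice %u: rows [%u, %u)\n", i, top, bottom);
    }
}
```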
diff --git a/media/libheif/HeifDecoderImpl.h b/media/libheif/HeifDecoderImpl.h
index 528ee3b..69c74a7 100644
--- a/media/libheif/HeifDecoderImpl.h
+++ b/media/libheif/HeifDecoderImpl.h
@@ -40,12 +40,16 @@
 
     bool init(HeifStream* stream, HeifFrameInfo* frameInfo) override;
 
+    bool getSequenceInfo(HeifFrameInfo* frameInfo, size_t *frameCount) override;
+
     bool getEncodedColor(HeifEncodedColor* outColor) const override;
 
     bool setOutputColor(HeifColorFormat heifColor) override;
 
     bool decode(HeifFrameInfo* frameInfo) override;
 
+    bool decodeSequence(int frameIndex, HeifFrameInfo* frameInfo) override;
+
     bool getScanline(uint8_t* dst) override;
 
     size_t skipScanlines(size_t count) override;
@@ -56,13 +60,15 @@
     sp<IDataSource> mDataSource;
     sp<MediaMetadataRetriever> mRetriever;
     sp<IMemory> mFrameMemory;
+    HeifFrameInfo mImageInfo;
+    HeifFrameInfo mSequenceInfo;
     android_pixel_format_t mOutputColor;
     size_t mCurScanline;
-    uint32_t mWidth;
-    uint32_t mHeight;
+    size_t mTotalScanline;
     bool mFrameDecoded;
     bool mHasImage;
     bool mHasVideo;
+    size_t mSequenceLength;
 
     // Slice decoding only
     Mutex mLock;
diff --git a/media/libheif/include/HeifDecoderAPI.h b/media/libheif/include/HeifDecoderAPI.h
index aa10f33..9073672 100644
--- a/media/libheif/include/HeifDecoderAPI.h
+++ b/media/libheif/include/HeifDecoderAPI.h
@@ -17,7 +17,7 @@
 #ifndef _HEIF_DECODER_API_
 #define _HEIF_DECODER_API_
 
-#include <memory>
+#include <vector>
 
 /*
  * The output color pixel format of heif decoder.
@@ -40,41 +40,13 @@
 /*
  * Represents a color converted (RGB-based) video frame
  */
-struct HeifFrameInfo
-{
-    HeifFrameInfo() :
-        mWidth(0), mHeight(0), mRotationAngle(0), mBytesPerPixel(0),
-        mIccSize(0), mIccData(nullptr) {}
-
-    // update the frame info, will make a copy of |iccData| internally
-    void set(uint32_t width, uint32_t height, int32_t rotation, uint32_t bpp,
-            uint32_t iccSize, uint8_t* iccData) {
-        mWidth = width;
-        mHeight = height;
-        mRotationAngle = rotation;
-        mBytesPerPixel = bpp;
-
-        if (mIccData != nullptr) {
-            mIccData.reset(nullptr);
-        }
-        mIccSize = iccSize;
-        if (iccSize > 0) {
-            mIccData.reset(new uint8_t[iccSize]);
-            if (mIccData.get() != nullptr) {
-                memcpy(mIccData.get(), iccData, iccSize);
-            } else {
-                mIccSize = 0;
-            }
-        }
-    }
-
-    // Intentional public access modifiers:
+struct HeifFrameInfo {
     uint32_t mWidth;
     uint32_t mHeight;
     int32_t  mRotationAngle;           // Rotation angle, clockwise, should be multiple of 90
     uint32_t mBytesPerPixel;           // Number of bytes for one pixel
-    uint32_t mIccSize;                 // Number of bytes in mIccData
-    std::unique_ptr<uint8_t[]> mIccData; // Actual ICC data, memory is owned by this structure
+    int64_t mDurationUs;               // Duration of the frame in us
+    std::vector<uint8_t> mIccData;     // ICC data array
 };
 
 /*
@@ -113,8 +85,8 @@
     virtual size_t getLength() const = 0;
 
 private:
-    HeifStream(const HeifFrameInfo&) = delete;
-    HeifStream& operator=(const HeifFrameInfo&) = delete;
+    HeifStream(const HeifStream&) = delete;
+    HeifStream& operator=(const HeifStream&) = delete;
 };
 
 /*
@@ -146,6 +118,14 @@
     virtual bool init(HeifStream* stream, HeifFrameInfo* frameInfo) = 0;
 
     /*
+     * Returns true if the stream contains an image sequence and false otherwise.
+     * Upon success, |frameInfo| is filled with information about the pictures in the
+     * sequence and |frameCount| with the length of the sequence; both are left
+     * unmodified upon failure.
+     */
+    virtual bool getSequenceInfo(HeifFrameInfo* frameInfo, size_t *frameCount) = 0;
+
+    /*
      * Decode the picture internally, returning whether it succeeded. |frameInfo|
      * will be filled with information of the primary picture upon success and
      * unmodified upon failure.
@@ -156,6 +136,20 @@
     virtual bool decode(HeifFrameInfo* frameInfo) = 0;
 
     /*
+     * Decode the picture from the image sequence at index |frameIndex|.
+     * |frameInfo| will be filled with information of the decoded picture upon
+     * success and unmodified upon failure.
+     *
+     * |frameIndex| is the 0-based index of the video frame to retrieve. The frame
+     * index must refer to a valid frame; the total number of frames available for
+     * retrieval is reported by getSequenceInfo().
+     *
+     * After this succeeds, getScanline() can be called to read the scanlines
+     * that were decoded.
+     */
+    virtual bool decodeSequence(int frameIndex, HeifFrameInfo* frameInfo) = 0;
+
+    /*
      * Read the next scanline (in top-down order), returns true upon success
      * and false otherwise.
      */
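
Putting the new sequence API together, a client queries the sequence once, then decodes and reads frames one at a time. The sketch below shows that flow; error handling is minimal, the include path and the caller-provided HeifStream are assumptions, and only entry points declared in this header (init, getSequenceInfo, decodeSequence, getScanline, createHeifDecoder) are used.

```cpp
// Hedged usage sketch of the new image-sequence API; |stream| is assumed to be a
// caller-provided HeifStream implementation.
#include <vector>
#include "HeifDecoderAPI.h"

bool dumpSequence(HeifStream* stream) {
    HeifDecoder* decoder = createHeifDecoder();
    if (decoder == nullptr) return false;

    HeifFrameInfo info;
    if (!decoder->init(stream, &info)) { delete decoder; return false; }

    size_t frameCount = 0;
    HeifFrameInfo seqInfo;
    if (!decoder->getSequenceInfo(&seqInfo, &frameCount)) {
        delete decoder;
        return false;   // no image sequence in this stream
    }

    std::vector<uint8_t> row(seqInfo.mWidth * seqInfo.mBytesPerPixel);
    for (size_t i = 0; i < frameCount; ++i) {
        if (!decoder->decodeSequence(static_cast<int>(i), &seqInfo)) break;
        for (uint32_t y = 0; y < seqInfo.mHeight; ++y) {
            if (!decoder->getScanline(row.data())) break;
            // ... consume one row of pixels ...
        }
    }
    delete decoder;
    return true;
}
```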
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index d141287..81291a1 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -1,10 +1,3 @@
-cc_defaults {
-    name: "libmedia_defaults",
-    include_dirs: [
-        "bionic/libc/private",
-    ],
-}
-
 cc_library_headers {
     name: "libmedia_headers",
     vendor_available: true,
@@ -67,10 +60,8 @@
     srcs: [
         ":libmedia_omx_aidl",
 
-        "IMediaCodecList.cpp",
         "IOMX.cpp",
         "MediaCodecBuffer.cpp",
-        "MediaCodecInfo.cpp",
         "OMXBuffer.cpp",
         "omx/1.0/WGraphicBufferSource.cpp",
         "omx/1.0/WOmxBufferSource.cpp",
@@ -93,7 +84,6 @@
         "libbinder",
         "libcutils",
         "libhidlbase",
-        "libhidltransport",
         "liblog",
         "libstagefright_foundation",
         "libui",
@@ -154,7 +144,6 @@
         "libcutils",
         "libgui",
         "libhidlbase",
-        "libhidltransport",
         "liblog",
         "libmedia_omx",
         "libstagefright_foundation",
@@ -229,12 +218,11 @@
 cc_library {
     name: "libmedia",
 
-    defaults: [ "libmedia_defaults" ],
-
     srcs: [
         "IDataSource.cpp",
         "BufferingSettings.cpp",
         "mediaplayer.cpp",
+        "IMediaCodecList.cpp",
         "IMediaHTTPConnection.cpp",
         "IMediaHTTPService.cpp",
         "IMediaExtractor.cpp",
@@ -250,6 +238,7 @@
         "IResourceManagerClient.cpp",
         "IResourceManagerService.cpp",
         "IStreamSource.cpp",
+        "MediaCodecInfo.cpp",
         "MediaUtils.cpp",
         "Metadata.cpp",
         "mediarecorder.cpp",
@@ -276,6 +265,7 @@
     },
 
     header_libs: [
+        "bionic_libc_platform_headers",
         "libstagefright_headers",
         "media_ndk_headers",
     ],
@@ -341,8 +331,6 @@
 cc_library_static {
     name: "libmedia_player2_util",
 
-    defaults: [ "libmedia_defaults" ],
-
     srcs: [
         "AudioParameter.cpp",
         "BufferingSettings.cpp",
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index 060b92b..9f34035 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -42,6 +42,8 @@
 const char * const AudioParameter::keyStreamHwAvSync = AUDIO_PARAMETER_STREAM_HW_AV_SYNC;
 const char * const AudioParameter::keyDeviceConnect = AUDIO_PARAMETER_DEVICE_CONNECT;
 const char * const AudioParameter::keyDeviceDisconnect = AUDIO_PARAMETER_DEVICE_DISCONNECT;
+const char * const AudioParameter::keyStreamConnect = AUDIO_PARAMETER_DEVICE_CONNECT;
+const char * const AudioParameter::keyStreamDisconnect = AUDIO_PARAMETER_DEVICE_DISCONNECT;
 const char * const AudioParameter::keyStreamSupportedFormats = AUDIO_PARAMETER_STREAM_SUP_FORMATS;
 const char * const AudioParameter::keyStreamSupportedChannels = AUDIO_PARAMETER_STREAM_SUP_CHANNELS;
 const char * const AudioParameter::keyStreamSupportedSamplingRates =
diff --git a/media/libmedia/IMediaHTTPConnection.cpp b/media/libmedia/IMediaHTTPConnection.cpp
index 1bb8d67..8cbb4c2 100644
--- a/media/libmedia/IMediaHTTPConnection.cpp
+++ b/media/libmedia/IMediaHTTPConnection.cpp
@@ -128,12 +128,12 @@
            ALOGE("readAt got a NULL buffer");
            return UNKNOWN_ERROR;
         }
-        if (mMemory->pointer() == NULL) {
-           ALOGE("readAt got a NULL mMemory->pointer()");
+        if (mMemory->unsecurePointer() == NULL) {
+           ALOGE("readAt got a NULL mMemory->unsecurePointer()");
            return UNKNOWN_ERROR;
         }
 
-        memcpy(buffer, mMemory->pointer(), len);
+        memcpy(buffer, mMemory->unsecurePointer(), len);
 
         return len;
     }
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index 028bea1..d95bc8e 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -213,15 +213,14 @@
         return interface_cast<IMemory>(reply.readStrongBinder());
     }
 
-    status_t getFrameAtIndex(std::vector<sp<IMemory> > *frames,
-            int frameIndex, int numFrames, int colorFormat, bool metaOnly)
+    sp<IMemory> getFrameAtIndex(
+            int index, int colorFormat, bool metaOnly)
     {
-        ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d) metaOnly(%d)",
-                frameIndex, numFrames, colorFormat, metaOnly);
+        ALOGV("getFrameAtIndex: index(%d), colorFormat(%d) metaOnly(%d)",
+                index, colorFormat, metaOnly);
         Parcel data, reply;
         data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
-        data.writeInt32(frameIndex);
-        data.writeInt32(numFrames);
+        data.writeInt32(index);
         data.writeInt32(colorFormat);
         data.writeInt32(metaOnly);
 #ifndef DISABLE_GROUP_SCHEDULE_HACK
@@ -230,16 +229,9 @@
         remote()->transact(GET_FRAME_AT_INDEX, data, &reply);
         status_t ret = reply.readInt32();
         if (ret != NO_ERROR) {
-            return ret;
+            return NULL;
         }
-        int retNumFrames = reply.readInt32();
-        if (retNumFrames < numFrames) {
-            numFrames = retNumFrames;
-        }
-        for (int i = 0; i < numFrames; i++) {
-            frames->push_back(interface_cast<IMemory>(reply.readStrongBinder()));
-        }
-        return OK;
+        return interface_cast<IMemory>(reply.readStrongBinder());
     }
 
     sp<IMemory> extractAlbumArt()
@@ -442,24 +434,20 @@
 
         case GET_FRAME_AT_INDEX: {
             CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
-            int frameIndex = data.readInt32();
-            int numFrames = data.readInt32();
+            int index = data.readInt32();
             int colorFormat = data.readInt32();
             bool metaOnly = (data.readInt32() != 0);
-            ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d), metaOnly(%d)",
-                    frameIndex, numFrames, colorFormat, metaOnly);
+            ALOGV("getFrameAtIndex: index(%d), colorFormat(%d), metaOnly(%d)",
+                    index, colorFormat, metaOnly);
 #ifndef DISABLE_GROUP_SCHEDULE_HACK
             setSchedPolicy(data);
 #endif
-            std::vector<sp<IMemory> > frames;
-            status_t err = getFrameAtIndex(
-                    &frames, frameIndex, numFrames, colorFormat, metaOnly);
-            reply->writeInt32(err);
-            if (OK == err) {
-                reply->writeInt32(frames.size());
-                for (size_t i = 0; i < frames.size(); i++) {
-                    reply->writeStrongBinder(IInterface::asBinder(frames[i]));
-                }
+            sp<IMemory> frame = getFrameAtIndex(index, colorFormat, metaOnly);
+            if (frame != nullptr) {  // Don't send NULL across the binder interface
+                reply->writeInt32(NO_ERROR);
+                reply->writeStrongBinder(IInterface::asBinder(frame));
+            } else {
+                reply->writeInt32(UNKNOWN_ERROR);
             }
 #ifndef DISABLE_GROUP_SCHEDULE_HACK
             restoreSchedPolicy();
diff --git a/media/libmedia/IResourceManagerService.cpp b/media/libmedia/IResourceManagerService.cpp
index 9724fc1..f8a0a14 100644
--- a/media/libmedia/IResourceManagerService.cpp
+++ b/media/libmedia/IResourceManagerService.cpp
@@ -32,6 +32,7 @@
     CONFIG = IBinder::FIRST_CALL_TRANSACTION,
     ADD_RESOURCE,
     REMOVE_RESOURCE,
+    REMOVE_CLIENT,
     RECLAIM_RESOURCE,
 };
 
@@ -72,12 +73,14 @@
 
     virtual void addResource(
             int pid,
+            int uid,
             int64_t clientId,
             const sp<IResourceManagerClient> client,
             const Vector<MediaResource> &resources) {
         Parcel data, reply;
         data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
         data.writeInt32(pid);
+        data.writeInt32(uid);
         data.writeInt64(clientId);
         data.writeStrongBinder(IInterface::asBinder(client));
         writeToParcel(&data, resources);
@@ -85,13 +88,23 @@
         remote()->transact(ADD_RESOURCE, data, &reply);
     }
 
-    virtual void removeResource(int pid, int64_t clientId) {
+    virtual void removeResource(int pid, int64_t clientId, const Vector<MediaResource> &resources) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
+        data.writeInt32(pid);
+        data.writeInt64(clientId);
+        writeToParcel(&data, resources);
+
+        remote()->transact(REMOVE_RESOURCE, data, &reply);
+    }
+
+    virtual void removeClient(int pid, int64_t clientId) {
         Parcel data, reply;
         data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
         data.writeInt32(pid);
         data.writeInt64(clientId);
 
-        remote()->transact(REMOVE_RESOURCE, data, &reply);
+        remote()->transact(REMOVE_CLIENT, data, &reply);
     }
 
     virtual bool reclaimResource(int callingPid, const Vector<MediaResource> &resources) {
@@ -129,6 +142,7 @@
         case ADD_RESOURCE: {
             CHECK_INTERFACE(IResourceManagerService, data, reply);
             int pid = data.readInt32();
+            int uid = data.readInt32();
             int64_t clientId = data.readInt64();
             sp<IResourceManagerClient> client(
                     interface_cast<IResourceManagerClient>(data.readStrongBinder()));
@@ -137,7 +151,7 @@
             }
             Vector<MediaResource> resources;
             readFromParcel(data, &resources);
-            addResource(pid, clientId, client, resources);
+            addResource(pid, uid, clientId, client, resources);
             return NO_ERROR;
         } break;
 
@@ -145,7 +159,17 @@
             CHECK_INTERFACE(IResourceManagerService, data, reply);
             int pid = data.readInt32();
             int64_t clientId = data.readInt64();
-            removeResource(pid, clientId);
+            Vector<MediaResource> resources;
+            readFromParcel(data, &resources);
+            removeResource(pid, clientId, resources);
+            return NO_ERROR;
+        } break;
+
+        case REMOVE_CLIENT: {
+            CHECK_INTERFACE(IResourceManagerService, data, reply);
+            int pid = data.readInt32();
+            int64_t clientId = data.readInt64();
+            removeClient(pid, clientId);
             return NO_ERROR;
         } break;
 
diff --git a/media/libmedia/MediaUtils.cpp b/media/libmedia/MediaUtils.cpp
index 31972fa..2efb30e 100644
--- a/media/libmedia/MediaUtils.cpp
+++ b/media/libmedia/MediaUtils.cpp
@@ -22,7 +22,7 @@
 #include <sys/resource.h>
 #include <unistd.h>
 
-#include <bionic_malloc.h>
+#include <bionic/malloc.h>
 
 #include "MediaUtils.h"
 
diff --git a/media/libmedia/include/media/IMediaMetadataRetriever.h b/media/libmedia/include/media/IMediaMetadataRetriever.h
index c6f422d..28d2192 100644
--- a/media/libmedia/include/media/IMediaMetadataRetriever.h
+++ b/media/libmedia/include/media/IMediaMetadataRetriever.h
@@ -48,9 +48,8 @@
             int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
     virtual sp<IMemory>     getImageRectAtIndex(
             int index, int colorFormat, int left, int top, int right, int bottom) = 0;
-    virtual status_t        getFrameAtIndex(
-            std::vector<sp<IMemory> > *frames,
-            int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
+    virtual sp<IMemory>     getFrameAtIndex(
+            int index, int colorFormat, bool metaOnly) = 0;
     virtual sp<IMemory>     extractAlbumArt() = 0;
     virtual const char*     extractMetadata(int keyCode) = 0;
 };
diff --git a/media/libmedia/include/media/IResourceManagerService.h b/media/libmedia/include/media/IResourceManagerService.h
index 1e4f6de..8992f8b 100644
--- a/media/libmedia/include/media/IResourceManagerService.h
+++ b/media/libmedia/include/media/IResourceManagerService.h
@@ -39,11 +39,15 @@
 
     virtual void addResource(
             int pid,
+            int uid,
             int64_t clientId,
             const sp<IResourceManagerClient> client,
             const Vector<MediaResource> &resources) = 0;
 
-    virtual void removeResource(int pid, int64_t clientId) = 0;
+    virtual void removeResource(int pid, int64_t clientId,
+            const Vector<MediaResource> &resources) = 0;
+
+    virtual void removeClient(int pid, int64_t clientId) = 0;
 
     virtual bool reclaimResource(
             int callingPid,
diff --git a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
index 98d300f..37dc401 100644
--- a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
+++ b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
@@ -49,9 +49,8 @@
             int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
     virtual sp<IMemory> getImageRectAtIndex(
             int index, int colorFormat, int left, int top, int right, int bottom) = 0;
-    virtual status_t getFrameAtIndex(
-            std::vector<sp<IMemory> >* frames,
-            int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
+    virtual sp<IMemory> getFrameAtIndex(
+            int frameIndex, int colorFormat, bool metaOnly) = 0;
     virtual MediaAlbumArt* extractAlbumArt() = 0;
     virtual const char* extractMetadata(int keyCode) = 0;
 };
diff --git a/media/libmedia/include/media/MediaResource.h b/media/libmedia/include/media/MediaResource.h
index e1fdb9b..10a07bb 100644
--- a/media/libmedia/include/media/MediaResource.h
+++ b/media/libmedia/include/media/MediaResource.h
@@ -31,12 +31,13 @@
         kNonSecureCodec,
         kGraphicMemory,
         kCpuBoost,
+        kBattery,
     };
 
     enum SubType {
         kUnspecifiedSubType = 0,
         kAudioCodec,
-        kVideoCodec
+        kVideoCodec,
     };
 
     MediaResource();
@@ -62,6 +63,8 @@
         case MediaResource::kSecureCodec:    return "secure-codec";
         case MediaResource::kNonSecureCodec: return "non-secure-codec";
         case MediaResource::kGraphicMemory:  return "graphic-memory";
+        case MediaResource::kCpuBoost:       return "cpu-boost";
+        case MediaResource::kBattery:        return "battery";
         default:                             return def;
     }
 }
diff --git a/media/libmedia/include/media/TypeConverter.h b/media/libmedia/include/media/TypeConverter.h
index 2f8c209..011498a 100644
--- a/media/libmedia/include/media/TypeConverter.h
+++ b/media/libmedia/include/media/TypeConverter.h
@@ -17,10 +17,11 @@
 #ifndef ANDROID_TYPE_CONVERTER_H_
 #define ANDROID_TYPE_CONVERTER_H_
 
+#include <set>
 #include <string>
 #include <string.h>
-
 #include <vector>
+
 #include <system/audio.h>
 #include <utils/Log.h>
 #include <utils/Vector.h>
@@ -42,16 +43,6 @@
     }
 };
 template <typename T>
-struct VectorTraits
-{
-    typedef T Type;
-    typedef Vector<Type> Collection;
-    static void add(Collection &collection, Type value)
-    {
-        collection.add(value);
-    }
-};
-template <typename T>
 struct SortedVectorTraits
 {
     typedef T Type;
@@ -61,18 +52,28 @@
         collection.add(value);
     }
 };
+template <typename T>
+struct SetTraits
+{
+    typedef T Type;
+    typedef std::set<Type> Collection;
+    static void add(Collection &collection, Type value)
+    {
+        collection.insert(value);
+    }
+};
 
-using SampleRateTraits = SortedVectorTraits<uint32_t>;
+using SampleRateTraits = SetTraits<uint32_t>;
 using DeviceTraits = DefaultTraits<audio_devices_t>;
 struct OutputDeviceTraits : public DeviceTraits {};
 struct InputDeviceTraits : public DeviceTraits {};
-using ChannelTraits = SortedVectorTraits<audio_channel_mask_t>;
+using ChannelTraits = SetTraits<audio_channel_mask_t>;
 struct OutputChannelTraits : public ChannelTraits {};
 struct InputChannelTraits : public ChannelTraits {};
 struct ChannelIndexTraits : public ChannelTraits {};
 using InputFlagTraits = DefaultTraits<audio_input_flags_t>;
 using OutputFlagTraits = DefaultTraits<audio_output_flags_t>;
-using FormatTraits = VectorTraits<audio_format_t>;
+using FormatTraits = DefaultTraits<audio_format_t>;
 using GainModeTraits = DefaultTraits<audio_gain_mode_t>;
 using StreamTraits = DefaultTraits<audio_stream_type_t>;
 using AudioModeTraits = DefaultTraits<audio_mode_t>;
@@ -259,6 +260,7 @@
                                     || std::is_same<T, audio_source_t>::value
                                     || std::is_same<T, audio_stream_type_t>::value
                                     || std::is_same<T, audio_usage_t>::value
+                                    || std::is_same<T, audio_format_t>::value
                                     , int> = 0>
 static inline std::string toString(const T& value)
 {
@@ -291,14 +293,6 @@
     return result;
 }
 
-// TODO: Remove when FormatTraits uses DefaultTraits.
-static inline std::string toString(const audio_format_t& format)
-{
-    std::string result;
-    return TypeConverter<VectorTraits<audio_format_t>>::toString(format, result)
-            ? result : std::to_string(static_cast<int>(format));
-}
-
 static inline std::string toString(const audio_attributes_t& attributes)
 {
     std::ostringstream result;
diff --git a/media/libmedia/include/media/mediametadataretriever.h b/media/libmedia/include/media/mediametadataretriever.h
index d29e97d..138a014 100644
--- a/media/libmedia/include/media/mediametadataretriever.h
+++ b/media/libmedia/include/media/mediametadataretriever.h
@@ -98,9 +98,8 @@
             int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false, bool thumbnail = false);
     sp<IMemory> getImageRectAtIndex(
             int index, int colorFormat, int left, int top, int right, int bottom);
-    status_t getFrameAtIndex(
-            std::vector<sp<IMemory> > *frames, int frameIndex, int numFrames = 1,
-            int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
+    sp<IMemory> getFrameAtIndex(
+            int index, int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
     sp<IMemory> extractAlbumArt();
     const char* extractMetadata(int keyCode);
 
diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp
index e61b04d..2ae76b3 100644
--- a/media/libmedia/mediametadataretriever.cpp
+++ b/media/libmedia/mediametadataretriever.cpp
@@ -179,18 +179,16 @@
             index, colorFormat, left, top, right, bottom);
 }
 
-status_t MediaMetadataRetriever::getFrameAtIndex(
-        std::vector<sp<IMemory> > *frames,
-        int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
-    ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d) metaOnly(%d)",
-            frameIndex, numFrames, colorFormat, metaOnly);
+sp<IMemory> MediaMetadataRetriever::getFrameAtIndex(
+        int index, int colorFormat, bool metaOnly) {
+    ALOGV("getFrameAtIndex: index(%d), colorFormat(%d) metaOnly(%d)",
+            index, colorFormat, metaOnly);
     Mutex::Autolock _l(mLock);
     if (mRetriever == 0) {
         ALOGE("retriever is not initialized");
-        return INVALID_OPERATION;
+        return NULL;
     }
-    return mRetriever->getFrameAtIndex(
-            frames, frameIndex, numFrames, colorFormat, metaOnly);
+    return mRetriever->getFrameAtIndex(index, colorFormat, metaOnly);
 }
 
 const char* MediaMetadataRetriever::extractMetadata(int keyCode)
diff --git a/media/libmedia/xsd/vts/Android.bp b/media/libmedia/xsd/vts/Android.bp
index b590a12..4739504 100644
--- a/media/libmedia/xsd/vts/Android.bp
+++ b/media/libmedia/xsd/vts/Android.bp
@@ -24,6 +24,7 @@
         "libxml2",
     ],
     shared_libs: [
+        "libbase",
         "liblog",
     ],
     cflags: [
diff --git a/media/libmedia/xsd/vts/ValidateMediaProfiles.cpp b/media/libmedia/xsd/vts/ValidateMediaProfiles.cpp
index ff9b060..7729d52 100644
--- a/media/libmedia/xsd/vts/ValidateMediaProfiles.cpp
+++ b/media/libmedia/xsd/vts/ValidateMediaProfiles.cpp
@@ -14,6 +14,10 @@
  * limitations under the License.
  */
 
+#include <string>
+
+#include <android-base/file.h>
+#include <android-base/properties.h>
 #include "utility/ValidateXml.h"
 
 TEST(CheckConfig, mediaProfilesValidation) {
@@ -21,8 +25,12 @@
                    "Verify that the media profiles file "
                    "is valid according to the schema");
 
-    const char* location = "/vendor/etc";
+    std::string mediaSettingsPath = android::base::GetProperty("media.settings.xml", "");
+    if (mediaSettingsPath.empty()) {
+        mediaSettingsPath.assign("/vendor/etc/media_profiles_V1_0.xml");
+    }
 
-    EXPECT_ONE_VALID_XML_MULTIPLE_LOCATIONS("media_profiles_V1_0.xml", {location},
+    EXPECT_ONE_VALID_XML_MULTIPLE_LOCATIONS(android::base::Basename(mediaSettingsPath).c_str(),
+                                            {android::base::Dirname(mediaSettingsPath).c_str()},
                                             "/data/local/tmp/media_profiles.xsd");
 }
diff --git a/media/libmediametrics/Android.bp b/media/libmediametrics/Android.bp
index 15ea578..9d348ec 100644
--- a/media/libmediametrics/Android.bp
+++ b/media/libmediametrics/Android.bp
@@ -37,6 +37,15 @@
             "1" ,
         ]
     },
+
+    header_abi_checker: {
+        enabled: true,
+        symbol_file: "libmediametrics.map.txt",
+    },
+
+    visibility: [
+        "//frameworks/av:__subpackages__",
+        "//frameworks/base/core/jni",
+        "//frameworks/base/media/jni",
+    ],
 }
-
-
diff --git a/media/libmediametrics/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
index 02c23b1..b7856a6 100644
--- a/media/libmediametrics/MediaAnalyticsItem.cpp
+++ b/media/libmediametrics/MediaAnalyticsItem.cpp
@@ -64,6 +64,16 @@
     return item;
 }
 
+MediaAnalyticsItem* MediaAnalyticsItem::convert(mediametrics_handle_t handle) {
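+    // The C API's mediametrics_handle_t is simply the item pointer cast to an opaque handle.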
+    MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    return item;
+}
+
+mediametrics_handle_t MediaAnalyticsItem::convert(MediaAnalyticsItem *item) {
+    mediametrics_handle_t handle = (mediametrics_handle_t) item;
+    return handle;
+}
+
 // access functions for the class
 MediaAnalyticsItem::MediaAnalyticsItem()
     : mPid(-1),
diff --git a/media/libmediametrics/MediaMetrics.cpp b/media/libmediametrics/MediaMetrics.cpp
index 6109190..360ae0c 100644
--- a/media/libmediametrics/MediaMetrics.cpp
+++ b/media/libmediametrics/MediaMetrics.cpp
@@ -169,6 +169,11 @@
     return item->selfrecord();
 }
 
+mediametrics_handle_t mediametrics_dup(mediametrics_handle_t handle) {
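+    // Duplicate the underlying item and return a handle to the copy; a null handle stays null.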
+    android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+    if (item == NULL) return android::MediaAnalyticsItem::convert(item);
+    return android::MediaAnalyticsItem::convert(item->dup());
+}
 
 const char *mediametrics_readable(mediametrics_handle_t handle) {
     android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
diff --git a/media/libmediametrics/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
index 4a36f6a..42a2f5b 100644
--- a/media/libmediametrics/include/MediaAnalyticsItem.h
+++ b/media/libmediametrics/include/MediaAnalyticsItem.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_MEDIA_MEDIAANALYTICSITEM_H
 #define ANDROID_MEDIA_MEDIAANALYTICSITEM_H
 
+#include "MediaMetrics.h"
+
 #include <string>
 #include <sys/types.h>
 
@@ -94,6 +96,9 @@
         static MediaAnalyticsItem* create(Key key);
         static MediaAnalyticsItem* create();
 
+        static MediaAnalyticsItem* convert(mediametrics_handle_t);
+        static mediametrics_handle_t convert(MediaAnalyticsItem *);
+
         // access functions for the class
         ~MediaAnalyticsItem();
 
diff --git a/media/libmediametrics/include/MediaMetrics.h b/media/libmediametrics/include/MediaMetrics.h
index a4e1ed2..29fb241 100644
--- a/media/libmediametrics/include/MediaMetrics.h
+++ b/media/libmediametrics/include/MediaMetrics.h
@@ -79,6 +79,7 @@
 // # of attributes set within this record.
 int32_t mediametrics_count(mediametrics_handle_t handle);
 
+mediametrics_handle_t mediametrics_dup(mediametrics_handle_t handle);
 bool mediametrics_selfRecord(mediametrics_handle_t handle);
 
 const char *mediametrics_readable(mediametrics_handle_t handle);
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index dfd3933..8ac169f 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -58,6 +58,7 @@
 #include <media/AudioTrack.h>
 #include <media/MemoryLeakTrackUtil.h>
 #include <media/stagefright/InterfaceUtils.h>
+#include <media/stagefright/MediaCodecConstants.h>
 #include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/Utils.h>
@@ -264,6 +265,172 @@
     return ok;
 }
 
+static void dumpCodecDetails(int fd, const sp<IMediaCodecList> &codecList, bool queryDecoders) {
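+    // Used by dump(): writes, for each media type, every matching decoder or encoder together
+    // with its aliases, attributes, profile/levels, color formats and detail parameters.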
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    const char *codecType = queryDecoders ? "Decoder" : "Encoder";
+    snprintf(buffer, SIZE - 1, "\n%s infos by media types:\n"
+             "=============================\n", codecType);
+    result.append(buffer);
+
+    size_t numCodecs = codecList->countCodecs();
+
+    // gather all media types supported by codec class, and link to codecs that support them
+    KeyedVector<AString, Vector<sp<MediaCodecInfo>>> allMediaTypes;
+    for (size_t codec_ix = 0; codec_ix < numCodecs; ++codec_ix) {
+        sp<MediaCodecInfo> info = codecList->getCodecInfo(codec_ix);
+        if (info->isEncoder() == !queryDecoders) {
+            Vector<AString> supportedMediaTypes;
+            info->getSupportedMediaTypes(&supportedMediaTypes);
+            if (!supportedMediaTypes.size()) {
+                snprintf(buffer, SIZE - 1, "warning: %s does not support any media types\n",
+                        info->getCodecName());
+                result.append(buffer);
+            } else {
+                for (const AString &mediaType : supportedMediaTypes) {
+                    if (allMediaTypes.indexOfKey(mediaType) < 0) {
+                        allMediaTypes.add(mediaType, Vector<sp<MediaCodecInfo>>());
+                    }
+                    allMediaTypes.editValueFor(mediaType).add(info);
+                }
+            }
+        }
+    }
+
+    KeyedVector<AString, bool> visitedCodecs;
+    for (size_t type_ix = 0; type_ix < allMediaTypes.size(); ++type_ix) {
+        const AString &mediaType = allMediaTypes.keyAt(type_ix);
+        snprintf(buffer, SIZE - 1, "\nMedia type '%s':\n", mediaType.c_str());
+        result.append(buffer);
+
+        for (const sp<MediaCodecInfo> &info : allMediaTypes.valueAt(type_ix)) {
+            sp<MediaCodecInfo::Capabilities> caps = info->getCapabilitiesFor(mediaType.c_str());
+            if (caps == NULL) {
+                snprintf(buffer, SIZE - 1, "warning: %s does not have capabilities for type %s\n",
+                        info->getCodecName(), mediaType.c_str());
+                result.append(buffer);
+                continue;
+            }
+            snprintf(buffer, SIZE - 1, "  %s \"%s\" supports\n",
+                       codecType, info->getCodecName());
+            result.append(buffer);
+
+            auto printList = [&](const char *type, const Vector<AString> &values){
+                snprintf(buffer, SIZE - 1, "    %s: [", type);
+                result.append(buffer);
+                for (size_t j = 0; j < values.size(); ++j) {
+                    snprintf(buffer, SIZE - 1, "\n      %s%s", values[j].c_str(),
+                            j == values.size() - 1 ? " " : ",");
+                    result.append(buffer);
+                }
+                result.append("]\n");
+            };
+
+            if (visitedCodecs.indexOfKey(info->getCodecName()) < 0) {
+                visitedCodecs.add(info->getCodecName(), true);
+                {
+                    Vector<AString> aliases;
+                    info->getAliases(&aliases);
+                    // quote alias
+                    for (AString &alias : aliases) {
+                        alias.insert("\"", 1, 0);
+                        alias.append('"');
+                    }
+                    printList("aliases", aliases);
+                }
+                {
+                    uint32_t attrs = info->getAttributes();
+                    Vector<AString> list;
+                    list.add(AStringPrintf("encoder: %d",
+                                           !!(attrs & MediaCodecInfo::kFlagIsEncoder)));
+                    list.add(AStringPrintf("vendor: %d",
+                                           !!(attrs & MediaCodecInfo::kFlagIsVendor)));
+                    list.add(AStringPrintf("software-only: %d",
+                                           !!(attrs & MediaCodecInfo::kFlagIsSoftwareOnly)));
+                    list.add(AStringPrintf("hw-accelerated: %d",
+                                           !!(attrs & MediaCodecInfo::kFlagIsHardwareAccelerated)));
+                    printList(AStringPrintf("attributes: %#x", attrs).c_str(), list);
+                }
+
+                snprintf(buffer, SIZE - 1, "    owner: \"%s\"\n", info->getOwnerName());
+                result.append(buffer);
+                snprintf(buffer, SIZE - 1, "    rank: %u\n", info->getRank());
+                result.append(buffer);
+            } else {
+                result.append("    aliases, attributes, owner, rank: see above\n");
+            }
+
+            {
+                Vector<AString> list;
+                Vector<MediaCodecInfo::ProfileLevel> profileLevels;
+                caps->getSupportedProfileLevels(&profileLevels);
+                for (const MediaCodecInfo::ProfileLevel &pl : profileLevels) {
+                    const char *niceProfile =
+                        mediaType.equalsIgnoreCase(MIMETYPE_AUDIO_AAC)
+                            ? asString_AACObject(pl.mProfile) :
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_MPEG2)
+                            ? asString_MPEG2Profile(pl.mProfile) :
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_H263)
+                            ? asString_H263Profile(pl.mProfile) :
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_MPEG4)
+                            ? asString_MPEG4Profile(pl.mProfile) :
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_AVC)
+                            ? asString_AVCProfile(pl.mProfile) :
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_VP8)
+                            ? asString_VP8Profile(pl.mProfile) :
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_HEVC)
+                            ? asString_HEVCProfile(pl.mProfile) :
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_VP9)
+                            ? asString_VP9Profile(pl.mProfile) :
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_AV1)
+                            ? asString_AV1Profile(pl.mProfile) : "??";
+                    const char *niceLevel =
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_MPEG2)
+                            ? asString_MPEG2Level(pl.mLevel) :
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_H263)
+                            ? asString_H263Level(pl.mLevel) :
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_MPEG4)
+                            ? asString_MPEG4Level(pl.mLevel) :
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_AVC)
+                            ? asString_AVCLevel(pl.mLevel) :
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_VP8)
+                            ? asString_VP8Level(pl.mLevel) :
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_HEVC)
+                            ? asString_HEVCTierLevel(pl.mLevel) :
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_VP9)
+                            ? asString_VP9Level(pl.mLevel) :
+                        mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_AV1)
+                            ? asString_AV1Level(pl.mLevel) : "??";
+
+                    list.add(AStringPrintf("% 5u/% 5u (%s/%s)",
+                            pl.mProfile, pl.mLevel, niceProfile, niceLevel));
+                }
+                printList("profile/levels", list);
+            }
+
+            {
+                Vector<AString> list;
+                Vector<uint32_t> colors;
+                caps->getSupportedColorFormats(&colors);
+                for (uint32_t color : colors) {
+                    list.add(AStringPrintf("%#x (%s)", color,
+                            asString_ColorFormat((int32_t)color)));
+                }
+                printList("colors", list);
+            }
+
+            snprintf(buffer, SIZE - 1, "    details: %s\n",
+                     caps->getDetails()->debugString(6).c_str());
+            result.append(buffer);
+        }
+    }
+    result.append("\n");
+    ::write(fd, result.string(), result.size());
+}
+
+
 // TODO: Find real cause of Audio/Video delay in PV framework and remove this workaround
 /* static */ int MediaPlayerService::AudioOutput::mMinBufferCount = 4;
 /* static */ bool MediaPlayerService::AudioOutput::mIsOnEmulator = false;
@@ -423,7 +590,7 @@
     SortedVector< sp<MediaRecorderClient> > mediaRecorderClients;
 
     if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
-        snprintf(buffer, SIZE, "Permission Denial: "
+        snprintf(buffer, SIZE - 1, "Permission Denial: "
                 "can't dump MediaPlayerService from pid=%d, uid=%d\n",
                 IPCThreadState::self()->getCallingPid(),
                 IPCThreadState::self()->getCallingUid());
@@ -452,11 +619,11 @@
         }
 
         result.append(" Files opened and/or mapped:\n");
-        snprintf(buffer, SIZE, "/proc/%d/maps", getpid());
+        snprintf(buffer, SIZE - 1, "/proc/%d/maps", getpid());
         FILE *f = fopen(buffer, "r");
         if (f) {
             while (!feof(f)) {
-                fgets(buffer, SIZE, f);
+                fgets(buffer, SIZE - 1, f);
                 if (strstr(buffer, " /storage/") ||
                     strstr(buffer, " /system/sounds/") ||
                     strstr(buffer, " /data/") ||
@@ -472,13 +639,13 @@
             result.append("\n");
         }
 
-        snprintf(buffer, SIZE, "/proc/%d/fd", getpid());
+        snprintf(buffer, SIZE - 1, "/proc/%d/fd", getpid());
         DIR *d = opendir(buffer);
         if (d) {
             struct dirent *ent;
             while((ent = readdir(d)) != NULL) {
                 if (strcmp(ent->d_name,".") && strcmp(ent->d_name,"..")) {
-                    snprintf(buffer, SIZE, "/proc/%d/fd/%s", getpid(), ent->d_name);
+                    snprintf(buffer, SIZE - 1, "/proc/%d/fd/%s", getpid(), ent->d_name);
                     struct stat s;
                     if (lstat(buffer, &s) == 0) {
                         if ((s.st_mode & S_IFMT) == S_IFLNK) {
@@ -543,6 +710,11 @@
         }
     }
     write(fd, result.string(), result.size());
+
+    sp<IMediaCodecList> codecList = getCodecList();
+    dumpCodecDetails(fd, codecList, true /* decoders */);
+    dumpCodecDetails(fd, codecList, false /* !decoders */);
+
     return NO_ERROR;
 }
 
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index 40b17bf..b44eb43 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -242,31 +242,27 @@
     sp<IMemory> frame = mRetriever->getImageRectAtIndex(
             index, colorFormat, left, top, right, bottom);
     if (frame == NULL) {
-        ALOGE("failed to extract image");
-        return NULL;
+        ALOGE("failed to extract image at index %d", index);
     }
     return frame;
 }
 
-status_t MetadataRetrieverClient::getFrameAtIndex(
-            std::vector<sp<IMemory> > *frames,
-            int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
-    ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d), metaOnly(%d)",
-            frameIndex, numFrames, colorFormat, metaOnly);
+sp<IMemory> MetadataRetrieverClient::getFrameAtIndex(
+            int index, int colorFormat, bool metaOnly) {
+    ALOGV("getFrameAtIndex: index(%d), colorFormat(%d), metaOnly(%d)",
+            index, colorFormat, metaOnly);
     Mutex::Autolock lock(mLock);
     Mutex::Autolock glock(sLock);
     if (mRetriever == NULL) {
         ALOGE("retriever is not initialized");
-        return INVALID_OPERATION;
+        return NULL;
     }
 
-    status_t err = mRetriever->getFrameAtIndex(
-            frames, frameIndex, numFrames, colorFormat, metaOnly);
-    if (err != OK) {
-        frames->clear();
-        return err;
+    sp<IMemory> frame = mRetriever->getFrameAtIndex(index, colorFormat, metaOnly);
+    if (frame == NULL) {
+        ALOGE("failed to extract frame at index %d", index);
     }
-    return OK;
+    return frame;
 }
 
 sp<IMemory> MetadataRetrieverClient::extractAlbumArt()
@@ -296,7 +292,7 @@
         delete albumArt;
         return NULL;
     }
-    MediaAlbumArt::init((MediaAlbumArt *) mAlbumArt->pointer(),
+    MediaAlbumArt::init((MediaAlbumArt *) mAlbumArt->unsecurePointer(),
                         albumArt->size(), albumArt->data());
     delete albumArt;  // We've taken our copy.
     return mAlbumArt;
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.h b/media/libmediaplayerservice/MetadataRetrieverClient.h
index 272d093..8020441 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.h
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.h
@@ -56,9 +56,8 @@
             int index, int colorFormat, bool metaOnly, bool thumbnail);
     virtual sp<IMemory>             getImageRectAtIndex(
             int index, int colorFormat, int left, int top, int right, int bottom);
-    virtual status_t getFrameAtIndex(
-                std::vector<sp<IMemory> > *frames,
-                int frameIndex, int numFrames, int colorFormat, bool metaOnly);
+    virtual sp<IMemory>             getFrameAtIndex(
+            int index, int colorFormat, bool metaOnly);
     virtual sp<IMemory>             extractAlbumArt();
     virtual const char*             extractMetadata(int keyCode);
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 39be40d..c30f048 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -33,6 +33,7 @@
 #include <media/stagefright/Utils.h>
 #include <media/stagefright/VideoFrameScheduler.h>
 #include <media/MediaCodecBuffer.h>
+#include <utils/SystemClock.h>
 
 #include <inttypes.h>
 
@@ -156,6 +157,7 @@
     CHECK(mediaClock != NULL);
     mPlaybackRate = mPlaybackSettings.mSpeed;
     mMediaClock->setPlaybackRate(mPlaybackRate);
+    (void)mSyncFlag.test_and_set();
 }
 
 NuPlayer::Renderer::~Renderer() {
@@ -326,9 +328,27 @@
         mSyncQueues = false;
     }
 
+    // Wait until the current job in the message queue is done, to make sure
+    // buffer processing from the old generation is finished. After the current
+    // job is finished, access to buffers is protected by generation.
+    Mutex::Autolock syncLock(mSyncLock);
+    int64_t syncCount = mSyncCount;
+    mSyncFlag.clear();
+
+    // Make sure the message queue is not empty after mSyncFlag is cleared.
     sp<AMessage> msg = new AMessage(kWhatFlush, this);
     msg->setInt32("audio", static_cast<int32_t>(audio));
     msg->post();
+
+    int64_t uptimeMs = uptimeMillis();
+    while (mSyncCount == syncCount) {
+        (void)mSyncCondition.waitRelative(mSyncLock, ms2ns(1000));
+        if (uptimeMillis() - uptimeMs > 1000) {
+            ALOGW("flush(): no wake-up from sync point for 1s; stop waiting to "
+                  "prevent being stuck indefinitely.");
+            break;
+        }
+    }
 }
 
 void NuPlayer::Renderer::signalTimeDiscontinuity() {
@@ -781,6 +801,11 @@
             TRESPASS();
             break;
     }
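+    // Wake any flush() call that is waiting on mSyncCondition for this message to be handled.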
+    if (!mSyncFlag.test_and_set()) {
+        Mutex::Autolock syncLock(mSyncLock);
+        ++mSyncCount;
+        mSyncCondition.broadcast();
+    }
 }
 
 void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index a521f62..3d2b033 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -18,6 +18,8 @@
 
 #define NUPLAYER_RENDERER_H_
 
+#include <atomic>
+
 #include <media/AudioResamplerPublic.h>
 #include <media/AVSyncSettings.h>
 
@@ -220,6 +222,11 @@
 
     sp<AWakeLock> mWakeLock;
 
+    std::atomic_flag mSyncFlag = ATOMIC_FLAG_INIT;
+    Mutex mSyncLock;
+    Condition mSyncCondition;
+    int64_t mSyncCount{0};
+
     status_t getCurrentPositionOnLooper(int64_t *mediaUs);
     status_t getCurrentPositionOnLooper(
             int64_t *mediaUs, int64_t nowUs, bool allowPastQueuedVideo = false);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp
index ee70306..b5142ed 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp
@@ -154,7 +154,7 @@
     }
 
     memcpy(data,
-           (const uint8_t *)mem->pointer()
+           (const uint8_t *)mem->unsecurePointer()
             + entry->mOffset,
            copy);
 
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index bf14ec2..83da092 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -144,6 +144,10 @@
     if (mLooper == NULL) {
         return;
     }
+
+    // Close socket before posting message to RTSPSource message handler.
+    close(mHandler->getARTSPConnection()->getSocket());
+
     sp<AMessage> msg = new AMessage(kWhatDisconnect, this);
 
     sp<AMessage> dummy;
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index afdcd37..f21d2b3 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -130,29 +130,32 @@
         } else if (n < 0) {
             break;
         } else {
-            if (buffer[0] == 0x00) {
+            if (buffer[0] == 0x00) { // OK to access buffer[0] since n must be > 0 here
                 // XXX legacy
 
                 if (extra == NULL) {
                     extra = new AMessage;
                 }
 
-                uint8_t type = buffer[1];
+                uint8_t type = 0;
+                if (n > 1) {
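+                    // Only trust the type byte (and any embedded timestamp) when enough
+                    // bytes were actually read.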
+                    type = buffer[1];
 
-                if (type & 2) {
-                    int64_t mediaTimeUs;
-                    memcpy(&mediaTimeUs, &buffer[2], sizeof(mediaTimeUs));
+                    if ((type & 2) && (n >= 2 + sizeof(int64_t))) {
+                        int64_t mediaTimeUs;
+                        memcpy(&mediaTimeUs, &buffer[2], sizeof(mediaTimeUs));
 
-                    extra->setInt64(kATSParserKeyMediaTimeUs, mediaTimeUs);
+                        extra->setInt64(kATSParserKeyMediaTimeUs, mediaTimeUs);
+                    }
                 }
 
                 mTSParser->signalDiscontinuity(
                         ((type & 1) == 0)
-                            ? ATSParser::DISCONTINUITY_TIME
-                            : ATSParser::DISCONTINUITY_FORMATCHANGE,
+                                ? ATSParser::DISCONTINUITY_TIME
+                                : ATSParser::DISCONTINUITY_FORMATCHANGE,
                         extra);
             } else {
-                status_t err = mTSParser->feedTSPacket(buffer, sizeof(buffer));
+                status_t err = mTSParser->feedTSPacket(buffer, n);
 
                 if (err != OK) {
                     ALOGE("TS Parser returned error %d", err);
diff --git a/media/libnbaio/Android.bp b/media/libnbaio/Android.bp
index a4df38d..04ddcff 100644
--- a/media/libnbaio/Android.bp
+++ b/media/libnbaio/Android.bp
@@ -1,4 +1,3 @@
-
 cc_defaults {
     name: "libnbaio_mono_defaults",
     srcs: [
@@ -9,20 +8,27 @@
     header_libs: [
         "libaudioclient_headers",
         "libaudio_system_headers",
-        "libmedia_headers",
     ],
     export_header_lib_headers: [
         "libaudioclient_headers",
-        "libmedia_headers",
     ],
 
     shared_libs: [
         "libaudioutils",
+        "libcutils",
         "liblog",
         "libutils",
     ],
+    export_shared_lib_headers: [
+        "libaudioutils",
+    ],
 
     export_include_dirs: ["include_mono"],
+
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
 }
 
 // libnbaio_mono is the part of libnbaio that is available for vendors to use. Vendor modules can't
@@ -53,20 +59,7 @@
     // ],
     // static_libs: ["libsndfile"],
 
-    shared_libs: [
-        "libaudioutils",
-        "libbinder",
-        "libcutils",
-        "liblog",
-        "libutils",
-    ],
-
-    cflags: [
-        "-Werror",
-        "-Wall",
-    ],
-
-    include_dirs: ["system/media/audio_utils/include"],
+    header_libs: ["libaudiohal_headers"],
 
     export_include_dirs: ["include"],
 }
diff --git a/media/libnbaio/include_mono/media/nbaio/MonoPipe.h b/media/libnbaio/include_mono/media/nbaio/MonoPipe.h
index c51d0fe..926d84a 100644
--- a/media/libnbaio/include_mono/media/nbaio/MonoPipe.h
+++ b/media/libnbaio/include_mono/media/nbaio/MonoPipe.h
@@ -19,7 +19,7 @@
 
 #include <time.h>
 #include <audio_utils/fifo.h>
-#include <media/SingleStateQueue.h>
+#include <media/nbaio/SingleStateQueue.h>
 #include <media/nbaio/NBAIO.h>
 
 namespace android {
diff --git a/media/libmedia/include/media/SingleStateQueue.h b/media/libnbaio/include_mono/media/nbaio/SingleStateQueue.h
similarity index 100%
rename from media/libmedia/include/media/SingleStateQueue.h
rename to media/libnbaio/include_mono/media/nbaio/SingleStateQueue.h
diff --git a/media/libnblog/Reader.cpp b/media/libnblog/Reader.cpp
index f556e37..67d028d 100644
--- a/media/libnblog/Reader.cpp
+++ b/media/libnblog/Reader.cpp
@@ -45,7 +45,12 @@
 }
 
 Reader::Reader(const sp<IMemory>& iMemory, size_t size, const std::string &name)
-    : Reader(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size, name)
+    // TODO: Using unsecurePointer() has some associated security pitfalls
+    //       (see declaration for details).
+    //       Either document why it is safe in this case or address the
+    //       issue (e.g. by copying).
+    : Reader(iMemory != 0 ? (Shared *) iMemory->unsecurePointer() : NULL, size,
+             name)
 {
     mIMemory = iMemory;
 }
@@ -156,7 +161,8 @@
 
 bool Reader::isIMemory(const sp<IMemory>& iMemory) const
 {
-    return iMemory != 0 && mIMemory != 0 && iMemory->pointer() == mIMemory->pointer();
+    return iMemory != 0 && mIMemory != 0 &&
+           iMemory->unsecurePointer() == mIMemory->unsecurePointer();
 }
 
 // We make a set of the invalid types rather than the valid types when aligning
diff --git a/media/libnblog/Writer.cpp b/media/libnblog/Writer.cpp
index da3bd52..86d3b98 100644
--- a/media/libnblog/Writer.cpp
+++ b/media/libnblog/Writer.cpp
@@ -56,7 +56,11 @@
 }
 
 Writer::Writer(const sp<IMemory>& iMemory, size_t size)
-    : Writer(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size)
+    // TODO: Using unsecurePointer() has some associated security pitfalls
+    //       (see declaration for details).
+    //       Either document why it is safe in this case or address the
+    //       issue (e.g. by copying).
+    : Writer(iMemory != 0 ? (Shared *) iMemory->unsecurePointer() : NULL, size)
 {
     mIMemory = iMemory;
 }
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 3d67c91..369e13f 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1826,6 +1826,23 @@
             mRepeatFrameDelayUs = -1LL;
         }
 
+        if (!msg->findDouble("time-lapse-fps", &mCaptureFps)) {
+            float captureRate;
+            if (msg->findAsFloat(KEY_CAPTURE_RATE, &captureRate)) {
+                mCaptureFps = captureRate;
+            } else {
+                mCaptureFps = -1.0;
+            }
+        }
+
+        if (!msg->findInt32(
+                KEY_CREATE_INPUT_SURFACE_SUSPENDED,
+                (int32_t*)&mCreateInputBuffersSuspended)) {
+            mCreateInputBuffersSuspended = false;
+        }
+    }
+
+    if (encoder && (mIsVideo || mIsImage)) {
         // only allow 32-bit value, since we pass it as U32 to OMX.
         if (!msg->findInt64(KEY_MAX_PTS_GAP_TO_ENCODER, &mMaxPtsGapUs)) {
             mMaxPtsGapUs = 0LL;
@@ -1842,16 +1859,6 @@
         if (mMaxPtsGapUs < 0LL) {
             mMaxFps = -1;
         }
-
-        if (!msg->findDouble("time-lapse-fps", &mCaptureFps)) {
-            mCaptureFps = -1.0;
-        }
-
-        if (!msg->findInt32(
-                KEY_CREATE_INPUT_SURFACE_SUSPENDED,
-                (int32_t*)&mCreateInputBuffersSuspended)) {
-            mCreateInputBuffersSuspended = false;
-        }
     }
 
     // NOTE: we only use native window for video decoders
@@ -4505,22 +4512,38 @@
 status_t ACodec::configureImageGrid(
         const sp<AMessage> &msg, sp<AMessage> &outputFormat) {
     int32_t tileWidth, tileHeight, gridRows, gridCols;
-    if (!msg->findInt32("tile-width", &tileWidth) ||
-        !msg->findInt32("tile-height", &tileHeight) ||
-        !msg->findInt32("grid-rows", &gridRows) ||
-        !msg->findInt32("grid-cols", &gridCols)) {
+    OMX_BOOL useGrid = OMX_FALSE;
+    if (msg->findInt32("tile-width", &tileWidth) &&
+        msg->findInt32("tile-height", &tileHeight) &&
+        msg->findInt32("grid-rows", &gridRows) &&
+        msg->findInt32("grid-cols", &gridCols)) {
+        useGrid = OMX_TRUE;
+    } else {
+        // When bEnabled is false the tile info is not used,
+        // but clear these fields out anyway.
+        tileWidth = tileHeight = gridRows = gridCols = 0;
+    }
+
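+    // Nothing to configure unless this is an image track or explicit grid info was provided.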
+    if (!mIsImage && !useGrid) {
         return OK;
     }
 
     OMX_VIDEO_PARAM_ANDROID_IMAGEGRIDTYPE gridType;
     InitOMXParams(&gridType);
     gridType.nPortIndex = kPortIndexOutput;
-    gridType.bEnabled = OMX_TRUE;
+    gridType.bEnabled = useGrid;
     gridType.nTileWidth = tileWidth;
     gridType.nTileHeight = tileHeight;
     gridType.nGridRows = gridRows;
     gridType.nGridCols = gridCols;
 
+    ALOGV("sending image grid info to component: bEnabled %d, tile %dx%d, grid %dx%d",
+            gridType.bEnabled,
+            gridType.nTileWidth,
+            gridType.nTileHeight,
+            gridType.nGridRows,
+            gridType.nGridCols);
+
     status_t err = mOMXNode->setParameter(
             (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidImageGrid,
             &gridType, sizeof(gridType));
@@ -4541,6 +4564,13 @@
             (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidImageGrid,
             &gridType, sizeof(gridType));
 
+    ALOGV("received image grid info from component: bEnabled %d, tile %dx%d, grid %dx%d",
+            gridType.bEnabled,
+            gridType.nTileWidth,
+            gridType.nTileHeight,
+            gridType.nGridRows,
+            gridType.nGridCols);
+
     if (err == OK && gridType.bEnabled) {
         outputFormat->setInt32("tile-width", gridType.nTileWidth);
         outputFormat->setInt32("tile-height", gridType.nTileHeight);
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
index 266a240..dd6f7b4 100644
--- a/media/libstagefright/ACodecBufferChannel.cpp
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -153,7 +153,8 @@
         }
 
         if (destination.mType == ICrypto::kDestinationTypeSharedMemory) {
-            memcpy(it->mCodecBuffer->base(), destination.mSharedMemory->pointer(), result);
+            memcpy(it->mCodecBuffer->base(),
+                destination.mSharedMemory->unsecurePointer(), result);
         }
     } else {
         // Here we cast CryptoPlugin::SubSample to hardware::cas::native::V1_0::SubSample
@@ -219,7 +220,8 @@
 
         if (dstBuffer.type == BufferType::SHARED_MEMORY) {
             memcpy(it->mCodecBuffer->base(),
-                    (uint8_t*)it->mSharedEncryptedBuffer->pointer(), result);
+                    (uint8_t*)it->mSharedEncryptedBuffer->unsecurePointer(),
+                    result);
         }
     }
 
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 9170805..832f9aa 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -61,7 +61,7 @@
     shared_libs: [
         "libgui",
         "liblog",
-        "libmedia_omx",
+        "libmedia",
         "libstagefright_foundation",
         "libui",
         "libutils",
@@ -129,7 +129,6 @@
         "CameraSource.cpp",
         "CameraSourceTimeLapse.cpp",
         "DataConverter.cpp",
-        "DataSourceBase.cpp",
         "DataSourceFactory.cpp",
         "DataURISource.cpp",
         "ClearFileSource.cpp",
@@ -190,7 +189,6 @@
         "libmedia_omx_client",
         "libaudioclient",
         "libmediametrics",
-        "libmediautils",
         "libui",
         "libutils",
         "libmedia_helper",
@@ -220,7 +218,7 @@
     ],
 
     header_libs:[
-        "libnativeloader-dummy-headers",
+        "libnativeloader-headers",
         "libstagefright_xmlparser_headers",
         "media_ndk_headers",
     ],
@@ -267,7 +265,6 @@
     srcs: [
         "ClearFileSource.cpp",
         "DataURISource.cpp",
-        "DataSourceBase.cpp",
         "HTTPBase.cpp",
         "HevcUtils.cpp",
         "MediaClock.cpp",
diff --git a/media/libstagefright/BufferImpl.cpp b/media/libstagefright/BufferImpl.cpp
index b760273..68440e6 100644
--- a/media/libstagefright/BufferImpl.cpp
+++ b/media/libstagefright/BufferImpl.cpp
@@ -32,7 +32,11 @@
 // SharedMemoryBuffer
 
 SharedMemoryBuffer::SharedMemoryBuffer(const sp<AMessage> &format, const sp<IMemory> &mem)
-    : MediaCodecBuffer(format, new ABuffer(mem->pointer(), mem->size())),
+    // TODO: Using unsecurePointer() has some associated security pitfalls
+    //       (see declaration for details).
+    //       Either document why it is safe in this case or address the
+    //       issue (e.g. by copying).
+    : MediaCodecBuffer(format, new ABuffer(mem->unsecurePointer(), mem->size())),
       mMemory(mem) {
 }
 
diff --git a/media/libstagefright/CallbackDataSource.cpp b/media/libstagefright/CallbackDataSource.cpp
index 92e6eb9..2f8e6af 100644
--- a/media/libstagefright/CallbackDataSource.cpp
+++ b/media/libstagefright/CallbackDataSource.cpp
@@ -81,7 +81,8 @@
             return ERROR_OUT_OF_RANGE;
         }
         CHECK(numRead >= 0 && (size_t)numRead <= bufferSize);
-        memcpy(((uint8_t*)data) + totalNumRead, mMemory->pointer(), numRead);
+        memcpy(((uint8_t*)data) + totalNumRead, mMemory->unsecurePointer(),
+            numRead);
         numLeft -= numRead;
         totalNumRead += numRead;
     }
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 41f5db0..9b3f420 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -89,7 +89,7 @@
 void CameraSourceListener::postData(int32_t msgType, const sp<IMemory> &dataPtr,
                                     camera_frame_metadata_t * /* metadata */) {
     ALOGV("postData(%d, ptr:%p, size:%zu)",
-         msgType, dataPtr->pointer(), dataPtr->size());
+         msgType, dataPtr->unsecurePointer(), dataPtr->size());
 
     sp<CameraSource> source = mSource.promote();
     if (source.get() != NULL) {
@@ -966,8 +966,12 @@
 
         // Check if frame contains a VideoNativeHandleMetadata.
         if (frame->size() == sizeof(VideoNativeHandleMetadata)) {
-            VideoNativeHandleMetadata *metadata =
-                (VideoNativeHandleMetadata*)(frame->pointer());
+            // TODO: Using unsecurePointer() has some associated security pitfalls
+            //       (see declaration for details).
+            //       Either document why it is safe in this case or address the
+            //       issue (e.g. by copying).
+            VideoNativeHandleMetadata *metadata =
+                (VideoNativeHandleMetadata*)(frame->unsecurePointer());
             if (metadata->eType == kMetadataBufferTypeNativeHandleSource) {
                 handle = metadata->pHandle;
             }
@@ -1047,7 +1051,7 @@
     Mutex::Autolock autoLock(mLock);
     for (List<sp<IMemory> >::iterator it = mFramesBeingEncoded.begin();
          it != mFramesBeingEncoded.end(); ++it) {
-        if ((*it)->pointer() ==  buffer->data()) {
+        if ((*it)->unsecurePointer() ==  buffer->data()) {
             releaseOneRecordingFrame((*it));
             mFramesBeingEncoded.erase(it);
             ++mNumFramesEncoded;
@@ -1102,7 +1106,11 @@
         frameTime = *mFrameTimes.begin();
         mFrameTimes.erase(mFrameTimes.begin());
         mFramesBeingEncoded.push_back(frame);
-        *buffer = new MediaBuffer(frame->pointer(), frame->size());
+        // TODO: Using unsecurePointer() has some associated security pitfalls
+        //       (see declaration for details).
+        //       Either document why it is safe in this case or address the
+        //       issue (e.g. by copying).
+        *buffer = new MediaBuffer(frame->unsecurePointer(), frame->size());
         (*buffer)->setObserver(this);
         (*buffer)->add_ref();
         (*buffer)->meta_data().setInt64(kKeyTime, frameTime);
@@ -1248,7 +1256,7 @@
     mMemoryBases.erase(mMemoryBases.begin());
 
     // Wrap native handle in sp<IMemory> so it can be pushed to mFramesReceived.
-    VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->pointer());
+    VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->unsecurePointer());
     metadata->eType = kMetadataBufferTypeNativeHandleSource;
     metadata->pHandle = handle;
 
@@ -1296,7 +1304,11 @@
         mMemoryBases.erase(mMemoryBases.begin());
 
         // Wrap native handle in sp<IMemory> so it can be pushed to mFramesReceived.
-        VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->pointer());
+        // TODO: Using unsecurePointer() has some associated security pitfalls
+        //       (see declaration for details).
+        //       Either document why it is safe in this case or address the
+        //       issue (e.g. by copying).
+        VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->unsecurePointer());
         metadata->eType = kMetadataBufferTypeNativeHandleSource;
         metadata->pHandle = handle;
 
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 2a819ad..e0a6eb3 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -245,11 +245,11 @@
 
     ALOGV("createIMemoryCopy");
     size_t source_size = source_data->size();
-    void* source_pointer = source_data->pointer();
+    void* source_pointer = source_data->unsecurePointer();
 
     sp<MemoryHeapBase> newMemoryHeap = new MemoryHeapBase(source_size);
     sp<MemoryBase> newMemory = new MemoryBase(newMemoryHeap, 0, source_size);
-    memcpy(newMemory->pointer(), source_pointer, source_size);
+    memcpy(newMemory->unsecurePointer(), source_pointer, source_size);
     return newMemory;
 }
 
diff --git a/media/libstagefright/DataSourceBase.cpp b/media/libstagefright/DataSourceBase.cpp
deleted file mode 100644
index 8f47ee5..0000000
--- a/media/libstagefright/DataSourceBase.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-//#define LOG_NDEBUG 0
-#define LOG_TAG "DataSourceBase"
-
-#include <media/DataSourceBase.h>
-#include <media/stagefright/foundation/ByteUtils.h>
-#include <media/stagefright/MediaErrors.h>
-#include <utils/String8.h>
-
-namespace android {
-
-bool DataSourceBase::getUInt16(off64_t offset, uint16_t *x) {
-    *x = 0;
-
-    uint8_t byte[2];
-    if (readAt(offset, byte, 2) != 2) {
-        return false;
-    }
-
-    *x = (byte[0] << 8) | byte[1];
-
-    return true;
-}
-
-bool DataSourceBase::getUInt24(off64_t offset, uint32_t *x) {
-    *x = 0;
-
-    uint8_t byte[3];
-    if (readAt(offset, byte, 3) != 3) {
-        return false;
-    }
-
-    *x = (byte[0] << 16) | (byte[1] << 8) | byte[2];
-
-    return true;
-}
-
-bool DataSourceBase::getUInt32(off64_t offset, uint32_t *x) {
-    *x = 0;
-
-    uint32_t tmp;
-    if (readAt(offset, &tmp, 4) != 4) {
-        return false;
-    }
-
-    *x = ntohl(tmp);
-
-    return true;
-}
-
-bool DataSourceBase::getUInt64(off64_t offset, uint64_t *x) {
-    *x = 0;
-
-    uint64_t tmp;
-    if (readAt(offset, &tmp, 8) != 8) {
-        return false;
-    }
-
-    *x = ntoh64(tmp);
-
-    return true;
-}
-
-bool DataSourceBase::getUInt16Var(off64_t offset, uint16_t *x, size_t size) {
-    if (size == 2) {
-        return getUInt16(offset, x);
-    }
-    if (size == 1) {
-        uint8_t tmp;
-        if (readAt(offset, &tmp, 1) == 1) {
-            *x = tmp;
-            return true;
-        }
-    }
-    return false;
-}
-
-bool DataSourceBase::getUInt32Var(off64_t offset, uint32_t *x, size_t size) {
-    if (size == 4) {
-        return getUInt32(offset, x);
-    }
-    if (size == 2) {
-        uint16_t tmp;
-        if (getUInt16(offset, &tmp)) {
-            *x = tmp;
-            return true;
-        }
-    }
-    return false;
-}
-
-bool DataSourceBase::getUInt64Var(off64_t offset, uint64_t *x, size_t size) {
-    if (size == 8) {
-        return getUInt64(offset, x);
-    }
-    if (size == 4) {
-        uint32_t tmp;
-        if (getUInt32(offset, &tmp)) {
-            *x = tmp;
-            return true;
-        }
-    }
-    return false;
-}
-
-status_t DataSourceBase::getSize(off64_t *size) {
-    *size = 0;
-
-    return ERROR_UNSUPPORTED;
-}
-
-bool DataSourceBase::getUri(char *uriString __unused, size_t bufferSize __unused) {
-    return false;
-}
-
-}  // namespace android
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 18a6bd8..a5a07d6 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -21,6 +21,7 @@
 #include <binder/MemoryBase.h>
 #include <binder/MemoryHeapBase.h>
 #include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
 #include <inttypes.h>
 #include <media/ICrypto.h>
 #include <media/IMediaSource.h>
@@ -28,6 +29,7 @@
 #include <media/stagefright/foundation/avc_utils.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ColorUtils.h>
 #include <media/stagefright/ColorConverter.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaCodec.h>
@@ -41,10 +43,11 @@
 
 static const int64_t kBufferTimeOutUs = 10000LL; // 10 msec
 static const size_t kRetryCount = 50; // must be >0
+static const int64_t kDefaultSampleDurationUs = 33333LL; // 33ms
 
 sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
         int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
-        int32_t dstBpp, bool metaOnly = false) {
+        int32_t dstBpp, bool allocRotated, bool metaOnly) {
     int32_t rotationAngle;
     if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
         rotationAngle = 0;  // By default, no rotation
@@ -74,6 +77,14 @@
         displayHeight = height;
     }
 
+    if (allocRotated && (rotationAngle == 90 || rotationAngle == 270)) {
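+        // Swap the buffer, display and tile dimensions and clear the angle so the allocated
+        // VideoFrame describes the already-rotated orientation.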
+        int32_t tmp;
+        tmp = width; width = height; height = tmp;
+        tmp = displayWidth; displayWidth = displayHeight; displayHeight = tmp;
+        tmp = tileWidth; tileWidth = tileHeight; tileHeight = tmp;
+        rotationAngle = 0;
+    }
+
     VideoFrame frame(width, height, displayWidth, displayHeight,
             tileWidth, tileHeight, rotationAngle, dstBpp, !metaOnly, iccSize);
 
@@ -88,12 +99,26 @@
         ALOGE("not enough memory for VideoFrame size=%zu", size);
         return NULL;
     }
-    VideoFrame* frameCopy = static_cast<VideoFrame*>(frameMem->pointer());
+    VideoFrame* frameCopy = static_cast<VideoFrame*>(frameMem->unsecurePointer());
     frameCopy->init(frame, iccData, iccSize);
 
     return frameMem;
 }
 
+sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
+        int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
+        int32_t dstBpp, bool allocRotated = false) {
+    return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp,
+            allocRotated, false /*metaOnly*/);
+}
+
+sp<IMemory> allocMetaFrame(const sp<MetaData>& trackMeta,
+        int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
+        int32_t dstBpp) {
+    return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp,
+            false /*allocRotated*/, true /*metaOnly*/);
+}
+
 bool findThumbnailInfo(
         const sp<MetaData> &trackMeta, int32_t *width, int32_t *height,
         uint32_t *type = NULL, const void **data = NULL, size_t *size = NULL) {
@@ -117,23 +142,27 @@
 bool getDstColorFormat(
         android_pixel_format_t colorFormat,
         OMX_COLOR_FORMATTYPE *dstFormat,
+        ui::PixelFormat *captureFormat,
         int32_t *dstBpp) {
     switch (colorFormat) {
         case HAL_PIXEL_FORMAT_RGB_565:
         {
             *dstFormat = OMX_COLOR_Format16bitRGB565;
+            *captureFormat = ui::PixelFormat::RGB_565;
             *dstBpp = 2;
             return true;
         }
         case HAL_PIXEL_FORMAT_RGBA_8888:
         {
             *dstFormat = OMX_COLOR_Format32BitRGBA8888;
+            *captureFormat = ui::PixelFormat::RGBA_8888;
             *dstBpp = 4;
             return true;
         }
         case HAL_PIXEL_FORMAT_BGRA_8888:
         {
             *dstFormat = OMX_COLOR_Format32bitBGRA8888;
+            *captureFormat = ui::PixelFormat::BGRA_8888;
             *dstBpp = 4;
             return true;
         }
@@ -150,9 +179,10 @@
 sp<IMemory> FrameDecoder::getMetadataOnly(
         const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail) {
     OMX_COLOR_FORMATTYPE dstFormat;
+    ui::PixelFormat captureFormat;
     int32_t dstBpp;
-    if (!getDstColorFormat(
-            (android_pixel_format_t)colorFormat, &dstFormat, &dstBpp)) {
+    if (!getDstColorFormat((android_pixel_format_t)colorFormat,
+            &dstFormat, &captureFormat, &dstBpp)) {
         return NULL;
     }
 
@@ -170,8 +200,19 @@
             tileWidth = tileHeight = 0;
         }
     }
-    return allocVideoFrame(trackMeta,
-            width, height, tileWidth, tileHeight, dstBpp, true /*metaOnly*/);
+
+    sp<IMemory> metaMem = allocMetaFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp);
+    if (metaMem == NULL) {
+        return NULL;
+    }
+
+    // Try to fill the sequence meta's duration based on the average frame rate;
+    // default to 33ms if the frame rate is unavailable.
+    int32_t frameRate;
+    VideoFrame* meta = static_cast<VideoFrame*>(metaMem->unsecurePointer());
+    if (trackMeta->findInt32(kKeyFrameRate, &frameRate) && frameRate > 0) {
+        meta->mDurationUs = 1000000LL / frameRate;
+    } else {
+        meta->mDurationUs = kDefaultSampleDurationUs;
+    }
+    return metaMem;
 }
 
 FrameDecoder::FrameDecoder(
@@ -194,15 +235,30 @@
     }
 }
 
+bool isHDR(const sp<AMessage> &format) {
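+    // Treat the stream as HDR when it signals the BT.2020 color standard with the
+    // ST 2084 (PQ) transfer.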
+    uint32_t standard, range, transfer;
+    if (!format->findInt32("color-standard", (int32_t*)&standard)) {
+        standard = 0;
+    }
+    if (!format->findInt32("color-range", (int32_t*)&range)) {
+        range = 0;
+    }
+    if (!format->findInt32("color-transfer", (int32_t*)&transfer)) {
+        transfer = 0;
+    }
+    return standard == ColorUtils::kColorStandardBT2020 &&
+            transfer == ColorUtils::kColorTransferST2084;
+}
+
 status_t FrameDecoder::init(
-        int64_t frameTimeUs, size_t numFrames, int option, int colorFormat) {
-    if (!getDstColorFormat(
-            (android_pixel_format_t)colorFormat, &mDstFormat, &mDstBpp)) {
+        int64_t frameTimeUs, int option, int colorFormat) {
+    if (!getDstColorFormat((android_pixel_format_t)colorFormat,
+            &mDstFormat, &mCaptureFormat, &mDstBpp)) {
         return ERROR_UNSUPPORTED;
     }
 
     sp<AMessage> videoFormat = onGetFormatAndSeekOptions(
-            frameTimeUs, numFrames, option, &mReadOptions);
+            frameTimeUs, option, &mReadOptions, &mSurface);
     if (videoFormat == NULL) {
         ALOGE("video format or seek mode not supported");
         return ERROR_UNSUPPORTED;
@@ -219,7 +275,7 @@
     }
 
     err = decoder->configure(
-            videoFormat, NULL /* surface */, NULL /* crypto */, 0 /* flags */);
+            videoFormat, mSurface, NULL /* crypto */, 0 /* flags */);
     if (err != OK) {
         ALOGW("configure returned error %d (%s)", err, asString(err));
         decoder->release();
@@ -253,19 +309,7 @@
         return NULL;
     }
 
-    return mFrames.size() > 0 ? mFrames[0] : NULL;
-}
-
-status_t FrameDecoder::extractFrames(std::vector<sp<IMemory> >* frames) {
-    status_t err = extractInternal();
-    if (err != OK) {
-        return err;
-    }
-
-    for (size_t i = 0; i < mFrames.size(); i++) {
-        frames->push_back(mFrames[i]);
-    }
-    return OK;
+    return mFrameMemory;
 }
 
 status_t FrameDecoder::extractInternal() {
@@ -379,8 +423,13 @@
                         ALOGE("failed to get output buffer %zu", index);
                         break;
                     }
-                    err = onOutputReceived(videoFrameBuffer, mOutputFormat, ptsUs, &done);
-                    mDecoder->releaseOutputBuffer(index);
+                    if (mSurface != nullptr) {
+                        mDecoder->renderOutputBufferAndRelease(index);
+                        err = onOutputReceived(videoFrameBuffer, mOutputFormat, ptsUs, &done);
+                    } else {
+                        err = onOutputReceived(videoFrameBuffer, mOutputFormat, ptsUs, &done);
+                        mDecoder->releaseOutputBuffer(index);
+                    }
                 } else {
                     ALOGW("Received error %d (%s) instead of output", err, asString(err));
                     done = true;
@@ -404,22 +453,23 @@
         const sp<MetaData> &trackMeta,
         const sp<IMediaSource> &source)
     : FrameDecoder(componentName, trackMeta, source),
+      mFrame(NULL),
       mIsAvcOrHevc(false),
       mSeekMode(MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC),
       mTargetTimeUs(-1LL),
-      mNumFrames(0),
-      mNumFramesDecoded(0) {
+      mDefaultSampleDurationUs(0) {
 }
 
 sp<AMessage> VideoFrameDecoder::onGetFormatAndSeekOptions(
-        int64_t frameTimeUs, size_t numFrames, int seekMode, MediaSource::ReadOptions *options) {
+        int64_t frameTimeUs, int seekMode,
+        MediaSource::ReadOptions *options,
+        sp<Surface> *window) {
     mSeekMode = static_cast<MediaSource::ReadOptions::SeekMode>(seekMode);
     if (mSeekMode < MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC ||
             mSeekMode > MediaSource::ReadOptions::SEEK_FRAME_INDEX) {
         ALOGE("Unknown seek mode: %d", mSeekMode);
         return NULL;
     }
-    mNumFrames = numFrames;
 
     const char *mime;
     if (!trackMeta()->findCString(kKeyMIMEType, &mime)) {
@@ -460,6 +510,23 @@
         videoFormat->setInt32("android._num-input-buffers", 1);
         videoFormat->setInt32("android._num-output-buffers", 1);
     }
+
+    if (isHDR(videoFormat)) {
+        *window = initSurfaceControl();
+        if (*window == NULL) {
+            ALOGE("Failed to init surface control for HDR, fallback to non-hdr");
+        } else {
+            videoFormat->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
+        }
+    }
+
+    int32_t frameRate;
+    if (trackMeta()->findInt32(kKeyFrameRate, &frameRate) && frameRate > 0) {
+        mDefaultSampleDurationUs = 1000000LL / frameRate;
+    } else {
+        mDefaultSampleDurationUs = kDefaultSampleDurationUs;
+    }
+
     return videoFormat;
 }
 
@@ -480,6 +547,12 @@
         // option, in which case we need to actually decode to targetTimeUs.
         *flags |= MediaCodec::BUFFER_FLAG_EOS;
     }
+    int64_t durationUs;
+    if (sampleMeta.findInt64(kKeyDuration, &durationUs)) {
+        mSampleDurations.push_back(durationUs);
+    } else {
+        mSampleDurations.push_back(mDefaultSampleDurationUs);
+    }
     return OK;
 }
 
@@ -487,6 +560,11 @@
         const sp<MediaCodecBuffer> &videoFrameBuffer,
         const sp<AMessage> &outputFormat,
         int64_t timeUs, bool *done) {
+    int64_t durationUs = mDefaultSampleDurationUs;
+    if (!mSampleDurations.empty()) {
+        durationUs = *mSampleDurations.begin();
+        mSampleDurations.erase(mSampleDurations.begin());
+    }
     bool shouldOutput = (mTargetTimeUs < 0LL) || (timeUs >= mTargetTimeUs);
 
     // If this is not the target frame, skip color convert.
@@ -495,7 +573,7 @@
         return OK;
     }
 
-    *done = (++mNumFramesDecoded >= mNumFrames);
+    *done = true;
 
     if (outputFormat == NULL) {
         return ERROR_MALFORMED;
@@ -504,13 +582,22 @@
     int32_t width, height, stride, srcFormat;
     if (!outputFormat->findInt32("width", &width) ||
             !outputFormat->findInt32("height", &height) ||
-            !outputFormat->findInt32("stride", &stride) ||
             !outputFormat->findInt32("color-format", &srcFormat)) {
         ALOGE("format missing dimension or color: %s",
                 outputFormat->debugString().c_str());
         return ERROR_MALFORMED;
     }
 
+    if (!outputFormat->findInt32("stride", &stride)) {
+        if (mSurfaceControl == NULL) {
+            ALOGE("format must have stride for byte buffer mode: %s",
+                    outputFormat->debugString().c_str());
+            return ERROR_MALFORMED;
+        }
+        // For surface output, the stride isn't actually used; just set it to width.
+        stride = width;
+    }
+
     int32_t crop_left, crop_top, crop_right, crop_bottom;
     if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
         crop_left = crop_top = 0;
@@ -518,15 +605,25 @@
         crop_bottom = height - 1;
     }
 
-    sp<IMemory> frameMem = allocVideoFrame(
-            trackMeta(),
-            (crop_right - crop_left + 1),
-            (crop_bottom - crop_top + 1),
-            0,
-            0,
-            dstBpp());
-    addFrame(frameMem);
-    VideoFrame* frame = static_cast<VideoFrame*>(frameMem->pointer());
+    if (mFrame == NULL) {
+        sp<IMemory> frameMem = allocVideoFrame(
+                trackMeta(),
+                (crop_right - crop_left + 1),
+                (crop_bottom - crop_top + 1),
+                0,
+                0,
+                dstBpp(),
+                mSurfaceControl != nullptr /*allocRotated*/);
+        mFrame = static_cast<VideoFrame*>(frameMem->unsecurePointer());
+
+        setFrame(frameMem);
+    }
+
+    mFrame->mDurationUs = durationUs;
+
+    if (mSurfaceControl != nullptr) {
+        return captureSurfaceControl();
+    }
 
     ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
 
@@ -547,8 +644,8 @@
                 (const uint8_t *)videoFrameBuffer->data(),
                 width, height, stride,
                 crop_left, crop_top, crop_right, crop_bottom,
-                frame->getFlattenedData(),
-                frame->mWidth, frame->mHeight, frame->mRowBytes,
+                mFrame->getFlattenedData(),
+                mFrame->mWidth, mFrame->mHeight, mFrame->mRowBytes,
                 crop_left, crop_top, crop_right, crop_bottom);
         return OK;
     }
@@ -558,6 +655,101 @@
     return ERROR_UNSUPPORTED;
 }
 
+sp<Surface> VideoFrameDecoder::initSurfaceControl() {
+    sp<SurfaceComposerClient> client = new SurfaceComposerClient();
+    if (client->initCheck() != NO_ERROR) {
+        ALOGE("failed to get SurfaceComposerClient");
+        return NULL;
+    }
+
+    // Create a container layer to hold the capture layer so that we can
+    // capture the full frame; without the container, the crop would be
+    // limited to the display size.
+    sp<SurfaceControl> parent = client->createSurface(
+            String8("parent"),
+            0 /* width */, 0 /* height */,
+            PIXEL_FORMAT_RGBA_8888,
+            ISurfaceComposerClient::eFXSurfaceContainer);
+
+    if (!parent) {
+        ALOGE("failed to get surface control parent");
+        return NULL;
+    }
+
+    // Create the surface with a placeholder 1x1 size for now; the real size
+    // will be set just before the capture, once the output format is known.
+    sp<SurfaceControl> surfaceControl = client->createSurface(
+            String8("thumbnail"),
+            1 /* width */, 1 /* height */,
+            PIXEL_FORMAT_RGBA_8888,
+            ISurfaceComposerClient::eFXSurfaceBufferQueue,
+            parent.get());
+
+    if (!surfaceControl) {
+        ALOGE("failed to get surface control");
+        return NULL;
+    }
+
+    SurfaceComposerClient::Transaction t;
+    t.hide(parent)
+            .show(surfaceControl)
+            .apply(true);
+
+    mSurfaceControl = surfaceControl;
+    mParent = parent;
+
+    return surfaceControl->getSurface();
+}
+
+status_t VideoFrameDecoder::captureSurfaceControl() {
+    // set the layer size to the output size before the capture
+    SurfaceComposerClient::Transaction()
+        .setSize(mSurfaceControl, mFrame->mWidth, mFrame->mHeight)
+        .apply(true);
+
+    sp<GraphicBuffer> outBuffer;
+    status_t err = ScreenshotClient::captureChildLayers(
+            mParent->getHandle(),
+            ui::Dataspace::V0_SRGB,
+            captureFormat(),
+            Rect(0, 0, mFrame->mWidth, mFrame->mHeight),
+            {},
+            1.0f /*frameScale*/,
+            &outBuffer);
+
+    if (err != OK) {
+        ALOGE("failed to captureLayers: err %d", err);
+        return err;
+    }
+
+    ALOGV("capture: %dx%d, format %d, stride %d",
+            outBuffer->getWidth(),
+            outBuffer->getHeight(),
+            outBuffer->getPixelFormat(),
+            outBuffer->getStride());
+
+    uint8_t *base;
+    int32_t outBytesPerPixel, outBytesPerStride;
+    err = outBuffer->lock(
+            GraphicBuffer::USAGE_SW_READ_OFTEN,
+            reinterpret_cast<void**>(&base),
+            &outBytesPerPixel,
+            &outBytesPerStride);
+    if (err != OK) {
+        ALOGE("failed to lock graphic buffer: err %d", err);
+        return err;
+    }
+
+    uint8_t *dst = mFrame->getFlattenedData();
+    for (size_t y = 0 ; y < fmin(mFrame->mHeight, outBuffer->getHeight()) ; y++) {
+        memcpy(dst, base, fmin(mFrame->mWidth, outBuffer->getWidth()) * mFrame->mBytesPerPixel);
+        dst += mFrame->mRowBytes;
+        base += outBuffer->getStride() * mFrame->mBytesPerPixel;
+    }
+    outBuffer->unlock();
+    return OK;
+}
+
 ////////////////////////////////////////////////////////////////////////
 
 ImageDecoder::ImageDecoder(
@@ -577,8 +769,8 @@
 }
 
 sp<AMessage> ImageDecoder::onGetFormatAndSeekOptions(
-        int64_t frameTimeUs, size_t /*numFrames*/,
-        int /*seekMode*/, MediaSource::ReadOptions *options) {
+        int64_t frameTimeUs, int /*seekMode*/,
+        MediaSource::ReadOptions *options, sp<Surface> * /*window*/) {
     sp<MetaData> overrideMeta;
     if (frameTimeUs < 0) {
         uint32_t type;
@@ -703,9 +895,9 @@
     if (mFrame == NULL) {
         sp<IMemory> frameMem = allocVideoFrame(
                 trackMeta(), mWidth, mHeight, mTileWidth, mTileHeight, dstBpp());
-        mFrame = static_cast<VideoFrame*>(frameMem->pointer());
+        mFrame = static_cast<VideoFrame*>(frameMem->unsecurePointer());
 
-        addFrame(frameMem);
+        setFrame(frameMem);
     }
 
     int32_t srcFormat;
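
For context on how the reworked FrameDecoder API above is meant to be driven, here is a minimal caller sketch; the class and method names (VideoFrameDecoder, init, extractFrame) are taken from this change, while the setup of componentName, trackMeta and source is assumed to happen elsewhere (e.g. via a MediaExtractor, as StagefrightMetadataRetriever does further down).

    // Hypothetical caller sketch; componentName/trackMeta/source are assumed
    // to come from codec matching and a MediaExtractor, as in the retriever.
    int64_t timeUs = 0;  // target frame time; 0 = first displayable frame
    sp<VideoFrameDecoder> decoder =
            new VideoFrameDecoder(componentName, trackMeta, source);
    // init() no longer takes a frame count; each decoder now produces a single
    // frame per extractFrame() call, and HDR clips are routed through a
    // SurfaceControl capture internally.
    if (decoder->init(timeUs, MediaSource::ReadOptions::SEEK_CLOSEST_SYNC,
            HAL_PIXEL_FORMAT_RGBA_8888) == OK) {
        sp<IMemory> frame = decoder->extractFrame();
        if (frame != nullptr) {
            VideoFrame *vf = static_cast<VideoFrame *>(frame->unsecurePointer());
            // vf->mWidth / vf->mHeight / vf->mDurationUs are filled in here
        }
    }
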
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 2f13dc9..f130c9b 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -1635,8 +1635,13 @@
         return BAD_VALUE;
     }
 
+    // Increase moovExtraSize only once, irrespective of how many times
+    // setCaptureRate is called.
+    bool containsCaptureFps = mMetaKeys->contains(kMetaKey_CaptureFps);
     mMetaKeys->setFloat(kMetaKey_CaptureFps, captureFps);
-    mMoovExtraSize += sizeof(kMetaKey_CaptureFps) + 4 + 32;
+    if (!containsCaptureFps) {
+        mMoovExtraSize += sizeof(kMetaKey_CaptureFps) + 4 + 32;
+    }
 
     return OK;
 }
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index f579e9d..b1404de 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -48,6 +48,7 @@
 #include <media/stagefright/foundation/avc_utils.h>
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/ACodec.h>
+#include <media/stagefright/BatteryChecker.h>
 #include <media/stagefright/BufferProducerWrapper.h>
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaCodecList.h>
@@ -57,7 +58,6 @@
 #include <media/stagefright/OMXClient.h>
 #include <media/stagefright/PersistentSurface.h>
 #include <media/stagefright/SurfaceUtils.h>
-#include <mediautils/BatteryNotifier.h>
 #include <private/android_filesystem_config.h>
 #include <utils/Singleton.h>
 
@@ -166,8 +166,9 @@
     DISALLOW_EVIL_CONSTRUCTORS(ResourceManagerClient);
 };
 
-MediaCodec::ResourceManagerServiceProxy::ResourceManagerServiceProxy(pid_t pid)
-        : mPid(pid) {
+MediaCodec::ResourceManagerServiceProxy::ResourceManagerServiceProxy(
+        pid_t pid, uid_t uid)
+        : mPid(pid), mUid(uid) {
     if (mPid == MediaCodec::kNoPid) {
         mPid = IPCThreadState::self()->getCallingPid();
     }
@@ -204,15 +205,25 @@
     if (mService == NULL) {
         return;
     }
-    mService->addResource(mPid, clientId, client, resources);
+    mService->addResource(mPid, mUid, clientId, client, resources);
 }
 
-void MediaCodec::ResourceManagerServiceProxy::removeResource(int64_t clientId) {
+void MediaCodec::ResourceManagerServiceProxy::removeResource(
+        int64_t clientId,
+        const Vector<MediaResource> &resources) {
     Mutex::Autolock _l(mLock);
     if (mService == NULL) {
         return;
     }
-    mService->removeResource(mPid, clientId);
+    mService->removeResource(mPid, clientId, resources);
+}
+
+void MediaCodec::ResourceManagerServiceProxy::removeClient(int64_t clientId) {
+    Mutex::Autolock _l(mLock);
+    if (mService == NULL) {
+        return;
+    }
+    mService->removeClient(mPid, clientId);
 }
 
 bool MediaCodec::ResourceManagerServiceProxy::reclaimResource(
@@ -516,10 +527,7 @@
       mFlags(0),
       mStickyError(OK),
       mSoftRenderer(NULL),
-      mAnalyticsItem(NULL),
-      mResourceManagerClient(new ResourceManagerClient(this)),
-      mResourceManagerService(new ResourceManagerServiceProxy(pid)),
-      mBatteryStatNotified(false),
+      mMetricsHandle(0),
       mIsVideo(false),
       mVideoWidth(0),
       mVideoHeight(0),
@@ -537,20 +545,22 @@
     } else {
         mUid = uid;
     }
+    mResourceManagerClient = new ResourceManagerClient(this);
+    mResourceManagerService = new ResourceManagerServiceProxy(pid, mUid);
 
-    initAnalyticsItem();
+    initMediametrics();
 }
 
 MediaCodec::~MediaCodec() {
     CHECK_EQ(mState, UNINITIALIZED);
-    mResourceManagerService->removeResource(getId(mResourceManagerClient));
+    mResourceManagerService->removeClient(getId(mResourceManagerClient));
 
-    flushAnalyticsItem();
+    flushMediametrics();
 }
 
-void MediaCodec::initAnalyticsItem() {
-    if (mAnalyticsItem == NULL) {
-        mAnalyticsItem = MediaAnalyticsItem::create(kCodecKeyName);
+void MediaCodec::initMediametrics() {
+    if (mMetricsHandle == 0) {
+        mMetricsHandle = mediametrics_create(kCodecKeyName);
     }
 
     mLatencyHist.setup(kLatencyHistBuckets, kLatencyHistWidth, kLatencyHistFloor);
@@ -564,38 +574,39 @@
     }
 }
 
-void MediaCodec::updateAnalyticsItem() {
-    ALOGV("MediaCodec::updateAnalyticsItem");
-    if (mAnalyticsItem == NULL) {
+void MediaCodec::updateMediametrics() {
+    ALOGV("MediaCodec::updateMediametrics");
+    if (mMetricsHandle == 0) {
         return;
     }
 
+
     if (mLatencyHist.getCount() != 0 ) {
-        mAnalyticsItem->setInt64(kCodecLatencyMax, mLatencyHist.getMax());
-        mAnalyticsItem->setInt64(kCodecLatencyMin, mLatencyHist.getMin());
-        mAnalyticsItem->setInt64(kCodecLatencyAvg, mLatencyHist.getAvg());
-        mAnalyticsItem->setInt64(kCodecLatencyCount, mLatencyHist.getCount());
+        mediametrics_setInt64(mMetricsHandle, kCodecLatencyMax, mLatencyHist.getMax());
+        mediametrics_setInt64(mMetricsHandle, kCodecLatencyMin, mLatencyHist.getMin());
+        mediametrics_setInt64(mMetricsHandle, kCodecLatencyAvg, mLatencyHist.getAvg());
+        mediametrics_setInt64(mMetricsHandle, kCodecLatencyCount, mLatencyHist.getCount());
 
         if (kEmitHistogram) {
             // and the histogram itself
             std::string hist = mLatencyHist.emit();
-            mAnalyticsItem->setCString(kCodecLatencyHist, hist.c_str());
+            mediametrics_setCString(mMetricsHandle, kCodecLatencyHist, hist.c_str());
         }
     }
     if (mLatencyUnknown > 0) {
-        mAnalyticsItem->setInt64(kCodecLatencyUnknown, mLatencyUnknown);
+        mediametrics_setInt64(mMetricsHandle, kCodecLatencyUnknown, mLatencyUnknown);
     }
 
 #if 0
     // enable for short term, only while debugging
-    updateEphemeralAnalytics(mAnalyticsItem);
+    updateEphemeralMediametrics(mMetricsHandle);
 #endif
 }
 
-void MediaCodec::updateEphemeralAnalytics(MediaAnalyticsItem *item) {
-    ALOGD("MediaCodec::updateEphemeralAnalytics()");
+void MediaCodec::updateEphemeralMediametrics(mediametrics_handle_t item) {
+    ALOGD("MediaCodec::updateEphemeralMediametrics()");
 
-    if (item == NULL) {
+    if (item == 0) {
         return;
     }
 
@@ -618,28 +629,27 @@
 
     // spit the data (if any) into the supplied analytics record
     if (recentHist.getCount()!= 0 ) {
-        item->setInt64(kCodecRecentLatencyMax, recentHist.getMax());
-        item->setInt64(kCodecRecentLatencyMin, recentHist.getMin());
-        item->setInt64(kCodecRecentLatencyAvg, recentHist.getAvg());
-        item->setInt64(kCodecRecentLatencyCount, recentHist.getCount());
+        mediametrics_setInt64(item, kCodecRecentLatencyMax, recentHist.getMax());
+        mediametrics_setInt64(item, kCodecRecentLatencyMin, recentHist.getMin());
+        mediametrics_setInt64(item, kCodecRecentLatencyAvg, recentHist.getAvg());
+        mediametrics_setInt64(item, kCodecRecentLatencyCount, recentHist.getCount());
 
         if (kEmitHistogram) {
             // and the histogram itself
             std::string hist = recentHist.emit();
-            item->setCString(kCodecRecentLatencyHist, hist.c_str());
+            mediametrics_setCString(item, kCodecRecentLatencyHist, hist.c_str());
         }
     }
 }
 
-void MediaCodec::flushAnalyticsItem() {
-    updateAnalyticsItem();
-    if (mAnalyticsItem != NULL) {
-        // don't log empty records
-        if (mAnalyticsItem->count() > 0) {
-            mAnalyticsItem->selfrecord();
+void MediaCodec::flushMediametrics() {
+    updateMediametrics();
+    if (mMetricsHandle != 0) {
+        if (mediametrics_count(mMetricsHandle) > 0) {
+            mediametrics_selfRecord(mMetricsHandle);
         }
-        delete mAnalyticsItem;
-        mAnalyticsItem = NULL;
+        mediametrics_delete(mMetricsHandle);
+        mMetricsHandle = 0;
     }
 }
 
@@ -742,6 +752,12 @@
         return;
     }
 
+    if (mBatteryChecker != nullptr) {
+        mBatteryChecker->onCodecActivity([this] () {
+            addResource(MediaResource::kBattery, MediaResource::kVideoCodec, 1);
+        });
+    }
+
     const int64_t nowNs = systemTime(SYSTEM_TIME_MONOTONIC);
     BufferFlightTiming_t startdata = { presentationUs, nowNs };
 
@@ -776,6 +792,12 @@
         return;
     }
 
+    if (mBatteryChecker != nullptr) {
+        mBatteryChecker->onCodecActivity([this] () {
+            addResource(MediaResource::kBattery, MediaResource::kVideoCodec, 1);
+        });
+    }
+
     BufferFlightTiming_t startdata;
     bool valid = false;
     while (mBuffersInFlight.size() > 0) {
@@ -959,9 +981,14 @@
     // ".secure"
     msg->setString("name", name);
 
-    if (mAnalyticsItem != NULL) {
-        mAnalyticsItem->setCString(kCodecCodec, name.c_str());
-        mAnalyticsItem->setCString(kCodecMode, mIsVideo ? kCodecModeVideo : kCodecModeAudio);
+    if (mMetricsHandle != 0) {
+        mediametrics_setCString(mMetricsHandle, kCodecCodec, name.c_str());
+        mediametrics_setCString(mMetricsHandle, kCodecMode,
+                                mIsVideo ? kCodecModeVideo : kCodecModeAudio);
+    }
+
+    if (mIsVideo) {
+        mBatteryChecker = new BatteryChecker(new AMessage(kWhatCheckBatteryStats, this));
     }
 
     status_t err;
@@ -1018,16 +1045,17 @@
         uint32_t flags) {
     sp<AMessage> msg = new AMessage(kWhatConfigure, this);
 
-    if (mAnalyticsItem != NULL) {
+    if (mMetricsHandle != 0) {
         int32_t profile = 0;
         if (format->findInt32("profile", &profile)) {
-            mAnalyticsItem->setInt32(kCodecProfile, profile);
+            mediametrics_setInt32(mMetricsHandle, kCodecProfile, profile);
         }
         int32_t level = 0;
         if (format->findInt32("level", &level)) {
-            mAnalyticsItem->setInt32(kCodecLevel, level);
+            mediametrics_setInt32(mMetricsHandle, kCodecLevel, level);
         }
-        mAnalyticsItem->setInt32(kCodecEncoder, (flags & CONFIGURE_FLAG_ENCODE) ? 1 : 0);
+        mediametrics_setInt32(mMetricsHandle, kCodecEncoder,
+                              (flags & CONFIGURE_FLAG_ENCODE) ? 1 : 0);
     }
 
     if (mIsVideo) {
@@ -1037,17 +1065,17 @@
             mRotationDegrees = 0;
         }
 
-        if (mAnalyticsItem != NULL) {
-            mAnalyticsItem->setInt32(kCodecWidth, mVideoWidth);
-            mAnalyticsItem->setInt32(kCodecHeight, mVideoHeight);
-            mAnalyticsItem->setInt32(kCodecRotation, mRotationDegrees);
+        if (mMetricsHandle != 0) {
+            mediametrics_setInt32(mMetricsHandle, kCodecWidth, mVideoWidth);
+            mediametrics_setInt32(mMetricsHandle, kCodecHeight, mVideoHeight);
+            mediametrics_setInt32(mMetricsHandle, kCodecRotation, mRotationDegrees);
             int32_t maxWidth = 0;
             if (format->findInt32("max-width", &maxWidth)) {
-                mAnalyticsItem->setInt32(kCodecMaxWidth, maxWidth);
+                mediametrics_setInt32(mMetricsHandle, kCodecMaxWidth, maxWidth);
             }
             int32_t maxHeight = 0;
             if (format->findInt32("max-height", &maxHeight)) {
-                mAnalyticsItem->setInt32(kCodecMaxHeight, maxHeight);
+                mediametrics_setInt32(mMetricsHandle, kCodecMaxHeight, maxHeight);
             }
         }
 
@@ -1069,8 +1097,8 @@
         } else {
             msg->setPointer("descrambler", descrambler.get());
         }
-        if (mAnalyticsItem != NULL) {
-            mAnalyticsItem->setInt32(kCodecCrypto, 1);
+        if (mMetricsHandle != 0) {
+            mediametrics_setInt32(mMetricsHandle, kCodecCrypto, 1);
         }
     } else if (mFlags & kFlagIsSecure) {
         ALOGW("Crypto or descrambler should be given for secure codec");
@@ -1221,6 +1249,13 @@
             getId(mResourceManagerClient), mResourceManagerClient, resources);
 }
 
+void MediaCodec::removeResource(
+        MediaResource::Type type, MediaResource::SubType subtype, uint64_t value) {
+    Vector<MediaResource> resources;
+    resources.push_back(MediaResource(type, subtype, value));
+    mResourceManagerService->removeResource(getId(mResourceManagerClient), resources);
+}
+
 status_t MediaCodec::start() {
     sp<AMessage> msg = new AMessage(kWhatStart, this);
 
@@ -1528,22 +1563,22 @@
     return OK;
 }
 
-status_t MediaCodec::getMetrics(MediaAnalyticsItem * &reply) {
+status_t MediaCodec::getMetrics(mediametrics_handle_t &reply) {
 
-    reply = NULL;
+    reply = 0;
 
     // shouldn't happen, but be safe
-    if (mAnalyticsItem == NULL) {
+    if (mMetricsHandle == 0) {
         return UNKNOWN_ERROR;
     }
 
     // update any in-flight data that's not carried within the record
-    updateAnalyticsItem();
+    updateMediametrics();
 
     // send it back to the caller.
-    reply = mAnalyticsItem->dup();
+    reply = mediametrics_dup(mMetricsHandle);
 
-    updateEphemeralAnalytics(reply);
+    updateEphemeralMediametrics(reply);
 
     return OK;
 }
@@ -1682,6 +1717,59 @@
     }
 }
 
+BatteryChecker::BatteryChecker(const sp<AMessage> &msg, int64_t timeoutUs)
+    : mTimeoutUs(timeoutUs)
+    , mLastActivityTimeUs(-1ll)
+    , mBatteryStatNotified(false)
+    , mBatteryCheckerGeneration(0)
+    , mIsExecuting(false)
+    , mBatteryCheckerMsg(msg) {}
+
+void BatteryChecker::onCodecActivity(std::function<void()> batteryOnCb) {
+    if (!isExecuting()) {
+        // ignore if not executing
+        return;
+    }
+    if (!mBatteryStatNotified) {
+        batteryOnCb();
+        mBatteryStatNotified = true;
+        sp<AMessage> msg = mBatteryCheckerMsg->dup();
+        msg->setInt32("generation", mBatteryCheckerGeneration);
+
+        // post checker and clear last activity time
+        msg->post(mTimeoutUs);
+        mLastActivityTimeUs = -1ll;
+    } else {
+        // update last activity time
+        mLastActivityTimeUs = ALooper::GetNowUs();
+    }
+}
+
+void BatteryChecker::onCheckBatteryTimer(
+        const sp<AMessage> &msg, std::function<void()> batteryOffCb) {
+    // ignore if this checker already expired because the client resource was removed
+    int32_t generation;
+    if (!msg->findInt32("generation", &generation)
+            || generation != mBatteryCheckerGeneration) {
+        return;
+    }
+
+    if (mLastActivityTimeUs < 0ll) {
+        // timed out inactive, do not repost checker
+        batteryOffCb();
+        mBatteryStatNotified = false;
+    } else {
+        // repost checker and clear last activity time
+        msg->post(mTimeoutUs + mLastActivityTimeUs - ALooper::GetNowUs());
+        mLastActivityTimeUs = -1ll;
+    }
+}
+
+void BatteryChecker::onClientRemoved() {
+    mBatteryStatNotified = false;
+    mBatteryCheckerGeneration++;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 
 void MediaCodec::cancelPendingDequeueOperations() {
@@ -1804,10 +1892,11 @@
                         case CONFIGURING:
                         {
                             if (actionCode == ACTION_CODE_FATAL) {
-                                mAnalyticsItem->setInt32(kCodecError, err);
-                                mAnalyticsItem->setCString(kCodecErrorState, stateString(mState).c_str());
-                                flushAnalyticsItem();
-                                initAnalyticsItem();
+                                mediametrics_setInt32(mMetricsHandle, kCodecError, err);
+                                mediametrics_setCString(mMetricsHandle, kCodecErrorState,
+                                                        stateString(mState).c_str());
+                                flushMediametrics();
+                                initMediametrics();
                             }
                             setState(actionCode == ACTION_CODE_FATAL ?
                                     UNINITIALIZED : INITIALIZED);
@@ -1817,10 +1906,11 @@
                         case STARTING:
                         {
                             if (actionCode == ACTION_CODE_FATAL) {
-                                mAnalyticsItem->setInt32(kCodecError, err);
-                                mAnalyticsItem->setCString(kCodecErrorState, stateString(mState).c_str());
-                                flushAnalyticsItem();
-                                initAnalyticsItem();
+                                mediametrics_setInt32(mMetricsHandle, kCodecError, err);
+                                mediametrics_setCString(mMetricsHandle, kCodecErrorState,
+                                                        stateString(mState).c_str());
+                                flushMediametrics();
+                                initMediametrics();
                             }
                             setState(actionCode == ACTION_CODE_FATAL ?
                                     UNINITIALIZED : CONFIGURED);
@@ -1858,10 +1948,11 @@
                         case FLUSHING:
                         {
                             if (actionCode == ACTION_CODE_FATAL) {
-                                mAnalyticsItem->setInt32(kCodecError, err);
-                                mAnalyticsItem->setCString(kCodecErrorState, stateString(mState).c_str());
-                                flushAnalyticsItem();
-                                initAnalyticsItem();
+                                mediametrics_setInt32(mMetricsHandle, kCodecError, err);
+                                mediametrics_setCString(mMetricsHandle, kCodecErrorState,
+                                                        stateString(mState).c_str());
+                                flushMediametrics();
+                                initMediametrics();
 
                                 setState(UNINITIALIZED);
                             } else {
@@ -1891,10 +1982,11 @@
                                 setState(INITIALIZED);
                                 break;
                             default:
-                                mAnalyticsItem->setInt32(kCodecError, err);
-                                mAnalyticsItem->setCString(kCodecErrorState, stateString(mState).c_str());
-                                flushAnalyticsItem();
-                                initAnalyticsItem();
+                                mediametrics_setInt32(mMetricsHandle, kCodecError, err);
+                                mediametrics_setCString(mMetricsHandle, kCodecErrorState,
+                                                        stateString(mState).c_str());
+                                flushMediametrics();
+                                initMediametrics();
                                 setState(UNINITIALIZED);
                                 break;
                             }
@@ -1951,7 +2043,8 @@
                     CHECK(msg->findString("componentName", &mComponentName));
 
                     if (mComponentName.c_str()) {
-                        mAnalyticsItem->setCString(kCodecCodec, mComponentName.c_str());
+                        mediametrics_setCString(mMetricsHandle, kCodecCodec,
+                                                mComponentName.c_str());
                     }
 
                     const char *owner = mCodecInfo->getOwnerName();
@@ -1967,11 +2060,11 @@
                     if (mComponentName.endsWith(".secure")) {
                         mFlags |= kFlagIsSecure;
                         resourceType = MediaResource::kSecureCodec;
-                        mAnalyticsItem->setInt32(kCodecSecure, 1);
+                        mediametrics_setInt32(mMetricsHandle, kCodecSecure, 1);
                     } else {
                         mFlags &= ~kFlagIsSecure;
                         resourceType = MediaResource::kNonSecureCodec;
-                        mAnalyticsItem->setInt32(kCodecSecure, 0);
+                        mediametrics_setInt32(mMetricsHandle, kCodecSecure, 0);
                     }
 
                     if (mIsVideo) {
@@ -2019,14 +2112,15 @@
                     (new AMessage)->postReply(mReplyID);
 
                     // augment our media metrics info, now that we know more things
-                    if (mAnalyticsItem != NULL) {
+                    if (mMetricsHandle != 0) {
                         sp<AMessage> format;
                         if (mConfigureMsg != NULL &&
                             mConfigureMsg->findMessage("format", &format)) {
                                 // format includes: mime
                                 AString mime;
                                 if (format->findString("mime", &mime)) {
-                                    mAnalyticsItem->setCString(kCodecMime, mime.c_str());
+                                    mediametrics_setCString(mMetricsHandle, kCodecMime,
+                                                            mime.c_str());
                                 }
                             }
                     }
@@ -2318,7 +2412,12 @@
 
                     mFlags &= ~kFlagIsComponentAllocated;
 
-                    mResourceManagerService->removeResource(getId(mResourceManagerClient));
+                    // Battery stat goes off here, since we're removing all
+                    // resources including the battery-on resource.
+                    if (mBatteryChecker != nullptr) {
+                        mBatteryChecker->onClientRemoved();
+                    }
+
+                    mResourceManagerService->removeClient(getId(mResourceManagerClient));
 
                     (new AMessage)->postReply(mReplyID);
                     break;
@@ -3029,6 +3128,16 @@
             break;
         }
 
+        case kWhatCheckBatteryStats:
+        {
+            if (mBatteryChecker != nullptr) {
+                mBatteryChecker->onCheckBatteryTimer(msg, [this] () {
+                    removeResource(MediaResource::kBattery, MediaResource::kVideoCodec, 1);
+                });
+            }
+            break;
+        }
+
         default:
             TRESPASS();
     }
@@ -3125,9 +3234,11 @@
 
     mState = newState;
 
-    cancelPendingDequeueOperations();
+    if (mBatteryChecker != nullptr) {
+        mBatteryChecker->setExecuting(isExecuting());
+    }
 
-    updateBatteryStat();
+    cancelPendingDequeueOperations();
 }
 
 void MediaCodec::returnBuffersToCodec(bool isReclaim) {
@@ -3631,20 +3742,6 @@
     return OK;
 }
 
-void MediaCodec::updateBatteryStat() {
-    if (!mIsVideo) {
-        return;
-    }
-
-    if (mState == CONFIGURED && !mBatteryStatNotified) {
-        BatteryNotifier::getInstance().noteStartVideo(mUid);
-        mBatteryStatNotified = true;
-    } else if (mState == UNINITIALIZED && mBatteryStatNotified) {
-        BatteryNotifier::getInstance().noteStopVideo(mUid);
-        mBatteryStatNotified = false;
-    }
-}
-
 std::string MediaCodec::stateString(State state) {
     const char *rval = NULL;
     char rawbuffer[16]; // room for "%d"
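
The BatteryChecker added above replaces the old per-state updateBatteryStat() with a timeout-and-generation scheme: the first buffer activity while executing notes "battery on" and posts a kWhatCheckBatteryStats message; when that message fires with no activity seen since the last post, the off-callback runs; releasing the codec bumps the generation so stale timer messages are ignored. A condensed sketch of the call pattern, using only the hooks shown in this change:

    // Sketch only; the checker, looper and resource helpers are wired up in
    // MediaCodec as shown above.
    // 1) On every queued input / dequeued output while executing:
    mBatteryChecker->onCodecActivity([this] () {
        addResource(MediaResource::kBattery, MediaResource::kVideoCodec, 1);
    });
    // 2) When the posted kWhatCheckBatteryStats message arrives:
    mBatteryChecker->onCheckBatteryTimer(msg, [this] () {
        removeResource(MediaResource::kBattery, MediaResource::kVideoCodec, 1);
    });
    // 3) When the component is released and all resources are dropped:
    mBatteryChecker->onClientRemoved();
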
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index 9ba2add..7ebdb1a 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -96,10 +96,18 @@
 
     sp<MediaAdapter> newTrack = new MediaAdapter(trackMeta);
     status_t result = mWriter->addSource(newTrack);
-    if (result == OK) {
-        return mTrackList.add(newTrack);
+    if (result != OK) {
+        return -1;
     }
-    return -1;
+    float captureFps = -1.0;
+    if (format->findAsFloat("time-lapse-fps", &captureFps)) {
+        ALOGV("addTrack() time-lapse-fps: %f", captureFps);
+        result = mWriter->setCaptureRate(captureFps);
+        if (result != OK) {
+            ALOGW("addTrack() setCaptureRate failed :%d", result);
+        }
+    }
+    return mTrackList.add(newTrack);
 }
 
 status_t MediaMuxer::setOrientationHint(int degrees) {
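
With the addTrack() change above, a muxer client opts into the capture-rate path simply by putting a "time-lapse-fps" entry in the track format. A hedged sketch, with the muxer instance and the remaining format fields assumed:

    // Sketch of a client enabling setCaptureRate() through MediaMuxer::addTrack().
    sp<AMessage> format = new AMessage;
    format->setString("mime", "video/avc");
    format->setInt32("width", 1920);
    format->setInt32("height", 1080);
    format->setFloat("time-lapse-fps", 1.0f);   // forwarded to MPEG4Writer::setCaptureRate()
    ssize_t trackIndex = muxer->addTrack(format);
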
diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp
index cf4edae..ce73676 100644
--- a/media/libstagefright/StagefrightMediaScanner.cpp
+++ b/media/libstagefright/StagefrightMediaScanner.cpp
@@ -158,7 +158,11 @@
     if (mRetriever->setDataSource(fd, 0, size) == OK) {
         sp<IMemory> mem = mRetriever->extractAlbumArt();
         if (mem != NULL) {
-            MediaAlbumArt *art = static_cast<MediaAlbumArt *>(mem->pointer());
+            // TODO: Using unsecurePointer() has some associated security pitfalls
+            //       (see declaration for details).
+            //       Either document why it is safe in this case or address the
+            //       issue (e.g. by copying).
+            MediaAlbumArt *art = static_cast<MediaAlbumArt *>(mem->unsecurePointer());
             return art->clone();
         }
     }
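
The TODO above suggests either documenting why unsecurePointer() is safe here or copying the data out of the shared memory first. Purely as an illustration of the copying option (none of this is in the change itself, and the flat MediaAlbumArt layout is an assumption):

    // Hypothetical copy-out variant of the album art read above.
    sp<IMemory> mem = mRetriever->extractAlbumArt();
    if (mem != NULL && mem->size() >= sizeof(MediaAlbumArt)) {
        std::vector<uint8_t> copy(mem->size());
        memcpy(copy.data(), mem->unsecurePointer(), mem->size());
        MediaAlbumArt *art = reinterpret_cast<MediaAlbumArt *>(copy.data());
        return art->clone();   // clone() now reads from process-local memory
    }
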
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index fa3d372..6f536a9 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -44,7 +44,7 @@
 StagefrightMetadataRetriever::StagefrightMetadataRetriever()
     : mParsedMetaData(false),
       mAlbumArt(NULL),
-      mLastImageIndex(-1) {
+      mLastDecodedIndex(-1) {
     ALOGV("StagefrightMetadataRetriever()");
 }
 
@@ -143,8 +143,8 @@
 
     FrameRect rect = {left, top, right, bottom};
 
-    if (mImageDecoder != NULL && index == mLastImageIndex) {
-        return mImageDecoder->extractFrame(&rect);
+    if (mDecoder != NULL && index == mLastDecodedIndex) {
+        return mDecoder->extractFrame(&rect);
     }
 
     return getImageInternal(
@@ -153,6 +153,8 @@
 
 sp<IMemory> StagefrightMetadataRetriever::getImageInternal(
         int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect) {
+    mDecoder.clear();
+    mLastDecodedIndex = -1;
 
     if (mExtractor.get() == NULL) {
         ALOGE("no extractor.");
@@ -227,14 +229,14 @@
         const AString &componentName = matchingCodecs[i];
         sp<ImageDecoder> decoder = new ImageDecoder(componentName, trackMeta, source);
         int64_t frameTimeUs = thumbnail ? -1 : 0;
-        if (decoder->init(frameTimeUs, 1 /*numFrames*/, 0 /*option*/, colorFormat) == OK) {
+        if (decoder->init(frameTimeUs, 0 /*option*/, colorFormat) == OK) {
             sp<IMemory> frame = decoder->extractFrame(rect);
 
             if (frame != NULL) {
                 if (rect != NULL) {
                     // keep the decoder if slice decoding
-                    mImageDecoder = decoder;
-                    mLastImageIndex = index;
+                    mDecoder = decoder;
+                    mLastDecodedIndex = index;
                 }
                 return frame;
             }
@@ -242,6 +244,7 @@
         ALOGV("%s failed to extract thumbnail, trying next decoder.", componentName.c_str());
     }
 
+    ALOGE("all codecs failed to extract frame.");
     return NULL;
 }
 
@@ -250,36 +253,40 @@
     ALOGV("getFrameAtTime: %" PRId64 " us option: %d colorFormat: %d, metaOnly: %d",
             timeUs, option, colorFormat, metaOnly);
 
-    sp<IMemory> frame;
-    status_t err = getFrameInternal(
-            timeUs, 1, option, colorFormat, metaOnly, &frame, NULL /*outFrames*/);
-    return (err == OK) ? frame : NULL;
+    return getFrameInternal(timeUs, option, colorFormat, metaOnly);
 }
 
-status_t StagefrightMetadataRetriever::getFrameAtIndex(
-        std::vector<sp<IMemory> >* frames,
-        int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
-    ALOGV("getFrameAtIndex: frameIndex %d, numFrames %d, colorFormat: %d, metaOnly: %d",
-            frameIndex, numFrames, colorFormat, metaOnly);
+sp<IMemory> StagefrightMetadataRetriever::getFrameAtIndex(
+        int frameIndex, int colorFormat, bool metaOnly) {
+    ALOGV("getFrameAtIndex: frameIndex %d, colorFormat: %d, metaOnly: %d",
+            frameIndex, colorFormat, metaOnly);
+    if (mDecoder != NULL && frameIndex == mLastDecodedIndex + 1) {
+        sp<IMemory> frame = mDecoder->extractFrame();
+        if (frame != nullptr) {
+            mLastDecodedIndex = frameIndex;
+        }
+        return frame;
+    }
 
-    return getFrameInternal(
-            frameIndex, numFrames, MediaSource::ReadOptions::SEEK_FRAME_INDEX,
-            colorFormat, metaOnly, NULL /*outFrame*/, frames);
+    return getFrameInternal(frameIndex,
+            MediaSource::ReadOptions::SEEK_FRAME_INDEX, colorFormat, metaOnly);
 }
 
-status_t StagefrightMetadataRetriever::getFrameInternal(
-        int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
-        sp<IMemory>* outFrame, std::vector<sp<IMemory> >* outFrames) {
+sp<IMemory> StagefrightMetadataRetriever::getFrameInternal(
+        int64_t timeUs, int option, int colorFormat, bool metaOnly) {
+    mDecoder.clear();
+    mLastDecodedIndex = -1;
+
     if (mExtractor.get() == NULL) {
         ALOGE("no extractor.");
-        return NO_INIT;
+        return NULL;
     }
 
     sp<MetaData> fileMeta = mExtractor->getMetaData();
 
     if (fileMeta == NULL) {
         ALOGE("extractor doesn't publish metadata, failed to initialize?");
-        return NO_INIT;
+        return NULL;
     }
 
     size_t n = mExtractor->countTracks();
@@ -300,30 +307,24 @@
 
     if (i == n) {
         ALOGE("no video track found.");
-        return INVALID_OPERATION;
+        return NULL;
     }
 
     sp<MetaData> trackMeta = mExtractor->getTrackMetaData(
             i, MediaExtractor::kIncludeExtensiveMetaData);
     if (!trackMeta) {
-        return UNKNOWN_ERROR;
+        return NULL;
     }
 
     if (metaOnly) {
-        if (outFrame != NULL) {
-            *outFrame = FrameDecoder::getMetadataOnly(trackMeta, colorFormat);
-            if (*outFrame != NULL) {
-                return OK;
-            }
-        }
-        return UNKNOWN_ERROR;
+        return FrameDecoder::getMetadataOnly(trackMeta, colorFormat);
     }
 
     sp<IMediaSource> source = mExtractor->getTrack(i);
 
     if (source.get() == NULL) {
         ALOGV("unable to instantiate video track.");
-        return UNKNOWN_ERROR;
+        return NULL;
     }
 
     const void *data;
@@ -350,24 +351,22 @@
     for (size_t i = 0; i < matchingCodecs.size(); ++i) {
         const AString &componentName = matchingCodecs[i];
         sp<VideoFrameDecoder> decoder = new VideoFrameDecoder(componentName, trackMeta, source);
-        if (decoder->init(timeUs, numFrames, option, colorFormat) == OK) {
-            if (outFrame != NULL) {
-                *outFrame = decoder->extractFrame();
-                if (*outFrame != NULL) {
-                    return OK;
+        if (decoder->init(timeUs, option, colorFormat) == OK) {
+            sp<IMemory> frame = decoder->extractFrame();
+            if (frame != nullptr) {
+                // keep the decoder if seeking by frame index
+                if (option == MediaSource::ReadOptions::SEEK_FRAME_INDEX) {
+                    mDecoder = decoder;
+                    mLastDecodedIndex = timeUs;
                 }
-            } else if (outFrames != NULL) {
-                status_t err = decoder->extractFrames(outFrames);
-                if (err == OK) {
-                    return OK;
-                }
+                return frame;
             }
         }
         ALOGV("%s failed to extract frame, trying next decoder.", componentName.c_str());
     }
 
     ALOGE("all codecs failed to extract frame.");
-    return UNKNOWN_ERROR;
+    return NULL;
 }
 
 MediaAlbumArt *StagefrightMetadataRetriever::extractAlbumArt() {
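
The retriever now caches a single decoder and only reuses it when getFrameAtIndex() is called with the immediately following index; any other index (or a decode failure) tears the decoder down and rebuilds it through getFrameInternal() with SEEK_FRAME_INDEX. A small sketch of the intended sequential use, with the data-source setup and frame count assumed:

    // Sequential frame walk against the new single-frame API (sketch).
    for (int index = 0; index < framesWanted; ++index) {
        // index == lastIndex + 1 hits the cached decoder; anything else
        // re-initializes it from scratch.
        sp<IMemory> frame = retriever->getFrameAtIndex(
                index, HAL_PIXEL_FORMAT_RGBA_8888, false /* metaOnly */);
        if (frame == NULL) {
            break;
        }
        // ... consume frame ...
    }
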
diff --git a/media/libstagefright/TEST_MAPPING b/media/libstagefright/TEST_MAPPING
index 96818eb..c1b270c 100644
--- a/media/libstagefright/TEST_MAPPING
+++ b/media/libstagefright/TEST_MAPPING
@@ -7,6 +7,9 @@
           "include-annotation": "android.platform.test.annotations.RequiresDevice"
         }
       ]
+    },
+    {
+       "name": "BatteryChecker_test"
     }
   ]
 }
diff --git a/media/libstagefright/bqhelper/Android.bp b/media/libstagefright/bqhelper/Android.bp
index db67034..6719bab 100644
--- a/media/libstagefright/bqhelper/Android.bp
+++ b/media/libstagefright/bqhelper/Android.bp
@@ -27,7 +27,6 @@
         "libcutils",
         "libhidlbase",
         "libhidlmemory",
-        "libhidltransport",
         "liblog",
         "libstagefright_foundation",
         "libui",
@@ -39,7 +38,6 @@
         "android.hidl.token@1.0-utils",
         "libbase",
         "libEGL",
-        "libhwbinder",
         "libnativewindow",
         "libvndksupport",
     ],
diff --git a/media/libstagefright/bqhelper/GraphicBufferSource.cpp b/media/libstagefright/bqhelper/GraphicBufferSource.cpp
index 59317e7..de9d12c 100644
--- a/media/libstagefright/bqhelper/GraphicBufferSource.cpp
+++ b/media/libstagefright/bqhelper/GraphicBufferSource.cpp
@@ -29,6 +29,7 @@
 #include <media/stagefright/foundation/ColorUtils.h>
 #include <media/stagefright/foundation/FileDescriptor.h>
 
+#include <android-base/properties.h>
 #include <media/hardware/MetadataBufferType.h>
 #include <ui/GraphicBuffer.h>
 #include <gui/BufferItem.h>
@@ -800,6 +801,9 @@
     }
 }
 
+#ifdef __clang__
+__attribute__((no_sanitize("integer")))
+#endif
 bool GraphicBufferSource::calculateCodecTimestamp_l(
         nsecs_t bufferTimeNs, int64_t *codecTimeUs) {
     int64_t timeUs = bufferTimeNs / 1000;
@@ -816,14 +820,15 @@
             mPrevFrameUs = mBaseFrameUs =
                     std::llround((timeUs * mCaptureFps) / mFps);
             mFrameCount = 0;
-        } else {
-            // snap to nearest capture point
+        } else if (mSnapTimestamps) {
             double nFrames = (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000;
             if (nFrames < 0.5 - kTimestampFluctuation) {
                 // skip this frame as it's too close to previous capture
-                ALOGD("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
+                ALOGD("skipping frame, timeUs %lld",
+                      static_cast<long long>(timeUs));
                 return false;
             }
+            // snap to nearest capture point
             if (nFrames <= 1.0) {
                 nFrames = 1.0;
             }
@@ -832,6 +837,22 @@
                     mFrameCount * 1000000 / mCaptureFps);
             mPrevFrameUs = mBaseFrameUs + std::llround(
                     mFrameCount * 1000000 / mFps);
+        } else {
+            if (timeUs <= mPrevCaptureUs) {
+                if (mFrameDropper != NULL && mFrameDropper->disabled()) {
+                    // Warn only; the client has disabled the frame drop logic, possibly
+                    // for image encoding cases where the camera's ZSL mode can send
+                    // out-of-order frames.
+                    ALOGW("Received frame that's going backward in time");
+                } else {
+                    // Drop the frame if it's going backward in time. A bad timestamp
+                    // could disrupt the encoder's rate control completely.
+                    ALOGW("Dropping frame that's going backward in time");
+                    return false;
+                }
+            }
+            mPrevCaptureUs = timeUs;
+            mPrevFrameUs = mBaseFrameUs + std::llround(
+                    (timeUs - mBaseCaptureUs) * (mCaptureFps / mFps));
         }
 
         ALOGV("timeUs %lld, captureUs %lld, frameUs %lld",
@@ -1359,6 +1380,12 @@
 
     mFps = fps;
     mCaptureFps = captureFps;
+    if (captureFps > fps) {
+        mSnapTimestamps = 1 == base::GetIntProperty(
+                "debug.stagefright.snap_timestamps", int64_t(0));
+    } else {
+        mSnapTimestamps = false;
+    }
 
     return OK;
 }
diff --git a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/GraphicBufferSource.h b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/GraphicBufferSource.h
index b4c4d4a..ed5d7cb 100644
--- a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/GraphicBufferSource.h
+++ b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/GraphicBufferSource.h
@@ -461,12 +461,33 @@
     // Slow motion mode is enabled if both encoding and capture frame rates are
     // defined and the encoding frame rate is less than half the capture frame
     // rate. In this mode, the source is expected to produce frames with an even
-    // timestamp interval (after rounding) with the configured capture fps. The
-    // first source timestamp is used as the source base time. Afterwards, the
-    // timestamp of each source frame is snapped to the nearest expected capture
-    // timestamp and scaled to match the configured encoding frame rate.
+    // timestamp interval (after rounding) with the configured capture fps.
+    //
+    // These modes must be configured by calling setTimeLapseConfig() before
+    // using this source.
+    //
+    // Timestamp snapping for slow motion recording
+    // ============================================
+    //
+    // When slow motion mode is configured with setTimeLapseConfig(), the
+    // property "debug.stagefright.snap_timestamps" is checked: mSnapTimestamps
+    // is set to true only if the property value is 1, and to false otherwise.
+    // (mSnapTimestamps is always false for time lapse recording, regardless of
+    // the property value.)
+    //
+    // If mSnapTimestamps is true, i.e., timestamp snapping is enabled, the
+    // first source timestamp will be used as the source base time; afterwards,
+    // the timestamp of each source frame will be snapped to the nearest
+    // expected capture timestamp and scaled to match the configured encoding
+    // frame rate.
+    //
+    // If timestamp snapping is disabled, the timestamp of source frames will
+    // be scaled to match the ratio between the configured encoding frame rate
+    // and the configured capture frame rate.
 
-    // These modes must be enabled before using this source.
+    // whether timestamps will be snapped
+    bool mSnapTimestamps{true};
 
     // adjusted capture timestamp of the base frame
     int64_t mBaseCaptureUs;
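
To make the two timestamp policies described above concrete, here is a small worked example based on calculateCodecTimestamp_l(); the rates are illustrative, the non-snapping formula is the one added in this change, and the snapping path is paraphrased from the comment rather than copied from the code.

    // Illustrative numbers: mCaptureFps = 120, mFps = 30 (4x slow motion),
    // mBaseCaptureUs = 0, mBaseFrameUs = 0, source frame at timeUs = 16500 us.
    //
    // mSnapTimestamps == false (scaling only):
    //   mPrevCaptureUs = timeUs;
    //   mPrevFrameUs   = mBaseFrameUs
    //                  + llround((timeUs - mBaseCaptureUs) * (mCaptureFps / mFps))
    //                  = llround(16500 * 4.0) = 66000 us
    //
    // mSnapTimestamps == true (snap, then scale):
    //   16500 us is closest to capture point #2 (multiples of 1e6 / 120 ~ 8333 us),
    //   so the frame is emitted at llround(2 * 1e6 / 30) = 66667 us.
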
diff --git a/media/libstagefright/codecs/flac/enc/Android.bp b/media/libstagefright/codecs/flac/enc/Android.bp
index d7d871a..f35bce1 100644
--- a/media/libstagefright/codecs/flac/enc/Android.bp
+++ b/media/libstagefright/codecs/flac/enc/Android.bp
@@ -15,8 +15,10 @@
     },
 
     header_libs: ["libbase_headers"],
-    static_libs: [
+    shared_libs: [
         "libaudioutils",
+    ],
+    static_libs: [
         "libFLAC",
     ],
 }
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp
index f18f789..679b091 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp
@@ -1355,6 +1355,14 @@
             int tmpHeight = (tmpDisplayHeight + 15) & -16;
             int tmpWidth = (tmpDisplayWidth + 15) & -16;
 
+            if (tmpWidth > video->width)
+            {
+                // while allowed by the spec, this decoder does not actually
+                // support an increase in size.
+                ALOGE("width increase not supported");
+                status = PV_FAIL;
+                goto return_point;
+            }
             if (tmpHeight * tmpWidth > video->size)
             {
                 // This is just possibly "b/37079296".
diff --git a/media/libstagefright/codecs/on2/enc/Android.bp b/media/libstagefright/codecs/on2/enc/Android.bp
index cd69e0d..705e554 100644
--- a/media/libstagefright/codecs/on2/enc/Android.bp
+++ b/media/libstagefright/codecs/on2/enc/Android.bp
@@ -21,4 +21,5 @@
     },
 
     shared_libs: ["libvpx"],
+    header_libs: ["libbase_headers"],
 }
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
index 8d0ea3a..08e20cc 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
@@ -290,7 +290,7 @@
 }
 
 bool SoftVorbis::isConfigured() const {
-    return mInputBufferCount >= 2;
+    return (mState != NULL && mVi != NULL);
 }
 
 static void makeBitReader(
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index 359df3d..4711315 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -31,9 +31,14 @@
 
 namespace android {
 
-static int ALIGN(int x, int y) {
-    // y must be a power of 2.
-    return (x + y - 1) & ~(y - 1);
+inline void initDstYUV(
+        const android_ycbcr &ycbcr, int32_t cropTop, int32_t cropLeft,
+        uint8_t **dst_y, uint8_t **dst_u, uint8_t **dst_v) {
+    *dst_y = (uint8_t *)ycbcr.y + cropTop * ycbcr.ystride + cropLeft;
+
+    int32_t c_offset = (cropTop / 2) * ycbcr.cstride + cropLeft / 2;
+    *dst_v = (uint8_t *)ycbcr.cr + c_offset;
+    *dst_u = (uint8_t *)ycbcr.cb + c_offset;
 }
 
 SoftwareRenderer::SoftwareRenderer(
@@ -269,10 +274,21 @@
 
     Rect bounds(mCropWidth, mCropHeight);
 
-    void *dst;
-    CHECK_EQ(0, mapper.lock(buf->handle,
-            GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_RARELY,
-            bounds, &dst));
+    void *dst = NULL;
+    struct android_ycbcr ycbcr;
+    if ( !mConverter &&
+         (mColorFormat == OMX_COLOR_FormatYUV420Planar ||
+         mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar ||
+         mColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar ||
+         mColorFormat == OMX_COLOR_FormatYUV420Planar16)) {
+        CHECK_EQ(0, mapper.lockYCbCr(buf->handle,
+                GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_RARELY,
+                bounds, &ycbcr));
+    } else {
+        CHECK_EQ(0, mapper.lock(buf->handle,
+                GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_RARELY,
+                bounds, &dst));
+    }
 
     // TODO move the other conversions also into ColorConverter, and
     // fix cropping issues (when mCropLeft/Top != 0 or mWidth != mCropWidth)
@@ -289,22 +305,14 @@
         const uint8_t *src_u = (const uint8_t *)data + mStride * mHeight + mCropTop * mStride / 4;
         const uint8_t *src_v = (const uint8_t *)src_u + mStride * mHeight / 4;
 
-        uint8_t *dst_y = (uint8_t *)dst;
-        size_t dst_y_size = buf->stride * buf->height;
-        size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
-        size_t dst_c_size = dst_c_stride * buf->height / 2;
-        uint8_t *dst_v = dst_y + dst_y_size;
-        uint8_t *dst_u = dst_v + dst_c_size;
-
-        dst_y += mCropTop * buf->stride + mCropLeft;
-        dst_v += (mCropTop/2) * dst_c_stride + mCropLeft/2;
-        dst_u += (mCropTop/2) * dst_c_stride + mCropLeft/2;
+        uint8_t *dst_y, *dst_u, *dst_v;
+        initDstYUV(ycbcr, mCropTop, mCropLeft, &dst_y, &dst_u, &dst_v);
 
         for (int y = 0; y < mCropHeight; ++y) {
             memcpy(dst_y, src_y, mCropWidth);
 
             src_y += mStride;
-            dst_y += buf->stride;
+            dst_y += ycbcr.ystride;
         }
 
         for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
@@ -313,24 +321,16 @@
 
             src_u += mStride / 2;
             src_v += mStride / 2;
-            dst_u += dst_c_stride;
-            dst_v += dst_c_stride;
+            dst_u += ycbcr.cstride;
+            dst_v += ycbcr.cstride;
         }
     } else if (mColorFormat == OMX_COLOR_FormatYUV420Planar16) {
         const uint8_t *src_y = (const uint8_t *)data + mCropTop * mStride + mCropLeft * 2;
         const uint8_t *src_u = (const uint8_t *)data + mStride * mHeight + mCropTop * mStride / 4;
         const uint8_t *src_v = (const uint8_t *)src_u + mStride * mHeight / 4;
 
-        uint8_t *dst_y = (uint8_t *)dst;
-        size_t dst_y_size = buf->stride * buf->height;
-        size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
-        size_t dst_c_size = dst_c_stride * buf->height / 2;
-        uint8_t *dst_v = dst_y + dst_y_size;
-        uint8_t *dst_u = dst_v + dst_c_size;
-
-        dst_y += mCropTop * buf->stride + mCropLeft;
-        dst_v += (mCropTop / 2) * dst_c_stride + mCropLeft / 2;
-        dst_u += (mCropTop / 2) * dst_c_stride + mCropLeft / 2;
+        uint8_t *dst_y, *dst_u, *dst_v;
+        initDstYUV(ycbcr, mCropTop, mCropLeft, &dst_y, &dst_u, &dst_v);
 
         for (int y = 0; y < mCropHeight; ++y) {
             for (int x = 0; x < mCropWidth; ++x) {
@@ -338,7 +338,7 @@
             }
 
             src_y += mStride;
-            dst_y += buf->stride;
+            dst_y += ycbcr.ystride;
         }
 
         for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
@@ -349,8 +349,8 @@
 
             src_u += mStride / 2;
             src_v += mStride / 2;
-            dst_u += dst_c_stride;
-            dst_v += dst_c_stride;
+            dst_u += ycbcr.cstride;
+            dst_v += ycbcr.cstride;
         }
     } else if (mColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar
             || mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
@@ -361,23 +361,14 @@
         src_y += mCropLeft + mCropTop * mWidth;
         src_uv += (mCropLeft + mCropTop * mWidth) / 2;
 
-        uint8_t *dst_y = (uint8_t *)dst;
-
-        size_t dst_y_size = buf->stride * buf->height;
-        size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
-        size_t dst_c_size = dst_c_stride * buf->height / 2;
-        uint8_t *dst_v = dst_y + dst_y_size;
-        uint8_t *dst_u = dst_v + dst_c_size;
-
-        dst_y += mCropTop * buf->stride + mCropLeft;
-        dst_v += (mCropTop/2) * dst_c_stride + mCropLeft/2;
-        dst_u += (mCropTop/2) * dst_c_stride + mCropLeft/2;
+        uint8_t *dst_y, *dst_u, *dst_v;
+        initDstYUV(ycbcr, mCropTop, mCropLeft, &dst_y, &dst_u, &dst_v);
 
         for (int y = 0; y < mCropHeight; ++y) {
             memcpy(dst_y, src_y, mCropWidth);
 
             src_y += mWidth;
-            dst_y += buf->stride;
+            dst_y += ycbcr.ystride;
         }
 
         for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
@@ -388,8 +379,8 @@
             }
 
             src_uv += mWidth;
-            dst_u += dst_c_stride;
-            dst_v += dst_c_stride;
+            dst_u += ycbcr.cstride;
+            dst_v += ycbcr.cstride;
         }
     } else if (mColorFormat == OMX_COLOR_Format24bitRGB888) {
         uint8_t* srcPtr = (uint8_t*)data + mWidth * mCropTop * 3 + mCropLeft * 3;
diff --git a/media/libstagefright/data/media_codecs_google_c2_video.xml b/media/libstagefright/data/media_codecs_google_c2_video.xml
index a07eb8c..04041eb 100644
--- a/media/libstagefright/data/media_codecs_google_c2_video.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_video.xml
@@ -77,7 +77,7 @@
             <Limit name="bitrate" range="1-40000000" />
             <Feature name="adaptive-playback" />
         </MediaCodec>
-        <MediaCodec name="c2.android.gav1.decoder" type="video/av01">
+        <MediaCodec name="c2.android.av1.decoder" type="video/av01">
             <Limit name="size" min="96x96" max="1920x1080" />
             <Limit name="alignment" value="2x2" />
             <Limit name="block-size" value="16x16" />
diff --git a/media/libstagefright/data/media_codecs_sw.xml b/media/libstagefright/data/media_codecs_sw.xml
index 9532ba6..67d3f1a 100644
--- a/media/libstagefright/data/media_codecs_sw.xml
+++ b/media/libstagefright/data/media_codecs_sw.xml
@@ -182,7 +182,7 @@
             </Variant>
             <Feature name="adaptive-playback" />
         </MediaCodec>
-        <MediaCodec name="c2.android.gav1.decoder" type="video/av01" variant="!slow-cpu">
+        <MediaCodec name="c2.android.av1.decoder" type="video/av01" variant="!slow-cpu">
             <Limit name="size" min="2x2" max="1920x1080" />
             <Limit name="alignment" value="2x2" />
             <Limit name="block-size" value="16x16" />
diff --git a/media/libstagefright/filters/Android.bp b/media/libstagefright/filters/Android.bp
index 7a67e55..b1f62c7 100644
--- a/media/libstagefright/filters/Android.bp
+++ b/media/libstagefright/filters/Android.bp
@@ -8,7 +8,7 @@
         "MediaFilter.cpp",
         "RSFilter.cpp",
         "SaturationFilter.cpp",
-        "saturationARGB.rs",
+        "saturationARGB.rscript",
         "SimpleFilter.cpp",
         "ZeroFilter.cpp",
     ],
diff --git a/media/libstagefright/filters/saturation.rs b/media/libstagefright/filters/saturation.rscript
similarity index 100%
rename from media/libstagefright/filters/saturation.rs
rename to media/libstagefright/filters/saturation.rscript
diff --git a/media/libstagefright/filters/saturationARGB.rs b/media/libstagefright/filters/saturationARGB.rscript
similarity index 100%
rename from media/libstagefright/filters/saturationARGB.rs
rename to media/libstagefright/filters/saturationARGB.rscript
diff --git a/media/libstagefright/flac/dec/Android.bp b/media/libstagefright/flac/dec/Android.bp
index b494e16..7ebe71f 100644
--- a/media/libstagefright/flac/dec/Android.bp
+++ b/media/libstagefright/flac/dec/Android.bp
@@ -1,4 +1,4 @@
-cc_library {
+cc_library_shared {
     name: "libstagefright_flacdec",
     vendor_available: true,
 
@@ -18,29 +18,20 @@
         cfi: true,
     },
 
-    static: {
-        whole_static_libs: [
-            "libFLAC",
-            "libaudioutils",
-        ],
-    },
-
-    shared: {
-        static_libs: [
-            "libFLAC",
-            "libaudioutils",
-        ],
-        export_static_lib_headers: [
-            "libFLAC",
-        ],
-    },
-
     shared_libs: [
+        "libaudioutils",
         "liblog",
     ],
 
+    static_libs: [
+        "libFLAC",
+    ],
+
+    export_static_lib_headers: [
+        "libFLAC",
+    ],
+
     header_libs: [
         "libmedia_headers",
-        "libFLAC-headers",
     ],
 }
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index df66ac6..7752bda 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -22,7 +22,6 @@
 
 #include "AMessage.h"
 
-#include <binder/Parcel.h>
 #include <log/log.h>
 
 #include "AAtomizer.h"
@@ -34,6 +33,10 @@
 
 #include <media/stagefright/foundation/hexdump.h>
 
+#ifndef __ANDROID_VNDK__
+#include <binder/Parcel.h>
+#endif
+
 namespace android {
 
 extern ALooperRoster gLooperRoster;
@@ -643,6 +646,7 @@
     return s;
 }
 
+#ifndef __ANDROID_VNDK__
 // static
 sp<AMessage> AMessage::FromParcel(const Parcel &parcel, size_t maxNestingLevel) {
     int32_t what = parcel.readInt32();
@@ -809,6 +813,7 @@
         }
     }
 }
+#endif  // __ANDROID_VNDK__
 
 sp<AMessage> AMessage::changesFrom(const sp<const AMessage> &other, bool deep) const {
     if (other == NULL) {
diff --git a/media/libstagefright/foundation/AString.cpp b/media/libstagefright/foundation/AString.cpp
index fb51cc5..4bd186c 100644
--- a/media/libstagefright/foundation/AString.cpp
+++ b/media/libstagefright/foundation/AString.cpp
@@ -23,11 +23,14 @@
 #include <stdlib.h>
 #include <string.h>
 
-#include <binder/Parcel.h>
 #include <utils/String8.h>
 #include "ADebug.h"
 #include "AString.h"
 
+#ifndef __ANDROID_VNDK__
+#include <binder/Parcel.h>
+#endif
+
 namespace android {
 
 // static
@@ -362,11 +365,10 @@
     return !strcasecmp(mData + mSize - suffixLen, suffix);
 }
 
+#ifndef __ANDROID_VNDK__
 // static
 AString AString::FromParcel(const Parcel &parcel) {
     size_t size = static_cast<size_t>(parcel.readInt32());
-    // The static analyzer incorrectly reports a false-positive here in c++17.
-    // https://bugs.llvm.org/show_bug.cgi?id=38176 . NOLINTNEXTLINE
     return AString(static_cast<const char *>(parcel.readInplace(size)), size);
 }
 
@@ -378,6 +380,7 @@
     }
     return err;
 }
+#endif
 
 AString AStringPrintf(const char *format, ...) {
     va_list ap;
diff --git a/media/libstagefright/foundation/Android.bp b/media/libstagefright/foundation/Android.bp
index 533cd72..4b88ae4 100644
--- a/media/libstagefright/foundation/Android.bp
+++ b/media/libstagefright/foundation/Android.bp
@@ -78,6 +78,17 @@
         "hexdump.cpp",
     ],
 
+    target: {
+        vendor: {
+            exclude_shared_libs: [
+                "libbinder",
+            ],
+            cflags: [
+                "-DNO_IMEMORY",
+            ],
+        },
+    },
+
     clang: true,
 
     sanitize: {
diff --git a/media/libstagefright/foundation/MediaBuffer.cpp b/media/libstagefright/foundation/MediaBuffer.cpp
index 9beac05..8e245dc 100644
--- a/media/libstagefright/foundation/MediaBuffer.cpp
+++ b/media/libstagefright/foundation/MediaBuffer.cpp
@@ -72,7 +72,7 @@
             }
         } else {
             getSharedControl()->clear();
-            mData = (uint8_t *)mMemory->pointer() + sizeof(SharedControl);
+            mData = (uint8_t *)mMemory->unsecurePointer() + sizeof(SharedControl);
             ALOGV("Allocated shared mem buffer of size %zu @ %p", size, mData);
         }
     }
diff --git a/media/libstagefright/foundation/MediaBufferGroup.cpp b/media/libstagefright/foundation/MediaBufferGroup.cpp
index 84ff9a6..3c25047 100644
--- a/media/libstagefright/foundation/MediaBufferGroup.cpp
+++ b/media/libstagefright/foundation/MediaBufferGroup.cpp
@@ -75,7 +75,7 @@
 
         for (size_t i = 0; i < buffers; ++i) {
             sp<IMemory> mem = memoryDealer->allocate(augmented_size);
-            if (mem.get() == nullptr || mem->pointer() == nullptr) {
+            if (mem.get() == nullptr || mem->unsecurePointer() == nullptr) {
                 ALOGW("Only allocated %zu shared buffers of size %zu", i, buffer_size);
                 break;
             }
diff --git a/media/libstagefright/foundation/MetaData.cpp b/media/libstagefright/foundation/MetaData.cpp
index 1d0a607..8174597 100644
--- a/media/libstagefright/foundation/MetaData.cpp
+++ b/media/libstagefright/foundation/MetaData.cpp
@@ -17,7 +17,6 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MetaData"
 #include <inttypes.h>
-#include <binder/Parcel.h>
 #include <utils/KeyedVector.h>
 #include <utils/Log.h>
 
@@ -29,6 +28,10 @@
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/MetaData.h>
 
+#ifndef __ANDROID_VNDK__
+#include <binder/Parcel.h>
+#endif
+
 namespace android {
 
 
@@ -45,6 +48,7 @@
 MetaData::~MetaData() {
 }
 
+#ifndef __ANDROID_VNDK__
 /* static */
 sp<MetaData> MetaData::createFromParcel(const Parcel &parcel) {
 
@@ -52,6 +56,7 @@
     meta->updateFromParcel(parcel);
     return meta;
 }
+#endif
 
 }  // namespace android
 
diff --git a/media/libstagefright/foundation/MetaDataBase.cpp b/media/libstagefright/foundation/MetaDataBase.cpp
index bfea6f1..4b439c6 100644
--- a/media/libstagefright/foundation/MetaDataBase.cpp
+++ b/media/libstagefright/foundation/MetaDataBase.cpp
@@ -17,7 +17,6 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MetaDataBase"
 #include <inttypes.h>
-#include <binder/Parcel.h>
 #include <utils/KeyedVector.h>
 #include <utils/Log.h>
 
@@ -29,6 +28,10 @@
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/MetaDataBase.h>
 
+#ifndef __ANDROID_VNDK__
+#include <binder/Parcel.h>
+#endif
+
 namespace android {
 
 struct MetaDataBase::typed_data {
@@ -449,6 +452,7 @@
     }
 }
 
+#ifndef __ANDROID_VNDK__
 status_t MetaDataBase::writeToParcel(Parcel &parcel) {
     status_t ret;
     size_t numItems = mInternalData->mItems.size();
@@ -528,6 +532,7 @@
     ALOGW("no metadata in parcel");
     return UNKNOWN_ERROR;
 }
+#endif
 
 }  // namespace android
 
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
index 742651e..b5d6666 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
@@ -63,6 +63,7 @@
     AMessage();
     AMessage(uint32_t what, const sp<const AHandler> &handler);
 
+#ifndef __ANDROID_VNDK__
     // Construct an AMessage from a parcel.
     // nestingAllowed determines how many levels AMessage can be nested inside
     // AMessage. The default value here is arbitrarily set to 255.
@@ -87,6 +88,7 @@
     // All items in the AMessage must have types that are recognized by
     // FromParcel(); otherwise, TRESPASS error will occur.
     void writeToParcel(Parcel *parcel) const;
+#endif
 
     void setWhat(uint32_t what);
     uint32_t what() const;
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h
index 0f6299c..deef0d4 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h
@@ -89,8 +89,10 @@
 
     void tolower();
 
+#ifndef __ANDROID_VNDK__
     static AString FromParcel(const Parcel &parcel);
     status_t writeToParcel(Parcel *parcel) const;
+#endif
 
 private:
     constexpr static const char *kEmptyString = "";
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
index dc58c15..8e42fcf 100644
--- a/media/libstagefright/include/FrameDecoder.h
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -24,15 +24,17 @@
 #include <media/stagefright/foundation/ABase.h>
 #include <media/MediaSource.h>
 #include <media/openmax/OMX_Video.h>
-#include <system/graphics-base.h>
+#include <ui/GraphicTypes.h>
 
 namespace android {
 
 struct AMessage;
-class MediaCodecBuffer;
-class IMediaSource;
-class VideoFrame;
 struct MediaCodec;
+class IMediaSource;
+class MediaCodecBuffer;
+class Surface;
+class SurfaceControl;
+class VideoFrame;
 
 struct FrameRect {
     int32_t left, top, right, bottom;
@@ -44,13 +46,10 @@
             const sp<MetaData> &trackMeta,
             const sp<IMediaSource> &source);
 
-    status_t init(
-            int64_t frameTimeUs, size_t numFrames, int option, int colorFormat);
+    status_t init(int64_t frameTimeUs, int option, int colorFormat);
 
     sp<IMemory> extractFrame(FrameRect *rect = NULL);
 
-    status_t extractFrames(std::vector<sp<IMemory> >* frames);
-
     static sp<IMemory> getMetadataOnly(
             const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail = false);
 
@@ -59,9 +58,9 @@
 
     virtual sp<AMessage> onGetFormatAndSeekOptions(
             int64_t frameTimeUs,
-            size_t numFrames,
             int seekMode,
-            MediaSource::ReadOptions *options) = 0;
+            MediaSource::ReadOptions *options,
+            sp<Surface> *window) = 0;
 
     virtual status_t onExtractRect(FrameRect *rect) = 0;
 
@@ -79,24 +78,24 @@
 
     sp<MetaData> trackMeta()     const      { return mTrackMeta; }
     OMX_COLOR_FORMATTYPE dstFormat() const  { return mDstFormat; }
+    ui::PixelFormat captureFormat() const   { return mCaptureFormat; }
     int32_t dstBpp()             const      { return mDstBpp; }
-
-    void addFrame(const sp<IMemory> &frame) {
-        mFrames.push_back(frame);
-    }
+    void setFrame(const sp<IMemory> &frameMem) { mFrameMemory = frameMem; }
 
 private:
     AString mComponentName;
     sp<MetaData> mTrackMeta;
     sp<IMediaSource> mSource;
     OMX_COLOR_FORMATTYPE mDstFormat;
+    ui::PixelFormat mCaptureFormat;
     int32_t mDstBpp;
-    std::vector<sp<IMemory> > mFrames;
+    sp<IMemory> mFrameMemory;
     MediaSource::ReadOptions mReadOptions;
     sp<MediaCodec> mDecoder;
     sp<AMessage> mOutputFormat;
     bool mHaveMoreInputs;
     bool mFirstSample;
+    sp<Surface> mSurface;
 
     status_t extractInternal();
 
@@ -112,9 +111,9 @@
 protected:
     virtual sp<AMessage> onGetFormatAndSeekOptions(
             int64_t frameTimeUs,
-            size_t numFrames,
             int seekMode,
-            MediaSource::ReadOptions *options) override;
+            MediaSource::ReadOptions *options,
+            sp<Surface> *window) override;
 
     virtual status_t onExtractRect(FrameRect *rect) override {
         // Rect extraction for sequences is not supported for now.
@@ -134,11 +133,17 @@
             bool *done) override;
 
 private:
+    sp<SurfaceControl> mSurfaceControl;
+    sp<SurfaceControl> mParent;
+    VideoFrame *mFrame;
     bool mIsAvcOrHevc;
     MediaSource::ReadOptions::SeekMode mSeekMode;
     int64_t mTargetTimeUs;
-    size_t mNumFrames;
-    size_t mNumFramesDecoded;
+    List<int64_t> mSampleDurations;
+    int64_t mDefaultSampleDurationUs;
+
+    sp<Surface> initSurfaceControl();
+    status_t captureSurfaceControl();
 };
 
 struct ImageDecoder : public FrameDecoder {
@@ -150,9 +155,9 @@
 protected:
     virtual sp<AMessage> onGetFormatAndSeekOptions(
             int64_t frameTimeUs,
-            size_t numFrames,
             int seekMode,
-            MediaSource::ReadOptions *options) override;
+            MediaSource::ReadOptions *options,
+            sp<Surface> *window) override;
 
     virtual status_t onExtractRect(FrameRect *rect) override;
 
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libstagefright/include/StagefrightMetadataRetriever.h
index c50677a..ee51290 100644
--- a/media/libstagefright/include/StagefrightMetadataRetriever.h
+++ b/media/libstagefright/include/StagefrightMetadataRetriever.h
@@ -26,7 +26,7 @@
 namespace android {
 
 class DataSource;
-struct ImageDecoder;
+struct FrameDecoder;
 struct FrameRect;
 
 struct StagefrightMetadataRetriever : public MediaMetadataRetrieverBase {
@@ -47,9 +47,8 @@
             int index, int colorFormat, bool metaOnly, bool thumbnail);
     virtual sp<IMemory> getImageRectAtIndex(
             int index, int colorFormat, int left, int top, int right, int bottom);
-    virtual status_t getFrameAtIndex(
-            std::vector<sp<IMemory> >* frames,
-            int frameIndex, int numFrames, int colorFormat, bool metaOnly);
+    virtual sp<IMemory> getFrameAtIndex(
+            int index, int colorFormat, bool metaOnly);
 
     virtual MediaAlbumArt *extractAlbumArt();
     virtual const char *extractMetadata(int keyCode);
@@ -62,17 +61,17 @@
     KeyedVector<int, String8> mMetaData;
     MediaAlbumArt *mAlbumArt;
 
-    sp<ImageDecoder> mImageDecoder;
-    int mLastImageIndex;
+    sp<FrameDecoder> mDecoder;
+    int mLastDecodedIndex;
     void parseMetaData();
     void parseColorAspects(const sp<MetaData>& meta);
     // Delete album art and clear metadata.
     void clearMetadata();
 
-    status_t getFrameInternal(
-            int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
-            sp<IMemory>* outFrame, std::vector<sp<IMemory> >* outFrames);
-    virtual sp<IMemory> getImageInternal(
+    sp<IMemory> getFrameInternal(
+            int64_t timeUs, int option, int colorFormat, bool metaOnly);
+
+    sp<IMemory> getImageInternal(
             int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect);
 
     StagefrightMetadataRetriever(const StagefrightMetadataRetriever &);
diff --git a/media/libstagefright/include/media/stagefright/BatteryChecker.h b/media/libstagefright/include/media/stagefright/BatteryChecker.h
new file mode 100644
index 0000000..2ec4ac0
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/BatteryChecker.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BATTERY_CHECKER_H_
+#define BATTERY_CHECKER_H_
+
+#include <media/stagefright/foundation/AMessage.h>
+
+namespace android {
+
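+// Tracks codec activity for battery accounting: onCodecActivity() runs the
+// "battery on" callback when buffers flow while the codec is executing, and
+// onCheckBatteryTimer() runs the "battery off" callback once no activity has
+// been observed for the configured timeout.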
+struct BatteryChecker : public RefBase {
+    BatteryChecker(const sp<AMessage> &msg, int64_t timeout = 3000000ll);
+
+    void setExecuting(bool executing) { mIsExecuting = executing; }
+    void onCodecActivity(std::function<void()> batteryOnCb);
+    void onCheckBatteryTimer(const sp<AMessage>& msg, std::function<void()> batteryOffCb);
+    void onClientRemoved();
+
+private:
+    const int64_t mTimeoutUs;
+    int64_t mLastActivityTimeUs;
+    bool mBatteryStatNotified;
+    int32_t mBatteryCheckerGeneration;
+    bool mIsExecuting;
+    sp<AMessage> mBatteryCheckerMsg;
+
+    bool isExecuting() { return mIsExecuting; }
+
+    DISALLOW_EVIL_CONSTRUCTORS(BatteryChecker);
+};
+
+}  // namespace android
+
+#endif // BATTERY_CHECKER_H_
diff --git a/media/libstagefright/include/media/stagefright/DataSourceBase.h b/media/libstagefright/include/media/stagefright/DataSourceBase.h
index af5b83d..c607c91 100644
--- a/media/libstagefright/include/media/stagefright/DataSourceBase.h
+++ b/media/libstagefright/include/media/stagefright/DataSourceBase.h
@@ -18,6 +18,8 @@
 
 #define DATA_SOURCE_BASE_H_
 
+#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/MediaErrors.h>
 #include <sys/types.h>
 #include <utils/Errors.h>
 
@@ -45,20 +47,106 @@
     virtual ssize_t readAt(off64_t offset, void *data, size_t size) = 0;
 
     // Convenience methods:
-    bool getUInt16(off64_t offset, uint16_t *x);
-    bool getUInt24(off64_t offset, uint32_t *x); // 3 byte int, returned as a 32-bit int
-    bool getUInt32(off64_t offset, uint32_t *x);
-    bool getUInt64(off64_t offset, uint64_t *x);
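+    // All of these read big-endian (network byte order) values from the source.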
+    bool getUInt16(off64_t offset, uint16_t *x) {
+        *x = 0;
+
+        uint8_t byte[2];
+        if (readAt(offset, byte, 2) != 2) {
+            return false;
+        }
+
+        *x = (byte[0] << 8) | byte[1];
+
+        return true;
+    }
+    // 3 byte int, returned as a 32-bit int
+    bool getUInt24(off64_t offset, uint32_t *x) {
+        *x = 0;
+
+        uint8_t byte[3];
+        if (readAt(offset, byte, 3) != 3) {
+            return false;
+        }
+
+        *x = (byte[0] << 16) | (byte[1] << 8) | byte[2];
+
+        return true;
+    }
+    bool getUInt32(off64_t offset, uint32_t *x) {
+        *x = 0;
+
+        uint32_t tmp;
+        if (readAt(offset, &tmp, 4) != 4) {
+            return false;
+        }
+
+        *x = ntohl(tmp);
+
+        return true;
+    }
+    bool getUInt64(off64_t offset, uint64_t *x) {
+        *x = 0;
+
+        uint64_t tmp;
+        if (readAt(offset, &tmp, 8) != 8) {
+            return false;
+        }
+
+        *x = ntoh64(tmp);
+
+        return true;
+    }
 
     // read either int<N> or int<2N> into a uint<2N>_t, size is the int size in bytes.
-    bool getUInt16Var(off64_t offset, uint16_t *x, size_t size);
-    bool getUInt32Var(off64_t offset, uint32_t *x, size_t size);
-    bool getUInt64Var(off64_t offset, uint64_t *x, size_t size);
+    bool getUInt16Var(off64_t offset, uint16_t *x, size_t size) {
+        if (size == 2) {
+            return getUInt16(offset, x);
+        }
+        if (size == 1) {
+            uint8_t tmp;
+            if (readAt(offset, &tmp, 1) == 1) {
+                *x = tmp;
+                return true;
+            }
+        }
+        return false;
+    }
+    bool getUInt32Var(off64_t offset, uint32_t *x, size_t size) {
+        if (size == 4) {
+            return getUInt32(offset, x);
+        }
+        if (size == 2) {
+            uint16_t tmp;
+            if (getUInt16(offset, &tmp)) {
+                *x = tmp;
+                return true;
+            }
+        }
+        return false;
+    }
+    bool getUInt64Var(off64_t offset, uint64_t *x, size_t size) {
+        if (size == 8) {
+            return getUInt64(offset, x);
+        }
+        if (size == 4) {
+            uint32_t tmp;
+            if (getUInt32(offset, &tmp)) {
+                *x = tmp;
+                return true;
+            }
+        }
+        return false;
+    }
 
     // May return ERROR_UNSUPPORTED.
-    virtual status_t getSize(off64_t *size);
+    virtual status_t getSize(off64_t *size) {
+        *size = 0;
+        return ERROR_UNSUPPORTED;
+    }
 
-    virtual bool getUri(char *uriString, size_t bufferSize);
+    virtual bool getUri(char * /*uriString*/, size_t /*bufferSize*/) {
+        return false;
+    }
 
     virtual uint32_t flags() {
         return 0;
diff --git a/media/libstagefright/include/media/stagefright/MediaBuffer.h b/media/libstagefright/include/media/stagefright/MediaBuffer.h
index ace63ae..9145b63 100644
--- a/media/libstagefright/include/media/stagefright/MediaBuffer.h
+++ b/media/libstagefright/include/media/stagefright/MediaBuffer.h
@@ -48,7 +48,11 @@
     explicit MediaBuffer(const sp<ABuffer> &buffer);
 #ifndef NO_IMEMORY
     MediaBuffer(const sp<IMemory> &mem) :
-        MediaBuffer((uint8_t *)mem->pointer() + sizeof(SharedControl), mem->size()) {
+         // TODO: Using unsecurePointer() has some associated security pitfalls
+         //       (see declaration for details).
+         //       Either document why it is safe in this case or address the
+         //       issue (e.g. by copying).
+        MediaBuffer((uint8_t *)mem->unsecurePointer() + sizeof(SharedControl), mem->size()) {
         // delegate and override mMemory
         mMemory = mem;
     }
@@ -94,9 +98,13 @@
 
     virtual int remoteRefcount() const {
 #ifndef NO_IMEMORY
-        if (mMemory.get() == nullptr || mMemory->pointer() == nullptr) return 0;
+         // TODO: Using unsecurePointer() has some associated security pitfalls
+         //       (see declaration for details).
+         //       Either document why it is safe in this case or address the
+         //       issue (e.g. by copying).
+        if (mMemory.get() == nullptr || mMemory->unsecurePointer() == nullptr) return 0;
         int32_t remoteRefcount =
-                reinterpret_cast<SharedControl *>(mMemory->pointer())->getRemoteRefcount();
+                reinterpret_cast<SharedControl *>(mMemory->unsecurePointer())->getRemoteRefcount();
         // Sanity check so that remoteRefCount() is non-negative.
         return remoteRefcount >= 0 ? remoteRefcount : 0; // do not allow corrupted data.
 #else
@@ -107,8 +115,12 @@
     // returns old value
     int addRemoteRefcount(int32_t value) {
 #ifndef NO_IMEMORY
-        if (mMemory.get() == nullptr || mMemory->pointer() == nullptr) return 0;
-        return reinterpret_cast<SharedControl *>(mMemory->pointer())->addRemoteRefcount(value);
+        // TODO: Using unsecurePointer() has some associated security pitfalls
+        //       (see declaration for details).
+        //       Either document why it is safe in this case or address the
+        //       issue (e.g. by copying).
+        if (mMemory.get() == nullptr || mMemory->unsecurePointer() == nullptr) return 0;
+        return reinterpret_cast<SharedControl *>(mMemory->unsecurePointer())->addRemoteRefcount(value);
 #else
         (void) value;
         return 0;
@@ -121,8 +133,12 @@
 
     static bool isDeadObject(const sp<IMemory> &memory) {
 #ifndef NO_IMEMORY
-        if (memory.get() == nullptr || memory->pointer() == nullptr) return false;
-        return reinterpret_cast<SharedControl *>(memory->pointer())->isDeadObject();
+         // TODO: Using unsecurePointer() has some associated security pitfalls
+         //       (see declaration for details).
+         //       Either document why it is safe in this case or address the
+         //       issue (e.g. by copying).
+        if (memory.get() == nullptr || memory->unsecurePointer() == nullptr) return false;
+        return reinterpret_cast<SharedControl *>(memory->unsecurePointer())->isDeadObject();
 #else
         (void) memory;
         return false;
@@ -220,7 +236,11 @@
 
     inline SharedControl *getSharedControl() const {
 #ifndef NO_IMEMORY
-         return reinterpret_cast<SharedControl *>(mMemory->pointer());
+         // TODO: Using unsecurePointer() has some associated security pitfalls
+         //       (see declaration for details).
+         //       Either document why it is safe in this case or address the
+         //       issue (e.g. by copying).
+         return reinterpret_cast<SharedControl *>(mMemory->unsecurePointer());
 #else
          return nullptr;
 #endif
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index 89cca63..01d0325 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -25,7 +25,7 @@
 #include <media/hardware/CryptoAPI.h>
 #include <media/MediaCodecInfo.h>
 #include <media/MediaResource.h>
-#include <media/MediaAnalyticsItem.h>
+#include <media/MediaMetrics.h>
 #include <media/stagefright/foundation/AHandler.h>
 #include <media/stagefright/FrameRenderTracker.h>
 #include <utils/Vector.h>
@@ -36,6 +36,7 @@
 struct AMessage;
 struct AReplyToken;
 struct AString;
+struct BatteryChecker;
 class BufferChannelBase;
 struct CodecBase;
 class IBatteryStats;
@@ -188,7 +189,7 @@
 
     status_t getCodecInfo(sp<MediaCodecInfo> *codecInfo) const;
 
-    status_t getMetrics(MediaAnalyticsItem * &reply);
+    status_t getMetrics(mediametrics_handle_t &reply);
 
     status_t setParameters(const sp<AMessage> &params);
 
@@ -257,6 +258,7 @@
         kWhatSetCallback                    = 'setC',
         kWhatSetNotification                = 'setN',
         kWhatDrmReleaseCrypto               = 'rDrm',
+        kWhatCheckBatteryStats              = 'chkB',
     };
 
     enum {
@@ -283,7 +285,7 @@
     };
 
     struct ResourceManagerServiceProxy : public IBinder::DeathRecipient {
-        ResourceManagerServiceProxy(pid_t pid);
+        ResourceManagerServiceProxy(pid_t pid, uid_t uid);
         ~ResourceManagerServiceProxy();
 
         void init();
@@ -296,7 +298,11 @@
                 const sp<IResourceManagerClient> &client,
                 const Vector<MediaResource> &resources);
 
-        void removeResource(int64_t clientId);
+        void removeResource(
+                int64_t clientId,
+                const Vector<MediaResource> &resources);
+
+        void removeClient(int64_t clientId);
 
         bool reclaimResource(const Vector<MediaResource> &resources);
 
@@ -304,6 +310,7 @@
         Mutex mLock;
         sp<IResourceManagerService> mService;
         pid_t mPid;
+        uid_t mUid;
     };
 
     State mState;
@@ -321,11 +328,11 @@
     sp<Surface> mSurface;
     SoftwareRenderer *mSoftRenderer;
 
-    MediaAnalyticsItem *mAnalyticsItem;
-    void initAnalyticsItem();
-    void updateAnalyticsItem();
-    void flushAnalyticsItem();
-    void updateEphemeralAnalytics(MediaAnalyticsItem *item);
+    mediametrics_handle_t mMetricsHandle;
+    void initMediametrics();
+    void updateMediametrics();
+    void flushMediametrics();
+    void updateEphemeralMediametrics(mediametrics_handle_t item);
 
     sp<AMessage> mOutputFormat;
     sp<AMessage> mInputFormat;
@@ -335,7 +342,6 @@
     sp<IResourceManagerClient> mResourceManagerClient;
     sp<ResourceManagerServiceProxy> mResourceManagerService;
 
-    bool mBatteryStatNotified;
     bool mIsVideo;
     int32_t mVideoWidth;
     int32_t mVideoHeight;
@@ -425,11 +431,11 @@
     status_t onSetParameters(const sp<AMessage> &params);
 
     status_t amendOutputFormatWithCodecSpecificData(const sp<MediaCodecBuffer> &buffer);
-    void updateBatteryStat();
     bool isExecuting() const;
 
     uint64_t getGraphicBufferSize();
     void addResource(MediaResource::Type type, MediaResource::SubType subtype, uint64_t value);
+    void removeResource(MediaResource::Type type, MediaResource::SubType subtype, uint64_t value);
     void requestCpuBoostIfNeeded();
 
     bool hasPendingBuffer(int portIndex);
@@ -458,6 +464,8 @@
     Mutex mLatencyLock;
     int64_t mLatencyUnknown;    // buffers for which we couldn't calculate latency
 
+    sp<BatteryChecker> mBatteryChecker;
+
     void statsBufferSent(int64_t presentationUs);
     void statsBufferReceived(int64_t presentationUs);
 
diff --git a/media/libstagefright/include/media/stagefright/MediaWriter.h b/media/libstagefright/include/media/stagefright/MediaWriter.h
index 2c12a87..972ae1d 100644
--- a/media/libstagefright/include/media/stagefright/MediaWriter.h
+++ b/media/libstagefright/include/media/stagefright/MediaWriter.h
@@ -35,6 +35,10 @@
     virtual status_t start(MetaData *params = NULL) = 0;
     virtual status_t stop() = 0;
     virtual status_t pause() = 0;
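+    // Optional hook; the default implementation reports ERROR_UNSUPPORTED.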
+    virtual status_t setCaptureRate(float /* captureFps */) {
+        ALOGW("setCaptureRate unsupported");
+        return ERROR_UNSUPPORTED;
+    }
 
     virtual void setMaxFileSize(int64_t bytes) { mMaxFileSizeLimitBytes = bytes; }
     virtual void setMaxFileDuration(int64_t durationUs) { mMaxFileDurationLimitUs = durationUs; }
diff --git a/media/libstagefright/include/media/stagefright/MetaData.h b/media/libstagefright/include/media/stagefright/MetaData.h
index f625358..68adf346 100644
--- a/media/libstagefright/include/media/stagefright/MetaData.h
+++ b/media/libstagefright/include/media/stagefright/MetaData.h
@@ -41,7 +41,9 @@
     friend class BnMediaSource;
     friend class BpMediaSource;
     friend class BpMediaExtractor;
+#ifndef __ANDROID_VNDK__
     static sp<MetaData> createFromParcel(const Parcel &parcel);
+#endif
 };
 
 }  // namespace android
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index 8dc2dd5..659bd5b 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -319,8 +319,10 @@
     struct Rect;
     struct MetaDataInternal;
     MetaDataInternal *mInternalData;
+#ifndef __ANDROID_VNDK__
     status_t writeToParcel(Parcel &parcel);
     status_t updateFromParcel(const Parcel &parcel);
+#endif
 };
 
 }  // namespace android
diff --git a/media/libstagefright/include/media/stagefright/RemoteDataSource.h b/media/libstagefright/include/media/stagefright/RemoteDataSource.h
index e191e6a..b634505 100644
--- a/media/libstagefright/include/media/stagefright/RemoteDataSource.h
+++ b/media/libstagefright/include/media/stagefright/RemoteDataSource.h
@@ -48,7 +48,7 @@
         if (size > kBufferSize) {
             size = kBufferSize;
         }
-        return mSource->readAt(offset, mMemory->pointer(), size);
+        return mSource->readAt(offset, mMemory->unsecurePointer(), size);
     }
     virtual status_t getSize(off64_t *size) {
         return mSource->getSize(size);
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index e260cae..7d612b4 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -45,7 +45,6 @@
         "libdl",
         "libhidlbase",
         "libhidlmemory",
-        "libhidltransport",
         "libvndksupport",
         "android.hardware.media.omx@1.0",
         "android.hardware.graphics.bufferqueue@1.0",
@@ -72,7 +71,6 @@
         cfi: true,
     },
 
-    compile_multilib: "32",
 }
 
 cc_library_shared {
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index ddb4ba0..e1c3916 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -128,7 +128,7 @@
     }
 
     OMX_U8 *getPointer() {
-        return mMem.get() ? static_cast<OMX_U8*>(mMem->pointer()) :
+        return mMem.get() ? static_cast<OMX_U8*>(mMem->unsecurePointer()) :
                 mHidlMemory.get() ? static_cast<OMX_U8*>(
                 static_cast<void*>(mHidlMemory->getPointer())) : nullptr;
     }
@@ -1173,7 +1173,11 @@
         return BAD_VALUE;
     }
     if (params != NULL) {
-        paramsPointer = params->pointer();
+        // TODO: Using unsecurePointer() has some associated security pitfalls
+        //       (see declaration for details).
+        //       Either document why it is safe in this case or address the
+        //       issue (e.g. by copying).
+        paramsPointer = params->unsecurePointer();
         paramsSize = params->size();
     } else if (hParams != NULL) {
         paramsPointer = hParams->getPointer();
diff --git a/media/libstagefright/rtsp/ARTSPConnection.h b/media/libstagefright/rtsp/ARTSPConnection.h
index 8df2676..56b604d 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.h
+++ b/media/libstagefright/rtsp/ARTSPConnection.h
@@ -46,6 +46,8 @@
             const char *url, AString *host, unsigned *port, AString *path,
             AString *user, AString *pass);
 
+    int getSocket() { return mSocket; }
+
 protected:
     virtual ~ARTSPConnection();
     virtual void onMessageReceived(const sp<AMessage> &msg);
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 48bc8ce..85ffba2 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -257,6 +257,10 @@
         msg->post();
     }
 
+    sp<ARTSPConnection> getARTSPConnection() {
+        return mConn;
+    }
+
     static void addRR(const sp<ABuffer> &buf) {
         uint8_t *ptr = buf->data() + buf->size();
         ptr[0] = 0x80 | 0;
diff --git a/media/libstagefright/tests/Android.bp b/media/libstagefright/tests/Android.bp
index be10fdc..a7f94c1 100644
--- a/media/libstagefright/tests/Android.bp
+++ b/media/libstagefright/tests/Android.bp
@@ -27,3 +27,21 @@
         "-Wall",
     ],
 }
+
+cc_test {
+    name: "BatteryChecker_test",
+    srcs: ["BatteryChecker_test.cpp"],
+    test_suites: ["device-tests"],
+
+    shared_libs: [
+        "libstagefright",
+        "libstagefright_foundation",
+        "libutils",
+        "liblog",
+    ],
+
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+}
\ No newline at end of file
diff --git a/media/libstagefright/tests/BatteryChecker_test.cpp b/media/libstagefright/tests/BatteryChecker_test.cpp
new file mode 100644
index 0000000..0c5ee9b
--- /dev/null
+++ b/media/libstagefright/tests/BatteryChecker_test.cpp
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "BatteryChecker_test"
+#include <utils/Log.h>
+
+#include <gtest/gtest.h>
+
+#include <media/stagefright/BatteryChecker.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AHandler.h>
+
+#include <vector>
+
+namespace android {
+
+static const int kBatteryTimeoutUs = 1000000ll; // 1 second
+static const int kTestMarginUs = 50000ll; // 50ms
+static const int kWaitStatusChangeUs = kBatteryTimeoutUs + kTestMarginUs;
+static const int kSparseFrameIntervalUs = kBatteryTimeoutUs - kTestMarginUs;
+
+class BatteryCheckerTestHandler : public AHandler {
+    enum EventType {
+        // Events simulating MediaCodec
+        kWhatStart = 0,             // codec entering executing state
+        kWhatStop,                  // codec exiting executing state
+        kWhatActivity,              // codec queues input or dequeues output
+        kWhatReleased,              // codec released
+        kWhatCheckpoint,            // test checkpoint with expected On/Off counter values
+
+        // Message for battery checker monitor (not for testing through runTest())
+        kWhatBatteryChecker,
+    };
+
+    struct Operation {
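+        // One scripted event: 'event' fires 'delay' microseconds after the previous
+        // one and is repeated 'repeatCount' extra times; the expected counters are
+        // checked only when the event is kWhatCheckpoint.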
+        int32_t event;
+        int64_t delay = 0;
+        uint32_t repeatCount = 0;
+        int32_t expectedOnCounter = 0;
+        int32_t expectedOffCounter = 0;
+    };
+
+    std::vector<Operation> mOps;
+    sp<BatteryChecker> mBatteryChecker;
+    int32_t mOnCounter;
+    int32_t mOffCounter;
+    Condition mDone;
+    Mutex mLock;
+
+    BatteryCheckerTestHandler() : mOnCounter(0), mOffCounter(0) {}
+
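+    // Posts the scripted operations through the looper and blocks until the last
+    // one has been handled or timeoutUs elapses.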
+    void runTest(const std::vector<Operation> &ops, int64_t timeoutUs) {
+        mOps = ops;
+
+        mBatteryChecker = new BatteryChecker(
+                new AMessage(kWhatBatteryChecker, this), kBatteryTimeoutUs);
+
+        (new AMessage(ops[0].event, this))->post();
+
+        // wait for done
+        AutoMutex lock(mLock);
+        EXPECT_NE(TIMED_OUT, mDone.waitRelative(mLock, timeoutUs * 1000ll));
+    }
+
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+    friend class BatteryCheckerTest;
+};
+
+class BatteryCheckerTest : public ::testing::Test {
+public:
+    BatteryCheckerTest()
+        : mLooper(new ALooper)
+        , mHandler(new BatteryCheckerTestHandler()) {
+        mLooper->setName("BatteryCheckerLooper");
+        mLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+        mLooper->registerHandler(mHandler);
+    }
+
+protected:
+    using EventType = BatteryCheckerTestHandler::EventType;
+    using Operation = BatteryCheckerTestHandler::Operation;
+
+    virtual ~BatteryCheckerTest() {
+        mLooper->stop();
+        mLooper->unregisterHandler(mHandler->id());
+    }
+
+    void runTest(const std::vector<Operation> &ops, int64_t timeoutUs) {
+        mHandler->runTest(ops, timeoutUs);
+    }
+
+    sp<ALooper> mLooper;
+    sp<BatteryCheckerTestHandler> mHandler;
+};
+
+void BatteryCheckerTestHandler::onMessageReceived(const sp<AMessage> &msg) {
+    switch(msg->what()) {
+    case kWhatStart:
+        mBatteryChecker->setExecuting(true);
+        break;
+    case kWhatStop:
+        mBatteryChecker->setExecuting(false);
+        break;
+    case kWhatActivity:
+        mBatteryChecker->onCodecActivity([this] () { mOnCounter++; });
+        break;
+    case kWhatReleased:
+        mBatteryChecker->onClientRemoved();
+        break;
+    case kWhatBatteryChecker:
+        mBatteryChecker->onCheckBatteryTimer(msg, [this] () { mOffCounter++;  });
+        break;
+    case kWhatCheckpoint:
+        // verify ON/OFF state and total events
+        EXPECT_EQ(mOnCounter, mOps[0].expectedOnCounter);
+        EXPECT_EQ(mOffCounter, mOps[0].expectedOffCounter);
+        break;
+    default:
+        TRESPASS();
+    }
+    if (msg->what() != kWhatBatteryChecker) {
+        EXPECT_EQ(msg->what(), mOps[0].event);
+        // post next message
+        if (!mOps[0].repeatCount) {
+            mOps.erase(mOps.begin());
+        } else {
+            mOps[0].repeatCount--;
+        }
+        if (!mOps.empty()) {
+            (new AMessage(mOps[0].event, this))->post(mOps[0].delay);
+        } else {
+            AutoMutex lock(mLock);
+            mDone.signal();
+        }
+    }
+}
+
+TEST_F(BatteryCheckerTest, testNormalOperations) {
+    runTest({
+        {EventType::kWhatStart,        0ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 0, 0},
+        {EventType::kWhatActivity,     33333ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 1, 0},                 // ON
+        {EventType::kWhatActivity,     33333ll, 2*kWaitStatusChangeUs/33333ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 1, 0},
+        {EventType::kWhatCheckpoint,   kWaitStatusChangeUs, 0, 1, 1}, // OFF
+    }, 10000000ll);
+}
+
+TEST_F(BatteryCheckerTest, testPauseResume) {
+    runTest({
+        {EventType::kWhatStart,        0ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 0, 0},
+        {EventType::kWhatActivity,     33333ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 1, 0},                 // ON
+        {EventType::kWhatCheckpoint,   kWaitStatusChangeUs, 0, 1, 1}, // OFF
+        {EventType::kWhatActivity,     33333ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 2, 1},                 // ON
+        {EventType::kWhatCheckpoint,   kWaitStatusChangeUs, 0, 2, 2}, // OFF
+    }, 10000000ll);
+}
+
+TEST_F(BatteryCheckerTest, testClientRemovedAndRestart) {
+    runTest({
+        {EventType::kWhatStart,        0ll},
+        {EventType::kWhatActivity,     33333ll, kWaitStatusChangeUs/33333ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 1, 0},
+
+        // stop executing state itself shouldn't trigger any calls
+        {EventType::kWhatStop,         0ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 1, 0},
+
+        // release shouldn't trigger any calls either,
+        // client resource will be removed entirely
+        {EventType::kWhatReleased,     0ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 1, 0},
+        {EventType::kWhatCheckpoint,   kWaitStatusChangeUs, 0, 1, 0},
+
+        // start pushing buffers again, On should be received without any Off
+        {EventType::kWhatStart,        0ll},
+        {EventType::kWhatActivity,     33333ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 2, 0},
+
+        // double check that only new checker msg triggers OFF,
+        // left-over checker msg from the stale generation is discarded
+        {EventType::kWhatCheckpoint,   kWaitStatusChangeUs, 0, 2, 1},
+    }, 10000000ll);
+}
+
+TEST_F(BatteryCheckerTest, testActivityWhileNotExecuting) {
+    runTest({
+        // activity before start shouldn't trigger
+        {EventType::kWhatActivity,     0ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 0, 0},
+
+        {EventType::kWhatStart,        0ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 0, 0},
+
+        // activity after start before stop should trigger
+        {EventType::kWhatActivity,     33333ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 1, 0},
+
+        // stop executing state itself shouldn't trigger any calls
+        {EventType::kWhatStop,         0ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 1, 0},
+
+        // keep pushing activity after stop; OFF is still expected once the timeout expires
+        {EventType::kWhatActivity,     33333ll, kWaitStatusChangeUs/33333ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 1, 1},
+    }, 10000000ll);
+}
+
+TEST_F(BatteryCheckerTest, testSparseActivity) {
+    runTest({
+        {EventType::kWhatStart,        0ll},
+        {EventType::kWhatCheckpoint,   0ll, 0, 0, 0},
+
+        // activity arrives sparsely with an interval only slightly smaller than the
+        // timeout, so it should only trigger one ON
+        {EventType::kWhatActivity,     kSparseFrameIntervalUs, 2},
+        {EventType::kWhatCheckpoint,   0ll, 0, 1, 0},
+        {EventType::kWhatCheckpoint,   kSparseFrameIntervalUs, 0, 1, 0},
+        {EventType::kWhatCheckpoint,   kTestMarginUs, 0, 1, 1}, // OFF
+    }, 10000000ll);
+}
+} // namespace android
diff --git a/media/libstagefright/xmlparser/api/current.txt b/media/libstagefright/xmlparser/api/current.txt
index 9d7c57d..16c8af8 100644
--- a/media/libstagefright/xmlparser/api/current.txt
+++ b/media/libstagefright/xmlparser/api/current.txt
@@ -138,7 +138,12 @@
 
   public class Variant {
     ctor public Variant();
+    method public java.util.List<media.codecs.Alias> getAlias_optional();
+    method public java.util.List<media.codecs.Quirk> getAttribute_optional();
+    method public java.util.List<media.codecs.Feature> getFeature_optional();
+    method public java.util.List<media.codecs.Limit> getLimit_optional();
     method public String getName();
+    method public java.util.List<media.codecs.Quirk> getQuirk_optional();
     method public void setName(String);
   }
 
diff --git a/media/libstagefright/xmlparser/media_codecs.xsd b/media/libstagefright/xmlparser/media_codecs.xsd
index 63ec5d0..3b5681f 100644
--- a/media/libstagefright/xmlparser/media_codecs.xsd
+++ b/media/libstagefright/xmlparser/media_codecs.xsd
@@ -107,6 +107,13 @@
         <xs:attribute name="value" type="xs:string"/>
     </xs:complexType>
     <xs:complexType name="Variant">
+        <xs:choice minOccurs="0" maxOccurs="unbounded">
+            <xs:element name="Quirk" type="Quirk"/>
+            <xs:element name="Attribute" type="Quirk"/>
+            <xs:element name="Alias" type="Alias"/>
+            <xs:element name="Limit" type="Limit"/>
+            <xs:element name="Feature" type="Feature"/>
+        </xs:choice>
         <xs:attribute name="name" type="xs:string"/>
     </xs:complexType>
     <xs:complexType name="Setting">
diff --git a/media/mtp/MtpServer.h b/media/mtp/MtpServer.h
index 1f8799f..8cc9a9a 100644
--- a/media/mtp/MtpServer.h
+++ b/media/mtp/MtpServer.h
@@ -34,8 +34,11 @@
 
 class IMtpDatabase;
 class MtpStorage;
+class MtpMockServer;
 
 class MtpServer {
+    // libFuzzer testing
+    friend class MtpMockServer;
 
 private:
     IMtpDatabase*       mDatabase;
diff --git a/media/mtp/MtpStringBuffer.cpp b/media/mtp/MtpStringBuffer.cpp
index cd379bf..d8d425b 100644
--- a/media/mtp/MtpStringBuffer.cpp
+++ b/media/mtp/MtpStringBuffer.cpp
@@ -26,14 +26,31 @@
 
 namespace {
 
-std::wstring_convert<std::codecvt_utf8_utf16<char16_t>,char16_t> gConvert;
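+// Error sentinels: wstring_convert returns these strings (instead of throwing)
+// when a conversion fails, letting the helpers below detect and log the failure.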
+const char * utf16_cerror = "__CONVERSION_ERROR__";
+const char16_t * utf8_cerror = u"__CONVERSION_ERROR__";
+
+std::wstring_convert<std::codecvt_utf8_utf16<char16_t>,char16_t> gConvert(utf16_cerror, utf8_cerror);
 
 static std::string utf16ToUtf8(std::u16string input_str) {
-    return gConvert.to_bytes(input_str);
+    std::string conversion = gConvert.to_bytes(input_str);
+
+    if (conversion == utf16_cerror) {
+        ALOGE("Unable to convert UTF-16 string to UTF-8");
+        return "";
+    } else {
+        return conversion;
+    }
 }
 
 static std::u16string utf8ToUtf16(std::string input_str) {
-    return gConvert.from_bytes(input_str);
+    std::u16string conversion = gConvert.from_bytes(input_str);
+
+    if (conversion == utf8_cerror) {
+        ALOGE("Unable to convert UTF-8 string to UTF-16");
+        return u"";
+    } else {
+        return conversion;
+    }
 }
 
 } // namespace
diff --git a/media/mtp/MtpUtils.cpp b/media/mtp/MtpUtils.cpp
index 8564576..84a20d3 100644
--- a/media/mtp/MtpUtils.cpp
+++ b/media/mtp/MtpUtils.cpp
@@ -150,6 +150,7 @@
             ret += copyFile(oldFile.c_str(), newFile.c_str());
         }
     }
+    closedir(dir);
     return ret;
 }
 
diff --git a/media/ndk/NdkImage.cpp b/media/ndk/NdkImage.cpp
index 1883f63..1145b7b 100644
--- a/media/ndk/NdkImage.cpp
+++ b/media/ndk/NdkImage.cpp
@@ -35,6 +35,7 @@
         int64_t timestamp, int32_t width, int32_t height, int32_t numPlanes) :
         mReader(reader), mFormat(format), mUsage(usage), mBuffer(buffer), mLockedBuffer(nullptr),
         mTimestamp(timestamp), mWidth(width), mHeight(height), mNumPlanes(numPlanes) {
+    LOG_FATAL_IF(reader == nullptr, "AImageReader shouldn't be null while creating AImage");
 }
 
 AImage::~AImage() {
@@ -57,14 +58,9 @@
     if (mIsClosed) {
         return;
     }
-    sp<AImageReader> reader = mReader.promote();
-    if (reader != nullptr) {
-        reader->releaseImageLocked(this, releaseFenceFd);
-    } else if (mBuffer != nullptr) {
-        LOG_ALWAYS_FATAL("%s: parent AImageReader closed without releasing image %p",
-                __FUNCTION__, this);
+    if (!mReader->mIsClosed) {
+        mReader->releaseImageLocked(this, releaseFenceFd);
     }
-
     // Should have been set to nullptr in releaseImageLocked
     // Set to nullptr here for extra safety only
     mBuffer = nullptr;
@@ -83,22 +79,12 @@
 
 void
 AImage::lockReader() const {
-    sp<AImageReader> reader = mReader.promote();
-    if (reader == nullptr) {
-        // Reader has been closed
-        return;
-    }
-    reader->mLock.lock();
+    mReader->mLock.lock();
 }
 
 void
 AImage::unlockReader() const {
-    sp<AImageReader> reader = mReader.promote();
-    if (reader == nullptr) {
-        // Reader has been closed
-        return;
-    }
-    reader->mLock.unlock();
+    mReader->mLock.unlock();
 }
 
 media_status_t
diff --git a/media/ndk/NdkImagePriv.h b/media/ndk/NdkImagePriv.h
index e0f16da..0e8cbcb 100644
--- a/media/ndk/NdkImagePriv.h
+++ b/media/ndk/NdkImagePriv.h
@@ -72,7 +72,7 @@
     uint32_t getJpegSize() const;
 
     // When reader is closed, AImage will only accept the close API call
-    wp<AImageReader>           mReader;
+    const sp<AImageReader>     mReader;
     const int32_t              mFormat;
     const uint64_t             mUsage;  // AHARDWAREBUFFER_USAGE_* flags.
     BufferItem*                mBuffer;
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index 830f752..c0ceb3d 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -272,6 +272,11 @@
       mFrameListener(new FrameListener(this)),
       mBufferRemovedListener(new BufferRemovedListener(this)) {}
 
+AImageReader::~AImageReader() {
+    Mutex::Autolock _l(mLock);
+    LOG_FATAL_IF(mIsClosed != true, "AImageReader not closed before destruction");
+}
+
 media_status_t
 AImageReader::init() {
     PublicFormat publicFormat = static_cast<PublicFormat>(mFormat);
@@ -347,8 +352,12 @@
     return AMEDIA_OK;
 }
 
-AImageReader::~AImageReader() {
+void AImageReader::close() {
     Mutex::Autolock _l(mLock);
+    if (mIsClosed) {
+        return;
+    }
+    mIsClosed = true;
     AImageReader_ImageListener nullListener = {nullptr, nullptr};
     setImageListenerLocked(&nullListener);
 
@@ -741,6 +750,7 @@
 void AImageReader_delete(AImageReader* reader) {
     ALOGV("%s", __FUNCTION__);
     if (reader != nullptr) {
+        reader->close();
         reader->decStrong((void*) AImageReader_delete);
     }
     return;
diff --git a/media/ndk/NdkImageReaderPriv.h b/media/ndk/NdkImageReaderPriv.h
index 19bd704..0779a71 100644
--- a/media/ndk/NdkImageReaderPriv.h
+++ b/media/ndk/NdkImageReaderPriv.h
@@ -76,6 +76,7 @@
     int32_t        getHeight()    const { return mHeight; };
     int32_t        getFormat()    const { return mFormat; };
     int32_t        getMaxImages() const { return mMaxImages; };
+    void           close();
 
   private:
 
@@ -165,6 +166,7 @@
     native_handle_t*           mWindowHandle = nullptr;
 
     List<AImage*>              mAcquiredImages;
+    bool                       mIsClosed = false;
 
     Mutex                      mLock;
 };
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index c23f19b..e041533 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -221,7 +221,13 @@
                          break;
                      }
 
-                     AMediaFormat *aMediaFormat = AMediaFormat_fromMsg(&format);
+                     // Here format is MediaCodec's internal copy of output format.
+                     // Make a copy since the client might modify it.
+                     sp<AMessage> copy;
+                     if (format != nullptr) {
+                         copy = format->dup();
+                     }
+                     AMediaFormat *aMediaFormat = AMediaFormat_fromMsg(&copy);
 
                      Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
                      if (mCodec->mAsyncCallbackUserData != NULL
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 85dbffe..cd5a23a 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -89,7 +89,7 @@
 };
 
 void DrmListener::notify(DrmPlugin::EventType eventType, int extra, const Parcel *obj) {
-    if (!mEventListener && !mExpirationUpdateListener && !mKeysChangeListener) {
+    if (!mEventListener || !mExpirationUpdateListener || !mKeysChangeListener) {
         ALOGE("No listeners are specified");
         return;
     }
diff --git a/media/tests/benchmark/.clang-format b/media/tests/benchmark/.clang-format
new file mode 100644
index 0000000..bf1e355
--- /dev/null
+++ b/media/tests/benchmark/.clang-format
@@ -0,0 +1,13 @@
+BasedOnStyle: Google
+Standard: Cpp11
+AccessModifierOffset: -2
+AllowShortFunctionsOnASingleLine: Inline
+ColumnLimit: 100
+CommentPragmas: NOLINT:.*
+DerivePointerAlignment: false
+IncludeBlocks: Preserve
+IndentWidth: 4
+ContinuationIndentWidth: 8
+PointerAlignment: Right
+TabWidth: 4
+UseTab: Never
diff --git a/media/tests/benchmark/Android.bp b/media/tests/benchmark/Android.bp
new file mode 100644
index 0000000..8a7a59f
--- /dev/null
+++ b/media/tests/benchmark/Android.bp
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+subdirs = [
+    "src",
+    "tests",
+]
\ No newline at end of file
diff --git a/media/tests/benchmark/README.md b/media/tests/benchmark/README.md
new file mode 100644
index 0000000..8db3fd3
--- /dev/null
+++ b/media/tests/benchmark/README.md
@@ -0,0 +1,60 @@
+# Benchmark tests
+
+Run the following steps to build the test suite:
+```
+mmm frameworks/av/media/tests/benchmark/
+```
+
+The binaries will be created in the following path: ${OUT}/data/nativetest64/
+
+adb push $(OUT)/data/nativetest64/* /data/local/tmp/
+
+E.g. adb push $(OUT)/data/nativetest64/extractorTest/extractorTest /data/local/tmp/
+
+To run the binary, follow the commands mentioned below under each module.
+
+The resource files for the tests are taken from [here](https://drive.google.com/open?id=1ghMr17BBJ7n0pqbm7oREiTN_MNemJUqy)
+
+## Extractor
+
+The test extracts the elementary stream and benchmarks the extractors available in the NDK.
+
+Push the resource files to /sdcard/res on the device.
+
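+For example, assuming the resource files were downloaded into a local directory named res, they can be pushed with:
+
+```
+adb push res /sdcard/
+```
+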
+You can use a different location, but you have to modify the rest of the instructions to replace /sdcard/res with wherever you chose to put the files.
+
+The path to these files on the device must be passed to the test.
+
+```
+adb shell /data/local/tmp/extractorTest -P /sdcard/res/
+```
+
+## Decoder
+
+The test decodes the input stream and benchmarks the decoders available in the NDK.
+
+Setup steps are the same as for the extractor.
+
+```
+adb shell /data/local/tmp/decoderTest -P /sdcard/res/
+```
+
+## Muxer
+
+The test muxes the elementary stream and benchmarks the muxers available in the NDK.
+
+Setup steps are the same as for the extractor.
+
+```
+adb shell /data/local/tmp/muxerTest -P /sdcard/res/
+```
+
+## Encoder
+
+The test encodes the input stream and benchmarks the encoders available in the NDK.
+
+Setup steps are the same as for the extractor.
+
+```
+adb shell /data/local/tmp/encoderTest -P /sdcard/res/
+```
diff --git a/media/tests/benchmark/src/native/common/Android.bp b/media/tests/benchmark/src/native/common/Android.bp
new file mode 100644
index 0000000..527f588
--- /dev/null
+++ b/media/tests/benchmark/src/native/common/Android.bp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_library_static {
+    name: "libbenchmark_common",
+    defaults: [
+        "libbenchmark-defaults",
+        "libbenchmark_soft_sanitize_all-defaults",
+    ],
+
+    srcs: [
+        "BenchmarkCommon.cpp",
+        "Stats.cpp",
+    ],
+
+    export_include_dirs: ["."],
+
+    ldflags: ["-Wl,-Bsymbolic"]
+}
+
+cc_defaults {
+    name: "libbenchmark_common-defaults",
+
+    defaults: [
+        "libbenchmark-defaults",
+    ],
+
+    static_libs: [
+        "libbenchmark_common",
+    ],
+}
+
+cc_defaults {
+    name: "libbenchmark-defaults",
+
+    header_libs: [
+        "media_ndk_headers",
+    ],
+
+    shared_libs: [
+        "libmediandk",
+        "liblog",
+        "libutils",
+    ],
+
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ]
+}
+
+// public dependency for native implementation
+// to be used by code under media/benchmark/* only
+cc_defaults {
+    name: "libbenchmark_soft_sanitize_all-defaults",
+
+    sanitize: {
+        misc_undefined: [
+            "unsigned-integer-overflow",
+            "signed-integer-overflow",
+        ],
+        cfi: true,
+        address: true,
+    }
+}
diff --git a/media/tests/benchmark/src/native/common/BenchmarkCommon.cpp b/media/tests/benchmark/src/native/common/BenchmarkCommon.cpp
new file mode 100644
index 0000000..5bdb48a
--- /dev/null
+++ b/media/tests/benchmark/src/native/common/BenchmarkCommon.cpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "BenchmarkCommon"
+
+#include "BenchmarkCommon.h"
+#include <iostream>
+
+void CallBackHandle::ioThread() {
+    ALOGV("In %s mIsDone : %d, mSawError : %d ", __func__, mIsDone, mSawError);
+    while (!mIsDone && !mSawError) {
+        auto task = mIOQueue.pop();
+        task();
+    }
+}
+
+void OnInputAvailableCB(AMediaCodec *codec, void *userdata, int32_t index) {
+    ALOGV("OnInputAvailableCB: index(%d)", index);
+    CallBackHandle *self = (CallBackHandle *)userdata;
+    self->getStats()->addInputTime();
+    self->mIOQueue.push([self, codec, index]() { self->onInputAvailable(codec, index); });
+}
+
+void OnOutputAvailableCB(AMediaCodec *codec, void *userdata, int32_t index,
+                         AMediaCodecBufferInfo *bufferInfo) {
+    ALOGV("OnOutputAvailableCB: index(%d), (%d, %d, %lld, 0x%x)", index, bufferInfo->offset,
+          bufferInfo->size, (long long)bufferInfo->presentationTimeUs, bufferInfo->flags);
+    CallBackHandle *self = (CallBackHandle *)userdata;
+    self->getStats()->addOutputTime();
+    AMediaCodecBufferInfo bufferInfoCopy = *bufferInfo;
+    self->mIOQueue.push([self, codec, index, bufferInfoCopy]() {
+        AMediaCodecBufferInfo bc = bufferInfoCopy;
+        self->onOutputAvailable(codec, index, &bc);
+    });
+}
+
+void OnFormatChangedCB(AMediaCodec *codec, void *userdata, AMediaFormat *format) {
+    ALOGV("OnFormatChangedCB: format(%s)", AMediaFormat_toString(format));
+    CallBackHandle *self = (CallBackHandle *)userdata;
+    self->mIOQueue.push([self, codec, format]() { self->onFormatChanged(codec, format); });
+}
+
+void OnErrorCB(AMediaCodec *codec, void *userdata, media_status_t err, int32_t actionCode,
+               const char *detail) {
+    (void)codec;
+    ALOGV("OnErrorCB: err(%d), actionCode(%d), detail(%s)", err, actionCode, detail);
+    CallBackHandle *self = (CallBackHandle *)userdata;
+    self->mSawError = true;
+}
+
+AMediaCodec *createMediaCodec(AMediaFormat *format, const char *mime, string codecName,
+                              bool isEncoder) {
+    ALOGV("In %s", __func__);
+    if (!mime) {
+        ALOGE("Please specify a mime type to create codec");
+        return nullptr;
+    }
+
+    AMediaCodec *codec;
+    if (!codecName.empty()) {
+        codec = AMediaCodec_createCodecByName(codecName.c_str());
+        if (!codec) {
+            ALOGE("Unable to create codec by name: %s", codecName.c_str());
+            return nullptr;
+        }
+    } else {
+        if (isEncoder) {
+            codec = AMediaCodec_createEncoderByType(mime);
+        } else {
+            codec = AMediaCodec_createDecoderByType(mime);
+        }
+        if (!codec) {
+            ALOGE("Unable to create codec by mime: %s", mime);
+            return nullptr;
+        }
+    }
+
+    /* Configure codec with the given format */
+    const char *s = AMediaFormat_toString(format);
+    ALOGV("Input format: %s\n", s);
+
+    media_status_t status = AMediaCodec_configure(codec, format, nullptr, nullptr, isEncoder);
+    if (status != AMEDIA_OK) {
+        ALOGE("AMediaCodec_configure failed %d", status);
+        // Release the codec so it is not leaked on a failed configure.
+        AMediaCodec_delete(codec);
+        return nullptr;
+    }
+    return codec;
+}
\ No newline at end of file
diff --git a/media/tests/benchmark/src/native/common/BenchmarkCommon.h b/media/tests/benchmark/src/native/common/BenchmarkCommon.h
new file mode 100644
index 0000000..df16baf
--- /dev/null
+++ b/media/tests/benchmark/src/native/common/BenchmarkCommon.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BENCHMARK_COMMON_H__
+#define __BENCHMARK_COMMON_H__
+
+#include <utils/Log.h>
+
+#include <inttypes.h>
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+#include <queue>
+#include <thread>
+
+#include <media/NdkMediaCodec.h>
+#include <media/NdkMediaError.h>
+
+#include "Stats.h"
+
+using namespace std;
+
+constexpr uint32_t kQueueDequeueTimeoutUs = 1000;
+constexpr uint32_t kMaxCSDStrlen = 16;
+constexpr uint32_t kMaxBufferSize = 1024 * 1024 * 16;
+
+template <typename T>
+class CallBackQueue {
+  public:
+    CallBackQueue() {}
+    ~CallBackQueue() {}
+
+    void push(T elem) {
+        bool needsNotify = false;
+        {
+            lock_guard<mutex> lock(mMutex);
+            needsNotify = mQueue.empty();
+            mQueue.push(move(elem));
+        }
+        if (needsNotify) mQueueNotEmptyCondition.notify_one();
+    }
+
+    T pop() {
+        unique_lock<mutex> lock(mMutex);
+        if (mQueue.empty()) {
+            mQueueNotEmptyCondition.wait(lock, [this]() { return !mQueue.empty(); });
+        }
+        auto result = mQueue.front();
+        mQueue.pop();
+        return result;
+    }
+
+  private:
+    mutex mMutex;
+    queue<T> mQueue;
+    condition_variable mQueueNotEmptyCondition;
+};
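+
+// Illustrative use of CallBackQueue (a sketch, not called by the benchmark itself):
+// the codec callbacks push small work items that CallBackHandle::ioThread() pops and runs.
+//
+//     CallBackQueue<function<void()>> queue;
+//     queue.push([]() { ALOGV("work item"); });
+//     queue.pop()();  // blocks until an item is available, then runs it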
+
+class CallBackHandle {
+  public:
+    CallBackHandle() : mSawError(false), mIsDone(false), mStats(nullptr) {
+        mStats = new Stats();
+    }
+
+    virtual ~CallBackHandle() {
+        if (mIOThread.joinable()) mIOThread.join();
+        if (mStats) delete mStats;
+    }
+
+    void ioThread();
+
+    // Implementation in child class (Decoder/Encoder)
+    virtual void onInputAvailable(AMediaCodec *codec, int32_t index) {
+        (void)codec;
+        (void)index;
+    }
+    virtual void onFormatChanged(AMediaCodec *codec, AMediaFormat *format) {
+        (void)codec;
+        (void)format;
+    }
+    virtual void onOutputAvailable(AMediaCodec *codec, int32_t index,
+                                   AMediaCodecBufferInfo *bufferInfo) {
+        (void)codec;
+        (void)index;
+        (void)bufferInfo;
+    }
+
+    Stats *getStats() { return mStats; }
+
+    // Keep a queue of all function callbacks.
+    typedef function<void()> IOTask;
+    CallBackQueue<IOTask> mIOQueue;
+    thread mIOThread;
+    bool mSawError;
+    bool mIsDone;
+
+  protected:
+    Stats *mStats;
+};
+
+// Async API's callback
+void OnInputAvailableCB(AMediaCodec *codec, void *userdata, int32_t index);
+
+void OnOutputAvailableCB(AMediaCodec *codec, void *userdata, int32_t index,
+                         AMediaCodecBufferInfo *bufferInfo);
+
+void OnFormatChangedCB(AMediaCodec *codec, void *userdata, AMediaFormat *format);
+
+void OnErrorCB(AMediaCodec *codec, void * /* userdata */, media_status_t err, int32_t actionCode,
+               const char *detail);
+
+// Utility to create and configure AMediaCodec
+AMediaCodec *createMediaCodec(AMediaFormat *format, const char *mime, string codecName,
+                              bool isEncoder);
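+
+// Illustrative call (a sketch; real callers pass a format obtained from the
+// extractor, or one filled in by the encoder, see Decoder.cpp / Encoder.cpp):
+//
+//     AMediaCodec *codec = createMediaCodec(format, mime, "" /* select by mime */,
+//                                           false /* isEncoder */);
+//     if (codec) AMediaCodec_start(codec);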
+
+#endif  // __BENCHMARK_COMMON_H__
diff --git a/media/tests/benchmark/src/native/common/Stats.cpp b/media/tests/benchmark/src/native/common/Stats.cpp
new file mode 100644
index 0000000..6bcd3ce
--- /dev/null
+++ b/media/tests/benchmark/src/native/common/Stats.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Stats"
+
+#include <iostream>
+#include <stdint.h>
+#include <utils/Log.h>
+
+#include "Stats.h"
+
+/**
+ * Dumps the stats of the operation for a given input media.
+ *
+ * \param operation      describes the operation performed on the input media
+ *                       (i.e. extract/mux/decode/encode)
+ * \param inputReference input media
+ * \param durationUs     duration of the input media in microseconds.
+ */
+void Stats::dumpStatistics(std::string operation, std::string inputReference, int64_t durationUs) {
+    ALOGV("In %s", __func__);
+    if (!mOutputTimer.size()) {
+        ALOGE("No output produced");
+        return;
+    }
+    nsecs_t totalTimeTakenNs = getTotalTime();
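+    // Normalize to time spent per one second of content: totalTimeTakenNs is in
+    // nanoseconds and durationUs is in microseconds, hence the 10^6 scale factor.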
+    nsecs_t timeTakenPerSec = (totalTimeTakenNs * 1000000) / durationUs;
+    nsecs_t timeToFirstFrameNs = *mOutputTimer.begin() - mStartTimeNs;
+    int32_t size = std::accumulate(mFrameSizes.begin(), mFrameSizes.end(), 0);
+    // get min and max output intervals.
+    nsecs_t intervalNs;
+    nsecs_t minTimeTakenNs = INT64_MAX;
+    nsecs_t maxTimeTakenNs = 0;
+    nsecs_t prevIntervalNs = mStartTimeNs;
+    for (size_t idx = 0; idx < mOutputTimer.size() - 1; idx++) {
+        intervalNs = mOutputTimer.at(idx) - prevIntervalNs;
+        prevIntervalNs = mOutputTimer.at(idx);
+        if (minTimeTakenNs > intervalNs) minTimeTakenNs = intervalNs;
+        if (maxTimeTakenNs < intervalNs) maxTimeTakenNs = intervalNs;
+    }
+
+    // Print the Stats
+    std::cout << "Input Reference : " << inputReference << endl;
+    std::cout << "Setup Time in nano sec : " << mInitTimeNs << endl;
+    std::cout << "Average Time in nano sec : " << totalTimeTakenNs / mOutputTimer.size() << endl;
+    std::cout << "Time to first frame in nano sec : " << timeToFirstFrameNs << endl;
+    std::cout << "Time taken (in nano sec) to " << operation
+              << " 1 sec of content : " << timeTakenPerSec << endl;
+    std::cout << "Total bytes " << operation << "ed : " << size << endl;
+    std::cout << "Minimum Time in nano sec : " << minTimeTakenNs << endl;
+    std::cout << "Maximum Time in nano sec : " << maxTimeTakenNs << endl;
+    std::cout << "Destroy Time in nano sec : " << mDeInitTimeNs << endl;
+}
diff --git a/media/tests/benchmark/src/native/common/Stats.h b/media/tests/benchmark/src/native/common/Stats.h
new file mode 100644
index 0000000..024319a
--- /dev/null
+++ b/media/tests/benchmark/src/native/common/Stats.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __STATS_H__
+#define __STATS_H__
+
+#include <sys/time.h>
+#include <algorithm>
+#include <numeric>
+#include <vector>
+#include <utils/Timers.h>
+
+using namespace std;
+
+class Stats {
+  public:
+    Stats() {
+        mInitTimeNs = 0;
+        mDeInitTimeNs = 0;
+        mStartTimeNs = 0;
+    }
+
+    ~Stats() {
+        reset();
+    }
+
+  private:
+    nsecs_t mInitTimeNs;
+    nsecs_t mDeInitTimeNs;
+    nsecs_t mStartTimeNs;
+    std::vector<int32_t> mFrameSizes;
+    std::vector<nsecs_t> mInputTimer;
+    std::vector<nsecs_t> mOutputTimer;
+
+  public:
+    nsecs_t getCurTime() { return systemTime(CLOCK_MONOTONIC); }
+
+    void setInitTime(nsecs_t initTime) { mInitTimeNs = initTime; }
+
+    void setDeInitTime(nsecs_t deInitTime) { mDeInitTimeNs = deInitTime; }
+
+    void setStartTime() { mStartTimeNs = systemTime(CLOCK_MONOTONIC); }
+
+    void addFrameSize(int32_t size) { mFrameSizes.push_back(size); }
+
+    void addInputTime() { mInputTimer.push_back(systemTime(CLOCK_MONOTONIC)); }
+
+    void addOutputTime() { mOutputTimer.push_back(systemTime(CLOCK_MONOTONIC)); }
+
+    void reset() {
+        if (!mFrameSizes.empty()) mFrameSizes.clear();
+        if (!mInputTimer.empty()) mInputTimer.clear();
+        if (!mOutputTimer.empty()) mOutputTimer.clear();
+    }
+
+    std::vector<nsecs_t> getOutputTimer() { return mOutputTimer; }
+
+    nsecs_t getInitTime() { return mInitTimeNs; }
+
+    nsecs_t getDeInitTime() { return mDeInitTimeNs; }
+
+    nsecs_t getTimeDiff(nsecs_t sTime, nsecs_t eTime) { return (eTime - sTime); }
+
+    nsecs_t getTotalTime() {
+        if (mOutputTimer.empty()) return -1;
+        return (*(mOutputTimer.end() - 1) - mStartTimeNs);
+    }
+
+    void dumpStatistics(std::string operation, std::string inputReference, int64_t durationUs);
+};
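+
+// Typical measurement flow (a sketch mirroring how Extractor/Decoder/Encoder use Stats):
+//
+//     Stats stats;
+//     nsecs_t sTime = stats.getCurTime();
+//     /* create and configure the component under test */
+//     stats.setInitTime(stats.getTimeDiff(sTime, stats.getCurTime()));
+//     stats.setStartTime();
+//     /* on every output: stats.addOutputTime(); stats.addFrameSize(size); */
+//     stats.dumpStatistics("decode", "input.mp4", clipDurationUs);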
+
+#endif  // __STATS_H__
diff --git a/media/tests/benchmark/src/native/decoder/Android.bp b/media/tests/benchmark/src/native/decoder/Android.bp
new file mode 100644
index 0000000..f2d3db5
--- /dev/null
+++ b/media/tests/benchmark/src/native/decoder/Android.bp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_library_static {
+    name: "libbenchmark_decoder",
+    defaults: [
+        "libbenchmark_common-defaults",
+        "libbenchmark_soft_sanitize_all-defaults",
+    ],
+
+    srcs: ["Decoder.cpp"],
+
+    static_libs: ["libbenchmark_extractor"],
+
+    export_include_dirs: ["."],
+
+    ldflags: ["-Wl,-Bsymbolic"]
+}
diff --git a/media/tests/benchmark/src/native/decoder/Decoder.cpp b/media/tests/benchmark/src/native/decoder/Decoder.cpp
new file mode 100644
index 0000000..ef84537
--- /dev/null
+++ b/media/tests/benchmark/src/native/decoder/Decoder.cpp
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "decoder"
+
+#include <iostream>
+
+#include "Decoder.h"
+
+tuple<ssize_t, uint32_t, int64_t> readSampleData(uint8_t *inputBuffer, int32_t &offset,
+                                                 vector<AMediaCodecBufferInfo> &frameInfo,
+                                                 uint8_t *buf, int32_t frameID, size_t bufSize) {
+    ALOGV("In %s", __func__);
+    if (frameID == (int32_t)frameInfo.size()) {
+        return make_tuple(0, AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM, 0);
+    }
+    uint32_t flags = frameInfo[frameID].flags;
+    int64_t timestamp = frameInfo[frameID].presentationTimeUs;
+    ssize_t bytesCount = frameInfo[frameID].size;
+    if (bufSize < bytesCount) {
+        ALOGE("Error : insufficient resource");
+        return make_tuple(0, AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE, 0);
+    }
+
+    memcpy(buf, inputBuffer + offset, bytesCount);
+    offset += bytesCount;
+    return make_tuple(bytesCount, flags, timestamp);
+}
+
+void Decoder::onInputAvailable(AMediaCodec *mediaCodec, int32_t bufIdx) {
+    ALOGV("In %s", __func__);
+    if (mediaCodec == mCodec && mediaCodec) {
+        if (mSawInputEOS || bufIdx < 0) return;
+        if (mSignalledError) {
+            CallBackHandle::mSawError = true;
+            mDecoderDoneCondition.notify_one();
+            return;
+        }
+
+        size_t bufSize;
+        uint8_t *buf = AMediaCodec_getInputBuffer(mCodec, bufIdx, &bufSize);
+        if (!buf) {
+            mSignalledError = true;
+            mDecoderDoneCondition.notify_one();
+            return;
+        }
+
+        ssize_t bytesRead = 0;
+        uint32_t flag = 0;
+        int64_t presentationTimeUs = 0;
+        tie(bytesRead, flag, presentationTimeUs) = readSampleData(
+                mInputBuffer, mOffset, mFrameMetaData, buf, mNumInputFrame, bufSize);
+        if (flag == AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE) {
+            mSignalledError = true;
+            mDecoderDoneCondition.notify_one();
+            return;
+        }
+
+        if (flag == AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) mSawInputEOS = true;
+        ALOGV("%s bytesRead : %zd presentationTimeUs : %" PRId64 " mSawInputEOS : %s", __FUNCTION__,
+              bytesRead, presentationTimeUs, mSawInputEOS ? "TRUE" : "FALSE");
+
+        int status = AMediaCodec_queueInputBuffer(mCodec, bufIdx, 0 /* offset */, bytesRead,
+                                                  presentationTimeUs, flag);
+        if (AMEDIA_OK != status) {
+            mSignalledError = true;
+            mDecoderDoneCondition.notify_one();
+            return;
+        }
+        mStats->addFrameSize(bytesRead);
+        mNumInputFrame++;
+    }
+}
+
+void Decoder::onOutputAvailable(AMediaCodec *mediaCodec, int32_t bufIdx,
+                                AMediaCodecBufferInfo *bufferInfo) {
+    ALOGV("In %s", __func__);
+    if (mediaCodec == mCodec && mediaCodec) {
+        if (mSawOutputEOS || bufIdx < 0) return;
+        if (mSignalledError) {
+            CallBackHandle::mSawError = true;
+            mDecoderDoneCondition.notify_one();
+            return;
+        }
+
+        if (mOutFp != nullptr) {
+            size_t bufSize;
+            uint8_t *buf = AMediaCodec_getOutputBuffer(mCodec, bufIdx, &bufSize);
+            if (buf) {
+                fwrite(buf, sizeof(char), bufferInfo->size, mOutFp);
+                ALOGV("bytes written into file  %d\n", bufferInfo->size);
+            }
+        }
+
+        AMediaCodec_releaseOutputBuffer(mCodec, bufIdx, false);
+        mSawOutputEOS = (0 != (bufferInfo->flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM));
+        mNumOutputFrame++;
+        ALOGV("%s index : %d  mSawOutputEOS : %s count : %u", __FUNCTION__, bufIdx,
+              mSawOutputEOS ? "TRUE" : "FALSE", mNumOutputFrame);
+
+        if (mSawOutputEOS) {
+            CallBackHandle::mIsDone = true;
+            mDecoderDoneCondition.notify_one();
+        }
+    }
+}
+
+void Decoder::onFormatChanged(AMediaCodec *mediaCodec, AMediaFormat *format) {
+    ALOGV("In %s", __func__);
+    if (mediaCodec == mCodec && mediaCodec) {
+        ALOGV("%s { %s }", __FUNCTION__, AMediaFormat_toString(format));
+        mFormat = format;
+    }
+}
+
+void Decoder::setupDecoder() {
+    if (!mFormat) mFormat = mExtractor->getFormat();
+}
+
+int32_t Decoder::decode(uint8_t *inputBuffer, vector<AMediaCodecBufferInfo> &frameInfo,
+                        string &codecName, bool asyncMode, FILE *outFp) {
+    ALOGV("In %s", __func__);
+    mInputBuffer = inputBuffer;
+    mFrameMetaData = frameInfo;
+    mOffset = 0;
+    mOutFp = outFp;
+
+    const char *mime = nullptr;
+    AMediaFormat_getString(mFormat, AMEDIAFORMAT_KEY_MIME, &mime);
+    if (!mime) return AMEDIA_ERROR_INVALID_OBJECT;
+
+    int64_t sTime = mStats->getCurTime();
+    mCodec = createMediaCodec(mFormat, mime, codecName, false /*isEncoder*/);
+    if (!mCodec) return AMEDIA_ERROR_INVALID_OBJECT;
+
+    if (asyncMode) {
+        AMediaCodecOnAsyncNotifyCallback aCB = {OnInputAvailableCB, OnOutputAvailableCB,
+                                                OnFormatChangedCB, OnErrorCB};
+        AMediaCodec_setAsyncNotifyCallback(mCodec, aCB, this);
+
+        mIOThread = thread(&CallBackHandle::ioThread, this);
+    }
+
+    AMediaCodec_start(mCodec);
+    int64_t eTime = mStats->getCurTime();
+    int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+    mStats->setInitTime(timeTaken);
+
+    mStats->setStartTime();
+    if (!asyncMode) {
+        while (!mSawOutputEOS && !mSignalledError) {
+            /* Queue input data */
+            if (!mSawInputEOS) {
+                ssize_t inIdx = AMediaCodec_dequeueInputBuffer(mCodec, kQueueDequeueTimeoutUs);
+                if (inIdx < 0 && inIdx != AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
+                    ALOGE("AMediaCodec_dequeueInputBuffer returned invalid index %zd\n", inIdx);
+                    return AMEDIA_ERROR_IO;
+                } else if (inIdx >= 0) {
+                    mStats->addInputTime();
+                    onInputAvailable(mCodec, inIdx);
+                }
+            }
+
+            /* Dequeue output data */
+            AMediaCodecBufferInfo info;
+            ssize_t outIdx = AMediaCodec_dequeueOutputBuffer(mCodec, &info, kQueueDequeueTimeoutUs);
+            if (outIdx == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
+                mFormat = AMediaCodec_getOutputFormat(mCodec);
+                const char *s = AMediaFormat_toString(mFormat);
+                ALOGI("Output format: %s\n", s);
+            } else if (outIdx >= 0) {
+                mStats->addOutputTime();
+                onOutputAvailable(mCodec, outIdx, &info);
+            } else if (!(outIdx == AMEDIACODEC_INFO_TRY_AGAIN_LATER ||
+                         outIdx == AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED)) {
+                ALOGE("AMediaCodec_dequeueOutputBuffer returned invalid index %zd\n", outIdx);
+                return AMEDIA_ERROR_IO;
+            }
+        }
+    } else {
+        unique_lock<mutex> lock(mMutex);
+        mDecoderDoneCondition.wait(lock, [this]() { return (mSawOutputEOS || mSignalledError); });
+    }
+
+    if (codecName.empty()) {
+        char *decName;
+        AMediaCodec_getName(mCodec, &decName);
+        codecName.assign(decName);
+        AMediaCodec_releaseName(mCodec, decName);
+    }
+    return AMEDIA_OK;
+}
+
+void Decoder::deInitCodec() {
+    int64_t sTime = mStats->getCurTime();
+    if (mFormat) {
+        AMediaFormat_delete(mFormat);
+        mFormat = nullptr;
+    }
+    if (!mCodec) return;
+    AMediaCodec_stop(mCodec);
+    AMediaCodec_delete(mCodec);
+    int64_t eTime = mStats->getCurTime();
+    int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+    mStats->setDeInitTime(timeTaken);
+}
+
+void Decoder::dumpStatistics(string inputReference) {
+    int64_t durationUs = mExtractor->getClipDuration();
+    string operation = "decode";
+    mStats->dumpStatistics(operation, inputReference, durationUs);
+}
+
+void Decoder::resetDecoder() {
+    if (mStats) mStats->reset();
+    if (mInputBuffer) mInputBuffer = nullptr;
+    if (!mFrameMetaData.empty()) mFrameMetaData.clear();
+}
diff --git a/media/tests/benchmark/src/native/decoder/Decoder.h b/media/tests/benchmark/src/native/decoder/Decoder.h
new file mode 100644
index 0000000..7630e7b
--- /dev/null
+++ b/media/tests/benchmark/src/native/decoder/Decoder.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __DECODER_H__
+#define __DECODER_H__
+
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <thread>
+
+#include "BenchmarkCommon.h"
+#include "Extractor.h"
+#include "Stats.h"
+
+class Decoder : public CallBackHandle {
+  public:
+    Decoder()
+        : mCodec(nullptr),
+          mFormat(nullptr),
+          mExtractor(nullptr),
+          mNumInputFrame(0),
+          mNumOutputFrame(0),
+          mSawInputEOS(false),
+          mSawOutputEOS(false),
+          mSignalledError(false),
+          mInputBuffer(nullptr),
+          mOutFp(nullptr) {
+        mExtractor = new Extractor();
+    }
+
+    virtual ~Decoder() {
+        if (mExtractor) delete mExtractor;
+    }
+
+    Extractor *getExtractor() { return mExtractor; }
+
+    // Decoder related utilities
+    void setupDecoder();
+
+    void deInitCodec();
+
+    void resetDecoder();
+
+    // Async callback APIs
+    void onInputAvailable(AMediaCodec *codec, int32_t index) override;
+
+    void onFormatChanged(AMediaCodec *codec, AMediaFormat *format) override;
+
+    void onOutputAvailable(AMediaCodec *codec, int32_t index,
+                           AMediaCodecBufferInfo *bufferInfo) override;
+
+    // Process the frames and give decoded output
+    int32_t decode(uint8_t *inputBuffer, vector<AMediaCodecBufferInfo> &frameInfo,
+                   string &codecName, bool asyncMode, FILE *outFp = nullptr);
+
+    void dumpStatistics(string inputReference);
+
+  private:
+    AMediaCodec *mCodec;
+    AMediaFormat *mFormat;
+
+    Extractor *mExtractor;
+
+    int32_t mNumInputFrame;
+    int32_t mNumOutputFrame;
+
+    bool mSawInputEOS;
+    bool mSawOutputEOS;
+    bool mSignalledError;
+
+    int32_t mOffset;
+    uint8_t *mInputBuffer;
+    vector<AMediaCodecBufferInfo> mFrameMetaData;
+    FILE *mOutFp;
+
+    /* Asynchronous locks */
+    mutex mMutex;
+    condition_variable mDecoderDoneCondition;
+};
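+
+// Typical synchronous decode flow (a sketch; obtaining fd/fileSize and filling
+// inputBuffer/frameInfo from the extractor output is the caller's job):
+//
+//     Decoder decoder;
+//     Extractor *extractor = decoder.getExtractor();
+//     extractor->initExtractor(fd, fileSize);
+//     extractor->setupTrackFormat(0 /* trackId */);
+//     decoder.setupDecoder();
+//     string codecName = "";
+//     decoder.decode(inputBuffer, frameInfo, codecName, false /* asyncMode */);
+//     decoder.dumpStatistics("input.mp4");
+//     decoder.deInitCodec();
+//     decoder.resetDecoder();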
+
+// Read input samples
+tuple<ssize_t, uint32_t, int64_t> readSampleData(uint8_t *inputBuffer, int32_t &offset,
+                                                 vector<AMediaCodecBufferInfo> &frameSizes,
+                                                 uint8_t *buf, int32_t frameID, size_t bufSize);
+
+#endif  // __DECODER_H__
diff --git a/media/tests/benchmark/src/native/encoder/Android.bp b/media/tests/benchmark/src/native/encoder/Android.bp
new file mode 100644
index 0000000..c14c319
--- /dev/null
+++ b/media/tests/benchmark/src/native/encoder/Android.bp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_library_static {
+    name: "libbenchmark_encoder",
+    defaults: [
+        "libbenchmark_common-defaults",
+        "libbenchmark_soft_sanitize_all-defaults",
+    ],
+
+    srcs: ["Encoder.cpp"],
+
+    static_libs: ["libbenchmark_extractor",
+                  "libbenchmark_decoder",
+    ],
+
+    export_include_dirs: ["."],
+
+    ldflags: ["-Wl,-Bsymbolic"]
+}
diff --git a/media/tests/benchmark/src/native/encoder/Encoder.cpp b/media/tests/benchmark/src/native/encoder/Encoder.cpp
new file mode 100644
index 0000000..5fdf9e3
--- /dev/null
+++ b/media/tests/benchmark/src/native/encoder/Encoder.cpp
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "encoder"
+
+#include <fstream>
+
+#include "Encoder.h"
+
+void Encoder::onInputAvailable(AMediaCodec *mediaCodec, int32_t bufIdx) {
+    ALOGV("In %s", __func__);
+    if (mediaCodec == mCodec && mediaCodec) {
+        if (mSawInputEOS || bufIdx < 0) return;
+        if (mSignalledError) {
+            CallBackHandle::mSawError = true;
+            mEncoderDoneCondition.notify_one();
+            return;
+        }
+
+        size_t bufSize = 0;
+        char *buf = (char *)AMediaCodec_getInputBuffer(mCodec, bufIdx, &bufSize);
+        if (!buf) {
+            mSignalledError = true;
+            mEncoderDoneCondition.notify_one();
+            return;
+        }
+
+        if (mInputBufferSize < mOffset) {
+            ALOGE("Out of bound access of input buffer\n");
+            mSignalledError = true;
+            mEncoderDoneCondition.notify_one();
+            return;
+        }
+        size_t bytesRead = mParams.frameSize;
+        if (mInputBufferSize - mOffset < mParams.frameSize) {
+            bytesRead = mInputBufferSize - mOffset;
+        }
+        if (bufSize < bytesRead) {
+            ALOGE("bytes to read %zu bufSize %zu \n", bytesRead, bufSize);
+            mSignalledError = true;
+            mEncoderDoneCondition.notify_one();
+            return;
+        }
+        if (bytesRead < mParams.frameSize && mNumInputFrame < mParams.numFrames - 1) {
+            ALOGE("Partial frame at frameID %d bytesRead %zu frameSize %d total numFrames %d\n",
+                  mNumInputFrame, bytesRead, mParams.frameSize, mParams.numFrames);
+            mSignalledError = true;
+            mEncoderDoneCondition.notify_one();
+            return;
+        }
+        mEleStream->read(buf, bytesRead);
+        size_t bytesgcount = mEleStream->gcount();
+        if (bytesgcount != bytesRead) {
+            ALOGE("bytes to read %zu actual bytes read %zu \n", bytesRead, bytesgcount);
+            mSignalledError = true;
+            mEncoderDoneCondition.notify_one();
+            return;
+        }
+
+        uint32_t flag = 0;
+        if (mNumInputFrame == mParams.numFrames - 1 || bytesRead == 0) {
+            ALOGD("Sending EOS on input Last frame\n");
+            flag |= AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM;
+        }
+
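+        // Derive the timestamp from the frame index: video frames advance by
+        // 1/frameRate seconds, audio chunks advance as frameSize samples at sampleRate.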
+        uint64_t presentationTimeUs;
+        if (!strncmp(mMime, "video/", 6)) {
+            presentationTimeUs = mNumInputFrame * (1000000 / mParams.frameRate);
+        } else {
+            presentationTimeUs =
+                    (uint64_t)mNumInputFrame * mParams.frameSize * 1000000 / mParams.sampleRate;
+        }
+
+        if (flag == AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) mSawInputEOS = true;
+        ALOGV("%s bytesRead : %zd presentationTimeUs : %" PRIu64 " mSawInputEOS : %s", __FUNCTION__,
+              bytesRead, presentationTimeUs, mSawInputEOS ? "TRUE" : "FALSE");
+
+        int status = AMediaCodec_queueInputBuffer(mCodec, bufIdx, 0 /* offset */, bytesRead,
+                                                  presentationTimeUs, flag);
+        if (AMEDIA_OK != status) {
+            mSignalledError = true;
+            mEncoderDoneCondition.notify_one();
+            return;
+        }
+        mNumInputFrame++;
+        mOffset += bytesRead;
+    }
+}
+
+void Encoder::onOutputAvailable(AMediaCodec *mediaCodec, int32_t bufIdx,
+                                AMediaCodecBufferInfo *bufferInfo) {
+    ALOGV("In %s", __func__);
+    if (mediaCodec == mCodec && mediaCodec) {
+        if (mSawOutputEOS || bufIdx < 0) return;
+        if (mSignalledError) {
+            CallBackHandle::mSawError = true;
+            mEncoderDoneCondition.notify_one();
+            return;
+        }
+
+        mStats->addFrameSize(bufferInfo->size);
+        AMediaCodec_releaseOutputBuffer(mCodec, bufIdx, false);
+        mSawOutputEOS = (0 != (bufferInfo->flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM));
+        mNumOutputFrame++;
+        ALOGV("%s index : %d  mSawOutputEOS : %s count : %u", __FUNCTION__, bufIdx,
+              mSawOutputEOS ? "TRUE" : "FALSE", mNumOutputFrame);
+        if (mSawOutputEOS) {
+            CallBackHandle::mIsDone = true;
+            mEncoderDoneCondition.notify_one();
+        }
+    }
+}
+
+void Encoder::onFormatChanged(AMediaCodec *mediaCodec, AMediaFormat *format) {
+    ALOGV("In %s", __func__);
+    if (mediaCodec == mCodec && mediaCodec) {
+        ALOGV("%s { %s }", __FUNCTION__, AMediaFormat_toString(format));
+        mFormat = format;
+    }
+}
+
+void Encoder::setupEncoder() {
+    if (!mFormat) mFormat = AMediaFormat_new();
+}
+
+void Encoder::deInitCodec() {
+    int64_t sTime = mStats->getCurTime();
+    if (mFormat) {
+        AMediaFormat_delete(mFormat);
+        mFormat = nullptr;
+    }
+    if (!mCodec) return;
+    AMediaCodec_stop(mCodec);
+    AMediaCodec_delete(mCodec);
+    int64_t eTime = mStats->getCurTime();
+    int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+    mStats->setDeInitTime(timeTaken);
+}
+
+void Encoder::resetEncoder() {
+    if (mStats) mStats->reset();
+    if (mEleStream) mEleStream = nullptr;
+    if (mMime) mMime = nullptr;
+    mInputBufferSize = 0;
+    memset(&mParams, 0, sizeof mParams);
+}
+
+void Encoder::dumpStatistics(string inputReference, int64_t durationUs) {
+    string operation = "encode";
+    mStats->dumpStatistics(operation, inputReference, durationUs);
+}
+
+int32_t Encoder::encode(string &codecName, ifstream &eleStream, size_t eleSize,
+                        bool asyncMode, encParameter encParams, char *mime) {
+    ALOGV("In %s", __func__);
+    mEleStream = &eleStream;
+    mInputBufferSize = eleSize;
+    mParams = encParams;
+    mOffset = 0;
+    mMime = mime;
+    AMediaFormat_setString(mFormat, AMEDIAFORMAT_KEY_MIME, mMime);
+
+    // Set Format
+    if (!strncmp(mMime, "video/", 6)) {
+        AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_WIDTH, mParams.width);
+        AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_HEIGHT, mParams.height);
+        AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_FRAME_RATE, mParams.frameRate);
+        AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_BIT_RATE, mParams.bitrate);
+        AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_I_FRAME_INTERVAL, 1);
+        if (mParams.profile && mParams.level) {
+            AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_PROFILE, mParams.profile);
+            AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_LEVEL, mParams.level);
+        }
+    } else {
+        AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_SAMPLE_RATE, mParams.sampleRate);
+        AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_CHANNEL_COUNT, mParams.numChannels);
+        AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_BIT_RATE, mParams.bitrate);
+    }
+    const char *s = AMediaFormat_toString(mFormat);
+    ALOGV("Input format: %s\n", s);
+
+    int64_t sTime = mStats->getCurTime();
+    mCodec = createMediaCodec(mFormat, mMime, codecName, true /*isEncoder*/);
+    if (!mCodec) return AMEDIA_ERROR_INVALID_OBJECT;
+    int64_t eTime = mStats->getCurTime();
+    int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+
+    if (!strncmp(mMime, "video/", 6)) {
+        mParams.frameSize = mParams.width * mParams.height * 3 / 2;
+    } else {
+        mParams.frameSize = 4096;
+        // Get mInputMaxBufSize
+        AMediaFormat *inputFormat = AMediaCodec_getInputFormat(mCodec);
+        AMediaFormat_getInt32(inputFormat, AMEDIAFORMAT_KEY_MAX_INPUT_SIZE, &mParams.maxFrameSize);
+        if (mParams.maxFrameSize < 0) {
+            ALOGE("Invalid mParams.maxFrameSize %d\n", mParams.maxFrameSize);
+            return AMEDIA_ERROR_INVALID_PARAMETER;
+        }
+        if (mParams.frameSize > mParams.maxFrameSize) {
+            mParams.frameSize = mParams.maxFrameSize;
+        }
+    }
+    mParams.numFrames = (mInputBufferSize + mParams.frameSize - 1) / mParams.frameSize;
+
+    sTime = mStats->getCurTime();
+    if (asyncMode) {
+        AMediaCodecOnAsyncNotifyCallback aCB = {OnInputAvailableCB, OnOutputAvailableCB,
+                                                OnFormatChangedCB, OnErrorCB};
+        AMediaCodec_setAsyncNotifyCallback(mCodec, aCB, this);
+        mIOThread = thread(&CallBackHandle::ioThread, this);
+    }
+    AMediaCodec_start(mCodec);
+    eTime = mStats->getCurTime();
+    timeTaken += mStats->getTimeDiff(sTime, eTime);
+    mStats->setInitTime(timeTaken);
+
+    mStats->setStartTime();
+    if (!asyncMode) {
+        while (!mSawOutputEOS && !mSignalledError) {
+            // Queue input data
+            if (!mSawInputEOS) {
+                ssize_t inIdx = AMediaCodec_dequeueInputBuffer(mCodec, kQueueDequeueTimeoutUs);
+                if (inIdx < 0 && inIdx != AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
+                    ALOGE("AMediaCodec_dequeueInputBuffer returned invalid index %zd\n", inIdx);
+                    return AMEDIA_ERROR_IO;
+                } else if (inIdx >= 0) {
+                    mStats->addInputTime();
+                    onInputAvailable(mCodec, inIdx);
+                }
+            }
+
+            // Dequeue output data
+            AMediaCodecBufferInfo info;
+            ssize_t outIdx = AMediaCodec_dequeueOutputBuffer(mCodec, &info, kQueueDequeueTimeoutUs);
+            if (outIdx == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
+                mFormat = AMediaCodec_getOutputFormat(mCodec);
+                const char *s = AMediaFormat_toString(mFormat);
+                ALOGI("Output format: %s\n", s);
+            } else if (outIdx >= 0) {
+                mStats->addOutputTime();
+                onOutputAvailable(mCodec, outIdx, &info);
+            } else if (!(outIdx == AMEDIACODEC_INFO_TRY_AGAIN_LATER ||
+                         outIdx == AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED)) {
+                ALOGE("AMediaCodec_dequeueOutputBuffer returned invalid index %zd\n", outIdx);
+                return AMEDIA_ERROR_IO;
+            }
+        }
+    } else {
+        unique_lock<mutex> lock(mMutex);
+        mEncoderDoneCondition.wait(lock, [this]() { return (mSawOutputEOS || mSignalledError); });
+    }
+
+    if (codecName.empty()) {
+        char *encName;
+        AMediaCodec_getName(mCodec, &encName);
+        codecName.assign(encName);
+        AMediaCodec_releaseName(mCodec, encName);
+    }
+    return AMEDIA_OK;
+}
diff --git a/media/tests/benchmark/src/native/encoder/Encoder.h b/media/tests/benchmark/src/native/encoder/Encoder.h
new file mode 100644
index 0000000..75d9941
--- /dev/null
+++ b/media/tests/benchmark/src/native/encoder/Encoder.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCODER_H__
+#define __ENCODER_H__
+
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <thread>
+
+#include "BenchmarkCommon.h"
+#include "Stats.h"
+
+struct encParameter {
+    int32_t bitrate = -1;
+    int32_t numFrames = -1;
+    int32_t frameSize = -1;
+    int32_t sampleRate = 0;
+    int32_t numChannels = 0;
+    int32_t maxFrameSize = -1;
+    int32_t width = 0;
+    int32_t height = 0;
+    int32_t frameRate = -1;
+    int32_t profile = 0;
+    int32_t level = 0;
+};
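+
+// Example parameters (a sketch; the values are placeholders) matching the keys
+// Encoder::encode() writes into the AMediaFormat:
+//
+//     encParameter params;
+//     params.width = 1920;        // video only
+//     params.height = 1080;       // video only
+//     params.frameRate = 30;      // video only
+//     params.bitrate = 5000000;
+//     // audio instead uses params.sampleRate, params.numChannels and params.bitrate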
+
+class Encoder : public CallBackHandle {
+  public:
+    Encoder()
+        : mCodec(nullptr),
+          mFormat(nullptr),
+          mNumInputFrame(0),
+          mNumOutputFrame(0),
+          mSawInputEOS(false),
+          mSawOutputEOS(false),
+          mSignalledError(false) {}
+
+    virtual ~Encoder() {}
+
+    // Encoder related utilities
+    void setupEncoder();
+
+    void deInitCodec();
+
+    void resetEncoder();
+
+    // Async callback APIs
+    void onInputAvailable(AMediaCodec *codec, int32_t index) override;
+
+    void onFormatChanged(AMediaCodec *codec, AMediaFormat *format) override;
+
+    void onOutputAvailable(AMediaCodec *codec, int32_t index,
+                           AMediaCodecBufferInfo *bufferInfo) override;
+
+    // Process the frames and give encoded output
+    int32_t encode(std::string &codecName, std::ifstream &eleStream, size_t eleSize, bool asyncMode,
+                   encParameter encParams, char *mime);
+
+    void dumpStatistics(string inputReference, int64_t durationUs);
+
+  private:
+    AMediaCodec *mCodec;
+    AMediaFormat *mFormat;
+
+    int32_t mNumInputFrame;
+    int32_t mNumOutputFrame;
+    bool mSawInputEOS;
+    bool mSawOutputEOS;
+    bool mSignalledError;
+
+    char *mMime;
+    int32_t mOffset;
+    std::ifstream *mEleStream;
+    size_t mInputBufferSize;
+    encParameter mParams;
+
+    // Asynchronous locks
+    std::mutex mMutex;
+    std::condition_variable mEncoderDoneCondition;
+};
+#endif  // __ENCODER_H__
diff --git a/media/tests/benchmark/src/native/extractor/Android.bp b/media/tests/benchmark/src/native/extractor/Android.bp
new file mode 100644
index 0000000..2fbe4e8
--- /dev/null
+++ b/media/tests/benchmark/src/native/extractor/Android.bp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_library_static {
+    name: "libbenchmark_extractor",
+    defaults: [
+        "libbenchmark_common-defaults",
+        "libbenchmark_soft_sanitize_all-defaults",
+    ],
+
+    srcs: ["Extractor.cpp"],
+
+    export_include_dirs: ["."],
+
+    ldflags: ["-Wl,-Bsymbolic"]
+}
diff --git a/media/tests/benchmark/src/native/extractor/Extractor.cpp b/media/tests/benchmark/src/native/extractor/Extractor.cpp
new file mode 100644
index 0000000..b4cad0b
--- /dev/null
+++ b/media/tests/benchmark/src/native/extractor/Extractor.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "extractor"
+
+#include <iostream>
+
+#include "Extractor.h"
+
+int32_t Extractor::initExtractor(int32_t fd, size_t fileSize) {
+    mStats = new Stats();
+
+    mFrameBuf = (uint8_t *)calloc(kMaxBufferSize, sizeof(uint8_t));
+    if (!mFrameBuf) return -1;
+
+    int64_t sTime = mStats->getCurTime();
+
+    mExtractor = AMediaExtractor_new();
+    if (!mExtractor) return AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE;
+    media_status_t status = AMediaExtractor_setDataSourceFd(mExtractor, fd, 0, fileSize);
+    if (status != AMEDIA_OK) return status;
+
+    int64_t eTime = mStats->getCurTime();
+    int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+    mStats->setInitTime(timeTaken);
+
+    return AMediaExtractor_getTrackCount(mExtractor);
+}
+
+void *Extractor::getCSDSample(AMediaCodecBufferInfo &frameInfo, int32_t csdIndex) {
+    char csdName[kMaxCSDStrlen];
+    void *csdBuffer = nullptr;
+    frameInfo.presentationTimeUs = 0;
+    frameInfo.flags = AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG;
+    snprintf(csdName, sizeof(csdName), "csd-%d", csdIndex);
+
+    size_t size;
+    bool csdFound = AMediaFormat_getBuffer(mFormat, csdName, &csdBuffer, &size);
+    if (!csdFound) return nullptr;
+    frameInfo.size = (int32_t)size;
+    mStats->addFrameSize(frameInfo.size);
+
+    return csdBuffer;
+}
+
+int32_t Extractor::getFrameSample(AMediaCodecBufferInfo &frameInfo) {
+    int32_t size = AMediaExtractor_readSampleData(mExtractor, mFrameBuf, kMaxBufferSize);
+    if (size < 0) return -1;
+
+    frameInfo.flags = AMediaExtractor_getSampleFlags(mExtractor);
+    frameInfo.size = size;
+    mStats->addFrameSize(frameInfo.size);
+    frameInfo.presentationTimeUs = AMediaExtractor_getSampleTime(mExtractor);
+    AMediaExtractor_advance(mExtractor);
+
+    return 0;
+}
+
+int32_t Extractor::setupTrackFormat(int32_t trackId) {
+    AMediaExtractor_selectTrack(mExtractor, trackId);
+    mFormat = AMediaExtractor_getTrackFormat(mExtractor, trackId);
+    if (!mFormat) return AMEDIA_ERROR_INVALID_OBJECT;
+
+    bool durationFound = AMediaFormat_getInt64(mFormat, AMEDIAFORMAT_KEY_DURATION, &mDurationUs);
+    if (!durationFound) return AMEDIA_ERROR_INVALID_OBJECT;
+
+    return AMEDIA_OK;
+}
+
+int32_t Extractor::extract(int32_t trackId) {
+    int32_t status = setupTrackFormat(trackId);
+    if (status != AMEDIA_OK) return status;
+
+    int32_t idx = 0;
+    AMediaCodecBufferInfo frameInfo;
+    while (1) {
+        memset(&frameInfo, 0, sizeof(AMediaCodecBufferInfo));
+        void *csdBuffer = getCSDSample(frameInfo, idx);
+        if (!csdBuffer || !frameInfo.size) break;
+        idx++;
+    }
+
+    mStats->setStartTime();
+    while (1) {
+        int32_t status = getFrameSample(frameInfo);
+        if (status || !frameInfo.size) break;
+        mStats->addOutputTime();
+    }
+
+    if (mFormat) {
+        AMediaFormat_delete(mFormat);
+        mFormat = nullptr;
+    }
+
+    AMediaExtractor_unselectTrack(mExtractor, trackId);
+
+    return AMEDIA_OK;
+}
+
+void Extractor::dumpStatistics(string inputReference) {
+    string operation = "extract";
+    mStats->dumpStatistics(operation, inputReference, mDurationUs);
+}
+
+void Extractor::deInitExtractor() {
+    if (mFrameBuf) {
+        free(mFrameBuf);
+        mFrameBuf = nullptr;
+    }
+
+    int64_t sTime = mStats->getCurTime();
+    if (mExtractor) {
+        // TODO: (b/140128505) Multiple calls result in DoS.
+        // Uncomment call to AMediaExtractor_delete() once this is resolved
+        // AMediaExtractor_delete(mExtractor);
+        mExtractor = nullptr;
+    }
+    int64_t eTime = mStats->getCurTime();
+    int64_t deInitTime = mStats->getTimeDiff(sTime, eTime);
+    mStats->setDeInitTime(deInitTime);
+}
diff --git a/media/tests/benchmark/src/native/extractor/Extractor.h b/media/tests/benchmark/src/native/extractor/Extractor.h
new file mode 100644
index 0000000..4c39a72
--- /dev/null
+++ b/media/tests/benchmark/src/native/extractor/Extractor.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __EXTRACTOR_H__
+#define __EXTRACTOR_H__
+
+#include <media/NdkMediaExtractor.h>
+
+#include "BenchmarkCommon.h"
+#include "Stats.h"
+
+class Extractor {
+  public:
+    Extractor()
+        : mFormat(nullptr),
+          mExtractor(nullptr),
+          mStats(nullptr),
+          mFrameBuf{nullptr},
+          mDurationUs{0} {}
+
+    ~Extractor() {
+        if (mStats) delete mStats;
+    }
+
+    int32_t initExtractor(int32_t fd, size_t fileSize);
+
+    int32_t setupTrackFormat(int32_t trackId);
+
+    void *getCSDSample(AMediaCodecBufferInfo &frameInfo, int32_t csdIndex);
+
+    int32_t getFrameSample(AMediaCodecBufferInfo &frameInfo);
+
+    int32_t extract(int32_t trackId);
+
+    void dumpStatistics(std::string inputReference);
+
+    void deInitExtractor();
+
+    AMediaFormat *getFormat() { return mFormat; }
+
+    uint8_t *getFrameBuf() { return mFrameBuf; }
+
+    int64_t getClipDuration() { return mDurationUs; }
+
+  private:
+    AMediaFormat *mFormat;
+    AMediaExtractor *mExtractor;
+    Stats *mStats;
+    uint8_t *mFrameBuf;
+    int64_t mDurationUs;
+};
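+
+// Typical flow (a sketch; fd and fileSize for the input clip come from the caller):
+//
+//     Extractor extractor;
+//     int32_t trackCount = extractor.initExtractor(fd, fileSize);
+//     for (int32_t curTrack = 0; curTrack < trackCount; curTrack++) {
+//         extractor.extract(curTrack);
+//         extractor.dumpStatistics("input.mp4");
+//     }
+//     extractor.deInitExtractor();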
+
+#endif  // __EXTRACTOR_H__
\ No newline at end of file
diff --git a/media/tests/benchmark/src/native/muxer/Android.bp b/media/tests/benchmark/src/native/muxer/Android.bp
new file mode 100644
index 0000000..6ef2a2e
--- /dev/null
+++ b/media/tests/benchmark/src/native/muxer/Android.bp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_library_static {
+    name: "libbenchmark_muxer",
+    defaults: [
+        "libbenchmark_common-defaults",
+        "libbenchmark_soft_sanitize_all-defaults",
+    ],
+
+    srcs: ["Muxer.cpp"],
+
+    static_libs: ["libbenchmark_extractor"],
+
+    export_include_dirs: ["."],
+
+    ldflags: ["-Wl,-Bsymbolic"]
+}
diff --git a/media/tests/benchmark/src/native/muxer/Muxer.cpp b/media/tests/benchmark/src/native/muxer/Muxer.cpp
new file mode 100644
index 0000000..b297a66
--- /dev/null
+++ b/media/tests/benchmark/src/native/muxer/Muxer.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "muxer"
+
+#include <fstream>
+#include <iostream>
+
+#include "Muxer.h"
+
+int32_t Muxer::initMuxer(int32_t fd, MUXER_OUTPUT_T outputFormat) {
+    if (!mFormat) mFormat = mExtractor->getFormat();
+    if (!mStats) mStats = new Stats();
+
+    int64_t sTime = mStats->getCurTime();
+    mMuxer = AMediaMuxer_new(fd, (OutputFormat)outputFormat);
+    if (!mMuxer) {
+        cout << "[   WARN   ] Test Skipped. Unable to create muxer \n";
+        return AMEDIA_ERROR_INVALID_OBJECT;
+    }
+    /*
+     * AMediaMuxer_addTrack returns the index of the new track or a negative value
+     * in case of failure, which can be interpreted as a media_status_t.
+     */
+    ssize_t index = AMediaMuxer_addTrack(mMuxer, mFormat);
+    if (index < 0) {
+        cout << "[   WARN   ] Test Skipped. Format not supported \n";
+        return index;
+    }
+    AMediaMuxer_start(mMuxer);
+    int64_t eTime = mStats->getCurTime();
+    int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+    mStats->setInitTime(timeTaken);
+    return AMEDIA_OK;
+}
+
+void Muxer::deInitMuxer() {
+    int64_t sTime = mStats->getCurTime();
+    if (mFormat) {
+        AMediaFormat_delete(mFormat);
+        mFormat = nullptr;
+    }
+    if (!mMuxer) return;
+    AMediaMuxer_stop(mMuxer);
+    AMediaMuxer_delete(mMuxer);
+    int64_t eTime = mStats->getCurTime();
+    int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+    mStats->setDeInitTime(timeTaken);
+}
+
+void Muxer::resetMuxer() {
+    if (mStats) mStats->reset();
+}
+
+void Muxer::dumpStatistics(string inputReference) {
+    string operation = "mux";
+    mStats->dumpStatistics(operation, inputReference, mExtractor->getClipDuration());
+}
+
+int32_t Muxer::mux(uint8_t *inputBuffer, vector<AMediaCodecBufferInfo> &frameInfos) {
+    // Mux frame data
+    size_t frameIdx = 0;
+    mStats->setStartTime();
+    while (frameIdx < frameInfos.size()) {
+        AMediaCodecBufferInfo info = frameInfos.at(frameIdx);
+        media_status_t status = AMediaMuxer_writeSampleData(mMuxer, 0, inputBuffer, &info);
+        if (status != 0) {
+            ALOGE("Error in AMediaMuxer_writeSampleData");
+            return status;
+        }
+        mStats->addOutputTime();
+        mStats->addFrameSize(info.size);
+        frameIdx++;
+    }
+    return AMEDIA_OK;
+}
diff --git a/media/tests/benchmark/src/native/muxer/Muxer.h b/media/tests/benchmark/src/native/muxer/Muxer.h
new file mode 100644
index 0000000..eee3146
--- /dev/null
+++ b/media/tests/benchmark/src/native/muxer/Muxer.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MUXER_H__
+#define __MUXER_H__
+
+#include <media/NdkMediaMuxer.h>
+
+#include "BenchmarkCommon.h"
+#include "Stats.h"
+#include "Extractor.h"
+
+typedef enum {
+    MUXER_OUTPUT_FORMAT_MPEG_4 = 0,
+    MUXER_OUTPUT_FORMAT_WEBM = 1,
+    MUXER_OUTPUT_FORMAT_3GPP = 2,
+    MUXER_OUTPUT_FORMAT_OGG = 4,
+    MUXER_OUTPUT_FORMAT_INVALID = 5,
+} MUXER_OUTPUT_T;
+
+class Muxer {
+  public:
+    Muxer() : mFormat(nullptr), mMuxer(nullptr), mStats(nullptr) { mExtractor = new Extractor(); }
+
+    virtual ~Muxer() {
+        if (mStats) delete mStats;
+        if (mExtractor) delete mExtractor;
+    }
+
+    Stats *getStats() { return mStats; }
+    Extractor *getExtractor() { return mExtractor; }
+
+    /* Muxer related utilities */
+    int32_t initMuxer(int32_t fd, MUXER_OUTPUT_T outputFormat);
+    void deInitMuxer();
+    void resetMuxer();
+
+    /* Process the frames and produce the muxed output */
+    int32_t mux(uint8_t *inputBuffer, vector<AMediaCodecBufferInfo> &frameInfos);
+
+    void dumpStatistics(string inputReference);
+
+  private:
+    AMediaFormat *mFormat;
+    AMediaMuxer *mMuxer;
+    Extractor *mExtractor;
+    Stats *mStats;
+};
+
+#endif  // __MUXER_H__
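Putting the Muxer and Extractor pieces together: the intended lifecycle is initMuxer() on a writable file descriptor, mux() over a buffer of staged samples whose positions are recorded in their AMediaCodecBufferInfo entries, then deInitMuxer(). The sketch below is illustrative only (the helper name, the fixed track index 0, and the MPEG-4 output format are assumptions) and mirrors the flow of MuxerTest.cpp further down.

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>
#include "Muxer.h"

// Hypothetical helper, for illustration only: remuxes track 0 of one clip.
static int32_t remuxTrackZero(FILE *inFp, size_t inSize, FILE *outFp) {
    Muxer *muxer = new Muxer();
    Extractor *extractor = muxer->getExtractor();
    if (extractor->initExtractor(fileno(inFp), inSize) <= 0 ||
        extractor->setupTrackFormat(0 /* trackId */) != 0) {
        delete muxer;
        return -1;
    }

    uint8_t *buffer = (uint8_t *)malloc(kMaxBufferSize);
    if (!buffer) {
        delete muxer;
        return -1;
    }
    std::vector<AMediaCodecBufferInfo> frameInfos;
    AMediaCodecBufferInfo info;
    uint32_t offset = 0;
    // Stage every sample of the track in one buffer; record each sample's
    // offset so mux() can hand it to AMediaMuxer_writeSampleData() later.
    while (extractor->getFrameSample(info) == 0 && info.size > 0 &&
           offset + info.size <= kMaxBufferSize) {
        memcpy(buffer + offset, extractor->getFrameBuf(), info.size);
        info.offset = offset;
        frameInfos.push_back(info);
        offset += info.size;
    }

    int32_t status = muxer->initMuxer(fileno(outFp), MUXER_OUTPUT_FORMAT_MPEG_4);
    if (status == AMEDIA_OK) {
        status = muxer->mux(buffer, frameInfos);
    }
    muxer->deInitMuxer();
    extractor->deInitExtractor();
    free(buffer);
    delete muxer;
    return status;
}

Note that initMuxer() picks up the track format set by the most recent setupTrackFormat() call on the shared Extractor, so the extractor must be primed before the muxer is initialized.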
diff --git a/media/tests/benchmark/tests/Android.bp b/media/tests/benchmark/tests/Android.bp
new file mode 100644
index 0000000..fc21ef7
--- /dev/null
+++ b/media/tests/benchmark/tests/Android.bp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_test {
+    name: "extractorTest",
+    gtest: true,
+    defaults: [
+        "libbenchmark_common-defaults",
+        "libbenchmark_soft_sanitize_all-defaults",
+    ],
+
+    srcs: ["ExtractorTest.cpp"],
+
+    static_libs: ["libbenchmark_extractor"]
+}
+
+cc_test {
+    name: "decoderTest",
+    gtest: true,
+    defaults: [
+        "libbenchmark_common-defaults",
+        "libbenchmark_soft_sanitize_all-defaults",
+    ],
+
+    srcs: ["DecoderTest.cpp"],
+
+    static_libs: [
+        "libbenchmark_extractor",
+        "libbenchmark_decoder",
+    ],
+}
+
+cc_test {
+    name: "muxerTest",
+    gtest: true,
+    defaults: [
+        "libbenchmark_common-defaults",
+        "libbenchmark_soft_sanitize_all-defaults",
+    ],
+
+    srcs: ["MuxerTest.cpp"],
+
+    static_libs: [
+        "libbenchmark_extractor",
+        "libbenchmark_muxer",
+    ],
+}
+
+cc_test {
+    name: "encoderTest",
+    gtest: true,
+    defaults: [
+        "libbenchmark_common-defaults",
+        "libbenchmark_soft_sanitize_all-defaults",
+    ],
+
+    srcs: ["EncoderTest.cpp"],
+
+    static_libs: [
+        "libbenchmark_extractor",
+        "libbenchmark_decoder",
+        "libbenchmark_encoder",
+    ],
+}
diff --git a/media/tests/benchmark/tests/BenchmarkTestEnvironment.h b/media/tests/benchmark/tests/BenchmarkTestEnvironment.h
new file mode 100644
index 0000000..ae2eee1
--- /dev/null
+++ b/media/tests/benchmark/tests/BenchmarkTestEnvironment.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BENCHMARK_TEST_ENVIRONMENT_H__
+#define __BENCHMARK_TEST_ENVIRONMENT_H__
+
+#include <gtest/gtest.h>
+
+#include <getopt.h>
+
+using namespace std;
+
+class BenchmarkTestEnvironment : public ::testing::Environment {
+  public:
+    BenchmarkTestEnvironment() : res("/sdcard/media/") {}
+
+    // Parses the command line argument
+    int initFromOptions(int argc, char **argv);
+
+    void setRes(const char *_res) { res = _res; }
+
+    const string getRes() const { return res; }
+
+  private:
+    string res;
+};
+
+int BenchmarkTestEnvironment::initFromOptions(int argc, char **argv) {
+    static struct option options[] = {{"path", required_argument, 0, 'P'}, {0, 0, 0, 0}};
+
+    while (true) {
+        int index = 0;
+        int c = getopt_long(argc, argv, "P:", options, &index);
+        if (c == -1) {
+            break;
+        }
+
+        switch (c) {
+            case 'P': {
+                setRes(optarg);
+                break;
+            }
+            default:
+                break;
+        }
+    }
+
+    if (optind < argc) {
+        fprintf(stderr,
+                "unrecognized option: %s\n\n"
+                "usage: %s <gtest options> <test options>\n\n"
+                "test options are:\n\n"
+                "-P, --path: Resource files directory location\n",
+                argv[optind ?: 1], argv[0]);
+        return 2;
+    }
+    return 0;
+}
+
+#endif  // __BENCHMARK_TEST_ENVIRONMENT_H__
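The ordering in the test binaries below is deliberate: InitGoogleTest() consumes gtest's own flags first, so initFromOptions() only sees the leftover -P/--path argument. A minimal wiring sketch, identical in shape to the main() functions of DecoderTest.cpp, EncoderTest.cpp, ExtractorTest.cpp and MuxerTest.cpp (gtest takes ownership of the registered environment, so it is not deleted manually):

#include "BenchmarkTestEnvironment.h"

static BenchmarkTestEnvironment *gEnv = nullptr;

int main(int argc, char **argv) {
    gEnv = new BenchmarkTestEnvironment();
    ::testing::AddGlobalTestEnvironment(gEnv);
    ::testing::InitGoogleTest(&argc, argv);          // strips gtest flags from argv
    int status = gEnv->initFromOptions(argc, argv);  // then parse -P/--path
    if (status == 0) {
        status = RUN_ALL_TESTS();
    }
    return status;
}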
diff --git a/media/tests/benchmark/tests/DecoderTest.cpp b/media/tests/benchmark/tests/DecoderTest.cpp
new file mode 100644
index 0000000..6cb42d6
--- /dev/null
+++ b/media/tests/benchmark/tests/DecoderTest.cpp
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "decoderTest"
+
+#include <fstream>
+#include <iostream>
+#include <limits>
+
+#include "Decoder.h"
+#include "BenchmarkTestEnvironment.h"
+
+static BenchmarkTestEnvironment *gEnv = nullptr;
+
+class DecoderTest : public ::testing::TestWithParam<tuple<string, string, bool>> {};
+
+TEST_P(DecoderTest, Decode) {
+    ALOGV("Decode the samples given by extractor");
+    tuple<string /* InputFile */, string /* CodecName */, bool /* asyncMode */> params = GetParam();
+
+    string inputFile = gEnv->getRes() + get<0>(params);
+    FILE *inputFp = fopen(inputFile.c_str(), "rb");
+    if (!inputFp) {
+        cout << "[   WARN   ] Test Skipped. Unable to open input file for reading \n";
+        return;
+    }
+
+    Decoder *decoder = new Decoder();
+    Extractor *extractor = decoder->getExtractor();
+    if (!extractor) {
+        cout << "[   WARN   ] Test Skipped. Extractor creation failed \n";
+        return;
+    }
+
+    // Read file properties
+    fseek(inputFp, 0, SEEK_END);
+    size_t fileSize = ftell(inputFp);
+    fseek(inputFp, 0, SEEK_SET);
+    int32_t fd = fileno(inputFp);
+
+    int32_t trackCount = extractor->initExtractor(fd, fileSize);
+    if (trackCount <= 0) {
+        cout << "[   WARN   ] Test Skipped. initExtractor failed\n";
+        return;
+    }
+    for (int curTrack = 0; curTrack < trackCount; curTrack++) {
+        int32_t status = extractor->setupTrackFormat(curTrack);
+        if (status != 0) {
+            cout << "[   WARN   ] Test Skipped. Track Format invalid \n";
+            return;
+        }
+
+        uint8_t *inputBuffer = (uint8_t *)malloc(kMaxBufferSize);
+        if (!inputBuffer) {
+            cout << "[   WARN   ] Test Skipped. Insufficient memory \n";
+            return;
+        }
+        vector<AMediaCodecBufferInfo> frameInfo;
+        AMediaCodecBufferInfo info;
+        uint32_t inputBufferOffset = 0;
+        int32_t idx = 0;
+
+        // Get CSD data
+        while (1) {
+            void *csdBuffer = extractor->getCSDSample(info, idx);
+            if (!csdBuffer || !info.size) break;
+
+            // copy the meta data and buffer to be passed to decoder
+            if (inputBufferOffset + info.size > kMaxBufferSize) {
+                cout << "[   WARN   ] Test Skipped. Memory allocated not sufficient\n";
+                free(inputBuffer);
+                return;
+            }
+            memcpy(inputBuffer + inputBufferOffset, csdBuffer, info.size);
+            frameInfo.push_back(info);
+            inputBufferOffset += info.size;
+            idx++;
+        }
+        // Get frame data
+        while (1) {
+            status = extractor->getFrameSample(info);
+            if (status || !info.size) break;
+            // copy the meta data and buffer to be passed to decoder
+            if (inputBufferOffset + info.size > kMaxBufferSize) {
+                cout << "[   WARN   ] Test Skipped. Memory allocated not sufficient\n";
+                free(inputBuffer);
+                return;
+            }
+            memcpy(inputBuffer + inputBufferOffset, extractor->getFrameBuf(), info.size);
+            frameInfo.push_back(info);
+            inputBufferOffset += info.size;
+        }
+
+        string codecName = get<1>(params);
+        bool asyncMode = get<2>(params);
+        decoder->setupDecoder();
+        status = decoder->decode(inputBuffer, frameInfo, codecName, asyncMode);
+        if (status != AMEDIA_OK) {
+            cout << "[   WARN   ] Test Skipped. Decode returned error \n";
+            free(inputBuffer);
+            return;
+        }
+        decoder->deInitCodec();
+        cout << "codec : " << codecName << endl;
+        string inputReference = get<0>(params);
+        decoder->dumpStatistics(inputReference);
+        free(inputBuffer);
+        decoder->resetDecoder();
+    }
+    fclose(inputFp);
+    extractor->deInitExtractor();
+    delete decoder;
+}
+
+// TODO: (b/140549596)
+// Add wav files
+INSTANTIATE_TEST_SUITE_P(
+        AudioDecoderSyncTest, DecoderTest,
+        ::testing::Values(make_tuple("bbb_44100hz_2ch_128kbps_aac_30sec.mp4", "", false),
+                          make_tuple("bbb_44100hz_2ch_128kbps_mp3_30sec.mp3", "", false),
+                          make_tuple("bbb_8000hz_1ch_8kbps_amrnb_30sec.3gp", "", false),
+                          make_tuple("bbb_16000hz_1ch_9kbps_amrwb_30sec.3gp", "", false),
+                          make_tuple("bbb_44100hz_2ch_80kbps_vorbis_30sec.mp4", "", false),
+                          make_tuple("bbb_48000hz_2ch_100kbps_opus_30sec.webm", "", false)));
+
+INSTANTIATE_TEST_SUITE_P(
+        AudioDecoderAsyncTest, DecoderTest,
+        ::testing::Values(make_tuple("bbb_44100hz_2ch_128kbps_aac_30sec.mp4", "", true),
+                          make_tuple("bbb_44100hz_2ch_128kbps_mp3_30sec.mp3", "", true),
+                          make_tuple("bbb_8000hz_1ch_8kbps_amrnb_30sec.3gp", "", true),
+                          make_tuple("bbb_16000hz_1ch_9kbps_amrwb_30sec.3gp", "", true),
+                          make_tuple("bbb_44100hz_2ch_80kbps_vorbis_30sec.mp4", "", true),
+                          make_tuple("bbb_48000hz_2ch_100kbps_opus_30sec.webm", "", true)));
+
+INSTANTIATE_TEST_SUITE_P(VideoDecoderSyncTest, DecoderTest,
+                         ::testing::Values(
+                                 // Hardware codecs
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_vp9.webm", "", false),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm", "", false),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_av1.webm", "", false),
+                                 make_tuple("crowd_1920x1080_25fps_7300kbps_mpeg2.mp4", "", false),
+                                 make_tuple("crowd_1920x1080_25fps_6000kbps_mpeg4.mp4", "", false),
+                                 make_tuple("crowd_352x288_25fps_6000kbps_h263.3gp", "", false),
+                                 make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts", "", false),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv", "", false),
+                                 // Software codecs
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_vp9.webm",
+                                            "c2.android.vp9.decoder", false),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm",
+                                            "c2.android.vp8.decoder", false),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_av1.webm",
+                                            "c2.android.av1.decoder", false),
+                                 make_tuple("crowd_1920x1080_25fps_7300kbps_mpeg2.mp4",
+                                            "c2.android.mpeg2.decoder", false),
+                                 make_tuple("crowd_1920x1080_25fps_6000kbps_mpeg4.mp4",
+                                            "c2.android.mpeg4.decoder", false),
+                                 make_tuple("crowd_352x288_25fps_6000kbps_h263.3gp",
+                                            "c2.android.h263.decoder", false),
+                                 make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts",
+                                            "c2.android.avc.decoder", false),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv",
+                                            "c2.android.hevc.decoder", false)));
+
+INSTANTIATE_TEST_SUITE_P(VideoDecoderAsyncTest, DecoderTest,
+                         ::testing::Values(
+                                 // Hardware codecs
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_vp9.webm", "", true),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm", "", true),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_av1.webm", "", true),
+                                 make_tuple("crowd_1920x1080_25fps_7300kbps_mpeg2.mp4", "", true),
+                                 make_tuple("crowd_1920x1080_25fps_6000kbps_mpeg4.mp4", "", true),
+                                 make_tuple("crowd_352x288_25fps_6000kbps_h263.3gp", "", true),
+                                 make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts", "", true),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv", "", true),
+                                 // Software codecs
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_vp9.webm",
+                                            "c2.android.vp9.decoder", true),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm",
+                                            "c2.android.vp8.decoder", true),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_av1.webm",
+                                            "c2.android.av1.decoder", true),
+                                 make_tuple("crowd_1920x1080_25fps_7300kbps_mpeg2.mp4",
+                                            "c2.android.mpeg2.decoder", true),
+                                 make_tuple("crowd_1920x1080_25fps_6000kbps_mpeg4.mp4",
+                                            "c2.android.mpeg4.decoder", true),
+                                 make_tuple("crowd_352x288_25fps_6000kbps_h263.3gp",
+                                            "c2.android.h263.decoder", true),
+                                 make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts",
+                                            "c2.android.avc.decoder", true),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv",
+                                            "c2.android.hevc.decoder", true)));
+
+int main(int argc, char **argv) {
+    gEnv = new BenchmarkTestEnvironment();
+    ::testing::AddGlobalTestEnvironment(gEnv);
+    ::testing::InitGoogleTest(&argc, argv);
+    int status = gEnv->initFromOptions(argc, argv);
+    if (status == 0) {
+        status = RUN_ALL_TESTS();
+        ALOGD("Decoder Test result = %d\n", status);
+    }
+    return status;
+}
\ No newline at end of file
diff --git a/media/tests/benchmark/tests/EncoderTest.cpp b/media/tests/benchmark/tests/EncoderTest.cpp
new file mode 100644
index 0000000..574083d
--- /dev/null
+++ b/media/tests/benchmark/tests/EncoderTest.cpp
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "encoderTest"
+
+#include <fstream>
+
+#include "BenchmarkTestEnvironment.h"
+#include "Encoder.h"
+#include "Decoder.h"
+
+static BenchmarkTestEnvironment *gEnv = nullptr;
+
+class EncoderTest : public ::testing::TestWithParam<tuple<string, string, bool>> {};
+
+TEST_P(EncoderTest, Encode) {
+    ALOGD("Encode test for all codecs");
+    tuple<string /* InputFile */, string /* CodecName */, bool /* asyncMode */> params = GetParam();
+
+    string inputFile = gEnv->getRes() + get<0>(params);
+    FILE *inputFp = fopen(inputFile.c_str(), "rb");
+    if (!inputFp) {
+        cout << "[   WARN   ] Test Skipped. Unable to open input file for reading \n";
+        return;
+    }
+
+    Decoder *decoder = new Decoder();
+    Extractor *extractor = decoder->getExtractor();
+    if (!extractor) {
+        cout << "[   WARN   ] Test Skipped. Extractor creation failed \n";
+        return;
+    }
+    // Read file properties
+    fseek(inputFp, 0, SEEK_END);
+    size_t fileSize = ftell(inputFp);
+    fseek(inputFp, 0, SEEK_SET);
+    int32_t fd = fileno(inputFp);
+
+    int32_t trackCount = extractor->initExtractor(fd, fileSize);
+    if (trackCount <= 0) {
+        cout << "[   WARN   ] Test Skipped. initExtractor failed\n";
+        return;
+    }
+
+    Encoder *encoder = new Encoder();
+    for (int curTrack = 0; curTrack < trackCount; curTrack++) {
+        int32_t status = extractor->setupTrackFormat(curTrack);
+        if (status != 0) {
+            cout << "[   WARN   ] Test Skipped. Track Format invalid \n";
+            return;
+        }
+
+        uint8_t *inputBuffer = (uint8_t *)malloc(kMaxBufferSize);
+        if (!inputBuffer) {
+            cout << "[   WARN   ] Test Skipped. Insufficient memory \n";
+            return;
+        }
+        vector<AMediaCodecBufferInfo> frameInfo;
+        AMediaCodecBufferInfo info;
+        uint32_t inputBufferOffset = 0;
+        int32_t idx = 0;
+
+        // Get CSD data
+        while (1) {
+            void *csdBuffer = extractor->getCSDSample(info, idx);
+            if (!csdBuffer || !info.size) break;
+
+            // copy the meta data and buffer to be passed to decoder
+            if (inputBufferOffset + info.size > kMaxBufferSize) {
+                cout << "[   WARN   ] Test Skipped. Memory allocated not sufficient\n";
+                free(inputBuffer);
+                return;
+            }
+            memcpy(inputBuffer + inputBufferOffset, csdBuffer, info.size);
+            frameInfo.push_back(info);
+            inputBufferOffset += info.size;
+            idx++;
+        }
+
+        // Get frame data
+        while (1) {
+            status = extractor->getFrameSample(info);
+            if (status || !info.size) break;
+            // copy the meta data and buffer to be passed to decoder
+            if (inputBufferOffset + info.size > kMaxBufferSize) {
+                cout << "[   WARN   ] Test Skipped. Memory allocated not sufficient\n";
+                free(inputBuffer);
+                return;
+            }
+            memcpy(inputBuffer + inputBufferOffset, extractor->getFrameBuf(), info.size);
+            frameInfo.push_back(info);
+            inputBufferOffset += info.size;
+        }
+
+        string decName = "";
+        string outputFileName = "decode.out";
+        FILE *outFp = fopen(outputFileName.c_str(), "wb");
+        if (outFp == nullptr) {
+            ALOGE("Unable to open output file for writing");
+            return;
+        }
+        decoder->setupDecoder();
+        status = decoder->decode(inputBuffer, frameInfo, decName, false /*asyncMode */, outFp);
+        if (status != AMEDIA_OK) {
+            cout << "[   WARN   ] Test Skipped. Decode returned error \n";
+            return;
+        }
+
+        ifstream eleStream;
+        eleStream.open(outputFileName.c_str(), ifstream::binary | ifstream::ate);
+        ASSERT_EQ(eleStream.is_open(), true) << outputFileName.c_str() << " - file not found";
+        size_t eleSize = eleStream.tellg();
+        eleStream.seekg(0, ifstream::beg);
+
+        AMediaFormat *format = extractor->getFormat();
+        const char *mime = nullptr;
+        AMediaFormat_getString(format, AMEDIAFORMAT_KEY_MIME, &mime);
+        if (!mime) {
+            ALOGE("Error in AMediaFormat_getString");
+            return;
+        }
+        // Get encoder params
+        encParameter encParams;
+        if (!strncmp(mime, "video/", 6)) {
+            AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_WIDTH, &encParams.width);
+            AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_HEIGHT, &encParams.height);
+            AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_FRAME_RATE, &encParams.frameRate);
+            AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_BIT_RATE, &encParams.bitrate);
+            if (encParams.bitrate <= 0 || encParams.frameRate <= 0) {
+                encParams.frameRate = 25;
+                if (!strcmp(mime, "video/3gpp") || !strcmp(mime, "video/mp4v-es")) {
+                    encParams.bitrate = 600000 /* 600 Kbps */;
+                } else {
+                    encParams.bitrate = 8000000 /* 8 Mbps */;
+                }
+            }
+            AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_PROFILE, &encParams.profile);
+            AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_LEVEL, &encParams.level);
+        } else {
+            AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_SAMPLE_RATE, &encParams.sampleRate);
+            AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_CHANNEL_COUNT, &encParams.numChannels);
+            encParams.bitrate =
+                    encParams.sampleRate * encParams.numChannels * 16 /* bitsPerSample */;
+        }
+
+        encoder->setupEncoder();
+        string codecName = get<1>(params);
+        bool asyncMode = get<2>(params);
+        status = encoder->encode(codecName, eleStream, eleSize, asyncMode, encParams, (char *)mime);
+        ASSERT_EQ(status, 0);
+        encoder->deInitCodec();
+        cout << "codec : " << codecName << endl;
+        string inputReference = get<0>(params);
+        encoder->dumpStatistics(inputReference, extractor->getClipDuration());
+        eleStream.close();
+        if (outFp) fclose(outFp);
+
+        if (format) {
+            AMediaFormat_delete(format);
+            format = nullptr;
+        }
+        encoder->resetEncoder();
+        decoder->deInitCodec();
+        free(inputBuffer);
+        decoder->resetDecoder();
+    }
+    delete encoder;
+    fclose(inputFp);
+    extractor->deInitExtractor();
+    delete decoder;
+}
+
+INSTANTIATE_TEST_SUITE_P(
+        AudioEncoderSyncTest, EncoderTest,
+        ::testing::Values(make_tuple("bbb_44100hz_2ch_128kbps_aac_30sec.mp4", "", false),
+                          make_tuple("bbb_8000hz_1ch_8kbps_amrnb_30sec.3gp", "", false),
+                          make_tuple("bbb_16000hz_1ch_9kbps_amrwb_30sec.3gp", "", false),
+                          make_tuple("bbb_48000hz_2ch_100kbps_opus_30sec.webm", "", false)));
+
+INSTANTIATE_TEST_SUITE_P(
+        AudioEncoderAsyncTest, EncoderTest,
+        ::testing::Values(make_tuple("bbb_44100hz_2ch_128kbps_aac_30sec.mp4", "", true),
+                          make_tuple("bbb_8000hz_1ch_8kbps_amrnb_30sec.3gp", "", true),
+                          make_tuple("bbb_16000hz_1ch_9kbps_amrwb_30sec.3gp", "", true),
+                          make_tuple("bbb_48000hz_2ch_100kbps_opus_30sec.webm", "", true)));
+
+INSTANTIATE_TEST_SUITE_P(VideoEncoderSyncTest, EncoderTest,
+                         ::testing::Values(
+                                 // Hardware codecs
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm", "", false),
+                                 make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts", "", false),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv", "", false),
+                                 // Software codecs
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_vp9.webm",
+                                            "c2.android.vp9.encoder", false),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm",
+                                            "c2.android.vp8.encoder", false),
+                                 make_tuple("crowd_176x144_25fps_6000kbps_mpeg4.mp4",
+                                            "c2.android.mpeg4.encoder", false),
+                                 make_tuple("crowd_176x144_25fps_6000kbps_h263.3gp",
+                                            "c2.android.h263.encoder", false),
+                                 make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts",
+                                            "c2.android.avc.encoder", false),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv",
+                                            "c2.android.hevc.encoder", false)));
+
+INSTANTIATE_TEST_SUITE_P(VideoEncoderAsyncTest, EncoderTest,
+                         ::testing::Values(
+                                 // Hardware codecs
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm", "", true),
+                                 make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts", "", true),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv", "", true),
+                                 // Software codecs
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_vp9.webm",
+                                            "c2.android.vp9.encoder", true),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm",
+                                            "c2.android.vp8.encoder", true),
+                                 make_tuple("crowd_176x144_25fps_6000kbps_mpeg4.mp4",
+                                            "c2.android.mpeg4.encoder", true),
+                                 make_tuple("crowd_176x144_25fps_6000kbps_h263.3gp",
+                                            "c2.android.h263.encoder", true),
+                                 make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts",
+                                            "c2.android.avc.encoder", true),
+                                 make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv",
+                                            "c2.android.hevc.encoder", true)));
+
+int main(int argc, char **argv) {
+    gEnv = new BenchmarkTestEnvironment();
+    ::testing::AddGlobalTestEnvironment(gEnv);
+    ::testing::InitGoogleTest(&argc, argv);
+    int status = gEnv->initFromOptions(argc, argv);
+    if (status == 0) {
+        status = RUN_ALL_TESTS();
+        ALOGD("Encoder Test result = %d\n", status);
+    }
+    return status;
+}
diff --git a/media/tests/benchmark/tests/ExtractorTest.cpp b/media/tests/benchmark/tests/ExtractorTest.cpp
new file mode 100644
index 0000000..dd0d711
--- /dev/null
+++ b/media/tests/benchmark/tests/ExtractorTest.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "extractorTest"
+
+#include <gtest/gtest.h>
+
+#include "Extractor.h"
+#include "BenchmarkTestEnvironment.h"
+
+static BenchmarkTestEnvironment *gEnv = nullptr;
+
+class ExtractorTest : public ::testing::TestWithParam<pair<string, int32_t>> {};
+
+TEST_P(ExtractorTest, Extract) {
+    Extractor *extractObj = new Extractor();
+
+    string inputFile = gEnv->getRes() + GetParam().first;
+    FILE *inputFp = fopen(inputFile.c_str(), "rb");
+    if (!inputFp) {
+        cout << "[   WARN   ] Test Skipped. Unable to open input file for reading \n";
+        return;
+    }
+
+    // Read file properties
+    size_t fileSize = 0;
+    fseek(inputFp, 0, SEEK_END);
+    fileSize = ftell(inputFp);
+    fseek(inputFp, 0, SEEK_SET);
+    int32_t fd = fileno(inputFp);
+
+    int32_t trackCount = extractObj->initExtractor(fd, fileSize);
+    if (trackCount <= 0) {
+        cout << "[   WARN   ] Test Skipped. initExtractor failed\n";
+        return;
+    }
+
+    int32_t trackID = GetParam().second;
+    int32_t status = extractObj->extract(trackID);
+    if (status != AMEDIA_OK) {
+        cout << "[   WARN   ] Test Skipped. Extraction failed \n";
+        return;
+    }
+
+    extractObj->deInitExtractor();
+
+    extractObj->dumpStatistics(GetParam().first);
+
+    fclose(inputFp);
+    delete extractObj;
+}
+
+INSTANTIATE_TEST_SUITE_P(ExtractorTestAll, ExtractorTest,
+                         ::testing::Values(make_pair("crowd_1920x1080_25fps_4000kbps_vp9.webm", 0),
+                                           make_pair("crowd_1920x1080_25fps_6000kbps_h263.3gp", 0),
+                                           make_pair("crowd_1920x1080_25fps_6000kbps_mpeg4.mp4", 0),
+                                           make_pair("crowd_1920x1080_25fps_6700kbps_h264.ts", 0),
+                                           make_pair("crowd_1920x1080_25fps_7300kbps_mpeg2.mp4", 0),
+                                           make_pair("crowd_1920x1080_25fps_4000kbps_av1.webm", 0),
+                                           make_pair("crowd_1920x1080_25fps_4000kbps_h265.mkv", 0),
+                                           make_pair("crowd_1920x1080_25fps_4000kbps_vp8.webm", 0),
+                                           make_pair("bbb_44100hz_2ch_128kbps_aac_5mins.mp4", 0),
+                                           make_pair("bbb_44100hz_2ch_128kbps_mp3_5mins.mp3", 0),
+                                           make_pair("bbb_44100hz_2ch_600kbps_flac_5mins.flac", 0),
+                                           make_pair("bbb_8000hz_1ch_8kbps_amrnb_5mins.3gp", 0),
+                                           make_pair("bbb_16000hz_1ch_9kbps_amrwb_5mins.3gp", 0),
+                                           make_pair("bbb_44100hz_2ch_80kbps_vorbis_5mins.mp4", 0),
+                                           make_pair("bbb_48000hz_2ch_100kbps_opus_5mins.webm", 0)));
+
+int main(int argc, char **argv) {
+    gEnv = new BenchmarkTestEnvironment();
+    ::testing::AddGlobalTestEnvironment(gEnv);
+    ::testing::InitGoogleTest(&argc, argv);
+    int status = gEnv->initFromOptions(argc, argv);
+    if (status == 0) {
+        status = RUN_ALL_TESTS();
+        ALOGD(" Extractor Test result = %d\n", status);
+    }
+    return status;
+}
diff --git a/media/tests/benchmark/tests/MuxerTest.cpp b/media/tests/benchmark/tests/MuxerTest.cpp
new file mode 100644
index 0000000..e814f90
--- /dev/null
+++ b/media/tests/benchmark/tests/MuxerTest.cpp
@@ -0,0 +1,181 @@
+
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "muxerTest"
+
+#include <fstream>
+#include <iostream>
+
+#include "Muxer.h"
+#include "BenchmarkTestEnvironment.h"
+
+#define OUTPUT_FILE_NAME "/data/local/tmp/mux.out"
+
+static BenchmarkTestEnvironment *gEnv = nullptr;
+
+class MuxerTest : public ::testing::TestWithParam<pair<string, string>> {};
+
+static MUXER_OUTPUT_T getMuxerOutFormat(string fmt) {
+    static const struct {
+        string name;
+        MUXER_OUTPUT_T value;
+    } kFormatMaps[] = {{"mp4", MUXER_OUTPUT_FORMAT_MPEG_4},
+                       {"webm", MUXER_OUTPUT_FORMAT_WEBM},
+                       {"3gpp", MUXER_OUTPUT_FORMAT_3GPP},
+                       {"ogg", MUXER_OUTPUT_FORMAT_OGG}};
+
+    MUXER_OUTPUT_T format = MUXER_OUTPUT_FORMAT_INVALID;
+    for (size_t i = 0; i < sizeof(kFormatMaps) / sizeof(kFormatMaps[0]); ++i) {
+        if (!fmt.compare(kFormatMaps[i].name)) {
+            format = kFormatMaps[i].value;
+            break;
+        }
+    }
+    return format;
+}
+
+TEST_P(MuxerTest, Mux) {
+    ALOGV("Mux the samples given by extractor");
+    string inputFile = gEnv->getRes() + GetParam().first;
+    FILE *inputFp = fopen(inputFile.c_str(), "rb");
+    if (!inputFp) {
+        cout << "[   WARN   ] Test Skipped. Unable to open input file for reading \n";
+        return;
+    }
+    string fmt = GetParam().second;
+    MUXER_OUTPUT_T outputFormat = getMuxerOutFormat(fmt);
+    if (outputFormat == MUXER_OUTPUT_FORMAT_INVALID) {
+        ALOGE("output format is MUXER_OUTPUT_FORMAT_INVALID");
+        return;
+    }
+
+    Muxer *muxerObj = new Muxer();
+    Extractor *extractor = muxerObj->getExtractor();
+    if (!extractor) {
+        cout << "[   WARN   ] Test Skipped. Extractor creation failed \n";
+        return;
+    }
+
+    // Read file properties
+    size_t fileSize = 0;
+    fseek(inputFp, 0, SEEK_END);
+    fileSize = ftell(inputFp);
+    fseek(inputFp, 0, SEEK_SET);
+    int32_t fd = fileno(inputFp);
+
+    int32_t trackCount = extractor->initExtractor(fd, fileSize);
+    if (trackCount <= 0) {
+        cout << "[   WARN   ] Test Skipped. initExtractor failed\n";
+        return;
+    }
+
+    for (int curTrack = 0; curTrack < trackCount; curTrack++) {
+        int32_t status = extractor->setupTrackFormat(curTrack);
+        if (status != 0) {
+            cout << "[   WARN   ] Test Skipped. Track Format invalid \n";
+            return;
+        }
+
+        uint8_t *inputBuffer = (uint8_t *)malloc(kMaxBufferSize);
+        if (!inputBuffer) {
+            std::cout << "[   WARN   ] Test Skipped. Insufficient memory \n";
+            return;
+        }
+        // AMediaCodecBufferInfo : <offset> <size of frame> <presentationTimeUs> <flags>
+        vector<AMediaCodecBufferInfo> frameInfos;
+        AMediaCodecBufferInfo info;
+        uint32_t inputBufferOffset = 0;
+
+        // Get Frame Data
+        while (1) {
+            status = extractor->getFrameSample(info);
+            if (status || !info.size) break;
+            // copy the meta data and buffer to be passed to muxer
+            if (inputBufferOffset + info.size > kMaxBufferSize) {
+                cout << "[   WARN   ] Test Skipped. Memory allocated not sufficient\n";
+                free(inputBuffer);
+                return;
+            }
+            memcpy(inputBuffer + inputBufferOffset, extractor->getFrameBuf(), info.size);
+            info.offset = inputBufferOffset;
+            frameInfos.push_back(info);
+            inputBufferOffset += info.size;
+        }
+
+        string outputFileName = OUTPUT_FILE_NAME;
+        FILE *outputFp = fopen(outputFileName.c_str(), "w+b");
+        if (!outputFp) {
+            cout << "[   WARN   ] Test Skipped. Unable to open output file for writing \n";
+            return;
+        }
+        int32_t fd = fileno(outputFp);
+        status = muxerObj->initMuxer(fd, outputFormat);
+        if (status != 0) {
+            cout << "[   WARN   ] Test Skipped. initMuxer failed\n";
+            return;
+        }
+
+        status = muxerObj->mux(inputBuffer, frameInfos);
+        if (status != 0) {
+            cout << "[   WARN   ] Test Skipped. Mux failed \n";
+            return;
+        }
+        muxerObj->deInitMuxer();
+        muxerObj->dumpStatistics(GetParam().first + "." + fmt.c_str());
+        free(inputBuffer);
+        fclose(outputFp);
+        muxerObj->resetMuxer();
+    }
+    fclose(inputFp);
+    extractor->deInitExtractor();
+    delete muxerObj;
+}
+
+INSTANTIATE_TEST_SUITE_P(
+        MuxerTestAll, MuxerTest,
+        ::testing::Values(make_pair("crowd_1920x1080_25fps_4000kbps_vp8.webm", "webm"),
+                          make_pair("crowd_1920x1080_25fps_4000kbps_vp9.webm", "webm"),
+                          make_pair("crowd_1920x1080_25fps_6000kbps_mpeg4.mp4", "mp4"),
+                          make_pair("crowd_352x288_25fps_6000kbps_h263.3gp", "mp4"),
+                          make_pair("crowd_1920x1080_25fps_6700kbps_h264.ts", "mp4"),
+                          make_pair("crowd_1920x1080_25fps_4000kbps_h265.mkv", "mp4"),
+                          make_pair("crowd_1920x1080_25fps_6000kbps_mpeg4.mp4", "3gpp"),
+                          make_pair("crowd_352x288_25fps_6000kbps_h263.3gp", "3gpp"),
+                          make_pair("crowd_1920x1080_25fps_6700kbps_h264.ts", "3gpp"),
+                          make_pair("crowd_1920x1080_25fps_4000kbps_h265.mkv", "3gpp"),
+                          make_pair("bbb_48000hz_2ch_100kbps_opus_5mins.webm", "ogg"),
+                          make_pair("bbb_44100hz_2ch_80kbps_vorbis_5mins.mp4", "webm"),
+                          make_pair("bbb_48000hz_2ch_100kbps_opus_5mins.webm", "webm"),
+                          make_pair("bbb_44100hz_2ch_128kbps_aac_5mins.mp4", "mp4"),
+                          make_pair("bbb_8000hz_1ch_8kbps_amrnb_5mins.3gp", "mp4"),
+                          make_pair("bbb_16000hz_1ch_9kbps_amrwb_5mins.3gp", "mp4"),
+                          make_pair("bbb_44100hz_2ch_128kbps_aac_5mins.mp4", "3gpp"),
+                          make_pair("bbb_8000hz_1ch_8kbps_amrnb_5mins.3gp", "3gpp"),
+                          make_pair("bbb_16000hz_1ch_9kbps_amrwb_5mins.3gp", "3gpp")));
+
+int main(int argc, char **argv) {
+    gEnv = new BenchmarkTestEnvironment();
+    ::testing::AddGlobalTestEnvironment(gEnv);
+    ::testing::InitGoogleTest(&argc, argv);
+    int status = gEnv->initFromOptions(argc, argv);
+    if (status == 0) {
+        status = RUN_ALL_TESTS();
+        ALOGV("Test result = %d\n", status);
+    }
+    return status;
+}
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index d81cde8..c0967d9 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -30,6 +30,7 @@
     ],
     shared_libs: [
         "libbinder",
+        "libcutils",
         "liblog",
         "libutils",
         "libhidlbase",
@@ -45,16 +46,10 @@
         "-Werror",
     ],
 
-    product_variables: {
-        product_is_iot: {
-            cflags: ["-DTARGET_ANDROID_THINGS"],
-        },
-    },
-
-    include_dirs: [
-        // For android_mallopt definitions.
-        "bionic/libc/private"
+    header_libs: [
+        "bionic_libc_platform_headers",
     ],
+
     local_include_dirs: ["include"],
     export_include_dirs: ["include"],
 }
diff --git a/media/utils/MemoryLeakTrackUtil.cpp b/media/utils/MemoryLeakTrackUtil.cpp
index 2988b52..6166859 100644
--- a/media/utils/MemoryLeakTrackUtil.cpp
+++ b/media/utils/MemoryLeakTrackUtil.cpp
@@ -22,7 +22,7 @@
 #include "media/MemoryLeakTrackUtil.h"
 #include <sstream>
 
-#include <bionic_malloc.h>
+#include <bionic/malloc.h>
 
 /*
  * The code here originally resided in MediaPlayerService.cpp
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index b824212..990f318 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -63,7 +63,10 @@
         uid_t uid, bool start) {
     // Okay to not track in app ops as audio server is us and if
     // device is rooted security model is considered compromised.
-    if (isAudioServerOrRootUid(uid)) return true;
+    // system_server loses its RECORD_AUDIO permission when a secondary
+    // user is active, but it is a core system service so let it through.
+    // TODO(b/141210120): UserManager.DISALLOW_RECORD_AUDIO should not affect system user 0
+    if (isAudioServerOrSystemServerOrRootUid(uid)) return true;
 
     // We specify a pid and uid here as mediaserver (aka MediaRecorder or StageFrightRecorder)
     // may open a record track on behalf of a client.  Note that pid may be a tid.
@@ -176,18 +179,7 @@
     // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
     bool ok = PermissionCache::checkCallingPermission(sModifyDefaultAudioEffectsAllowed);
 
-#ifdef TARGET_ANDROID_THINGS
-    if (!ok) {
-        // Use a secondary permission on Android Things to allow a more lenient level of protection.
-        static const String16 sModifyDefaultAudioEffectsAndroidThingsAllowed(
-                "com.google.android.things.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
-        ok = PermissionCache::checkCallingPermission(
-                sModifyDefaultAudioEffectsAndroidThingsAllowed);
-    }
-    if (!ok) ALOGE("com.google.android.things.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
-#else
     if (!ok) ALOGE("android.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
-#endif
     return ok;
 }
 
@@ -233,9 +225,9 @@
     off_t size = lseek(heap->getHeapID(), 0, SEEK_END);
     lseek(heap->getHeapID(), 0, SEEK_SET);
 
-    if (iMemory->pointer() == NULL || size < (off_t)iMemory->size()) {
+    if (iMemory->unsecurePointer() == NULL || size < (off_t)iMemory->size()) {
         ALOGE("%s check failed: pointer %p size %zu fd size %u",
-              __FUNCTION__, iMemory->pointer(), iMemory->size(), (uint32_t)size);
+              __FUNCTION__, iMemory->unsecurePointer(), iMemory->size(), (uint32_t)size);
         return BAD_VALUE;
     }
 
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 2a6e609..e1089d5 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -58,6 +58,12 @@
     return multiuser_get_app_id(uid) == AID_SYSTEM || uid == AID_AUDIOSERVER;
 }
 
+// used for calls that should come from system_server or audio_server and
+// include AID_ROOT for command-line tests.
+static inline bool isAudioServerOrSystemServerOrRootUid(uid_t uid) {
+    return multiuser_get_app_id(uid) == AID_SYSTEM || uid == AID_AUDIOSERVER || uid == AID_ROOT;
+}
+
 // Mediaserver may forward the client PID and UID as part of a binder interface call;
 // otherwise the calling UID must be equal to the client UID.
 static inline bool isAudioServerOrMediaServerUid(uid_t uid) {
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 355d945..27d7856 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -652,7 +652,7 @@
         return new NBLog::Writer();
     }
 success:
-    NBLog::Shared *sharedRawPtr = (NBLog::Shared *) shared->pointer();
+    NBLog::Shared *sharedRawPtr = (NBLog::Shared *) shared->unsecurePointer();
     new((void *) sharedRawPtr) NBLog::Shared(); // placement new here, but the corresponding
                                                 // explicit destructor not needed since it is POD
     sMediaLogService->registerWriter(shared, size, name);
@@ -1132,16 +1132,16 @@
     return mute;
 }
 
-void AudioFlinger::setRecordSilenced(uid_t uid, bool silenced)
+void AudioFlinger::setRecordSilenced(audio_port_handle_t portId, bool silenced)
 {
-    ALOGV("AudioFlinger::setRecordSilenced(uid:%d, silenced:%d)", uid, silenced);
+    ALOGV("AudioFlinger::setRecordSilenced(portId:%d, silenced:%d)", portId, silenced);
 
     AutoMutex lock(mLock);
     for (size_t i = 0; i < mRecordThreads.size(); i++) {
-        mRecordThreads[i]->setRecordSilenced(uid, silenced);
+        mRecordThreads[i]->setRecordSilenced(portId, silenced);
     }
     for (size_t i = 0; i < mMmapThreads.size(); i++) {
-        mMmapThreads[i]->setRecordSilenced(uid, silenced);
+        mMmapThreads[i]->setRecordSilenced(portId, silenced);
     }
 }
 
@@ -1933,7 +1933,8 @@
                                                   &output.notificationFrameCount,
                                                   callingPid, clientUid, &output.flags,
                                                   input.clientInfo.clientTid,
-                                                  &lStatus, portId);
+                                                  &lStatus, portId,
+                                                  input.opPackageName);
         LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));
 
         // lStatus == BAD_TYPE means FAST flag was rejected: request a new input from
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 72e669a..5e4509f 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -162,7 +162,7 @@
     virtual     status_t    setMicMute(bool state);
     virtual     bool        getMicMute() const;
 
-    virtual     void        setRecordSilenced(uid_t uid, bool silenced);
+    virtual     void        setRecordSilenced(audio_port_handle_t portId, bool silenced);
 
     virtual     status_t    setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs);
     virtual     String8     getParameters(audio_io_handle_t ioHandle, const String8& keys) const;
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 3c4fbba..d54ab42 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -24,6 +24,7 @@
 #include "Configuration.h"
 #include <utils/Log.h>
 #include <system/audio_effects/effect_aec.h>
+#include <system/audio_effects/effect_dynamicsprocessing.h>
 #include <system/audio_effects/effect_ns.h>
 #include <system/audio_effects/effect_visualizer.h>
 #include <audio_utils/channels.h>
@@ -1587,7 +1588,7 @@
     int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int);
     mCblkMemory = client->heap()->allocate(EFFECT_PARAM_BUFFER_SIZE + bufOffset);
     if (mCblkMemory == 0 ||
-            (mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer())) == NULL) {
+            (mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->unsecurePointer())) == NULL) {
         ALOGE("not enough memory for Effect size=%zu", EFFECT_PARAM_BUFFER_SIZE +
                 sizeof(effect_param_cblk_t));
         mCblkMemory.clear();
@@ -2569,7 +2570,8 @@
     if ((mSessionId == AUDIO_SESSION_OUTPUT_MIX) &&
         (((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) ||
          (memcmp(&desc.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0) ||
-         (memcmp(&desc.type, SL_IID_VOLUME, sizeof(effect_uuid_t)) == 0))) {
+         (memcmp(&desc.type, SL_IID_VOLUME, sizeof(effect_uuid_t)) == 0) ||
+         (memcmp(&desc.type, SL_IID_DYNAMICSPROCESSING, sizeof(effect_uuid_t)) == 0))) {
         return false;
     }
     return true;
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index c5b9953..3eacc8c 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -105,13 +105,8 @@
     return mSQ.poll();
 }
 
-void FastMixer::setNBLogWriter(NBLog::Writer *logWriter)
+void FastMixer::setNBLogWriter(NBLog::Writer *logWriter __unused)
 {
-    // FIXME If mMixer is set or changed prior to this, we don't inform correctly.
-    //       Should cache logWriter and re-apply it at the assignment to mMixer.
-    if (mMixer != NULL) {
-        mMixer->setNBLogWriter(logWriter);
-    }
 }
 
 void FastMixer::onIdle()
diff --git a/services/audioflinger/FastThread.cpp b/services/audioflinger/FastThread.cpp
index 04b32c2..8b7a124 100644
--- a/services/audioflinger/FastThread.cpp
+++ b/services/audioflinger/FastThread.cpp
@@ -124,7 +124,7 @@
             mDumpState = next->mDumpState != NULL ? next->mDumpState : mDummyDumpState;
             tlNBLogWriter = next->mNBLogWriter != NULL ?
                     next->mNBLogWriter : mDummyNBLogWriter.get();
-            setNBLogWriter(tlNBLogWriter); // FastMixer informs its AudioMixer, FastCapture ignores
+            setNBLogWriter(tlNBLogWriter); // This is used for debugging only
 
             // We want to always have a valid reference to the previous (non-idle) state.
             // However, the state queue only guarantees access to current and previous states.
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 08660dd..c8397cd 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -19,6 +19,39 @@
     #error This header file should only be included from AudioFlinger.h
 #endif
 
+// Checks and monitors OP_RECORD_AUDIO
+class OpRecordAudioMonitor : public RefBase {
+public:
+    ~OpRecordAudioMonitor() override;
+    bool hasOpRecordAudio() const;
+
+    static sp<OpRecordAudioMonitor> createIfNeeded(uid_t uid, const String16& opPackageName);
+
+private:
+    OpRecordAudioMonitor(uid_t uid, const String16& opPackageName);
+    void onFirstRef() override;
+
+    AppOpsManager mAppOpsManager;
+
+    class RecordAudioOpCallback : public BnAppOpsCallback {
+    public:
+        explicit RecordAudioOpCallback(const wp<OpRecordAudioMonitor>& monitor);
+        void opChanged(int32_t op, const String16& packageName) override;
+
+    private:
+        const wp<OpRecordAudioMonitor> mMonitor;
+    };
+
+    sp<RecordAudioOpCallback> mOpCallback;
+    // called by RecordAudioOpCallback when OP_RECORD_AUDIO is updated in AppOp callback
+    // and in onFirstRef()
+    void checkRecordAudio();
+
+    std::atomic_bool mHasOpRecordAudio;
+    const uid_t mUid;
+    const String16 mPackage;
+};
+
 // record track
 class RecordTrack : public TrackBase {
 public:
@@ -36,6 +69,7 @@
                                 uid_t uid,
                                 audio_input_flags_t flags,
                                 track_type type,
+                                const String16& opPackageName,
                                 audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
     virtual             ~RecordTrack();
     virtual status_t    initCheck() const;
@@ -68,7 +102,7 @@
                                 { return (mFlags & AUDIO_INPUT_FLAG_DIRECT) != 0; }
 
             void        setSilenced(bool silenced) { if (!isPatchTrack()) mSilenced = silenced; }
-            bool        isSilenced() const { return mSilenced; }
+            bool        isSilenced() const;
 
             status_t    getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
 
@@ -111,6 +145,11 @@
             audio_input_flags_t                mFlags;
 
             bool                               mSilenced;
+
+            // used to enforce OP_RECORD_AUDIO
+            uid_t                              mUid;
+            String16                           mOpPackageName;
+            sp<OpRecordAudioMonitor>           mOpRecordAudioMonitor;
 };
 
 // playback track, used by PatchPanel
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index f81dacf..8fddc13 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2079,9 +2079,9 @@
             // More than 2 channels does not require stronger alignment than stereo
             alignment <<= 1;
         }
-        if (((uintptr_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
+        if (((uintptr_t)sharedBuffer->unsecurePointer() & (alignment - 1)) != 0) {
             ALOGE("Invalid buffer alignment: address %p, channel count %u",
-                  sharedBuffer->pointer(), channelCount);
+                  sharedBuffer->unsecurePointer(), channelCount);
             lStatus = BAD_VALUE;
             goto Exit;
         }
@@ -2953,9 +2953,11 @@
             ALOG_ASSERT(mCallbackThread != 0);
             mCallbackThread->setWriteBlocked(mWriteAckSequence);
         }
+        ATRACE_BEGIN("write");
         // FIXME We should have an implementation of timestamps for direct output threads.
         // They are used e.g for multichannel PCM playback over HDMI.
         bytesWritten = mOutput->write((char *)mSinkBuffer + offset, mBytesRemaining);
+        ATRACE_END();
 
         if (mUseAsyncWrite &&
                 ((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) {
@@ -5301,11 +5303,11 @@
         return false;
     }
     // Check validity as we don't call AudioMixer::create() here.
-    if (!AudioMixer::isValidFormat(format)) {
+    if (!mAudioMixer->isValidFormat(format)) {
         ALOGW("%s: invalid format: %#x", __func__, format);
         return false;
     }
-    if (!AudioMixer::isValidChannelMask(channelMask)) {
+    if (!mAudioMixer->isValidChannelMask(channelMask)) {
         ALOGW("%s: invalid channelMask: %#x", __func__, channelMask);
         return false;
     }
@@ -5658,10 +5660,17 @@
             minFrames = 1;
         }
 
-        if ((track->framesReady() >= minFrames) && track->isReady() && !track->isPaused() &&
+        const size_t framesReady = track->framesReady();
+        const int trackId = track->id();
+        if (ATRACE_ENABLED()) {
+            std::string traceName("nRdy");
+            traceName += std::to_string(trackId);
+            ATRACE_INT(traceName.c_str(), framesReady);
+        }
+        if ((framesReady >= minFrames) && track->isReady() && !track->isPaused() &&
                 !track->isStopping_2() && !track->isStopped())
         {
-            ALOGVV("track(%d) s=%08x [OK]", track->id(), cblk->mServer);
+            ALOGVV("track(%d) s=%08x [OK]", trackId, cblk->mServer);
 
             if (track->mFillingUpStatus == Track::FS_FILLED) {
                 track->mFillingUpStatus = Track::FS_ACTIVE;
@@ -5738,7 +5747,7 @@
                 // fill a buffer, then remove it from active list.
                 // Only consider last track started for mixer state control
                 if (--(track->mRetryCount) <= 0) {
-                    ALOGV("BUFFER TIMEOUT: remove track(%d) from active list", track->id());
+                    ALOGV("BUFFER TIMEOUT: remove track(%d) from active list", trackId);
                     tracksToRemove->add(track);
                     // indicate to client process that the track was disabled because of underrun;
                     // it will then automatically call start() when data is available
@@ -5746,7 +5755,7 @@
                 } else if (last) {
                     ALOGW("pause because of UNDERRUN, framesReady = %zu,"
                             "minFrames = %u, mFormat = %#x",
-                            track->framesReady(), minFrames, mFormat);
+                            framesReady, minFrames, mFormat);
                     mixerStatus = MIXER_TRACKS_ENABLED;
                     if (mHwSupportsPause && !mHwPaused && !mStandby) {
                         doHwPause = true;
@@ -6354,7 +6363,9 @@
             }
         }
         // compute volume for this track
-        processVolume_l(track, last);
+        if (track->isReady()) {  // check ready to prevent premature start.
+            processVolume_l(track, last);
+        }
     }
 
     // make sure the pause/flush/resume sequence is executed in the right order.
@@ -6745,7 +6756,7 @@
         sp<IMemory> pipeMemory;
         if ((roHeap == 0) ||
                 (pipeMemory = roHeap->allocate(pipeSize)) == 0 ||
-                (pipeBuffer = pipeMemory->pointer()) == nullptr) {
+                (pipeBuffer = pipeMemory->unsecurePointer()) == nullptr) {
             ALOGE("not enough memory for pipe buffer size=%zu; "
                     "roHeap=%p, pipeMemory=%p, pipeBuffer=%p; roHeapSize: %lld",
                     pipeSize, roHeap.get(), pipeMemory.get(), pipeBuffer,
@@ -7291,7 +7302,7 @@
                         // Sanitize before releasing if the track has no access to the source data
                         // An idle UID receives silence from non virtual devices until active
                         if (activeTrack->isSilenced()) {
-                            memset(activeTrack->mSink.raw, 0, framesOut * mFrameSize);
+                            memset(activeTrack->mSink.raw, 0, framesOut * activeTrack->frameSize());
                         }
                         activeTrack->releaseBuffer(&activeTrack->mSink);
                     }
@@ -7452,7 +7463,8 @@
         audio_input_flags_t *flags,
         pid_t tid,
         status_t *status,
-        audio_port_handle_t portId)
+        audio_port_handle_t portId,
+        const String16& opPackageName)
 {
     size_t frameCount = *pFrameCount;
     size_t notificationFrameCount = *pNotificationFrameCount;
@@ -7586,7 +7598,7 @@
         track = new RecordTrack(this, client, attr, sampleRate,
                       format, channelMask, frameCount,
                       nullptr /* buffer */, (size_t)0 /* bufferSize */, sessionId, creatorPid, uid,
-                      *flags, TrackBase::TYPE_DEFAULT, portId);
+                      *flags, TrackBase::TYPE_DEFAULT, opPackageName, portId);
 
         lStatus = track->initCheck();
         if (lStatus != NO_ERROR) {
@@ -7922,12 +7934,12 @@
     write(fd, result.string(), result.size());
 }
 
-void AudioFlinger::RecordThread::setRecordSilenced(uid_t uid, bool silenced)
+void AudioFlinger::RecordThread::setRecordSilenced(audio_port_handle_t portId, bool silenced)
 {
     Mutex::Autolock _l(mLock);
     for (size_t i = 0; i < mTracks.size() ; i++) {
         sp<RecordTrack> track = mTracks[i];
-        if (track != 0 && track->uid() == uid) {
+        if (track != 0 && track->portId() == portId) {
             track->setSilenced(silenced);
         }
     }
@@ -9465,11 +9477,11 @@
     mInput->stream->updateSinkMetadata(metadata);
 }
 
-void AudioFlinger::MmapCaptureThread::setRecordSilenced(uid_t uid, bool silenced)
+void AudioFlinger::MmapCaptureThread::setRecordSilenced(audio_port_handle_t portId, bool silenced)
 {
     Mutex::Autolock _l(mLock);
     for (size_t i = 0; i < mActiveTracks.size() ; i++) {
-        if (mActiveTracks[i]->uid() == uid) {
+        if (mActiveTracks[i]->portId() == portId) {
             mActiveTracks[i]->setSilenced_l(silenced);
             broadcast_l();
         }
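
Several hunks above add per-track systrace counters (for example "nRdy<trackId>") that are only built when tracing is active. The pattern in isolation looks roughly like this; traceFramesReady is a hypothetical helper, and ATRACE_TAG must be defined before including <utils/Trace.h>, as Tracks.cpp now does:

    #define ATRACE_TAG ATRACE_TAG_AUDIO   // must precede <utils/Trace.h>
    #include <cstddef>
    #include <string>
    #include <utils/Trace.h>

    // Emit a per-instance counter such as "nRdy42", skipping the string work
    // entirely when tracing is disabled.
    static void traceFramesReady(int trackId, size_t framesReady) {
        if (ATRACE_ENABLED()) {
            std::string name("nRdy");
            name += std::to_string(trackId);
            ATRACE_INT(name.c_str(), framesReady);
        }
    }
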
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index fc8aa13..6a9c0e7 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1553,7 +1553,8 @@
                     audio_input_flags_t *flags,
                     pid_t tid,
                     status_t *status /*non-NULL*/,
-                    audio_port_handle_t portId);
+                    audio_port_handle_t portId,
+                    const String16& opPackageName);
 
             status_t    start(RecordTrack* recordTrack,
                               AudioSystem::sync_event_t event,
@@ -1615,7 +1616,7 @@
             void        checkBtNrec();
 
             // Sets the UID records silence
-            void        setRecordSilenced(uid_t uid, bool silenced);
+            void        setRecordSilenced(audio_port_handle_t portId, bool silenced);
 
             status_t    getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
 
@@ -1784,7 +1785,8 @@
     virtual     void        invalidateTracks(audio_stream_type_t streamType __unused) {}
 
                 // Sets the UID records silence
-    virtual     void        setRecordSilenced(uid_t uid __unused, bool silenced __unused) {}
+    virtual     void        setRecordSilenced(audio_port_handle_t portId __unused,
+                                              bool silenced __unused) {}
 
  protected:
                 void        dumpInternals_l(int fd, const Vector<String16>& args) override;
@@ -1871,7 +1873,8 @@
 
                 void           updateMetadata_l() override;
                 void           processVolume_l() override;
-                void           setRecordSilenced(uid_t uid, bool silenced) override;
+                void           setRecordSilenced(audio_port_handle_t portId,
+                                                 bool silenced) override;
 
     virtual     void           toAudioPortConfig(struct audio_port_config *config);
 
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 8f720b5..7cf34c1 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -205,6 +205,16 @@
 protected:
     DISALLOW_COPY_AND_ASSIGN(TrackBase);
 
+    void releaseCblk() {
+        if (mCblk != nullptr) {
+            mCblk->~audio_track_cblk_t();   // destroy our shared-structure.
+            if (mClient == 0) {
+                free(mCblk);
+            }
+            mCblk = nullptr;
+        }
+    }
+
     // AudioBufferProvider interface
     virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer) = 0;
     virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
@@ -215,6 +225,8 @@
 
     uint32_t channelCount() const { return mChannelCount; }
 
+    size_t frameSize() const { return mFrameSize; }
+
     audio_channel_mask_t channelMask() const { return mChannelMask; }
 
     virtual uint32_t sampleRate() const { return mSampleRate; }
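
The releaseCblk() helper above pairs an explicit destructor call with a conditional free(): the control block is placement-constructed either inside client shared memory (storage owned by the client heap, mClient != 0) or in locally malloc'd memory. A self-contained illustration of that placement-new / explicit-destructor pairing, with a hypothetical Cblk type standing in for audio_track_cblk_t:

    #include <cstdlib>
    #include <new>

    struct Cblk {                        // stand-in for audio_track_cblk_t
        ~Cblk() { /* tear down shared state */ }
    };

    int main() {
        // Storage owned elsewhere (e.g. a shared-memory heap): run the destructor only.
        alignas(Cblk) unsigned char shared[sizeof(Cblk)];
        Cblk* a = new (shared) Cblk();
        a->~Cblk();                      // never free storage we do not own

        // Locally allocated storage: destroy, then free.
        void* raw = std::malloc(sizeof(Cblk));
        if (raw == nullptr) return 1;
        Cblk* b = new (raw) Cblk();
        b->~Cblk();
        std::free(raw);
        return 0;
    }
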
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 78db80c..6a999fc 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -18,12 +18,14 @@
 
 #define LOG_TAG "AudioFlinger"
 //#define LOG_NDEBUG 0
+#define ATRACE_TAG ATRACE_TAG_AUDIO
 
 #include "Configuration.h"
 #include <linux/futex.h>
 #include <math.h>
 #include <sys/syscall.h>
 #include <utils/Log.h>
+#include <utils/Trace.h>
 
 #include <private/media/AudioTrackShared.h>
 
@@ -148,7 +150,7 @@
     if (client != 0) {
         mCblkMemory = client->heap()->allocate(size);
         if (mCblkMemory == 0 ||
-                (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) {
+                (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->unsecurePointer())) == NULL) {
             ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
             client->heap()->dump("AudioTrack");
             mCblkMemory.clear();
@@ -170,7 +172,7 @@
             const sp<MemoryDealer> roHeap(thread->readOnlyHeap());
             if (roHeap == 0 ||
                     (mBufferMemory = roHeap->allocate(bufferSize)) == 0 ||
-                    (mBuffer = mBufferMemory->pointer()) == NULL) {
+                    (mBuffer = mBufferMemory->unsecurePointer()) == NULL) {
                 ALOGE("%s(%d): not enough memory for read-only buffer size=%zu",
                         __func__, mId, bufferSize);
                 if (roHeap != 0) {
@@ -185,7 +187,7 @@
         case ALLOC_PIPE:
             mBufferMemory = thread->pipeMemory();
             // mBuffer is the virtual address as seen from current process (mediaserver),
-            // and should normally be coming from mBufferMemory->pointer().
+            // and should normally be coming from mBufferMemory->unsecurePointer().
             // However in this case the TrackBase does not reference the buffer directly.
             // It should references the buffer via the pipe.
             // Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL.
@@ -237,12 +239,7 @@
 {
     // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
     mServerProxy.clear();
-    if (mCblk != NULL) {
-        mCblk->~audio_track_cblk_t();   // destroy our shared-structure.
-        if (mClient == 0) {
-            free(mCblk);
-        }
-    }
+    releaseCblk();
     mCblkMemory.clear();    // free the shared memory before releasing the heap it belongs to
     if (mClient != 0) {
         // Client destructor must run with AudioFlinger client mutex locked
@@ -442,7 +439,7 @@
     return mHasOpPlayAudio.load();
 }
 
-// Note this method is never called (and never to be) for audio server / root track
+// Note this method is never called (and never to be) for audio server / patch record track
 // - not called from constructor due to check on UID,
 // - not called from PlayAudioOpCallback because the callback is not installed in this case
 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::checkPlayAudioForUsage()
@@ -513,7 +510,11 @@
             track_type type,
             audio_port_handle_t portId)
     :   TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
-                  (sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,
+                  // TODO: Using unsecurePointer() has some associated security pitfalls
+                  //       (see declaration for details).
+                  //       Either document why it is safe in this case or address the
+                  //       issue (e.g. by copying).
+                  (sharedBuffer != 0) ? sharedBuffer->unsecurePointer() : buffer,
                   (sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
                   sessionId, creatorPid, uid, true /*isOut*/,
                   (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
@@ -543,12 +544,18 @@
     ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
 
     ALOGV_IF(sharedBuffer != 0, "%s(%d): sharedBuffer: %p, size: %zu",
-            __func__, mId, sharedBuffer->pointer(), sharedBuffer->size());
+            __func__, mId, sharedBuffer->unsecurePointer(), sharedBuffer->size());
 
     if (mCblk == NULL) {
         return;
     }
 
+    if (!thread->isTrackAllowed_l(channelMask, format, sessionId, uid)) {
+        ALOGE("%s(%d): no more tracks available", __func__, mId);
+        releaseCblk(); // this makes the track invalid.
+        return;
+    }
+
     if (sharedBuffer == 0) {
         mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
                 mFrameSize, !isExternalTrack(), sampleRate);
@@ -558,10 +565,6 @@
     }
     mServerProxy = mAudioTrackServerProxy;
 
-    if (!thread->isTrackAllowed_l(channelMask, format, sessionId, uid)) {
-        ALOGE("%s(%d): no more tracks available", __func__, mId);
-        return;
-    }
     // only allocate a fast track index if we were able to allocate a normal track name
     if (flags & AUDIO_OUTPUT_FLAG_FAST) {
         // FIXME: Not calling framesReadyIsCalledByMultipleThreads() exposes a potential
@@ -1828,9 +1831,19 @@
     ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
     Proxy::Buffer buf;
     buf.mFrameCount = buffer->frameCount;
+    if (ATRACE_ENABLED()) {
+        std::string traceName("PTnReq");
+        traceName += std::to_string(id());
+        ATRACE_INT(traceName.c_str(), buf.mFrameCount);
+    }
     status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
     ALOGV_IF(status != NO_ERROR, "%s(%d): getNextBuffer status %d", __func__, mId, status);
     buffer->frameCount = buf.mFrameCount;
+    if (ATRACE_ENABLED()) {
+        std::string traceName("PTnObt");
+        traceName += std::to_string(id());
+        ATRACE_INT(traceName.c_str(), buf.mFrameCount);
+    }
     if (buf.mFrameCount == 0) {
         return WOULD_BLOCK;
     }
@@ -1883,6 +1896,105 @@
 // ----------------------------------------------------------------------------
 //      Record
 // ----------------------------------------------------------------------------
+
+
+// ----------------------------------------------------------------------------
+//      AppOp for audio recording
+// -------------------------------
+
+#undef LOG_TAG
+#define LOG_TAG "AF::OpRecordAudioMonitor"
+
+// static
+sp<AudioFlinger::RecordThread::OpRecordAudioMonitor>
+AudioFlinger::RecordThread::OpRecordAudioMonitor::createIfNeeded(
+            uid_t uid, const String16& opPackageName)
+{
+    if (isServiceUid(uid)) {
+        ALOGV("not silencing record for service uid:%d pack:%s",
+                uid, String8(opPackageName).string());
+        return nullptr;
+    }
+
+    if (opPackageName.size() == 0) {
+        Vector<String16> packages;
+        // no package name, happens with SL ES clients
+        // query package manager to find one
+        PermissionController permissionController;
+        permissionController.getPackagesForUid(uid, packages);
+        if (packages.isEmpty()) {
+            return nullptr;
+        } else {
+            ALOGV("using pack:%s for uid:%d", String8(packages[0]).string(), uid);
+            return new OpRecordAudioMonitor(uid, packages[0]);
+        }
+    }
+
+    return new OpRecordAudioMonitor(uid, opPackageName);
+}
+
+AudioFlinger::RecordThread::OpRecordAudioMonitor::OpRecordAudioMonitor(
+        uid_t uid, const String16& opPackageName)
+        : mHasOpRecordAudio(true), mUid(uid), mPackage(opPackageName)
+{
+}
+
+AudioFlinger::RecordThread::OpRecordAudioMonitor::~OpRecordAudioMonitor()
+{
+    if (mOpCallback != 0) {
+        mAppOpsManager.stopWatchingMode(mOpCallback);
+    }
+    mOpCallback.clear();
+}
+
+void AudioFlinger::RecordThread::OpRecordAudioMonitor::onFirstRef()
+{
+    checkRecordAudio();
+    mOpCallback = new RecordAudioOpCallback(this);
+    ALOGV("start watching OP_RECORD_AUDIO for pack:%s", String8(mPackage).string());
+    mAppOpsManager.startWatchingMode(AppOpsManager::OP_RECORD_AUDIO, mPackage, mOpCallback);
+}
+
+bool AudioFlinger::RecordThread::OpRecordAudioMonitor::hasOpRecordAudio() const {
+    return mHasOpRecordAudio.load();
+}
+
+// Called by RecordAudioOpCallback when OP_RECORD_AUDIO is updated in AppOp callback
+// and in onFirstRef()
+// Note this method is never called (and never to be) for audio server / root track
+// due to the UID check in createIfNeeded(). As a result, for those record tracks, it's:
+// - not called from constructor,
+// - not called from RecordAudioOpCallback because the callback is not installed in this case
+void AudioFlinger::RecordThread::OpRecordAudioMonitor::checkRecordAudio()
+{
+    const int32_t mode = mAppOpsManager.checkOp(AppOpsManager::OP_RECORD_AUDIO,
+            mUid, mPackage);
+    const bool hasIt = (mode == AppOpsManager::MODE_ALLOWED);
+    // avoid verbose logging: only log when the AppOp state actually changed
+    ALOGI_IF(hasIt != mHasOpRecordAudio.load(),
+            "OP_RECORD_AUDIO missing, %ssilencing record uid%d pack:%s",
+            hasIt ? "un" : "", mUid, String8(mPackage).string());
+    mHasOpRecordAudio.store(hasIt);
+}
+
+AudioFlinger::RecordThread::OpRecordAudioMonitor::RecordAudioOpCallback::RecordAudioOpCallback(
+        const wp<OpRecordAudioMonitor>& monitor) : mMonitor(monitor)
+{ }
+
+void AudioFlinger::RecordThread::OpRecordAudioMonitor::RecordAudioOpCallback::opChanged(int32_t op,
+            const String16& packageName) {
+    UNUSED(packageName);
+    if (op != AppOpsManager::OP_RECORD_AUDIO) {
+        return;
+    }
+    sp<OpRecordAudioMonitor> monitor = mMonitor.promote();
+    if (monitor != NULL) {
+        monitor->checkRecordAudio();
+    }
+}
+
+
+
 #undef LOG_TAG
 #define LOG_TAG "AF::RecordHandle"
 
@@ -1954,6 +2066,7 @@
             uid_t uid,
             audio_input_flags_t flags,
             track_type type,
+            const String16& opPackageName,
             audio_port_handle_t portId)
     :   TrackBase(thread, client, attr, sampleRate, format,
                   channelMask, frameCount, buffer, bufferSize, sessionId,
@@ -1967,7 +2080,8 @@
         mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
         mRecordBufferConverter(NULL),
         mFlags(flags),
-        mSilenced(false)
+        mSilenced(false),
+        mOpRecordAudioMonitor(OpRecordAudioMonitor::createIfNeeded(uid, opPackageName))
 {
     if (mCblk == NULL) {
         return;
@@ -2218,6 +2332,14 @@
     mServerLatencyMs.store(latencyMs);
 }
 
+bool AudioFlinger::RecordThread::RecordTrack::isSilenced() const {
+    if (mSilenced) {
+        return true;
+    }
+    // The monitor is only created for record tracks that can be silenced.
+    return mOpRecordAudioMonitor ? !mOpRecordAudioMonitor->hasOpRecordAudio() : false;
+}
+
 status_t AudioFlinger::RecordThread::RecordTrack::getActiveMicrophones(
         std::vector<media::MicrophoneInfo>* activeMicrophones)
 {
@@ -2268,7 +2390,7 @@
                 audio_attributes_t{} /* currently unused for patch track */,
                 sampleRate, format, channelMask, frameCount,
                 buffer, bufferSize, AUDIO_SESSION_NONE, getpid(), AID_AUDIOSERVER,
-                flags, TYPE_PATCH),
+                flags, TYPE_PATCH, String16()),
         PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true),
                        *recordThread, timeout)
 {
@@ -2294,6 +2416,11 @@
     ALOGV_IF(status != NO_ERROR,
              "%s(%d): mPeerProxy->obtainBuffer status %d", __func__, mId, status);
     buffer->frameCount = buf.mFrameCount;
+    if (ATRACE_ENABLED()) {
+        std::string traceName("PRnObt");
+        traceName += std::to_string(id());
+        ATRACE_INT(traceName.c_str(), buf.mFrameCount);
+    }
     if (buf.mFrameCount == 0) {
         return WOULD_BLOCK;
     }
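
Taken together, RecordTrack::isSilenced() above becomes the single decision point for silencing: a track is silenced either explicitly or because its OpRecordAudioMonitor reports OP_RECORD_AUDIO as denied, and the RecordThread hunk then zeroes the sink using the track's own frame size. A compact sketch of that decision and its consumer; the types and helper below are illustrative only:

    #include <atomic>
    #include <cstddef>
    #include <cstring>

    struct SilenceState {
        bool explicitlySilenced = false;
        // Null when the track cannot be silenced via AppOps (e.g. service-UID tracks).
        const std::atomic_bool* hasOpRecordAudio = nullptr;

        bool isSilenced() const {
            if (explicitlySilenced) return true;
            return hasOpRecordAudio ? !hasOpRecordAudio->load() : false;
        }
    };

    // Capture path: overwrite the destination with silence when required,
    // sized by the track's frame size rather than the thread's.
    static void sanitize(void* sink, size_t framesOut, size_t frameSize, const SilenceState& s) {
        if (s.isSilenced()) {
            std::memset(sink, 0, framesOut * frameSize);
        }
    }
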
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 30f29d6..35126ad 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -258,7 +258,7 @@
     virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
                 std::vector<audio_format_t> *formats) = 0;
 
-    virtual void     setAppState(uid_t uid, app_state_t state) = 0;
+    virtual void     setAppState(audio_port_handle_t portId, app_state_t state) = 0;
 
     virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) = 0;
 
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 605fc1c..630efc1 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -43,14 +43,6 @@
 #define MAX_MIXER_CHANNEL_COUNT FCC_8
 
 /**
- * A device mask for all audio input and output devices where matching inputs/outputs on device
- * type alone is not enough: the address must match too
- */
-#define APM_AUDIO_DEVICE_OUT_MATCH_ADDRESS_ALL (AUDIO_DEVICE_OUT_REMOTE_SUBMIX|AUDIO_DEVICE_OUT_BUS)
-
-#define APM_AUDIO_DEVICE_IN_MATCH_ADDRESS_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_BUS)
-
-/**
  * Alias to AUDIO_DEVICE_OUT_DEFAULT defined for clarification when this value is used by volume
  * control APIs (e.g setStreamVolumeIndex().
  */
@@ -71,6 +63,34 @@
 }
 
 /**
+ * Check whether the output device type is one
+ * where addresses are used to distinguish between one connected device and another
+ *
+ * @param[in] device to consider
+ *
+ * @return true if the device type distinguishes on address, false otherwise.
+ */
+static inline bool apm_audio_out_device_distinguishes_on_address(audio_devices_t device)
+{
+    return device == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ||
+           device == AUDIO_DEVICE_OUT_BUS;
+}
+
+/**
+ * Check whether the input device type is one
+ * where addresses are used to distinguish between one connected device and another
+ *
+ * @param[in] device to consider
+ *
+ * @return true if the device type distinguishes on address, false otherwise.
+ */
+static inline bool apm_audio_in_device_distinguishes_on_address(audio_devices_t device)
+{
+    return device == AUDIO_DEVICE_IN_REMOTE_SUBMIX ||
+           device == AUDIO_DEVICE_IN_BUS;
+}
+
+/**
  * Check whether the device type is one
  * where addresses are used to distinguish between one connected device and another
  *
@@ -80,10 +100,8 @@
  */
 static inline bool device_distinguishes_on_address(audio_devices_t device)
 {
-    return (((device & AUDIO_DEVICE_BIT_IN) != 0) &&
-            ((~AUDIO_DEVICE_BIT_IN & device & APM_AUDIO_DEVICE_IN_MATCH_ADDRESS_ALL) != 0)) ||
-           (((device & AUDIO_DEVICE_BIT_IN) == 0) &&
-            ((device & APM_AUDIO_DEVICE_OUT_MATCH_ADDRESS_ALL) != 0));
+    return apm_audio_in_device_distinguishes_on_address(device) ||
+           apm_audio_out_device_distinguishes_on_address(device);
 }
 
 /**
@@ -95,10 +113,7 @@
  */
 static inline bool device_has_encoding_capability(audio_devices_t device)
 {
-    if (device & AUDIO_DEVICE_OUT_ALL_A2DP) {
-        return true;
-    }
-    return false;
+    return audio_is_a2dp_out_device(device);
 }
 
 /**
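
With the APM_AUDIO_DEVICE_*_MATCH_ADDRESS_ALL masks removed, address matching is expressed per direction and device_distinguishes_on_address() reduces to the disjunction of the two predicates. A short usage sketch (needsAddressMatch is hypothetical; the device constants come from system/audio.h and the helpers from policy.h above):

    #include <system/audio.h>
    #include "policy.h"   // declares the apm_*_distinguishes_on_address helpers

    // Call sites now test concrete device types instead of AND-ing against a mask;
    // this is exactly what device_distinguishes_on_address() does after this change.
    static bool needsAddressMatch(audio_devices_t device) {
        return apm_audio_in_device_distinguishes_on_address(device) ||
               apm_audio_out_device_distinguishes_on_address(device);
    }

    // needsAddressMatch(AUDIO_DEVICE_OUT_BUS)          -> true
    // needsAddressMatch(AUDIO_DEVICE_IN_REMOTE_SUBMIX) -> true
    // needsAddressMatch(AUDIO_DEVICE_OUT_SPEAKER)      -> false
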
diff --git a/services/audiopolicy/common/managerdefinitions/Android.bp b/services/audiopolicy/common/managerdefinitions/Android.bp
index f02f3cf..fad3c5b 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.bp
+++ b/services/audiopolicy/common/managerdefinitions/Android.bp
@@ -3,24 +3,24 @@
 
     srcs: [
         "src/AudioCollections.cpp",
-        "src/AudioGain.cpp",
         "src/AudioInputDescriptor.cpp",
         "src/AudioOutputDescriptor.cpp",
         "src/AudioPatch.cpp",
         "src/AudioPolicyMix.cpp",
-        "src/AudioPort.cpp",
-        "src/AudioProfile.cpp",
+        "src/AudioProfileVectorHelper.cpp",
         "src/AudioRoute.cpp",
         "src/ClientDescriptor.cpp",
         "src/DeviceDescriptor.cpp",
         "src/EffectDescriptor.cpp",
         "src/HwModule.cpp",
         "src/IOProfile.cpp",
+        "src/PolicyAudioPort.cpp",
         "src/Serializer.cpp",
         "src/SoundTriggerSession.cpp",
         "src/TypeConverter.cpp",
     ],
     shared_libs: [
+        "libaudiofoundation",
         "libcutils",
         "libhidlbase",
         "liblog",
@@ -28,7 +28,10 @@
         "libutils",
         "libxml2",
     ],
-    export_shared_lib_headers: ["libmedia"],
+    export_shared_lib_headers: [
+        "libaudiofoundation",
+        "libmedia",
+    ],
     static_libs: [
         "libaudioutils",
     ],
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioCollections.h b/services/audiopolicy/common/managerdefinitions/include/AudioCollections.h
index a948ea9..b692592 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioCollections.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioCollections.h
@@ -25,20 +25,15 @@
 
 namespace android {
 
-class AudioPort;
+class PolicyAudioPort;
 class AudioRoute;
 
-class AudioPortVector : public Vector<sp<AudioPort> >
-{
-public:
-    sp<AudioPort> findByTagName(const String8 &tagName) const;
-};
+using PolicyAudioPortVector = Vector<sp<PolicyAudioPort>>;
+using AudioRouteVector = Vector<sp<AudioRoute>>;
 
+sp<PolicyAudioPort> findByTagName(const PolicyAudioPortVector& policyAudioPortVector,
+                                  const std::string &tagName);
 
-class AudioRouteVector : public Vector<sp<AudioRoute> >
-{
-public:
-    void dump(String8 *dst, int spaces) const;
-};
+void dumpAudioRouteVector(const AudioRouteVector& audioRouteVector, String8 *dst, int spaces);
 
 } // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 37f9d14..c67a006 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -21,11 +21,11 @@
 #include <utils/SortedVector.h>
 #include <utils/KeyedVector.h>
 #include "AudioIODescriptorInterface.h"
-#include "AudioPort.h"
 #include "ClientDescriptor.h"
 #include "DeviceDescriptor.h"
 #include "EffectDescriptor.h"
 #include "IOProfile.h"
+#include "PolicyAudioPort.h"
 
 namespace android {
 
@@ -34,13 +34,17 @@
 
 // descriptor for audio inputs. Used to maintain current configuration of each opened audio input
 // and keep track of the usage of this input.
-class AudioInputDescriptor: public AudioPortConfig, public AudioIODescriptorInterface
-    , public ClientMapHandler<RecordClientDescriptor>
+class AudioInputDescriptor: public AudioPortConfig,
+        public PolicyAudioPortConfig,
+        public AudioIODescriptorInterface,
+        public ClientMapHandler<RecordClientDescriptor>
 {
 public:
-    explicit AudioInputDescriptor(const sp<IOProfile>& profile,
-                                  AudioPolicyClientInterface *clientInterface);
-    audio_port_handle_t getId() const;
+    AudioInputDescriptor(const sp<IOProfile>& profile,
+                         AudioPolicyClientInterface *clientInterface);
+
+    virtual ~AudioInputDescriptor() = default;
+
     audio_module_handle_t getModuleHandle() const;
 
     audio_devices_t getDeviceType() const { return (mDevice != nullptr) ?
@@ -56,9 +60,18 @@
     wp<AudioPolicyMix>  mPolicyMix;                   // non NULL when used by a dynamic policy
     const sp<IOProfile> mProfile;                     // I/O profile this output derives from
 
+    // PolicyAudioPortConfig
+    virtual sp<PolicyAudioPort> getPolicyAudioPort() const {
+        return mProfile;
+    }
+
+    // AudioPortConfig
+    virtual status_t applyAudioPortConfig(const struct audio_port_config *config,
+                                          struct audio_port_config *backupConfig = NULL);
     virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
             const struct audio_port_config *srcConfig = NULL) const;
     virtual sp<AudioPort> getAudioPort() const { return mProfile; }
+
     void toAudioPort(struct audio_port *port) const;
     void setPreemptedSessions(const SortedVector<audio_session_t>& sessions);
     SortedVector<audio_session_t> getPreemptedSessions() const;
@@ -97,7 +110,7 @@
     RecordClientVector clientsList(bool activeOnly = false,
         audio_source_t source = AUDIO_SOURCE_DEFAULT, bool preferredDeviceOnly = false) const;
 
-    void setAppState(uid_t uid, app_state_t state);
+    void setAppState(audio_port_handle_t portId, app_state_t state);
 
     // implementation of ClientMapHandler<RecordClientDescriptor>
     void addClient(const sp<RecordClientDescriptor> &client) override;
@@ -111,7 +124,6 @@
     void updateClientRecordingConfiguration(int event, const sp<RecordClientDescriptor>& client);
 
     audio_patch_handle_t mPatchHandle = AUDIO_PATCH_HANDLE_NONE;
-    audio_port_handle_t  mId = AUDIO_PORT_HANDLE_NONE;
     sp<DeviceDescriptor> mDevice = nullptr; /**< current device this input is routed to */
 
     // Because a preemptible capture session can preempt another one, we end up in an endless loop
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index cd54085..96fec4a 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -26,9 +26,9 @@
 #include <utils/KeyedVector.h>
 #include <system/audio.h>
 #include "AudioIODescriptorInterface.h"
-#include "AudioPort.h"
 #include "ClientDescriptor.h"
 #include "DeviceDescriptor.h"
+#include "PolicyAudioPort.h"
 #include <vector>
 
 namespace android {
@@ -138,18 +138,19 @@
 
 // descriptor for audio outputs. Used to maintain current configuration of each opened audio output
 // and keep track of the usage of this output by each audio stream type.
-class AudioOutputDescriptor: public AudioPortConfig, public AudioIODescriptorInterface
-    , public ClientMapHandler<TrackClientDescriptor>
+class AudioOutputDescriptor: public AudioPortConfig,
+        public PolicyAudioPortConfig,
+        public AudioIODescriptorInterface,
+        public ClientMapHandler<TrackClientDescriptor>
 {
 public:
-    AudioOutputDescriptor(const sp<AudioPort>& port,
+    AudioOutputDescriptor(const sp<PolicyAudioPort>& policyAudioPort,
                           AudioPolicyClientInterface *clientInterface);
     virtual ~AudioOutputDescriptor() {}
 
     void dump(String8 *dst) const override;
     void        log(const char* indent);
 
-    audio_port_handle_t getId() const;
     virtual DeviceVector devices() const { return mDevices; }
     bool sharesHwModuleWith(const sp<AudioOutputDescriptor>& outputDesc);
     virtual DeviceVector supportedDevices() const  { return mDevices; }
@@ -245,9 +246,19 @@
         mRoutingActivities[ps].setMutedByDevice(isMuted);
     }
 
+    // PolicyAudioPortConfig
+    virtual sp<PolicyAudioPort> getPolicyAudioPort() const
+    {
+        return mPolicyAudioPort;
+    }
+
+    // AudioPortConfig
+    virtual status_t applyAudioPortConfig(const struct audio_port_config *config,
+                                          struct audio_port_config *backupConfig = NULL);
     virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
                            const struct audio_port_config *srcConfig = NULL) const;
-    virtual sp<AudioPort> getAudioPort() const { return mPort; }
+    virtual sp<AudioPort> getAudioPort() const { return mPolicyAudioPort->asAudioPort(); }
+
     virtual void toAudioPort(struct audio_port *port) const;
 
     audio_module_handle_t getModuleHandle() const;
@@ -289,11 +300,10 @@
     wp<AudioPolicyMix> mPolicyMix;  // non NULL when used by a dynamic policy
 
 protected:
-    const sp<AudioPort> mPort;
+    const sp<PolicyAudioPort> mPolicyAudioPort;
     AudioPolicyClientInterface * const mClientInterface;
     uint32_t mGlobalActiveCount = 0;  // non-client-specific active count
     audio_patch_handle_t mPatchHandle = AUDIO_PATCH_HANDLE_NONE;
-    audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
 
     // The ActiveClients shows the clients that contribute to the @VolumeSource counts
     // and may include upstream clients from a duplicating thread.
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index 0776a8d..56596f5 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -19,17 +19,17 @@
 #include <unordered_map>
 #include <unordered_set>
 
-#include <AudioGain.h>
-#include <AudioPort.h>
 #include <AudioPatch.h>
 #include <DeviceDescriptor.h>
 #include <IOProfile.h>
 #include <HwModule.h>
+#include <PolicyAudioPort.h>
 #include <AudioInputDescriptor.h>
 #include <AudioOutputDescriptor.h>
 #include <AudioPolicyMix.h>
 #include <EffectDescriptor.h>
 #include <SoundTriggerSession.h>
+#include <media/AudioProfile.h>
 
 namespace android {
 
@@ -119,9 +119,9 @@
         mSource = "AudioPolicyConfig::setDefault";
         mEngineLibraryNameSuffix = kDefaultEngineLibraryNameSuffix;
         mDefaultOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER);
-        mDefaultOutputDevice->addAudioProfile(AudioProfile::createFullDynamic());
+        mDefaultOutputDevice->addAudioProfile(AudioProfile::createFullDynamic(gDynamicFormat));
         sp<DeviceDescriptor> defaultInputDevice = new DeviceDescriptor(AUDIO_DEVICE_IN_BUILTIN_MIC);
-        defaultInputDevice->addAudioProfile(AudioProfile::createFullDynamic());
+        defaultInputDevice->addAudioProfile(AudioProfile::createFullDynamic(gDynamicFormat));
         sp<AudioProfile> micProfile = new AudioProfile(
                 AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_MONO, 8000);
         defaultInputDevice->addAudioProfile(micProfile);
@@ -133,14 +133,14 @@
         mDefaultOutputDevice->attach(module);
         defaultInputDevice->attach(module);
 
-        sp<OutputProfile> outProfile = new OutputProfile(String8("primary"));
+        sp<OutputProfile> outProfile = new OutputProfile("primary");
         outProfile->addAudioProfile(
                 new AudioProfile(AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 44100));
         outProfile->addSupportedDevice(mDefaultOutputDevice);
         outProfile->setFlags(AUDIO_OUTPUT_FLAG_PRIMARY);
         module->addOutputProfile(outProfile);
 
-        sp<InputProfile> inProfile = new InputProfile(String8("primary"));
+        sp<InputProfile> inProfile = new InputProfile("primary");
         inProfile->addAudioProfile(micProfile);
         inProfile->addSupportedDevice(defaultInputDevice);
         module->addInputProfile(inProfile);
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
deleted file mode 100644
index d906f11..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "AudioCollections.h"
-#include "AudioProfile.h"
-#include "AudioGain.h"
-#include "HandleGenerator.h"
-#include <utils/String8.h>
-#include <utils/Vector.h>
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
-#include <system/audio.h>
-#include <cutils/config_utils.h>
-
-namespace android {
-
-class HwModule;
-class AudioRoute;
-
-class AudioPort : public virtual RefBase, private HandleGenerator<audio_port_handle_t>
-{
-public:
-    AudioPort(const String8& name, audio_port_type_t type,  audio_port_role_t role) :
-        mName(name), mType(type), mRole(role), mFlags(AUDIO_OUTPUT_FLAG_NONE) {}
-
-    virtual ~AudioPort() {}
-
-    void setName(const String8 &name) { mName = name; }
-    const String8 &getName() const { return mName; }
-
-    audio_port_type_t getType() const { return mType; }
-    audio_port_role_t getRole() const { return mRole; }
-
-    virtual const String8 getTagName() const = 0;
-
-    void setGains(const AudioGains &gains) { mGains = gains; }
-    const AudioGains &getGains() const { return mGains; }
-
-    virtual void setFlags(uint32_t flags)
-    {
-        //force direct flag if offload flag is set: offloading implies a direct output stream
-        // and all common behaviors are driven by checking only the direct flag
-        // this should normally be set appropriately in the policy configuration file
-        if (mRole == AUDIO_PORT_ROLE_SOURCE && (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
-            flags |= AUDIO_OUTPUT_FLAG_DIRECT;
-        }
-        mFlags = flags;
-    }
-    uint32_t getFlags() const { return mFlags; }
-
-    virtual void attach(const sp<HwModule>& module);
-    virtual void detach();
-    bool isAttached() { return mModule != 0; }
-
-    // Audio port IDs are in a different namespace than AudioFlinger unique IDs
-    static audio_port_handle_t getNextUniqueId();
-
-    virtual void toAudioPort(struct audio_port *port) const;
-
-    virtual void importAudioPort(const sp<AudioPort>& port, bool force = false);
-
-    void addAudioProfile(const sp<AudioProfile> &profile) { mProfiles.add(profile); }
-
-    void setAudioProfiles(const AudioProfileVector &profiles) { mProfiles = profiles; }
-    AudioProfileVector &getAudioProfiles() { return mProfiles; }
-
-    bool hasValidAudioProfile() const { return mProfiles.hasValidProfile(); }
-
-    bool hasDynamicAudioProfile() const { return mProfiles.hasDynamicProfile(); }
-
-    // searches for an exact match
-    virtual status_t checkExactAudioProfile(const struct audio_port_config *config) const;
-
-    // searches for a compatible match, currently implemented for input
-    // parameters are input|output, returned value is the best match.
-    status_t checkCompatibleAudioProfile(uint32_t &samplingRate,
-                                         audio_channel_mask_t &channelMask,
-                                         audio_format_t &format) const
-    {
-        return mProfiles.checkCompatibleProfile(samplingRate, channelMask, format, mType, mRole);
-    }
-
-    void clearAudioProfiles() { return mProfiles.clearProfiles(); }
-
-    status_t checkGain(const struct audio_gain_config *gainConfig, int index) const;
-
-    void pickAudioProfile(uint32_t &samplingRate,
-                          audio_channel_mask_t &channelMask,
-                          audio_format_t &format) const;
-
-    static const audio_format_t sPcmFormatCompareTable[];
-
-    static int compareFormats(audio_format_t format1, audio_format_t format2);
-
-    // Used to select an audio HAL output stream with a sample format providing the
-    // less degradation for a given AudioTrack sample format.
-    static bool isBetterFormatMatch(audio_format_t newFormat,
-                                        audio_format_t currentFormat,
-                                        audio_format_t targetFormat);
-    static uint32_t formatDistance(audio_format_t format1,
-                                   audio_format_t format2);
-    static const uint32_t kFormatDistanceMax = 4;
-
-    audio_module_handle_t getModuleHandle() const;
-    uint32_t getModuleVersionMajor() const;
-    const char *getModuleName() const;
-    sp<HwModule> getModule() const { return mModule; }
-
-    bool useInputChannelMask() const
-    {
-        return ((mType == AUDIO_PORT_TYPE_DEVICE) && (mRole == AUDIO_PORT_ROLE_SOURCE)) ||
-                ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SINK));
-    }
-
-    inline bool isDirectOutput() const
-    {
-        return (mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
-                (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD));
-    }
-
-    void addRoute(const sp<AudioRoute> &route) { mRoutes.add(route); }
-    const AudioRouteVector &getRoutes() const { return mRoutes; }
-
-    void dump(String8 *dst, int spaces, bool verbose = true) const;
-
-    void log(const char* indent) const;
-
-    AudioGains mGains; // gain controllers
-
-private:
-    void pickChannelMask(audio_channel_mask_t &channelMask, const ChannelsVector &channelMasks) const;
-    void pickSamplingRate(uint32_t &rate,const SampleRateVector &samplingRates) const;
-
-    sp<HwModule> mModule;                 // audio HW module exposing this I/O stream
-    String8  mName;
-    audio_port_type_t mType;
-    audio_port_role_t mRole;
-    uint32_t mFlags; // attribute flags mask (e.g primary output, direct output...).
-    AudioProfileVector mProfiles; // AudioProfiles supported by this port (format, Rates, Channels)
-    AudioRouteVector mRoutes; // Routes involving this port
-};
-
-class AudioPortConfig : public virtual RefBase
-{
-public:
-    status_t applyAudioPortConfig(const struct audio_port_config *config,
-                                  struct audio_port_config *backupConfig = NULL);
-    virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
-                                   const struct audio_port_config *srcConfig = NULL) const = 0;
-    virtual sp<AudioPort> getAudioPort() const = 0;
-    virtual bool hasSameHwModuleAs(const sp<AudioPortConfig>& other) const {
-        return (other != 0) && (other->getAudioPort() != 0) && (getAudioPort() != 0) &&
-                (other->getAudioPort()->getModuleHandle() == getAudioPort()->getModuleHandle());
-    }
-    bool hasGainController(bool canUseForVolume = false) const;
-
-    unsigned int mSamplingRate = 0u;
-    audio_format_t mFormat = AUDIO_FORMAT_INVALID;
-    audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
-    struct audio_gain_config mGain = { .index = -1 };
-    union audio_io_flags mFlags = { AUDIO_INPUT_FLAG_NONE };
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
deleted file mode 100644
index b588d57..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <vector>
-
-#include <system/audio.h>
-#include <utils/RefBase.h>
-#include <utils/SortedVector.h>
-#include <utils/String8.h>
-
-#include "policy.h"
-
-namespace android {
-
-typedef SortedVector<uint32_t> SampleRateVector;
-typedef Vector<audio_format_t> FormatVector;
-
-template <typename T>
-bool operator== (const SortedVector<T> &left, const SortedVector<T> &right)
-{
-    if (left.size() != right.size()) {
-        return false;
-    }
-    for (size_t index = 0; index < right.size(); index++) {
-        if (left[index] != right[index]) {
-            return false;
-        }
-    }
-    return true;
-}
-
-template <typename T>
-bool operator!= (const SortedVector<T> &left, const SortedVector<T> &right)
-{
-    return !(left == right);
-}
-
-class ChannelsVector : public SortedVector<audio_channel_mask_t>
-{
-public:
-    ChannelsVector() = default;
-    ChannelsVector(const ChannelsVector&) = default;
-    ChannelsVector(const SortedVector<audio_channel_mask_t>& sv) :
-            SortedVector<audio_channel_mask_t>(sv) {}
-    ChannelsVector& operator=(const ChannelsVector&) = default;
-
-    // Applies audio_channel_mask_out_to_in to all elements and returns the result.
-    ChannelsVector asInMask() const;
-    // Applies audio_channel_mask_in_to_out to all elements and returns the result.
-    ChannelsVector asOutMask() const;
-};
-
-class AudioProfile : public virtual RefBase
-{
-public:
-    static sp<AudioProfile> createFullDynamic();
-
-    AudioProfile(audio_format_t format, audio_channel_mask_t channelMasks, uint32_t samplingRate);
-    AudioProfile(audio_format_t format,
-                 const ChannelsVector &channelMasks,
-                 const SampleRateVector &samplingRateCollection);
-
-    audio_format_t getFormat() const { return mFormat; }
-    const ChannelsVector &getChannels() const { return mChannelMasks; }
-    const SampleRateVector &getSampleRates() const { return mSamplingRates; }
-    void setChannels(const ChannelsVector &channelMasks);
-    void setSampleRates(const SampleRateVector &sampleRates);
-
-    void clear();
-    bool isValid() const { return hasValidFormat() && hasValidRates() && hasValidChannels(); }
-    bool supportsChannels(audio_channel_mask_t channels) const
-    {
-        return mChannelMasks.indexOf(channels) >= 0;
-    }
-    bool supportsRate(uint32_t rate) const { return mSamplingRates.indexOf(rate) >= 0; }
-
-    status_t checkExact(uint32_t rate, audio_channel_mask_t channels, audio_format_t format) const;
-    status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask,
-                                        audio_channel_mask_t &updatedChannelMask,
-                                        audio_port_type_t portType,
-                                        audio_port_role_t portRole) const;
-    status_t checkCompatibleSamplingRate(uint32_t samplingRate,
-                                         uint32_t &updatedSamplingRate) const;
-
-    bool hasValidFormat() const { return mFormat != AUDIO_FORMAT_DEFAULT; }
-    bool hasValidRates() const { return !mSamplingRates.isEmpty(); }
-    bool hasValidChannels() const { return !mChannelMasks.isEmpty(); }
-
-    void setDynamicChannels(bool dynamic) { mIsDynamicChannels = dynamic; }
-    bool isDynamicChannels() const { return mIsDynamicChannels; }
-
-    void setDynamicRate(bool dynamic) { mIsDynamicRate = dynamic; }
-    bool isDynamicRate() const { return mIsDynamicRate; }
-
-    void setDynamicFormat(bool dynamic) { mIsDynamicFormat = dynamic; }
-    bool isDynamicFormat() const { return mIsDynamicFormat; }
-
-    bool isDynamic() { return mIsDynamicFormat || mIsDynamicChannels || mIsDynamicRate; }
-
-    void dump(String8 *dst, int spaces) const;
-
-private:
-    String8  mName;
-    audio_format_t mFormat;
-    ChannelsVector mChannelMasks;
-    SampleRateVector mSamplingRates;
-
-    bool mIsDynamicFormat = false;
-    bool mIsDynamicChannels = false;
-    bool mIsDynamicRate = false;
-};
-
-
-class AudioProfileVector : public Vector<sp<AudioProfile> >
-{
-public:
-    ssize_t add(const sp<AudioProfile> &profile);
-    // This API is intended to be used by the policy manager once retrieving capabilities
-    // for a profile with dynamic format, rate and channels attributes
-    ssize_t addProfileFromHal(const sp<AudioProfile> &profileToAdd);
-
-    status_t checkExactProfile(uint32_t samplingRate, audio_channel_mask_t channelMask,
-                               audio_format_t format) const;
-    status_t checkCompatibleProfile(uint32_t &samplingRate, audio_channel_mask_t &channelMask,
-                                    audio_format_t &format,
-                                    audio_port_type_t portType,
-                                    audio_port_role_t portRole) const;
-    void clearProfiles();
-    // Assuming that this profile vector contains input profiles,
-    // find the best matching config from 'outputProfiles', according to
-    // the given preferences for audio formats and channel masks.
-    // Note: std::vectors are used because specialized containers for formats
-    //       and channels can be sorted and use their own ordering.
-    status_t findBestMatchingOutputConfig(const AudioProfileVector& outputProfiles,
-            const std::vector<audio_format_t>& preferredFormats, // order: most pref -> least pref
-            const std::vector<audio_channel_mask_t>& preferredOutputChannels,
-            bool preferHigherSamplingRates,
-            audio_config_base *bestOutputConfig) const;
-
-    sp<AudioProfile> getFirstValidProfile() const;
-    sp<AudioProfile> getFirstValidProfileFor(audio_format_t format) const;
-    bool hasValidProfile() const { return getFirstValidProfile() != 0; }
-
-    FormatVector getSupportedFormats() const;
-    bool hasDynamicChannelsFor(audio_format_t format) const;
-    bool hasDynamicFormat() const { return getProfileFor(gDynamicFormat) != 0; }
-    bool hasDynamicProfile() const;
-    bool hasDynamicRateFor(audio_format_t format) const;
-
-    // One audio profile will be added for each format supported by Audio HAL
-    void setFormats(const FormatVector &formats);
-
-    void dump(String8 *dst, int spaces) const;
-
-private:
-    sp<AudioProfile> getProfileFor(audio_format_t format) const;
-    void setSampleRatesFor(const SampleRateVector &sampleRates, audio_format_t format);
-    void setChannelsFor(const ChannelsVector &channelMasks, audio_format_t format);
-
-    static int compareFormats(const sp<AudioProfile> *profile1, const sp<AudioProfile> *profile2);
-};
-
-bool operator == (const AudioProfile &left, const AudioProfile &right);
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioProfileVectorHelper.h b/services/audiopolicy/common/managerdefinitions/include/AudioProfileVectorHelper.h
new file mode 100644
index 0000000..f84bda7
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioProfileVectorHelper.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <media/AudioProfile.h>
+#include <system/audio.h>
+
+namespace android {
+
+void sortAudioProfiles(AudioProfileVector &audioProfileVector);
+
+ssize_t addAudioProfileAndSort(AudioProfileVector &audioProfileVector,
+                               const sp<AudioProfile> &profile);
+
+// One audio profile will be added for each format supported by Audio HAL
+void addProfilesForFormats(AudioProfileVector &audioProfileVector,
+                           const FormatVector &formatVector);
+
+// This API is intended to be used by the policy manager once retrieving capabilities
+// for a profile with dynamic format, rate and channels attributes
+void addDynamicAudioProfileAndSort(AudioProfileVector &audioProfileVector,
+                                   const sp<AudioProfile> &profileToAdd);
+
+void appendAudioProfiles(AudioProfileVector &audioProfileVector,
+                         const AudioProfileVector &audioProfileVectorToAppend);
+
+status_t checkExactProfile(const AudioProfileVector &audioProfileVector,
+                           const uint32_t samplingRate,
+                           audio_channel_mask_t channelMask,
+                           audio_format_t format);
+
+status_t checkCompatibleProfile(const AudioProfileVector &audioProfileVector,
+                                uint32_t &samplingRate,
+                                audio_channel_mask_t &channelMask,
+                                audio_format_t &format,
+                                audio_port_type_t portType,
+                                audio_port_role_t portRole);
+
+// Assuming that this profile vector contains input profiles,
+// find the best matching config from 'outputProfiles', according to
+// the given preferences for audio formats and channel masks.
+// Note: std::vectors are used because specialized containers for formats
+//       and channels can be sorted and use their own ordering.
+status_t findBestMatchingOutputConfig(
+        const AudioProfileVector &audioProfileVector,
+        const AudioProfileVector &outputProfileVector,
+        const std::vector<audio_format_t> &preferredFormatVector, // order: most pref -> least pref
+        const std::vector<audio_channel_mask_t> &preferredOutputChannelVector,
+        bool preferHigherSamplingRates,
+        audio_config_base &bestOutputConfig);
+
+
+} // namespace android
\ No newline at end of file
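
AudioProfileVector itself now lives in libaudiofoundation (media/AudioProfile.h), and the policy-specific operations that used to be member methods become the free helpers declared above. Call sites migrate roughly as sketched below; migrateExample and its arguments are illustrative, and gDynamicFormat is the placeholder format the policy headers already define:

    #include <media/AudioProfile.h>
    #include "AudioProfileVectorHelper.h"
    #include "policy.h"   // for gDynamicFormat (assumed; see the AudioPolicyConfig.h hunk above)

    namespace android {

    void migrateExample(AudioProfileVector& profiles, const sp<AudioProfile>& profile) {
        // Before: profiles.add(profile);               (member method on the removed policy class)
        addAudioProfileAndSort(profiles, profile);      // after: free helper keeps the vector sorted

        // Before: AudioProfile::createFullDynamic();   (no format argument)
        sp<AudioProfile> dynamic = AudioProfile::createFullDynamic(gDynamicFormat);
        addDynamicAudioProfileAndSort(profiles, dynamic);
    }

    } // namespace android
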
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h b/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h
index 0357ff4..a7def3e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h
@@ -25,7 +25,7 @@
 namespace android
 {
 
-class AudioPort;
+class PolicyAudioPort;
 class DeviceDescriptor;
 
 typedef enum {
@@ -38,11 +38,11 @@
 public:
     explicit AudioRoute(audio_route_type_t type) : mType(type) {}
 
-    void setSources(const AudioPortVector &sources) { mSources = sources; }
-    const AudioPortVector &getSources() const { return mSources; }
+    void setSources(const PolicyAudioPortVector &sources) { mSources = sources; }
+    const PolicyAudioPortVector &getSources() const { return mSources; }
 
-    void setSink(const sp<AudioPort> &sink) { mSink = sink; }
-    const sp<AudioPort> &getSink() const { return mSink; }
+    void setSink(const sp<PolicyAudioPort> &sink) { mSink = sink; }
+    const sp<PolicyAudioPort> &getSink() const { return mSink; }
 
     audio_route_type_t getType() const { return mType; }
 
@@ -57,13 +57,14 @@
      * @return true if the audio route supports the connection between the sink and the source,
      * false otherwise
      */
-    bool supportsPatch(const sp<AudioPort> &srcPort, const sp<AudioPort> &dstPort) const;
+    bool supportsPatch(const sp<PolicyAudioPort> &srcPort,
+                       const sp<PolicyAudioPort> &dstPort) const;
 
     void dump(String8 *dst, int spaces) const;
 
 private:
-    AudioPortVector mSources;
-    sp<AudioPort> mSink;
+    PolicyAudioPortVector mSources;
+    sp<PolicyAudioPort> mSink;
     audio_route_type_t mType;
 
 };
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 33e506f..eb1bfa7 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -16,7 +16,8 @@
 
 #pragma once
 
-#include "AudioPort.h"
+#include "PolicyAudioPort.h"
+#include <media/DeviceDescriptorBase.h>
 #include <utils/Errors.h>
 #include <utils/String8.h>
 #include <utils/SortedVector.h>
@@ -26,21 +27,22 @@
 
 namespace android {
 
-class DeviceDescriptor : public AudioPort, public AudioPortConfig
+class DeviceDescriptor : public DeviceDescriptorBase,
+                         public PolicyAudioPort, public PolicyAudioPortConfig
 {
 public:
      // Note that empty name refers by convention to a generic device.
-    explicit DeviceDescriptor(audio_devices_t type, const String8 &tagName = String8(""));
+    explicit DeviceDescriptor(audio_devices_t type, const std::string &tagName = "");
     DeviceDescriptor(audio_devices_t type, const FormatVector &encodedFormats,
-            const String8 &tagName = String8(""));
+            const std::string &tagName = "");
 
     virtual ~DeviceDescriptor() {}
 
-    virtual const String8 getTagName() const { return mTagName; }
+    virtual void addAudioProfile(const sp<AudioProfile> &profile) {
+        addAudioProfileAndSort(mProfiles, profile);
+    }
 
-    audio_devices_t type() const { return mDeviceType; }
-    String8 address() const { return mAddress; }
-    void setAddress(const String8 &address) { mAddress = address; }
+    virtual const std::string getTagName() const { return mTagName; }
 
     const FormatVector& encodedFormats() const { return mEncodedFormats; }
 
@@ -56,29 +58,35 @@
 
     bool supportsFormat(audio_format_t format);
 
+    // PolicyAudioPortConfig
+    virtual sp<PolicyAudioPort> getPolicyAudioPort() const {
+        return static_cast<PolicyAudioPort*>(const_cast<DeviceDescriptor*>(this));
+    }
+
     // AudioPortConfig
-    virtual sp<AudioPort> getAudioPort() const { return (AudioPort*) this; }
+    virtual status_t applyAudioPortConfig(const struct audio_port_config *config,
+                                          struct audio_port_config *backupConfig = NULL);
     virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
             const struct audio_port_config *srcConfig = NULL) const;
 
-    // AudioPort
+    // PolicyAudioPort
+    virtual sp<AudioPort> asAudioPort() const {
+        return static_cast<AudioPort*>(const_cast<DeviceDescriptor*>(this));
+    }
     virtual void attach(const sp<HwModule>& module);
     virtual void detach();
 
+    // AudioPort
     virtual void toAudioPort(struct audio_port *port) const;
-    virtual void importAudioPort(const sp<AudioPort>& port, bool force = false);
 
-    audio_port_handle_t getId() const;
+    void importAudioPortAndPickAudioProfile(const sp<PolicyAudioPort>& policyPort,
+                                            bool force = false);
+
     void dump(String8 *dst, int spaces, int index, bool verbose = true) const;
-    void log() const;
-    std::string toString() const;
 
 private:
-    String8 mAddress{""};
-    String8 mTagName; // Unique human readable identifier for a device port found in conf file.
-    audio_devices_t     mDeviceType;
+    std::string mTagName; // Unique human readable identifier for a device port found in conf file.
     FormatVector        mEncodedFormats;
-    audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
     audio_format_t      mCurrentEncodedFormat;
 };
 
@@ -112,10 +120,17 @@
      * equal to AUDIO_PORT_HANDLE_NONE, it also returns a nullptr.
      */
     sp<DeviceDescriptor> getDeviceFromId(audio_port_handle_t id) const;
-    sp<DeviceDescriptor> getDeviceFromTagName(const String8 &tagName) const;
+    sp<DeviceDescriptor> getDeviceFromTagName(const std::string &tagName) const;
     DeviceVector getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
     audio_devices_t getDeviceTypesFromHwModule(audio_module_handle_t moduleHandle) const;
 
+    DeviceVector getFirstDevicesFromTypes(std::vector<audio_devices_t> orderedTypes) const;
+    sp<DeviceDescriptor> getFirstExistingDevice(std::vector<audio_devices_t> orderedTypes) const;
+
+    // If devices with the given type are present and the list of devices to add is not
+    // empty, remove all devices with the given type and add all the given devices.
+    void replaceDevicesByType(audio_devices_t typeToRemove, const DeviceVector &devicesToAdd);
+
     bool contains(const sp<DeviceDescriptor>& item) const { return indexOf(item) >= 0; }
 
     /**
@@ -196,7 +211,7 @@
     {
         for (const auto &device : *this) {
             if (device->address() != "") {
-                return device->address();
+                return String8(device->address().c_str());
             }
         }
         return String8("");
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index eb34da4..028cb63 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -82,19 +82,19 @@
     status_t addInputProfile(const sp<IOProfile> &profile);
     status_t addProfile(const sp<IOProfile> &profile);
 
-    status_t addOutputProfile(const String8& name, const audio_config_t *config,
+    status_t addOutputProfile(const std::string& name, const audio_config_t *config,
             audio_devices_t device, const String8& address);
-    status_t removeOutputProfile(const String8& name);
-    status_t addInputProfile(const String8& name, const audio_config_t *config,
+    status_t removeOutputProfile(const std::string& name);
+    status_t addInputProfile(const std::string& name, const audio_config_t *config,
             audio_devices_t device, const String8& address);
-    status_t removeInputProfile(const String8& name);
+    status_t removeInputProfile(const std::string& name);
 
     audio_module_handle_t getHandle() const { return mHandle; }
     void setHandle(audio_module_handle_t handle);
 
-    sp<AudioPort> findPortByTagName(const String8 &tagName) const
+    sp<PolicyAudioPort> findPortByTagName(const std::string &tagName) const
     {
-        return mPorts.findByTagName(tagName);
+        return findByTagName(mPorts, tagName);
     }
 
     /**
@@ -106,7 +106,8 @@
      * @return true if the HwModule supports the connection between the sink and the source,
      * false otherwise
      */
-    bool supportsPatch(const sp<AudioPort> &srcPort, const sp<AudioPort> &dstPort) const;
+    bool supportsPatch(const sp<PolicyAudioPort> &srcPort,
+                       const sp<PolicyAudioPort> &dstPort) const;
 
     // TODO remove from here (split serialization)
     void dump(String8 *dst) const;
@@ -122,7 +123,7 @@
     DeviceVector mDeclaredDevices; // devices declared in audio_policy configuration file.
     DeviceVector mDynamicDevices; /**< devices that can be added/removed at runtime (e.g. remote submix) */
     AudioRouteVector mRoutes;
-    AudioPortVector mPorts;
+    PolicyAudioPortVector mPorts;
 };
 
 class HwModuleCollection : public Vector<sp<HwModule> >
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index e0b56d4..dcb4730 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -16,8 +16,9 @@
 
 #pragma once
 
-#include "AudioPort.h"
 #include "DeviceDescriptor.h"
+#include "PolicyAudioPort.h"
+#include "policy.h"
 #include <utils/String8.h>
 #include <system/audio.h>
 
@@ -30,18 +31,28 @@
 // It is used by the policy manager to determine if an output or input is suitable for
 // a given use case,  open/close it accordingly and connect/disconnect audio tracks
 // to/from it.
-class IOProfile : public AudioPort
+class IOProfile : public AudioPort, public PolicyAudioPort
 {
 public:
-    IOProfile(const String8 &name, audio_port_role_t role)
+    IOProfile(const std::string &name, audio_port_role_t role)
         : AudioPort(name, AUDIO_PORT_TYPE_MIX, role),
           maxOpenCount(1),
           curOpenCount(0),
           maxActiveCount(1),
           curActiveCount(0) {}
 
+    virtual ~IOProfile() = default;
+
     // For a Profile aka MixPort, tag name and name are equivalent.
-    virtual const String8 getTagName() const { return getName(); }
+    virtual const std::string getTagName() const { return getName(); }
+
+    virtual void addAudioProfile(const sp<AudioProfile> &profile) {
+        addAudioProfileAndSort(mProfiles, profile);
+    }
+
+    virtual sp<AudioPort> asAudioPort() const {
+        return static_cast<AudioPort*>(const_cast<IOProfile*>(this));
+    }
 
     // FIXME: this is needed because shared MMAP stream clients use the same audio session.
     // Once capture clients are tracked individually and not per session this can be removed
@@ -51,7 +62,7 @@
     // flags are parsed before maxActiveCount by the serializer.
     void setFlags(uint32_t flags) override
     {
-        AudioPort::setFlags(flags);
+        PolicyAudioPort::setFlags(flags);
         if (getRole() == AUDIO_PORT_ROLE_SINK && (flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
             maxActiveCount = 0;
         }
@@ -183,13 +194,13 @@
 class InputProfile : public IOProfile
 {
 public:
-    explicit InputProfile(const String8 &name) : IOProfile(name, AUDIO_PORT_ROLE_SINK) {}
+    explicit InputProfile(const std::string &name) : IOProfile(name, AUDIO_PORT_ROLE_SINK) {}
 };
 
 class OutputProfile : public IOProfile
 {
 public:
-    explicit OutputProfile(const String8 &name) : IOProfile(name, AUDIO_PORT_ROLE_SOURCE) {}
+    explicit OutputProfile(const std::string &name) : IOProfile(name, AUDIO_PORT_ROLE_SOURCE) {}
 };
 
 } // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
new file mode 100644
index 0000000..99df3c0
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "AudioCollections.h"
+#include "AudioProfileVectorHelper.h"
+#include "HandleGenerator.h"
+#include <media/AudioGain.h>
+#include <media/AudioPort.h>
+#include <utils/String8.h>
+#include <utils/Vector.h>
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <system/audio.h>
+#include <cutils/config_utils.h>
+
+namespace android {
+
+class HwModule;
+class AudioRoute;
+
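+// Policy-specific behavior of an audio port: HW module attachment, routes, flags and
+// helpers to pick or check audio profiles.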
+class PolicyAudioPort : public virtual RefBase, private HandleGenerator<audio_port_handle_t>
+{
+public:
+    PolicyAudioPort() : mFlags(AUDIO_OUTPUT_FLAG_NONE) {}
+
+    virtual ~PolicyAudioPort() = default;
+
+    virtual const std::string getTagName() const = 0;
+
+    virtual sp<AudioPort> asAudioPort() const = 0;
+
+    virtual void setFlags(uint32_t flags)
+    {
+        // Force the direct flag if the offload flag is set: offloading implies a direct
+        // output stream, and all common behaviors are driven by checking only the direct
+        // flag. This should normally be set appropriately in the policy configuration file.
+        if (asAudioPort()->getRole() == AUDIO_PORT_ROLE_SOURCE &&
+                (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+            flags |= AUDIO_OUTPUT_FLAG_DIRECT;
+        }
+        mFlags = flags;
+    }
+    uint32_t getFlags() const { return mFlags; }
+
+    virtual void attach(const sp<HwModule>& module);
+    virtual void detach();
+    bool isAttached() { return mModule != 0; }
+
+    // Audio port IDs are in a different namespace than AudioFlinger unique IDs
+    static audio_port_handle_t getNextUniqueId();
+
+    // searches for an exact match
+    virtual status_t checkExactAudioProfile(const struct audio_port_config *config) const;
+
+    // Searches for a compatible match; currently implemented for input only.
+    // The parameters are input|output: on return they hold the best matching values.
+    status_t checkCompatibleAudioProfile(uint32_t &samplingRate,
+                                         audio_channel_mask_t &channelMask,
+                                         audio_format_t &format) const
+    {
+        return checkCompatibleProfile(
+                asAudioPort()->getAudioProfiles(), samplingRate, channelMask, format,
+                asAudioPort()->getType(), asAudioPort()->getRole());
+    }
+
+    void pickAudioProfile(uint32_t &samplingRate,
+                          audio_channel_mask_t &channelMask,
+                          audio_format_t &format) const;
+
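+    // PCM formats listed in order of increasing preference; used by compareFormats()
+    // and pickAudioProfile().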
+    static const audio_format_t sPcmFormatCompareTable[];
+
+    static int compareFormats(audio_format_t format1, audio_format_t format2);
+
+    // Used to select an audio HAL output stream with a sample format providing the
+    // least degradation for a given AudioTrack sample format.
+    static bool isBetterFormatMatch(audio_format_t newFormat,
+                                    audio_format_t currentFormat,
+                                    audio_format_t targetFormat);
+    static uint32_t formatDistance(audio_format_t format1,
+                                   audio_format_t format2);
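+    // Maximum distance, returned by formatDistance() when either format is AUDIO_FORMAT_INVALID.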
+    static const uint32_t kFormatDistanceMax = 4;
+
+    audio_module_handle_t getModuleHandle() const;
+    uint32_t getModuleVersionMajor() const;
+    const char *getModuleName() const;
+    sp<HwModule> getModule() const { return mModule; }
+
+    inline bool isDirectOutput() const
+    {
+        return (asAudioPort()->getType() == AUDIO_PORT_TYPE_MIX) &&
+                (asAudioPort()->getRole() == AUDIO_PORT_ROLE_SOURCE) &&
+                (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD));
+    }
+
+    void addRoute(const sp<AudioRoute> &route) { mRoutes.add(route); }
+    const AudioRouteVector &getRoutes() const { return mRoutes; }
+
+private:
+    void pickChannelMask(audio_channel_mask_t &channelMask,
+                         const ChannelMaskSet &channelMasks) const;
+    void pickSamplingRate(uint32_t &rate, const SampleRateSet &samplingRates) const;
+
+    uint32_t mFlags; // attribute flags mask (e.g. primary output, direct output...).
+    sp<HwModule> mModule;     // audio HW module exposing this I/O stream
+    AudioRouteVector mRoutes; // Routes involving this port
+};
+
+class PolicyAudioPortConfig : public virtual RefBase
+{
+public:
+    virtual ~PolicyAudioPortConfig() = default;
+
+    virtual sp<PolicyAudioPort> getPolicyAudioPort() const = 0;
+
+    status_t validationBeforeApplyConfig(const struct audio_port_config *config) const;
+
+    void applyPolicyAudioPortConfig(const struct audio_port_config *config) {
+        if (config->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+            mFlags = config->flags;
+        }
+    }
+
+    void toPolicyAudioPortConfig(
+            struct audio_port_config *dstConfig,
+            const struct audio_port_config *srcConfig = NULL) const;
+
+
+    virtual bool hasSameHwModuleAs(const sp<PolicyAudioPortConfig>& other) const {
+        return (other.get() != nullptr) && (other->getPolicyAudioPort().get() != nullptr) &&
+                (getPolicyAudioPort().get() != nullptr) &&
+                (other->getPolicyAudioPort()->getModuleHandle() ==
+                        getPolicyAudioPort()->getModuleHandle());
+    }
+
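+    // Flags requested for this port config, stored as the audio_io_flags union
+    // from struct audio_port_config.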
+    union audio_io_flags mFlags = { AUDIO_INPUT_FLAG_NONE };
+};
+
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
index c90a582..cd10010 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
@@ -18,16 +18,16 @@
 //#define LOG_NDEBUG 0
 
 #include "AudioCollections.h"
-#include "AudioPort.h"
 #include "AudioRoute.h"
 #include "HwModule.h"
-#include "AudioGain.h"
+#include "PolicyAudioPort.h"
 
 namespace android {
 
-sp<AudioPort> AudioPortVector::findByTagName(const String8 &tagName) const
+sp<PolicyAudioPort> findByTagName(const PolicyAudioPortVector& policyAudioPortVector,
+                                  const std::string &tagName)
 {
-    for (const auto& port : *this) {
+    for (const auto& port : policyAudioPortVector) {
         if (port->getTagName() == tagName) {
             return port;
         }
@@ -35,15 +35,15 @@
     return nullptr;
 }
 
-void AudioRouteVector::dump(String8 *dst, int spaces) const
+void dumpAudioRouteVector(const AudioRouteVector& audioRouteVector, String8 *dst, int spaces)
 {
-    if (isEmpty()) {
+    if (audioRouteVector.isEmpty()) {
         return;
     }
-    dst->appendFormat("\n%*sAudio Routes (%zu):\n", spaces, "", size());
-    for (size_t i = 0; i < size(); i++) {
+    dst->appendFormat("\n%*sAudio Routes (%zu):\n", spaces, "", audioRouteVector.size());
+    for (size_t i = 0; i < audioRouteVector.size(); i++) {
         dst->appendFormat("%*s- Route %zu:\n", spaces, "", i + 1);
-        itemAt(i)->dump(dst, 4);
+        audioRouteVector.itemAt(i)->dump(dst, 4);
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
deleted file mode 100644
index 2725870..0000000
--- a/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::AudioGain"
-//#define LOG_NDEBUG 0
-
-//#define VERY_VERBOSE_LOGGING
-#ifdef VERY_VERBOSE_LOGGING
-#define ALOGVV ALOGV
-#else
-#define ALOGVV(a...) do { } while(0)
-#endif
-
-#include "AudioGain.h"
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-#include <math.h>
-
-namespace android {
-
-AudioGain::AudioGain(int index, bool useInChannelMask)
-{
-    mIndex = index;
-    mUseInChannelMask = useInChannelMask;
-    memset(&mGain, 0, sizeof(struct audio_gain));
-}
-
-void AudioGain::getDefaultConfig(struct audio_gain_config *config)
-{
-    config->index = mIndex;
-    config->mode = mGain.mode;
-    config->channel_mask = mGain.channel_mask;
-    if ((mGain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
-        config->values[0] = mGain.default_value;
-    } else {
-        uint32_t numValues;
-        if (mUseInChannelMask) {
-            numValues = audio_channel_count_from_in_mask(mGain.channel_mask);
-        } else {
-            numValues = audio_channel_count_from_out_mask(mGain.channel_mask);
-        }
-        for (size_t i = 0; i < numValues; i++) {
-            config->values[i] = mGain.default_value;
-        }
-    }
-    if ((mGain.mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
-        config->ramp_duration_ms = mGain.min_ramp_ms;
-    }
-}
-
-status_t AudioGain::checkConfig(const struct audio_gain_config *config)
-{
-    if ((config->mode & ~mGain.mode) != 0) {
-        return BAD_VALUE;
-    }
-    if ((config->mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
-        if ((config->values[0] < mGain.min_value) ||
-                    (config->values[0] > mGain.max_value)) {
-            return BAD_VALUE;
-        }
-    } else {
-        if ((config->channel_mask & ~mGain.channel_mask) != 0) {
-            return BAD_VALUE;
-        }
-        uint32_t numValues;
-        if (mUseInChannelMask) {
-            numValues = audio_channel_count_from_in_mask(config->channel_mask);
-        } else {
-            numValues = audio_channel_count_from_out_mask(config->channel_mask);
-        }
-        for (size_t i = 0; i < numValues; i++) {
-            if ((config->values[i] < mGain.min_value) ||
-                    (config->values[i] > mGain.max_value)) {
-                return BAD_VALUE;
-            }
-        }
-    }
-    if ((config->mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
-        if ((config->ramp_duration_ms < mGain.min_ramp_ms) ||
-                    (config->ramp_duration_ms > mGain.max_ramp_ms)) {
-            return BAD_VALUE;
-        }
-    }
-    return NO_ERROR;
-}
-
-void AudioGain::dump(String8 *dst, int spaces, int index) const
-{
-    dst->appendFormat("%*sGain %d:\n", spaces, "", index+1);
-    dst->appendFormat("%*s- mode: %08x\n", spaces, "", mGain.mode);
-    dst->appendFormat("%*s- channel_mask: %08x\n", spaces, "", mGain.channel_mask);
-    dst->appendFormat("%*s- min_value: %d mB\n", spaces, "", mGain.min_value);
-    dst->appendFormat("%*s- max_value: %d mB\n", spaces, "", mGain.max_value);
-    dst->appendFormat("%*s- default_value: %d mB\n", spaces, "", mGain.default_value);
-    dst->appendFormat("%*s- step_value: %d mB\n", spaces, "", mGain.step_value);
-    dst->appendFormat("%*s- min_ramp_ms: %d ms\n", spaces, "", mGain.min_ramp_ms);
-    dst->appendFormat("%*s- max_ramp_ms: %d ms\n", spaces, "", mGain.max_ramp_ms);
-}
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index a096e8f..b963121 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -22,7 +22,6 @@
 #include <policy.h>
 #include <AudioPolicyInterface.h>
 #include "AudioInputDescriptor.h"
-#include "AudioGain.h"
 #include "AudioPolicyMix.h"
 #include "HwModule.h"
 
@@ -35,8 +34,8 @@
 {
     if (profile != NULL) {
         profile->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
-        if (profile->mGains.size() > 0) {
-            profile->mGains[0]->getDefaultConfig(&mGain);
+        if (profile->getGains().size() > 0) {
+            profile->getGains()[0]->getDefaultConfig(&mGain);
         }
     }
 }
@@ -49,16 +48,29 @@
     return mProfile->getModuleHandle();
 }
 
-audio_port_handle_t AudioInputDescriptor::getId() const
-{
-    return mId;
-}
-
 audio_source_t AudioInputDescriptor::source() const
 {
     return getHighestPriorityAttributes().source;
 }
 
+status_t AudioInputDescriptor::applyAudioPortConfig(const struct audio_port_config *config,
+                                                    audio_port_config *backupConfig)
+{
+    struct audio_port_config localBackupConfig = { .config_mask = config->config_mask };
+    status_t status = NO_ERROR;
+
+    toAudioPortConfig(&localBackupConfig);
+    if ((status = validationBeforeApplyConfig(config)) == NO_ERROR) {
+        AudioPortConfig::applyAudioPortConfig(config, backupConfig);
+        applyPolicyAudioPortConfig(config);
+    }
+
+    if (backupConfig != NULL) {
+        *backupConfig = localBackupConfig;
+    }
+    return status;
+}
+
 void AudioInputDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
                                              const struct audio_port_config *srcConfig) const
 {
@@ -71,8 +83,8 @@
     }
 
     AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
+    toPolicyAudioPortConfig(dstConfig, srcConfig);
 
-    dstConfig->id = mId;
     dstConfig->role = AUDIO_PORT_ROLE_SINK;
     dstConfig->type = AUDIO_PORT_TYPE_MIX;
     dstConfig->ext.mix.hw_module = getModuleHandle();
@@ -213,7 +225,7 @@
     mDevice = device;
 
     ALOGV("opening input for device %s profile %p name %s",
-          mDevice->toString().c_str(), mProfile.get(), mProfile->getName().string());
+          mDevice->toString().c_str(), mProfile.get(), mProfile->getName().c_str());
 
     audio_devices_t deviceType = mDevice->type();
 
@@ -221,7 +233,7 @@
                                                   input,
                                                   &lConfig,
                                                   &deviceType,
-                                                  mDevice->address(),
+                                                  String8(mDevice->address().c_str()),
                                                   source,
                                                   flags);
     LOG_ALWAYS_FATAL_IF(mDevice->type() != deviceType,
@@ -235,7 +247,7 @@
         mSamplingRate = lConfig.sample_rate;
         mChannelMask = lConfig.channel_mask;
         mFormat = lConfig.format;
-        mId = AudioPort::getNextUniqueId();
+        mId = PolicyAudioPort::getNextUniqueId();
         mIoHandle = *input;
         mProfile->curOpenCount++;
     }
@@ -451,13 +463,13 @@
     return enabledEffects;
 }
 
-void AudioInputDescriptor::setAppState(uid_t uid, app_state_t state)
+void AudioInputDescriptor::setAppState(audio_port_handle_t portId, app_state_t state)
 {
     RecordClientVector clients = clientsList(false /*activeOnly*/);
     RecordClientVector updatedClients;
 
     for (const auto& client : clients) {
-        if (uid == client->uid()) {
+        if (portId == client->portId()) {
             bool wasSilenced = client->isSilenced();
             client->setAppState(state);
             if (client->active() && wasSilenced != client->isSilenced()) {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 8a60cf2..00000fe 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -21,10 +21,10 @@
 #include "AudioOutputDescriptor.h"
 #include "AudioPolicyMix.h"
 #include "IOProfile.h"
-#include "AudioGain.h"
 #include "Volume.h"
 #include "HwModule.h"
 #include "TypeConverter.h"
+#include <media/AudioGain.h>
 #include <media/AudioParameter.h>
 #include <media/AudioPolicy.h>
 
@@ -34,14 +34,14 @@
 
 namespace android {
 
-AudioOutputDescriptor::AudioOutputDescriptor(const sp<AudioPort>& port,
+AudioOutputDescriptor::AudioOutputDescriptor(const sp<PolicyAudioPort>& policyAudioPort,
                                              AudioPolicyClientInterface *clientInterface)
-    : mPort(port), mClientInterface(clientInterface)
+    : mPolicyAudioPort(policyAudioPort), mClientInterface(clientInterface)
 {
-    if (mPort.get() != nullptr) {
-        mPort->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
-        if (mPort->mGains.size() > 0) {
-            mPort->mGains[0]->getDefaultConfig(&mGain);
+    if (mPolicyAudioPort.get() != nullptr) {
+        mPolicyAudioPort->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
+        if (mPolicyAudioPort->asAudioPort()->getGains().size() > 0) {
+            mPolicyAudioPort->asAudioPort()->getGains()[0]->getDefaultConfig(&mGain);
         }
     }
 }
@@ -55,7 +55,8 @@
 
 audio_module_handle_t AudioOutputDescriptor::getModuleHandle() const
 {
-    return mPort.get() != nullptr ? mPort->getModuleHandle() : AUDIO_MODULE_HANDLE_NONE;
+    return mPolicyAudioPort.get() != nullptr ?
+            mPolicyAudioPort->getModuleHandle() : AUDIO_MODULE_HANDLE_NONE;
 }
 
 audio_patch_handle_t AudioOutputDescriptor::getPatchHandle() const
@@ -68,11 +69,6 @@
     mPatchHandle = handle;
 }
 
-audio_port_handle_t AudioOutputDescriptor::getId() const
-{
-    return mId;
-}
-
 bool AudioOutputDescriptor::sharesHwModuleWith(
         const sp<AudioOutputDescriptor>& outputDesc)
 {
@@ -167,9 +163,27 @@
     return false;
 }
 
-void AudioOutputDescriptor::toAudioPortConfig(
-                                                 struct audio_port_config *dstConfig,
-                                                 const struct audio_port_config *srcConfig) const
+status_t AudioOutputDescriptor::applyAudioPortConfig(const struct audio_port_config *config,
+                                                     audio_port_config *backupConfig)
+{
+    struct audio_port_config localBackupConfig = { .config_mask = config->config_mask };
+    status_t status = NO_ERROR;
+
+    toAudioPortConfig(&localBackupConfig);
+    if ((status = validationBeforeApplyConfig(config)) == NO_ERROR) {
+        AudioPortConfig::applyAudioPortConfig(config, backupConfig);
+        applyPolicyAudioPortConfig(config);
+    }
+
+    if (backupConfig != NULL) {
+        *backupConfig = localBackupConfig;
+    }
+    return status;
+}
+
+
+void AudioOutputDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
+                                              const struct audio_port_config *srcConfig) const
 {
     dstConfig->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK|
                             AUDIO_PORT_CONFIG_FORMAT|AUDIO_PORT_CONFIG_GAIN;
@@ -177,8 +191,8 @@
         dstConfig->config_mask |= srcConfig->config_mask;
     }
     AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
+    toPolicyAudioPortConfig(dstConfig, srcConfig);
 
-    dstConfig->id = mId;
     dstConfig->role = AUDIO_PORT_ROLE_SOURCE;
     dstConfig->type = AUDIO_PORT_TYPE_MIX;
     dstConfig->ext.mix.hw_module = getModuleHandle();
@@ -188,7 +202,7 @@
 void AudioOutputDescriptor::toAudioPort(struct audio_port *port) const
 {
     // Should not be called for duplicated ports, see SwAudioOutputDescriptor::toAudioPortConfig.
-    mPort->toAudioPort(port);
+    mPolicyAudioPort->asAudioPort()->toAudioPort(port);
     port->id = mId;
     port->ext.mix.hw_module = getModuleHandle();
 }
@@ -483,7 +497,7 @@
     mFlags = (audio_output_flags_t)(mFlags | flags);
 
     ALOGV("opening output for device %s profile %p name %s",
-          mDevices.toString().c_str(), mProfile.get(), mProfile->getName().string());
+          mDevices.toString().c_str(), mProfile.get(), mProfile->getName().c_str());
 
     status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
                                                    output,
@@ -503,7 +517,7 @@
         mSamplingRate = lConfig.sample_rate;
         mChannelMask = lConfig.channel_mask;
         mFormat = lConfig.format;
-        mId = AudioPort::getNextUniqueId();
+        mId = PolicyAudioPort::getNextUniqueId();
         mIoHandle = *output;
         mProfile->curOpenCount++;
     }
@@ -589,7 +603,7 @@
         return INVALID_OPERATION;
     }
 
-    mId = AudioPort::getNextUniqueId();
+    mId = PolicyAudioPort::getNextUniqueId();
     mIoHandle = *ioHandle;
     mOutput1 = output1;
     mOutput2 = output2;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index 3a4db90..bf0cc94 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -18,7 +18,6 @@
 //#define LOG_NDEBUG 0
 
 #include "AudioPatch.h"
-#include "AudioGain.h"
 #include "TypeConverter.h"
 
 #include <log/log.h>
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index c42923a..6f8ea36 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -20,9 +20,8 @@
 #include "AudioPolicyMix.h"
 #include "TypeConverter.h"
 #include "HwModule.h"
-#include "AudioPort.h"
+#include "PolicyAudioPort.h"
 #include "IOProfile.h"
-#include "AudioGain.h"
 #include <AudioOutputDescriptor.h>
 
 namespace android {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
deleted file mode 100644
index c11490a..0000000
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ /dev/null
@@ -1,487 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::AudioPort"
-//#define LOG_NDEBUG 0
-#include "TypeConverter.h"
-#include "AudioPort.h"
-#include "HwModule.h"
-#include "AudioGain.h"
-#include <policy.h>
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
-#endif
-
-namespace android {
-
-// --- AudioPort class implementation
-void AudioPort::attach(const sp<HwModule>& module)
-{
-    ALOGV("%s: attaching module %s to port %s", __FUNCTION__, getModuleName(), mName.string());
-    mModule = module;
-}
-
-void AudioPort::detach()
-{
-    mModule = nullptr;
-}
-
-// Note that is a different namespace than AudioFlinger unique IDs
-audio_port_handle_t AudioPort::getNextUniqueId()
-{
-    return getNextHandle();
-}
-
-audio_module_handle_t AudioPort::getModuleHandle() const
-{
-    return mModule != 0 ? mModule->getHandle() : AUDIO_MODULE_HANDLE_NONE;
-}
-
-uint32_t AudioPort::getModuleVersionMajor() const
-{
-    return mModule != 0 ? mModule->getHalVersionMajor() : 0;
-}
-
-const char *AudioPort::getModuleName() const
-{
-    return mModule != 0 ? mModule->getName() : "invalid module";
-}
-
-void AudioPort::toAudioPort(struct audio_port *port) const
-{
-    // TODO: update this function once audio_port structure reflects the new profile definition.
-    // For compatibility reason: flatening the AudioProfile into audio_port structure.
-    SortedVector<audio_format_t> flatenedFormats;
-    SampleRateVector flatenedRates;
-    ChannelsVector flatenedChannels;
-    for (const auto& profile : mProfiles) {
-        if (profile->isValid()) {
-            audio_format_t formatToExport = profile->getFormat();
-            const SampleRateVector &ratesToExport = profile->getSampleRates();
-            const ChannelsVector &channelsToExport = profile->getChannels();
-
-            if (flatenedFormats.indexOf(formatToExport) < 0) {
-                flatenedFormats.add(formatToExport);
-            }
-            for (size_t rateIndex = 0; rateIndex < ratesToExport.size(); rateIndex++) {
-                uint32_t rate = ratesToExport[rateIndex];
-                if (flatenedRates.indexOf(rate) < 0) {
-                    flatenedRates.add(rate);
-                }
-            }
-            for (size_t chanIndex = 0; chanIndex < channelsToExport.size(); chanIndex++) {
-                audio_channel_mask_t channels = channelsToExport[chanIndex];
-                if (flatenedChannels.indexOf(channels) < 0) {
-                    flatenedChannels.add(channels);
-                }
-            }
-            if (flatenedRates.size() > AUDIO_PORT_MAX_SAMPLING_RATES ||
-                    flatenedChannels.size() > AUDIO_PORT_MAX_CHANNEL_MASKS ||
-                    flatenedFormats.size() > AUDIO_PORT_MAX_FORMATS) {
-                ALOGE("%s: bailing out: cannot export profiles to port config", __FUNCTION__);
-                return;
-            }
-        }
-    }
-    port->role = mRole;
-    port->type = mType;
-    strlcpy(port->name, mName, AUDIO_PORT_MAX_NAME_LEN);
-    port->num_sample_rates = flatenedRates.size();
-    port->num_channel_masks = flatenedChannels.size();
-    port->num_formats = flatenedFormats.size();
-    for (size_t i = 0; i < flatenedRates.size(); i++) {
-        port->sample_rates[i] = flatenedRates[i];
-    }
-    for (size_t i = 0; i < flatenedChannels.size(); i++) {
-        port->channel_masks[i] = flatenedChannels[i];
-    }
-    for (size_t i = 0; i < flatenedFormats.size(); i++) {
-        port->formats[i] = flatenedFormats[i];
-    }
-
-    ALOGV("AudioPort::toAudioPort() num gains %zu", mGains.size());
-
-    uint32_t i;
-    for (i = 0; i < mGains.size() && i < AUDIO_PORT_MAX_GAINS; i++) {
-        port->gains[i] = mGains[i]->getGain();
-    }
-    port->num_gains = i;
-}
-
-void AudioPort::importAudioPort(const sp<AudioPort>& port, bool force __unused)
-{
-    for (const auto& profileToImport : port->mProfiles) {
-        if (profileToImport->isValid()) {
-            // Import only valid port, i.e. valid format, non empty rates and channels masks
-            bool hasSameProfile = false;
-            for (const auto& profile : mProfiles) {
-                if (*profile == *profileToImport) {
-                    // never import a profile twice
-                    hasSameProfile = true;
-                    break;
-                }
-            }
-            if (hasSameProfile) { // never import a same profile twice
-                continue;
-            }
-            addAudioProfile(profileToImport);
-        }
-    }
-}
-
-status_t AudioPort::checkExactAudioProfile(const struct audio_port_config *config) const
-{
-    status_t status = NO_ERROR;
-    auto config_mask = config->config_mask;
-    if (config_mask & AUDIO_PORT_CONFIG_GAIN) {
-        config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
-        status = checkGain(&config->gain, config->gain.index);
-        if (status != NO_ERROR) {
-            return status;
-        }
-    }
-    if (config_mask != 0) {
-        // TODO should we check sample_rate / channel_mask / format separately?
-        status = mProfiles.checkExactProfile(config->sample_rate,
-                                             config->channel_mask,
-                                             config->format);
-    }
-    return status;
-}
-
-void AudioPort::pickSamplingRate(uint32_t &pickedRate,const SampleRateVector &samplingRates) const
-{
-    pickedRate = 0;
-    // For direct outputs, pick minimum sampling rate: this helps ensuring that the
-    // channel count / sampling rate combination chosen will be supported by the connected
-    // sink
-    if (isDirectOutput()) {
-        uint32_t samplingRate = UINT_MAX;
-        for (size_t i = 0; i < samplingRates.size(); i ++) {
-            if ((samplingRates[i] < samplingRate) && (samplingRates[i] > 0)) {
-                samplingRate = samplingRates[i];
-            }
-        }
-        pickedRate = (samplingRate == UINT_MAX) ? 0 : samplingRate;
-    } else {
-        uint32_t maxRate = SAMPLE_RATE_HZ_MAX;
-
-        // For mixed output and inputs, use max mixer sampling rates. Do not
-        // limit sampling rate otherwise
-        // For inputs, also see checkCompatibleSamplingRate().
-        if (mType != AUDIO_PORT_TYPE_MIX) {
-            maxRate = UINT_MAX;
-        }
-        // TODO: should mSamplingRates[] be ordered in terms of our preference
-        // and we return the first (and hence most preferred) match?  This is of concern if
-        // we want to choose 96kHz over 192kHz for USB driver stability or resource constraints.
-        for (size_t i = 0; i < samplingRates.size(); i ++) {
-            if ((samplingRates[i] > pickedRate) && (samplingRates[i] <= maxRate)) {
-                pickedRate = samplingRates[i];
-            }
-        }
-    }
-}
-
-void AudioPort::pickChannelMask(audio_channel_mask_t &pickedChannelMask,
-                                const ChannelsVector &channelMasks) const
-{
-    pickedChannelMask = AUDIO_CHANNEL_NONE;
-    // For direct outputs, pick minimum channel count: this helps ensuring that the
-    // channel count / sampling rate combination chosen will be supported by the connected
-    // sink
-    if (isDirectOutput()) {
-        uint32_t channelCount = UINT_MAX;
-        for (size_t i = 0; i < channelMasks.size(); i ++) {
-            uint32_t cnlCount;
-            if (useInputChannelMask()) {
-                cnlCount = audio_channel_count_from_in_mask(channelMasks[i]);
-            } else {
-                cnlCount = audio_channel_count_from_out_mask(channelMasks[i]);
-            }
-            if ((cnlCount < channelCount) && (cnlCount > 0)) {
-                pickedChannelMask = channelMasks[i];
-                channelCount = cnlCount;
-            }
-        }
-    } else {
-        uint32_t channelCount = 0;
-        uint32_t maxCount = MAX_MIXER_CHANNEL_COUNT;
-
-        // For mixed output and inputs, use max mixer channel count. Do not
-        // limit channel count otherwise
-        if (mType != AUDIO_PORT_TYPE_MIX) {
-            maxCount = UINT_MAX;
-        }
-        for (size_t i = 0; i < channelMasks.size(); i ++) {
-            uint32_t cnlCount;
-            if (useInputChannelMask()) {
-                cnlCount = audio_channel_count_from_in_mask(channelMasks[i]);
-            } else {
-                cnlCount = audio_channel_count_from_out_mask(channelMasks[i]);
-            }
-            if ((cnlCount > channelCount) && (cnlCount <= maxCount)) {
-                pickedChannelMask = channelMasks[i];
-                channelCount = cnlCount;
-            }
-        }
-    }
-}
-
-/* format in order of increasing preference */
-const audio_format_t AudioPort::sPcmFormatCompareTable[] = {
-        AUDIO_FORMAT_DEFAULT,
-        AUDIO_FORMAT_PCM_16_BIT,
-        AUDIO_FORMAT_PCM_8_24_BIT,
-        AUDIO_FORMAT_PCM_24_BIT_PACKED,
-        AUDIO_FORMAT_PCM_32_BIT,
-        AUDIO_FORMAT_PCM_FLOAT,
-};
-
-int AudioPort::compareFormats(audio_format_t format1, audio_format_t format2)
-{
-    // NOTE: AUDIO_FORMAT_INVALID is also considered not PCM and will be compared equal to any
-    // compressed format and better than any PCM format. This is by design of pickFormat()
-    if (!audio_is_linear_pcm(format1)) {
-        if (!audio_is_linear_pcm(format2)) {
-            return 0;
-        }
-        return 1;
-    }
-    if (!audio_is_linear_pcm(format2)) {
-        return -1;
-    }
-
-    int index1 = -1, index2 = -1;
-    for (size_t i = 0;
-            (i < ARRAY_SIZE(sPcmFormatCompareTable)) && ((index1 == -1) || (index2 == -1));
-            i ++) {
-        if (sPcmFormatCompareTable[i] == format1) {
-            index1 = i;
-        }
-        if (sPcmFormatCompareTable[i] == format2) {
-            index2 = i;
-        }
-    }
-    // format1 not found => index1 < 0 => format2 > format1
-    // format2 not found => index2 < 0 => format2 < format1
-    return index1 - index2;
-}
-
-uint32_t AudioPort::formatDistance(audio_format_t format1, audio_format_t format2)
-{
-    if (format1 == format2) {
-        return 0;
-    }
-    if (format1 == AUDIO_FORMAT_INVALID || format2 == AUDIO_FORMAT_INVALID) {
-        return kFormatDistanceMax;
-    }
-    int diffBytes = (int)audio_bytes_per_sample(format1) -
-            audio_bytes_per_sample(format2);
-
-    return abs(diffBytes);
-}
-
-bool AudioPort::isBetterFormatMatch(audio_format_t newFormat,
-                                    audio_format_t currentFormat,
-                                    audio_format_t targetFormat)
-{
-    return formatDistance(newFormat, targetFormat) < formatDistance(currentFormat, targetFormat);
-}
-
-void AudioPort::pickAudioProfile(uint32_t &samplingRate,
-                                 audio_channel_mask_t &channelMask,
-                                 audio_format_t &format) const
-{
-    format = AUDIO_FORMAT_DEFAULT;
-    samplingRate = 0;
-    channelMask = AUDIO_CHANNEL_NONE;
-
-    // special case for uninitialized dynamic profile
-    if (!mProfiles.hasValidProfile()) {
-        return;
-    }
-    audio_format_t bestFormat = sPcmFormatCompareTable[ARRAY_SIZE(sPcmFormatCompareTable) - 1];
-    // For mixed output and inputs, use best mixer output format.
-    // Do not limit format otherwise
-    if ((mType != AUDIO_PORT_TYPE_MIX) || isDirectOutput()) {
-        bestFormat = AUDIO_FORMAT_INVALID;
-    }
-
-    for (size_t i = 0; i < mProfiles.size(); i ++) {
-        if (!mProfiles[i]->isValid()) {
-            continue;
-        }
-        audio_format_t formatToCompare = mProfiles[i]->getFormat();
-        if ((compareFormats(formatToCompare, format) > 0) &&
-                (compareFormats(formatToCompare, bestFormat) <= 0)) {
-            uint32_t pickedSamplingRate = 0;
-            audio_channel_mask_t pickedChannelMask = AUDIO_CHANNEL_NONE;
-            pickChannelMask(pickedChannelMask, mProfiles[i]->getChannels());
-            pickSamplingRate(pickedSamplingRate, mProfiles[i]->getSampleRates());
-
-            if (formatToCompare != AUDIO_FORMAT_DEFAULT && pickedChannelMask != AUDIO_CHANNEL_NONE
-                    && pickedSamplingRate != 0) {
-                format = formatToCompare;
-                channelMask = pickedChannelMask;
-                samplingRate = pickedSamplingRate;
-                // TODO: shall we return on the first one or still trying to pick a better Profile?
-            }
-        }
-    }
-    ALOGV("%s Port[nm:%s] profile rate=%d, format=%d, channels=%d", __FUNCTION__, mName.string(),
-          samplingRate, channelMask, format);
-}
-
-status_t AudioPort::checkGain(const struct audio_gain_config *gainConfig, int index) const
-{
-    if (index < 0 || (size_t)index >= mGains.size()) {
-        return BAD_VALUE;
-    }
-    return mGains[index]->checkConfig(gainConfig);
-}
-
-void AudioPort::dump(String8 *dst, int spaces, bool verbose) const
-{
-    if (!mName.isEmpty()) {
-        dst->appendFormat("%*s- name: %s\n", spaces, "", mName.string());
-    }
-    if (verbose) {
-        mProfiles.dump(dst, spaces);
-
-        if (mGains.size() != 0) {
-            dst->appendFormat("%*s- gains:\n", spaces, "");
-            for (size_t i = 0; i < mGains.size(); i++) {
-                mGains[i]->dump(dst, spaces + 2, i);
-            }
-        }
-    }
-}
-
-void AudioPort::log(const char* indent) const
-{
-    ALOGI("%s Port[nm:%s, type:%d, role:%d]", indent, mName.string(), mType, mRole);
-}
-
-// --- AudioPortConfig class implementation
-
-status_t AudioPortConfig::applyAudioPortConfig(const struct audio_port_config *config,
-                                               struct audio_port_config *backupConfig)
-{
-    struct audio_port_config localBackupConfig = { .config_mask = config->config_mask };
-    status_t status = NO_ERROR;
-
-    toAudioPortConfig(&localBackupConfig);
-
-    sp<AudioPort> audioport = getAudioPort();
-    if (audioport == 0) {
-        status = NO_INIT;
-        goto exit;
-    }
-    status = audioport->checkExactAudioProfile(config);
-    if (status != NO_ERROR) {
-        goto exit;
-    }
-    if (config->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
-        mSamplingRate = config->sample_rate;
-    }
-    if (config->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
-        mChannelMask = config->channel_mask;
-    }
-    if (config->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
-        mFormat = config->format;
-    }
-    if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
-        mGain = config->gain;
-    }
-    if (config->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
-        mFlags = config->flags;
-    }
-
-exit:
-    if (status != NO_ERROR) {
-        applyAudioPortConfig(&localBackupConfig);
-    }
-    if (backupConfig != NULL) {
-        *backupConfig = localBackupConfig;
-    }
-    return status;
-}
-
-namespace {
-
-template<typename T>
-void updateField(
-        const T& portConfigField, T audio_port_config::*port_config_field,
-        struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig,
-        unsigned int configMask, T defaultValue)
-{
-    if (dstConfig->config_mask & configMask) {
-        if ((srcConfig != nullptr) && (srcConfig->config_mask & configMask)) {
-            dstConfig->*port_config_field = srcConfig->*port_config_field;
-        } else {
-            dstConfig->*port_config_field = portConfigField;
-        }
-    } else {
-        dstConfig->*port_config_field = defaultValue;
-    }
-}
-
-} // namespace
-
-void AudioPortConfig::toAudioPortConfig(struct audio_port_config *dstConfig,
-                                        const struct audio_port_config *srcConfig) const
-{
-    updateField(mSamplingRate, &audio_port_config::sample_rate,
-            dstConfig, srcConfig, AUDIO_PORT_CONFIG_SAMPLE_RATE, 0u);
-    updateField(mChannelMask, &audio_port_config::channel_mask,
-            dstConfig, srcConfig, AUDIO_PORT_CONFIG_CHANNEL_MASK,
-            (audio_channel_mask_t)AUDIO_CHANNEL_NONE);
-    updateField(mFormat, &audio_port_config::format,
-            dstConfig, srcConfig, AUDIO_PORT_CONFIG_FORMAT, AUDIO_FORMAT_INVALID);
-
-    sp<AudioPort> audioport = getAudioPort();
-    if ((dstConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) && audioport != NULL) {
-        dstConfig->gain = mGain;
-        if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_GAIN)
-                && audioport->checkGain(&srcConfig->gain, srcConfig->gain.index) == OK) {
-            dstConfig->gain = srcConfig->gain;
-        }
-    } else {
-        dstConfig->gain.index = -1;
-    }
-    if (dstConfig->gain.index != -1) {
-        dstConfig->config_mask |= AUDIO_PORT_CONFIG_GAIN;
-    } else {
-        dstConfig->config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
-    }
-
-    updateField(mFlags, &audio_port_config::flags,
-            dstConfig, srcConfig, AUDIO_PORT_CONFIG_FLAGS, { AUDIO_INPUT_FLAG_NONE });
-}
-
-bool AudioPortConfig::hasGainController(bool canUseForVolume) const
-{
-    sp<AudioPort> audioport = getAudioPort();
-    if (audioport == nullptr) {
-        return false;
-    }
-    return canUseForVolume ? audioport->getGains().canUseForVolume()
-                           : audioport->getGains().size() > 0;
-}
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
deleted file mode 100644
index 69d6b0c..0000000
--- a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
+++ /dev/null
@@ -1,621 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <algorithm>
-#include <set>
-#include <string>
-
-#define LOG_TAG "APM::AudioProfile"
-//#define LOG_NDEBUG 0
-
-#include <media/AudioResamplerPublic.h>
-#include <utils/Errors.h>
-
-#include "AudioGain.h"
-#include "AudioPort.h"
-#include "AudioProfile.h"
-#include "HwModule.h"
-#include "TypeConverter.h"
-
-namespace android {
-
-ChannelsVector ChannelsVector::asInMask() const
-{
-    ChannelsVector inMaskVector;
-    for (const auto& channel : *this) {
-        if (audio_channel_mask_out_to_in(channel) != AUDIO_CHANNEL_INVALID) {
-            inMaskVector.add(audio_channel_mask_out_to_in(channel));
-        }
-    }
-    return inMaskVector;
-}
-
-ChannelsVector ChannelsVector::asOutMask() const
-{
-    ChannelsVector outMaskVector;
-    for (const auto& channel : *this) {
-        if (audio_channel_mask_in_to_out(channel) != AUDIO_CHANNEL_INVALID) {
-            outMaskVector.add(audio_channel_mask_in_to_out(channel));
-        }
-    }
-    return outMaskVector;
-}
-
-bool operator == (const AudioProfile &left, const AudioProfile &compareTo)
-{
-    return (left.getFormat() == compareTo.getFormat()) &&
-            (left.getChannels() == compareTo.getChannels()) &&
-            (left.getSampleRates() == compareTo.getSampleRates());
-}
-
-static AudioProfile* createFullDynamicImpl()
-{
-    AudioProfile* dynamicProfile = new AudioProfile(gDynamicFormat,
-            ChannelsVector(), SampleRateVector());
-    dynamicProfile->setDynamicFormat(true);
-    dynamicProfile->setDynamicChannels(true);
-    dynamicProfile->setDynamicRate(true);
-    return dynamicProfile;
-}
-
-// static
-sp<AudioProfile> AudioProfile::createFullDynamic()
-{
-    static sp<AudioProfile> dynamicProfile = createFullDynamicImpl();
-    return dynamicProfile;
-}
-
-AudioProfile::AudioProfile(audio_format_t format,
-                           audio_channel_mask_t channelMasks,
-                           uint32_t samplingRate) :
-        mName(String8("")),
-        mFormat(format)
-{
-    mChannelMasks.add(channelMasks);
-    mSamplingRates.add(samplingRate);
-}
-
-AudioProfile::AudioProfile(audio_format_t format,
-                           const ChannelsVector &channelMasks,
-                           const SampleRateVector &samplingRateCollection) :
-        mName(String8("")),
-        mFormat(format),
-        mChannelMasks(channelMasks),
-        mSamplingRates(samplingRateCollection) {}
-
-void AudioProfile::setChannels(const ChannelsVector &channelMasks)
-{
-    if (mIsDynamicChannels) {
-        mChannelMasks = channelMasks;
-    }
-}
-
-void AudioProfile::setSampleRates(const SampleRateVector &sampleRates)
-{
-    if (mIsDynamicRate) {
-        mSamplingRates = sampleRates;
-    }
-}
-
-void AudioProfile::clear()
-{
-    if (mIsDynamicChannels) {
-        mChannelMasks.clear();
-    }
-    if (mIsDynamicRate) {
-        mSamplingRates.clear();
-    }
-}
-
-status_t AudioProfile::checkExact(uint32_t samplingRate, audio_channel_mask_t channelMask,
-                                  audio_format_t format) const
-{
-    if (audio_formats_match(format, mFormat) &&
-            supportsChannels(channelMask) &&
-            supportsRate(samplingRate)) {
-        return NO_ERROR;
-    }
-    return BAD_VALUE;
-}
-
-status_t AudioProfile::checkCompatibleSamplingRate(uint32_t samplingRate,
-                                                   uint32_t &updatedSamplingRate) const
-{
-    ALOG_ASSERT(samplingRate > 0);
-
-    if (mSamplingRates.isEmpty()) {
-        updatedSamplingRate = samplingRate;
-        return NO_ERROR;
-    }
-
-    // Search for the closest supported sampling rate that is above (preferred)
-    // or below (acceptable) the desired sampling rate, within a permitted ratio.
-    // The sampling rates are sorted in ascending order.
-    size_t orderOfDesiredRate = mSamplingRates.orderOf(samplingRate);
-
-    // Prefer to down-sample from a higher sampling rate, as we get the desired frequency spectrum.
-    if (orderOfDesiredRate < mSamplingRates.size()) {
-        uint32_t candidate = mSamplingRates[orderOfDesiredRate];
-        if (candidate / AUDIO_RESAMPLER_DOWN_RATIO_MAX <= samplingRate) {
-            updatedSamplingRate = candidate;
-            return NO_ERROR;
-        }
-    }
-    // But if we have to up-sample from a lower sampling rate, that's OK.
-    if (orderOfDesiredRate != 0) {
-        uint32_t candidate = mSamplingRates[orderOfDesiredRate - 1];
-        if (candidate * AUDIO_RESAMPLER_UP_RATIO_MAX >= samplingRate) {
-            updatedSamplingRate = candidate;
-            return NO_ERROR;
-        }
-    }
-    // leave updatedSamplingRate unmodified
-    return BAD_VALUE;
-}
-
-status_t AudioProfile::checkCompatibleChannelMask(audio_channel_mask_t channelMask,
-                                                  audio_channel_mask_t &updatedChannelMask,
-                                                  audio_port_type_t portType,
-                                                  audio_port_role_t portRole) const
-{
-    if (mChannelMasks.isEmpty()) {
-        updatedChannelMask = channelMask;
-        return NO_ERROR;
-    }
-    const bool isRecordThread = portType == AUDIO_PORT_TYPE_MIX && portRole == AUDIO_PORT_ROLE_SINK;
-    const bool isIndex = audio_channel_mask_get_representation(channelMask)
-            == AUDIO_CHANNEL_REPRESENTATION_INDEX;
-    const uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
-    int bestMatch = 0;
-    for (size_t i = 0; i < mChannelMasks.size(); i ++) {
-        audio_channel_mask_t supported = mChannelMasks[i];
-        if (supported == channelMask) {
-            // Exact matches always taken.
-            updatedChannelMask = channelMask;
-            return NO_ERROR;
-        }
-
-        // AUDIO_CHANNEL_NONE (value: 0) is used for dynamic channel support
-        if (isRecordThread && supported != AUDIO_CHANNEL_NONE) {
-            // Approximate (best) match:
-            // The match score measures how well the supported channel mask matches the
-            // desired mask, where increasing-is-better.
-            //
-            // TODO: Some tweaks may be needed.
-            // Should be a static function of the data processing library.
-            //
-            // In priority:
-            // match score = 1000 if legacy channel conversion equivalent (always prefer this)
-            // OR
-            // match score += 100 if the channel mask representations match
-            // match score += number of channels matched.
-            // match score += 100 if the channel mask representations DO NOT match
-            //   but the profile has positional channel mask and less than 2 channels.
-            //   This is for audio HAL convention to not list index masks for less than 2 channels
-            //
-            // If there are no matched channels, the mask may still be accepted
-            // but the playback or record will be silent.
-            const bool isSupportedIndex = (audio_channel_mask_get_representation(supported)
-                    == AUDIO_CHANNEL_REPRESENTATION_INDEX);
-            const uint32_t supportedChannelCount = audio_channel_count_from_in_mask(supported);
-            int match;
-            if (isIndex && isSupportedIndex) {
-                // index equivalence
-                match = 100 + __builtin_popcount(
-                        audio_channel_mask_get_bits(channelMask)
-                            & audio_channel_mask_get_bits(supported));
-            } else if (isIndex && !isSupportedIndex) {
-                const uint32_t equivalentBits = (1 << supportedChannelCount) - 1 ;
-                match = __builtin_popcount(
-                        audio_channel_mask_get_bits(channelMask) & equivalentBits);
-                if (supportedChannelCount <= FCC_2) {
-                    match += 100;
-                }
-            } else if (!isIndex && isSupportedIndex) {
-                const uint32_t equivalentBits = (1 << channelCount) - 1;
-                match = __builtin_popcount(
-                        equivalentBits & audio_channel_mask_get_bits(supported));
-            } else {
-                // positional equivalence
-                match = 100 + __builtin_popcount(
-                        audio_channel_mask_get_bits(channelMask)
-                            & audio_channel_mask_get_bits(supported));
-                switch (supported) {
-                case AUDIO_CHANNEL_IN_FRONT_BACK:
-                case AUDIO_CHANNEL_IN_STEREO:
-                    if (channelMask == AUDIO_CHANNEL_IN_MONO) {
-                        match = 1000;
-                    }
-                    break;
-                case AUDIO_CHANNEL_IN_MONO:
-                    if (channelMask == AUDIO_CHANNEL_IN_FRONT_BACK
-                            || channelMask == AUDIO_CHANNEL_IN_STEREO) {
-                        match = 1000;
-                    }
-                    break;
-                default:
-                    break;
-                }
-            }
-            if (match > bestMatch) {
-                bestMatch = match;
-                updatedChannelMask = supported;
-            }
-        }
-    }
-    return bestMatch > 0 ? NO_ERROR : BAD_VALUE;
-}
-
-void AudioProfile::dump(String8 *dst, int spaces) const
-{
-    dst->appendFormat("%s%s%s\n", mIsDynamicFormat ? "[dynamic format]" : "",
-             mIsDynamicChannels ? "[dynamic channels]" : "",
-             mIsDynamicRate ? "[dynamic rates]" : "");
-    if (mName.length() != 0) {
-        dst->appendFormat("%*s- name: %s\n", spaces, "", mName.string());
-    }
-    std::string formatLiteral;
-    if (FormatConverter::toString(mFormat, formatLiteral)) {
-        dst->appendFormat("%*s- format: %s\n", spaces, "", formatLiteral.c_str());
-    }
-    if (!mSamplingRates.isEmpty()) {
-        dst->appendFormat("%*s- sampling rates:", spaces, "");
-        for (size_t i = 0; i < mSamplingRates.size(); i++) {
-            dst->appendFormat("%d", mSamplingRates[i]);
-            dst->append(i == (mSamplingRates.size() - 1) ? "" : ", ");
-        }
-        dst->append("\n");
-    }
-
-    if (!mChannelMasks.isEmpty()) {
-        dst->appendFormat("%*s- channel masks:", spaces, "");
-        for (size_t i = 0; i < mChannelMasks.size(); i++) {
-            dst->appendFormat("0x%04x", mChannelMasks[i]);
-            dst->append(i == (mChannelMasks.size() - 1) ? "" : ", ");
-        }
-        dst->append("\n");
-    }
-}
-
-ssize_t AudioProfileVector::add(const sp<AudioProfile> &profile)
-{
-    ssize_t index = Vector::add(profile);
-    // we sort from worst to best, so that AUDIO_FORMAT_DEFAULT is always the first entry.
-    // TODO: compareFormats could be a lambda to convert between pointer-to-format to format:
-    // [](const audio_format_t *format1, const audio_format_t *format2) {
-    //     return compareFormats(*format1, *format2);
-    // }
-    sort(compareFormats);
-    return index;
-}
-
-ssize_t AudioProfileVector::addProfileFromHal(const sp<AudioProfile> &profileToAdd)
-{
-    // Check valid profile to add:
-    if (!profileToAdd->hasValidFormat()) {
-        return -1;
-    }
-    if (!profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
-        FormatVector formats;
-        formats.add(profileToAdd->getFormat());
-        setFormats(FormatVector(formats));
-        return 0;
-    }
-    if (!profileToAdd->hasValidChannels() && profileToAdd->hasValidRates()) {
-        setSampleRatesFor(profileToAdd->getSampleRates(), profileToAdd->getFormat());
-        return 0;
-    }
-    if (profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
-        setChannelsFor(profileToAdd->getChannels(), profileToAdd->getFormat());
-        return 0;
-    }
-    // Go through the list of profile to avoid duplicates
-    for (size_t profileIndex = 0; profileIndex < size(); profileIndex++) {
-        const sp<AudioProfile> &profile = itemAt(profileIndex);
-        if (profile->isValid() && profile == profileToAdd) {
-            // Nothing to do
-            return profileIndex;
-        }
-    }
-    profileToAdd->setDynamicFormat(true); // set the format as dynamic to allow removal
-    return add(profileToAdd);
-}
-
-status_t AudioProfileVector::checkExactProfile(uint32_t samplingRate,
-                                               audio_channel_mask_t channelMask,
-                                               audio_format_t format) const
-{
-    if (isEmpty()) {
-        return NO_ERROR;
-    }
-
-    for (const auto& profile : *this) {
-        if (profile->checkExact(samplingRate, channelMask, format) == NO_ERROR) {
-            return NO_ERROR;
-        }
-    }
-    return BAD_VALUE;
-}
-
-status_t AudioProfileVector::checkCompatibleProfile(uint32_t &samplingRate,
-                                                    audio_channel_mask_t &channelMask,
-                                                    audio_format_t &format,
-                                                    audio_port_type_t portType,
-                                                    audio_port_role_t portRole) const
-{
-    if (isEmpty()) {
-        return NO_ERROR;
-    }
-
-    const bool checkInexact = // when port is input and format is linear pcm
-            portType == AUDIO_PORT_TYPE_MIX && portRole == AUDIO_PORT_ROLE_SINK
-            && audio_is_linear_pcm(format);
-
-    // iterate from best format to worst format (reverse order)
-    for (ssize_t i = size() - 1; i >= 0 ; --i) {
-        const sp<AudioProfile> profile = itemAt(i);
-        audio_format_t formatToCompare = profile->getFormat();
-        if (formatToCompare == format ||
-                (checkInexact
-                        && formatToCompare != AUDIO_FORMAT_DEFAULT
-                        && audio_is_linear_pcm(formatToCompare))) {
-            // Compatible profile has been found, checks if this profile has compatible
-            // rate and channels as well
-            audio_channel_mask_t updatedChannels;
-            uint32_t updatedRate;
-            if (profile->checkCompatibleChannelMask(channelMask, updatedChannels,
-                                                    portType, portRole) == NO_ERROR &&
-                    profile->checkCompatibleSamplingRate(samplingRate, updatedRate) == NO_ERROR) {
-                // for inexact checks we take the first linear pcm format due to sorting.
-                format = formatToCompare;
-                channelMask = updatedChannels;
-                samplingRate = updatedRate;
-                return NO_ERROR;
-            }
-        }
-    }
-    return BAD_VALUE;
-}
-
-void AudioProfileVector::clearProfiles()
-{
-    for (size_t i = size(); i != 0; ) {
-        sp<AudioProfile> profile = itemAt(--i);
-        if (profile->isDynamicFormat() && profile->hasValidFormat()) {
-            removeAt(i);
-            continue;
-        }
-        profile->clear();
-    }
-}
-
-// Returns an intersection between two possibly unsorted vectors and the contents of 'order'.
-// The result is ordered according to 'order'.
-template<typename T, typename Order>
-std::vector<typename T::value_type> intersectFilterAndOrder(
-        const T& input1, const T& input2, const Order& order)
-{
-    std::set<typename T::value_type> set1{input1.begin(), input1.end()};
-    std::set<typename T::value_type> set2{input2.begin(), input2.end()};
-    std::set<typename T::value_type> common;
-    std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(),
-            std::inserter(common, common.begin()));
-    std::vector<typename T::value_type> result;
-    for (const auto& e : order) {
-        if (common.find(e) != common.end()) result.push_back(e);
-    }
-    return result;
-}
-
-// Intersect two possibly unsorted vectors, return common elements according to 'comp' ordering.
-// 'comp' is a comparator function.
-template<typename T, typename Compare>
-std::vector<typename T::value_type> intersectAndOrder(
-        const T& input1, const T& input2, Compare comp)
-{
-    std::set<typename T::value_type, Compare> set1{input1.begin(), input1.end(), comp};
-    std::set<typename T::value_type, Compare> set2{input2.begin(), input2.end(), comp};
-    std::vector<typename T::value_type> result;
-    std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(),
-            std::back_inserter(result), comp);
-    return result;
-}
-
-status_t AudioProfileVector::findBestMatchingOutputConfig(const AudioProfileVector& outputProfiles,
-            const std::vector<audio_format_t>& preferredFormats,
-            const std::vector<audio_channel_mask_t>& preferredOutputChannels,
-            bool preferHigherSamplingRates,
-            audio_config_base *bestOutputConfig) const
-{
-    auto formats = intersectFilterAndOrder(getSupportedFormats(),
-            outputProfiles.getSupportedFormats(), preferredFormats);
-    // Pick the best compatible profile.
-    for (const auto& f : formats) {
-        sp<AudioProfile> inputProfile = getFirstValidProfileFor(f);
-        sp<AudioProfile> outputProfile = outputProfiles.getFirstValidProfileFor(f);
-        if (inputProfile == nullptr || outputProfile == nullptr) {
-            continue;
-        }
-        auto channels = intersectFilterAndOrder(inputProfile->getChannels().asOutMask(),
-                outputProfile->getChannels(), preferredOutputChannels);
-        if (channels.empty()) {
-            continue;
-        }
-        auto sampleRates = preferHigherSamplingRates ?
-                intersectAndOrder(inputProfile->getSampleRates(), outputProfile->getSampleRates(),
-                        std::greater<typename SampleRateVector::value_type>()) :
-                intersectAndOrder(inputProfile->getSampleRates(), outputProfile->getSampleRates(),
-                        std::less<typename SampleRateVector::value_type>());
-        if (sampleRates.empty()) {
-            continue;
-        }
-        ALOGD("%s() found channel mask %#x and sample rate %d for format %#x.",
-                __func__, *channels.begin(), *sampleRates.begin(), f);
-        bestOutputConfig->format = f;
-        bestOutputConfig->sample_rate = *sampleRates.begin();
-        bestOutputConfig->channel_mask = *channels.begin();
-        return NO_ERROR;
-    }
-    return BAD_VALUE;
-}
-
-sp<AudioProfile> AudioProfileVector::getFirstValidProfile() const
-{
-    for (size_t i = 0; i < size(); i++) {
-        if (itemAt(i)->isValid()) {
-            return itemAt(i);
-        }
-    }
-    return 0;
-}
-
-sp<AudioProfile> AudioProfileVector::getFirstValidProfileFor(audio_format_t format) const
-{
-    for (size_t i = 0; i < size(); i++) {
-        if (itemAt(i)->isValid() && itemAt(i)->getFormat() == format) {
-            return itemAt(i);
-        }
-    }
-    return 0;
-}
-
-FormatVector AudioProfileVector::getSupportedFormats() const
-{
-    FormatVector supportedFormats;
-    for (size_t i = 0; i < size(); i++) {
-        if (itemAt(i)->hasValidFormat()) {
-            supportedFormats.add(itemAt(i)->getFormat());
-        }
-    }
-    return supportedFormats;
-}
-
-bool AudioProfileVector::hasDynamicChannelsFor(audio_format_t format) const
-{
-    for (size_t i = 0; i < size(); i++) {
-        sp<AudioProfile> profile = itemAt(i);
-        if (profile->getFormat() == format && profile->isDynamicChannels()) {
-            return true;
-        }
-    }
-    return false;
-}
-
-bool AudioProfileVector::hasDynamicProfile() const
-{
-    for (size_t i = 0; i < size(); i++) {
-        if (itemAt(i)->isDynamic()) {
-            return true;
-        }
-    }
-    return false;
-}
-
-bool AudioProfileVector::hasDynamicRateFor(audio_format_t format) const
-{
-    for (size_t i = 0; i < size(); i++) {
-        sp<AudioProfile> profile = itemAt(i);
-        if (profile->getFormat() == format && profile->isDynamicRate()) {
-            return true;
-        }
-    }
-    return false;
-}
-
-void AudioProfileVector::setFormats(const FormatVector &formats)
-{
-    // Only allow to change the format of dynamic profile
-    sp<AudioProfile> dynamicFormatProfile = getProfileFor(gDynamicFormat);
-    if (dynamicFormatProfile == 0) {
-        return;
-    }
-    for (size_t i = 0; i < formats.size(); i++) {
-        sp<AudioProfile> profile = new AudioProfile(formats[i],
-                dynamicFormatProfile->getChannels(),
-                dynamicFormatProfile->getSampleRates());
-        profile->setDynamicFormat(true);
-        profile->setDynamicChannels(dynamicFormatProfile->isDynamicChannels());
-        profile->setDynamicRate(dynamicFormatProfile->isDynamicRate());
-        add(profile);
-    }
-}
-
-void AudioProfileVector::dump(String8 *dst, int spaces) const
-{
-    dst->appendFormat("%*s- Profiles:\n", spaces, "");
-    for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("%*sProfile %zu:", spaces + 4, "", i);
-        itemAt(i)->dump(dst, spaces + 8);
-    }
-}
-
-sp<AudioProfile> AudioProfileVector::getProfileFor(audio_format_t format) const
-{
-    for (size_t i = 0; i < size(); i++) {
-        if (itemAt(i)->getFormat() == format) {
-            return itemAt(i);
-        }
-    }
-    return 0;
-}
-
-void AudioProfileVector::setSampleRatesFor(
-        const SampleRateVector &sampleRates, audio_format_t format)
-{
-    for (size_t i = 0; i < size(); i++) {
-        sp<AudioProfile> profile = itemAt(i);
-        if (profile->getFormat() == format && profile->isDynamicRate()) {
-            if (profile->hasValidRates()) {
-                // Need to create a new profile with same format
-                sp<AudioProfile> profileToAdd = new AudioProfile(format, profile->getChannels(),
-                        sampleRates);
-                profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
-                add(profileToAdd);
-            } else {
-                profile->setSampleRates(sampleRates);
-            }
-            return;
-        }
-    }
-}
-
-void AudioProfileVector::setChannelsFor(const ChannelsVector &channelMasks, audio_format_t format)
-{
-    for (size_t i = 0; i < size(); i++) {
-        sp<AudioProfile> profile = itemAt(i);
-        if (profile->getFormat() == format && profile->isDynamicChannels()) {
-            if (profile->hasValidChannels()) {
-                // Need to create a new profile with same format
-                sp<AudioProfile> profileToAdd = new AudioProfile(format, channelMasks,
-                        profile->getSampleRates());
-                profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
-                add(profileToAdd);
-            } else {
-                profile->setChannels(channelMasks);
-            }
-            return;
-        }
-    }
-}
-
-// static
-int AudioProfileVector::compareFormats(const sp<AudioProfile> *profile1,
-                                       const sp<AudioProfile> *profile2)
-{
-    return AudioPort::compareFormats((*profile1)->getFormat(), (*profile2)->getFormat());
-}
-
-} // namespace android
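The AudioProfileVector member functions removed above reappear below as free helper functions in the new AudioProfileVectorHelper.cpp, operating on a std::vector-like AudioProfileVector. A minimal standalone sketch of the add-then-re-sort pattern they keep (stand-in Profile type and a toy compareFormats; the real code uses sp<AudioProfile> and PolicyAudioPort::compareFormats):

#include <algorithm>
#include <cstdio>
#include <memory>
#include <vector>

namespace sketch {

struct Profile {
    int format;  // stand-in for audio_format_t; 0 plays the role of AUDIO_FORMAT_DEFAULT
};

using ProfileVector = std::vector<std::shared_ptr<Profile>>;

// Stand-in for PolicyAudioPort::compareFormats(): smaller value means "worse" format.
int compareFormats(int lhs, int rhs) { return lhs - rhs; }

void sortProfiles(ProfileVector &profiles) {
    std::sort(profiles.begin(), profiles.end(),
              [](const std::shared_ptr<Profile> &a, const std::shared_ptr<Profile> &b) {
                  return compareFormats(a->format, b->format) < 0;
              });
}

// Mirrors the addAudioProfileAndSort() idea: append, then re-sort worst-to-best
// so the default format stays at index 0.
size_t addProfileAndSort(ProfileVector &profiles, std::shared_ptr<Profile> profile) {
    profiles.push_back(std::move(profile));
    sortProfiles(profiles);
    return profiles.size() - 1;
}

}  // namespace sketch

int main() {
    sketch::ProfileVector v;
    sketch::addProfileAndSort(v, std::make_shared<sketch::Profile>(sketch::Profile{7}));
    sketch::addProfileAndSort(v, std::make_shared<sketch::Profile>(sketch::Profile{0}));
    std::printf("first format rank: %d\n", v.front()->format);  // prints 0
    return 0;
}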
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfileVectorHelper.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfileVectorHelper.cpp
new file mode 100644
index 0000000..8ccb8b9
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioProfileVectorHelper.cpp
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <set>
+#include <string>
+
+#define LOG_TAG "APM::AudioProfileVectorHelper"
+//#define LOG_NDEBUG 0
+
+#include <media/AudioContainers.h>
+#include <media/AudioResamplerPublic.h>
+#include <utils/Errors.h>
+
+#include "AudioProfileVectorHelper.h"
+#include "HwModule.h"
+#include "PolicyAudioPort.h"
+#include "policy.h"
+
+namespace android {
+
+void sortAudioProfiles(AudioProfileVector &audioProfileVector) {
+    std::sort(audioProfileVector.begin(), audioProfileVector.end(),
+            [](const sp<AudioProfile> & a, const sp<AudioProfile> & b)
+            {
+                return PolicyAudioPort::compareFormats(a->getFormat(), b->getFormat()) < 0;
+            });
+}
+
+ssize_t addAudioProfileAndSort(AudioProfileVector &audioProfileVector,
+                               const sp<AudioProfile> &profile)
+{
+    ssize_t ret = audioProfileVector.add(profile);
+    // we sort from worst to best, so that AUDIO_FORMAT_DEFAULT is always the first entry.
+    sortAudioProfiles(audioProfileVector);
+    return ret;
+}
+
+sp<AudioProfile> getAudioProfileForFormat(const AudioProfileVector &audioProfileVector,
+                                          audio_format_t format)
+{
+    for (const auto &profile : audioProfileVector) {
+        if (profile->getFormat() == format) {
+            return profile;
+        }
+    }
+    return nullptr;
+}
+
+void setSampleRatesForAudioProfiles(AudioProfileVector &audioProfileVector,
+                                    const SampleRateSet &sampleRateSet,
+                                    audio_format_t format)
+{
+    for (const auto &profile : audioProfileVector) {
+        if (profile->getFormat() == format && profile->isDynamicRate()) {
+            if (profile->hasValidRates()) {
+                // Need to create a new profile with the same format
+                sp<AudioProfile> profileToAdd = new AudioProfile(
+                        format, profile->getChannels(), sampleRateSet);
+                profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
+                addAudioProfileAndSort(audioProfileVector, profileToAdd);
+            } else {
+                profile->setSampleRates(sampleRateSet);
+            }
+            return;
+        }
+    }
+}
+
+void setChannelsForAudioProfiles(AudioProfileVector &audioProfileVector,
+                                 const ChannelMaskSet &channelMaskSet,
+                                 audio_format_t format)
+{
+    for (const auto &profile : audioProfileVector) {
+        if (profile->getFormat() == format && profile->isDynamicChannels()) {
+            if (profile->hasValidChannels()) {
+                // Need to create a new profile with the same format
+                sp<AudioProfile> profileToAdd = new AudioProfile(format, channelMaskSet,
+                        profile->getSampleRates());
+                profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
+                addAudioProfileAndSort(audioProfileVector, profileToAdd);
+            } else {
+                profile->setChannels(channelMaskSet);
+            }
+            return;
+        }
+    }
+}
+
+void addProfilesForFormats(AudioProfileVector &audioProfileVector, const FormatVector &formatVector)
+{
+    // Only allow changing the format of a dynamic profile
+    sp<AudioProfile> dynamicFormatProfile = getAudioProfileForFormat(
+            audioProfileVector, gDynamicFormat);
+    if (!dynamicFormatProfile) {
+        return;
+    }
+    for (const auto &format : formatVector) {
+        sp<AudioProfile> profile = new AudioProfile(format,
+                dynamicFormatProfile->getChannels(),
+                dynamicFormatProfile->getSampleRates());
+        profile->setDynamicFormat(true);
+        profile->setDynamicChannels(dynamicFormatProfile->isDynamicChannels());
+        profile->setDynamicRate(dynamicFormatProfile->isDynamicRate());
+        addAudioProfileAndSort(audioProfileVector, profile);
+    }
+}
+
+void addDynamicAudioProfileAndSort(AudioProfileVector &audioProfileVector,
+                                   const sp<AudioProfile> &profileToAdd)
+{
+    // Check valid profile to add:
+    if (!profileToAdd->hasValidFormat()) {
+        ALOGW("Adding dynamic audio profile without valid format");
+        return;
+    }
+    if (!profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
+        FormatVector formats;
+        formats.push_back(profileToAdd->getFormat());
+        addProfilesForFormats(audioProfileVector, FormatVector(formats));
+        return;
+    }
+    if (!profileToAdd->hasValidChannels() && profileToAdd->hasValidRates()) {
+        setSampleRatesForAudioProfiles(
+                audioProfileVector, profileToAdd->getSampleRates(), profileToAdd->getFormat());
+        return;
+    }
+    if (profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
+        setChannelsForAudioProfiles(
+                audioProfileVector, profileToAdd->getChannels(), profileToAdd->getFormat());
+        return;
+    }
+    // Go through the list of profiles to avoid duplicates
+    for (size_t profileIndex = 0; profileIndex < audioProfileVector.size(); profileIndex++) {
+        const sp<AudioProfile> &profile = audioProfileVector.at(profileIndex);
+        if (profile->isValid() && profile == profileToAdd) {
+            // Nothing to do
+            return;
+        }
+    }
+    profileToAdd->setDynamicFormat(true); // set the format as dynamic to allow removal
+    addAudioProfileAndSort(audioProfileVector, profileToAdd);
+}
+
+void appendAudioProfiles(AudioProfileVector &audioProfileVector,
+                         const AudioProfileVector &audioProfileVectorToAppend)
+{
+    audioProfileVector.insert(audioProfileVector.end(),
+                              audioProfileVectorToAppend.begin(),
+                              audioProfileVectorToAppend.end());
+}
+
+status_t checkExact(const sp<AudioProfile> &audioProfile,
+                    uint32_t samplingRate,
+                    audio_channel_mask_t channelMask,
+                    audio_format_t format)
+{
+    if (audio_formats_match(format, audioProfile->getFormat()) &&
+            audioProfile->supportsChannels(channelMask) &&
+            audioProfile->supportsRate(samplingRate)) {
+        return NO_ERROR;
+    }
+    return BAD_VALUE;
+}
+
+status_t checkCompatibleSamplingRate(const sp<AudioProfile> &audioProfile,
+                                     uint32_t samplingRate,
+                                     uint32_t &updatedSamplingRate)
+{
+    ALOG_ASSERT(samplingRate > 0);
+
+    const SampleRateSet sampleRates = audioProfile->getSampleRates();
+    if (sampleRates.empty()) {
+        updatedSamplingRate = samplingRate;
+        return NO_ERROR;
+    }
+
+    // Search for the closest supported sampling rate that is above (preferred)
+    // or below (acceptable) the desired sampling rate, within a permitted ratio.
+    // The sampling rates are sorted in ascending order.
+    auto desiredRate = sampleRates.lower_bound(samplingRate);
+
+    // Prefer to down-sample from a higher sampling rate, as we get the desired frequency spectrum.
+    if (desiredRate != sampleRates.end()) {
+        if (*desiredRate / AUDIO_RESAMPLER_DOWN_RATIO_MAX <= samplingRate) {
+            updatedSamplingRate = *desiredRate;
+            return NO_ERROR;
+        }
+    }
+    // But if we have to up-sample from a lower sampling rate, that's OK.
+    if (desiredRate != sampleRates.begin()) {
+        uint32_t candidate = *(--desiredRate);
+        if (candidate * AUDIO_RESAMPLER_UP_RATIO_MAX >= samplingRate) {
+            updatedSamplingRate = candidate;
+            return NO_ERROR;
+        }
+    }
+    // leave updatedSamplingRate unmodified
+    return BAD_VALUE;
+}
+
+status_t checkCompatibleChannelMask(const sp<AudioProfile> &audioProfile,
+                                    audio_channel_mask_t channelMask,
+                                    audio_channel_mask_t &updatedChannelMask,
+                                    audio_port_type_t portType,
+                                    audio_port_role_t portRole)
+{
+    const ChannelMaskSet channelMasks = audioProfile->getChannels();
+    if (channelMasks.empty()) {
+        updatedChannelMask = channelMask;
+        return NO_ERROR;
+    }
+    const bool isRecordThread = portType == AUDIO_PORT_TYPE_MIX && portRole == AUDIO_PORT_ROLE_SINK;
+    const bool isIndex = audio_channel_mask_get_representation(channelMask)
+            == AUDIO_CHANNEL_REPRESENTATION_INDEX;
+    const uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
+    int bestMatch = 0;
+    for (const auto &supported : channelMasks) {
+        if (supported == channelMask) {
+            // Exact matches always taken.
+            updatedChannelMask = channelMask;
+            return NO_ERROR;
+        }
+
+        // AUDIO_CHANNEL_NONE (value: 0) is used for dynamic channel support
+        if (isRecordThread && supported != AUDIO_CHANNEL_NONE) {
+            // Approximate (best) match:
+            // The match score measures how well the supported channel mask matches the
+            // desired mask, where increasing-is-better.
+            //
+            // TODO: Some tweaks may be needed.
+            // Should be a static function of the data processing library.
+            //
+            // In priority:
+            // match score = 1000 if legacy channel conversion equivalent (always prefer this)
+            // OR
+            // match score += 100 if the channel mask representations match
+            // match score += number of channels matched.
+            // match score += 100 if the channel mask representations DO NOT match
+            //   but the profile has a positional channel mask and fewer than 2 channels.
+            //   (Audio HALs conventionally do not list index masks for fewer than 2 channels.)
+            //
+            // If there are no matched channels, the mask may still be accepted
+            // but the playback or record will be silent.
+            const bool isSupportedIndex = (audio_channel_mask_get_representation(supported)
+                    == AUDIO_CHANNEL_REPRESENTATION_INDEX);
+            const uint32_t supportedChannelCount = audio_channel_count_from_in_mask(supported);
+            int match;
+            if (isIndex && isSupportedIndex) {
+                // index equivalence
+                match = 100 + __builtin_popcount(
+                        audio_channel_mask_get_bits(channelMask)
+                            & audio_channel_mask_get_bits(supported));
+            } else if (isIndex && !isSupportedIndex) {
+                const uint32_t equivalentBits = (1 << supportedChannelCount) - 1;
+                match = __builtin_popcount(
+                        audio_channel_mask_get_bits(channelMask) & equivalentBits);
+                if (supportedChannelCount <= FCC_2) {
+                    match += 100;
+                }
+            } else if (!isIndex && isSupportedIndex) {
+                const uint32_t equivalentBits = (1 << channelCount) - 1;
+                match = __builtin_popcount(
+                        equivalentBits & audio_channel_mask_get_bits(supported));
+            } else {
+                // positional equivalence
+                match = 100 + __builtin_popcount(
+                        audio_channel_mask_get_bits(channelMask)
+                            & audio_channel_mask_get_bits(supported));
+                switch (supported) {
+                case AUDIO_CHANNEL_IN_FRONT_BACK:
+                case AUDIO_CHANNEL_IN_STEREO:
+                    if (channelMask == AUDIO_CHANNEL_IN_MONO) {
+                        match = 1000;
+                    }
+                    break;
+                case AUDIO_CHANNEL_IN_MONO:
+                    if (channelMask == AUDIO_CHANNEL_IN_FRONT_BACK
+                            || channelMask == AUDIO_CHANNEL_IN_STEREO) {
+                        match = 1000;
+                    }
+                    break;
+                default:
+                    break;
+                }
+            }
+            if (match > bestMatch) {
+                bestMatch = match;
+                updatedChannelMask = supported;
+            }
+        }
+    }
+    return bestMatch > 0 ? NO_ERROR : BAD_VALUE;
+}
+
+status_t checkExactProfile(const AudioProfileVector& audioProfileVector,
+                           const uint32_t samplingRate,
+                           audio_channel_mask_t channelMask,
+                           audio_format_t format)
+{
+    if (audioProfileVector.empty()) {
+        return NO_ERROR;
+    }
+
+    for (const auto& profile : audioProfileVector) {
+        if (checkExact(profile, samplingRate, channelMask, format) == NO_ERROR) {
+            return NO_ERROR;
+        }
+    }
+    return BAD_VALUE;
+}
+
+status_t checkCompatibleProfile(const AudioProfileVector &audioProfileVector,
+                                uint32_t &samplingRate,
+                                audio_channel_mask_t &channelMask,
+                                audio_format_t &format,
+                                audio_port_type_t portType,
+                                audio_port_role_t portRole)
+{
+    if (audioProfileVector.empty()) {
+        return NO_ERROR;
+    }
+
+    const bool checkInexact = // when port is input and format is linear pcm
+            portType == AUDIO_PORT_TYPE_MIX && portRole == AUDIO_PORT_ROLE_SINK
+            && audio_is_linear_pcm(format);
+
+    // iterate from best format to worst format (reverse order)
+    for (ssize_t i = audioProfileVector.size() - 1; i >= 0 ; --i) {
+        const sp<AudioProfile> profile = audioProfileVector.at(i);
+        audio_format_t formatToCompare = profile->getFormat();
+        if (formatToCompare == format ||
+                (checkInexact
+                        && formatToCompare != AUDIO_FORMAT_DEFAULT
+                        && audio_is_linear_pcm(formatToCompare))) {
+            // A compatible profile has been found; check whether it also has
+            // compatible rate and channels.
+            audio_channel_mask_t updatedChannels;
+            uint32_t updatedRate;
+            if (checkCompatibleChannelMask(profile, channelMask, updatedChannels,
+                                           portType, portRole) == NO_ERROR &&
+                    checkCompatibleSamplingRate(profile, samplingRate, updatedRate) == NO_ERROR) {
+                // for inexact checks we take the first linear pcm format due to sorting.
+                format = formatToCompare;
+                channelMask = updatedChannels;
+                samplingRate = updatedRate;
+                return NO_ERROR;
+            }
+        }
+    }
+    return BAD_VALUE;
+}
+
+// Returns an intersection between two possibly unsorted vectors and the contents of 'order'.
+// The result is ordered according to 'order'.
+template<typename T, typename Order>
+std::vector<typename T::value_type> intersectFilterAndOrder(
+        const T& input1, const T& input2, const Order& order)
+{
+    std::set<typename T::value_type> set1{input1.begin(), input1.end()};
+    std::set<typename T::value_type> set2{input2.begin(), input2.end()};
+    std::set<typename T::value_type> common;
+    std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(),
+            std::inserter(common, common.begin()));
+    std::vector<typename T::value_type> result;
+    for (const auto& e : order) {
+        if (common.find(e) != common.end()) result.push_back(e);
+    }
+    return result;
+}
+
+// Intersect two possibly unsorted vectors, return common elements according to 'comp' ordering.
+// 'comp' is a comparator function.
+template<typename T, typename Compare>
+std::vector<typename T::value_type> intersectAndOrder(
+        const T& input1, const T& input2, Compare comp)
+{
+    std::set<typename T::value_type, Compare> set1{input1.begin(), input1.end(), comp};
+    std::set<typename T::value_type, Compare> set2{input2.begin(), input2.end(), comp};
+    std::vector<typename T::value_type> result;
+    std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(),
+            std::back_inserter(result), comp);
+    return result;
+}
+
+status_t findBestMatchingOutputConfig(
+        const AudioProfileVector &audioProfileVector,
+        const AudioProfileVector &outputProfileVector,
+        const std::vector<audio_format_t> &preferredFormatVector, // order: most pref -> least pref
+        const std::vector<audio_channel_mask_t> &preferredOutputChannelVector,
+        bool preferHigherSamplingRates,
+        audio_config_base &bestOutputConfig)
+{
+    auto formats = intersectFilterAndOrder(audioProfileVector.getSupportedFormats(),
+            outputProfileVector.getSupportedFormats(), preferredFormatVector);
+    // Pick the best compatible profile.
+    for (const auto& f : formats) {
+        sp<AudioProfile> inputProfile = audioProfileVector.getFirstValidProfileFor(f);
+        sp<AudioProfile> outputProfile = outputProfileVector.getFirstValidProfileFor(f);
+        if (inputProfile == nullptr || outputProfile == nullptr) {
+            continue;
+        }
+        auto channels = intersectFilterAndOrder(asOutMask(inputProfile->getChannels()),
+                outputProfile->getChannels(), preferredOutputChannelVector);
+        if (channels.empty()) {
+            continue;
+        }
+        auto sampleRates = preferHigherSamplingRates ?
+                intersectAndOrder(inputProfile->getSampleRates(), outputProfile->getSampleRates(),
+                        std::greater<typename SampleRateSet::value_type>()) :
+                intersectAndOrder(inputProfile->getSampleRates(), outputProfile->getSampleRates(),
+                        std::less<typename SampleRateSet::value_type>());
+        if (sampleRates.empty()) {
+            continue;
+        }
+        ALOGD("%s() found channel mask %#x and sample rate %d for format %#x.",
+                __func__, *channels.begin(), *sampleRates.begin(), f);
+        bestOutputConfig.format = f;
+        bestOutputConfig.sample_rate = *sampleRates.begin();
+        bestOutputConfig.channel_mask = *channels.begin();
+        return NO_ERROR;
+    }
+    return BAD_VALUE;
+}
+
+} // namespace android
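For readers tracing checkCompatibleSamplingRate() in the new helper above: SampleRateSet is an ordered std::set, so the closest supported rate is located with lower_bound(). A standalone sketch of that selection, with illustrative ratio limits standing in for AUDIO_RESAMPLER_DOWN_RATIO_MAX / AUDIO_RESAMPLER_UP_RATIO_MAX:

#include <cstdint>
#include <cstdio>
#include <iterator>
#include <set>

namespace sketch {

constexpr uint32_t kDownRatioMax = 6;  // assumed stand-in for AUDIO_RESAMPLER_DOWN_RATIO_MAX
constexpr uint32_t kUpRatioMax = 2;    // assumed stand-in for AUDIO_RESAMPLER_UP_RATIO_MAX

// Returns true and fills 'updated' with a usable rate: prefer the closest
// supported rate at or above 'requested' (down-sample), else the closest below.
bool pickCompatibleRate(const std::set<uint32_t> &supported, uint32_t requested,
                        uint32_t *updated) {
    if (supported.empty()) {  // dynamic profile: accept the requested rate as-is
        *updated = requested;
        return true;
    }
    auto it = supported.lower_bound(requested);  // first rate >= requested
    if (it != supported.end() && *it / kDownRatioMax <= requested) {
        *updated = *it;
        return true;
    }
    if (it != supported.begin()) {
        const uint32_t below = *std::prev(it);
        if (below * kUpRatioMax >= requested) {
            *updated = below;
            return true;
        }
    }
    return false;  // nothing within the permitted resampling ratios
}

}  // namespace sketch

int main() {
    const std::set<uint32_t> rates = {8000, 16000, 48000};
    uint32_t out = 0;
    if (sketch::pickCompatibleRate(rates, 44100, &out)) {
        std::printf("44100 -> %u\n", out);  // prints "44100 -> 48000"
    }
    return 0;
}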
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
index 79f0919..2a18f19 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
@@ -19,7 +19,6 @@
 
 #include "AudioRoute.h"
 #include "HwModule.h"
-#include "AudioGain.h"
 
 namespace android
 {
@@ -27,25 +26,26 @@
 void AudioRoute::dump(String8 *dst, int spaces) const
 {
     dst->appendFormat("%*s- Type: %s\n", spaces, "", mType == AUDIO_ROUTE_MUX ? "Mux" : "Mix");
-    dst->appendFormat("%*s- Sink: %s\n", spaces, "", mSink->getTagName().string());
+    dst->appendFormat("%*s- Sink: %s\n", spaces, "", mSink->getTagName().c_str());
     if (mSources.size() != 0) {
         dst->appendFormat("%*s- Sources: \n", spaces, "");
         for (size_t i = 0; i < mSources.size(); i++) {
-            dst->appendFormat("%*s%s \n", spaces + 4, "", mSources[i]->getTagName().string());
+            dst->appendFormat("%*s%s \n", spaces + 4, "", mSources[i]->getTagName().c_str());
         }
     }
     dst->append("\n");
 }
 
-bool AudioRoute::supportsPatch(const sp<AudioPort> &srcPort, const sp<AudioPort> &dstPort) const
+bool AudioRoute::supportsPatch(const sp<PolicyAudioPort> &srcPort,
+                               const sp<PolicyAudioPort> &dstPort) const
 {
     if (mSink == 0 || dstPort == 0 || dstPort != mSink) {
         return false;
     }
-    ALOGV("%s: sinks %s matching", __FUNCTION__, mSink->getTagName().string());
+    ALOGV("%s: sinks %s matching", __FUNCTION__, mSink->getTagName().c_str());
     for (const auto &sourcePort : mSources) {
         if (sourcePort == srcPort) {
-            ALOGV("%s: sources %s matching", __FUNCTION__, sourcePort->getTagName().string());
+            ALOGV("%s: sources %s matching", __FUNCTION__, sourcePort->getTagName().c_str());
             return true;
         }
     }
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index ad07ab1..1dc7020 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -21,7 +21,6 @@
 #include <utils/Log.h>
 #include <utils/String8.h>
 #include <TypeConverter.h>
-#include "AudioGain.h"
 #include "AudioOutputDescriptor.h"
 #include "AudioPatch.h"
 #include "ClientDescriptor.h"
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index ecd5b34..3d3eb9c 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -22,52 +22,40 @@
 #include <set>
 #include "DeviceDescriptor.h"
 #include "TypeConverter.h"
-#include "AudioGain.h"
 #include "HwModule.h"
 
 namespace android {
 
-DeviceDescriptor::DeviceDescriptor(audio_devices_t type, const String8 &tagName) :
+DeviceDescriptor::DeviceDescriptor(audio_devices_t type, const std::string &tagName) :
         DeviceDescriptor(type, FormatVector{}, tagName)
 {
 }
 
 DeviceDescriptor::DeviceDescriptor(audio_devices_t type, const FormatVector &encodedFormats,
-        const String8 &tagName) :
-    AudioPort(String8(""), AUDIO_PORT_TYPE_DEVICE,
-              audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK :
-                                             AUDIO_PORT_ROLE_SOURCE),
-    mTagName(tagName), mDeviceType(type), mEncodedFormats(encodedFormats)
+        const std::string &tagName) :
+    DeviceDescriptorBase(type), mTagName(tagName), mEncodedFormats(encodedFormats)
 {
     mCurrentEncodedFormat = AUDIO_FORMAT_DEFAULT;
-    if (audio_is_remote_submix_device(type)) {
-        mAddress = String8("0");
-    }
     /* If framework runs against a pre 5.0 Audio HAL, encoded formats are absent from the config.
      * FIXME: APM should know the version of the HAL and don't add the formats for V5.0.
      * For now, the workaround to remove AC3 and IEC61937 support on HDMI is to declare
      * something like 'encodedFormats="AUDIO_FORMAT_PCM_16_BIT"' on the HDMI devicePort.
      */
-    if (type == AUDIO_DEVICE_OUT_HDMI && mEncodedFormats.isEmpty()) {
-        mEncodedFormats.add(AUDIO_FORMAT_AC3);
-        mEncodedFormats.add(AUDIO_FORMAT_IEC61937);
+    if (type == AUDIO_DEVICE_OUT_HDMI && mEncodedFormats.empty()) {
+        mEncodedFormats.push_back(AUDIO_FORMAT_AC3);
+        mEncodedFormats.push_back(AUDIO_FORMAT_IEC61937);
     }
 }
 
-audio_port_handle_t DeviceDescriptor::getId() const
-{
-    return mId;
-}
-
 void DeviceDescriptor::attach(const sp<HwModule>& module)
 {
-    AudioPort::attach(module);
+    PolicyAudioPort::attach(module);
     mId = getNextUniqueId();
 }
 
 void DeviceDescriptor::detach() {
     mId = AUDIO_PORT_HANDLE_NONE;
-    AudioPort::detach();
+    PolicyAudioPort::detach();
 }
 
 template<typename T>
@@ -97,7 +85,7 @@
     if (!device_has_encoding_capability(type())) {
         return true;
     }
-    if (mEncodedFormats.isEmpty()) {
+    if (mEncodedFormats.empty()) {
         return true;
     }
 
@@ -106,7 +94,7 @@
 
 bool DeviceDescriptor::supportsFormat(audio_format_t format)
 {
-    if (mEncodedFormats.isEmpty()) {
+    if (mEncodedFormats.empty()) {
         return true;
     }
 
@@ -118,6 +106,62 @@
     return false;
 }
 
+status_t DeviceDescriptor::applyAudioPortConfig(const struct audio_port_config *config,
+                                                audio_port_config *backupConfig)
+{
+    struct audio_port_config localBackupConfig = { .config_mask = config->config_mask };
+    status_t status = NO_ERROR;
+
+    toAudioPortConfig(&localBackupConfig);
+    if ((status = validationBeforeApplyConfig(config)) == NO_ERROR) {
+        AudioPortConfig::applyAudioPortConfig(config, backupConfig);
+        applyPolicyAudioPortConfig(config);
+    }
+
+    if (backupConfig != NULL) {
+        *backupConfig = localBackupConfig;
+    }
+    return status;
+}
+
+void DeviceDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
+                                         const struct audio_port_config *srcConfig) const
+{
+    DeviceDescriptorBase::toAudioPortConfig(dstConfig, srcConfig);
+    toPolicyAudioPortConfig(dstConfig, srcConfig);
+
+    dstConfig->ext.device.hw_module = getModuleHandle();
+}
+
+void DeviceDescriptor::toAudioPort(struct audio_port *port) const
+{
+    ALOGV("DeviceDescriptor::toAudioPort() handle %d type %08x", mId, mDeviceType);
+    DeviceDescriptorBase::toAudioPort(port);
+    port->ext.device.hw_module = getModuleHandle();
+}
+
+void DeviceDescriptor::importAudioPortAndPickAudioProfile(
+        const sp<PolicyAudioPort>& policyPort, bool force) {
+    if (!force && !policyPort->asAudioPort()->hasDynamicAudioProfile()) {
+        return;
+    }
+    AudioPort::importAudioPort(policyPort->asAudioPort());
+    policyPort->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
+}
+
+void DeviceDescriptor::dump(String8 *dst, int spaces, int index, bool verbose) const
+{
+    String8 extraInfo;
+    if (!mTagName.empty()) {
+        extraInfo.appendFormat("%*s- tag name: %s\n", spaces, "", mTagName.c_str());
+    }
+
+    std::string descBaseDumpStr;
+    DeviceDescriptorBase::dump(&descBaseDumpStr, spaces, index, extraInfo.string(), verbose);
+    dst->append(descBaseDumpStr.c_str());
+}
+
+
 void DeviceVector::refreshTypes()
 {
     mDeviceTypes = AUDIO_DEVICE_NONE;
@@ -219,11 +263,11 @@
             // If format is specified, match it and ignore address
             // Otherwise if address is specified match it
             // Otherwise always match
-            if (((address == "" || itemAt(i)->address() == address) &&
+            if (((address == "" || (itemAt(i)->address().compare(address.c_str()) == 0)) &&
                  format == AUDIO_FORMAT_DEFAULT) ||
                 (itemAt(i)->supportsFormat(format) && format != AUDIO_FORMAT_DEFAULT)) {
                 device = itemAt(i);
-                if (itemAt(i)->address() == address) {
+                if (itemAt(i)->address().compare(address.c_str()) == 0) {
                     break;
                 }
             }
@@ -256,7 +300,6 @@
         audio_devices_t curType = itemAt(i)->type() & ~AUDIO_DEVICE_BIT_IN;
         if ((isOutput == curIsOutput) && ((type & curType) != 0)) {
             devices.add(itemAt(i));
-            type &= ~curType;
             ALOGV("DeviceVector::%s() for type %08x found %p",
                     __func__, itemAt(i)->type(), itemAt(i).get());
         }
@@ -264,7 +307,7 @@
     return devices;
 }
 
-sp<DeviceDescriptor> DeviceVector::getDeviceFromTagName(const String8 &tagName) const
+sp<DeviceDescriptor> DeviceVector::getDeviceFromTagName(const std::string &tagName) const
 {
     for (const auto& device : *this) {
         if (device->getTagName() == tagName) {
@@ -274,6 +317,38 @@
     return nullptr;
 }
 
+DeviceVector DeviceVector::getFirstDevicesFromTypes(
+        std::vector<audio_devices_t> orderedTypes) const
+{
+    DeviceVector devices;
+    for (auto deviceType : orderedTypes) {
+        if (!(devices = getDevicesFromTypeMask(deviceType)).isEmpty()) {
+            break;
+        }
+    }
+    return devices;
+}
+
+sp<DeviceDescriptor> DeviceVector::getFirstExistingDevice(
+        std::vector<audio_devices_t> orderedTypes) const {
+    sp<DeviceDescriptor> device;
+    for (auto deviceType : orderedTypes) {
+        if ((device = getDevice(deviceType, String8(""), AUDIO_FORMAT_DEFAULT)) != nullptr) {
+            break;
+        }
+    }
+    return device;
+}
+
+void DeviceVector::replaceDevicesByType(
+        audio_devices_t typeToRemove, const DeviceVector &devicesToAdd) {
+    DeviceVector devicesToRemove = getDevicesFromTypeMask(typeToRemove);
+    if (!devicesToRemove.isEmpty() && !devicesToAdd.isEmpty()) {
+        remove(devicesToRemove);
+        add(devicesToAdd);
+    }
+}
+
 void DeviceVector::dump(String8 *dst, const String8 &tag, int spaces, bool verbose) const
 {
     if (isEmpty()) {
@@ -285,84 +360,6 @@
     }
 }
 
-void DeviceDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
-                                         const struct audio_port_config *srcConfig) const
-{
-    dstConfig->config_mask = AUDIO_PORT_CONFIG_GAIN;
-    if (mSamplingRate != 0) {
-        dstConfig->config_mask |= AUDIO_PORT_CONFIG_SAMPLE_RATE;
-    }
-    if (mChannelMask != AUDIO_CHANNEL_NONE) {
-        dstConfig->config_mask |= AUDIO_PORT_CONFIG_CHANNEL_MASK;
-    }
-    if (mFormat != AUDIO_FORMAT_INVALID) {
-        dstConfig->config_mask |= AUDIO_PORT_CONFIG_FORMAT;
-    }
-
-    if (srcConfig != NULL) {
-        dstConfig->config_mask |= srcConfig->config_mask;
-    }
-
-    AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
-
-    dstConfig->id = mId;
-    dstConfig->role = audio_is_output_device(mDeviceType) ?
-                        AUDIO_PORT_ROLE_SINK : AUDIO_PORT_ROLE_SOURCE;
-    dstConfig->type = AUDIO_PORT_TYPE_DEVICE;
-    dstConfig->ext.device.type = mDeviceType;
-
-    //TODO Understand why this test is necessary. i.e. why at boot time does it crash
-    // without the test?
-    // This has been demonstrated to NOT be true (at start up)
-    // ALOG_ASSERT(mModule != NULL);
-    dstConfig->ext.device.hw_module = getModuleHandle();
-    (void)audio_utils_strlcpy_zerofill(dstConfig->ext.device.address, mAddress.string());
-}
-
-void DeviceDescriptor::toAudioPort(struct audio_port *port) const
-{
-    ALOGV("DeviceDescriptor::toAudioPort() handle %d type %08x", mId, mDeviceType);
-    AudioPort::toAudioPort(port);
-    port->id = mId;
-    toAudioPortConfig(&port->active_config);
-    port->ext.device.type = mDeviceType;
-    port->ext.device.hw_module = getModuleHandle();
-    (void)audio_utils_strlcpy_zerofill(port->ext.device.address, mAddress.string());
-}
-
-void DeviceDescriptor::importAudioPort(const sp<AudioPort>& port, bool force) {
-    if (!force && !port->hasDynamicAudioProfile()) {
-        return;
-    }
-    AudioPort::importAudioPort(port);
-    port->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
-}
-
-void DeviceDescriptor::dump(String8 *dst, int spaces, int index, bool verbose) const
-{
-    dst->appendFormat("%*sDevice %d:\n", spaces, "", index + 1);
-    if (mId != 0) {
-        dst->appendFormat("%*s- id: %2d\n", spaces, "", mId);
-    }
-    if (!mTagName.isEmpty()) {
-        dst->appendFormat("%*s- tag name: %s\n", spaces, "", mTagName.string());
-    }
-
-    dst->appendFormat("%*s- type: %-48s\n", spaces, "", ::android::toString(mDeviceType).c_str());
-
-    if (mAddress.size() != 0) {
-        dst->appendFormat("%*s- address: %-32s\n", spaces, "", mAddress.string());
-    }
-    AudioPort::dump(dst, spaces, verbose);
-}
-
-std::string DeviceDescriptor::toString() const
-{
-    std::stringstream sstream;
-    sstream << "type:0x" << std::hex << type() << ",@:" << mAddress;
-    return sstream.str();
-}
-
 std::string DeviceVector::toString() const
 {
     if (isEmpty()) {
@@ -411,13 +408,4 @@
     return filteredDevices;
 }
 
-void DeviceDescriptor::log() const
-{
-    ALOGI("Device id:%d type:0x%08X:%s, addr:%s", mId,  mDeviceType,
-          ::android::toString(mDeviceType).c_str(),
-          mAddress.string());
-
-    AudioPort::log("  ");
-}
-
 } // namespace android
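The new DeviceDescriptor::applyAudioPortConfig() above snapshots the active configuration before validating and applying a request, and always hands the snapshot back so callers can roll back later. A minimal standalone sketch of that backup-then-apply pattern, using a stand-in Config type rather than the real audio_port_config:

#include <cstdio>

namespace sketch {

// Stand-in for audio_port_config; only two fields for illustration.
struct Config {
    unsigned sampleRate = 48000;
    unsigned channelCount = 2;
};

struct Port {
    Config active;

    // Stand-in validation rule; the real code validates against the port's profiles.
    bool validate(const Config &requested) const {
        return requested.sampleRate > 0 && requested.channelCount > 0;
    }

    // Applies 'requested' if it validates; always reports the previous config
    // through 'backup' so the caller can restore it later.
    bool applyConfig(const Config &requested, Config *backup) {
        const Config previous = active;  // snapshot first, like toAudioPortConfig()
        const bool ok = validate(requested);
        if (ok) {
            active = requested;
        }
        if (backup != nullptr) {
            *backup = previous;
        }
        return ok;
    }
};

}  // namespace sketch

int main() {
    sketch::Port port;
    sketch::Config wanted;
    wanted.sampleRate = 44100;
    wanted.channelCount = 1;
    sketch::Config backup;
    if (port.applyConfig(wanted, &backup)) {
        std::printf("applied %u Hz, previous was %u Hz\n",
                    port.active.sampleRate, backup.sampleRate);
        port.applyConfig(backup, nullptr);  // roll back to the snapshot
    }
    return 0;
}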
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 1f9b725..453006a 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -19,7 +19,6 @@
 
 #include "HwModule.h"
 #include "IOProfile.h"
-#include "AudioGain.h"
 #include <policy.h>
 #include <system/audio.h>
 
@@ -42,7 +41,7 @@
     }
 }
 
-status_t HwModule::addOutputProfile(const String8& name, const audio_config_t *config,
+status_t HwModule::addOutputProfile(const std::string& name, const audio_config_t *config,
                                     audio_devices_t device, const String8& address)
 {
     sp<IOProfile> profile = new OutputProfile(name);
@@ -51,7 +50,7 @@
                                               config->sample_rate));
 
     sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
-    devDesc->setAddress(address);
+    devDesc->setAddress(address.string());
     addDynamicDevice(devDesc);
     // Reciprocally attach the device to the module
     devDesc->attach(this);
@@ -96,7 +95,7 @@
     }
 }
 
-status_t HwModule::removeOutputProfile(const String8& name)
+status_t HwModule::removeOutputProfile(const std::string& name)
 {
     for (size_t i = 0; i < mOutputProfiles.size(); i++) {
         if (mOutputProfiles[i]->getName() == name) {
@@ -111,7 +110,7 @@
     return NO_ERROR;
 }
 
-status_t HwModule::addInputProfile(const String8& name, const audio_config_t *config,
+status_t HwModule::addInputProfile(const std::string& name, const audio_config_t *config,
                                    audio_devices_t device, const String8& address)
 {
     sp<IOProfile> profile = new InputProfile(name);
@@ -119,19 +118,19 @@
                                               config->sample_rate));
 
     sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
-    devDesc->setAddress(address);
+    devDesc->setAddress(address.string());
     addDynamicDevice(devDesc);
     // Reciprocally attach the device to the module
     devDesc->attach(this);
     profile->addSupportedDevice(devDesc);
 
     ALOGV("addInputProfile() name %s rate %d mask 0x%08x",
-          name.string(), config->sample_rate, config->channel_mask);
+          name.c_str(), config->sample_rate, config->channel_mask);
 
     return addInputProfile(profile);
 }
 
-status_t HwModule::removeInputProfile(const String8& name)
+status_t HwModule::removeInputProfile(const std::string& name)
 {
     for (size_t i = 0; i < mInputProfiles.size(); i++) {
         if (mInputProfiles[i]->getName() == name) {
@@ -157,7 +156,7 @@
 sp<DeviceDescriptor> HwModule::getRouteSinkDevice(const sp<AudioRoute> &route) const
 {
     sp<DeviceDescriptor> sinkDevice = 0;
-    if (route->getSink()->getType() == AUDIO_PORT_TYPE_DEVICE) {
+    if (route->getSink()->asAudioPort()->getType() == AUDIO_PORT_TYPE_DEVICE) {
         sinkDevice = mDeclaredDevices.getDeviceFromTagName(route->getSink()->getTagName());
     }
     return sinkDevice;
@@ -167,7 +166,7 @@
 {
     DeviceVector sourceDevices;
     for (const auto& source : route->getSources()) {
-        if (source->getType() == AUDIO_PORT_TYPE_DEVICE) {
+        if (source->asAudioPort()->getType() == AUDIO_PORT_TYPE_DEVICE) {
             sourceDevices.add(mDeclaredDevices.getDeviceFromTagName(source->getTagName()));
         }
     }
@@ -187,20 +186,20 @@
     for (const auto& stream : mInputProfiles) {
         DeviceVector sourceDevices;
         for (const auto& route : stream->getRoutes()) {
-            sp<AudioPort> sink = route->getSink();
+            sp<PolicyAudioPort> sink = route->getSink();
             if (sink == 0 || stream != sink) {
                 ALOGE("%s: Invalid route attached to input stream", __FUNCTION__);
                 continue;
             }
             DeviceVector sourceDevicesForRoute = getRouteSourceDevices(route);
             if (sourceDevicesForRoute.isEmpty()) {
-                ALOGE("%s: invalid source devices for %s", __FUNCTION__, stream->getName().string());
+                ALOGE("%s: invalid source devices for %s", __FUNCTION__, stream->getName().c_str());
                 continue;
             }
             sourceDevices.add(sourceDevicesForRoute);
         }
         if (sourceDevices.isEmpty()) {
-            ALOGE("%s: invalid source devices for %s", __FUNCTION__, stream->getName().string());
+            ALOGE("%s: invalid source devices for %s", __FUNCTION__, stream->getName().c_str());
             continue;
         }
         stream->setSupportedDevices(sourceDevices);
@@ -208,14 +207,14 @@
     for (const auto& stream : mOutputProfiles) {
         DeviceVector sinkDevices;
         for (const auto& route : stream->getRoutes()) {
-            sp<AudioPort> source = route->getSources().findByTagName(stream->getTagName());
+            sp<PolicyAudioPort> source = findByTagName(route->getSources(), stream->getTagName());
             if (source == 0 || stream != source) {
                 ALOGE("%s: Invalid route attached to output stream", __FUNCTION__);
                 continue;
             }
             sp<DeviceDescriptor> sinkDevice = getRouteSinkDevice(route);
             if (sinkDevice == 0) {
-                ALOGE("%s: invalid sink device for %s", __FUNCTION__, stream->getName().string());
+                ALOGE("%s: invalid sink device for %s", __FUNCTION__, stream->getName().c_str());
                 continue;
             }
             sinkDevices.add(sinkDevice);
@@ -230,7 +229,8 @@
     mHandle = handle;
 }
 
-bool HwModule::supportsPatch(const sp<AudioPort> &srcPort, const sp<AudioPort> &dstPort) const {
+bool HwModule::supportsPatch(const sp<PolicyAudioPort> &srcPort,
+                             const sp<PolicyAudioPort> &dstPort) const {
     for (const auto &route : mRoutes) {
         if (route->supportsPatch(srcPort, dstPort)) {
             return true;
@@ -260,7 +260,7 @@
     }
     mDeclaredDevices.dump(dst, String8("Declared"), 2, true);
     mDynamicDevices.dump(dst, String8("Dynamic"),  2, true);
-    mRoutes.dump(dst, 2);
+    dumpAudioRouteVector(mRoutes, dst, 2);
 }
 
 sp <HwModule> HwModuleCollection::getModuleFromName(const char *name) const
@@ -335,8 +335,8 @@
             }
             if (allowToCreate) {
                 moduleDevice->attach(hwModule);
-                moduleDevice->setAddress(devAddress);
-                moduleDevice->setName(String8(name));
+                moduleDevice->setAddress(devAddress.string());
+                moduleDevice->setName(name);
             }
             return moduleDevice;
         }
@@ -360,9 +360,9 @@
               address);
         return nullptr;
     }
-    sp<DeviceDescriptor> device = new DeviceDescriptor(type, String8(name));
-    device->setName(String8(name));
-    device->setAddress(String8(address));
+    sp<DeviceDescriptor> device = new DeviceDescriptor(type, name);
+    device->setName(name);
+    device->setAddress(address);
     device->setEncodedFormat(encodedFormat);
 
   // Add the device to the list of dynamic devices
@@ -382,7 +382,7 @@
             // @todo quid of audio profile? import the profile from device of the same type?
             const auto &isoTypeDeviceForProfile =
                 profile->getSupportedDevices().getDevice(type, String8(), AUDIO_FORMAT_DEFAULT);
-            device->importAudioPort(isoTypeDeviceForProfile, true /* force */);
+            device->importAudioPortAndPickAudioProfile(isoTypeDeviceForProfile, true /* force */);
 
             ALOGV("%s: adding device %s to profile %s", __FUNCTION__,
                   device->toString().c_str(), profile->getTagName().c_str());
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index fe2eaee..bf1a0f7 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -20,7 +20,6 @@
 #include <system/audio-base.h>
 #include "IOProfile.h"
 #include "HwModule.h"
-#include "AudioGain.h"
 #include "TypeConverter.h"
 
 namespace android {
@@ -79,7 +78,10 @@
         }
     }
 
-    if (isPlaybackThread && (getFlags() & flags) != flags) {
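+    // These output flags must match exactly between the profile and the request: a
+    // direct, HW A/V sync or MMAP no-IRQ output is only compatible with a request that
+    // asks for the same flag, and vice versa.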
+    const uint32_t mustMatchOutputFlags =
+            AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_HW_AV_SYNC|AUDIO_OUTPUT_FLAG_MMAP_NOIRQ;
+    if (isPlaybackThread && (((getFlags() ^ flags) & mustMatchOutputFlags)
+                    || (getFlags() & flags) != flags)) {
         return false;
     }
     // The only input flag that is allowed to be different is the fast flag.
@@ -105,7 +107,9 @@
 
 void IOProfile::dump(String8 *dst) const
 {
-    AudioPort::dump(dst, 4);
+    std::string portStr;
+    AudioPort::dump(&portStr, 4);
+    dst->append(portStr.c_str());
 
     dst->appendFormat("    - flags: 0x%04x", getFlags());
     std::string flagsLiteral;
diff --git a/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
new file mode 100644
index 0000000..8c61b90
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::PolicyAudioPort"
+//#define LOG_NDEBUG 0
+#include "TypeConverter.h"
+#include "PolicyAudioPort.h"
+#include "HwModule.h"
+#include <policy.h>
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+#endif
+
+namespace android {
+
+// --- PolicyAudioPort class implementation
+void PolicyAudioPort::attach(const sp<HwModule>& module)
+{
+    ALOGV("%s: attaching module %s to port %s",
+            __FUNCTION__, getModuleName(), asAudioPort()->getName().c_str());
+    mModule = module;
+}
+
+void PolicyAudioPort::detach()
+{
+    mModule = nullptr;
+}
+
+// Note that this is a different namespace than AudioFlinger unique IDs
+audio_port_handle_t PolicyAudioPort::getNextUniqueId()
+{
+    return getNextHandle();
+}
+
+audio_module_handle_t PolicyAudioPort::getModuleHandle() const
+{
+    return mModule != 0 ? mModule->getHandle() : AUDIO_MODULE_HANDLE_NONE;
+}
+
+uint32_t PolicyAudioPort::getModuleVersionMajor() const
+{
+    return mModule != 0 ? mModule->getHalVersionMajor() : 0;
+}
+
+const char *PolicyAudioPort::getModuleName() const
+{
+    return mModule != 0 ? mModule->getName() : "invalid module";
+}
+
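+// Check an audio_port_config request against this port: the gain (if present in the
+// config mask) is validated via checkGain(), and any remaining fields (sample rate,
+// channel mask, format) must match one of the port's audio profiles exactly.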
+status_t PolicyAudioPort::checkExactAudioProfile(const struct audio_port_config *config) const
+{
+    status_t status = NO_ERROR;
+    auto config_mask = config->config_mask;
+    if (config_mask & AUDIO_PORT_CONFIG_GAIN) {
+        config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
+        status = asAudioPort()->checkGain(&config->gain, config->gain.index);
+        if (status != NO_ERROR) {
+            return status;
+        }
+    }
+    if (config_mask != 0) {
+        // TODO should we check sample_rate / channel_mask / format separately?
+        status = checkExactProfile(asAudioPort()->getAudioProfiles(), config->sample_rate,
+                config->channel_mask, config->format);
+    }
+    return status;
+}
+
+void PolicyAudioPort::pickSamplingRate(uint32_t &pickedRate,
+                                       const SampleRateSet &samplingRates) const
+{
+    pickedRate = 0;
+    // For direct outputs, pick the minimum sampling rate: this helps ensure that the
+    // channel count / sampling rate combination chosen will be supported by the
+    // connected sink
+    if (isDirectOutput()) {
+        uint32_t samplingRate = UINT_MAX;
+        for (const auto rate : samplingRates) {
+            if ((rate < samplingRate) && (rate > 0)) {
+                samplingRate = rate;
+            }
+        }
+        pickedRate = (samplingRate == UINT_MAX) ? 0 : samplingRate;
+    } else {
+        uint32_t maxRate = SAMPLE_RATE_HZ_MAX;
+
+        // For mixed output and inputs, use max mixer sampling rates. Do not
+        // limit sampling rate otherwise
+        // For inputs, also see checkCompatibleSamplingRate().
+        if (asAudioPort()->getType() == AUDIO_PORT_TYPE_MIX) {
+            maxRate = UINT_MAX;
+        }
+        // TODO: should mSamplingRates[] be ordered in terms of our preference
+        // and we return the first (and hence most preferred) match?  This is of concern if
+        // we want to choose 96kHz over 192kHz for USB driver stability or resource constraints.
+        for (const auto rate : samplingRates) {
+            if ((rate > pickedRate) && (rate <= maxRate)) {
+                pickedRate = rate;
+            }
+        }
+    }
+}
+
+void PolicyAudioPort::pickChannelMask(audio_channel_mask_t &pickedChannelMask,
+                                      const ChannelMaskSet &channelMasks) const
+{
+    pickedChannelMask = AUDIO_CHANNEL_NONE;
+    // For direct outputs, pick the minimum channel count: this helps ensure that the
+    // channel count / sampling rate combination chosen will be supported by the
+    // connected sink
+    if (isDirectOutput()) {
+        uint32_t channelCount = UINT_MAX;
+        for (const auto channelMask : channelMasks) {
+            uint32_t cnlCount;
+            if (asAudioPort()->useInputChannelMask()) {
+                cnlCount = audio_channel_count_from_in_mask(channelMask);
+            } else {
+                cnlCount = audio_channel_count_from_out_mask(channelMask);
+            }
+            if ((cnlCount < channelCount) && (cnlCount > 0)) {
+                pickedChannelMask = channelMask;
+                channelCount = cnlCount;
+            }
+        }
+    } else {
+        uint32_t channelCount = 0;
+        uint32_t maxCount = MAX_MIXER_CHANNEL_COUNT;
+
+        // For mixed output and inputs, use max mixer channel count. Do not
+        // limit channel count otherwise
+        if (asAudioPort()->getType() != AUDIO_PORT_TYPE_MIX) {
+            maxCount = UINT_MAX;
+        }
+        for (const auto channelMask : channelMasks) {
+            uint32_t cnlCount;
+            if (asAudioPort()->useInputChannelMask()) {
+                cnlCount = audio_channel_count_from_in_mask(channelMask);
+            } else {
+                cnlCount = audio_channel_count_from_out_mask(channelMask);
+            }
+            if ((cnlCount > channelCount) && (cnlCount <= maxCount)) {
+                pickedChannelMask = channelMask;
+                channelCount = cnlCount;
+            }
+        }
+    }
+}
+
+/* formats in order of increasing preference */
+const audio_format_t PolicyAudioPort::sPcmFormatCompareTable[] = {
+        AUDIO_FORMAT_DEFAULT,
+        AUDIO_FORMAT_PCM_16_BIT,
+        AUDIO_FORMAT_PCM_8_24_BIT,
+        AUDIO_FORMAT_PCM_24_BIT_PACKED,
+        AUDIO_FORMAT_PCM_32_BIT,
+        AUDIO_FORMAT_PCM_FLOAT,
+};
+
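+// Returns a value > 0 when format1 ranks higher than format2 in the table above
+// (compressed formats, including AUDIO_FORMAT_INVALID, rank above any linear PCM
+// format), < 0 when format2 ranks higher, and 0 when both rank equally.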
+int PolicyAudioPort::compareFormats(audio_format_t format1, audio_format_t format2)
+{
+    // NOTE: AUDIO_FORMAT_INVALID is also considered not PCM and will be compared equal to any
+    // compressed format and better than any PCM format. This is by design of pickFormat()
+    if (!audio_is_linear_pcm(format1)) {
+        if (!audio_is_linear_pcm(format2)) {
+            return 0;
+        }
+        return 1;
+    }
+    if (!audio_is_linear_pcm(format2)) {
+        return -1;
+    }
+
+    int index1 = -1, index2 = -1;
+    for (size_t i = 0;
+            (i < ARRAY_SIZE(sPcmFormatCompareTable)) && ((index1 == -1) || (index2 == -1));
+            i ++) {
+        if (sPcmFormatCompareTable[i] == format1) {
+            index1 = i;
+        }
+        if (sPcmFormatCompareTable[i] == format2) {
+            index2 = i;
+        }
+    }
+    // format1 not found => index1 < 0 => format2 > format1
+    // format2 not found => index2 < 0 => format2 < format1
+    return index1 - index2;
+}
+
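+// Distance between two formats, measured as the difference in bytes per sample;
+// identical formats yield 0 and any comparison involving AUDIO_FORMAT_INVALID yields
+// kFormatDistanceMax.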
+uint32_t PolicyAudioPort::formatDistance(audio_format_t format1, audio_format_t format2)
+{
+    if (format1 == format2) {
+        return 0;
+    }
+    if (format1 == AUDIO_FORMAT_INVALID || format2 == AUDIO_FORMAT_INVALID) {
+        return kFormatDistanceMax;
+    }
+    int diffBytes = (int)audio_bytes_per_sample(format1) -
+            audio_bytes_per_sample(format2);
+
+    return abs(diffBytes);
+}
+
+bool PolicyAudioPort::isBetterFormatMatch(audio_format_t newFormat,
+                                          audio_format_t currentFormat,
+                                          audio_format_t targetFormat)
+{
+    return formatDistance(newFormat, targetFormat) < formatDistance(currentFormat, targetFormat);
+}
+
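+// Pick the preferred profile for this port: the highest-ranked format (capped at the
+// best mixer PCM format for mixed, non-direct ports) for which a valid channel mask
+// and sampling rate can also be picked.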
+void PolicyAudioPort::pickAudioProfile(uint32_t &samplingRate,
+                                       audio_channel_mask_t &channelMask,
+                                       audio_format_t &format) const
+{
+    format = AUDIO_FORMAT_DEFAULT;
+    samplingRate = 0;
+    channelMask = AUDIO_CHANNEL_NONE;
+
+    // special case for uninitialized dynamic profile
+    if (!asAudioPort()->hasValidAudioProfile()) {
+        return;
+    }
+    audio_format_t bestFormat = sPcmFormatCompareTable[ARRAY_SIZE(sPcmFormatCompareTable) - 1];
+    // For mixed output and inputs, use best mixer output format.
+    // Do not limit format otherwise
+    if ((asAudioPort()->getType() != AUDIO_PORT_TYPE_MIX) || isDirectOutput()) {
+        bestFormat = AUDIO_FORMAT_INVALID;
+    }
+
+    const AudioProfileVector& audioProfiles = asAudioPort()->getAudioProfiles();
+    for (size_t i = 0; i < audioProfiles.size(); i ++) {
+        if (!audioProfiles[i]->isValid()) {
+            continue;
+        }
+        audio_format_t formatToCompare = audioProfiles[i]->getFormat();
+        if ((compareFormats(formatToCompare, format) > 0) &&
+                (compareFormats(formatToCompare, bestFormat) <= 0)) {
+            uint32_t pickedSamplingRate = 0;
+            audio_channel_mask_t pickedChannelMask = AUDIO_CHANNEL_NONE;
+            pickChannelMask(pickedChannelMask, audioProfiles[i]->getChannels());
+            pickSamplingRate(pickedSamplingRate, audioProfiles[i]->getSampleRates());
+
+            if (formatToCompare != AUDIO_FORMAT_DEFAULT && pickedChannelMask != AUDIO_CHANNEL_NONE
+                    && pickedSamplingRate != 0) {
+                format = formatToCompare;
+                channelMask = pickedChannelMask;
+                samplingRate = pickedSamplingRate;
+                // TODO: shall we return on the first match or keep trying to pick a better profile?
+            }
+        }
+    }
+    ALOGV("%s Port[nm:%s] profile rate=%d, format=%d, channels=%d", __FUNCTION__,
+            asAudioPort()->getName().c_str(), samplingRate, channelMask, format);
+}
+
+// --- PolicyAudioPortConfig class implementation
+
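+// Validate a requested configuration against the owning policy port before it is
+// applied; returns NO_INIT when this config is not attached to a policy port.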
+status_t PolicyAudioPortConfig::validationBeforeApplyConfig(
+        const struct audio_port_config *config) const
+{
+    sp<PolicyAudioPort> policyAudioPort = getPolicyAudioPort();
+    return policyAudioPort ? policyAudioPort->checkExactAudioProfile(config) : NO_INIT;
+}
+
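+// Propagate the flags field: prefer the flags from srcConfig when provided, otherwise
+// fall back to this config's own flags; reset to none when flags are not requested in
+// the destination config mask.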
+void PolicyAudioPortConfig::toPolicyAudioPortConfig(struct audio_port_config *dstConfig,
+                                                    const struct audio_port_config *srcConfig) const
+{
+    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+        if ((srcConfig != nullptr) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_FLAGS)) {
+            dstConfig->flags = srcConfig->flags;
+        } else {
+            dstConfig->flags = mFlags;
+        }
+    } else {
+        dstConfig->flags = { AUDIO_INPUT_FLAG_NONE };
+    }
+}
+
+
+
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 5f820c2..3b27cf6 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -406,8 +406,8 @@
             samplingRatesFromString(samplingRates, ","));
 
     profile->setDynamicFormat(profile->getFormat() == gDynamicFormat);
-    profile->setDynamicChannels(profile->getChannels().isEmpty());
-    profile->setDynamicRate(profile->getSampleRates().isEmpty());
+    profile->setDynamicChannels(profile->getChannels().empty());
+    profile->setDynamicRate(profile->getSampleRates().empty());
 
     return profile;
 }
@@ -430,16 +430,19 @@
     audio_port_role_t portRole = (role == Attributes::roleSource) ?
             AUDIO_PORT_ROLE_SOURCE : AUDIO_PORT_ROLE_SINK;
 
-    Element mixPort = new IOProfile(String8(name.c_str()), portRole);
+    Element mixPort = new IOProfile(name, portRole);
 
     AudioProfileTraits::Collection profiles;
     status_t status = deserializeCollection<AudioProfileTraits>(child, &profiles, NULL);
     if (status != NO_ERROR) {
         return Status::fromStatusT(status);
     }
-    if (profiles.isEmpty()) {
-        profiles.add(AudioProfile::createFullDynamic());
+    if (profiles.empty()) {
+        profiles.add(AudioProfile::createFullDynamic(gDynamicFormat));
     }
+    // The audio profiles are listed in the order they appear in the audio policy
+    // configuration file. Sort them according to format.
+    sortAudioProfiles(profiles);
     mixPort->setAudioProfiles(profiles);
 
     std::string flags = getXmlAttribute(child, Attributes::flags);
@@ -508,12 +511,12 @@
     if (!encodedFormatsLiteral.empty()) {
         encodedFormats = formatsFromString(encodedFormatsLiteral, " ");
     }
-    Element deviceDesc = new DeviceDescriptor(type, encodedFormats, String8(name.c_str()));
+    Element deviceDesc = new DeviceDescriptor(type, encodedFormats, name);
 
     std::string address = getXmlAttribute(cur, Attributes::address);
     if (!address.empty()) {
         ALOGV("%s: address=%s for %s", __func__, address.c_str(), name.c_str());
-        deviceDesc->setAddress(String8(address.c_str()));
+        deviceDesc->setAddress(address);
     }
 
     AudioProfileTraits::Collection profiles;
@@ -521,9 +524,12 @@
     if (status != NO_ERROR) {
         return Status::fromStatusT(status);
     }
-    if (profiles.isEmpty()) {
-        profiles.add(AudioProfile::createFullDynamic());
+    if (profiles.empty()) {
+        profiles.add(AudioProfile::createFullDynamic(gDynamicFormat));
     }
+    // The audio profiles are listed in the order they appear in the audio policy
+    // configuration file. Sort them according to format.
+    sortAudioProfiles(profiles);
     deviceDesc->setAudioProfiles(profiles);
 
     // Deserialize AudioGain children
@@ -532,7 +538,7 @@
         return Status::fromStatusT(status);
     }
     ALOGV("%s: adding device tag %s type %08x address %s", __func__,
-          deviceDesc->getName().string(), type, deviceDesc->address().string());
+          deviceDesc->getName().c_str(), type, deviceDesc->address().c_str());
     return deviceDesc;
 }
 
@@ -555,7 +561,7 @@
         return Status::fromStatusT(BAD_VALUE);
     }
     // Convert Sink name to port pointer
-    sp<AudioPort> sink = ctx->findPortByTagName(String8(sinkAttr.c_str()));
+    sp<PolicyAudioPort> sink = ctx->findPortByTagName(sinkAttr);
     if (sink == NULL) {
         ALOGE("%s: no sink found with name=%s", __func__, sinkAttr.c_str());
         return Status::fromStatusT(BAD_VALUE);
@@ -568,13 +574,13 @@
         return Status::fromStatusT(BAD_VALUE);
     }
     // Tokenize and Convert Sources name to port pointer
-    AudioPortVector sources;
+    PolicyAudioPortVector sources;
     std::unique_ptr<char[]> sourcesLiteral{strndup(
                 sourcesAttr.c_str(), strlen(sourcesAttr.c_str()))};
     char *devTag = strtok(sourcesLiteral.get(), ",");
     while (devTag != NULL) {
         if (strlen(devTag) != 0) {
-            sp<AudioPort> source = ctx->findPortByTagName(String8(devTag));
+            sp<PolicyAudioPort> source = ctx->findPortByTagName(devTag);
             if (source == NULL) {
                 ALOGE("%s: no source found with name=%s", __func__, devTag);
                 return Status::fromStatusT(BAD_VALUE);
@@ -586,7 +592,7 @@
 
     sink->addRoute(route);
     for (size_t i = 0; i < sources.size(); i++) {
-        sp<AudioPort> source = sources.itemAt(i);
+        sp<PolicyAudioPort> source = sources.itemAt(i);
         source->addRoute(route);
     }
     route->setSources(sources);
@@ -648,7 +654,7 @@
                         ALOGV("%s: %s %s=%s", __func__, tag, childAttachedDeviceTag,
                                 reinterpret_cast<const char*>(attachedDevice.get()));
                         sp<DeviceDescriptor> device = module->getDeclaredDevices().
-                                getDeviceFromTagName(String8(reinterpret_cast<const char*>(
+                                getDeviceFromTagName(std::string(reinterpret_cast<const char*>(
                                                         attachedDevice.get())));
                         ctx->addAvailableDevice(device);
                     }
@@ -663,7 +669,7 @@
                 ALOGV("%s: %s %s=%s", __func__, tag, childDefaultOutputDeviceTag,
                         reinterpret_cast<const char*>(defaultOutputDevice.get()));
                 sp<DeviceDescriptor> device = module->getDeclaredDevices().getDeviceFromTagName(
-                        String8(reinterpret_cast<const char*>(defaultOutputDevice.get())));
+                        std::string(reinterpret_cast<const char*>(defaultOutputDevice.get())));
                 if (device != 0 && ctx->getDefaultOutputDevice() == 0) {
                     ctx->setDefaultOutputDevice(device);
                     ALOGV("%s: default is %08x",
diff --git a/services/audiopolicy/config/audio_policy_volumes.xml b/services/audiopolicy/config/audio_policy_volumes.xml
index ec64a7c..27bd3ff 100644
--- a/services/audiopolicy/config/audio_policy_volumes.xml
+++ b/services/audiopolicy/config/audio_policy_volumes.xml
@@ -44,7 +44,7 @@
     <volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
                                              ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
-                                             ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+                                             ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_HEADSET">
         <point>1,-3000</point>
         <point>33,-2600</point>
diff --git a/services/audiopolicy/engine/config/Android.bp b/services/audiopolicy/engine/config/Android.bp
index 6e72f2a..885b5fa 100644
--- a/services/audiopolicy/engine/config/Android.bp
+++ b/services/audiopolicy/engine/config/Android.bp
@@ -3,7 +3,6 @@
     export_include_dirs: ["include"],
     include_dirs: [
         "external/libxml2/include",
-        "external/icu/icu4c/source/common",
     ],
     srcs: [
         "src/EngineConfig.cpp",
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
index ebd82a7..349f969 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
@@ -16,8 +16,7 @@
 
 #pragma once
 
-#include <AudioGain.h>
-#include <AudioPort.h>
+#include <PolicyAudioPort.h>
 #include <AudioPatch.h>
 #include <IOProfile.h>
 #include <DeviceDescriptor.h>
diff --git a/services/audiopolicy/engineconfigurable/Android.bp b/services/audiopolicy/engineconfigurable/Android.bp
index c27dc88..8f522f0 100644
--- a/services/audiopolicy/engineconfigurable/Android.bp
+++ b/services/audiopolicy/engineconfigurable/Android.bp
@@ -33,6 +33,7 @@
 
     ],
     shared_libs: [
+        "libaudiofoundation",
         "liblog",
         "libcutils",
         "libutils",
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
index 4b57444..465a6f9 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
+++ b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
@@ -259,7 +259,7 @@
     std::string criterionName = audio_is_output_device(devDesc->type()) ?
                 gOutputDeviceAddressCriterionName : gInputDeviceAddressCriterionName;
 
-    ALOGV("%s: device with address %s %s", __FUNCTION__, devDesc->address().string(),
+    ALOGV("%s: device with address %s %s", __FUNCTION__, devDesc->address().c_str(),
           state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE? "disconnected" : "connected");
     ISelectionCriterionInterface *criterion =
             getElement<ISelectionCriterionInterface>(criterionName, mPolicyCriteria);
@@ -271,7 +271,7 @@
 
     auto criterionType = criterion->getCriterionType();
     int deviceAddressId;
-    if (not criterionType->getNumericalValue(devDesc->address().string(), deviceAddressId)) {
+    if (not criterionType->getNumericalValue(devDesc->address().c_str(), deviceAddressId)) {
         ALOGW("%s: unknown device address reported (%s)", __FUNCTION__, devDesc->address().c_str());
         return BAD_TYPE;
     }
diff --git a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
index 5bfad29..8443008 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
+++ b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
@@ -16,8 +16,7 @@
 
 #pragma once
 
-#include <AudioGain.h>
-#include <AudioPort.h>
+#include <PolicyAudioPort.h>
 #include <HwModule.h>
 #include <DeviceDescriptor.h>
 #include <system/audio.h>
diff --git a/services/audiopolicy/enginedefault/Android.bp b/services/audiopolicy/enginedefault/Android.bp
index 2b9cf09..aaf4158 100644
--- a/services/audiopolicy/enginedefault/Android.bp
+++ b/services/audiopolicy/enginedefault/Android.bp
@@ -21,6 +21,7 @@
         "libaudiopolicyengine_config",
     ],
     shared_libs: [
+        "libaudiofoundation",
         "liblog",
         "libcutils",
         "libutils",
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index c602f3a..a2b39dd 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -27,7 +27,7 @@
 #include "Engine.h"
 #include <android-base/macros.h>
 #include <AudioPolicyManagerObserver.h>
-#include <AudioPort.h>
+#include <PolicyAudioPort.h>
 #include <IOProfile.h>
 #include <AudioIODescriptorInterface.h>
 #include <policy.h>
@@ -136,27 +136,23 @@
     return EngineBase::setForceUse(usage, config);
 }
 
-audio_devices_t Engine::getDeviceForStrategyInt(legacy_strategy strategy,
-                                                DeviceVector availableOutputDevices,
-                                                DeviceVector availableInputDevices,
-                                                const SwAudioOutputCollection &outputs,
-                                                uint32_t outputDeviceTypesToIgnore) const
+DeviceVector Engine::getDevicesForStrategyInt(legacy_strategy strategy,
+                                              DeviceVector availableOutputDevices,
+                                              DeviceVector availableInputDevices,
+                                              const SwAudioOutputCollection &outputs) const
 {
-    uint32_t device = AUDIO_DEVICE_NONE;
-    uint32_t availableOutputDevicesType =
-            availableOutputDevices.types() & ~outputDeviceTypesToIgnore;
+    DeviceVector devices;
 
     switch (strategy) {
 
     case STRATEGY_TRANSMITTED_THROUGH_SPEAKER:
-        device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+        devices = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER);
         break;
 
     case STRATEGY_SONIFICATION_RESPECTFUL:
         if (isInCall() || outputs.isActiveLocally(toVolumeSource(AUDIO_STREAM_VOICE_CALL))) {
-            device = getDeviceForStrategyInt(
-                    STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs,
-                    outputDeviceTypesToIgnore);
+            devices = getDevicesForStrategyInt(
+                    STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
         } else {
             bool media_active_locally =
                     outputs.isActiveLocally(toVolumeSource(AUDIO_STREAM_MUSIC),
@@ -165,17 +161,18 @@
                         toVolumeSource(AUDIO_STREAM_ACCESSIBILITY),
                         SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY);
             // routing is same as media without the "remote" device
-            device = getDeviceForStrategyInt(STRATEGY_MEDIA,
+            availableOutputDevices.remove(availableOutputDevices.getDevicesFromTypeMask(
+                    AUDIO_DEVICE_OUT_REMOTE_SUBMIX));
+            devices = getDevicesForStrategyInt(STRATEGY_MEDIA,
                     availableOutputDevices,
-                    availableInputDevices, outputs,
-                    AUDIO_DEVICE_OUT_REMOTE_SUBMIX | outputDeviceTypesToIgnore);
+                    availableInputDevices, outputs);
             // if no media is playing on the device, check for mandatory use of "safe" speaker
             // when media would have played on speaker, and the safe speaker path is available
-            if (!media_active_locally
-                    && (device & AUDIO_DEVICE_OUT_SPEAKER)
-                    && (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
-                device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
-                device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+            if (!media_active_locally) {
+                devices.replaceDevicesByType(
+                        AUDIO_DEVICE_OUT_SPEAKER,
+                        availableOutputDevices.getDevicesFromTypeMask(
+                                AUDIO_DEVICE_OUT_SPEAKER_SAFE));
             }
         }
         break;
@@ -183,9 +180,8 @@
     case STRATEGY_DTMF:
         if (!isInCall()) {
             // when off call, DTMF strategy follows the same rules as MEDIA strategy
-            device = getDeviceForStrategyInt(
-                    STRATEGY_MEDIA, availableOutputDevices, availableInputDevices, outputs,
-                    outputDeviceTypesToIgnore);
+            devices = getDevicesForStrategyInt(
+                    STRATEGY_MEDIA, availableOutputDevices, availableInputDevices, outputs);
             break;
         }
         // when in call, DTMF and PHONE strategies follow the same rules
@@ -197,24 +193,27 @@
         //   - cannot route from voice call RX OR
         //   - audio HAL version is < 3.0 and TX device is on the primary HW module
         if (getPhoneState() == AUDIO_MODE_IN_CALL) {
-            audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+            audio_devices_t txDevice = getDeviceForInputSource(
+                    AUDIO_SOURCE_VOICE_COMMUNICATION)->type();
             sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
-            audio_devices_t availPrimaryInputDevices =
-                 availableInputDevices.getDeviceTypesFromHwModule(primaryOutput->getModuleHandle());
+            DeviceVector availPrimaryInputDevices =
+                    availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle());
 
             // TODO: getPrimaryOutput return only devices from first module in
             // audio_policy_configuration.xml, hearing aid is not there, but it's
             // a primary device
             // FIXME: this is not the right way of solving this problem
-            audio_devices_t availPrimaryOutputDevices =
-                (primaryOutput->supportedDevices().types() | AUDIO_DEVICE_OUT_HEARING_AID) &
-                availableOutputDevices.types();
+            DeviceVector availPrimaryOutputDevices = availableOutputDevices.getDevicesFromTypeMask(
+                    primaryOutput->supportedDevices().types());
+            availPrimaryOutputDevices.add(
+                    availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_HEARING_AID));
 
-            if (((availableInputDevices.types() &
-                    AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
-                    (((txDevice & availPrimaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
-                         (primaryOutput->getAudioPort()->getModuleVersionMajor() < 3))) {
-                availableOutputDevicesType = availPrimaryOutputDevices;
+            if ((availableInputDevices.getDevice(AUDIO_DEVICE_IN_TELEPHONY_RX,
+                    String8(""), AUDIO_FORMAT_DEFAULT) == nullptr) ||
+                    ((availPrimaryInputDevices.getDevice(
+                            txDevice, String8(""), AUDIO_FORMAT_DEFAULT) != nullptr) &&
+                            (primaryOutput->getPolicyAudioPort()->getModuleVersionMajor() < 3))) {
+                availableOutputDevices = availPrimaryOutputDevices;
             }
         }
         // for phone strategy, we first consider the forced use and then the available devices by
@@ -222,49 +221,40 @@
         switch (getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION)) {
         case AUDIO_POLICY_FORCE_BT_SCO:
             if (!isInCall() || strategy != STRATEGY_DTMF) {
-                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
-                if (device) break;
+                devices = availableOutputDevices.getDevicesFromTypeMask(
+                        AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT);
+                if (!devices.isEmpty()) break;
             }
-            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
-            if (device) break;
-            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
-            if (device) break;
+            devices = availableOutputDevices.getFirstDevicesFromTypes({
+                    AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET, AUDIO_DEVICE_OUT_BLUETOOTH_SCO});
+            if (!devices.isEmpty()) break;
             // if SCO device is requested but no SCO device is available, fall back to default case
             FALLTHROUGH_INTENDED;
 
         default:    // FORCE_NONE
-            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_HEARING_AID;
-            if (device) break;
+            devices = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_HEARING_AID);
+            if (!devices.isEmpty()) break;
             // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
             if (!isInCall() &&
                     (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                      outputs.isA2dpSupported()) {
-                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
-                if (device) break;
-                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
-                if (device) break;
+                devices = availableOutputDevices.getFirstDevicesFromTypes({
+                        AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,
+                        AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES});
+                if (!devices.isEmpty()) break;
             }
-            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
-            if (device) break;
-            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
-            if (device) break;
-            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
-            if (device) break;
-            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_HEADSET;
-            if (device) break;
-            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
-            if (device) break;
+            devices = availableOutputDevices.getFirstDevicesFromTypes({
+                    AUDIO_DEVICE_OUT_WIRED_HEADPHONE, AUDIO_DEVICE_OUT_WIRED_HEADSET,
+                    AUDIO_DEVICE_OUT_LINE, AUDIO_DEVICE_OUT_USB_HEADSET,
+                    AUDIO_DEVICE_OUT_USB_DEVICE});
+            if (!devices.isEmpty()) break;
             if (!isInCall()) {
-                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
-                if (device) break;
-                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
-                if (device) break;
-                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
-                if (device) break;
-                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
-                if (device) break;
+                devices = availableOutputDevices.getFirstDevicesFromTypes({
+                        AUDIO_DEVICE_OUT_USB_ACCESSORY, AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET,
+                        AUDIO_DEVICE_OUT_AUX_DIGITAL, AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET});
+                if (!devices.isEmpty()) break;
             }
-            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_EARPIECE;
+            devices = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_EARPIECE);
             break;
 
         case AUDIO_POLICY_FORCE_SPEAKER:
@@ -273,22 +263,18 @@
             if (!isInCall() &&
                     (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                      outputs.isA2dpSupported()) {
-                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
-                if (device) break;
+                devices = availableOutputDevices.getDevicesFromTypeMask(
+                        AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER);
+                if (!devices.isEmpty()) break;
             }
             if (!isInCall()) {
-                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
-                if (device) break;
-                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
-                if (device) break;
-                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
-                if (device) break;
-                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
-                if (device) break;
-                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
-                if (device) break;
+                devices = availableOutputDevices.getFirstDevicesFromTypes({
+                        AUDIO_DEVICE_OUT_USB_ACCESSORY, AUDIO_DEVICE_OUT_USB_DEVICE,
+                        AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_AUX_DIGITAL,
+                        AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET});
+                if (!devices.isEmpty()) break;
             }
-            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+            devices = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER);
             break;
         }
     break;
@@ -298,9 +284,8 @@
         // If incall, just select the STRATEGY_PHONE device
         if (isInCall() ||
                 outputs.isActiveLocally(toVolumeSource(AUDIO_STREAM_VOICE_CALL))) {
-            device = getDeviceForStrategyInt(
-                    STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
-                    outputDeviceTypesToIgnore);
+            devices = getDevicesForStrategyInt(
+                    STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
             break;
         }
         FALLTHROUGH_INTENDED;
@@ -313,41 +298,37 @@
 
         if ((strategy == STRATEGY_SONIFICATION) ||
                 (getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
-            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+            devices = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER);
         }
 
         // if SCO headset is connected and we are told to use it, play ringtone over
         // speaker and BT SCO
-        if ((availableOutputDevicesType & AUDIO_DEVICE_OUT_ALL_SCO) != 0) {
-            uint32_t device2 = AUDIO_DEVICE_NONE;
-            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
-            if (device2 == AUDIO_DEVICE_NONE) {
-                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
-            }
-            if (device2 == AUDIO_DEVICE_NONE) {
-                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
-            }
+        if (!availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_ALL_SCO).isEmpty()) {
+            DeviceVector devices2;
+            devices2 = availableOutputDevices.getFirstDevicesFromTypes({
+                    AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT, AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET,
+                    AUDIO_DEVICE_OUT_BLUETOOTH_SCO});
             // Use ONLY Bluetooth SCO output when ringing in vibration mode
             if (!((getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
                     && (strategy == STRATEGY_ENFORCED_AUDIBLE))) {
                 if (getForceUse(AUDIO_POLICY_FORCE_FOR_VIBRATE_RINGING)
                         == AUDIO_POLICY_FORCE_BT_SCO) {
-                    if (device2 != AUDIO_DEVICE_NONE) {
-                        device = device2;
+                    if (!devices2.isEmpty()) {
+                        devices = devices2;
                         break;
                     }
                 }
             }
             // Use both Bluetooth SCO and phone default output when ringing in normal mode
             if (getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) == AUDIO_POLICY_FORCE_BT_SCO) {
-                if ((strategy == STRATEGY_SONIFICATION) &&
-                        (device & AUDIO_DEVICE_OUT_SPEAKER) &&
-                        (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
-                    device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
-                    device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+                if (strategy == STRATEGY_SONIFICATION) {
+                    devices.replaceDevicesByType(
+                            AUDIO_DEVICE_OUT_SPEAKER,
+                            availableOutputDevices.getDevicesFromTypeMask(
+                                    AUDIO_DEVICE_OUT_SPEAKER_SAFE));
                 }
-                if (device2 != AUDIO_DEVICE_NONE) {
-                    device |= device2;
+                if (!devices2.isEmpty()) {
+                    devices.add(devices2);
                     break;
                 }
             }
@@ -361,25 +342,20 @@
             // compressed format as they would likely not be mixed and dropped.
             for (size_t i = 0; i < outputs.size(); i++) {
                 sp<AudioOutputDescriptor> desc = outputs.valueAt(i);
-                audio_devices_t devices = desc->devices().types() &
-                    (AUDIO_DEVICE_OUT_HDMI | AUDIO_DEVICE_OUT_SPDIF | AUDIO_DEVICE_OUT_HDMI_ARC);
-                if (desc->isActive() && !audio_is_linear_pcm(desc->mFormat) &&
-                        devices != AUDIO_DEVICE_NONE) {
-                    availableOutputDevicesType = availableOutputDevices.types() & ~devices;
+                if (desc->isActive() && !audio_is_linear_pcm(desc->getFormat())) {
+                    availableOutputDevices.remove(desc->devices().getDevicesFromTypeMask(
+                            AUDIO_DEVICE_OUT_HDMI | AUDIO_DEVICE_OUT_SPDIF
+                            | AUDIO_DEVICE_OUT_HDMI_ARC));
                 }
             }
-            availableOutputDevices =
-                    availableOutputDevices.getDevicesFromTypeMask(availableOutputDevicesType);
             if (outputs.isActive(toVolumeSource(AUDIO_STREAM_RING)) ||
                     outputs.isActive(toVolumeSource(AUDIO_STREAM_ALARM))) {
-                return getDeviceForStrategyInt(
-                    STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs,
-                    outputDeviceTypesToIgnore);
+                return getDevicesForStrategyInt(
+                    STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
             }
             if (isInCall()) {
-                return getDeviceForStrategyInt(
-                        STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
-                        outputDeviceTypesToIgnore);
+                return getDevicesForStrategyInt(
+                        STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
             }
         }
         // For other cases, STRATEGY_ACCESSIBILITY behaves like STRATEGY_MEDIA
@@ -388,128 +364,116 @@
     // FIXME: STRATEGY_REROUTING follow STRATEGY_MEDIA for now
     case STRATEGY_REROUTING:
     case STRATEGY_MEDIA: {
-        uint32_t device2 = AUDIO_DEVICE_NONE;
+        DeviceVector devices2;
         if (strategy != STRATEGY_SONIFICATION) {
             // no sonification on remote submix (e.g. WFD)
-            if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                                                 String8("0"), AUDIO_FORMAT_DEFAULT) != 0) {
-                device2 = availableOutputDevices.types() & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+            sp<DeviceDescriptor> remoteSubmix;
+            if ((remoteSubmix = availableOutputDevices.getDevice(
+                    AUDIO_DEVICE_OUT_REMOTE_SUBMIX, String8("0"),
+                    AUDIO_FORMAT_DEFAULT)) != nullptr) {
+                devices2.add(remoteSubmix);
             }
         }
         if (isInCall() && (strategy == STRATEGY_MEDIA)) {
-            device = getDeviceForStrategyInt(
-                    STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
-                    outputDeviceTypesToIgnore);
+            devices = getDevicesForStrategyInt(
+                    STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
             break;
         }
         // FIXME: Find a better solution to prevent routing to BT hearing aid(b/122931261).
-        if ((device2 == AUDIO_DEVICE_NONE) &&
+        if ((devices2.isEmpty()) &&
                 (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP)) {
-            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HEARING_AID;
+            devices2 = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_HEARING_AID);
         }
-        if ((device2 == AUDIO_DEVICE_NONE) &&
+        if ((devices2.isEmpty()) &&
                 (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                  outputs.isA2dpSupported()) {
-            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
-            if (device2 == AUDIO_DEVICE_NONE) {
-                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
-            }
-            if (device2 == AUDIO_DEVICE_NONE) {
-                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
-            }
+            devices2 = availableOutputDevices.getFirstDevicesFromTypes({
+                    AUDIO_DEVICE_OUT_BLUETOOTH_A2DP, AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES,
+                    AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER});
         }
-        if ((device2 == AUDIO_DEVICE_NONE) &&
+        if ((devices2.isEmpty()) &&
             (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) == AUDIO_POLICY_FORCE_SPEAKER)) {
-            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+            devices2 = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER);
         }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
+        if (devices2.isEmpty()) {
+            devices2 = availableOutputDevices.getFirstDevicesFromTypes({
+                    AUDIO_DEVICE_OUT_WIRED_HEADPHONE, AUDIO_DEVICE_OUT_LINE,
+                    AUDIO_DEVICE_OUT_WIRED_HEADSET, AUDIO_DEVICE_OUT_USB_HEADSET,
+                    AUDIO_DEVICE_OUT_USB_ACCESSORY, AUDIO_DEVICE_OUT_USB_DEVICE,
+                    AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET});
         }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_HEADSET;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
-        }
-        if ((device2 == AUDIO_DEVICE_NONE) && (strategy != STRATEGY_SONIFICATION)) {
+        if ((devices2.isEmpty()) && (strategy != STRATEGY_SONIFICATION)) {
             // no sonification on aux digital (e.g. HDMI)
-            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
+            devices2 = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_AUX_DIGITAL);
         }
-        if ((device2 == AUDIO_DEVICE_NONE) &&
+        if ((devices2.isEmpty()) &&
                 (getForceUse(AUDIO_POLICY_FORCE_FOR_DOCK) == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
-            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+            devices2 = availableOutputDevices.getDevicesFromTypeMask(
+                    AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET);
         }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+        if (devices2.isEmpty()) {
+            devices2 = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER);
         }
-        int device3 = AUDIO_DEVICE_NONE;
+        DeviceVector devices3;
         if (strategy == STRATEGY_MEDIA) {
             // ARC, SPDIF and AUX_LINE can co-exist with others.
-            device3 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HDMI_ARC;
-            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPDIF);
-            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_LINE);
+            devices3 = availableOutputDevices.getDevicesFromTypeMask(
+                    AUDIO_DEVICE_OUT_HDMI_ARC | AUDIO_DEVICE_OUT_SPDIF | AUDIO_DEVICE_OUT_AUX_LINE);
         }
 
-        device2 |= device3;
+        devices2.add(devices3);
         // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or
         // STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise
-        device |= device2;
+        devices.add(devices2);
 
         // If hdmi system audio mode is on, remove speaker out of output list.
         if ((strategy == STRATEGY_MEDIA) &&
             (getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO) ==
                 AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
-            device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+            devices.remove(devices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER));
         }
 
         // for STRATEGY_SONIFICATION:
         // if SPEAKER was selected, and SPEAKER_SAFE is available, use SPEAKER_SAFE instead
-        if ((strategy == STRATEGY_SONIFICATION) &&
-                (device & AUDIO_DEVICE_OUT_SPEAKER) &&
-                (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
-            device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
-            device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+        if (strategy == STRATEGY_SONIFICATION) {
+            devices.replaceDevicesByType(
+                    AUDIO_DEVICE_OUT_SPEAKER,
+                    availableOutputDevices.getDevicesFromTypeMask(
+                            AUDIO_DEVICE_OUT_SPEAKER_SAFE));
         }
         } break;
 
     default:
-        ALOGW("getDeviceForStrategy() unknown strategy: %d", strategy);
+        ALOGW("getDevicesForStrategy() unknown strategy: %d", strategy);
         break;
     }
 
-    if (device == AUDIO_DEVICE_NONE) {
-        ALOGV("getDeviceForStrategy() no device found for strategy %d", strategy);
-        device = getApmObserver()->getDefaultOutputDevice()->type();
-        ALOGE_IF(device == AUDIO_DEVICE_NONE,
-                 "getDeviceForStrategy() no default device defined");
+    if (devices.isEmpty()) {
+        ALOGV("getDevicesForStrategy() no device found for strategy %d", strategy);
+        sp<DeviceDescriptor> defaultOutputDevice = getApmObserver()->getDefaultOutputDevice();
+        if (defaultOutputDevice != nullptr) {
+            devices.add(defaultOutputDevice);
+        }
+        ALOGE_IF(devices.isEmpty(),
+                 "getDevicesForStrategy() no default device defined");
     }
-    ALOGVV("getDeviceForStrategy() strategy %d, device %x", strategy, device);
-    return device;
+
+    ALOGVV("getDevices"
+           "ForStrategy() strategy %d, device %x", strategy, devices.types());
+    return devices;
 }
 
 
-audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) const
+sp<DeviceDescriptor> Engine::getDeviceForInputSource(audio_source_t inputSource) const
 {
     const DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
     const DeviceVector availableInputDevices = getApmObserver()->getAvailableInputDevices();
     const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
-    audio_devices_t availableDeviceTypes = availableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
+    DeviceVector availableDevices = availableInputDevices;
     sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
-    audio_devices_t availablePrimaryDeviceTypes = availableInputDevices.getDeviceTypesFromHwModule(
-        primaryOutput->getModuleHandle()) & ~AUDIO_DEVICE_BIT_IN;
-    uint32_t device = AUDIO_DEVICE_NONE;
+    DeviceVector availablePrimaryDevices = availableInputDevices.getDevicesFromHwModule(
+            primaryOutput->getModuleHandle());
+    sp<DeviceDescriptor> device;
 
     // when a call is active, force device selection to match source VOICE_COMMUNICATION
     // for most other input sources to avoid rerouting call TX audio
@@ -532,57 +496,47 @@
     switch (inputSource) {
     case AUDIO_SOURCE_DEFAULT:
     case AUDIO_SOURCE_MIC:
-    if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) {
-        device = AUDIO_DEVICE_IN_BLUETOOTH_A2DP;
-    } else if ((getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO) &&
-        (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET)) {
-        device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
-    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
-        device = AUDIO_DEVICE_IN_WIRED_HEADSET;
-    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_HEADSET) {
-        device = AUDIO_DEVICE_IN_USB_HEADSET;
-    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
-        device = AUDIO_DEVICE_IN_USB_DEVICE;
-    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-        device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-    }
-    break;
+        device = availableDevices.getDevice(
+                AUDIO_DEVICE_IN_BLUETOOTH_A2DP, String8(""), AUDIO_FORMAT_DEFAULT);
+        if (device != nullptr) break;
+        if (getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO) {
+            device = availableDevices.getDevice(
+                    AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, String8(""), AUDIO_FORMAT_DEFAULT);
+            if (device != nullptr) break;
+        }
+        device = availableDevices.getFirstExistingDevice({
+                AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_IN_USB_HEADSET,
+                AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_IN_BUILTIN_MIC});
+        break;
 
     case AUDIO_SOURCE_VOICE_COMMUNICATION:
         // Allow only use of devices on primary input if in call and HAL does not support routing
         // to voice call path.
         if ((getPhoneState() == AUDIO_MODE_IN_CALL) &&
-                (availableOutputDevices.types() & AUDIO_DEVICE_OUT_TELEPHONY_TX) == 0) {
-            availableDeviceTypes = availablePrimaryDeviceTypes;
+                (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX,
+                        String8(""), AUDIO_FORMAT_DEFAULT)) == nullptr) {
+            availableDevices = availablePrimaryDevices;
         }
 
         switch (getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION)) {
         case AUDIO_POLICY_FORCE_BT_SCO:
             // if SCO device is requested but no SCO device is available, fall back to default case
-            if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
-                device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
+            device = availableDevices.getDevice(
+                    AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, String8(""), AUDIO_FORMAT_DEFAULT);
+            if (device != nullptr) {
                 break;
             }
             FALLTHROUGH_INTENDED;
 
         default:    // FORCE_NONE
-            if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
-                device = AUDIO_DEVICE_IN_WIRED_HEADSET;
-            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_HEADSET) {
-                device = AUDIO_DEVICE_IN_USB_HEADSET;
-            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
-                device = AUDIO_DEVICE_IN_USB_DEVICE;
-            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-            }
+            device = availableDevices.getFirstExistingDevice({
+                    AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_IN_USB_HEADSET,
+                    AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_IN_BUILTIN_MIC});
             break;
 
         case AUDIO_POLICY_FORCE_SPEAKER:
-            if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
-                device = AUDIO_DEVICE_IN_BACK_MIC;
-            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-            }
+            device = availableDevices.getFirstExistingDevice({
+                    AUDIO_DEVICE_IN_BACK_MIC, AUDIO_DEVICE_IN_BUILTIN_MIC});
             break;
         }
         break;
@@ -591,77 +545,60 @@
     case AUDIO_SOURCE_UNPROCESSED:
     case AUDIO_SOURCE_HOTWORD:
         if (inputSource == AUDIO_SOURCE_HOTWORD) {
-            availableDeviceTypes = availablePrimaryDeviceTypes;
+            availableDevices = availablePrimaryDevices;
         }
-        if (getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO &&
-                availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
-            device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
-            device = AUDIO_DEVICE_IN_WIRED_HEADSET;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_HEADSET) {
-            device = AUDIO_DEVICE_IN_USB_HEADSET;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
-            device = AUDIO_DEVICE_IN_USB_DEVICE;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+        if (getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO) {
+            device = availableDevices.getDevice(
+                    AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, String8(""), AUDIO_FORMAT_DEFAULT);
+            if (device != nullptr) break;
         }
+        device = availableDevices.getFirstExistingDevice({
+                AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_IN_USB_HEADSET,
+                AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_IN_BUILTIN_MIC});
         break;
     case AUDIO_SOURCE_CAMCORDER:
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
-            device = AUDIO_DEVICE_IN_BACK_MIC;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
-            // This is specifically for a device without built-in mic
-            device = AUDIO_DEVICE_IN_USB_DEVICE;
-        }
+        // USB device is included as a fallback for devices without a built-in mic
+        device = availableDevices.getFirstExistingDevice({
+                AUDIO_DEVICE_IN_BACK_MIC, AUDIO_DEVICE_IN_BUILTIN_MIC,
+                AUDIO_DEVICE_IN_USB_DEVICE});
         break;
     case AUDIO_SOURCE_VOICE_DOWNLINK:
     case AUDIO_SOURCE_VOICE_CALL:
     case AUDIO_SOURCE_VOICE_UPLINK:
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
-            device = AUDIO_DEVICE_IN_VOICE_CALL;
-        }
+        device = availableDevices.getDevice(
+                AUDIO_DEVICE_IN_VOICE_CALL, String8(""), AUDIO_FORMAT_DEFAULT);
         break;
     case AUDIO_SOURCE_VOICE_PERFORMANCE:
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
-            device = AUDIO_DEVICE_IN_WIRED_HEADSET;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_HEADSET) {
-            device = AUDIO_DEVICE_IN_USB_HEADSET;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
-            device = AUDIO_DEVICE_IN_USB_DEVICE;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-        }
+        device = availableDevices.getFirstExistingDevice({
+                AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_IN_USB_HEADSET,
+                AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_IN_BUILTIN_MIC});
         break;
     case AUDIO_SOURCE_REMOTE_SUBMIX:
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
-            device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
-        }
+        device = availableDevices.getDevice(
+                AUDIO_DEVICE_IN_REMOTE_SUBMIX, String8(""), AUDIO_FORMAT_DEFAULT);
         break;
     case AUDIO_SOURCE_FM_TUNER:
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_FM_TUNER) {
-            device = AUDIO_DEVICE_IN_FM_TUNER;
-        }
+        device = availableDevices.getDevice(
+                AUDIO_DEVICE_IN_FM_TUNER, String8(""), AUDIO_FORMAT_DEFAULT);
         break;
     case AUDIO_SOURCE_ECHO_REFERENCE:
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_ECHO_REFERENCE) {
-            device = AUDIO_DEVICE_IN_ECHO_REFERENCE;
-        }
+        device = availableDevices.getDevice(
+                AUDIO_DEVICE_IN_ECHO_REFERENCE, String8(""), AUDIO_FORMAT_DEFAULT);
         break;
     default:
         ALOGW("getDeviceForInputSource() invalid input source %d", inputSource);
         break;
     }
-    if (device == AUDIO_DEVICE_NONE) {
+    if (device == nullptr) {
         ALOGV("getDeviceForInputSource() no device found for source %d", inputSource);
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_STUB) {
-            device = AUDIO_DEVICE_IN_STUB;
-        }
-        ALOGE_IF(device == AUDIO_DEVICE_NONE,
+        device = availableDevices.getDevice(
+                AUDIO_DEVICE_IN_STUB, String8(""), AUDIO_FORMAT_DEFAULT);
+        ALOGE_IF(device == nullptr,
                  "getDeviceForInputSource() no default device defined");
     }
-    ALOGV("getDeviceForInputSource()input source %d, device %08x", inputSource, device);
+    ALOGV_IF(device != nullptr,
+             "getDeviceForInputSource()input source %d, device %08x",
+             inputSource, device->type());
     return device;
 }
 
@@ -684,11 +621,9 @@
 
     auto legacyStrategy = mLegacyStrategyMap.find(strategy) != end(mLegacyStrategyMap) ?
                 mLegacyStrategyMap.at(strategy) : STRATEGY_NONE;
-    audio_devices_t devices = getDeviceForStrategyInt(legacyStrategy,
-                                                      availableOutputDevices,
-                                                      availableInputDevices, outputs,
-                                                      (uint32_t)AUDIO_DEVICE_NONE);
-    return availableOutputDevices.getDevicesFromTypeMask(devices);
+    return getDevicesForStrategyInt(legacyStrategy,
+                                    availableOutputDevices,
+                                    availableInputDevices, outputs);
 }
 
 DeviceVector Engine::getOutputDevicesForAttributes(const audio_attributes_t &attributes,
@@ -747,17 +682,21 @@
     if (device != nullptr) {
         return device;
     }
-    audio_devices_t deviceType = getDeviceForInputSource(attr.source);
 
-    if (audio_is_remote_submix_device(deviceType)) {
-        address = "0";
-        std::size_t pos;
-        std::string tags { attr.tags };
-        if ((pos = tags.find("addr=")) != std::string::npos) {
-            address = tags.substr(pos + std::strlen("addr="));
-        }
+    device = getDeviceForInputSource(attr.source);
+    if (device == nullptr || !audio_is_remote_submix_device(device->type())) {
+        // Return immediately if the device is null or it is not a remote submix device.
+        return device;
     }
-    return availableInputDevices.getDevice(deviceType,
+
+    // For remote submix device, try to find the device by address.
+    address = "0";
+    std::size_t pos;
+    std::string tags { attr.tags };
+    if ((pos = tags.find("addr=")) != std::string::npos) {
+        address = tags.substr(pos + std::strlen("addr="));
+    }
+    return availableInputDevices.getDevice(device->type(),
                                            String8(address.c_str()),
                                            AUDIO_FORMAT_DEFAULT);
 }
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
index 62938cf..4360c6f 100644
--- a/services/audiopolicy/enginedefault/src/Engine.h
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -18,7 +18,6 @@
 
 #include "EngineBase.h"
 #include "EngineInterface.h"
-#include <AudioGain.h>
 #include <policy.h>
 
 namespace android
@@ -74,15 +73,14 @@
 
     status_t setDefaultDevice(audio_devices_t device);
 
-    audio_devices_t getDeviceForStrategyInt(legacy_strategy strategy,
-                                            DeviceVector availableOutputDevices,
-                                            DeviceVector availableInputDevices,
-                                            const SwAudioOutputCollection &outputs,
-                                            uint32_t outputDeviceTypesToIgnore) const;
+    DeviceVector getDevicesForStrategyInt(legacy_strategy strategy,
+                                          DeviceVector availableOutputDevices,
+                                          DeviceVector availableInputDevices,
+                                          const SwAudioOutputCollection &outputs) const;
 
     DeviceVector getDevicesForProductStrategy(product_strategy_t strategy) const;
 
-    audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const;
+    sp<DeviceDescriptor> getDeviceForInputSource(audio_source_t inputSource) const;
 
     DeviceStrategyMap mDevicesForStrategies;
 
diff --git a/services/audiopolicy/manager/AudioPolicyFactory.cpp b/services/audiopolicy/manager/AudioPolicyFactory.cpp
index 7aff6a9..476a1ec 100644
--- a/services/audiopolicy/manager/AudioPolicyFactory.cpp
+++ b/services/audiopolicy/manager/AudioPolicyFactory.cpp
@@ -21,7 +21,13 @@
 extern "C" AudioPolicyInterface* createAudioPolicyManager(
         AudioPolicyClientInterface *clientInterface)
 {
-    return new AudioPolicyManager(clientInterface);
+    AudioPolicyManager *apm = new AudioPolicyManager(clientInterface);
+    status_t status = apm->initialize();
+    if (status != NO_ERROR) {
+        delete apm;
+        apm = nullptr;
+    }
+    return apm;
 }
 
 extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface)
diff --git a/services/audiopolicy/managerdefault/Android.bp b/services/audiopolicy/managerdefault/Android.bp
index 8fbeff9..1fa0d19 100644
--- a/services/audiopolicy/managerdefault/Android.bp
+++ b/services/audiopolicy/managerdefault/Android.bp
@@ -9,6 +9,7 @@
     export_include_dirs: ["."],
 
     shared_libs: [
+        "libaudiofoundation",
         "libcutils",
         "libdl",
         "libutils",
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index edab95a..aec9cee 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -73,6 +73,26 @@
         AUDIO_CHANNEL_OUT_2POINT1POINT2, AUDIO_CHANNEL_OUT_2POINT0POINT2,
         AUDIO_CHANNEL_OUT_5POINT1, AUDIO_CHANNEL_OUT_STEREO }};
 
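+// Element-wise equality/inequality operators for SortedVector<T>.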
+template <typename T>
+bool operator== (const SortedVector<T> &left, const SortedVector<T> &right)
+{
+    if (left.size() != right.size()) {
+        return false;
+    }
+    for (size_t index = 0; index < right.size(); index++) {
+        if (left[index] != right[index]) {
+            return false;
+        }
+    }
+    return true;
+}
+
+template <typename T>
+bool operator!= (const SortedVector<T> &left, const SortedVector<T> &right)
+{
+    return !(left == right);
+}
+
 // ----------------------------------------------------------------------------
 // AudioPolicyInterface implementation
 // ----------------------------------------------------------------------------
@@ -92,7 +112,7 @@
 void AudioPolicyManager::broadcastDeviceConnectionState(const sp<DeviceDescriptor> &device,
                                                         audio_policy_dev_state_t state)
 {
-    AudioParameter param(device->address());
+    AudioParameter param(String8(device->address().c_str()));
     const String8 key(state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE ?
                 AudioParameter::keyDeviceConnect : AudioParameter::keyDeviceDisconnect);
     param.addInt(key, device->type());
@@ -1088,7 +1108,7 @@
     audio_config_base_t clientConfig = {.sample_rate = config->sample_rate,
         .format = config->format,
         .channel_mask = config->channel_mask };
-    *portId = AudioPort::getNextUniqueId();
+    *portId = PolicyAudioPort::getNextUniqueId();
 
     sp<TrackClientDescriptor> clientDesc =
         new TrackClientDescriptor(*portId, uid, session, resultAttr, clientConfig,
@@ -1185,9 +1205,9 @@
             if (!desc->isDuplicated() && (profile == desc->mProfile)) {
                 // reuse direct output if currently open by the same client
                 // and configured with same parameters
-                if ((config->sample_rate == desc->mSamplingRate) &&
-                    (config->format == desc->mFormat) &&
-                    (channelMask == desc->mChannelMask) &&
+                if ((config->sample_rate == desc->getSamplingRate()) &&
+                    (config->format == desc->getFormat()) &&
+                    (channelMask == desc->getChannelMask()) &&
                     (session == desc->mDirectClientSession)) {
                     desc->mDirectOpenCount++;
                     ALOGI("%s reusing direct output %d for session %d", __func__, 
@@ -1227,13 +1247,13 @@
 
         // only accept an output with the requested parameters
         if (status != NO_ERROR ||
-            (config->sample_rate != 0 && config->sample_rate != outputDesc->mSamplingRate) ||
-            (config->format != AUDIO_FORMAT_DEFAULT && config->format != outputDesc->mFormat) ||
-            (channelMask != 0 && channelMask != outputDesc->mChannelMask)) {
+            (config->sample_rate != 0 && config->sample_rate != outputDesc->getSamplingRate()) ||
+            (config->format != AUDIO_FORMAT_DEFAULT && config->format != outputDesc->getFormat()) ||
+            (channelMask != 0 && channelMask != outputDesc->getChannelMask())) {
             ALOGV("%s failed opening direct output: output %d sample rate %d %d," 
                     "format %d %d, channel mask %04x %04x", __func__, output, config->sample_rate,
-                    outputDesc->mSamplingRate, config->format, outputDesc->mFormat,
-                    channelMask, outputDesc->mChannelMask);
+                    outputDesc->getSamplingRate(), config->format, outputDesc->getFormat(),
+                    channelMask, outputDesc->getChannelMask());
             if (output != AUDIO_IO_HANDLE_NONE) {
                 outputDesc->close();
             }
@@ -1339,19 +1359,19 @@
     // Each IOProfile represents a MixPort from audio_policy_configuration.xml
     for (const auto &inProfile : inputProfiles) {
         if (hwAvSync == ((inProfile->getFlags() & AUDIO_INPUT_FLAG_HW_AV_SYNC) != 0)) {
-            msdProfiles.appendVector(inProfile->getAudioProfiles());
+            appendAudioProfiles(msdProfiles, inProfile->getAudioProfiles());
         }
     }
     AudioProfileVector deviceProfiles;
     for (const auto &outProfile : outputProfiles) {
         if (hwAvSync == ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0)) {
-            deviceProfiles.appendVector(outProfile->getAudioProfiles());
+            appendAudioProfiles(deviceProfiles, outProfile->getAudioProfiles());
         }
     }
     struct audio_config_base bestSinkConfig;
-    status_t result = msdProfiles.findBestMatchingOutputConfig(deviceProfiles,
+    status_t result = findBestMatchingOutputConfig(msdProfiles, deviceProfiles,
             compressedFormatsOrder, surroundChannelMasksOrder, true /*preferHigherSamplingRates*/,
-            &bestSinkConfig);
+            bestSinkConfig);
     if (result != NO_ERROR) {
         ALOGD("%s() no matching profiles found for device: %s, hwAvSync: %d",
                 __func__, outputDevice->toString().c_str(), hwAvSync);
@@ -1504,13 +1524,13 @@
         // If haptic channel is specified, use the haptic output if present.
         // When using haptic output, same audio format and sample rate are required.
         const uint32_t outputHapticChannelCount = audio_channel_count_from_out_mask(
-            outputDesc->mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
+            outputDesc->getChannelMask() & AUDIO_CHANNEL_HAPTIC_ALL);
         if ((hapticChannelCount == 0) != (outputHapticChannelCount == 0)) {
             continue;
         }
         if (outputHapticChannelCount >= hapticChannelCount
-            && format == outputDesc->mFormat
-            && samplingRate == outputDesc->mSamplingRate) {
+            && format == outputDesc->getFormat()
+            && samplingRate == outputDesc->getSamplingRate()) {
                 currentMatchCriteria[0] = outputHapticChannelCount;
         }
 
@@ -1518,12 +1538,13 @@
         currentMatchCriteria[1] = popcount(outputDesc->mFlags & functionalFlags);
 
         // channel mask and channel count match
-        uint32_t outputChannelCount = audio_channel_count_from_out_mask(outputDesc->mChannelMask);
+        uint32_t outputChannelCount = audio_channel_count_from_out_mask(
+                outputDesc->getChannelMask());
         if (channelMask != AUDIO_CHANNEL_NONE && channelCount > 2 &&
             channelCount <= outputChannelCount) {
             if ((audio_channel_mask_get_representation(channelMask) ==
-                    audio_channel_mask_get_representation(outputDesc->mChannelMask)) &&
-                    ((channelMask & outputDesc->mChannelMask) == channelMask)) {
+                    audio_channel_mask_get_representation(outputDesc->getChannelMask())) &&
+                    ((channelMask & outputDesc->getChannelMask()) == channelMask)) {
                 currentMatchCriteria[2] = outputChannelCount;
             }
             currentMatchCriteria[3] = outputChannelCount;
@@ -1531,8 +1552,8 @@
 
         // sampling rate match
         if (samplingRate > SAMPLE_RATE_HZ_DEFAULT &&
-                samplingRate <= outputDesc->mSamplingRate) {
-            currentMatchCriteria[4] = outputDesc->mSamplingRate;
+                samplingRate <= outputDesc->getSamplingRate()) {
+            currentMatchCriteria[4] = outputDesc->getSamplingRate();
         }
 
         // performance flags match
@@ -1541,8 +1562,8 @@
         // format match
         if (format != AUDIO_FORMAT_INVALID) {
             currentMatchCriteria[6] =
-                AudioPort::kFormatDistanceMax -
-                AudioPort::formatDistance(format, outputDesc->mFormat);
+                PolicyAudioPort::kFormatDistanceMax -
+                PolicyAudioPort::formatDistance(format, outputDesc->getFormat());
         }
 
         // primary output match
@@ -2066,7 +2087,7 @@
 
     isSoundTrigger = attributes.source == AUDIO_SOURCE_HOTWORD &&
         mSoundTriggerSessions.indexOfKey(session) >= 0;
-    *portId = AudioPort::getNextUniqueId();
+    *portId = PolicyAudioPort::getNextUniqueId();
 
     clientDesc = new RecordClientDescriptor(*portId, riid, uid, session, attributes, *config,
                                             requestedDeviceId, attributes.source, flags,
@@ -2408,7 +2429,9 @@
     for (size_t i = 0; i < mInputs.size(); i++) {
         const sp<AudioInputDescriptor> input = mInputs.valueAt(i);
         if (input->clientsList().size() == 0
-                || !mAvailableInputDevices.containsAtLeastOne(input->supportedDevices())) {
+                || !mAvailableInputDevices.containsAtLeastOne(input->supportedDevices())
+                || (input->getPolicyAudioPort()->getFlags()
+                        & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
             inputsToClose.push_back(mInputs.keyAt(i));
         } else {
             bool close = false;
@@ -2900,9 +2923,9 @@
             // stereo and let audio flinger do the channel conversion if needed.
             outputConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
             inputConfig.channel_mask = AUDIO_CHANNEL_IN_STEREO;
-            rSubmixModule->addOutputProfile(address, &outputConfig,
+            rSubmixModule->addOutputProfile(address.c_str(), &outputConfig,
                     AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address);
-            rSubmixModule->addInputProfile(address, &inputConfig,
+            rSubmixModule->addInputProfile(address.c_str(), &inputConfig,
                     AUDIO_DEVICE_IN_REMOTE_SUBMIX, address);
 
             if ((res = setDeviceConnectionStateInt(deviceTypeToMakeAvailable,
@@ -2998,8 +3021,8 @@
                     }
                 }
             }
-            rSubmixModule->removeOutputProfile(address);
-            rSubmixModule->removeInputProfile(address);
+            rSubmixModule->removeOutputProfile(address.c_str());
+            rSubmixModule->removeInputProfile(address.c_str());
 
         } else if ((mix.mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
             if (mPolicyMixes.unregisterMix(mix) != NO_ERROR) {
@@ -3211,7 +3234,7 @@
     ALOGV("%s() profile %sfound with name: %s, "
         "sample rate: %u, format: 0x%x, channel_mask: 0x%x, output flags: 0x%x",
         __FUNCTION__, profile != 0 ? "" : "NOT ",
-        (profile != 0 ? profile->getTagName().string() : "null"),
+        (profile != 0 ? profile->getTagName().c_str() : "null"),
         config.sample_rate, config.format, config.channel_mask, output_flags);
     return (profile != 0);
 }
@@ -3834,7 +3857,7 @@
         return BAD_VALUE;
     }
 
-    *portId = AudioPort::getNextUniqueId();
+    *portId = PolicyAudioPort::getNextUniqueId();
 
     struct audio_patch dummyPatch = {};
     sp<AudioPatch> patchDesc = new AudioPatch(&dummyPatch, uid);
@@ -3875,7 +3898,7 @@
     if (srcDevice->hasSameHwModuleAs(sinkDevice) &&
             srcDevice->getModuleVersionMajor() >= 3 &&
             sinkDevice->getModule()->supportsPatch(srcDevice, sinkDevice) &&
-            srcDevice->getAudioPort()->mGains.size() > 0) {
+            srcDevice->getAudioPort()->getGains().size() > 0) {
         ALOGV("%s Device to Device route supported by >=3.0 HAL", __FUNCTION__);
         // TODO: may explicitly specify whether we should use HW or SW patch
         //   create patch between src device and output device
@@ -4114,8 +4137,8 @@
             AUDIO_DEVICE_OUT_HDMI);
     for (size_t i = 0; i < hdmiOutputDevices.size(); i++) {
         // Simulate reconnection to update enabled surround sound formats.
-        String8 address = hdmiOutputDevices[i]->address();
-        String8 name = hdmiOutputDevices[i]->getName();
+        String8 address = String8(hdmiOutputDevices[i]->address().c_str());
+        std::string name = hdmiOutputDevices[i]->getName();
         status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_HDMI,
                                                       AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
                                                       address.c_str(),
@@ -4136,8 +4159,8 @@
                 AUDIO_DEVICE_IN_HDMI);
     for (size_t i = 0; i < hdmiInputDevices.size(); i++) {
         // Simulate reconnection to update enabled surround sound formats.
-        String8 address = hdmiInputDevices[i]->address();
-        String8 name = hdmiInputDevices[i]->getName();
+        String8 address = String8(hdmiInputDevices[i]->address().c_str());
+        std::string name = hdmiInputDevices[i]->getName();
         status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_IN_HDMI,
                                                       AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
                                                       address.c_str(),
@@ -4162,11 +4185,11 @@
     return profileUpdated ? NO_ERROR : INVALID_OPERATION;
 }
 
-void AudioPolicyManager::setAppState(uid_t uid, app_state_t state)
+void AudioPolicyManager::setAppState(audio_port_handle_t portId, app_state_t state)
 {
-    ALOGV("%s(uid:%d, state:%d)", __func__, uid, state);
+    ALOGV("%s(portId:%d, state:%d)", __func__, portId, state);
     for (size_t i = 0; i < mInputs.size(); i++) {
-        mInputs.valueAt(i)->setAppState(uid, state);
+        mInputs.valueAt(i)->setAppState(portId, state);
     }
 }
 
@@ -4305,7 +4328,6 @@
         : AudioPolicyManager(clientInterface, false /*forTesting*/)
 {
     loadConfig();
-    initialize();
 }
 
 void AudioPolicyManager::loadConfig() {
@@ -4449,7 +4471,7 @@
                 // give a valid ID to an attached device once confirmed it is reachable
                 if (!device->isAttached()) {
                     device->attach(hwModule);
-                    device->importAudioPort(inProfile, true);
+                    device->importAudioPortAndPickAudioProfile(inProfile, true);
                 }
             }
             inputDesc->close();
@@ -4480,11 +4502,11 @@
     }
     // If microphones address is empty, set it according to device type
     for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
-        if (mAvailableInputDevices[i]->address().isEmpty()) {
+        if (mAvailableInputDevices[i]->address().empty()) {
             if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BUILTIN_MIC) {
-                mAvailableInputDevices[i]->setAddress(String8(AUDIO_BOTTOM_MICROPHONE_ADDRESS));
+                mAvailableInputDevices[i]->setAddress(AUDIO_BOTTOM_MICROPHONE_ADDRESS);
             } else if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BACK_MIC) {
-                mAvailableInputDevices[i]->setAddress(String8(AUDIO_BACK_MICROPHONE_ADDRESS));
+                mAvailableInputDevices[i]->setAddress(AUDIO_BACK_MICROPHONE_ADDRESS);
             }
         }
     }
@@ -4553,7 +4575,7 @@
                                                    SortedVector<audio_io_handle_t>& outputs)
 {
     audio_devices_t deviceType = device->type();
-    const String8 &address = device->address();
+    const String8 &address = String8(device->address().c_str());
     sp<SwAudioOutputDescriptor> desc;
 
     if (audio_device_is_digital(deviceType)) {
@@ -4605,7 +4627,7 @@
                     // matching profile: save the sample rates, format and channel masks supported
                     // by the profile in our device descriptor
                     if (audio_device_is_digital(deviceType)) {
-                        device->importAudioPort(profile);
+                        device->importAudioPortAndPickAudioProfile(profile);
                     }
                     break;
                 }
@@ -4621,7 +4643,7 @@
             }
 
             ALOGV("opening output for device %08x with params %s profile %p name %s",
-                  deviceType, address.string(), profile.get(), profile->getName().string());
+                  deviceType, address.string(), profile.get(), profile->getName().c_str());
             desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
             audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
             status_t status = desc->open(nullptr, DeviceVector(device),
@@ -4707,7 +4729,7 @@
                 outputs.add(output);
                 // Load digital format info only for digital devices
                 if (audio_device_is_digital(deviceType)) {
-                    device->importAudioPort(profile);
+                    device->importAudioPortAndPickAudioProfile(profile);
                 }
 
                 if (device_distinguishes_on_address(deviceType)) {
@@ -4801,7 +4823,7 @@
                 desc = mInputs.valueAt(input_index);
                 if (desc->mProfile == profile) {
                     if (audio_device_is_digital(device->type())) {
-                        device->importAudioPort(profile);
+                        device->importAudioPortAndPickAudioProfile(profile);
                     }
                     break;
                 }
@@ -4825,7 +4847,7 @@
                                          &input);
 
             if (status == NO_ERROR) {
-                const String8& address = device->address();
+                const String8& address = String8(device->address().c_str());
                 if (!address.isEmpty()) {
                     char *param = audio_device_address_to_parameter(device->type(), address);
                     mpClientInterface->setParameters(input, String8(param));
@@ -4850,7 +4872,7 @@
                 profile_index--;
             } else {
                 if (audio_device_is_digital(device->type())) {
-                    device->importAudioPort(profile);
+                    device->importAudioPortAndPickAudioProfile(profile);
                 }
                 ALOGV("checkInputsForDevice(): adding input %d", input);
             }
@@ -5043,10 +5065,12 @@
     // also take into account external policy-related changes: add all outputs which are
     // associated with policies in the "before" and "after" output vectors
     ALOGVV("%s(): policy related outputs", __func__);
+    bool hasDynamicPolicy = false;
     for (size_t i = 0 ; i < mPreviousOutputs.size() ; i++) {
         const sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueAt(i);
         if (desc != 0 && desc->mPolicyMix != NULL) {
             srcOutputs.add(desc->mIoHandle);
+            hasDynamicPolicy = true;
             ALOGVV(" previous outputs: adding %d", desc->mIoHandle);
         }
     }
@@ -5054,6 +5078,7 @@
         const sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
         if (desc != 0 && desc->mPolicyMix != NULL) {
             dstOutputs.add(desc->mIoHandle);
+            hasDynamicPolicy = true;
             ALOGVV(" new outputs: adding %d", desc->mIoHandle);
         }
     }
@@ -5062,12 +5087,45 @@
         // get maximum latency of all source outputs to determine the minimum mute time guaranteeing
         // audio from invalidated tracks will be rendered when unmuting
         uint32_t maxLatency = 0;
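+        // always invalidate the strategy's streams when a dynamic policy output is involved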
+        bool invalidate = hasDynamicPolicy;
         for (audio_io_handle_t srcOut : srcOutputs) {
             sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
-            if (desc != 0 && maxLatency < desc->latency()) {
+            if (desc == nullptr) continue;
+
+            if (desc->isStrategyActive(psId) && maxLatency < desc->latency()) {
                 maxLatency = desc->latency();
             }
+
+            if (invalidate) continue;
+
+            for (auto client : desc->clientsList(false /*activeOnly*/)) {
+                if (desc->isDuplicated() || !desc->mProfile->isDirectOutput()) {
+                    // a client on a non-direct output necessarily has a linear PCM format
+                    // so we can call selectOutput() safely
+                    const audio_io_handle_t newOutput = selectOutput(dstOutputs,
+                                                                     client->flags(),
+                                                                     client->config().format,
+                                                                     client->config().channel_mask,
+                                                                     client->config().sample_rate);
+                    if (newOutput != srcOut) {
+                        invalidate = true;
+                        break;
+                    }
+                } else {
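+                    // for direct outputs, invalidate if the profile matching the new devices differs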
+                    sp<IOProfile> profile = getProfileForOutput(newDevices,
+                                   client->config().sample_rate,
+                                   client->config().format,
+                                   client->config().channel_mask,
+                                   client->flags(),
+                                   true /* directOnly */);
+                    if (profile != desc->mProfile) {
+                        invalidate = true;
+                        break;
+                    }
+                }
+            }
         }
+
         ALOGV_IF(!(srcOutputs.isEmpty() || dstOutputs.isEmpty()),
               "%s: strategy %d, moving from output %s to output %s", __func__, psId,
               std::to_string(srcOutputs[0]).c_str(),
@@ -5075,7 +5133,9 @@
         // mute strategy while moving tracks from one output to another
         for (audio_io_handle_t srcOut : srcOutputs) {
             sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
-            if (desc != 0 && desc->isStrategyActive(psId)) {
+            if (desc == nullptr) continue;
+
+            if (desc->isStrategyActive(psId)) {
                 setStrategyMute(psId, true, desc);
                 setStrategyMute(psId, false, desc, maxLatency * LATENCY_MUTE_FACTOR,
                                 newDevices.types());
@@ -5091,8 +5151,10 @@
             selectOutputForMusicEffects();
         }
         // Move tracks associated to this stream (and linked) from previous output to new output
-        for (auto stream :  mEngine->getStreamTypesForProductStrategy(psId)) {
-            mpClientInterface->invalidateStream(stream);
+        if (invalidate) {
+            for (auto stream :  mEngine->getStreamTypesForProductStrategy(psId)) {
+                mpClientInterface->invalidateStream(stream);
+            }
         }
     }
 }
@@ -5694,8 +5756,9 @@
     const auto ringVolumeSrc = toVolumeSource(AUDIO_STREAM_RING);
     const auto musicVolumeSrc = toVolumeSource(AUDIO_STREAM_MUSIC);
     const auto alarmVolumeSrc = toVolumeSource(AUDIO_STREAM_ALARM);
+    const auto a11yVolumeSrc = toVolumeSource(AUDIO_STREAM_ACCESSIBILITY);
 
-    if (volumeSource == toVolumeSource(AUDIO_STREAM_ACCESSIBILITY)
+    if (volumeSource == a11yVolumeSrc
             && (AUDIO_MODE_RINGTONE == mEngine->getPhoneState()) &&
             mOutputs.isActive(ringVolumeSrc, 0)) {
         auto &ringCurves = getVolumeCurves(AUDIO_STREAM_RING);
@@ -5712,7 +5775,7 @@
              volumeSource == toVolumeSource(AUDIO_STREAM_NOTIFICATION) ||
              volumeSource == toVolumeSource(AUDIO_STREAM_ENFORCED_AUDIBLE) ||
              volumeSource == toVolumeSource(AUDIO_STREAM_DTMF) ||
-             volumeSource == toVolumeSource(AUDIO_STREAM_ACCESSIBILITY))) {
+             volumeSource == a11yVolumeSrc)) {
         auto &voiceCurves = getVolumeCurves(callVolumeSrc);
         int voiceVolumeIndex = voiceCurves.getVolumeIndex(device);
         const float maxVoiceVolDb =
@@ -5724,7 +5787,9 @@
         // VOICE_CALL stream has minVolumeIndex > 0 : Users cannot set the volume of voice calls to
         // 0. We don't want to cap volume when the system has programmatically muted the voice call
         // stream. See setVolumeCurveIndex() for more information.
-        bool exemptFromCapping = (volumeSource == ringVolumeSrc) && (voiceVolumeIndex == 0);
+        bool exemptFromCapping =
+                ((volumeSource == ringVolumeSrc) || (volumeSource == a11yVolumeSrc))
+                && (voiceVolumeIndex == 0);
         ALOGV_IF(exemptFromCapping, "%s volume source %d at vol=%f not capped", __func__,
                  volumeSource, volumeDb);
         if ((volumeDb > maxVoiceVolDb) && !exemptFromCapping) {
@@ -6081,24 +6146,24 @@
         formatSet.insert(enforcedSurround.begin(), enforcedSurround.end());
     }
     for (const auto& format : formatSet) {
-        formatsPtr->push(format);
+        formatsPtr->push_back(format);
     }
 }
 
-void AudioPolicyManager::modifySurroundChannelMasks(ChannelsVector *channelMasksPtr) {
-    ChannelsVector &channelMasks = *channelMasksPtr;
+void AudioPolicyManager::modifySurroundChannelMasks(ChannelMaskSet *channelMasksPtr) {
+    ChannelMaskSet &channelMasks = *channelMasksPtr;
     audio_policy_forced_cfg_t forceUse = mEngine->getForceUse(
             AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND);
 
     // If NEVER, then remove support for channelMasks > stereo.
     if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER) {
-        for (size_t maskIndex = 0; maskIndex < channelMasks.size(); ) {
-            audio_channel_mask_t channelMask = channelMasks[maskIndex];
+        for (auto it = channelMasks.begin(); it != channelMasks.end();) {
+            audio_channel_mask_t channelMask = *it;
             if (channelMask & ~AUDIO_CHANNEL_OUT_STEREO) {
                 ALOGI("%s: force NEVER, so remove channelMask 0x%08x", __FUNCTION__, channelMask);
-                channelMasks.removeAt(maskIndex);
+                it = channelMasks.erase(it);
             } else {
-                maskIndex++;
+                ++it;
             }
         }
     // If ALWAYS or MANUAL, then make sure we at least support 5.1
@@ -6114,7 +6179,7 @@
         }
         // If not then add 5.1 support.
         if (!supports5dot1) {
-            channelMasks.add(AUDIO_CHANNEL_OUT_5POINT1);
+            channelMasks.insert(AUDIO_CHANNEL_OUT_5POINT1);
             ALOGI("%s: force MANUAL or ALWAYS, so adding channelMask for 5.1 surround", __func__);
         }
     }
@@ -6143,12 +6208,12 @@
                 || isDeviceOfModule(devDesc, AUDIO_HARDWARE_MODULE_ID_MSD)) {
             modifySurroundFormats(devDesc, &formats);
         }
-        profiles.setFormats(formats);
+        addProfilesForFormats(profiles, formats);
     }
 
     for (audio_format_t format : profiles.getSupportedFormats()) {
-        ChannelsVector channelMasks;
-        SampleRateVector samplingRates;
+        ChannelMaskSet channelMasks;
+        SampleRateSet samplingRates;
         AudioParameter requestedParameters;
         requestedParameters.addInt(String8(AudioParameter::keyFormat), format);
 
@@ -6179,7 +6244,8 @@
                 }
             }
         }
-        profiles.addProfileFromHal(new AudioProfile(format, channelMasks, samplingRates));
+        addDynamicAudioProfileAndSort(
+                profiles, new AudioProfile(format, channelMasks, samplingRates));
     }
 }
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index d88d1ec..93d67ea 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -31,15 +31,14 @@
 #include <utils/SortedVector.h>
 #include <media/AudioParameter.h>
 #include <media/AudioPolicy.h>
+#include <media/AudioProfile.h>
 #include <media/PatchBuilder.h>
 #include "AudioPolicyInterface.h"
 
 #include <AudioPolicyManagerObserver.h>
-#include <AudioGain.h>
 #include <AudioPolicyConfig.h>
-#include <AudioPort.h>
+#include <PolicyAudioPort.h>
 #include <AudioPatch.h>
-#include <AudioProfile.h>
 #include <DeviceDescriptor.h>
 #include <IOProfile.h>
 #include <HwModule.h>
@@ -279,7 +278,7 @@
         virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
                     std::vector<audio_format_t> *formats);
 
-        virtual void setAppState(uid_t uid, app_state_t state);
+        virtual void setAppState(audio_port_handle_t portId, app_state_t state);
 
         virtual bool isHapticPlaybackSupported();
 
@@ -307,6 +306,8 @@
             return volumeGroup != VOLUME_GROUP_NONE ? NO_ERROR : BAD_VALUE;
         }
 
+        status_t initialize();
+
 protected:
         // A constructor that allows more fine-grained control over initialization process,
         // used in automatic tests.
@@ -321,7 +322,6 @@
         //   - initialize.
         AudioPolicyConfig& getConfig() { return mConfig; }
         void loadConfig();
-        status_t initialize();
 
         // From AudioPolicyManagerObserver
         virtual const AudioPatchCollection &getAudioPatches() const
@@ -646,7 +646,8 @@
         }
         String8 getFirstDeviceAddress(const DeviceVector &devices) const
         {
-            return (devices.size() > 0) ? devices.itemAt(0)->address() : String8("");
+            return (devices.size() > 0) ?
+                    String8(devices.itemAt(0)->address().c_str()) : String8("");
         }
 
         uint32_t updateCallRouting(const DeviceVector &rxDevices, uint32_t delayMs = 0);
@@ -762,7 +763,7 @@
 private:
         // Add or remove AC3 DTS encodings based on user preferences.
         void modifySurroundFormats(const sp<DeviceDescriptor>& devDesc, FormatVector *formatsPtr);
-        void modifySurroundChannelMasks(ChannelsVector *channelMasksPtr);
+        void modifySurroundChannelMasks(ChannelMaskSet *channelMasksPtr);
 
         // Support for Multi-Stream Decoder (MSD) module
         sp<DeviceDescriptor> getMsdAudioInDevice() const;
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index fa8da89..389f861 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -222,7 +222,7 @@
 
     if (result == NO_ERROR) {
         sp <AudioPlaybackClient> client =
-            new AudioPlaybackClient(*attr, *output, uid, pid, session, *selectedDeviceId, *stream);
+            new AudioPlaybackClient(*attr, *output, uid, pid, session, *portId, *selectedDeviceId, *stream);
         mAudioPlaybackClients.add(*portId, client);
     }
     return result;
@@ -451,7 +451,7 @@
             return status;
         }
 
-        sp<AudioRecordClient> client = new AudioRecordClient(*attr, *input, uid, pid, session,
+        sp<AudioRecordClient> client = new AudioRecordClient(*attr, *input, uid, pid, session, *portId,
                                                              *selectedDeviceId, opPackageName,
                                                              canCaptureOutput, canCaptureHotword);
         mAudioRecordClients.add(*portId, client);
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 85ea94f..7dfc205 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -406,15 +406,19 @@
 {
 //    Go over all active clients and allow capture (does not force silence) in the
 //    following cases:
-//    Another client in the same UID has already been allowed to capture
-//    OR The client is the assistant
+//    The client is the assistant
 //        AND an accessibility service is on TOP or a RTT call is active
-//               AND the source is VOICE_RECOGNITION or HOTWORD
-//        OR uses VOICE_RECOGNITION AND is on TOP
-//               OR uses HOTWORD
+//                AND the source is VOICE_RECOGNITION or HOTWORD
+//            OR uses VOICE_RECOGNITION AND is on TOP
+//                OR uses HOTWORD
 //            AND there is no active privacy sensitive capture or call
 //                OR client has CAPTURE_AUDIO_OUTPUT privileged permission
 //    OR The client is an accessibility service
+//        AND Is on TOP
+//                AND the source is VOICE_RECOGNITION or HOTWORD
+//            OR The assistant is not on TOP
+//                AND there is no active privacy sensitive capture or call
+//                    OR client has CAPTURE_AUDIO_OUTPUT privileged permission
 //        AND is on TOP
 //        AND the source is VOICE_RECOGNITION or HOTWORD
 //    OR the client source is virtual (remote submix, call audio TX or RX...)
@@ -422,7 +426,7 @@
 //        AND The assistant is not on TOP
 //        AND is on TOP or latest started
 //        AND there is no active privacy sensitive capture or call
-//                OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+//            OR client has CAPTURE_AUDIO_OUTPUT privileged permission
 
     sp<AudioRecordClient> topActive;
     sp<AudioRecordClient> latestActive;
@@ -458,16 +462,24 @@
             continue;
         }
 
-        if (appState == APP_STATE_TOP) {
+        bool isAssistant = mUidPolicy->isAssistantUid(current->uid);
+        bool isAccessibility = mUidPolicy->isA11yUid(current->uid);
+        if (appState == APP_STATE_TOP && !isAccessibility) {
             if (current->startTimeNs > topStartNs) {
                 topActive = current;
                 topStartNs = current->startTimeNs;
             }
-            if (mUidPolicy->isAssistantUid(current->uid)) {
+            if (isAssistant) {
                 isAssistantOnTop = true;
             }
         }
-        if (current->startTimeNs > latestStartNs) {
+        // The assistant capturing for HOTWORD and accessibility services are not considered
+        // for latest active, to avoid masking regular clients that started before them
+        if (current->startTimeNs > latestStartNs
+                && !((current->attributes.source == AUDIO_SOURCE_HOTWORD
+                        || isA11yOnTop || rttCallActive)
+                    && isAssistant)
+                && !isAccessibility) {
             latestActive = current;
             latestStartNs = current->startTimeNs;
         }
@@ -485,21 +497,12 @@
         topActive = latestActive;
     }
 
-    std::vector<uid_t> enabledUids;
-
     for (size_t i =0; i < mAudioRecordClients.size(); i++) {
         sp<AudioRecordClient> current = mAudioRecordClients[i];
         if (!current->active) {
             continue;
         }
 
-        // keep capture allowed if another client with the same UID has already
-        // been allowed to capture
-        if (std::find(enabledUids.begin(), enabledUids.end(), current->uid)
-                != enabledUids.end()) {
-            continue;
-        }
-
         audio_source_t source = current->attributes.source;
         bool isTopOrLatestActive = topActive == nullptr ? false : current->uid == topActive->uid;
         bool isLatestSensitive = latestSensitiveActive == nullptr ?
@@ -539,19 +542,24 @@
             }
         } else if (mUidPolicy->isA11yUid(current->uid)) {
             // For accessibility service allow capture if:
-            //     Is on TOP
-            //     AND the source is VOICE_RECOGNITION or HOTWORD
-            if (isA11yOnTop &&
-                    (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
+            //     The assistant is not on TOP
+            //         AND there is no active privacy sensitive capture or call
+            //             OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+            //     OR
+            //         Is on TOP AND the source is VOICE_RECOGNITION or HOTWORD
+            if (!isAssistantOnTop
+                    && (!(isSensitiveActive || isInCall) || current->canCaptureOutput)) {
                 allowCapture = true;
             }
+            if (isA11yOnTop) {
+                if (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD) {
+                    allowCapture = true;
+                }
+            }
         }
-        setAppState_l(current->uid,
+        setAppState_l(current->portId,
                       allowCapture ? apmStatFromAmState(mUidPolicy->getUidState(current->uid)) :
                                 APP_STATE_IDLE);
-        if (allowCapture) {
-            enabledUids.push_back(current->uid);
-        }
     }
 }
 
@@ -559,7 +567,7 @@
     for (size_t i = 0; i < mAudioRecordClients.size(); i++) {
         sp<AudioRecordClient> current = mAudioRecordClients[i];
         if (!isVirtualSource(current->attributes.source)) {
-            setAppState_l(current->uid, APP_STATE_IDLE);
+            setAppState_l(current->portId, APP_STATE_IDLE);
         }
     }
 }
@@ -605,17 +613,17 @@
     return false;
 }
 
-void AudioPolicyService::setAppState_l(uid_t uid, app_state_t state)
+void AudioPolicyService::setAppState_l(audio_port_handle_t portId, app_state_t state)
 {
     AutoCallerClear acc;
 
     if (mAudioPolicyManager) {
-        mAudioPolicyManager->setAppState(uid, state);
+        mAudioPolicyManager->setAppState(portId, state);
     }
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af) {
         bool silenced = state == APP_STATE_IDLE;
-        af->setRecordSilenced(uid, silenced);
+        af->setRecordSilenced(portId, silenced);
     }
 }
 
@@ -702,11 +710,11 @@
     if (in == BAD_TYPE || out == BAD_TYPE || err == BAD_TYPE) {
         return BAD_VALUE;
     }
-    if (args.size() == 3 && args[0] == String16("set-uid-state")) {
+    if (args.size() >= 3 && args[0] == String16("set-uid-state")) {
         return handleSetUidState(args, err);
-    } else if (args.size() == 2 && args[0] == String16("reset-uid-state")) {
+    } else if (args.size() >= 2 && args[0] == String16("reset-uid-state")) {
         return handleResetUidState(args, err);
-    } else if (args.size() == 2 && args[0] == String16("get-uid-state")) {
+    } else if (args.size() >= 2 && args[0] == String16("get-uid-state")) {
         return handleGetUidState(args, out, err);
     } else if (args.size() == 1 && args[0] == String16("help")) {
         printHelp(out);
@@ -716,14 +724,32 @@
     return BAD_VALUE;
 }
 
-status_t AudioPolicyService::handleSetUidState(Vector<String16>& args, int err) {
-    PermissionController pc;
-    int uid = pc.getPackageUid(args[1], 0);
-    if (uid <= 0) {
-        ALOGE("Unknown package: '%s'", String8(args[1]).string());
-        dprintf(err, "Unknown package: '%s'\n", String8(args[1]).string());
+static status_t getUidForPackage(String16 packageName, int userId, /*inout*/uid_t& uid, int err) {
+    if (userId < 0) {
+        ALOGE("Invalid user: %d", userId);
+        dprintf(err, "Invalid user: %d\n", userId);
         return BAD_VALUE;
     }
+
+    PermissionController pc;
+    uid = pc.getPackageUid(packageName, 0);
+    if (uid <= 0) {
+        ALOGE("Unknown package: '%s'", String8(packageName).string());
+        dprintf(err, "Unknown package: '%s'\n", String8(packageName).string());
+        return BAD_VALUE;
+    }
+
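+    // map the package's UID into the requested user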
+    uid = multiuser_get_uid(userId, uid);
+    return NO_ERROR;
+}
+
+status_t AudioPolicyService::handleSetUidState(Vector<String16>& args, int err) {
+    // Valid args.size() is 3 or 5; it is 5 when the --user option is given.
+    if (!(args.size() == 3 || args.size() == 5)) {
+        printHelp(err);
+        return BAD_VALUE;
+    }
+
     bool active = false;
     if (args[2] == String16("active")) {
         active = true;
@@ -731,30 +757,59 @@
         ALOGE("Expected active or idle but got: '%s'", String8(args[2]).string());
         return BAD_VALUE;
     }
+
+    int userId = 0;
+    if (args.size() >= 5 && args[3] == String16("--user")) {
+        userId = atoi(String8(args[4]));
+    }
+
+    uid_t uid;
+    if (getUidForPackage(args[1], userId, uid, err) == BAD_VALUE) {
+        return BAD_VALUE;
+    }
+
     mUidPolicy->addOverrideUid(uid, active);
     return NO_ERROR;
 }
 
 status_t AudioPolicyService::handleResetUidState(Vector<String16>& args, int err) {
-    PermissionController pc;
-    int uid = pc.getPackageUid(args[1], 0);
-    if (uid < 0) {
-        ALOGE("Unknown package: '%s'", String8(args[1]).string());
-        dprintf(err, "Unknown package: '%s'\n", String8(args[1]).string());
+    // Valid args.size() is 2 or 4; it is 4 when the --user option is given.
+    if (!(args.size() == 2 || args.size() == 4)) {
+        printHelp(err);
         return BAD_VALUE;
     }
+
+    int userId = 0;
+    if (args.size() >= 4 && args[2] == String16("--user")) {
+        userId = atoi(String8(args[3]));
+    }
+
+    uid_t uid;
+    if (getUidForPackage(args[1], userId, uid, err) == BAD_VALUE) {
+        return BAD_VALUE;
+    }
+
     mUidPolicy->removeOverrideUid(uid);
     return NO_ERROR;
 }
 
 status_t AudioPolicyService::handleGetUidState(Vector<String16>& args, int out, int err) {
-    PermissionController pc;
-    int uid = pc.getPackageUid(args[1], 0);
-    if (uid < 0) {
-        ALOGE("Unknown package: '%s'", String8(args[1]).string());
-        dprintf(err, "Unknown package: '%s'\n", String8(args[1]).string());
+    // Valid args.size() is 2 or 4; it is 4 when the --user option is given.
+    if (!(args.size() == 2 || args.size() == 4)) {
+        printHelp(err);
         return BAD_VALUE;
     }
+
+    int userId = 0;
+    if (args.size() >= 4 && args[2] == String16("--user")) {
+        userId = atoi(String8(args[3]));
+    }
+
+    uid_t uid;
+    if (getUidForPackage(args[1], userId, uid, err) == BAD_VALUE) {
+        return BAD_VALUE;
+    }
+
     if (mUidPolicy->isUidActive(uid)) {
         return dprintf(out, "active\n");
     } else {
@@ -764,9 +819,9 @@
 
 status_t AudioPolicyService::printHelp(int out) {
     return dprintf(out, "Audio policy service commands:\n"
-        "  get-uid-state <PACKAGE> gets the uid state\n"
-        "  set-uid-state <PACKAGE> <active|idle> overrides the uid state\n"
-        "  reset-uid-state <PACKAGE> clears the uid state override\n"
+        "  get-uid-state <PACKAGE> [--user USER_ID] gets the uid state\n"
+        "  set-uid-state <PACKAGE> <active|idle> [--user USER_ID] overrides the uid state\n"
+        "  reset-uid-state <PACKAGE> [--user USER_ID] clears the uid state override\n"
         "  help print this message\n");
 }
 
@@ -970,8 +1025,7 @@
 
 bool AudioPolicyService::UidPolicy::isA11yOnTop() {
     for (const auto &uid : mCachedUids) {
-        std::vector<uid_t>::iterator it = find(mA11yUids.begin(), mA11yUids.end(), uid.first);
-        if (it == mA11yUids.end()) {
+        if (!isA11yUid(uid.first)) {
             continue;
         }
         if (uid.second.second >= ActivityManager::PROCESS_STATE_TOP
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 74aea0d..939df2c 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -311,7 +311,7 @@
     virtual status_t shellCommand(int in, int out, int err, Vector<String16>& args);
 
     // Sets whether the given UID records only silence
-    virtual void setAppState_l(uid_t uid, app_state_t state);
+    virtual void setAppState_l(audio_port_handle_t portId, app_state_t state);
 
     // Overrides the UID state as if it is idle
     status_t handleSetUidState(Vector<String16>& args, int err);
@@ -759,9 +759,10 @@
     public:
                 AudioClient(const audio_attributes_t attributes,
                             const audio_io_handle_t io, uid_t uid, pid_t pid,
-                            const audio_session_t session, const audio_port_handle_t deviceId) :
+                            const audio_session_t session,  audio_port_handle_t portId,
+                            const audio_port_handle_t deviceId) :
                                 attributes(attributes), io(io), uid(uid), pid(pid),
-                                session(session), deviceId(deviceId), active(false) {}
+                                session(session), portId(portId), deviceId(deviceId), active(false) {}
                 ~AudioClient() override = default;
 
 
@@ -770,6 +771,7 @@
         const uid_t uid;                     // client UID
         const pid_t pid;                     // client PID
         const audio_session_t session;       // audio session ID
+        const audio_port_handle_t portId;
         const audio_port_handle_t deviceId;  // selected input device port ID
               bool active;                   // Playback/Capture is active or inactive
     };
@@ -781,10 +783,10 @@
     public:
                 AudioRecordClient(const audio_attributes_t attributes,
                           const audio_io_handle_t io, uid_t uid, pid_t pid,
-                          const audio_session_t session, const audio_port_handle_t deviceId,
-                          const String16& opPackageName,
+                          const audio_session_t session, audio_port_handle_t portId,
+                          const audio_port_handle_t deviceId, const String16& opPackageName,
                           bool canCaptureOutput, bool canCaptureHotword) :
-                    AudioClient(attributes, io, uid, pid, session, deviceId),
+                    AudioClient(attributes, io, uid, pid, session, portId, deviceId),
                     opPackageName(opPackageName), startTimeNs(0),
                     canCaptureOutput(canCaptureOutput), canCaptureHotword(canCaptureHotword) {}
                 ~AudioRecordClient() override = default;
@@ -802,9 +804,9 @@
     public:
                 AudioPlaybackClient(const audio_attributes_t attributes,
                       const audio_io_handle_t io, uid_t uid, pid_t pid,
-                            const audio_session_t session, audio_port_handle_t deviceId,
-                            audio_stream_type_t stream) :
-                    AudioClient(attributes, io, uid, pid, session, deviceId), stream(stream) {}
+                            const audio_session_t session, audio_port_handle_t portId,
+                            audio_port_handle_t deviceId, audio_stream_type_t stream) :
+                    AudioClient(attributes, io, uid, pid, session, portId, deviceId), stream(stream) {}
                 ~AudioPlaybackClient() override = default;
 
         const audio_stream_type_t stream;
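
For orientation, a minimal sketch of how the widened constructor is called once a port id is known — the uid/pid/stream literals are placeholders, and the sp<> wrapper assumes AudioClient derives from RefBase as elsewhere in the service; this is a sketch, not part of the change:

    // Sketch only: constructing a playback client with the new portId argument.
    // In the real service, portId and deviceId are the handles returned by
    // getOutputForAttr(); they are left as *_NONE placeholders here.
    audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;   // would come from getOutputForAttr()
    audio_port_handle_t deviceId = AUDIO_PORT_HANDLE_NONE; // selected output device port id
    sp<AudioPlaybackClient> client = new AudioPlaybackClient(
            attr, AUDIO_IO_HANDLE_NONE /*io*/, 10000 /*uid*/, 1234 /*pid*/,
            AUDIO_SESSION_NONE, portId, deviceId, AUDIO_STREAM_MUSIC);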
diff --git a/services/audiopolicy/tests/Android.bp b/services/audiopolicy/tests/Android.bp
new file mode 100644
index 0000000..df23410
--- /dev/null
+++ b/services/audiopolicy/tests/Android.bp
@@ -0,0 +1,66 @@
+cc_test {
+    name: "audiopolicy_tests",
+
+    include_dirs: [
+        "frameworks/av/services/audiopolicy",
+    ],
+
+    shared_libs: [
+        "libaudioclient",
+        "libaudiofoundation",
+        "libaudiopolicy",
+        "libaudiopolicymanagerdefault",
+        "libbase",
+        "libhidlbase",
+        "liblog",
+        "libmedia_helper",
+        "libutils",
+        "libxml2",
+    ],
+
+    static_libs: ["libaudiopolicycomponents"],
+
+    header_libs: [
+        "libaudiopolicycommon",
+        "libaudiopolicyengine_interface_headers",
+        "libaudiopolicymanager_interface_headers",
+    ],
+
+    srcs: ["audiopolicymanager_tests.cpp"],
+
+    data: [":audiopolicytest_configuration_files",],
+
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+
+    test_suites: ["device-tests"],
+
+}
+
+// system/audio.h utilities test
+
+cc_test {
+    name: "systemaudio_tests",
+
+    shared_libs: [
+        "libaudiofoundation",
+        "libbase",
+        "liblog",
+        "libmedia_helper",
+        "libutils",
+    ],
+
+    header_libs: ["libmedia_headers"],
+
+    srcs: ["systemaudio_tests.cpp"],
+
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+
+    test_suites: ["device-tests"],
+
+}
diff --git a/services/audiopolicy/tests/Android.mk b/services/audiopolicy/tests/Android.mk
deleted file mode 100644
index ab9f78b..0000000
--- a/services/audiopolicy/tests/Android.mk
+++ /dev/null
@@ -1,65 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_C_INCLUDES := \
-  frameworks/av/services/audiopolicy \
-  $(call include-path-for, audio-utils) \
-
-LOCAL_SHARED_LIBRARIES := \
-  libaudiopolicymanagerdefault \
-  libbase \
-  liblog \
-  libmedia_helper \
-  libutils \
-
-LOCAL_STATIC_LIBRARIES := \
-  libaudiopolicycomponents \
-
-LOCAL_HEADER_LIBRARIES := \
-    libaudiopolicycommon \
-    libaudiopolicyengine_interface_headers \
-    libaudiopolicymanager_interface_headers
-
-LOCAL_SRC_FILES := \
-  audiopolicymanager_tests.cpp \
-
-LOCAL_MODULE := audiopolicy_tests
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_CFLAGS := -Werror -Wall
-
-LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-
-LOCAL_COMPATIBILITY_SUITE := device-tests
-
-include $(BUILD_NATIVE_TEST)
-
-# system/audio.h utilities test
-
-include $(CLEAR_VARS)
-
-LOCAL_SHARED_LIBRARIES := \
-  libbase \
-  liblog \
-  libmedia_helper \
-  libutils
-
-LOCAL_HEADER_LIBRARIES := \
-  libmedia_headers
-
-LOCAL_SRC_FILES := \
-  systemaudio_tests.cpp \
-
-LOCAL_MODULE := systemaudio_tests
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_CFLAGS := -Werror -Wall
-
-LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-
-LOCAL_COMPATIBILITY_SUITE := device-tests
-
-include $(BUILD_NATIVE_TEST)
diff --git a/services/audiopolicy/tests/AudioPolicyTestManager.h b/services/audiopolicy/tests/AudioPolicyTestManager.h
index fe543a6..ba1412b 100644
--- a/services/audiopolicy/tests/AudioPolicyTestManager.h
+++ b/services/audiopolicy/tests/AudioPolicyTestManager.h
@@ -25,6 +25,7 @@
             : AudioPolicyManager(clientInterface, true /*forTesting*/) { }
     using AudioPolicyManager::getConfig;
     using AudioPolicyManager::initialize;
+    using AudioPolicyManager::getOutputs;
 };
 
 }  // namespace android
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index e10a716..d3d839e 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -14,17 +14,24 @@
  * limitations under the License.
  */
 
+#include <map>
 #include <memory>
-#include <set>
+#include <string>
 #include <sys/wait.h>
 #include <unistd.h>
 
 #include <gtest/gtest.h>
 
 #define LOG_TAG "APM_Test"
-#include <log/log.h>
+#include <Serializer.h>
+#include <android-base/file.h>
+#include <media/AudioPolicy.h>
 #include <media/PatchBuilder.h>
+#include <media/RecordingActivityTracker.h>
+#include <utils/Log.h>
+#include <utils/Vector.h>
 
+#include "AudioPolicyInterface.h"
 #include "AudioPolicyTestClient.h"
 #include "AudioPolicyTestManager.h"
 
@@ -73,6 +80,12 @@
         return NO_ERROR;
     }
 
+    audio_io_handle_t openDuplicateOutput(audio_io_handle_t /*output1*/,
+                                 audio_io_handle_t /*output2*/) override {
+        audio_io_handle_t id = mNextIoHandle++;
+        return id;
+    }
+
     status_t openInput(audio_module_handle_t module,
                        audio_io_handle_t* input,
                        audio_config_t* /*config*/,
@@ -89,11 +102,11 @@
         return NO_ERROR;
     }
 
-    status_t createAudioPatch(const struct audio_patch* /*patch*/,
+    status_t createAudioPatch(const struct audio_patch* patch,
                               audio_patch_handle_t* handle,
                               int /*delayMs*/) override {
         *handle = mNextPatchHandle++;
-        mActivePatches.insert(*handle);
+        mActivePatches.insert(std::make_pair(*handle, *patch));
         return NO_ERROR;
     }
 
@@ -114,11 +127,19 @@
     // Helper methods for tests
     size_t getActivePatchesCount() const { return mActivePatches.size(); }
 
+    const struct audio_patch* getLastAddedPatch() const {
+        if (mActivePatches.empty()) {
+            return nullptr;
+        }
+        auto it = --mActivePatches.end();
+        return &it->second;
+    };
+
   private:
     audio_module_handle_t mNextModuleHandle = AUDIO_MODULE_HANDLE_NONE + 1;
     audio_io_handle_t mNextIoHandle = AUDIO_IO_HANDLE_NONE + 1;
     audio_patch_handle_t mNextPatchHandle = AUDIO_PATCH_HANDLE_NONE + 1;
-    std::set<audio_patch_handle_t> mActivePatches;
+    std::map<audio_patch_handle_t, struct audio_patch> mActivePatches;
 };
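
The switch from std::set to std::map is what makes getLastAddedPatch() possible: patch handles are handed out by a monotonically increasing counter, so in an ordered map the greatest key — reached with --end() — is always the most recently created patch. A small self-contained illustration of that property, assuming nothing beyond the standard library:

    #include <cassert>
    #include <map>

    int main() {
        std::map<int, const char*> patches;   // stands in for mActivePatches
        int nextHandle = 1;                   // stands in for mNextPatchHandle
        patches.emplace(nextHandle++, "first patch");
        patches.emplace(nextHandle++, "second patch");
        // Keys are kept in ascending order, so --end() is the last added entry.
        assert((--patches.end())->first == 2);
        return 0;
    }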
 
 class PatchCountCheck {
@@ -143,18 +164,35 @@
   protected:
     void SetUp() override;
     void TearDown() override;
-    virtual void SetUpConfig(AudioPolicyConfig *config) { (void)config; }
+    virtual void SetUpManagerConfig();
 
     void dumpToLog();
+    // When explicit routing is needed, selectedDeviceId needs to be set to the wanted port
+    // id. Otherwise, selectedDeviceId needs to be initialized to AUDIO_PORT_HANDLE_NONE.
+    // When explicit routing is needed, selectedDeviceId needs to be set to the wanted port
     void getOutputForAttr(
             audio_port_handle_t *selectedDeviceId,
             audio_format_t format,
             int channelMask,
             int sampleRate,
             audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+            audio_io_handle_t *output = nullptr,
+            audio_port_handle_t *portId = nullptr,
+            audio_attributes_t attr = {});
+    void getInputForAttr(
+            const audio_attributes_t &attr,
+            audio_unique_id_t riid,
+            audio_port_handle_t *selectedDeviceId,
+            audio_format_t format,
+            int channelMask,
+            int sampleRate,
+            audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
             audio_port_handle_t *portId = nullptr);
     PatchCountCheck snapshotPatchCount() { return PatchCountCheck(mClient.get()); }
 
+    void findDevicePort(audio_port_role_t role, audio_devices_t deviceType,
+            const std::string &address, audio_port &foundPort);
+    static audio_port_handle_t getDeviceIdFromPatch(const struct audio_patch* patch);
+
     std::unique_ptr<AudioPolicyManagerTestClient> mClient;
     std::unique_ptr<AudioPolicyTestManager> mManager;
 };
@@ -162,8 +200,7 @@
 void AudioPolicyManagerTest::SetUp() {
     mClient.reset(new AudioPolicyManagerTestClient);
     mManager.reset(new AudioPolicyTestManager(mClient.get()));
-    mManager->getConfig().setDefault();
-    SetUpConfig(&mManager->getConfig());  // Subclasses may want to customize the config.
+    SetUpManagerConfig();  // Subclasses may want to customize the config.
     ASSERT_EQ(NO_ERROR, mManager->initialize());
     ASSERT_EQ(NO_ERROR, mManager->initCheck());
 }
@@ -173,6 +210,10 @@
     mClient.reset();
 }
 
+void AudioPolicyManagerTest::SetUpManagerConfig() {
+    mManager->getConfig().setDefault();
+}
+
 void AudioPolicyManagerTest::dumpToLog() {
     int pipefd[2];
     ASSERT_NE(-1, pipe(pipefd));
@@ -209,22 +250,90 @@
         int channelMask,
         int sampleRate,
         audio_output_flags_t flags,
-        audio_port_handle_t *portId) {
-    audio_attributes_t attr = {};
-    audio_io_handle_t output = AUDIO_PORT_HANDLE_NONE;
+        audio_io_handle_t *output,
+        audio_port_handle_t *portId,
+        audio_attributes_t attr) {
+    audio_io_handle_t localOutput;
+    if (!output) output = &localOutput;
+    *output = AUDIO_IO_HANDLE_NONE;
     audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
     audio_config_t config = AUDIO_CONFIG_INITIALIZER;
     config.sample_rate = sampleRate;
     config.channel_mask = channelMask;
     config.format = format;
-    *selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
     audio_port_handle_t localPortId;
     if (!portId) portId = &localPortId;
     *portId = AUDIO_PORT_HANDLE_NONE;
     ASSERT_EQ(OK, mManager->getOutputForAttr(
-                    &attr, &output, AUDIO_SESSION_NONE, &stream, 0 /*uid*/, &config, &flags,
+                    &attr, output, AUDIO_SESSION_NONE, &stream, 0 /*uid*/, &config, &flags,
                     selectedDeviceId, portId, {}));
     ASSERT_NE(AUDIO_PORT_HANDLE_NONE, *portId);
+    ASSERT_NE(AUDIO_IO_HANDLE_NONE, *output);
+}
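
A hedged usage sketch for the helper above, matching how the tests later in this file call it: with default routing the caller initializes selectedDeviceId to AUDIO_PORT_HANDLE_NONE, while explicit routing would instead pre-set it to the desired device port id; trailing defaulted arguments can simply be omitted.

    // Sketch: request an output with default routing and keep the port id for later release.
    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;  // no explicit routing
    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
    getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
            48000 /*sampleRate*/, AUDIO_OUTPUT_FLAG_NONE, nullptr /*output*/, &portId);
    // The returned portId can later be passed to mManager->releaseOutput(portId).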
+
+void AudioPolicyManagerTest::getInputForAttr(
+        const audio_attributes_t &attr,
+        audio_unique_id_t riid,
+        audio_port_handle_t *selectedDeviceId,
+        audio_format_t format,
+        int channelMask,
+        int sampleRate,
+        audio_input_flags_t flags,
+        audio_port_handle_t *portId) {
+    audio_io_handle_t input = AUDIO_PORT_HANDLE_NONE;
+    audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+    config.sample_rate = sampleRate;
+    config.channel_mask = channelMask;
+    config.format = format;
+    audio_port_handle_t localPortId;
+    if (!portId) portId = &localPortId;
+    *portId = AUDIO_PORT_HANDLE_NONE;
+    AudioPolicyInterface::input_type_t inputType;
+    ASSERT_EQ(OK, mManager->getInputForAttr(
+            &attr, &input, riid, AUDIO_SESSION_NONE, 0 /*uid*/, &config, flags,
+            selectedDeviceId, &inputType, portId));
+    ASSERT_NE(AUDIO_PORT_HANDLE_NONE, *portId);
+}
+
+void AudioPolicyManagerTest::findDevicePort(audio_port_role_t role,
+        audio_devices_t deviceType, const std::string &address, audio_port &foundPort) {
+    uint32_t numPorts = 0;
+    uint32_t generation1;
+    status_t ret;
+
+    ret = mManager->listAudioPorts(role, AUDIO_PORT_TYPE_DEVICE, &numPorts, nullptr, &generation1);
+    ASSERT_EQ(NO_ERROR, ret);
+
+    uint32_t generation2;
+    struct audio_port ports[numPorts];
+    ret = mManager->listAudioPorts(role, AUDIO_PORT_TYPE_DEVICE, &numPorts, ports, &generation2);
+    ASSERT_EQ(NO_ERROR, ret);
+    ASSERT_EQ(generation1, generation2);
+
+    for (const auto &port : ports) {
+        if (port.role == role && port.ext.device.type == deviceType &&
+                (strncmp(port.ext.device.address, address.c_str(),
+                         AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0)) {
+            foundPort = port;
+            return;
+        }
+    }
+    GTEST_FAIL() << "Device port with role " << role << " and address " << address << " not found";
+}
+
+audio_port_handle_t AudioPolicyManagerTest::getDeviceIdFromPatch(
+        const struct audio_patch* patch) {
+    // The logic here is the same as the one in AudioIoDescriptor.
+    // Note that this function aims to get the routed device id for tests,
+    // so a device-to-device patch is not expected here.
+    if (patch->num_sources != 0 && patch->num_sinks != 0) {
+        if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) {
+            return patch->sinks[0].id;
+        } else {
+            return patch->sources[0].id;
+        }
+    }
+    return AUDIO_PORT_HANDLE_NONE;
 }
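
Taken together, getLastAddedPatch() and getDeviceIdFromPatch() let a test assert where a stream was actually routed. A hedged sketch of that flow inside a test body; expectedDeviceId is a placeholder for, e.g., an injection port id obtained via findDevicePort():

    const struct audio_patch* lastPatch = mClient->getLastAddedPatch();
    ASSERT_NE(nullptr, lastPatch);
    audio_port_handle_t expectedDeviceId = AUDIO_PORT_HANDLE_NONE;  // placeholder value
    ASSERT_EQ(expectedDeviceId, getDeviceIdFromPatch(lastPatch));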
 
 
@@ -286,15 +395,17 @@
 
 class AudioPolicyManagerTestMsd : public AudioPolicyManagerTest {
   protected:
-    void SetUpConfig(AudioPolicyConfig *config) override;
+    void SetUpManagerConfig() override;
     void TearDown() override;
 
     sp<DeviceDescriptor> mMsdOutputDevice;
     sp<DeviceDescriptor> mMsdInputDevice;
 };
 
-void AudioPolicyManagerTestMsd::SetUpConfig(AudioPolicyConfig *config) {
+void AudioPolicyManagerTestMsd::SetUpManagerConfig() {
     // TODO: Consider using Serializer to load part of the config from a string.
+    AudioPolicyManagerTest::SetUpManagerConfig();
+    AudioPolicyConfig& config = mManager->getConfig();
     mMsdOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_BUS);
     sp<AudioProfile> pcmOutputProfile = new AudioProfile(
             AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 48000);
@@ -307,22 +418,21 @@
     sp<AudioProfile> pcmInputProfile = new AudioProfile(
             AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_STEREO, 44100);
     mMsdInputDevice->addAudioProfile(pcmInputProfile);
-    config->addAvailableDevice(mMsdOutputDevice);
-    config->addAvailableDevice(mMsdInputDevice);
+    config.addAvailableDevice(mMsdOutputDevice);
+    config.addAvailableDevice(mMsdInputDevice);
 
     sp<HwModule> msdModule = new HwModule(AUDIO_HARDWARE_MODULE_ID_MSD, 2 /*halVersionMajor*/);
-    HwModuleCollection modules = config->getHwModules();
+    HwModuleCollection modules = config.getHwModules();
     modules.add(msdModule);
-    config->setHwModules(modules);
+    config.setHwModules(modules);
     mMsdOutputDevice->attach(msdModule);
     mMsdInputDevice->attach(msdModule);
 
-    sp<OutputProfile> msdOutputProfile = new OutputProfile(String8("msd input"));
+    sp<OutputProfile> msdOutputProfile = new OutputProfile("msd input");
     msdOutputProfile->addAudioProfile(pcmOutputProfile);
     msdOutputProfile->addSupportedDevice(mMsdOutputDevice);
     msdModule->addOutputProfile(msdOutputProfile);
-    sp<OutputProfile> msdCompressedOutputProfile =
-            new OutputProfile(String8("msd compressed input"));
+    sp<OutputProfile> msdCompressedOutputProfile = new OutputProfile("msd compressed input");
     msdCompressedOutputProfile->addAudioProfile(ac3OutputProfile);
     msdCompressedOutputProfile->setFlags(
             AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD |
@@ -330,7 +440,7 @@
     msdCompressedOutputProfile->addSupportedDevice(mMsdOutputDevice);
     msdModule->addOutputProfile(msdCompressedOutputProfile);
 
-    sp<InputProfile> msdInputProfile = new InputProfile(String8("msd output"));
+    sp<InputProfile> msdInputProfile = new InputProfile("msd output");
     msdInputProfile->addAudioProfile(pcmInputProfile);
     msdInputProfile->addSupportedDevice(mMsdInputDevice);
     msdModule->addInputProfile(msdInputProfile);
@@ -339,12 +449,12 @@
     // of streams that are not supported by MSD.
     sp<AudioProfile> dtsOutputProfile = new AudioProfile(
             AUDIO_FORMAT_DTS, AUDIO_CHANNEL_OUT_5POINT1, 48000);
-    config->getDefaultOutputDevice()->addAudioProfile(dtsOutputProfile);
-    sp<OutputProfile> primaryEncodedOutputProfile = new OutputProfile(String8("encoded"));
+    config.getDefaultOutputDevice()->addAudioProfile(dtsOutputProfile);
+    sp<OutputProfile> primaryEncodedOutputProfile = new OutputProfile("encoded");
     primaryEncodedOutputProfile->addAudioProfile(dtsOutputProfile);
     primaryEncodedOutputProfile->setFlags(AUDIO_OUTPUT_FLAG_DIRECT);
-    primaryEncodedOutputProfile->addSupportedDevice(config->getDefaultOutputDevice());
-    config->getHwModules().getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY)->
+    primaryEncodedOutputProfile->addSupportedDevice(config.getDefaultOutputDevice());
+    config.getHwModules().getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY)->
             addOutputProfile(primaryEncodedOutputProfile);
 }
 
@@ -372,7 +482,7 @@
 
 TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedRoutesToMsd) {
     const PatchCountCheck patchCount = snapshotPatchCount();
-    audio_port_handle_t selectedDeviceId;
+    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
     getOutputForAttr(&selectedDeviceId,
             AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
     ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
@@ -381,7 +491,7 @@
 
 TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrPcmRoutesToMsd) {
     const PatchCountCheck patchCount = snapshotPatchCount();
-    audio_port_handle_t selectedDeviceId;
+    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
     getOutputForAttr(&selectedDeviceId,
             AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 48000);
     ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
@@ -390,7 +500,7 @@
 
 TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedPlusPcmRoutesToMsd) {
     const PatchCountCheck patchCount = snapshotPatchCount();
-    audio_port_handle_t selectedDeviceId;
+    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
     getOutputForAttr(&selectedDeviceId,
             AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
     ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
@@ -403,7 +513,7 @@
 
 TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrUnsupportedFormatBypassesMsd) {
     const PatchCountCheck patchCount = snapshotPatchCount();
-    audio_port_handle_t selectedDeviceId;
+    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
     getOutputForAttr(&selectedDeviceId,
             AUDIO_FORMAT_DTS, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
     ASSERT_NE(selectedDeviceId, mMsdOutputDevice->getId());
@@ -414,10 +524,11 @@
     // Switch between formats that are supported and not supported by MSD.
     {
         const PatchCountCheck patchCount = snapshotPatchCount();
-        audio_port_handle_t selectedDeviceId, portId;
+        audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+        audio_port_handle_t portId;
         getOutputForAttr(&selectedDeviceId,
                 AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT,
-                &portId);
+                nullptr /*output*/, &portId);
         ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
         ASSERT_EQ(1, patchCount.deltaFromSnapshot());
         mManager->releaseOutput(portId);
@@ -425,10 +536,11 @@
     }
     {
         const PatchCountCheck patchCount = snapshotPatchCount();
-        audio_port_handle_t selectedDeviceId, portId;
+        audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+        audio_port_handle_t portId;
         getOutputForAttr(&selectedDeviceId,
                 AUDIO_FORMAT_DTS, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT,
-                &portId);
+                nullptr /*output*/, &portId);
         ASSERT_NE(selectedDeviceId, mMsdOutputDevice->getId());
         ASSERT_EQ(-1, patchCount.deltaFromSnapshot());
         mManager->releaseOutput(portId);
@@ -436,10 +548,662 @@
     }
     {
         const PatchCountCheck patchCount = snapshotPatchCount();
-        audio_port_handle_t selectedDeviceId;
+        audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
         getOutputForAttr(&selectedDeviceId,
                 AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
         ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
         ASSERT_EQ(0, patchCount.deltaFromSnapshot());
     }
 }
+
+class AudioPolicyManagerTestWithConfigurationFile : public AudioPolicyManagerTest {
+protected:
+    void SetUpManagerConfig() override;
+    virtual std::string getConfigFile() { return sDefaultConfig; }
+
+    static const std::string sExecutableDir;
+    static const std::string sDefaultConfig;
+};
+
+const std::string AudioPolicyManagerTestWithConfigurationFile::sExecutableDir =
+        base::GetExecutableDirectory() + "/";
+
+const std::string AudioPolicyManagerTestWithConfigurationFile::sDefaultConfig =
+        sExecutableDir + "test_audio_policy_configuration.xml";
+
+void AudioPolicyManagerTestWithConfigurationFile::SetUpManagerConfig() {
+    status_t status = deserializeAudioPolicyFile(getConfigFile().c_str(), &mManager->getConfig());
+    ASSERT_EQ(NO_ERROR, status);
+}
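
This fixture is the extension point for configuration-file based tests: a derived fixture only has to override getConfigFile() to load a different policy XML, as the fixtures further below do. A hypothetical sketch (the file name is made up and such a file would also have to be listed in the resources filegroup):

    class AudioPolicyManagerTestMyConfig : public AudioPolicyManagerTestWithConfigurationFile {
      protected:
        std::string getConfigFile() override {
            return sExecutableDir + "my_test_apm_configuration.xml";  // hypothetical file
        }
    };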
+
+TEST_F(AudioPolicyManagerTestWithConfigurationFile, InitSuccess) {
+    // SetUp must finish with no assertions.
+}
+
+TEST_F(AudioPolicyManagerTestWithConfigurationFile, Dump) {
+    dumpToLog();
+}
+
+using PolicyMixTuple = std::tuple<audio_usage_t, audio_source_t, uint32_t>;
+
+class AudioPolicyManagerTestDynamicPolicy : public AudioPolicyManagerTestWithConfigurationFile {
+protected:
+    void TearDown() override;
+
+    status_t addPolicyMix(int mixType, int mixFlag, audio_devices_t deviceType,
+            std::string mixAddress, const audio_config_t& audioConfig,
+            const std::vector<PolicyMixTuple>& rules);
+    void clearPolicyMix();
+
+    Vector<AudioMix> mAudioMixes;
+    const std::string mMixAddress = "remote_submix_media";
+};
+
+void AudioPolicyManagerTestDynamicPolicy::TearDown() {
+    mManager->unregisterPolicyMixes(mAudioMixes);
+    AudioPolicyManagerTestWithConfigurationFile::TearDown();
+}
+
+status_t AudioPolicyManagerTestDynamicPolicy::addPolicyMix(int mixType, int mixFlag,
+        audio_devices_t deviceType, std::string mixAddress, const audio_config_t& audioConfig,
+        const std::vector<PolicyMixTuple>& rules) {
+    Vector<AudioMixMatchCriterion> myMixMatchCriteria;
+
+    for(const auto &rule: rules) {
+        myMixMatchCriteria.add(AudioMixMatchCriterion(
+                std::get<0>(rule), std::get<1>(rule), std::get<2>(rule)));
+    }
+
+    AudioMix myAudioMix(myMixMatchCriteria, mixType, audioConfig, mixFlag,
+            String8(mixAddress.c_str()), 0);
+    myAudioMix.mDeviceType = deviceType;
+    // Clear mAudioMixes before adding a new one to avoid adding already existing mixes.
+    mAudioMixes.clear();
+    mAudioMixes.add(myAudioMix);
+
+    // As policy mix registration may fail in some cases,
+    // the caller needs to check the returned status.
+    status_t ret = mManager->registerPolicyMixes(mAudioMixes);
+    return ret;
+}
+
+void AudioPolicyManagerTestDynamicPolicy::clearPolicyMix() {
+    if (mManager != nullptr) {
+        mManager->unregisterPolicyMixes(mAudioMixes);
+    }
+    mAudioMixes.clear();
+}
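
A hedged sketch of how a test registers a playback re-routing mix through this helper; the values mirror the ones used by the fixtures below rather than introducing anything new:

    audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
    audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
    audioConfig.sample_rate = 48000;
    // Capture any player whose usage is MEDIA into the remote submix at mMixAddress.
    std::vector<PolicyMixTuple> rules = {
            {AUDIO_USAGE_MEDIA, AUDIO_SOURCE_DEFAULT, RULE_MATCH_ATTRIBUTE_USAGE},
    };
    ASSERT_EQ(NO_ERROR, addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_LOOP_BACK,
            AUDIO_DEVICE_OUT_REMOTE_SUBMIX, mMixAddress, audioConfig, rules));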
+
+TEST_F(AudioPolicyManagerTestDynamicPolicy, InitSuccess) {
+    // SetUp must finish with no assertions
+}
+
+TEST_F(AudioPolicyManagerTestDynamicPolicy, Dump) {
+    dumpToLog();
+}
+
+TEST_F(AudioPolicyManagerTestDynamicPolicy, RegisterPolicyMixes) {
+    status_t ret;
+    audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+
+    // Only capture of playback is allowed in LOOP_BACK & RENDER mode
+    ret = addPolicyMix(MIX_TYPE_RECORDERS, MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER,
+            AUDIO_DEVICE_OUT_REMOTE_SUBMIX, "", audioConfig, std::vector<PolicyMixTuple>());
+    ASSERT_EQ(INVALID_OPERATION, ret);
+
+    // Fails because the device is already connected.
+    clearPolicyMix();
+    ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_LOOP_BACK,
+            AUDIO_DEVICE_OUT_REMOTE_SUBMIX, "", audioConfig, std::vector<PolicyMixTuple>());
+    ASSERT_EQ(INVALID_OPERATION, ret);
+
+    // Registering policy mixes with valid parameters for the first time should succeed.
+    clearPolicyMix();
+    audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+    audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+    audioConfig.sample_rate = 48000;
+    ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_LOOP_BACK,
+            AUDIO_DEVICE_OUT_REMOTE_SUBMIX, mMixAddress, audioConfig,
+            std::vector<PolicyMixTuple>());
+    ASSERT_EQ(NO_ERROR, ret);
+    // Registering the same policy mixes should fail.
+    ret = mManager->registerPolicyMixes(mAudioMixes);
+    ASSERT_EQ(INVALID_OPERATION, ret);
+
+    // Registration should fail because the device is not found.
+    // Note that earpiece is not present in the test configuration file.
+    // This will need to be updated if earpiece is added to the test configuration file.
+    clearPolicyMix();
+    ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+            AUDIO_DEVICE_OUT_EARPIECE, "", audioConfig, std::vector<PolicyMixTuple>());
+    ASSERT_EQ(INVALID_OPERATION, ret);
+
+    // Registration should fail because no output is found.
+    clearPolicyMix();
+    ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+            AUDIO_DEVICE_OUT_REMOTE_SUBMIX, "", audioConfig, std::vector<PolicyMixTuple>());
+    ASSERT_EQ(INVALID_OPERATION, ret);
+
+    // Registering valid policy mixes for the first time should succeed.
+    clearPolicyMix();
+    ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+            AUDIO_DEVICE_OUT_SPEAKER, "", audioConfig, std::vector<PolicyMixTuple>());
+    ASSERT_EQ(NO_ERROR, ret);
+    // Registering the same policy mixes should fail.
+    ret = mManager->registerPolicyMixes(mAudioMixes);
+    ASSERT_EQ(INVALID_OPERATION, ret);
+}
+
+TEST_F(AudioPolicyManagerTestDynamicPolicy, UnregisterPolicyMixes) {
+    status_t ret;
+    audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+
+    audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+    audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+    audioConfig.sample_rate = 48000;
+    ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_LOOP_BACK,
+            AUDIO_DEVICE_OUT_REMOTE_SUBMIX, mMixAddress, audioConfig,
+            std::vector<PolicyMixTuple>());
+    ASSERT_EQ(NO_ERROR, ret);
+
+    // After successfully registering policy mixes, it should be possible to unregister them.
+    ret = mManager->unregisterPolicyMixes(mAudioMixes);
+    ASSERT_EQ(NO_ERROR, ret);
+
+    // After unregistering policy mixes successfully, unregistering the same policy
+    // mixes should fail as they are no longer registered.
+    ret = mManager->unregisterPolicyMixes(mAudioMixes);
+    ASSERT_EQ(INVALID_OPERATION, ret);
+}
+
+class AudioPolicyManagerTestDPNoRemoteSubmixModule : public AudioPolicyManagerTestDynamicPolicy {
+protected:
+    std::string getConfigFile() override { return sPrimaryOnlyConfig; }
+
+    static const std::string sPrimaryOnlyConfig;
+};
+
+const std::string AudioPolicyManagerTestDPNoRemoteSubmixModule::sPrimaryOnlyConfig =
+        sExecutableDir + "test_audio_policy_primary_only_configuration.xml";
+
+TEST_F(AudioPolicyManagerTestDPNoRemoteSubmixModule, InitSuccess) {
+    // SetUp must finish with no assertions.
+}
+
+TEST_F(AudioPolicyManagerTestDPNoRemoteSubmixModule, Dump) {
+    dumpToLog();
+}
+
+TEST_F(AudioPolicyManagerTestDPNoRemoteSubmixModule, RegistrationFailure) {
+    // Registration/Unregistration should fail because the module for remote submix is not found.
+    status_t ret;
+    audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+    audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+    audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+    audioConfig.sample_rate = 48000;
+    ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_LOOP_BACK,
+            AUDIO_DEVICE_OUT_REMOTE_SUBMIX, "", audioConfig, std::vector<PolicyMixTuple>());
+    ASSERT_EQ(INVALID_OPERATION, ret);
+
+    ret = mManager->unregisterPolicyMixes(mAudioMixes);
+    ASSERT_EQ(INVALID_OPERATION, ret);
+}
+
+class AudioPolicyManagerTestDPPlaybackReRouting : public AudioPolicyManagerTestDynamicPolicy,
+        public testing::WithParamInterface<audio_attributes_t> {
+protected:
+    void SetUp() override;
+    void TearDown() override;
+
+    std::unique_ptr<RecordingActivityTracker> mTracker;
+
+    std::vector<PolicyMixTuple> mUsageRules = {
+            {AUDIO_USAGE_MEDIA, AUDIO_SOURCE_DEFAULT, RULE_MATCH_ATTRIBUTE_USAGE},
+            {AUDIO_USAGE_ALARM, AUDIO_SOURCE_DEFAULT, RULE_MATCH_ATTRIBUTE_USAGE}
+    };
+
+    struct audio_port mInjectionPort;
+    audio_port_handle_t mPortId = AUDIO_PORT_HANDLE_NONE;
+};
+
+void AudioPolicyManagerTestDPPlaybackReRouting::SetUp() {
+    AudioPolicyManagerTestDynamicPolicy::SetUp();
+
+    mTracker.reset(new RecordingActivityTracker());
+
+    audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+    audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+    audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+    audioConfig.sample_rate = 48000;
+    status_t ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_LOOP_BACK,
+            AUDIO_DEVICE_OUT_REMOTE_SUBMIX, mMixAddress, audioConfig, mUsageRules);
+    ASSERT_EQ(NO_ERROR, ret);
+
+    struct audio_port extractionPort;
+    findDevicePort(AUDIO_PORT_ROLE_SOURCE, AUDIO_DEVICE_IN_REMOTE_SUBMIX,
+            mMixAddress, extractionPort);
+
+    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+    audio_source_t source = AUDIO_SOURCE_REMOTE_SUBMIX;
+    audio_attributes_t attr = {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, source, 0, ""};
+    std::string tags = "addr=" + mMixAddress;
+    strncpy(attr.tags, tags.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
+    getInputForAttr(attr, mTracker->getRiid(), &selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT,
+            AUDIO_CHANNEL_IN_STEREO, 48000 /*sampleRate*/, AUDIO_INPUT_FLAG_NONE, &mPortId);
+    ASSERT_EQ(NO_ERROR, mManager->startInput(mPortId));
+    ASSERT_EQ(extractionPort.id, selectedDeviceId);
+
+    findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+            mMixAddress, mInjectionPort);
+}
+
+void AudioPolicyManagerTestDPPlaybackReRouting::TearDown() {
+    mManager->stopInput(mPortId);
+    AudioPolicyManagerTestDynamicPolicy::TearDown();
+}
+
+TEST_F(AudioPolicyManagerTestDPPlaybackReRouting, InitSuccess) {
+    // SetUp must finish with no assertions
+}
+
+TEST_F(AudioPolicyManagerTestDPPlaybackReRouting, Dump) {
+    dumpToLog();
+}
+
+TEST_P(AudioPolicyManagerTestDPPlaybackReRouting, PlaybackReRouting) {
+    const audio_attributes_t attr = GetParam();
+    const audio_usage_t usage = attr.usage;
+
+    audio_port_handle_t playbackRoutedPortId = AUDIO_PORT_HANDLE_NONE;
+    getOutputForAttr(&playbackRoutedPortId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+            48000 /*sampleRate*/, AUDIO_OUTPUT_FLAG_NONE,
+            nullptr /*output*/, nullptr /*portId*/, attr);
+    if (std::find_if(begin(mUsageRules), end(mUsageRules), [&usage](const auto &usageRule) {
+            return (std::get<0>(usageRule) == usage) &&
+            (std::get<2>(usageRule) == RULE_MATCH_ATTRIBUTE_USAGE);}) != end(mUsageRules) ||
+            (strncmp(attr.tags, "addr=", strlen("addr=")) == 0 &&
+                    strncmp(attr.tags + strlen("addr="), mMixAddress.c_str(),
+                    AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0)) {
+        EXPECT_EQ(mInjectionPort.id, playbackRoutedPortId);
+    } else {
+        EXPECT_NE(mInjectionPort.id, playbackRoutedPortId);
+    }
+}
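
The expectation above encodes two independent ways a stream is captured by the mix: its usage matches one of mUsageRules, or its tags carry an "addr=" prefix naming the mix address, which takes priority regardless of usage. A hedged restatement of that decision as a standalone helper, assuming the includes already present in this file (the helper itself is illustrative and not part of the change):

    bool expectReRouted(const audio_attributes_t& attr,
                        const std::vector<PolicyMixTuple>& usageRules,
                        const std::string& mixAddress) {
        for (const auto& rule : usageRules) {
            if (std::get<0>(rule) == attr.usage &&
                    std::get<2>(rule) == RULE_MATCH_ATTRIBUTE_USAGE) {
                return true;  // matched by a usage rule
            }
        }
        const std::string tags(attr.tags);
        // Address priority: a tag of the form "addr=<mix address>" wins regardless of usage.
        return tags.compare(0, 5, "addr=") == 0 && tags.substr(5) == mixAddress;  // 5 == strlen("addr=")
    }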
+
+INSTANTIATE_TEST_CASE_P(
+        PlaybackReroutingUsageMatch,
+        AudioPolicyManagerTestDPPlaybackReRouting,
+        testing::Values(
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_MEDIA,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ALARM,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""}
+                )
+        );
+
+INSTANTIATE_TEST_CASE_P(
+        PlaybackReroutingAddressPriorityMatch,
+        AudioPolicyManagerTestDPPlaybackReRouting,
+        testing::Values(
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_MEDIA,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_VOICE_COMMUNICATION,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ALARM,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION_EVENT,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_ASSISTANCE_SONIFICATION,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_GAME,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_VIRTUAL_SOURCE,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ASSISTANT,
+                                     AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"}
+                )
+        );
+
+INSTANTIATE_TEST_CASE_P(
+        PlaybackReroutingUnHandledUsages,
+        AudioPolicyManagerTestDPPlaybackReRouting,
+        testing::Values(
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_VOICE_COMMUNICATION,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION_EVENT,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+                                     AUDIO_USAGE_ASSISTANCE_SONIFICATION,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_GAME,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ASSISTANT,
+                                     AUDIO_SOURCE_DEFAULT, 0, ""}
+                )
+        );
+
+class AudioPolicyManagerTestDPMixRecordInjection : public AudioPolicyManagerTestDynamicPolicy,
+        public testing::WithParamInterface<audio_attributes_t> {
+protected:
+    void SetUp() override;
+    void TearDown() override;
+
+    std::unique_ptr<RecordingActivityTracker> mTracker;
+
+    std::vector<PolicyMixTuple> mSourceRules = {
+        {AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_CAMCORDER, RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET},
+        {AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_MIC, RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET},
+        {AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_VOICE_COMMUNICATION, RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET}
+    };
+
+    struct audio_port mExtractionPort;
+    audio_port_handle_t mPortId = AUDIO_PORT_HANDLE_NONE;
+};
+
+void AudioPolicyManagerTestDPMixRecordInjection::SetUp() {
+    AudioPolicyManagerTestDynamicPolicy::SetUp();
+
+    mTracker.reset(new RecordingActivityTracker());
+
+    audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+    audioConfig.channel_mask = AUDIO_CHANNEL_IN_STEREO;
+    audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+    audioConfig.sample_rate = 48000;
+    status_t ret = addPolicyMix(MIX_TYPE_RECORDERS, MIX_ROUTE_FLAG_LOOP_BACK,
+            AUDIO_DEVICE_IN_REMOTE_SUBMIX, mMixAddress, audioConfig, mSourceRules);
+    ASSERT_EQ(NO_ERROR, ret);
+
+    struct audio_port injectionPort;
+    findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+            mMixAddress, injectionPort);
+
+    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+    audio_usage_t usage = AUDIO_USAGE_VIRTUAL_SOURCE;
+    audio_attributes_t attr = {AUDIO_CONTENT_TYPE_UNKNOWN, usage, AUDIO_SOURCE_DEFAULT, 0, ""};
+    std::string tags = std::string("addr=") + mMixAddress;
+    strncpy(attr.tags, tags.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
+    getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+            48000 /*sampleRate*/, AUDIO_OUTPUT_FLAG_NONE, nullptr /*output*/, &mPortId, attr);
+    ASSERT_EQ(NO_ERROR, mManager->startOutput(mPortId));
+    ASSERT_EQ(injectionPort.id, getDeviceIdFromPatch(mClient->getLastAddedPatch()));
+
+    findDevicePort(AUDIO_PORT_ROLE_SOURCE, AUDIO_DEVICE_IN_REMOTE_SUBMIX,
+            mMixAddress, mExtractionPort);
+}
+
+void AudioPolicyManagerTestDPMixRecordInjection::TearDown() {
+    mManager->stopOutput(mPortId);
+    AudioPolicyManagerTestDynamicPolicy::TearDown();
+}
+
+TEST_F(AudioPolicyManagerTestDPMixRecordInjection, InitSuccess) {
+    // SetUp must finish with no assertions.
+}
+
+TEST_F(AudioPolicyManagerTestDPMixRecordInjection, Dump) {
+    dumpToLog();
+}
+
+TEST_P(AudioPolicyManagerTestDPMixRecordInjection, RecordingInjection) {
+    const audio_attributes_t attr = GetParam();
+    const audio_source_t source = attr.source;
+
+    audio_port_handle_t captureRoutedPortId = AUDIO_PORT_HANDLE_NONE;
+    audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+    getInputForAttr(attr, mTracker->getRiid(), &captureRoutedPortId, AUDIO_FORMAT_PCM_16_BIT,
+            AUDIO_CHANNEL_IN_STEREO, 48000 /*sampleRate*/, AUDIO_INPUT_FLAG_NONE, &portId);
+    if (std::find_if(begin(mSourceRules), end(mSourceRules), [&source](const auto &sourceRule) {
+            return (std::get<1>(sourceRule) == source) &&
+            (std::get<2>(sourceRule) == RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET);})
+            != end(mSourceRules)) {
+        EXPECT_EQ(mExtractionPort.id, captureRoutedPortId);
+    } else {
+        EXPECT_NE(mExtractionPort.id, captureRoutedPortId);
+    }
+}
+
+// No address priority rule for remote recording, address is a "don't care"
+INSTANTIATE_TEST_CASE_P(
+        RecordInjectionSourceMatch,
+        AudioPolicyManagerTestDPMixRecordInjection,
+        testing::Values(
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+                                     AUDIO_SOURCE_CAMCORDER, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+                                     AUDIO_SOURCE_CAMCORDER, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+                                     AUDIO_SOURCE_MIC, 0, "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+                                     AUDIO_SOURCE_MIC, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+                                     AUDIO_SOURCE_VOICE_COMMUNICATION, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+                                     AUDIO_SOURCE_VOICE_COMMUNICATION, 0,
+                                     "addr=remote_submix_media"}
+                )
+        );
+
+// No address priority rule for remote recording
+INSTANTIATE_TEST_CASE_P(
+        RecordInjectionSourceNotMatch,
+        AudioPolicyManagerTestDPMixRecordInjection,
+        testing::Values(
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+                                     AUDIO_SOURCE_VOICE_RECOGNITION, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+                                     AUDIO_SOURCE_HOTWORD, 0, ""},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+                                     AUDIO_SOURCE_VOICE_RECOGNITION, 0,
+                                     "addr=remote_submix_media"},
+                (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+                                     AUDIO_SOURCE_HOTWORD, 0, "addr=remote_submix_media"}
+                )
+        );
+
+using DeviceConnectionTestParams =
+        std::tuple<audio_devices_t /*type*/, std::string /*name*/, std::string /*address*/>;
+
+class AudioPolicyManagerTestDeviceConnection : public AudioPolicyManagerTestWithConfigurationFile,
+        public testing::WithParamInterface<DeviceConnectionTestParams> {
+};
+
+TEST_F(AudioPolicyManagerTestDeviceConnection, InitSuccess) {
+    // SetUp must finish with no assertions.
+}
+
+TEST_F(AudioPolicyManagerTestDeviceConnection, Dump) {
+    dumpToLog();
+}
+
+TEST_P(AudioPolicyManagerTestDeviceConnection, SetDeviceConnectionState) {
+    const audio_devices_t type = std::get<0>(GetParam());
+    const std::string name = std::get<1>(GetParam());
+    const std::string address = std::get<2>(GetParam());
+
+    if (type == AUDIO_DEVICE_OUT_HDMI) {
+        // Setting the device connection state should fail because no device descriptor is found.
+        // For the HDMI case, it is easier to simulate a "device descriptor not found" error
+        // by using an undeclared encoded format.
+        ASSERT_EQ(INVALID_OPERATION, mManager->setDeviceConnectionState(
+                type, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+                address.c_str(), name.c_str(), AUDIO_FORMAT_MAT_2_1));
+    }
+    // Connecting with valid parameters should succeed
+    ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(
+            type, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+            address.c_str(), name.c_str(), AUDIO_FORMAT_DEFAULT));
+    // Trying to connect the same device again should fail
+    ASSERT_EQ(INVALID_OPERATION, mManager->setDeviceConnectionState(
+            type, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+            address.c_str(), name.c_str(), AUDIO_FORMAT_DEFAULT));
+    // Disconnecting the connected device should succeed
+    ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(
+            type, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+            address.c_str(), name.c_str(), AUDIO_FORMAT_DEFAULT));
+    // Disconnecting a device that is not connected should fail
+    ASSERT_EQ(INVALID_OPERATION, mManager->setDeviceConnectionState(
+            type, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+            address.c_str(), name.c_str(), AUDIO_FORMAT_DEFAULT));
+    // Trying to set the device connection state with an invalid connection state should fail
+    ASSERT_EQ(BAD_VALUE, mManager->setDeviceConnectionState(
+            type, AUDIO_POLICY_DEVICE_STATE_CNT,
+            "", "", AUDIO_FORMAT_DEFAULT));
+}
+
+TEST_P(AudioPolicyManagerTestDeviceConnection, ExplicitlyRoutingAfterConnection) {
+    const audio_devices_t type = std::get<0>(GetParam());
+    const std::string name = std::get<1>(GetParam());
+    const std::string address = std::get<2>(GetParam());
+
+    // Connect the device to perform the explicit routing test
+    ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(
+            type, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+            address.c_str(), name.c_str(), AUDIO_FORMAT_DEFAULT));
+
+    audio_port devicePort;
+    const audio_port_role_t role = audio_is_output_device(type)
+            ? AUDIO_PORT_ROLE_SINK : AUDIO_PORT_ROLE_SOURCE;
+    findDevicePort(role, type, address, devicePort);
+
+    audio_port_handle_t routedPortId = devicePort.id;
+    // Try to start input or output according to the device type
+    if (audio_is_output_devices(type)) {
+        getOutputForAttr(&routedPortId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+                48000 /*sampleRate*/, AUDIO_OUTPUT_FLAG_NONE);
+    } else if (audio_is_input_device(type)) {
+        RecordingActivityTracker tracker;
+        getInputForAttr({}, tracker.getRiid(), &routedPortId, AUDIO_FORMAT_PCM_16_BIT,
+                AUDIO_CHANNEL_IN_STEREO, 48000 /*sampleRate*/, AUDIO_INPUT_FLAG_NONE);
+    }
+    ASSERT_EQ(devicePort.id, routedPortId);
+
+    ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(
+            type, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+            address.c_str(), name.c_str(), AUDIO_FORMAT_DEFAULT));
+}
+
+INSTANTIATE_TEST_CASE_P(
+        DeviceConnectionState,
+        AudioPolicyManagerTestDeviceConnection,
+        testing::Values(
+                DeviceConnectionTestParams({AUDIO_DEVICE_IN_HDMI, "test_in_hdmi",
+                                            "audio_policy_test_in_hdmi"}),
+                DeviceConnectionTestParams({AUDIO_DEVICE_OUT_HDMI, "test_out_hdmi",
+                                            "audio_policy_test_out_hdmi"}),
+                DeviceConnectionTestParams({AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, "bt_hfp_in",
+                                            "hfp_client_in"}),
+                DeviceConnectionTestParams({AUDIO_DEVICE_OUT_BLUETOOTH_SCO, "bt_hfp_out",
+                                            "hfp_client_out"})
+                )
+        );
+
+class AudioPolicyManagerTVTest : public AudioPolicyManagerTestWithConfigurationFile {
+protected:
+    std::string getConfigFile() override { return sTvConfig; }
+    void testHDMIPortSelection(audio_output_flags_t flags, const char* expectedMixPortName);
+
+    static const std::string sTvConfig;
+};
+
+const std::string AudioPolicyManagerTVTest::sTvConfig =
+        AudioPolicyManagerTVTest::sExecutableDir + "test_tv_apm_configuration.xml";
+
+// SwAudioOutputDescriptor doesn't populate flags, so check against the port name.
+void AudioPolicyManagerTVTest::testHDMIPortSelection(
+        audio_output_flags_t flags, const char* expectedMixPortName) {
+    ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(
+            AUDIO_DEVICE_OUT_AUX_DIGITAL, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+            "" /*address*/, "" /*name*/, AUDIO_FORMAT_DEFAULT));
+    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+    audio_io_handle_t output;
+    audio_port_handle_t portId;
+    getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 48000,
+            flags, &output, &portId);
+    sp<SwAudioOutputDescriptor> outDesc = mManager->getOutputs().valueFor(output);
+    ASSERT_NE(nullptr, outDesc.get());
+    audio_port port = {};
+    outDesc->toAudioPort(&port);
+    mManager->releaseOutput(portId);
+    ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(
+            AUDIO_DEVICE_OUT_AUX_DIGITAL, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+            "" /*address*/, "" /*name*/, AUDIO_FORMAT_DEFAULT));
+    ASSERT_EQ(AUDIO_PORT_TYPE_MIX, port.type);
+    ASSERT_EQ(AUDIO_PORT_ROLE_SOURCE, port.role);
+    ASSERT_STREQ(expectedMixPortName, port.name);
+}
+
+TEST_F(AudioPolicyManagerTVTest, InitSuccess) {
+    // SetUp must finish with no assertions.
+}
+
+TEST_F(AudioPolicyManagerTVTest, Dump) {
+    dumpToLog();
+}
+
+TEST_F(AudioPolicyManagerTVTest, MatchNoFlags) {
+    testHDMIPortSelection(AUDIO_OUTPUT_FLAG_NONE, "primary output");
+}
+
+TEST_F(AudioPolicyManagerTVTest, MatchOutputDirectNoHwAvSync) {
+    // b/140447125: The selected port must not have the HW AV Sync flag (see the config file).
+    testHDMIPortSelection(AUDIO_OUTPUT_FLAG_DIRECT, "direct");
+}
+
+TEST_F(AudioPolicyManagerTVTest, MatchOutputDirectHwAvSync) {
+    testHDMIPortSelection(static_cast<audio_output_flags_t>(
+                    AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_HW_AV_SYNC),
+            "tunnel");
+}
+
+TEST_F(AudioPolicyManagerTVTest, MatchOutputDirectMMapNoIrq) {
+    testHDMIPortSelection(static_cast<audio_output_flags_t>(
+                    AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_MMAP_NOIRQ),
+            "low latency");
+}
diff --git a/services/audiopolicy/tests/resources/Android.bp b/services/audiopolicy/tests/resources/Android.bp
new file mode 100644
index 0000000..d9476d9
--- /dev/null
+++ b/services/audiopolicy/tests/resources/Android.bp
@@ -0,0 +1,8 @@
+filegroup {
+    name: "audiopolicytest_configuration_files",
+    srcs: [
+        "test_audio_policy_configuration.xml",
+        "test_audio_policy_primary_only_configuration.xml",
+        "test_tv_apm_configuration.xml",
+    ],
+}
diff --git a/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml b/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
new file mode 100644
index 0000000..87f0ab9
--- /dev/null
+++ b/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
@@ -0,0 +1,111 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+    <globalConfiguration speaker_drc_enabled="true"/>
+
+    <modules>
+        <!-- Primary module -->
+        <module name="primary" halVersion="2.0">
+            <attachedDevices>
+                <item>Speaker</item>
+                <item>Built-In Mic</item>
+            </attachedDevices>
+            <defaultOutputDevice>Speaker</defaultOutputDevice>
+            <mixPorts>
+                <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </mixPort>
+                <mixPort name="primary input" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000"
+                             channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+                </mixPort>
+                <mixPort name="mixport_bt_hfp_output" role="source">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </mixPort>
+                <mixPort name="mixport_bt_hfp_input" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,11025,16000,44100,48000"
+                             channelMasks="AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_MONO"/>
+                </mixPort>
+            </mixPorts>
+            <devicePorts>
+                <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink">
+                </devicePort>
+                <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
+                </devicePort>
+                <devicePort tagName="Hdmi" type="AUDIO_DEVICE_OUT_HDMI" role="sink">
+                </devicePort>
+                <devicePort tagName="Hdmi-In Mic" type="AUDIO_DEVICE_IN_HDMI" role="source">
+                </devicePort>
+                <devicePort tagName="BT SCO" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO"
+                            role="sink" address="hfp_client_out">
+                </devicePort>
+                <devicePort tagName="BT SCO Headset Mic" type="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET"
+                            role="source" address="hfp_client_in">
+                </devicePort>
+            </devicePorts>
+            <routes>
+                <route type="mix" sink="Speaker"
+                       sources="primary output"/>
+                <route type="mix" sink="primary input"
+                       sources="Built-In Mic,Hdmi-In Mic"/>
+                <route type="mix" sink="Hdmi"
+                       sources="primary output"/>
+                <route type="mix" sink="BT SCO"
+                       sources="mixport_bt_hfp_output"/>
+                <route type="mix" sink="mixport_bt_hfp_input"
+                       sources="BT SCO Headset Mic"/>
+            </routes>
+        </module>
+
+        <!-- Remote Submix module -->
+        <module name="r_submix" halVersion="2.0">
+            <attachedDevices>
+                <item>Remote Submix In</item>
+            </attachedDevices>
+            <mixPorts>
+                <mixPort name="r_submix output" role="source">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </mixPort>
+                <mixPort name="r_submix input" role="sink">
+                   <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                            samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+                </mixPort>
+           </mixPorts>
+           <devicePorts>
+               <devicePort tagName="Remote Submix Out" type="AUDIO_DEVICE_OUT_REMOTE_SUBMIX"  role="sink">
+                   <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                            samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+               </devicePort>
+               <devicePort tagName="Remote Submix In" type="AUDIO_DEVICE_IN_REMOTE_SUBMIX"  role="source">
+                   <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                            samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+                </devicePort>
+            </devicePorts>
+            <routes>
+                <route type="mix" sink="Remote Submix Out"
+                       sources="r_submix output"/>
+                <route type="mix" sink="r_submix input"
+                       sources="Remote Submix In"/>
+            </routes>
+        </module>
+    </modules>
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/tests/resources/test_audio_policy_primary_only_configuration.xml b/services/audiopolicy/tests/resources/test_audio_policy_primary_only_configuration.xml
new file mode 100644
index 0000000..edc0adb
--- /dev/null
+++ b/services/audiopolicy/tests/resources/test_audio_policy_primary_only_configuration.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+    <globalConfiguration speaker_drc_enabled="true"/>
+
+    <modules>
+        <!-- Primary module -->
+        <module name="primary" halVersion="2.0">
+            <attachedDevices>
+                <item>Speaker</item>
+                <item>Built-In Mic</item>
+            </attachedDevices>
+            <defaultOutputDevice>Speaker</defaultOutputDevice>
+            <mixPorts>
+                <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </mixPort>
+                <mixPort name="primary input" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000"
+                             channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+                </mixPort>
+            </mixPorts>
+            <devicePorts>
+                <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink">
+                </devicePort>
+                <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
+                </devicePort>
+            </devicePorts>
+            <routes>
+                <route type="mix" sink="Speaker"
+                       sources="primary output"/>
+                <route type="mix" sink="primary input"
+                       sources="Built-In Mic"/>
+            </routes>
+        </module>
+    </modules>
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/tests/resources/test_tv_apm_configuration.xml b/services/audiopolicy/tests/resources/test_tv_apm_configuration.xml
new file mode 100644
index 0000000..f1638f3
--- /dev/null
+++ b/services/audiopolicy/tests/resources/test_tv_apm_configuration.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+    <globalConfiguration speaker_drc_enabled="false"/>
+    <modules>
+        <module name="primary" halVersion="2.0">
+            <attachedDevices>
+                <item>Speaker</item>
+            </attachedDevices>
+            <defaultOutputDevice>Speaker</defaultOutputDevice>
+            <mixPorts>
+                <!-- Profiles on the HDMI port are explicit for simplicity. In reality they are dynamic -->
+                <!-- Note: ports are intentionally arranged from more specific to less
+                     specific in order to test b/140447125 for HW AV Sync, and similar "explicit matches" -->
+                <mixPort name="tunnel" role="source"
+                         flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_HW_AV_SYNC">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </mixPort>
+                <mixPort name="low latency" role="source"
+                         flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_MMAP_NOIRQ">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </mixPort>
+                <mixPort name="direct" role="source" flags="AUDIO_OUTPUT_FLAG_DIRECT">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </mixPort>
+                <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </mixPort>
+           </mixPorts>
+           <devicePorts>
+                <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink" />
+                <devicePort tagName="Out Aux Digital" type="AUDIO_DEVICE_OUT_AUX_DIGITAL" role="sink" />
+            </devicePorts>
+            <routes>
+                <route type="mix" sink="Speaker" sources="primary output"/>
+                <route type="mix" sink="Out Aux Digital" sources="primary output,tunnel,direct,low latency"/>
+            </routes>
+        </module>
+    </modules>
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/tests/systemaudio_tests.cpp b/services/audiopolicy/tests/systemaudio_tests.cpp
index abaae52..1e4d07f 100644
--- a/services/audiopolicy/tests/systemaudio_tests.cpp
+++ b/services/audiopolicy/tests/systemaudio_tests.cpp
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include <unordered_set>
+
 #include <gtest/gtest.h>
 
 #define LOG_TAG "SysAudio_Test"
@@ -115,3 +117,81 @@
                     (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch(),
                     (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch()));
 }
+
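+// Helper: verifies that device_type_helper_function returns true for exactly the device
+// types listed in targetDevices (of length targetDeviceCount) and false for every other
+// type in allDevices; deviceTag is only used to label assertion failure messages.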
+void runAudioDeviceTypeHelperFunction(const std::unordered_set<audio_devices_t>& allDevices,
+                                      const audio_devices_t targetDevices[],
+                                      unsigned int targetDeviceCount,
+                                      const std::string& deviceTag,
+                                      bool (*device_type_helper_function)(audio_devices_t))
+{
+    std::unordered_set<audio_devices_t> devices(targetDevices, targetDevices + targetDeviceCount);
+    for (auto device : allDevices) {
+        if (devices.find(device) == devices.end()) {
+            ASSERT_FALSE(device_type_helper_function(device))
+                    << std::hex << device << " should not be " << deviceTag << " device";
+        } else {
+            ASSERT_TRUE(device_type_helper_function(device))
+                    << std::hex << device << " should be " << deviceTag << " device";
+        }
+    }
+}
+
+TEST(SystemAudioTest, AudioDeviceTypeHelperFunction) {
+    std::unordered_set<audio_devices_t> allDeviceTypes;
+    allDeviceTypes.insert(std::begin(AUDIO_DEVICE_OUT_ALL_ARRAY),
+            std::end(AUDIO_DEVICE_OUT_ALL_ARRAY));
+    allDeviceTypes.insert(std::begin(AUDIO_DEVICE_IN_ALL_ARRAY),
+            std::end(AUDIO_DEVICE_IN_ALL_ARRAY));
+
+    runAudioDeviceTypeHelperFunction(allDeviceTypes, AUDIO_DEVICE_OUT_ALL_ARRAY,
+            std::size(AUDIO_DEVICE_OUT_ALL_ARRAY), "output", audio_is_output_device);
+    runAudioDeviceTypeHelperFunction(allDeviceTypes, AUDIO_DEVICE_IN_ALL_ARRAY,
+            std::size(AUDIO_DEVICE_IN_ALL_ARRAY), "input", audio_is_input_device);
+    runAudioDeviceTypeHelperFunction(allDeviceTypes, AUDIO_DEVICE_OUT_ALL_A2DP_ARRAY,
+            std::size(AUDIO_DEVICE_OUT_ALL_A2DP_ARRAY), "a2dp out", audio_is_a2dp_out_device);
+    const audio_devices_t bluetoothInA2dpDevices[] = { AUDIO_DEVICE_IN_BLUETOOTH_A2DP };
+    runAudioDeviceTypeHelperFunction(allDeviceTypes, bluetoothInA2dpDevices,
+            std::size(bluetoothInA2dpDevices), "a2dp in", audio_is_a2dp_in_device);
+    runAudioDeviceTypeHelperFunction(allDeviceTypes, AUDIO_DEVICE_OUT_ALL_SCO_ARRAY,
+            std::size(AUDIO_DEVICE_OUT_ALL_SCO_ARRAY), "bluetooth out sco",
+            audio_is_bluetooth_out_sco_device);
+    runAudioDeviceTypeHelperFunction(allDeviceTypes, AUDIO_DEVICE_IN_ALL_SCO_ARRAY,
+            std::size(AUDIO_DEVICE_IN_ALL_SCO_ARRAY), "bluetooth in sco",
+            audio_is_bluetooth_in_sco_device);
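+    // Concatenate the SCO output and input device arrays to exercise the combined
+    // audio_is_bluetooth_sco_device() helper below.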
+    const unsigned int scoDeviceCount = AUDIO_DEVICE_OUT_SCO_CNT + AUDIO_DEVICE_IN_SCO_CNT;
+    audio_devices_t scoDevices[scoDeviceCount];
+    std::copy(std::begin(AUDIO_DEVICE_OUT_ALL_SCO_ARRAY), std::end(AUDIO_DEVICE_OUT_ALL_SCO_ARRAY),
+              std::begin(scoDevices));
+    std::copy(std::begin(AUDIO_DEVICE_IN_ALL_SCO_ARRAY), std::end(AUDIO_DEVICE_IN_ALL_SCO_ARRAY),
+              std::begin(scoDevices) + AUDIO_DEVICE_OUT_SCO_CNT);
+    runAudioDeviceTypeHelperFunction(allDeviceTypes, scoDevices,
+            std::size(scoDevices), "bluetooth sco", audio_is_bluetooth_sco_device);
+    const audio_devices_t hearingAidOutDevices[] = { AUDIO_DEVICE_OUT_HEARING_AID };
+    runAudioDeviceTypeHelperFunction(allDeviceTypes, hearingAidOutDevices,
+            std::size(hearingAidOutDevices), "hearing aid out", audio_is_hearing_aid_out_device);
+    runAudioDeviceTypeHelperFunction(allDeviceTypes, AUDIO_DEVICE_OUT_ALL_USB_ARRAY,
+            std::size(AUDIO_DEVICE_OUT_ALL_USB_ARRAY), "usb out", audio_is_usb_out_device);
+    runAudioDeviceTypeHelperFunction(allDeviceTypes, AUDIO_DEVICE_IN_ALL_USB_ARRAY,
+            std::size(AUDIO_DEVICE_IN_ALL_USB_ARRAY), "usb in", audio_is_usb_in_device);
+    const audio_devices_t remoteSubmixDevices[] = {
+            AUDIO_DEVICE_IN_REMOTE_SUBMIX, AUDIO_DEVICE_OUT_REMOTE_SUBMIX };
+    runAudioDeviceTypeHelperFunction(allDeviceTypes, remoteSubmixDevices,
+            std::size(remoteSubmixDevices), "remote submix", audio_is_remote_submix_device);
+    runAudioDeviceTypeHelperFunction(allDeviceTypes, AUDIO_DEVICE_OUT_ALL_DIGITAL_ARRAY,
+            std::size(AUDIO_DEVICE_OUT_ALL_DIGITAL_ARRAY), "digital out",
+            audio_is_digital_out_device);
+    runAudioDeviceTypeHelperFunction(allDeviceTypes, AUDIO_DEVICE_IN_ALL_DIGITAL_ARRAY,
+            std::size(AUDIO_DEVICE_IN_ALL_DIGITAL_ARRAY), "digital in",
+            audio_is_digital_in_device);
+    const unsigned int digitalDeviceCount
+            = AUDIO_DEVICE_OUT_DIGITAL_CNT + AUDIO_DEVICE_IN_DIGITAL_CNT;
+    audio_devices_t digitalDevices[digitalDeviceCount];
+    std::copy(std::begin(AUDIO_DEVICE_OUT_ALL_DIGITAL_ARRAY),
+              std::end(AUDIO_DEVICE_OUT_ALL_DIGITAL_ARRAY),
+              std::begin(digitalDevices));
+    std::copy(std::begin(AUDIO_DEVICE_IN_ALL_DIGITAL_ARRAY),
+              std::end(AUDIO_DEVICE_IN_ALL_DIGITAL_ARRAY),
+              std::begin(digitalDevices) + AUDIO_DEVICE_OUT_DIGITAL_CNT);
+    runAudioDeviceTypeHelperFunction(allDeviceTypes, digitalDevices,
+              std::size(digitalDevices), "digital", audio_device_is_digital);
+}
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 1c1f5e6..b26398e 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -86,9 +86,7 @@
         "libfmq",
         "libgui",
         "libhardware",
-        "libhwbinder",
         "libhidlbase",
-        "libhidltransport",
         "libjpeg",
         "libmedia_omx",
         "libmemunreachable",
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index a4868bf..94642f2 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -117,7 +117,12 @@
 
 // ----------------------------------------------------------------------------
 
+static const String16 sDumpPermission("android.permission.DUMP");
 static const String16 sManageCameraPermission("android.permission.MANAGE_CAMERA");
+static const String16 sCameraPermission("android.permission.CAMERA");
+static const String16 sSystemCameraPermission("android.permission.SYSTEM_CAMERA");
+static const String16
+        sCameraSendSystemEventsPermission("android.permission.CAMERA_SEND_SYSTEM_EVENTS");
 
 // Matches with PERCEPTIBLE_APP_ADJ in ProcessList.java
 static constexpr int32_t kVendorClientScore = 200;
@@ -130,7 +135,9 @@
 CameraService::CameraService() :
         mEventLog(DEFAULT_EVENT_LOG_LENGTH),
         mNumberOfCameras(0),
-        mSoundRef(0), mInitialized(false) {
+        mNumberOfCamerasWithoutSystemCamera(0),
+        mSoundRef(0), mInitialized(false),
+        mAudioRestriction(hardware::camera2::ICameraDeviceUser::AUDIO_RESTRICTION_NONE) {
     ALOGI("CameraService started (pid=%d)", getpid());
     mServiceLockWrapper = std::make_shared<WaitableMutexWrapper>(&mServiceLock);
 }
@@ -153,17 +160,21 @@
         mInitialized = true;
     }
 
-    CameraService::pingCameraServiceProxy();
-
     mUidPolicy = new UidPolicy(this);
     mUidPolicy->registerSelf();
     mSensorPrivacyPolicy = new SensorPrivacyPolicy(this);
     mSensorPrivacyPolicy->registerSelf();
+    mAppOps.setCameraAudioRestriction(mAudioRestriction);
     sp<HidlCameraService> hcs = HidlCameraService::getInstance(this);
     if (hcs->registerAsService() != android::OK) {
         ALOGE("%s: Failed to register default android.frameworks.cameraservice.service@1.0",
               __FUNCTION__);
     }
+
+    // This needs to be the last call in this function, so that it's as close to
+    // ServiceManager::addService() as possible.
+    CameraService::pingCameraServiceProxy();
+    ALOGI("CameraService pinged cameraservice proxy");
 }
 
 status_t CameraService::enumerateProviders() {
@@ -239,7 +250,7 @@
     Mutex::Autolock lock(mStatusListenerLock);
 
     for (auto& i : mListenerList) {
-        i.second->getListener()->onTorchStatusChanged(mapToInterface(status), String16{cameraId});
+        i->getListener()->onTorchStatusChanged(mapToInterface(status), String16{cameraId});
     }
 }
 
@@ -253,11 +264,32 @@
     enumerateProviders();
 }
 
+void CameraService::filterAPI1SystemCameraLocked(
+        const std::vector<std::string> &normalDeviceIds) {
+    mNormalDeviceIdsWithoutSystemCamera.clear();
+    for (auto &deviceId : normalDeviceIds) {
+        if (getSystemCameraKind(String8(deviceId.c_str())) ==
+                SystemCameraKind::SYSTEM_ONLY_CAMERA) {
+            // All system camera ids will necessarily come after public camera
+            // device ids as per the HAL interface contract.
+            break;
+        }
+        mNormalDeviceIdsWithoutSystemCamera.push_back(deviceId);
+    }
+    ALOGV("%s: number of API1 compatible public cameras is %zu", __FUNCTION__,
+              mNormalDeviceIdsWithoutSystemCamera.size());
+}
+
 void CameraService::updateCameraNumAndIds() {
     Mutex::Autolock l(mServiceLock);
-    mNumberOfCameras = mCameraProviderManager->getCameraCount();
+    std::pair<int, int> systemAndNonSystemCameras = mCameraProviderManager->getCameraCount();
+    // Excludes hidden secure cameras
+    mNumberOfCameras =
+            systemAndNonSystemCameras.first + systemAndNonSystemCameras.second;
+    mNumberOfCamerasWithoutSystemCamera = systemAndNonSystemCameras.second;
     mNormalDeviceIds =
             mCameraProviderManager->getAPI1CompatibleCameraDeviceIds();
+    filterAPI1SystemCameraLocked(mNormalDeviceIds);
 }
 
 void CameraService::addStates(const String8 id) {
@@ -439,15 +471,31 @@
     broadcastTorchModeStatus(cameraId, newStatus);
 }
 
+static bool hasPermissionsForSystemCamera(int callingPid, int callingUid) {
+    return checkPermission(sSystemCameraPermission, callingPid, callingUid) &&
+            checkPermission(sCameraPermission, callingPid, callingUid);
+}
+
 Status CameraService::getNumberOfCameras(int32_t type, int32_t* numCameras) {
     ATRACE_CALL();
     Mutex::Autolock l(mServiceLock);
+    bool hasSystemCameraPermissions =
+            hasPermissionsForSystemCamera(CameraThreadState::getCallingPid(),
+                    CameraThreadState::getCallingUid());
     switch (type) {
         case CAMERA_TYPE_BACKWARD_COMPATIBLE:
-            *numCameras = static_cast<int>(mNormalDeviceIds.size());
+            if (hasSystemCameraPermissions) {
+                *numCameras = static_cast<int>(mNormalDeviceIds.size());
+            } else {
+                *numCameras = static_cast<int>(mNormalDeviceIdsWithoutSystemCamera.size());
+            }
             break;
         case CAMERA_TYPE_ALL:
-            *numCameras = mNumberOfCameras;
+            if (hasSystemCameraPermissions) {
+                *numCameras = mNumberOfCameras;
+            } else {
+                *numCameras = mNumberOfCamerasWithoutSystemCamera;
+            }
             break;
         default:
             ALOGW("%s: Unknown camera type %d",
@@ -462,20 +510,31 @@
         CameraInfo* cameraInfo) {
     ATRACE_CALL();
     Mutex::Autolock l(mServiceLock);
+    std::string cameraIdStr = cameraIdIntToStrLocked(cameraId);
+    if (shouldRejectSystemCameraConnection(String8(cameraIdStr.c_str()))) {
+        return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION, "Unable to retrieve camera "
+                "characteristics for system only device %s: ", cameraIdStr.c_str());
+    }
 
     if (!mInitialized) {
         return STATUS_ERROR(ERROR_DISCONNECTED,
                 "Camera subsystem is not available");
     }
-
-    if (cameraId < 0 || cameraId >= mNumberOfCameras) {
+    bool hasSystemCameraPermissions =
+            hasPermissionsForSystemCamera(CameraThreadState::getCallingPid(),
+                    CameraThreadState::getCallingUid());
+    int cameraIdBound = mNumberOfCamerasWithoutSystemCamera;
+    if (hasSystemCameraPermissions) {
+        cameraIdBound = mNumberOfCameras;
+    }
+    if (cameraId < 0 || cameraId >= cameraIdBound) {
         return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT,
                 "CameraId is not valid");
     }
 
     Status ret = Status::ok();
     status_t err = mCameraProviderManager->getCameraInfo(
-            cameraIdIntToStrLocked(cameraId), cameraInfo);
+            cameraIdStr.c_str(), cameraInfo);
     if (err != OK) {
         ret = STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
                 "Error retrieving camera info from device %d: %s (%d)", cameraId,
@@ -486,13 +545,20 @@
 }
 
 std::string CameraService::cameraIdIntToStrLocked(int cameraIdInt) {
-    if (cameraIdInt < 0 || cameraIdInt >= static_cast<int>(mNormalDeviceIds.size())) {
+    const std::vector<std::string> *deviceIds = &mNormalDeviceIdsWithoutSystemCamera;
+    auto callingPid = CameraThreadState::getCallingPid();
+    auto callingUid = CameraThreadState::getCallingUid();
+    if (checkPermission(sSystemCameraPermission, callingPid, callingUid) ||
+            getpid() == callingPid) {
+        deviceIds = &mNormalDeviceIds;
+    }
+    if (cameraIdInt < 0 || cameraIdInt >= static_cast<int>(deviceIds->size())) {
         ALOGE("%s: input id %d invalid: valid range  (0, %zu)",
-                __FUNCTION__, cameraIdInt, mNormalDeviceIds.size());
+                __FUNCTION__, cameraIdInt, deviceIds->size());
         return std::string{};
     }
 
-    return mNormalDeviceIds[cameraIdInt];
+    return (*deviceIds)[cameraIdInt];
 }
 
 String8 CameraService::cameraIdIntToStr(int cameraIdInt) {
@@ -514,6 +580,11 @@
                 "Camera subsystem is not available");;
     }
 
+    if (shouldRejectSystemCameraConnection(String8(cameraId))) {
+        return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION, "Unable to retrieve camera "
+                "characteristics for system only device %s: ", String8(cameraId).string());
+    }
+
     Status ret{};
 
     status_t res = mCameraProviderManager->getCameraCharacteristics(
@@ -527,9 +598,12 @@
     int callingPid = CameraThreadState::getCallingPid();
     int callingUid = CameraThreadState::getCallingUid();
     std::vector<int32_t> tagsRemoved;
-    // If it's not calling from cameraserver, check the permission.
+    // If the caller is not the cameraserver, check the permission only if
+    // android.permission.CAMERA is required. If android.permission.SYSTEM_CAMERA were needed,
+    // it would have already been checked in shouldRejectSystemCameraConnection.
     if ((callingPid != getpid()) &&
-            !checkPermission(String16("android.permission.CAMERA"), callingPid, callingUid)) {
+            (getSystemCameraKind(String8(cameraId)) != SystemCameraKind::SYSTEM_ONLY_CAMERA) &&
+            !checkPermission(sCameraPermission, callingPid, callingUid)) {
         res = cameraInfo->removePermissionEntries(
                 mCameraProviderManager->getProviderTagIdLocked(String8(cameraId).string()),
                 &tagsRemoved);
@@ -969,9 +1043,18 @@
                 clientName8.string(), clientUid, clientPid);
     }
 
-    // If it's not calling from cameraserver, check the permission.
+    if (shouldRejectSystemCameraConnection(cameraId)) {
+        ALOGW("Attempting to connect to system-only camera id %s, connection rejected",
+                cameraId.c_str());
+        return STATUS_ERROR_FMT(ERROR_DISCONNECTED, "No camera device with ID \"%s\" is"
+                                "available", cameraId.string());
+    }
+    // If the caller is not the cameraserver, check the permission unless the
+    // device is a system-only camera (shouldRejectSystemCameraConnection already checks
+    // android.permission.SYSTEM_CAMERA for system-only camera devices).
     if (callingPid != getpid() &&
-            !checkPermission(String16("android.permission.CAMERA"), clientPid, clientUid)) {
+                (getSystemCameraKind(cameraId) != SystemCameraKind::SYSTEM_ONLY_CAMERA) &&
+                !checkPermission(sCameraPermission, clientPid, clientUid)) {
         ALOGE("Permission Denial: can't use the camera pid=%d, uid=%d", clientPid, clientUid);
         return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
                 "Caller \"%s\" (PID %d, UID %d) cannot open camera \"%s\" without camera permission",
@@ -1324,18 +1407,61 @@
     return ret;
 }
 
-bool CameraService::shouldRejectHiddenCameraConnection(const String8 & cameraId) {
-    // If the thread serving this call is not a hwbinder thread and the caller
-    // isn't the cameraserver itself, and the camera id being requested is to be
-    // publically hidden, we should reject the connection.
-    if (!hardware::IPCThreadState::self()->isServingCall() &&
-            CameraThreadState::getCallingPid() != getpid() &&
-            mCameraProviderManager->isPublicallyHiddenSecureCamera(cameraId.c_str())) {
+bool CameraService::shouldSkipStatusUpdates(const String8& cameraId, bool isVendorListener,
+        int clientPid, int clientUid) const {
+    SystemCameraKind systemCameraKind = getSystemCameraKind(cameraId);
+    // If the client is not a vendor client, don't add the listener if
+    //   a) the camera is a publicly hidden secure camera, OR
+    //   b) the camera is a system-only camera and the client doesn't
+    //      have the android.permission.SYSTEM_CAMERA permission.
+    if (!isVendorListener && (systemCameraKind == SystemCameraKind::HIDDEN_SECURE_CAMERA ||
+            (systemCameraKind == SystemCameraKind::SYSTEM_ONLY_CAMERA &&
+            !hasPermissionsForSystemCamera(clientPid, clientUid)))) {
         return true;
     }
     return false;
 }
 
+bool CameraService::shouldRejectSystemCameraConnection(const String8& cameraId) const {
+    // Rules for rejection:
+    // 1) If the cameraserver itself tries to access this camera device, accept the
+    //    connection.
+    // 2) If the camera device is a publicly hidden secure camera device AND some
+    //    component is trying to access it on a non-hwbinder thread (generally a non-HAL client),
+    //    reject it.
+    // 3) If the camera device is advertised by the camera HAL as SYSTEM_ONLY
+    //    and the serving thread is a non-hwbinder thread, the client must hold
+    //    android.permission.SYSTEM_CAMERA to connect.
+
+    int cPid = CameraThreadState::getCallingPid();
+    int cUid = CameraThreadState::getCallingUid();
+    SystemCameraKind systemCameraKind = getSystemCameraKind(cameraId);
+
+    // (1) Cameraserver trying to connect, accept.
+    if (CameraThreadState::getCallingPid() == getpid()) {
+        return false;
+    }
+    // (2)
+    if (!hardware::IPCThreadState::self()->isServingCall() &&
+            systemCameraKind == SystemCameraKind::HIDDEN_SECURE_CAMERA) {
+        ALOGW("Rejecting access to secure hidden camera %s", cameraId.c_str());
+        return true;
+    }
+    // (3) Here we only check for permissions if it is a system-only camera device. This is because
+    //     getCameraCharacteristics() allows calls to succeed (albeit after hiding some
+    //     characteristics) even if clients don't have android.permission.CAMERA. We do not want the
+    //     same behavior for system camera devices.
+    if (!hardware::IPCThreadState::self()->isServingCall() &&
+            systemCameraKind == SystemCameraKind::SYSTEM_ONLY_CAMERA &&
+            !hasPermissionsForSystemCamera(cPid, cUid)) {
+        ALOGW("Rejecting access to system only camera %s, inadequete permissions",
+                cameraId.c_str());
+        return true;
+    }
+
+    return false;
+}
+
 Status CameraService::connectDevice(
         const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb,
         const String16& cameraId,
@@ -1385,14 +1511,6 @@
             (halVersion == -1) ? "default" : std::to_string(halVersion).c_str(),
             static_cast<int>(effectiveApiLevel));
 
-    if (shouldRejectHiddenCameraConnection(cameraId)) {
-        ALOGW("Attempting to connect to system-only camera id %s, connection rejected",
-              cameraId.c_str());
-        return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
-                                "No camera device with ID \"%s\" currently available",
-                                cameraId.string());
-
-    }
     sp<CLIENT> client = nullptr;
     {
         // Acquire mServiceLock and prevent other clients from connecting
@@ -1668,8 +1786,7 @@
     if (pid != selfPid) {
         // Ensure we're being called by system_server, or similar process with
         // permissions to notify the camera service about system events
-        if (!checkCallingPermission(
-                String16("android.permission.CAMERA_SEND_SYSTEM_EVENTS"))) {
+        if (!checkCallingPermission(sCameraSendSystemEventsPermission)) {
             const int uid = CameraThreadState::getCallingUid();
             ALOGE("Permission Denial: cannot send updates to camera service about system"
                     " events from pid=%d, uid=%d", pid, uid);
@@ -1704,7 +1821,7 @@
     Mutex::Autolock lock(mStatusListenerLock);
 
     for (const auto& it : mListenerList) {
-        auto ret = it.second->getListener()->onCameraAccessPrioritiesChanged();
+        auto ret = it->getListener()->onCameraAccessPrioritiesChanged();
         if (!ret.isOk()) {
             ALOGE("%s: Failed to trigger permission callback: %d", __FUNCTION__,
                     ret.exceptionCode());
@@ -1720,8 +1837,7 @@
     if (pid != selfPid) {
         // Ensure we're being called by system_server, or similar process with
         // permissions to notify the camera service about system events
-        if (!checkCallingPermission(
-                String16("android.permission.CAMERA_SEND_SYSTEM_EVENTS"))) {
+        if (!checkCallingPermission(sCameraSendSystemEventsPermission)) {
             const int uid = CameraThreadState::getCallingUid();
             ALOGE("Permission Denial: cannot send updates to camera service about device"
                     " state changes from pid=%d, uid=%d", pid, uid);
@@ -1775,20 +1891,23 @@
         return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Null listener given to addListener");
     }
 
+    auto clientUid = CameraThreadState::getCallingUid();
+    auto clientPid = CameraThreadState::getCallingPid();
+
     Mutex::Autolock lock(mServiceLock);
 
     {
         Mutex::Autolock lock(mStatusListenerLock);
         for (const auto &it : mListenerList) {
-            if (IInterface::asBinder(it.second->getListener()) == IInterface::asBinder(listener)) {
+            if (IInterface::asBinder(it->getListener()) == IInterface::asBinder(listener)) {
                 ALOGW("%s: Tried to add listener %p which was already subscribed",
                       __FUNCTION__, listener.get());
                 return STATUS_ERROR(ERROR_ALREADY_EXISTS, "Listener already registered");
             }
         }
 
-        auto clientUid = CameraThreadState::getCallingUid();
-        sp<ServiceListener> serviceListener = new ServiceListener(this, listener, clientUid);
+        sp<ServiceListener> serviceListener =
+                new ServiceListener(this, listener, clientUid, clientPid, isVendorListener);
         auto ret = serviceListener->initialize();
         if (ret != NO_ERROR) {
             String8 msg = String8::format("Failed to initialize service listener: %s (%d)",
@@ -1796,7 +1915,10 @@
             ALOGE("%s: %s", __FUNCTION__, msg.string());
             return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
         }
-        mListenerList.emplace_back(isVendorListener, serviceListener);
+        // The listener still needs to be added to the list of listeners, regardless of what
+        // permissions the listener process has or whether it is a vendor listener, since it
+        // might be eligible to listen to other camera ids.
+        mListenerList.emplace_back(serviceListener);
         mUidPolicy->registerMonitorUid(clientUid);
     }
 
@@ -1804,8 +1926,7 @@
     {
         Mutex::Autolock lock(mCameraStatesLock);
         for (auto& i : mCameraStates) {
-            if (!isVendorListener &&
-                mCameraProviderManager->isPublicallyHiddenSecureCamera(i.first.c_str())) {
+            if (shouldSkipStatusUpdates(i.first, isVendorListener, clientPid, clientUid)) {
                 ALOGV("Cannot add public listener for hidden system-only %s for pid %d",
                       i.first.c_str(), CameraThreadState::getCallingPid());
                 continue;
@@ -1844,9 +1965,9 @@
     {
         Mutex::Autolock lock(mStatusListenerLock);
         for (auto it = mListenerList.begin(); it != mListenerList.end(); it++) {
-            if (IInterface::asBinder(it->second->getListener()) == IInterface::asBinder(listener)) {
-                mUidPolicy->unregisterMonitorUid(it->second->getListenerUid());
-                IInterface::asBinder(listener)->unlinkToDeath(it->second);
+            if (IInterface::asBinder((*it)->getListener()) == IInterface::asBinder(listener)) {
+                mUidPolicy->unregisterMonitorUid((*it)->getListenerUid());
+                IInterface::asBinder(listener)->unlinkToDeath(*it);
                 mListenerList.erase(it);
                 return Status::ok();
             }
@@ -1962,6 +2083,7 @@
             mActiveClientManager.remove(i);
         }
     }
+    updateAudioRestrictionLocked();
 }
 
 bool CameraService::evictClientIdByRemote(const wp<IBinder>& remote) {
@@ -2335,6 +2457,7 @@
         mClientPackageName(clientPackageName), mClientPid(clientPid), mClientUid(clientUid),
         mServicePid(servicePid),
         mDisconnected(false),
+        mAudioRestriction(hardware::camera2::ICameraDeviceUser::AUDIO_RESTRICTION_NONE),
         mRemoteBinder(remoteCallback)
 {
     if (sCameraService == nullptr) {
@@ -2438,6 +2561,35 @@
     return level == API_2;
 }
 
+status_t CameraService::BasicClient::setAudioRestriction(int32_t mode) {
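+    // Record this client's restriction under its own lock, then recompute the
+    // service-wide aggregate after releasing it (updateAudioRestriction takes mServiceLock).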
+    {
+        Mutex::Autolock l(mAudioRestrictionLock);
+        mAudioRestriction = mode;
+    }
+    sCameraService->updateAudioRestriction();
+    return OK;
+}
+
+int32_t CameraService::BasicClient::getServiceAudioRestriction() const {
+    return sCameraService->updateAudioRestriction();
+}
+
+int32_t CameraService::BasicClient::getAudioRestriction() const {
+    Mutex::Autolock l(mAudioRestrictionLock);
+    return mAudioRestriction;
+}
+
+bool CameraService::BasicClient::isValidAudioRestriction(int32_t mode) {
+    switch (mode) {
+        case hardware::camera2::ICameraDeviceUser::AUDIO_RESTRICTION_NONE:
+        case hardware::camera2::ICameraDeviceUser::AUDIO_RESTRICTION_VIBRATION:
+        case hardware::camera2::ICameraDeviceUser::AUDIO_RESTRICTION_VIBRATION_SOUND:
+            return true;
+        default:
+            return false;
+    }
+}
+
 status_t CameraService::BasicClient::startCameraOps() {
     ATRACE_CALL();
 
@@ -3029,7 +3181,7 @@
 status_t CameraService::dump(int fd, const Vector<String16>& args) {
     ATRACE_CALL();
 
-    if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
+    if (checkCallingPermission(sDumpPermission) == false) {
         dprintf(fd, "Permission Denial: can't dump CameraService from pid=%d, uid=%d\n",
                 CameraThreadState::getCallingPid(),
                 CameraThreadState::getCallingUid());
@@ -3053,6 +3205,8 @@
     dprintf(fd, "\n== Service global info: ==\n\n");
     dprintf(fd, "Number of camera devices: %d\n", mNumberOfCameras);
     dprintf(fd, "Number of normal camera devices: %zu\n", mNormalDeviceIds.size());
+    dprintf(fd, "Number of public camera devices visible to API1: %zu\n",
+            mNormalDeviceIdsWithoutSystemCamera.size());
     for (size_t i = 0; i < mNormalDeviceIds.size(); i++) {
         dprintf(fd, "    Device %zu maps to \"%s\"\n", i, mNormalDeviceIds[i].c_str());
     }
@@ -3261,13 +3415,13 @@
             Mutex::Autolock lock(mStatusListenerLock);
 
             for (auto& listener : mListenerList) {
-                if (!listener.first &&
-                    mCameraProviderManager->isPublicallyHiddenSecureCamera(cameraId.c_str())) {
+                if (shouldSkipStatusUpdates(cameraId, listener->isVendorListener(),
+                        listener->getListenerPid(), listener->getListenerUid())) {
                     ALOGV("Skipping camera discovery callback for system-only camera %s",
-                          cameraId.c_str());
+                            cameraId.c_str());
                     continue;
                 }
-                listener.second->getListener()->onStatusChanged(mapToInterface(status),
+                listener->getListener()->onStatusChanged(mapToInterface(status),
                         String16(cameraId));
             }
         });
@@ -3467,4 +3621,25 @@
         "  help print this message\n");
 }
 
+int32_t CameraService::updateAudioRestriction() {
+    Mutex::Autolock lock(mServiceLock);
+    return updateAudioRestrictionLocked();
+}
+
+int32_t CameraService::updateAudioRestrictionLocked() {
+    int32_t mode = 0;
+    // Iterate through all active clients.
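+    // The aggregate mode is the bitwise OR of every active client's requested
+    // restriction, so the most restrictive combination in effect wins.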
+    for (const auto& i : mActiveClientManager.getAll()) {
+        const auto clientSp = i->getValue();
+        mode |= clientSp->getAudioRestriction();
+    }
+
+    bool modeChanged = (mAudioRestriction != mode);
+    mAudioRestriction = mode;
+    if (modeChanged) {
+        mAppOps.setCameraAudioRestriction(mode);
+    }
+    return mode;
+}
+
 }; // namespace android
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index cf93a41..058d57e 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -258,6 +258,19 @@
 
         // Block the client form using the camera
         virtual void block();
+
+        // set audio restriction from client
+        // Will call into camera service and hold mServiceLock
+        virtual status_t setAudioRestriction(int32_t mode);
+
+        // Get current global audio restriction setting
+        // Will call into camera service and hold mServiceLock
+        virtual int32_t getServiceAudioRestriction() const;
+
+        // Get current audio restriction setting for this client
+        virtual int32_t getAudioRestriction() const;
+
+        static bool isValidAudioRestriction(int32_t mode);
     protected:
         BasicClient(const sp<CameraService>& cameraService,
                 const sp<IBinder>& remoteCallback,
@@ -286,6 +299,9 @@
         const pid_t                     mServicePid;
         bool                            mDisconnected;
 
+        mutable Mutex                   mAudioRestrictionLock;
+        int32_t                         mAudioRestriction;
+
         // - The app-side Binder interface to receive callbacks from us
         sp<IBinder>                     mRemoteBinder;   // immutable after constructor
 
@@ -439,6 +455,9 @@
 
     }; // class CameraClientManager
 
+    int32_t updateAudioRestriction();
+    int32_t updateAudioRestrictionLocked();
+
 private:
 
     typedef hardware::camera::common::V1_0::CameraDeviceStatus CameraDeviceStatus;
@@ -633,9 +652,27 @@
         sp<BasicClient>* client,
         std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>>* partial);
 
-    // Should an operation attempt on a cameraId be rejected, if the camera id is
-    // advertised as a publically hidden secure camera, by the camera HAL ?
-    bool shouldRejectHiddenCameraConnection(const String8 & cameraId);
+    // Should an operation attempt on a cameraId be rejected? This can happen
+    // under various conditions, for example if the camera device is advertised as
+    // a system-only or hidden secure camera, among other reasons.
+    bool shouldRejectSystemCameraConnection(const String8 & cameraId) const;
+
+    // Should a device status update be skipped for a particular camera device? This can happen
+    // under various conditions, for example if the camera device is advertised as
+    // a system-only or hidden secure camera, among other reasons.
+    bool shouldSkipStatusUpdates(const String8& cameraId, bool isVendorListener, int clientPid,
+            int clientUid) const;
+
+    inline SystemCameraKind getSystemCameraKind(const String8& cameraId) const {
+        return mCameraProviderManager->getSystemCameraKind(cameraId.c_str());
+    }
+
+    // Update the set of API1-compatible camera devices, excluding system cameras
+    // and secure cameras. This is used for hiding system-only cameras from clients
+    // that use the camera1 API and do not hold android.permission.SYSTEM_CAMERA.
+    // This function expects @param normalDeviceIds to be sorted in alphanumeric order.
+    void filterAPI1SystemCameraLocked(const std::vector<std::string> &normalDeviceIds);
 
     // Single implementation shared between the various connect calls
     template<class CALLBACK, class CLIENT>
@@ -791,9 +828,14 @@
      */
     void updateCameraNumAndIds();
 
+    // Number of camera devices (excluding hidden secure cameras)
     int                 mNumberOfCameras;
+    // Number of camera devices (excluding hidden secure cameras and
+    // system cameras)
+    int                 mNumberOfCamerasWithoutSystemCamera;
 
     std::vector<std::string> mNormalDeviceIds;
+    std::vector<std::string> mNormalDeviceIdsWithoutSystemCamera;
 
     // sounds
     sp<MediaPlayer>     newMediaPlayer(const char *file);
@@ -810,7 +852,9 @@
     class ServiceListener : public virtual IBinder::DeathRecipient {
         public:
             ServiceListener(sp<CameraService> parent, sp<hardware::ICameraServiceListener> listener,
-                    int uid) : mParent(parent), mListener(listener), mListenerUid(uid) {}
+                    int uid, int pid, bool isVendorClient)
+                    : mParent(parent), mListener(listener), mListenerUid(uid), mListenerPid(pid),
+                      mIsVendorListener(isVendorClient) { }
 
             status_t initialize() {
                 return IInterface::asBinder(mListener)->linkToDeath(this);
@@ -824,16 +868,20 @@
             }
 
             int getListenerUid() { return mListenerUid; }
+            int getListenerPid() { return mListenerPid; }
             sp<hardware::ICameraServiceListener> getListener() { return mListener; }
+            bool isVendorListener() { return mIsVendorListener; }
 
         private:
             wp<CameraService> mParent;
             sp<hardware::ICameraServiceListener> mListener;
-            int mListenerUid;
+            int mListenerUid = -1;
+            int mListenerPid = -1;
+            bool mIsVendorListener = false;
     };
 
     // Guarded by mStatusListenerMutex
-    std::vector<std::pair<bool, sp<ServiceListener>>> mListenerList;
+    std::vector<sp<ServiceListener>> mListenerList;
 
     Mutex       mStatusListenerLock;
 
@@ -949,6 +997,13 @@
 
     void broadcastTorchModeStatus(const String8& cameraId,
             hardware::camera::common::V1_0::TorchModeStatus status);
+
+    // TODO: right now each BasicClient holds one AppOpsManager instance.
+    // We can refactor the code so that all clients share this instance.
+    AppOpsManager mAppOps;
+
+    // Aggregated audio restriction mode across all camera clients
+    int32_t mAudioRestriction;
 };
 
 } // namespace android
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 162b50f..c273881 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -959,6 +959,11 @@
         case Parameters::RECORD:
         case Parameters::PREVIEW:
             syncWithDevice();
+            // Due to flush, a camera device sync is not a sufficient
+            // guarantee that the current client parameters have been
+            // applied correctly. To resolve this, wait for the current
+            // request id to show up in the results.
+            waitUntilCurrentRequestIdLocked();
             res = stopStream();
             if (res != OK) {
                 ALOGE("%s: Camera %d: Can't stop streaming: %s (%d)",
@@ -2253,6 +2258,58 @@
     return OK;
 }
 
+status_t Camera2Client::setAudioRestriction(int /*mode*/) {
+    // Empty implementation. setAudioRestriction is a hidden interface and is not
+    // supported by the android.hardware.Camera API.
+    return INVALID_OPERATION;
+}
+
+int32_t Camera2Client::getGlobalAudioRestriction() {
+    // Empty implementation. getGlobalAudioRestriction is a hidden interface and is not
+    // supported by the android.hardware.Camera API.
+    return INVALID_OPERATION;
+}
+
+status_t Camera2Client::waitUntilCurrentRequestIdLocked() {
+    int32_t activeRequestId = mStreamingProcessor->getActiveRequestId();
+    if (activeRequestId != 0) {
+        auto res = waitUntilRequestIdApplied(activeRequestId,
+                mDevice->getExpectedInFlightDuration());
+        if (res == TIMED_OUT) {
+            ALOGE("%s: Camera %d: Timed out waiting for current request id to return in results!",
+                    __FUNCTION__, mCameraId);
+            return res;
+        } else if (res != OK) {
+            ALOGE("%s: Camera %d: Error while waiting for current request id to return in results!",
+                    __FUNCTION__, mCameraId);
+            return res;
+        }
+    }
+
+    return OK;
+}
+
+status_t Camera2Client::waitUntilRequestIdApplied(int32_t requestId, nsecs_t timeout) {
+    Mutex::Autolock l(mLatestRequestMutex);
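+    // Wait on the condition variable until notifyRequestId() delivers the target id,
+    // shrinking the remaining timeout after each wakeup so the total wait stays bounded.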
+    while (mLatestRequestId != requestId) {
+        nsecs_t startTime = systemTime();
+
+        auto res = mLatestRequestSignal.waitRelative(mLatestRequestMutex, timeout);
+        if (res != OK) return res;
+
+        timeout -= (systemTime() - startTime);
+    }
+
+    return OK;
+}
+
+void Camera2Client::notifyRequestId(int32_t requestId) {
+    Mutex::Autolock al(mLatestRequestMutex);
+
+    mLatestRequestId = requestId;
+    mLatestRequestSignal.signal();
+}
+
 const char* Camera2Client::kAutofocusLabel = "autofocus";
 const char* Camera2Client::kTakepictureLabel = "take_picture";
 
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index a9ea271..8a17b17 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -83,6 +83,8 @@
     virtual void            notifyError(int32_t errorCode,
                                         const CaptureResultExtras& resultExtras);
     virtual status_t        setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer);
+    virtual status_t        setAudioRestriction(int mode);
+    virtual int32_t         getGlobalAudioRestriction();
 
     /**
      * Interface used by CameraService
@@ -122,6 +124,8 @@
 
     camera2::SharedParameters& getParameters();
 
+    void notifyRequestId(int32_t requestId);
+
     int getPreviewStreamId() const;
     int getCaptureStreamId() const;
     int getCallbackStreamId() const;
@@ -227,6 +231,12 @@
     status_t initializeImpl(TProviderPtr providerPtr, const String8& monitorTags);
 
     bool isZslEnabledInStillTemplate();
+
+    mutable Mutex mLatestRequestMutex;
+    Condition mLatestRequestSignal;
+    int32_t mLatestRequestId = -1;
+    status_t waitUntilRequestIdApplied(int32_t requestId, nsecs_t timeout);
+    status_t waitUntilCurrentRequestIdLocked();
 };
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index d65ac7b..388a5dc 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -534,7 +534,7 @@
     }
 
     if (mHardware != nullptr) {
-        VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(dataPtr->pointer());
+        VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(dataPtr->unsecurePointer());
         metadata->eType = kMetadataBufferTypeNativeHandleSource;
         metadata->pHandle = handle;
         mHardware->releaseRecordingFrame(dataPtr);
@@ -573,7 +573,7 @@
         }
 
         if (!disconnected) {
-            VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(dataPtr->pointer());
+            VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(dataPtr->unsecurePointer());
             metadata->eType = kMetadataBufferTypeNativeHandleSource;
             metadata->pHandle = handle;
             frames.push_back(dataPtr);
@@ -916,8 +916,12 @@
                 ALOGE("%s: dataPtr does not contain VideoNativeHandleMetadata!", __FUNCTION__);
                 return;
             }
+            // TODO: Using unsecurePointer() has some associated security pitfalls
+            //       (see declaration for details).
+            //       Either document why it is safe in this case or address the
+            //       issue (e.g. by copying).
             VideoNativeHandleMetadata *metadata =
-                (VideoNativeHandleMetadata*)(msg.dataPtr->pointer());
+                (VideoNativeHandleMetadata*)(msg.dataPtr->unsecurePointer());
             if (metadata->eType == kMetadataBufferTypeNativeHandleSource) {
                 handle = metadata->pHandle;
             }
@@ -1073,8 +1077,12 @@
 
         // Check if dataPtr contains a VideoNativeHandleMetadata.
         if (dataPtr->size() == sizeof(VideoNativeHandleMetadata)) {
+            // TODO: Using unsecurePointer() has some associated security pitfalls
+            //       (see declaration for details).
+            //       Either document why it is safe in this case or address the
+            //       issue (e.g. by copying).
             VideoNativeHandleMetadata *metadata =
-                (VideoNativeHandleMetadata*)(dataPtr->pointer());
+                (VideoNativeHandleMetadata*)(dataPtr->unsecurePointer());
             if (metadata->eType == kMetadataBufferTypeNativeHandleSource) {
                 handle = metadata->pHandle;
             }
@@ -1171,4 +1179,25 @@
     return INVALID_OPERATION;
 }
 
+status_t CameraClient::setAudioRestriction(int mode) {
+    if (!isValidAudioRestriction(mode)) {
+        ALOGE("%s: invalid audio restriction mode %d", __FUNCTION__, mode);
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mLock);
+    if (checkPidAndHardware() != NO_ERROR) {
+        return INVALID_OPERATION;
+    }
+    return BasicClient::setAudioRestriction(mode);
+}
+
+int32_t CameraClient::getGlobalAudioRestriction() {
+    Mutex::Autolock lock(mLock);
+    if (checkPidAndHardware() != NO_ERROR) {
+        return INVALID_OPERATION;
+    }
+    return BasicClient::getServiceAudioRestriction();
+}
+
 }; // namespace android
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 9530b6c..b26b612 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -59,6 +59,8 @@
     virtual String8         getParameters() const;
     virtual status_t        sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
     virtual status_t        setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer);
+    virtual status_t        setAudioRestriction(int mode);
+    virtual int32_t         getGlobalAudioRestriction();
 
     // Interface used by CameraService
     CameraClient(const sp<CameraService>& cameraService,
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 683e84d..63e293a 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -86,6 +86,12 @@
         process3aState(frame, client);
     }
 
+    if (mCurrentRequestId != frame.mResultExtras.requestId) {
+        mCurrentRequestId = frame.mResultExtras.requestId;
+
+        client->notifyRequestId(mCurrentRequestId);
+    }
+
     return FrameProcessorBase::processSingleFrame(frame, device);
 }
 
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 8183c12..142b8cd 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -94,6 +94,7 @@
     };
 
     AlgState m3aState;
+    int32_t mCurrentRequestId = -1;
 
     // frame number -> pending 3A states that not all data are received yet.
     KeyedVector<int32_t, AlgState> mPending3AStates;
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index c7a4f2b..d93d26f 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -1870,6 +1870,34 @@
     return res;
 }
 
+binder::Status CameraDeviceClient::setCameraAudioRestriction(int32_t mode) {
+    ATRACE_CALL();
+    binder::Status res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
+
+    if (!isValidAudioRestriction(mode)) {
+        String8 msg = String8::format("Camera %s: invalid audio restriction mode %d",
+                mCameraIdStr.string(), mode);
+        ALOGW("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+    }
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+    BasicClient::setAudioRestriction(mode);
+    return binder::Status::ok();
+}
+
+binder::Status CameraDeviceClient::getGlobalAudioRestriction(/*out*/ int32_t* outMode) {
+    ATRACE_CALL();
+    binder::Status res;
+    if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
+    Mutex::Autolock icl(mBinderSerializationLock);
+    if (outMode != nullptr) {
+        *outMode = BasicClient::getServiceAudioRestriction();
+    }
+    return binder::Status::ok();
+}
+
 status_t CameraDeviceClient::dump(int fd, const Vector<String16>& args) {
     return BasicClient::dump(fd, args);
 }
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 1c5abb0..fe25010 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -152,6 +152,10 @@
     virtual binder::Status finalizeOutputConfigurations(int32_t streamId,
             const hardware::camera2::params::OutputConfiguration &outputConfiguration) override;
 
+    virtual binder::Status setCameraAudioRestriction(int32_t mode) override;
+
+    virtual binder::Status getGlobalAudioRestriction(/*out*/int32_t* outMode) override;
+
     /**
      * Interface used by CameraService
      */
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index 5a87134..410fd2c 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -1210,7 +1210,7 @@
     outputFormat->setInt32(KEY_I_FRAME_INTERVAL, 0);
     outputFormat->setInt32(KEY_COLOR_FORMAT,
             useGrid ? COLOR_FormatYUV420Flexible : COLOR_FormatSurface);
-    outputFormat->setInt32(KEY_FRAME_RATE, gridRows * gridCols);
+    outputFormat->setInt32(KEY_FRAME_RATE, useGrid ? gridRows * gridCols : kNoGridOpRate);
     // This only serves as a hint to encoder when encoding is not real-time.
     outputFormat->setInt32(KEY_OPERATING_RATE, useGrid ? kGridOpRate : kNoGridOpRate);
 
@@ -1671,8 +1671,13 @@
                          ALOGE("CB_OUTPUT_FORMAT_CHANGED: format is expected.");
                          break;
                      }
-
-                     parent->onHeicFormatChanged(format);
+                     // Here format is MediaCodec's internal copy of output format.
+                     // Make a copy since onHeicFormatChanged() might modify it.
+                     sp<AMessage> formatCopy;
+                     if (format != nullptr) {
+                         formatCopy = format->dup();
+                     }
+                     parent->onHeicFormatChanged(formatCopy);
                      break;
                  }
 
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 98c1b5e..935bc37 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -383,6 +383,12 @@
      * drop buffers for stream of streamId.
      */
     virtual status_t dropStreamBuffers(bool /*dropping*/, int /*streamId*/) = 0;
+
+    /**
+     * Returns the maximum expected time it'll take for all currently in-flight
+     * requests to complete, based on their settings
+     */
+    virtual nsecs_t getExpectedInFlightDuration() = 0;
 };
 
 }; // namespace android
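
Promoting getExpectedInFlightDuration() into CameraDeviceBase lets callers outside Camera3Device
size their waits against outstanding requests. A hedged sketch of one hypothetical consumer (the
timeout policy is an assumption; only the interface method comes from this patch):

    // Hypothetical caller, shown only to illustrate the new interface method.
    void waitForInFlight(const sp<CameraDeviceBase>& device) {
        const nsecs_t kMinWaitNs = ms2ns(100);   // floor so we never busy-poll
        nsecs_t timeout = std::max(kMinWaitNs, device->getExpectedInFlightDuration());
        // ... wait on an idle signal for at most 'timeout' nanoseconds ...
    }
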
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index c72029f..974026c 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -104,13 +104,25 @@
     return OK;
 }
 
-int CameraProviderManager::getCameraCount() const {
+std::pair<int, int> CameraProviderManager::getCameraCount() const {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
-    int count = 0;
+    int systemCameraCount = 0;
+    int publicCameraCount = 0;
     for (auto& provider : mProviders) {
-        count += provider->mUniqueCameraIds.size();
+        for (auto &id : provider->mUniqueCameraIds) {
+            switch(getSystemCameraKindLocked(id)) {
+                case SystemCameraKind::PUBLIC:
+                    publicCameraCount++;
+                    break;
+                case SystemCameraKind::SYSTEM_ONLY_CAMERA:
+                    systemCameraCount++;
+                    break;
+                default:
+                    break;
+            }
+        }
     }
-    return count;
+    return std::make_pair(systemCameraCount, publicCameraCount);
 }
 
 std::vector<std::string> CameraProviderManager::getCameraDeviceIds() const {
@@ -124,21 +136,38 @@
     return deviceIds;
 }
 
+void CameraProviderManager::collectDeviceIdsLocked(const std::vector<std::string> deviceIds,
+        std::vector<std::string>& publicDeviceIds,
+        std::vector<std::string>& systemDeviceIds) const {
+    for (auto &deviceId : deviceIds) {
+        if (getSystemCameraKindLocked(deviceId) == SystemCameraKind::SYSTEM_ONLY_CAMERA) {
+            systemDeviceIds.push_back(deviceId);
+        } else {
+            publicDeviceIds.push_back(deviceId);
+        }
+    }
+}
+
 std::vector<std::string> CameraProviderManager::getAPI1CompatibleCameraDeviceIds() const {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
+    std::vector<std::string> publicDeviceIds;
+    std::vector<std::string> systemDeviceIds;
     std::vector<std::string> deviceIds;
     for (auto& provider : mProviders) {
         std::vector<std::string> providerDeviceIds = provider->mUniqueAPI1CompatibleCameraIds;
-
+        // Secure cameras should not be exposed through the camera1 API
+        providerDeviceIds.erase(std::remove_if(providerDeviceIds.begin(), providerDeviceIds.end(),
+                [this](const std::string& s) {
+                bool rem = this->getSystemCameraKindLocked(s) ==
+                        SystemCameraKind::HIDDEN_SECURE_CAMERA;
+                return rem;}), providerDeviceIds.end());
         // API1 app doesn't handle logical and physical camera devices well. So
         // for each camera facing, only take the first id advertised by HAL in
         // all [logical, physical1, physical2, ...] id combos, and filter out the rest.
         filterLogicalCameraIdsLocked(providerDeviceIds);
-
-        deviceIds.insert(deviceIds.end(), providerDeviceIds.begin(), providerDeviceIds.end());
+        collectDeviceIdsLocked(providerDeviceIds, publicDeviceIds, systemDeviceIds);
     }
-
-    std::sort(deviceIds.begin(), deviceIds.end(),
+    auto sortFunc =
             [](const std::string& a, const std::string& b) -> bool {
                 uint32_t aUint = 0, bUint = 0;
                 bool aIsUint = base::ParseUint(a, &aUint);
@@ -154,7 +183,13 @@
                 }
                 // Simple string compare if both id are not uint
                 return a < b;
-            });
+            };
+    // We put device ids for system cameras at the end since they will be
+    // filtered out for processes that lack system camera permissions.
+    std::sort(publicDeviceIds.begin(), publicDeviceIds.end(), sortFunc);
+    std::sort(systemDeviceIds.begin(), systemDeviceIds.end(), sortFunc);
+    deviceIds.insert(deviceIds.end(), publicDeviceIds.begin(), publicDeviceIds.end());
+    deviceIds.insert(deviceIds.end(), systemDeviceIds.begin(), systemDeviceIds.end());
     return deviceIds;
 }
 
@@ -534,15 +569,23 @@
     }
 }
 
-bool CameraProviderManager::ProviderInfo::DeviceInfo3::isPublicallyHiddenSecureCamera() {
+SystemCameraKind CameraProviderManager::ProviderInfo::DeviceInfo3::getSystemCameraKind() {
     camera_metadata_entry_t entryCap;
     entryCap = mCameraCharacteristics.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
-    if (entryCap.count != 1) {
-        // Do NOT hide this camera device if the capabilities specify anything more
-        // than ANDROID_REQUEST_AVAILABLE_CAPABILITIES_SECURE_IMAGE_DATA.
-        return false;
+    if (entryCap.count == 1 &&
+            entryCap.data.u8[0] == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_SECURE_IMAGE_DATA) {
+        return SystemCameraKind::HIDDEN_SECURE_CAMERA;
     }
-    return entryCap.data.u8[0] == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_SECURE_IMAGE_DATA;
+
+    // Go through the capabilities and check if it has
+    // ANDROID_REQUEST_AVAILABLE_CAPABILITIES_SYSTEM_CAMERA
+    for (size_t i = 0; i < entryCap.count; ++i) {
+        uint8_t capability = entryCap.data.u8[i];
+        if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_SYSTEM_CAMERA) {
+            return SystemCameraKind::SYSTEM_ONLY_CAMERA;
+        }
+    }
+    return SystemCameraKind::PUBLIC;
 }
 
 void CameraProviderManager::ProviderInfo::DeviceInfo3::getSupportedSizes(
@@ -1046,14 +1089,17 @@
     return deviceInfo->mIsLogicalCamera;
 }
 
-bool CameraProviderManager::isPublicallyHiddenSecureCamera(const std::string& id) {
+SystemCameraKind CameraProviderManager::getSystemCameraKind(const std::string& id) const {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
+    return getSystemCameraKindLocked(id);
+}
 
+SystemCameraKind CameraProviderManager::getSystemCameraKindLocked(const std::string& id) const {
     auto deviceInfo = findDeviceInfoLocked(id);
     if (deviceInfo == nullptr) {
-        return false;
+        return SystemCameraKind::PUBLIC;
     }
-    return deviceInfo->mIsPublicallyHiddenSecureCamera;
+    return deviceInfo->mSystemCameraKind;
 }
 
 bool CameraProviderManager::isHiddenPhysicalCamera(const std::string& cameraId) {
@@ -1937,7 +1983,7 @@
         return;
     }
 
-    mIsPublicallyHiddenSecureCamera = isPublicallyHiddenSecureCamera();
+    mSystemCameraKind = getSystemCameraKind();
 
     status_t res = fixupMonochromeTags();
     if (OK != res) {
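
Taken together, the hunks above implement a three-way classification and a split camera count:
a device whose only capability is SECURE_IMAGE_DATA is hidden from everyone, a device advertising
SYSTEM_CAMERA is system-only, and everything else stays public; getCameraCount() now reports
{systemCameraCount, publicCameraCount}. A self-contained sketch of that precedence using plain
std types (the capability values are stand-ins, not the real metadata enums):

    #include <cstdint>
    #include <utility>
    #include <vector>

    enum Capability : uint8_t { SECURE_IMAGE_DATA = 1, SYSTEM_CAMERA = 2, OTHER = 3 };
    enum class SystemCameraKind { PUBLIC, SYSTEM_ONLY_CAMERA, HIDDEN_SECURE_CAMERA };

    SystemCameraKind classify(const std::vector<uint8_t>& caps) {
        // Hidden only when SECURE_IMAGE_DATA is the sole advertised capability.
        if (caps.size() == 1 && caps[0] == SECURE_IMAGE_DATA) {
            return SystemCameraKind::HIDDEN_SECURE_CAMERA;
        }
        // Any SYSTEM_CAMERA capability restricts the device to system clients.
        for (uint8_t c : caps) {
            if (c == SYSTEM_CAMERA) return SystemCameraKind::SYSTEM_ONLY_CAMERA;
        }
        return SystemCameraKind::PUBLIC;
    }

    // Mirrors the new getCameraCount(): hidden secure cameras land in neither bucket.
    std::pair<int, int> countCameras(const std::vector<std::vector<uint8_t>>& allCaps) {
        int systemCount = 0, publicCount = 0;
        for (const auto& caps : allCaps) {
            switch (classify(caps)) {
                case SystemCameraKind::SYSTEM_ONLY_CAMERA: systemCount++; break;
                case SystemCameraKind::PUBLIC:             publicCount++; break;
                default: break;
            }
        }
        return {systemCount, publicCount};
    }
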
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 8cdfc24..f4cf667 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -54,6 +54,26 @@
             sp<VendorTagDescriptor>& descriptor);
 };
 
+enum SystemCameraKind {
+   /**
+    * These camera devices are visible to all apps and system components alike
+    */
+   PUBLIC = 0,
+
+   /**
+    * These camera devices are visible only to processes having the
+    * android.permission.SYSTEM_CAMERA permission. They are not exposed to 3P
+    * apps.
+    */
+   SYSTEM_ONLY_CAMERA,
+
+   /**
+    * These camera devices are visible only to HAL clients (that try to connect
+    * on a hwbinder thread).
+    */
+   HIDDEN_SECURE_CAMERA
+};
+
 /**
  * A manager for all camera providers available on an Android device.
  *
@@ -132,10 +152,10 @@
             ServiceInteractionProxy *proxy = &sHardwareServiceInteractionProxy);
 
     /**
-     * Retrieve the total number of available cameras. This value may change dynamically as cameras
-     * are added or removed.
+     * Retrieve the total number of available cameras.
+     * This value may change dynamically as cameras are added or removed.
      */
-    int getCameraCount() const;
+    std::pair<int, int> getCameraCount() const;
 
     std::vector<std::string> getCameraDeviceIds() const;
 
@@ -272,7 +292,7 @@
      */
     bool isLogicalCamera(const std::string& id, std::vector<std::string>* physicalCameraIds);
 
-    bool isPublicallyHiddenSecureCamera(const std::string& id);
+    SystemCameraKind getSystemCameraKind(const std::string& id) const;
     bool isHiddenPhysicalCamera(const std::string& cameraId);
 
     static const float kDepthARTolerance;
@@ -379,7 +399,7 @@
             std::vector<std::string> mPhysicalIds;
             hardware::CameraInfo mInfo;
             sp<IBase> mSavedInterface;
-            bool mIsPublicallyHiddenSecureCamera = false;
+            SystemCameraKind mSystemCameraKind = SystemCameraKind::PUBLIC;
 
             const hardware::camera::common::V1_0::CameraResourceCost mResourceCost;
 
@@ -497,7 +517,7 @@
             CameraMetadata mCameraCharacteristics;
             std::unordered_map<std::string, CameraMetadata> mPhysicalCameraCharacteristics;
             void queryPhysicalCameraIds();
-            bool isPublicallyHiddenSecureCamera();
+            SystemCameraKind getSystemCameraKind();
             status_t fixupMonochromeTags();
             status_t addDynamicDepthTags();
             static void getSupportedSizes(const CameraMetadata& ch, uint32_t tag,
@@ -595,6 +615,12 @@
     status_t getCameraCharacteristicsLocked(const std::string &id,
             CameraMetadata* characteristics) const;
     void filterLogicalCameraIdsLocked(std::vector<std::string>& deviceIds) const;
+
+    SystemCameraKind getSystemCameraKindLocked(const std::string& id) const;
+
+    void collectDeviceIdsLocked(const std::vector<std::string> deviceIds,
+            std::vector<std::string>& normalDeviceIds,
+            std::vector<std::string>& systemCameraDeviceIds) const;
 };
 
 } // namespace android
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
index 3c90de0..94541d8 100644
--- a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
@@ -419,7 +419,7 @@
 
     std::vector<std::unique_ptr<Item>> items;
     std::vector<std::unique_ptr<Camera>> cameraList;
-    auto image = Image::FromDataForPrimaryImage("android/mainimage", &items);
+    auto image = Image::FromDataForPrimaryImage("image/jpeg", &items);
     std::unique_ptr<CameraParams> cameraParams(new CameraParams(std::move(image)));
     if (cameraParams == nullptr) {
         ALOGE("%s: Failed to initialize camera parameters", __FUNCTION__);
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp b/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
index 522d521..62ef681 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
@@ -165,8 +165,12 @@
         mem = mHidlMemPoolMap.at(data);
     }
     sp<CameraHeapMemory> heapMem(static_cast<CameraHeapMemory *>(mem->handle));
+    // TODO: Using unsecurePointer() has some associated security pitfalls
+    //       (see declaration for details).
+    //       Either document why it is safe in this case or address the
+    //       issue (e.g. by copying).
     VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*)
-            heapMem->mBuffers[bufferIndex]->pointer();
+            heapMem->mBuffers[bufferIndex]->unsecurePointer();
     md->pHandle = const_cast<native_handle_t*>(frameData.getNativeHandle());
     sDataCbTimestamp(timestamp, (int32_t) msgType, mem, bufferIndex, this);
     return hardware::Void();
@@ -192,8 +196,12 @@
                      hidl_msg.bufferIndex, mem->mNumBufs);
                 return hardware::Void();
             }
+            // TODO: Using unsecurePointer() has some associated security pitfalls
+            //       (see declaration for details).
+            //       Either document why it is safe in this case or address the
+            //       issue (e.g. by copying).
             VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*)
-                    mem->mBuffers[hidl_msg.bufferIndex]->pointer();
+                    mem->mBuffers[hidl_msg.bufferIndex]->unsecurePointer();
             md->pHandle = const_cast<native_handle_t*>(hidl_msg.frameData.getNativeHandle());
 
             msgs.push_back({hidl_msg.timestamp, mem->mBuffers[hidl_msg.bufferIndex]});
@@ -578,7 +586,11 @@
     int bufferIndex = offset / size;
     if (CC_LIKELY(mHidlDevice != nullptr)) {
         if (size == sizeof(VideoNativeHandleMetadata)) {
-            VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*) mem->pointer();
+            // TODO: Using unsecurePointer() has some associated security pitfalls
+            //       (see declaration for details).
+            //       Either document why it is safe in this case or address the
+            //       issue (e.g. by copying).
+            VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*) mem->unsecurePointer();
             // Caching the handle here because md->pHandle will be subject to HAL's edit
             native_handle_t* nh = md->pHandle;
             hidl_handle frame = nh;
@@ -605,7 +617,11 @@
             if (size == sizeof(VideoNativeHandleMetadata)) {
                 uint32_t heapId = heap->getHeapID();
                 uint32_t bufferIndex = offset / size;
-                VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*) mem->pointer();
+                // TODO: Using unsecurePointer() has some associated security pitfalls
+                //       (see declaration for details).
+                //       Either document why it is safe in this case or address the
+                //       issue (e.g. by copying).
+                VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*) mem->unsecurePointer();
                 // Caching the handle here because md->pHandle will be subject to HAL's edit
                 native_handle_t* nh = md->pHandle;
                 VideoFrameMessage msg;
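
The TODOs above all point at the same hazard: unsecurePointer() exposes memory the remote side can
still write, so values read through it may change between check and use. One way to "address the
issue by copying", as the comments suggest, is to snapshot the metadata into a local struct first;
a hedged fragment in the style of the first hunk (this is not the fix applied by this patch):

    // Sketch of the copy-out alternative; variable names mirror the surrounding code.
    VideoNativeHandleMetadata mdCopy;
    void* src = heapMem->mBuffers[bufferIndex]->unsecurePointer();
    if (src != nullptr &&
            heapMem->mBuffers[bufferIndex]->size() >= sizeof(VideoNativeHandleMetadata)) {
        memcpy(&mdCopy, src, sizeof(mdCopy));  // snapshot before shared memory can change again
        mdCopy.pHandle = const_cast<native_handle_t*>(frameData.getNativeHandle());
        // ... operate on mdCopy instead of the live shared-memory view ...
    }
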
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 4227a3b..3188892 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2802,6 +2802,27 @@
         mOperatingMode = operatingMode;
     }
 
+    // When called from configureStreams, abort any queued input buffers that do
+    // not belong to a pending request.
+    if (mInputStream != NULL && notifyRequestThread) {
+        while (true) {
+            camera3_stream_buffer_t inputBuffer;
+            status_t res = mInputStream->getInputBuffer(&inputBuffer,
+                    /*respectHalLimit*/ false);
+            if (res != OK) {
+                // Exhausted acquiring all input buffers.
+                break;
+            }
+
+            inputBuffer.status = CAMERA3_BUFFER_STATUS_ERROR;
+            res = mInputStream->returnInputBuffer(inputBuffer);
+            if (res != OK) {
+                ALOGE("%s: %d: couldn't return input buffer while clearing input queue: "
+                        "%s (%d)", __FUNCTION__, __LINE__, strerror(-res), res);
+            }
+        }
+    }
+
     if (!mNeedConfig) {
         ALOGV("%s: Skipping config, no stream changes", __FUNCTION__);
         return OK;
@@ -3679,7 +3700,7 @@
         // Did we get the (final) result metadata for this capture?
         if (result->result != NULL && !isPartialResult) {
             if (request.physicalCameraIds.size() != result->num_physcam_metadata) {
-                SET_ERR("Requested physical Camera Ids %d not equal to number of metadata %d",
+                SET_ERR("Expected physical Camera metadata count %d not equal to actual count %d",
                         request.physicalCameraIds.size(), result->num_physcam_metadata);
                 return;
             }
@@ -3873,12 +3894,14 @@
                             errorCode) {
                         if (physicalCameraId.size() > 0) {
                             String8 cameraId(physicalCameraId);
-                            if (r.physicalCameraIds.find(cameraId) == r.physicalCameraIds.end()) {
+                            auto iter = r.physicalCameraIds.find(cameraId);
+                            if (iter == r.physicalCameraIds.end()) {
                                 ALOGE("%s: Reported result failure for physical camera device: %s "
                                         " which is not part of the respective request!",
                                         __FUNCTION__, cameraId.string());
                                 break;
                             }
+                            r.physicalCameraIds.erase(iter);
                             resultExtras.errorPhysicalCameraId = physicalCameraId;
                         } else {
                             logicalDeviceResultError = true;
@@ -5082,6 +5105,7 @@
                     ALOGW("%s: %d: couldn't get input buffer while clearing the request "
                             "list: %s (%d)", __FUNCTION__, __LINE__, strerror(-res), res);
                 } else {
+                    inputBuffer.status = CAMERA3_BUFFER_STATUS_ERROR;
                     res = (*it)->mInputStream->returnInputBuffer(inputBuffer);
                     if (res != OK) {
                         ALOGE("%s: %d: couldn't return input buffer while clearing the request "
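
The physicalCameraIds.erase(iter) added above doubles as a duplicate-error guard: once a failure
has been reported for a physical camera in a given request, a second ERROR for the same id no
longer matches the lookup and is not forwarded again. The bookkeeping reduces to a set-erase
pattern; a self-contained sketch:

    #include <set>
    #include <string>

    // First failure for a physical camera consumes its entry; repeats are rejected.
    bool acceptPhysicalError(std::set<std::string>& pendingPhysicalIds, const std::string& id) {
        auto it = pendingPhysicalIds.find(id);
        if (it == pendingPhysicalIds.end()) {
            return false;               // not part of the request, or already reported
        }
        pendingPhysicalIds.erase(it);
        return true;
    }
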
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index cae34ce..2573b48 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -194,6 +194,8 @@
      */
     status_t dropStreamBuffers(bool dropping, int streamId) override;
 
+    nsecs_t getExpectedInFlightDuration() override;
+
     /**
      * Helper functions to map between framework and HIDL values
      */
@@ -1111,12 +1113,6 @@
             const SurfaceMap& outputSurfaces);
 
     /**
-     * Returns the maximum expected time it'll take for all currently in-flight
-     * requests to complete, based on their settings
-     */
-    nsecs_t getExpectedInFlightDuration();
-
-    /**
      * Tracking for idle detection
      */
     sp<camera3::StatusTracker> mStatusTracker;
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index fc83684..cb59a76 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -71,7 +71,8 @@
 
     res = mConsumer->acquireBuffer(&bufferItem, /*waitForFence*/false);
     if (res != OK) {
-        ALOGE("%s: Stream %d: Can't acquire next output buffer: %s (%d)",
+        // This may or may not be an error condition depending on caller.
+        ALOGV("%s: Stream %d: Can't acquire next output buffer: %s (%d)",
                 __FUNCTION__, mId, strerror(-res), res);
         return res;
     }
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index acb8b3c..e1d35e8 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -54,9 +54,8 @@
         mState = STATE_ERROR;
     }
 
-    if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
-        mBufferReleasedListener = new BufferReleasedListener(this);
-    }
+    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
+    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
 }
 
 Camera3OutputStream::Camera3OutputStream(int id,
@@ -87,9 +86,8 @@
         mState = STATE_ERROR;
     }
 
-    if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
-        mBufferReleasedListener = new BufferReleasedListener(this);
-    }
+    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
+    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
 }
 
 Camera3OutputStream::Camera3OutputStream(int id,
@@ -124,10 +122,8 @@
     }
 
     mConsumerName = String8("Deferred");
-    if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
-        mBufferReleasedListener = new BufferReleasedListener(this);
-    }
-
+    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
+    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
 }
 
 Camera3OutputStream::Camera3OutputStream(int id, camera3_stream_type_t type,
@@ -151,9 +147,8 @@
         mDropBuffers(false),
         mDequeueBufferLatency(kDequeueLatencyBinSize) {
 
-    if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
-        mBufferReleasedListener = new BufferReleasedListener(this);
-    }
+    bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
+    mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
 
     // Subclasses expected to initialize mConsumer themselves
 }
@@ -261,7 +256,7 @@
         notifyBufferReleased(anwBuffer);
         if (mUseBufferManager) {
             // Return this buffer back to buffer manager.
-            mBufferReleasedListener->onBufferReleased();
+            mBufferProducerListener->onBufferReleased();
         }
     } else {
         if (mTraceFirstBuffer && (stream_type == CAMERA3_STREAM_OUTPUT)) {
@@ -387,8 +382,8 @@
     // Configure consumer-side ANativeWindow interface. The listener may be used
     // to notify buffer manager (if it is used) of the returned buffers.
     res = mConsumer->connect(NATIVE_WINDOW_API_CAMERA,
-            /*listener*/mBufferReleasedListener,
-            /*reportBufferRemoval*/true);
+            /*reportBufferRemoval*/true,
+            /*listener*/mBufferProducerListener);
     if (res != OK) {
         ALOGE("%s: Unable to connect to native window for stream %d",
                 __FUNCTION__, mId);
@@ -790,7 +785,7 @@
     return INVALID_OPERATION;
 }
 
-void Camera3OutputStream::BufferReleasedListener::onBufferReleased() {
+void Camera3OutputStream::BufferProducerListener::onBufferReleased() {
     sp<Camera3OutputStream> stream = mParent.promote();
     if (stream == nullptr) {
         ALOGV("%s: Parent camera3 output stream was destroyed", __FUNCTION__);
@@ -823,6 +818,25 @@
     }
 }
 
+void Camera3OutputStream::BufferProducerListener::onBuffersDiscarded(
+        const std::vector<sp<GraphicBuffer>>& buffers) {
+    sp<Camera3OutputStream> stream = mParent.promote();
+    if (stream == nullptr) {
+        ALOGV("%s: Parent camera3 output stream was destroyed", __FUNCTION__);
+        return;
+    }
+
+    if (buffers.size() > 0) {
+        Mutex::Autolock l(stream->mLock);
+        stream->onBuffersRemovedLocked(buffers);
+        if (stream->mUseBufferManager) {
+            stream->mBufferManager->onBuffersRemoved(stream->getId(),
+                    stream->getStreamSetId(), buffers.size());
+        }
+        ALOGV("Stream %d: %zu Buffers discarded.", stream->getId(), buffers.size());
+    }
+}
+
 void Camera3OutputStream::onBuffersRemovedLocked(
         const std::vector<sp<GraphicBuffer>>& removedBuffers) {
     sp<Camera3StreamBufferFreedListener> callback = mBufferFreedListener.promote();
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 729c655..b4e49f9 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -146,18 +146,22 @@
      */
     virtual status_t setConsumers(const std::vector<sp<Surface>>& consumers);
 
-    class BufferReleasedListener : public BnProducerListener {
+    class BufferProducerListener : public SurfaceListener {
         public:
-          BufferReleasedListener(wp<Camera3OutputStream> parent) : mParent(parent) {}
+            BufferProducerListener(wp<Camera3OutputStream> parent, bool needsReleaseNotify)
+                    : mParent(parent), mNeedsReleaseNotify(needsReleaseNotify) {}
 
-          /**
-          * Implementation of IProducerListener, used to notify this stream that the consumer
-          * has returned a buffer and it is ready to return to Camera3BufferManager for reuse.
-          */
-          virtual void onBufferReleased();
+            /**
+            * Implementation of IProducerListener, used to notify this stream that the consumer
+            * has returned a buffer and it is ready to return to Camera3BufferManager for reuse.
+            */
+            virtual void onBufferReleased();
+            virtual bool needsReleaseNotify() { return mNeedsReleaseNotify; }
+            virtual void onBuffersDiscarded(const std::vector<sp<GraphicBuffer>>& buffers);
 
         private:
-          wp<Camera3OutputStream> mParent;
+            wp<Camera3OutputStream> mParent;
+            bool mNeedsReleaseNotify;
     };
 
     virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
@@ -262,10 +266,10 @@
     sp<Camera3BufferManager> mBufferManager;
 
     /**
-     * Buffer released listener, used to notify the buffer manager that a buffer is released
-     * from consumer side.
+     * Buffer producer listener, used to handle notification when a buffer is released
+     * from consumer side, or a set of buffers are discarded by the consumer.
      */
-    sp<BufferReleasedListener> mBufferReleasedListener;
+    sp<BufferProducerListener> mBufferProducerListener;
 
     /**
      * Flag indicating if the buffer manager is used to allocate the stream buffers
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index fd9b4b0..f707ef8 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -70,7 +70,7 @@
     mFormatOverridden(false),
     mOriginalFormat(format),
     mDataSpaceOverridden(false),
-    mOriginalDataSpace(HAL_DATASPACE_UNKNOWN),
+    mOriginalDataSpace(dataSpace),
     mPhysicalCameraId(physicalCameraId),
     mLastTimestamp(0) {
 
@@ -137,9 +137,6 @@
 
 void Camera3Stream::setDataSpaceOverride(bool dataSpaceOverridden) {
     mDataSpaceOverridden = dataSpaceOverridden;
-    if (dataSpaceOverridden && mOriginalDataSpace == HAL_DATASPACE_UNKNOWN) {
-        mOriginalDataSpace = camera3_stream::data_space;
-    }
 }
 
 bool Camera3Stream::isDataSpaceOverridden() const {
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 67afd0f..805df82 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -588,7 +588,7 @@
 
     //Keep track of original dataSpace in case it gets overridden
     bool mDataSpaceOverridden;
-    android_dataspace mOriginalDataSpace;
+    const android_dataspace mOriginalDataSpace;
 
     String8 mPhysicalCameraId;
     nsecs_t mLastTimestamp;
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
index 84c2ec7..80df7db 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
@@ -496,7 +496,7 @@
         mInputSlots[bufferItem.mSlot].mFrameNumber = bufferItem.mFrameNumber;
     } else {
         SP_LOGE("%s: Invalid input graphic buffer!", __FUNCTION__);
-        res = BAD_VALUE;
+        mOnFrameAvailableRes.store(BAD_VALUE);
         return;
     }
     bufferId = bufferItem.mGraphicBuffer->getId();
@@ -541,6 +541,11 @@
     mOnFrameAvailableRes.store(res);
 }
 
+void Camera3StreamSplitter::onFrameReplaced(const BufferItem& item) {
+    ATRACE_CALL();
+    onFrameAvailable(item);
+}
+
 void Camera3StreamSplitter::decrementBufRefCountLocked(uint64_t id, size_t surfaceId) {
     ATRACE_CALL();
 
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
index 960f7aa..4eb455a 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
@@ -102,6 +102,13 @@
     void onFrameAvailable(const BufferItem& item) override;
 
     // From IConsumerListener
+    //
+    // Similar to onFrameAvailable, except that the buffer item replaces an
+    // existing buffer in the buffer queue. This can happen when the buffer queue
+    // is in droppable mode.
+    void onFrameReplaced(const BufferItem& item) override;
+
+    // From IConsumerListener
     // We don't care about released buffers because we detach each buffer as
     // soon as we acquire it. See the comment for onBufferReleased below for
     // some clarifying notes about the name.
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
index b4e7c32..ec5e876 100644
--- a/services/camera/libcameraservice/tests/Android.mk
+++ b/services/camera/libcameraservice/tests/Android.mk
@@ -23,7 +23,6 @@
     libcameraservice \
     libhidlbase \
     liblog \
-    libhidltransport \
     libcamera_client \
     libcamera_metadata \
     libutils \
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
index 0e7edfd..988c06b 100644
--- a/services/mediaanalytics/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -169,6 +169,7 @@
     ALOGV("caller has uid=%d, embedded uid=%d", uid, uid_given);
 
     switch (uid)  {
+        case AID_DRM:
         case AID_MEDIA:
         case AID_MEDIA_CODEC:
         case AID_MEDIA_EX:
diff --git a/services/mediaanalytics/iface_statsd.cpp b/services/mediaanalytics/iface_statsd.cpp
index 6845f06..6fd4415 100644
--- a/services/mediaanalytics/iface_statsd.cpp
+++ b/services/mediaanalytics/iface_statsd.cpp
@@ -60,6 +60,7 @@
     { "audiotrack", statsd_audiotrack },
     { "codec", statsd_codec},
     { "drm.vendor.Google.WidevineCDM", statsd_widevineCDM },
+    { "drmmanager", statsd_drmmanager },
     { "extractor", statsd_extractor },
     { "mediadrm", statsd_mediadrm },
     { "nuplayer", statsd_nuplayer },
diff --git a/services/mediaanalytics/iface_statsd.h b/services/mediaanalytics/iface_statsd.h
index f85d303..014929b 100644
--- a/services/mediaanalytics/iface_statsd.h
+++ b/services/mediaanalytics/iface_statsd.h
@@ -30,5 +30,6 @@
 
 extern bool statsd_mediadrm(MediaAnalyticsItem *);
 extern bool statsd_widevineCDM(MediaAnalyticsItem *);
+extern bool statsd_drmmanager(MediaAnalyticsItem *);
 
 } // namespace android
diff --git a/services/mediaanalytics/statsd_drm.cpp b/services/mediaanalytics/statsd_drm.cpp
index 902483a..845383d 100644
--- a/services/mediaanalytics/statsd_drm.cpp
+++ b/services/mediaanalytics/statsd_drm.cpp
@@ -104,4 +104,38 @@
     return true;
 }
 
+// drmmanager
+bool statsd_drmmanager(MediaAnalyticsItem *item)
+{
+    if (item == NULL) return false;
+
+    nsecs_t timestamp = item->getTimestamp();
+    std::string pkgName = item->getPkgName();
+    int64_t pkgVersionCode = item->getPkgVersionCode();
+    int64_t mediaApexVersion = 0;
+
+    char *plugin_id = NULL;
+    (void) item->getCString("plugin_id", &plugin_id);
+    char *description = NULL;
+    (void) item->getCString("description", &description);
+    int32_t method_id = -1;
+    (void) item->getInt32("method_id", &method_id);
+    char *mime_types = NULL;
+    (void) item->getCString("mime_types", &mime_types);
+
+    if (enabled_statsd) {
+        android::util::stats_write(android::util::MEDIAMETRICS_DRMMANAGER_REPORTED,
+                                   timestamp, pkgName.c_str(), pkgVersionCode,
+                                   mediaApexVersion,
+                                   plugin_id, description,
+                                   method_id, mime_types);
+    } else {
+        ALOGV("NOT sending: drmmanager data");
+    }
+
+    free(plugin_id);
+    free(description);
+    free(mime_types);
+    return true;
+}
 } // namespace android
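
statsd_drmmanager() consumes exactly four custom fields, so the producer has to record them under
the same names. A hedged sketch of that producer side (the DrmManagerService change is not part of
this patch, the value expressions are hypothetical, and the MediaAnalyticsItem calls are assumed
from their use elsewhere in this tree):

    // Hypothetical producer; key names must match what statsd_drmmanager() reads.
    std::unique_ptr<MediaAnalyticsItem> item(MediaAnalyticsItem::create("drmmanager"));
    item->setCString("plugin_id", pluginId.c_str());          // illustrative values
    item->setCString("description", description.c_str());
    item->setInt32("method_id", methodId);
    item->setCString("mime_types", mimeTypes.c_str());
    item->selfrecord();   // hands the record to media.metrics, which dispatches it here
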
diff --git a/services/mediacodec/Android.bp b/services/mediacodec/Android.bp
index 99a6d6b..36042a4 100644
--- a/services/mediacodec/Android.bp
+++ b/services/mediacodec/Android.bp
@@ -10,8 +10,6 @@
         "libavservices_minijail",
         "libbase",
         "libhidlbase",
-        "libhidltransport",
-        "libhwbinder",
         "liblog",
         "libmedia_codecserviceregistrant",
     ],
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index ecc8408..d878d72 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -39,9 +39,7 @@
     libbase \
     libavservices_minijail_vendor \
     libcutils \
-    libhwbinder \
     libhidlbase \
-    libhidltransport \
     libstagefright_omx \
     libstagefright_xmlparser \
     android.hardware.media.omx@1.0 \
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index f668c33..6a82b1b 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -21,6 +21,7 @@
 #include "minijail.h"
 
 #include <binder/ProcessState.h>
+#include <cutils/properties.h>
 #include <hidl/HidlTransportSupport.h>
 #include <media/stagefright/omx/1.0/Omx.h>
 #include <media/stagefright/omx/1.0/OmxStore.h>
@@ -57,7 +58,8 @@
     } else {
         LOG(INFO) << "IOmx HAL service created.";
     }
-    sp<IOmxStore> omxStore = new implementation::OmxStore(omx);
+    sp<IOmxStore> omxStore = new implementation::OmxStore(
+            property_get_int64("vendor.media.omx", 1) ? omx : nullptr);
     if (omxStore == nullptr) {
         LOG(ERROR) << "Cannot create IOmxStore HAL service.";
     } else if (omxStore->registerAsService() != OK) {
diff --git a/services/mediacodec/registrant/Android.bp b/services/mediacodec/registrant/Android.bp
index e3893e5..fa5bc4a 100644
--- a/services/mediacodec/registrant/Android.bp
+++ b/services/mediacodec/registrant/Android.bp
@@ -42,8 +42,8 @@
         "libcodec2_soft_opusenc",
         "libcodec2_soft_vp8dec",
         "libcodec2_soft_vp9dec",
-        "libcodec2_soft_av1dec",
-        "libcodec2_soft_gav1dec",
+        // "libcodec2_soft_av1dec_aom",  // replaced by the gav1 implementation
+        "libcodec2_soft_av1dec_gav1",
         "libcodec2_soft_vp8enc",
         "libcodec2_soft_vp9enc",
         "libcodec2_soft_rawdec",
diff --git a/services/mediadrm/Android.mk b/services/mediadrm/Android.mk
index 227a29d..3e94596 100644
--- a/services/mediadrm/Android.mk
+++ b/services/mediadrm/Android.mk
@@ -27,7 +27,6 @@
     libutils \
     libhidlbase \
     libhidlmemory \
-    libhidltransport \
     android.hardware.drm@1.0 \
     android.hardware.drm@1.1 \
     android.hardware.drm@1.2
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 28bfd3f..bdcd5e4 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -21,8 +21,11 @@
 
 #include <binder/IMediaResourceMonitor.h>
 #include <binder/IServiceManager.h>
+#include <cutils/sched_policy.h>
 #include <dirent.h>
 #include <media/stagefright/ProcessInfo.h>
+#include <mediautils/BatteryNotifier.h>
+#include <mediautils/SchedulingPolicyService.h>
 #include <string.h>
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -31,8 +34,7 @@
 
 #include "ResourceManagerService.h"
 #include "ServiceLog.h"
-#include "mediautils/SchedulingPolicyService.h"
-#include <cutils/sched_policy.h>
+
 namespace android {
 
 namespace {
@@ -69,9 +71,9 @@
     return itemsStr;
 }
 
-static bool hasResourceType(MediaResource::Type type, const Vector<MediaResource>& resources) {
-    for (size_t i = 0; i < resources.size(); ++i) {
-        if (resources[i].mType == type) {
+static bool hasResourceType(MediaResource::Type type, const ResourceList& resources) {
+    for (auto it = resources.begin(); it != resources.end(); it++) {
+        if (it->second.mType == type) {
             return true;
         }
     }
@@ -101,20 +103,22 @@
 }
 
 static ResourceInfo& getResourceInfoForEdit(
+        uid_t uid,
         int64_t clientId,
         const sp<IResourceManagerClient>& client,
         ResourceInfos& infos) {
-    for (size_t i = 0; i < infos.size(); ++i) {
-        if (infos[i].clientId == clientId) {
-            return infos.editItemAt(i);
-        }
+    ssize_t index = infos.indexOfKey(clientId);
+
+    if (index < 0) {
+        ResourceInfo info;
+        info.uid = uid;
+        info.clientId = clientId;
+        info.client = client;
+
+        index = infos.add(clientId, info);
     }
-    ResourceInfo info;
-    info.clientId = clientId;
-    info.client = client;
-    info.cpuBoost = false;
-    infos.push_back(info);
-    return infos.editItemAt(infos.size() - 1);
+
+    return infos.editValueAt(index);
 }
 
 static void notifyResourceGranted(int pid, const Vector<MediaResource> &resources) {
@@ -181,10 +185,10 @@
             snprintf(buffer, SIZE, "        Name: %s\n", infos[j].client->getName().string());
             result.append(buffer);
 
-            Vector<MediaResource> resources = infos[j].resources;
+            const ResourceList &resources = infos[j].resources;
             result.append("        Resources:\n");
-            for (size_t k = 0; k < resources.size(); ++k) {
-                snprintf(buffer, SIZE, "          %s\n", resources[k].toString().string());
+            for (auto it = resources.begin(); it != resources.end(); it++) {
+                snprintf(buffer, SIZE, "          %s\n", it->second.toString().string());
                 result.append(buffer);
             }
         }
@@ -196,15 +200,45 @@
     return OK;
 }
 
-ResourceManagerService::ResourceManagerService()
-    : ResourceManagerService(new ProcessInfo()) {}
+struct SystemCallbackImpl :
+        public ResourceManagerService::SystemCallbackInterface {
+    SystemCallbackImpl() {}
 
-ResourceManagerService::ResourceManagerService(sp<ProcessInfoInterface> processInfo)
+    virtual void noteStartVideo(int uid) override {
+        BatteryNotifier::getInstance().noteStartVideo(uid);
+    }
+    virtual void noteStopVideo(int uid) override {
+        BatteryNotifier::getInstance().noteStopVideo(uid);
+    }
+    virtual void noteResetVideo() override {
+        BatteryNotifier::getInstance().noteResetVideo();
+    }
+    virtual bool requestCpusetBoost(
+            bool enable, const sp<IInterface> &client) override {
+        return android::requestCpusetBoost(enable, client);
+    }
+
+protected:
+    virtual ~SystemCallbackImpl() {}
+
+private:
+    DISALLOW_EVIL_CONSTRUCTORS(SystemCallbackImpl);
+};
+
+ResourceManagerService::ResourceManagerService()
+    : ResourceManagerService(new ProcessInfo(), new SystemCallbackImpl()) {}
+
+ResourceManagerService::ResourceManagerService(
+        const sp<ProcessInfoInterface> &processInfo,
+        const sp<SystemCallbackInterface> &systemResource)
     : mProcessInfo(processInfo),
+      mSystemCB(systemResource),
       mServiceLog(new ServiceLog()),
       mSupportsMultipleSecureCodecs(true),
       mSupportsSecureWithNonSecureCodec(true),
-      mCpuBoostCount(0) {}
+      mCpuBoostCount(0) {
+    mSystemCB->noteResetVideo();
+}
 
 ResourceManagerService::~ResourceManagerService() {}
 
@@ -224,8 +258,41 @@
     }
 }
 
+void ResourceManagerService::onFirstAdded(
+        const MediaResource& resource, const ResourceInfo& clientInfo) {
+    // first time added
+    if (resource.mType == MediaResource::kCpuBoost
+     && resource.mSubType == MediaResource::kUnspecifiedSubType) {
+        // Request the boost for every new kCpuBoost instance: media.codec could
+        // have died, and if we only requested it the first time, subsequent
+        // instances would never get the boost.
+        if (mSystemCB->requestCpusetBoost(true, this) != OK) {
+            ALOGW("couldn't request cpuset boost");
+        }
+        mCpuBoostCount++;
+    } else if (resource.mType == MediaResource::kBattery
+            && resource.mSubType == MediaResource::kVideoCodec) {
+        mSystemCB->noteStartVideo(clientInfo.uid);
+    }
+}
+
+void ResourceManagerService::onLastRemoved(
+        const MediaResource& resource, const ResourceInfo& clientInfo) {
+    if (resource.mType == MediaResource::kCpuBoost
+            && resource.mSubType == MediaResource::kUnspecifiedSubType
+            && mCpuBoostCount > 0) {
+        if (--mCpuBoostCount == 0) {
+            mSystemCB->requestCpusetBoost(false, this);
+        }
+    } else if (resource.mType == MediaResource::kBattery
+            && resource.mSubType == MediaResource::kVideoCodec) {
+        mSystemCB->noteStopVideo(clientInfo.uid);
+    }
+}
+
 void ResourceManagerService::addResource(
         int pid,
+        int uid,
         int64_t clientId,
         const sp<IResourceManagerClient> client,
         const Vector<MediaResource> &resources) {
@@ -239,20 +306,15 @@
         return;
     }
     ResourceInfos& infos = getResourceInfosForEdit(pid, mMap);
-    ResourceInfo& info = getResourceInfoForEdit(clientId, client, infos);
-    // TODO: do the merge instead of append.
-    info.resources.appendVector(resources);
+    ResourceInfo& info = getResourceInfoForEdit(uid, clientId, client, infos);
 
     for (size_t i = 0; i < resources.size(); ++i) {
-        if (resources[i].mType == MediaResource::kCpuBoost && !info.cpuBoost) {
-            info.cpuBoost = true;
-            // Request it on every new instance of kCpuBoost, as the media.codec
-            // could have died, if we only do it the first time subsequent instances
-            // never gets the boost.
-            if (requestCpusetBoost(true, this) != OK) {
-                ALOGW("couldn't request cpuset boost");
-            }
-            mCpuBoostCount++;
+        const auto resType = std::make_pair(resources[i].mType, resources[i].mSubType);
+        if (info.resources.find(resType) == info.resources.end()) {
+            onFirstAdded(resources[i], info);
+            info.resources[resType] = resources[i];
+        } else {
+            info.resources[resType].mValue += resources[i].mValue;
         }
     }
     if (info.deathNotifier == nullptr) {
@@ -262,7 +324,48 @@
     notifyResourceGranted(pid, resources);
 }
 
-void ResourceManagerService::removeResource(int pid, int64_t clientId) {
+void ResourceManagerService::removeResource(int pid, int64_t clientId,
+        const Vector<MediaResource> &resources) {
+    String8 log = String8::format("removeResource(pid %d, clientId %lld, resources %s)",
+            pid, (long long) clientId, getString(resources).string());
+    mServiceLog->add(log);
+
+    Mutex::Autolock lock(mLock);
+    if (!mProcessInfo->isValidPid(pid)) {
+        ALOGE("Rejected removeResource call with invalid pid.");
+        return;
+    }
+    ssize_t index = mMap.indexOfKey(pid);
+    if (index < 0) {
+        ALOGV("removeResource: didn't find pid %d for clientId %lld", pid, (long long) clientId);
+        return;
+    }
+    ResourceInfos &infos = mMap.editValueAt(index);
+
+    index = infos.indexOfKey(clientId);
+    if (index < 0) {
+        ALOGV("removeResource: didn't find clientId %lld", (long long) clientId);
+        return;
+    }
+
+    ResourceInfo &info = infos.editValueAt(index);
+
+    for (size_t i = 0; i < resources.size(); ++i) {
+        const auto resType = std::make_pair(resources[i].mType, resources[i].mSubType);
+        // ignore if we don't have it
+        if (info.resources.find(resType) != info.resources.end()) {
+            MediaResource &resource = info.resources[resType];
+            if (resource.mValue > resources[i].mValue) {
+                resource.mValue -= resources[i].mValue;
+            } else {
+                onLastRemoved(resources[i], info);
+                info.resources.erase(resType);
+            }
+        }
+    }
+}
+
+void ResourceManagerService::removeClient(int pid, int64_t clientId) {
     removeResource(pid, clientId, true);
 }
 
@@ -282,24 +385,22 @@
         ALOGV("removeResource: didn't find pid %d for clientId %lld", pid, (long long) clientId);
         return;
     }
-    bool found = false;
     ResourceInfos &infos = mMap.editValueAt(index);
-    for (size_t j = 0; j < infos.size(); ++j) {
-        if (infos[j].clientId == clientId) {
-            if (infos[j].cpuBoost && mCpuBoostCount > 0) {
-                if (--mCpuBoostCount == 0) {
-                    requestCpusetBoost(false, this);
-                }
-            }
-            IInterface::asBinder(infos[j].client)->unlinkToDeath(infos[j].deathNotifier);
-            j = infos.removeAt(j);
-            found = true;
-            break;
-        }
+
+    index = infos.indexOfKey(clientId);
+    if (index < 0) {
+        ALOGV("removeResource: didn't find clientId %lld", (long long) clientId);
+        return;
     }
-    if (!found) {
-        ALOGV("didn't find client");
+
+    const ResourceInfo &info = infos[index];
+    for (auto it = info.resources.begin(); it != info.resources.end(); it++) {
+        onLastRemoved(it->second, info);
     }
+
+    IInterface::asBinder(info.client)->unlinkToDeath(info.deathNotifier);
+
+    infos.removeItemsAt(index);
 }
 
 void ResourceManagerService::getClientForResource_l(
@@ -410,7 +511,7 @@
             ResourceInfos &infos = mMap.editValueAt(i);
             for (size_t j = 0; j < infos.size();) {
                 if (infos[j].client == failedClient) {
-                    j = infos.removeAt(j);
+                    j = infos.removeItemsAt(j);
                     found = true;
                 } else {
                     ++j;
@@ -538,11 +639,12 @@
     uint64_t largestValue = 0;
     const ResourceInfos &infos = mMap.valueAt(index);
     for (size_t i = 0; i < infos.size(); ++i) {
-        Vector<MediaResource> resources = infos[i].resources;
-        for (size_t j = 0; j < resources.size(); ++j) {
-            if (resources[j].mType == type) {
-                if (resources[j].mValue > largestValue) {
-                    largestValue = resources[j].mValue;
+        const ResourceList &resources = infos[i].resources;
+        for (auto it = resources.begin(); it != resources.end(); it++) {
+            const MediaResource &resource = it->second;
+            if (resource.mType == type) {
+                if (resource.mValue > largestValue) {
+                    largestValue = resource.mValue;
                     clientTemp = infos[i].client;
                 }
             }
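
The switch from Vector<MediaResource> to a ResourceList keyed by (type, subType) is what lets
addResource() merge counts and removeResource() subtract them, with onFirstAdded()/onLastRemoved()
firing only at the 0 -> N and N -> 0 transitions. The bookkeeping in isolation, as a runnable
sketch with plain std types:

    #include <cstdint>
    #include <map>
    #include <utility>

    // Stand-in for ResourceList: (type, subType) -> value; the real map stores MediaResource.
    using ResKey  = std::pair<int, int>;
    using ResList = std::map<ResKey, uint64_t>;

    // Mirrors addResource(): create on first sight, otherwise accumulate.
    void addRes(ResList& list, ResKey key, uint64_t value) {
        auto it = list.find(key);
        if (it == list.end()) {
            list[key] = value;          // onFirstAdded() fires here in the service
        } else {
            it->second += value;
        }
    }

    // Mirrors removeResource(): subtract, and erase once nothing is left.
    void removeRes(ResList& list, ResKey key, uint64_t value) {
        auto it = list.find(key);
        if (it == list.end()) return;   // ignore resources we never tracked
        if (it->second > value) {
            it->second -= value;
        } else {
            list.erase(it);             // onLastRemoved() fires here in the service
        }
    }
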
diff --git a/services/mediaresourcemanager/ResourceManagerService.h b/services/mediaresourcemanager/ResourceManagerService.h
index 82d2a0b..f086dc3 100644
--- a/services/mediaresourcemanager/ResourceManagerService.h
+++ b/services/mediaresourcemanager/ResourceManagerService.h
@@ -33,15 +33,17 @@
 class ServiceLog;
 struct ProcessInfoInterface;
 
+typedef std::map<std::pair<MediaResource::Type, MediaResource::SubType>, MediaResource> ResourceList;
 struct ResourceInfo {
     int64_t clientId;
+    uid_t uid;
     sp<IResourceManagerClient> client;
     sp<IBinder::DeathRecipient> deathNotifier;
-    Vector<MediaResource> resources;
-    bool cpuBoost;
+    ResourceList resources;
 };
 
-typedef Vector<ResourceInfo> ResourceInfos;
+// TODO: convert these to std::map
+typedef KeyedVector<int64_t, ResourceInfo> ResourceInfos;
 typedef KeyedVector<int, ResourceInfos> PidResourceInfosMap;
 
 class ResourceManagerService
@@ -49,23 +51,37 @@
       public BnResourceManagerService
 {
 public:
+    struct SystemCallbackInterface : public RefBase {
+        virtual void noteStartVideo(int uid) = 0;
+        virtual void noteStopVideo(int uid) = 0;
+        virtual void noteResetVideo() = 0;
+        virtual bool requestCpusetBoost(
+                bool enable, const sp<IInterface> &client) = 0;
+    };
+
     static char const *getServiceName() { return "media.resource_manager"; }
 
     virtual status_t dump(int fd, const Vector<String16>& args);
 
     ResourceManagerService();
-    explicit ResourceManagerService(sp<ProcessInfoInterface> processInfo);
+    explicit ResourceManagerService(
+            const sp<ProcessInfoInterface> &processInfo,
+            const sp<SystemCallbackInterface> &systemResource);
 
     // IResourceManagerService interface
     virtual void config(const Vector<MediaResourcePolicy> &policies);
 
     virtual void addResource(
             int pid,
+            int uid,
             int64_t clientId,
             const sp<IResourceManagerClient> client,
             const Vector<MediaResource> &resources);
 
-    virtual void removeResource(int pid, int64_t clientId);
+    virtual void removeResource(int pid, int64_t clientId,
+            const Vector<MediaResource> &resources);
+
+    virtual void removeClient(int pid, int64_t clientId);
 
     // Tries to reclaim resource from processes with lower priority than the calling process
     // according to the requested resources.
@@ -107,8 +123,12 @@
     void getClientForResource_l(
         int callingPid, const MediaResource *res, Vector<sp<IResourceManagerClient>> *clients);
 
+    void onFirstAdded(const MediaResource& res, const ResourceInfo& clientInfo);
+    void onLastRemoved(const MediaResource& res, const ResourceInfo& clientInfo);
+
     mutable Mutex mLock;
     sp<ProcessInfoInterface> mProcessInfo;
+    sp<SystemCallbackInterface> mSystemCB;
     sp<ServiceLog> mServiceLog;
     PidResourceInfosMap mMap;
     bool mSupportsMultipleSecureCodecs;
diff --git a/services/mediaresourcemanager/TEST_MAPPING b/services/mediaresourcemanager/TEST_MAPPING
new file mode 100644
index 0000000..418b159
--- /dev/null
+++ b/services/mediaresourcemanager/TEST_MAPPING
@@ -0,0 +1,10 @@
+{
+  "presubmit": [
+    {
+       "name": "ResourceManagerService_test"
+    },
+    {
+       "name": "ServiceLog_test"
+    }
+  ]
+}
diff --git a/services/mediaresourcemanager/test/Android.bp b/services/mediaresourcemanager/test/Android.bp
index 70e8833..543c87c 100644
--- a/services/mediaresourcemanager/test/Android.bp
+++ b/services/mediaresourcemanager/test/Android.bp
@@ -2,6 +2,7 @@
 cc_test {
     name: "ResourceManagerService_test",
     srcs: ["ResourceManagerService_test.cpp"],
+    test_suites: ["device-tests"],
     shared_libs: [
         "libbinder",
         "liblog",
@@ -23,6 +24,7 @@
 cc_test {
     name: "ServiceLog_test",
     srcs: ["ServiceLog_test.cpp"],
+    test_suites: ["device-tests"],
     shared_libs: [
         "liblog",
         "libmedia",
diff --git a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
index ed0b0ef..ae97ec8 100644
--- a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
+++ b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
@@ -52,13 +52,69 @@
     DISALLOW_EVIL_CONSTRUCTORS(TestProcessInfo);
 };
 
+struct TestSystemCallback :
+        public ResourceManagerService::SystemCallbackInterface {
+    TestSystemCallback() :
+        mLastEvent({EventType::INVALID, 0}), mEventCount(0) {}
+
+    enum EventType {
+        INVALID          = -1,
+        VIDEO_ON         = 0,
+        VIDEO_OFF        = 1,
+        VIDEO_RESET      = 2,
+        CPUSET_ENABLE    = 3,
+        CPUSET_DISABLE   = 4,
+    };
+
+    struct EventEntry {
+        EventType type;
+        int arg;
+    };
+
+    virtual void noteStartVideo(int uid) override {
+        mLastEvent = {EventType::VIDEO_ON, uid};
+        mEventCount++;
+    }
+
+    virtual void noteStopVideo(int uid) override {
+        mLastEvent = {EventType::VIDEO_OFF, uid};
+        mEventCount++;
+    }
+
+    virtual void noteResetVideo() override {
+        mLastEvent = {EventType::VIDEO_RESET, 0};
+        mEventCount++;
+    }
+
+    virtual bool requestCpusetBoost(
+            bool enable, const sp<IInterface> &/*client*/) override {
+        mLastEvent = {enable ? EventType::CPUSET_ENABLE : EventType::CPUSET_DISABLE, 0};
+        mEventCount++;
+        return true;
+    }
+
+    size_t eventCount() { return mEventCount; }
+    EventType lastEventType() { return mLastEvent.type; }
+    EventEntry lastEvent() { return mLastEvent; }
+
+protected:
+    virtual ~TestSystemCallback() {}
+
+private:
+    EventEntry mLastEvent;
+    size_t mEventCount;
+
+    DISALLOW_EVIL_CONSTRUCTORS(TestSystemCallback);
+};
+
+
 struct TestClient : public BnResourceManagerClient {
     TestClient(int pid, sp<ResourceManagerService> service)
         : mReclaimed(false), mPid(pid), mService(service) {}
 
     virtual bool reclaimResource() {
         sp<IResourceManagerClient> client(this);
-        mService->removeResource(mPid, (int64_t) client.get());
+        mService->removeClient(mPid, (int64_t) client.get());
         mReclaimed = true;
         return true;
     }
@@ -86,16 +142,26 @@
 };
 
 static const int kTestPid1 = 30;
+static const int kTestUid1 = 1010;
+
 static const int kTestPid2 = 20;
+static const int kTestUid2 = 1011;
 
 static const int kLowPriorityPid = 40;
 static const int kMidPriorityPid = 25;
 static const int kHighPriorityPid = 10;
 
+using EventType = TestSystemCallback::EventType;
+using EventEntry = TestSystemCallback::EventEntry;
+bool operator== (const EventEntry& lhs, const EventEntry& rhs) {
+    return lhs.type == rhs.type && lhs.arg == rhs.arg;
+}
+
 class ResourceManagerServiceTest : public ::testing::Test {
 public:
     ResourceManagerServiceTest()
-        : mService(new ResourceManagerService(new TestProcessInfo)),
+        : mSystemCB(new TestSystemCallback()),
+          mService(new ResourceManagerService(new TestProcessInfo, mSystemCB)),
           mTestClient1(new TestClient(kTestPid1, mService)),
           mTestClient2(new TestClient(kTestPid2, mService)),
           mTestClient3(new TestClient(kTestPid2, mService)) {
@@ -103,20 +169,21 @@
 
 protected:
     static bool isEqualResources(const Vector<MediaResource> &resources1,
-            const Vector<MediaResource> &resources2) {
-        if (resources1.size() != resources2.size()) {
-            return false;
-        }
+            const ResourceList &resources2) {
+        // convert resources1 to a ResourceList
+        ResourceList r1;
         for (size_t i = 0; i < resources1.size(); ++i) {
-            if (resources1[i] != resources2[i]) {
-                return false;
-            }
+            const auto resType = std::make_pair(resources1[i].mType, resources1[i].mSubType);
+            r1[resType] = resources1[i];
         }
-        return true;
+        return r1 == resources2;
     }
 
-    static void expectEqResourceInfo(const ResourceInfo &info, sp<IResourceManagerClient> client,
+    static void expectEqResourceInfo(const ResourceInfo &info,
+            int uid,
+            sp<IResourceManagerClient> client,
             const Vector<MediaResource> &resources) {
+        EXPECT_EQ(uid, info.uid);
         EXPECT_EQ(client, info.client);
         EXPECT_TRUE(isEqualResources(resources, info.resources));
     }
@@ -153,24 +220,24 @@
         // kTestPid1 mTestClient1
         Vector<MediaResource> resources1;
         resources1.push_back(MediaResource(MediaResource::kSecureCodec, 1));
-        mService->addResource(kTestPid1, getId(mTestClient1), mTestClient1, resources1);
+        mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
         resources1.push_back(MediaResource(MediaResource::kGraphicMemory, 200));
         Vector<MediaResource> resources11;
         resources11.push_back(MediaResource(MediaResource::kGraphicMemory, 200));
-        mService->addResource(kTestPid1, getId(mTestClient1), mTestClient1, resources11);
+        mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources11);
 
         // kTestPid2 mTestClient2
         Vector<MediaResource> resources2;
         resources2.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
         resources2.push_back(MediaResource(MediaResource::kGraphicMemory, 300));
-        mService->addResource(kTestPid2, getId(mTestClient2), mTestClient2, resources2);
+        mService->addResource(kTestPid2, kTestUid2, getId(mTestClient2), mTestClient2, resources2);
 
         // kTestPid2 mTestClient3
         Vector<MediaResource> resources3;
-        mService->addResource(kTestPid2, getId(mTestClient3), mTestClient3, resources3);
+        mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources3);
         resources3.push_back(MediaResource(MediaResource::kSecureCodec, 1));
         resources3.push_back(MediaResource(MediaResource::kGraphicMemory, 100));
-        mService->addResource(kTestPid2, getId(mTestClient3), mTestClient3, resources3);
+        mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources3);
 
         const PidResourceInfosMap &map = mService->mMap;
         EXPECT_EQ(2u, map.size());
@@ -178,14 +245,14 @@
         ASSERT_GE(index1, 0);
         const ResourceInfos &infos1 = map[index1];
         EXPECT_EQ(1u, infos1.size());
-        expectEqResourceInfo(infos1[0], mTestClient1, resources1);
+        expectEqResourceInfo(infos1.valueFor(getId(mTestClient1)), kTestUid1, mTestClient1, resources1);
 
         ssize_t index2 = map.indexOfKey(kTestPid2);
         ASSERT_GE(index2, 0);
         const ResourceInfos &infos2 = map[index2];
         EXPECT_EQ(2u, infos2.size());
-        expectEqResourceInfo(infos2[0], mTestClient2, resources2);
-        expectEqResourceInfo(infos2[1], mTestClient3, resources3);
+        expectEqResourceInfo(infos2.valueFor(getId(mTestClient2)), kTestUid2, mTestClient2, resources2);
+        expectEqResourceInfo(infos2.valueFor(getId(mTestClient3)), kTestUid2, mTestClient3, resources3);
     }
 
     void testConfig() {
@@ -219,10 +286,84 @@
         EXPECT_TRUE(mService->mSupportsSecureWithNonSecureCodec);
     }
 
+    void testCombineResource() {
+        // kTestPid1 mTestClient1
+        Vector<MediaResource> resources1;
+        resources1.push_back(MediaResource(MediaResource::kSecureCodec, 1));
+        mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+
+        Vector<MediaResource> resources11;
+        resources11.push_back(MediaResource(MediaResource::kGraphicMemory, 200));
+        mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources11);
+
+        const PidResourceInfosMap &map = mService->mMap;
+        EXPECT_EQ(1u, map.size());
+        ssize_t index1 = map.indexOfKey(kTestPid1);
+        ASSERT_GE(index1, 0);
+        const ResourceInfos &infos1 = map[index1];
+        EXPECT_EQ(1u, infos1.size());
+
+        // test adding existing types to combine values
+        resources1.push_back(MediaResource(MediaResource::kGraphicMemory, 100));
+        mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+
+        Vector<MediaResource> expected;
+        expected.push_back(MediaResource(MediaResource::kSecureCodec, 2));
+        expected.push_back(MediaResource(MediaResource::kGraphicMemory, 300));
+        expectEqResourceInfo(infos1.valueFor(getId(mTestClient1)), kTestUid1, mTestClient1, expected);
+
+        // test adding new types (including types that differ only in subType)
+        resources11.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
+        resources11.push_back(MediaResource(MediaResource::kSecureCodec, MediaResource::kVideoCodec, 1));
+        mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources11);
+
+        expected.clear();
+        expected.push_back(MediaResource(MediaResource::kSecureCodec, 2));
+        expected.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
+        expected.push_back(MediaResource(MediaResource::kSecureCodec, MediaResource::kVideoCodec, 1));
+        expected.push_back(MediaResource(MediaResource::kGraphicMemory, 500));
+        expectEqResourceInfo(infos1.valueFor(getId(mTestClient1)), kTestUid1, mTestClient1, expected);
+    }
+
     void testRemoveResource() {
+        // kTestPid1 mTestClient1
+        Vector<MediaResource> resources1;
+        resources1.push_back(MediaResource(MediaResource::kSecureCodec, 1));
+        mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+
+        Vector<MediaResource> resources11;
+        resources11.push_back(MediaResource(MediaResource::kGraphicMemory, 200));
+        mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources11);
+
+        const PidResourceInfosMap &map = mService->mMap;
+        EXPECT_EQ(1u, map.size());
+        ssize_t index1 = map.indexOfKey(kTestPid1);
+        ASSERT_GE(index1, 0);
+        const ResourceInfos &infos1 = map[index1];
+        EXPECT_EQ(1u, infos1.size());
+
+        // test partial removal
+        resources11.editItemAt(0).mValue = 100;
+        mService->removeResource(kTestPid1, getId(mTestClient1), resources11);
+
+        Vector<MediaResource> expected;
+        expected.push_back(MediaResource(MediaResource::kSecureCodec, 1));
+        expected.push_back(MediaResource(MediaResource::kGraphicMemory, 100));
+        expectEqResourceInfo(infos1.valueFor(getId(mTestClient1)), kTestUid1, mTestClient1, expected);
+
+        // test complete removal with overshoot value
+        resources11.editItemAt(0).mValue = 1000;
+        mService->removeResource(kTestPid1, getId(mTestClient1), resources11);
+
+        expected.clear();
+        expected.push_back(MediaResource(MediaResource::kSecureCodec, 1));
+        expectEqResourceInfo(infos1.valueFor(getId(mTestClient1)), kTestUid1, mTestClient1, expected);
+    }
+
+    void testRemoveClient() {
         addResource();
 
-        mService->removeResource(kTestPid2, getId(mTestClient2));
+        mService->removeClient(kTestPid2, getId(mTestClient2));
 
         const PidResourceInfosMap &map = mService->mMap;
         EXPECT_EQ(2u, map.size());
@@ -231,6 +372,7 @@
         EXPECT_EQ(1u, infos1.size());
         EXPECT_EQ(1u, infos2.size());
         // mTestClient2 has been removed.
+        // (OK to use infos2[0] as there is only 1 entry)
         EXPECT_EQ(mTestClient3, infos2[0].client);
     }
 
@@ -246,6 +388,7 @@
         EXPECT_TRUE(mService->getAllClients_l(kHighPriorityPid, type, &clients));
 
         EXPECT_EQ(2u, clients.size());
+        // (OK to require ordering in clients[], as the pid map is sorted)
         EXPECT_EQ(mTestClient3, clients[0]);
         EXPECT_EQ(mTestClient1, clients[1]);
     }
@@ -438,7 +581,7 @@
             verifyClients(true /* c1 */, false /* c2 */, false /* c3 */);
 
             // clean up client 3, which is still left
-            mService->removeResource(kTestPid2, getId(mTestClient3));
+            mService->removeClient(kTestPid2, getId(mTestClient3));
         }
     }
 
@@ -498,6 +641,84 @@
         EXPECT_TRUE(mService->isCallingPriorityHigher_l(99, 100));
     }
 
+    void testBatteryStats() {
+        // reset should always be called when ResourceManagerService is created (restarted)
+        EXPECT_EQ(1u, mSystemCB->eventCount());
+        EXPECT_EQ(EventType::VIDEO_RESET, mSystemCB->lastEventType());
+
+        // new client request should cause VIDEO_ON
+        Vector<MediaResource> resources1;
+        resources1.push_back(MediaResource(MediaResource::kBattery, MediaResource::kVideoCodec, 1));
+        mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+        EXPECT_EQ(2u, mSystemCB->eventCount());
+        EXPECT_EQ(EventEntry({EventType::VIDEO_ON, kTestUid1}), mSystemCB->lastEvent());
+
+        // each client should only cause 1 VIDEO_ON
+        mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+        EXPECT_EQ(2u, mSystemCB->eventCount());
+
+        // new client request should cause VIDEO_ON
+        Vector<MediaResource> resources2;
+        resources2.push_back(MediaResource(MediaResource::kBattery, MediaResource::kVideoCodec, 2));
+        mService->addResource(kTestPid2, kTestUid2, getId(mTestClient2), mTestClient2, resources2);
+        EXPECT_EQ(3u, mSystemCB->eventCount());
+        EXPECT_EQ(EventEntry({EventType::VIDEO_ON, kTestUid2}), mSystemCB->lastEvent());
+
+        // partially remove mTestClient1's request, shouldn't be any VIDEO_OFF
+        mService->removeResource(kTestPid1, getId(mTestClient1), resources1);
+        EXPECT_EQ(3u, mSystemCB->eventCount());
+
+        // remove mTestClient1's request, should be VIDEO_OFF for kTestUid1
+        // (use resources2 to test removing more instances than previously requested)
+        mService->removeResource(kTestPid1, getId(mTestClient1), resources2);
+        EXPECT_EQ(4u, mSystemCB->eventCount());
+        EXPECT_EQ(EventEntry({EventType::VIDEO_OFF, kTestUid1}), mSystemCB->lastEvent());
+
+        // remove mTestClient2, should be VIDEO_OFF for kTestUid2
+        mService->removeClient(kTestPid2, getId(mTestClient2));
+        EXPECT_EQ(5u, mSystemCB->eventCount());
+        EXPECT_EQ(EventEntry({EventType::VIDEO_OFF, kTestUid2}), mSystemCB->lastEvent());
+    }
+
+    void testCpusetBoost() {
+        // reset should always be called when ResourceManagerService is created (restarted)
+        EXPECT_EQ(1u, mSystemCB->eventCount());
+        EXPECT_EQ(EventType::VIDEO_RESET, mSystemCB->lastEventType());
+
+        // new client request should cause CPUSET_ENABLE
+        Vector<MediaResource> resources1;
+        resources1.push_back(MediaResource(MediaResource::kCpuBoost, 1));
+        mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+        EXPECT_EQ(2u, mSystemCB->eventCount());
+        EXPECT_EQ(EventType::CPUSET_ENABLE, mSystemCB->lastEventType());
+
+        // each client should only cause 1 CPUSET_ENABLE
+        mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+        EXPECT_EQ(2u, mSystemCB->eventCount());
+
+        // new client request should cause CPUSET_ENABLE
+        Vector<MediaResource> resources2;
+        resources2.push_back(MediaResource(MediaResource::kCpuBoost, 2));
+        mService->addResource(kTestPid2, kTestUid2, getId(mTestClient2), mTestClient2, resources2);
+        EXPECT_EQ(3u, mSystemCB->eventCount());
+        EXPECT_EQ(EventType::CPUSET_ENABLE, mSystemCB->lastEventType());
+
+        // removing mTestClient2 should not cause CPUSET_DISABLE; mTestClient1 is still active
+        mService->removeClient(kTestPid2, getId(mTestClient2));
+        EXPECT_EQ(3u, mSystemCB->eventCount());
+
+        // remove 1 cpuboost from mTestClient1, should not be CPUSET_DISABLE (still 1 left)
+        mService->removeResource(kTestPid1, getId(mTestClient1), resources1);
+        EXPECT_EQ(3u, mSystemCB->eventCount());
+
+        // remove 2 cpuboost from mTestClient1, should be CPUSET_DISABLE
+        // (use resources2 to test removing more than previously requested)
+        mService->removeResource(kTestPid1, getId(mTestClient1), resources2);
+        EXPECT_EQ(4u, mSystemCB->eventCount());
+        EXPECT_EQ(EventType::CPUSET_DISABLE, mSystemCB->lastEventType());
+    }
+
+    sp<TestSystemCallback> mSystemCB;
     sp<ResourceManagerService> mService;
     sp<IResourceManagerClient> mTestClient1;
     sp<IResourceManagerClient> mTestClient2;
@@ -512,10 +733,18 @@
     addResource();
 }
 
+TEST_F(ResourceManagerServiceTest, combineResource) {
+    testCombineResource();
+}
+
 TEST_F(ResourceManagerServiceTest, removeResource) {
     testRemoveResource();
 }
 
+TEST_F(ResourceManagerServiceTest, removeClient) {
+    testRemoveClient();
+}
+
 TEST_F(ResourceManagerServiceTest, reclaimResource) {
     testReclaimResourceSecure();
     testReclaimResourceNonSecure();
@@ -541,4 +770,12 @@
     testIsCallingPriorityHigher();
 }
 
+TEST_F(ResourceManagerServiceTest, testBatteryStats) {
+    testBatteryStats();
+}
+
+TEST_F(ResourceManagerServiceTest, testCpusetBoost) {
+    testCpusetBoost();
+}
+
 } // namespace android
diff --git a/services/oboeservice/Android.bp b/services/oboeservice/Android.bp
index 655f017..1b7a20c 100644
--- a/services/oboeservice/Android.bp
+++ b/services/oboeservice/Android.bp
@@ -43,7 +43,7 @@
     ],
 
     shared_libs: [
-        "libaaudio",
+        "libaaudio_internal",
         "libaudioclient",
         "libaudioflinger",
         "libbase",
diff --git a/services/soundtrigger/Android.bp b/services/soundtrigger/Android.bp
index 3f02f48..1bbd591 100644
--- a/services/soundtrigger/Android.bp
+++ b/services/soundtrigger/Android.bp
@@ -31,10 +31,8 @@
         "libaudioutils",
         "libmediautils",
 
-        "libhwbinder",
         "libhidlbase",
         "libhidlmemory",
-        "libhidltransport",
         "libbase",
         "libaudiohal",
         "libaudiohal_deathhandler",
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 377d30b..ccbeb77 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -40,6 +40,32 @@
 #define HW_MODULE_PREFIX "primary"
 namespace android {
 
+namespace {
+
+// Given an IMemory, returns a copy of its content along with its size.
+// Returns {nullptr, 0} on failure or if the input is null or empty.
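+// The copy lets callers validate and use the data without the shared-memory
+// owner being able to modify it concurrently.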
+std::pair<std::unique_ptr<uint8_t[]>,
+          size_t> CopyToArray(const sp<IMemory>& mem) {
+    if (mem == nullptr) {
+        return std::make_pair(nullptr, 0);
+    }
+
+    const size_t size = mem->size();
+    if (size == 0) {
+        return std::make_pair(nullptr, 0);
+    }
+
+    std::unique_ptr<uint8_t[]> ar = std::make_unique<uint8_t[]>(size);
+    if (ar == nullptr) {
+        return std::make_pair(nullptr, 0);
+    }
+
+    memcpy(ar.get(), mem->unsecurePointer(), size);
+    return std::make_pair(std::move(ar), size);
+}
+
+}  // namespace
+
 SoundTriggerHwService::SoundTriggerHwService()
     : BnSoundTriggerHwService(),
       mNextUniqueId(1),
@@ -234,11 +260,11 @@
 
     size_t size = event->data_offset + event->data_size;
     eventMemory = mMemoryDealer->allocate(size);
-    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+    if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
         eventMemory.clear();
         return eventMemory;
     }
-    memcpy(eventMemory->pointer(), event, size);
+    memcpy(eventMemory->unsecurePointer(), event, size);
 
     return eventMemory;
 }
@@ -283,11 +309,11 @@
 
     size_t size = event->data_offset + event->data_size;
     eventMemory = mMemoryDealer->allocate(size);
-    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+    if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
         eventMemory.clear();
         return eventMemory;
     }
-    memcpy(eventMemory->pointer(), event, size);
+    memcpy(eventMemory->unsecurePointer(), event, size);
 
     return eventMemory;
 }
@@ -313,11 +339,11 @@
 
     size_t size = sizeof(sound_trigger_service_state_t);
     eventMemory = mMemoryDealer->allocate(size);
-    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+    if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
         eventMemory.clear();
         return eventMemory;
     }
-    *((sound_trigger_service_state_t *)eventMemory->pointer()) = state;
+    *((sound_trigger_service_state_t *)eventMemory->unsecurePointer()) = state;
     return eventMemory;
 }
 
@@ -557,8 +583,13 @@
         return NO_INIT;
     }
 
-    struct sound_trigger_sound_model *sound_model =
-            (struct sound_trigger_sound_model *)modelMemory->pointer();
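+    // Validate against a local copy so the shared memory contents cannot
+    // change underneath the size checks below.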
+    auto immutableMemory = CopyToArray(modelMemory);
+    if (immutableMemory.first == nullptr) {
+        return NO_MEMORY;
+    }
+
+    struct sound_trigger_sound_model* sound_model =
+        (struct sound_trigger_sound_model*) immutableMemory.first.get();
 
     size_t structSize;
     if (sound_model->type == SOUND_MODEL_TYPE_KEYPHRASE) {
@@ -568,9 +599,10 @@
     }
 
     if (sound_model->data_offset < structSize ||
-           sound_model->data_size > (UINT_MAX - sound_model->data_offset) ||
-           modelMemory->size() < sound_model->data_offset ||
-           sound_model->data_size > (modelMemory->size() - sound_model->data_offset)) {
+        sound_model->data_size > (UINT_MAX - sound_model->data_offset) ||
+        immutableMemory.second < sound_model->data_offset ||
+            sound_model->data_size >
+            (immutableMemory.second - sound_model->data_offset)) {
         android_errorWriteLog(0x534e4554, "30148546");
         ALOGE("loadSoundModel() data_size is too big");
         return BAD_VALUE;
@@ -651,13 +683,19 @@
         return NO_INIT;
     }
 
-    struct sound_trigger_recognition_config *config =
-            (struct sound_trigger_recognition_config *)dataMemory->pointer();
+    auto immutableMemory = CopyToArray(dataMemory);
+    if (immutableMemory.first == nullptr) {
+        return NO_MEMORY;
+    }
+
+    struct sound_trigger_recognition_config* config =
+        (struct sound_trigger_recognition_config*) immutableMemory.first.get();
 
     if (config->data_offset < sizeof(struct sound_trigger_recognition_config) ||
-            config->data_size > (UINT_MAX - config->data_offset) ||
-            dataMemory->size() < config->data_offset ||
-            config->data_size > (dataMemory->size() - config->data_offset)) {
+        config->data_size > (UINT_MAX - config->data_offset) ||
+        immutableMemory.second < config->data_offset ||
+            config->data_size >
+            (immutableMemory.second - config->data_offset)) {
         ALOGE("startRecognition() data_size is too big");
         return BAD_VALUE;
     }
@@ -734,9 +772,10 @@
 {
     ALOGV("onCallbackEvent type %d", event->mType);
 
+    // Memory is coming from a trusted process.
     sp<IMemory> eventMemory = event->mMemory;
 
-    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+    if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
         return;
     }
     if (mModuleClients.isEmpty()) {
@@ -749,7 +788,7 @@
     switch (event->mType) {
     case CallbackEvent::TYPE_RECOGNITION: {
         struct sound_trigger_recognition_event *recognitionEvent =
-                (struct sound_trigger_recognition_event *)eventMemory->pointer();
+                (struct sound_trigger_recognition_event *)eventMemory->unsecurePointer();
         {
             AutoMutex lock(mLock);
             sp<Model> model = getModel(recognitionEvent->model);
@@ -769,7 +808,7 @@
     } break;
     case CallbackEvent::TYPE_SOUNDMODEL: {
         struct sound_trigger_model_event *soundmodelEvent =
-                (struct sound_trigger_model_event *)eventMemory->pointer();
+                (struct sound_trigger_model_event *)eventMemory->unsecurePointer();
         {
             AutoMutex lock(mLock);
             sp<Model> model = getModel(soundmodelEvent->model);
@@ -1082,7 +1121,8 @@
 
     sp<IMemory> eventMemory = event->mMemory;
 
-    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+    // Memory is coming from a trusted process.
+    if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
         return;
     }
 
diff --git a/soundtrigger/SoundTrigger.cpp b/soundtrigger/SoundTrigger.cpp
index 9708ea7..e297ee7 100644
--- a/soundtrigger/SoundTrigger.cpp
+++ b/soundtrigger/SoundTrigger.cpp
@@ -204,39 +204,42 @@
 void SoundTrigger::onRecognitionEvent(const sp<IMemory>& eventMemory)
 {
     Mutex::Autolock _l(mLock);
-    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+    if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
         return;
     }
 
     if (mCallback != 0) {
+        // Memory is coming from a trusted process.
         mCallback->onRecognitionEvent(
-                (struct sound_trigger_recognition_event *)eventMemory->pointer());
+                (struct sound_trigger_recognition_event *)eventMemory->unsecurePointer());
     }
 }
 
 void SoundTrigger::onSoundModelEvent(const sp<IMemory>& eventMemory)
 {
     Mutex::Autolock _l(mLock);
-    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+    if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
         return;
     }
 
     if (mCallback != 0) {
+        // Memory is coming from a trusted process.
         mCallback->onSoundModelEvent(
-                (struct sound_trigger_model_event *)eventMemory->pointer());
+                (struct sound_trigger_model_event *)eventMemory->unsecurePointer());
     }
 }
 
 void SoundTrigger::onServiceStateChange(const sp<IMemory>& eventMemory)
 {
     Mutex::Autolock _l(mLock);
-    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+    if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
         return;
     }
 
     if (mCallback != 0) {
+        // Memory is coming from a trusted process.
         mCallback->onServiceStateChange(
-                *((sound_trigger_service_state_t *)eventMemory->pointer()));
+                *((sound_trigger_service_state_t *)eventMemory->unsecurePointer()));
     }
 }