Merge changes from topic "xaac_buffer_20190204"

* changes:
  C2SoftXaac: Updating xaac plugin code after DRC memory cleanup
  Updating xaac plugin code after DRC memory cleanup
diff --git a/apex/Android.bp b/apex/Android.bp
index c077a77..9455290 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -12,9 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-apex {
-    name: "com.android.media",
-    manifest: "manifest.json",
+apex_defaults {
+    name: "com.android.media-defaults",
     java_libs: ["updatable-media"],
     compile_multilib: "both",
     multilib: {
@@ -42,16 +41,29 @@
         },
     },
     key: "com.android.media.key",
+    certificate: ":com.android.media.certificate",
 }
 
 apex {
-    name: "com.android.media.swcodec",
-    manifest: "manifest_codec.json",
+    name: "com.android.media",
+    manifest: "manifest.json",
+    defaults: ["com.android.media-defaults"],
+}
+
+apex_defaults {
+    name: "com.android.media.swcodec-defaults",
     native_shared_libs: [
         "libmedia_codecserviceregistrant",
     ],
     use_vendor: true,
     key: "com.android.media.swcodec.key",
+    certificate: ":com.android.media.swcodec.certificate",
+}
+
+apex {
+    name: "com.android.media.swcodec",
+    manifest: "manifest_codec.json",
+    defaults: ["com.android.media.swcodec-defaults"],
 }
 
 apex_key {
@@ -65,3 +77,13 @@
     public_key: "com.android.media.swcodec.avbpubkey",
     private_key: "com.android.media.swcodec.pem",
 }
+
+android_app_certificate {
+    name: "com.android.media.certificate",
+    certificate: "com.android.media",
+}
+
+android_app_certificate {
+    name: "com.android.media.swcodec.certificate",
+    certificate: "com.android.media.swcodec",
+}
diff --git a/apex/com.android.media.pk8 b/apex/com.android.media.pk8
new file mode 100644
index 0000000..6df741e
--- /dev/null
+++ b/apex/com.android.media.pk8
Binary files differ
diff --git a/apex/com.android.media.swcodec.pk8 b/apex/com.android.media.swcodec.pk8
new file mode 100644
index 0000000..05a4216
--- /dev/null
+++ b/apex/com.android.media.swcodec.pk8
Binary files differ
diff --git a/apex/com.android.media.swcodec.x509.pem b/apex/com.android.media.swcodec.x509.pem
new file mode 100644
index 0000000..67b9b4f
--- /dev/null
+++ b/apex/com.android.media.swcodec.x509.pem
@@ -0,0 +1,34 @@
+-----BEGIN CERTIFICATE-----
+MIIF3jCCA8agAwIBAgIJAIM72JpD4v6XMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD
+VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4g
+VmlldzEQMA4GA1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEiMCAGA1UE
+AwwZY29tLmFuZHJvaWQubWVkaWEuc3djb2RlYzAgFw0xOTAyMTEwMjExMTFaGA80
+NzU3MDEwNzAyMTExMVowgYIxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9y
+bmlhMRYwFAYDVQQHDA1Nb3VudGFpbiBWaWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAw
+DgYDVQQLDAdBbmRyb2lkMSIwIAYDVQQDDBljb20uYW5kcm9pZC5tZWRpYS5zd2Nv
+ZGVjMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsqXE0AIWpLW9Tgq2
+nQGph7KZ6L2Q9oxviqCVHxIaPqfhM2SwTbycADIQeqrrlRxhddVkjLuMUkJa7mev
+fERmgpiOfnPIlGK6PTs2gljCkskZhF3bgfeyuHt0tsYO+UaN8MVoZD7/QdiE46w2
+OMDClG1UqgiqOBhLTEN/cHXObnUiiVXUYqN8aYZf6L6Fs3yQi2ZZgfbxTVFewqdv
+aLLOqCYnVYXZH+ZxbXESA0M+WXKgRKsYTj2GYs3eko1rFi4Y6uHVLx45yaoT5u/i
+SxPEkocyMCKvGJWu4XlSOd3EjSOMaqCOYVyGLxdlnQWQU7PZDqBSJ0SysWgpFHpB
+I15c2jhRdXOCfQ9ZtDfPZkE0a2A8kJDAoF1mzTp6IvBAWUsl5nHPw5CWkFpNad/h
+tqqGCScWbiKZuvrQ4/RQNm3f1K+mxX9TrjFigpqNO6d4pGAo1fa6sHR3xWPw/myq
+h5ZJjVnXU5Yq64S4xWOssfjpOg7RfNuvzuk3ok3MYs1mbx3vhZOj5km1f3qrgX9c
+mXjYnyXD0jJBm4uAJWXLdK9PlZvlXbztMCzYj832Io4pFLCtSxkzX75t1em36Nv0
+mNp6NtSSy6SFSq8l7IsXV2FNyUiyHWxS/UQm8pYg5Q5dWHvEEF78P6lV0wRa6FQl
+BBSgpqTAI092KIjDDtB7GQCgV5ECAwEAAaNTMFEwHQYDVR0OBBYEFAFIdFTDEDft
+ewSSAS7Fa3OZ5TXzMB8GA1UdIwQYMBaAFAFIdFTDEDftewSSAS7Fa3OZ5TXzMA8G
+A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAC5e3zXythJCGmz1FmAV
+8Y/UI+Glg6G0x/k04WaRG0DPLLjlJ1F0LM1/IReBSgXcYAL0CAgPycf/rGPOgMFm
+tQxYyjBUxKdjpIqU5DJoV1feanGveIRpto1YRKNgHuzG9rZGR4AgPnt6X4Yxlq04
+lI7QpWadXe1myARJhj3niSNY9+2wEInkx4ZuCO1LtIGqnbdc8jQ8YoVqIE5N4kuM
+ccyPYgsdABtopbjN92rueu8sfF8R6ROy+tNgb6OjpAAevtnBfZ2LXqfObKirHCK+
+k6w4WSB1UUoZ3Xgz8sJtXgokvYeInkN8tHuTagHYU2VQTcA0rdBGMN/1OljJpWlN
+0UUq4fAYU6cN4lHxr2LM9If4WvAzdLAWvaIZrDqaU4i/zYT9l6rR4lC2KW3EHWov
+nPXfgEJJ8AP1iRGibvew3i3SB6XTWFQYTUIBeJfDz/KDXQabP+yzXWISdZCUMUpx
+f+Raqsb5MoKaJdVgnSL0mBunjCyJDzzg34J7oGx6/BnwoiOrwLN4Qaz5U8jbrPSx
+p9LfleCcO7ZdeE8GKqx0X1T4d7tradtmxOS8Iwr4niskkHGRkzozvVvuyGKmoN2k
+162Vfjq+ddj7qEpSh3BS6hHU+vlMbC9L0trGxPxFEAHDrwu0KwGNduTkiu/3jvfB
+JTgH8P9mD1loYxRdo+vet8eQ
+-----END CERTIFICATE-----
diff --git a/apex/com.android.media.x509.pem b/apex/com.android.media.x509.pem
new file mode 100644
index 0000000..e7908fa
--- /dev/null
+++ b/apex/com.android.media.x509.pem
@@ -0,0 +1,33 @@
+-----BEGIN CERTIFICATE-----
+MIIFzDCCA7SgAwIBAgIJAO05DBBusaaLMA0GCSqGSIb3DQEBCwUAMHoxCzAJBgNV
+BAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFpbiBW
+aWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRyb2lkMRowGAYDVQQD
+DBFjb20uYW5kcm9pZC5tZWRpYTAgFw0xOTAxMjUxNzE3MTdaGA80NzU2MTIyMTE3
+MTcxN1owejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNV
+BAcMDU1vdW50YWluIFZpZXcxEDAOBgNVBAoMB0FuZHJvaWQxEDAOBgNVBAsMB0Fu
+ZHJvaWQxGjAYBgNVBAMMEWNvbS5hbmRyb2lkLm1lZGlhMIICIjANBgkqhkiG9w0B
+AQEFAAOCAg8AMIICCgKCAgEAmNkVxUbp/bLbeGbvKqYXzwBycSDpmOhh///lNGYQ
+/AMUD0q6EaZzU2bd4aL0rOGqfoYlhKd0kMVmMUmfdE9ODAfKxleEeEaRl2GJS8a9
+ABi770l3GHbB2xMI2sEWeOD9xsPFF6+ByPZmoUuNhMr4pUbXsDpE3h8ljrgXHtIg
+bh7ofbvddruwBV0lS1k9OZ9jPVGhEKkJnhgQa67cwgdjizAMbI0Dcz9gtMMawsDj
+Z2aQd1r+vxgh1/XkI/NMmXCnG2ERytXcJeC5S4gEtHfTTPoP0FuVgSB6y6dalMuZ
+F0NBZw8Mvgdy3QJip0uNa36J63CMZKTJWbTdlFpPL2hk0PgaYvje8C5Xtk5282wT
+dMocc8n2zIXbzbnSXGvjcNZib3Pfu55YUnX6eTqZ1BxlJ0FHZAsC4quFFWXxYBYD
+LCRoNNFEtIDQpuvuHF2DuHNDULpAQjy2y6+7eot0KEsVoDmZ4H8BpuAVVu2SxYNb
+gYflR9SmM0tmYeAcRT48q3xrocGyEHMqvgQRUpPfvct/8l8xVcDzOI/sJVDqmYzM
+u0Cj3fkSypGDJOMF/esFSmVvoI01tS7kaNS5vvtKYib//xqKRC9f0dCsGfFLnuUK
+o4KYbYWYwMyJqEd/5/ZvXyKIPAEeJL174L9+wTkc3cQpoBwJN4t+2E5MnhOEq6do
+5L0CAwEAAaNTMFEwHQYDVR0OBBYEFHjNK/GZko1RdZp+8iavWXL5xz9wMB8GA1Ud
+IwQYMBaAFHjNK/GZko1RdZp+8iavWXL5xz9wMA8GA1UdEwEB/wQFMAMBAf8wDQYJ
+KoZIhvcNAQELBQADggIBACmPQMksuLrNV1vbI44S1f70I0FHdBxchFGB39zuLbcn
+SsYom/LPtYJiD0Dl4bB4eb+ZnxkQP2XeP6pycmUH2j1EWexFwvdUvlfe8Qz+wAec
+ap4AxiX4Z2Ke2ivYotIZFUHdZOLkX20js8Wex1mzY43MLQn5APl9gK1VZTxDggeR
+EObH1S+JVjGwQqYZj2e6gNZH34Q25NQ698RL85GDkYtSISAifJtaJsU/B3vKm82I
+k9xMiCooCH6bRdGHG1jze4SRpidjxEm8cxkiaQagfcuXeCLziXJr3qAMKYiEY6bp
+0+bAqCt3S8OrrN3RQZfQrnlwitsM1jJJ/+C+WoDg4eY5AFrXDLvNeKh1qO/f8xv+
+fCXkQPcVVphLfRH9oxNrSgOWBP5/qIDH4s1YUL9luGT6H+08dlue3RkbzDbBqsQu
+7fQ/BbrIG/GuVKgyEM+a7C9gv7zc86YlueVYJEyxKidnn7RxOqyDBqyyfXA3zvme
+Rro7xIrMHPL7Nu3AWjwjXzbp/w0z+tEFPsfVB+OOHKsWPcUG0HUTJGkyeO/uHRjN
+qPEkkf7BHHUO4V2gjOIdCsELxKwHf7vsZTOk40EV751fZ7FDHMr1eddQkgH4eqAb
+DB79uP+SLfUo+42n4q6eMmoqw8d76bBXRoUhIo/Ms4sebhV0sRtAS67OQioc9UUg
+-----END CERTIFICATE-----
diff --git a/apex/testing/Android.bp b/apex/testing/Android.bp
new file mode 100644
index 0000000..701ced7
--- /dev/null
+++ b/apex/testing/Android.bp
@@ -0,0 +1,29 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+apex {
+    name: "test_com.android.media",
+    manifest: "test_manifest.json",
+    file_contexts: "com.android.media",
+    defaults: ["com.android.media-defaults"],
+    installable: false,
+}
+
+apex {
+    name: "test_com.android.media.swcodec",
+    manifest: "test_manifest_codec.json",
+    file_contexts: "com.android.media.swcodec",
+    defaults: ["com.android.media.swcodec-defaults"],
+    installable: false,
+}
diff --git a/apex/testing/test_manifest.json b/apex/testing/test_manifest.json
new file mode 100644
index 0000000..9f81f9f
--- /dev/null
+++ b/apex/testing/test_manifest.json
@@ -0,0 +1,4 @@
+{
+  "name": "com.android.media",
+  "version": 2
+}
diff --git a/apex/testing/test_manifest_codec.json b/apex/testing/test_manifest_codec.json
new file mode 100644
index 0000000..c956454
--- /dev/null
+++ b/apex/testing/test_manifest_codec.json
@@ -0,0 +1,4 @@
+{
+  "name": "com.android.media.swcodec",
+  "version": 2
+}
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index c038314..0e969c7 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -162,6 +162,28 @@
      * Callers require the android.permission.CAMERA_SEND_SYSTEM_EVENTS permission.
      */
     const int EVENT_NONE = 0;
-    const int EVENT_USER_SWITCHED = 1;
+    const int EVENT_USER_SWITCHED = 1; // The argument is the set of new foreground user IDs.
     oneway void notifySystemEvent(int eventId, in int[] args);
+
+    /**
+     * Notify the camera service of a device physical status change. May only be called from
+     * a privileged process.
+     *
+     * newState is a bitfield consisting of DEVICE_STATE_* values combined together. Valid state
+     * combinations are device-specific. At device startup, the camera service will assume the device
+     * state is NORMAL until otherwise notified.
+     *
+     * Callers require the android.permission.CAMERA_SEND_SYSTEM_EVENTS permission.
+     */
+    oneway void notifyDeviceStateChange(long newState);
+
+    // Bitfield constants for notifyDeviceStateChange
+    // All bits >= 32 are for custom vendor states
+    // Written as ints since AIDL does not support long constants.
+    const int DEVICE_STATE_NORMAL = 0;
+    const int DEVICE_STATE_BACK_COVERED = 1;
+    const int DEVICE_STATE_FRONT_COVERED = 2;
+    const int DEVICE_STATE_FOLDED = 4;
+    const int DEVICE_STATE_LAST_FRAMEWORK_BIT = 0x80000000; // 1 << 31;
+
 }
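
Illustrative note (not part of the patch): the DEVICE_STATE_* constants above are meant to be OR-ed together into the 64-bit newState argument of notifyDeviceStateChange, with bits 0-31 reserved for framework states and bits 32 and above for vendor-defined states. The C++ sketch below shows one way such a bitfield could be assembled; the constant values mirror the AIDL above, while the vendor bit and the helper function are hypothetical.

    #include <cstdint>

    // Values mirror the DEVICE_STATE_* constants in ICameraService.aidl above;
    // the vendor bit is a hypothetical example, not a real framework constant.
    constexpr int64_t DEVICE_STATE_NORMAL         = 0;
    constexpr int64_t DEVICE_STATE_BACK_COVERED   = 1;
    constexpr int64_t DEVICE_STATE_FRONT_COVERED  = 2;
    constexpr int64_t DEVICE_STATE_FOLDED         = 4;
    constexpr int64_t DEVICE_STATE_VENDOR_EXAMPLE = 1LL << 32;  // bits >= 32: vendor states

    // Builds the bitfield that a privileged caller would pass to
    // ICameraService::notifyDeviceStateChange().
    int64_t buildDeviceState(bool folded, bool backCovered, bool vendorCondition) {
        int64_t state = DEVICE_STATE_NORMAL;
        if (folded)          state |= DEVICE_STATE_FOLDED;
        if (backCovered)     state |= DEVICE_STATE_BACK_COVERED;
        if (vendorCondition) state |= DEVICE_STATE_VENDOR_EXAMPLE;
        return state;
    }
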
diff --git a/camera/cameraserver/Android.bp b/camera/cameraserver/Android.bp
index b88a2c5..92b06c2 100644
--- a/camera/cameraserver/Android.bp
+++ b/camera/cameraserver/Android.bp
@@ -27,6 +27,7 @@
         "libhidltransport",
         "android.hardware.camera.common@1.0",
         "android.hardware.camera.provider@2.4",
+        "android.hardware.camera.provider@2.5",
         "android.hardware.camera.device@1.0",
         "android.hardware.camera.device@3.2",
         "android.hardware.camera.device@3.4",
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index c661233..de40990 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -36,6 +36,8 @@
         filterDurations(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS);
         filterDurations(ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS);
         filterDurations(ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS);
+        filterDurations(ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS);
+        filterDurations(ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS);
     }
     // TODO: filter request/result keys
 }
@@ -104,7 +106,8 @@
     for (size_t i = 0; i < entry.count; ++i) {
         if (ids[i] == '\0') {
             if (start != i) {
-                mStaticPhysicalCameraIds.push_back((const char*)ids+start);
+                mStaticPhysicalCameraIdValues.push_back(String8((const char *)ids+start));
+                mStaticPhysicalCameraIds.push_back(mStaticPhysicalCameraIdValues.back().string());
             }
             start = i+1;
         }
@@ -173,6 +176,16 @@
                     filteredDurations.push_back(duration);
                 }
                 break;
+            case ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS:
+            case ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS:
+                if (format == HAL_PIXEL_FORMAT_BLOB) {
+                    format = AIMAGE_FORMAT_HEIC;
+                    filteredDurations.push_back(format);
+                    filteredDurations.push_back(width);
+                    filteredDurations.push_back(height);
+                    filteredDurations.push_back(duration);
+                }
+                break;
             default:
                 // Should not reach here
                 ALOGE("%s: Unkown tag 0x%x", __FUNCTION__, tag);
@@ -246,6 +259,31 @@
         filteredDepthStreamConfigs.push_back(isInput);
     }
     mData.update(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS, filteredDepthStreamConfigs);
+
+    entry = mData.find(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS);
+    Vector<int32_t> filteredHeicStreamConfigs;
+    filteredHeicStreamConfigs.setCapacity(entry.count);
+
+    for (size_t i=0; i < entry.count; i += STREAM_CONFIGURATION_SIZE) {
+        int32_t format = entry.data.i32[i + STREAM_FORMAT_OFFSET];
+        int32_t width = entry.data.i32[i + STREAM_WIDTH_OFFSET];
+        int32_t height = entry.data.i32[i + STREAM_HEIGHT_OFFSET];
+        int32_t isInput = entry.data.i32[i + STREAM_IS_INPUT_OFFSET];
+        if (isInput == ACAMERA_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_INPUT) {
+            // Hide input streams
+            continue;
+        }
+        // Translate HAL formats to NDK format
+        if (format == HAL_PIXEL_FORMAT_BLOB) {
+            format = AIMAGE_FORMAT_HEIC;
+        }
+
+        filteredHeicStreamConfigs.push_back(format);
+        filteredHeicStreamConfigs.push_back(width);
+        filteredHeicStreamConfigs.push_back(height);
+        filteredHeicStreamConfigs.push_back(isInput);
+    }
+    mData.update(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS, filteredHeicStreamConfigs);
 }
 
 bool
@@ -484,6 +522,8 @@
     ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE,
     ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION,
     ANDROID_DEPTH_MAX_DEPTH_SAMPLES,
+    ANDROID_HEIC_INFO_SUPPORTED,
+    ANDROID_HEIC_INFO_MAX_JPEG_APP_SEGMENTS_COUNT,
 });
 
 /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/camera/ndk/impl/ACameraMetadata.h b/camera/ndk/impl/ACameraMetadata.h
index 7049c4b..3d895cb 100644
--- a/camera/ndk/impl/ACameraMetadata.h
+++ b/camera/ndk/impl/ACameraMetadata.h
@@ -117,6 +117,7 @@
     static std::unordered_set<uint32_t> sSystemTags;
 
     std::vector<const char*> mStaticPhysicalCameraIds;
+    std::vector<String8> mStaticPhysicalCameraIdValues;
 };
 
 #endif // _ACAMERA_METADATA_H
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index c1efa5f..8c19e1d 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -71,6 +71,8 @@
     ACAMERA_DEPTH,
     ACAMERA_LOGICAL_MULTI_CAMERA,
     ACAMERA_DISTORTION_CORRECTION,
+    ACAMERA_HEIC,
+    ACAMERA_HEIC_INFO,
     ACAMERA_SECTION_COUNT,
 
     ACAMERA_VENDOR = 0x8000
@@ -112,6 +114,8 @@
     ACAMERA_DISTORTION_CORRECTION_START
                                    = ACAMERA_DISTORTION_CORRECTION
                                                                 << 16,
+    ACAMERA_HEIC_START             = ACAMERA_HEIC              << 16,
+    ACAMERA_HEIC_INFO_START        = ACAMERA_HEIC_INFO         << 16,
     ACAMERA_VENDOR_START           = ACAMERA_VENDOR            << 16
 } acamera_metadata_section_start_t;
 
@@ -1912,6 +1916,7 @@
      *   <li>ACaptureRequest</li>
      * </ul></p>
      *
+     * <p>This tag is also used for HEIC image capture.</p>
      */
     ACAMERA_JPEG_GPS_COORDINATES =                              // double[3]
             ACAMERA_JPEG_START,
@@ -1927,6 +1932,7 @@
      *   <li>ACaptureRequest</li>
      * </ul></p>
      *
+     * <p>This tag is also used for HEIC image capture.</p>
      */
     ACAMERA_JPEG_GPS_PROCESSING_METHOD =                        // byte
             ACAMERA_JPEG_START + 1,
@@ -1942,6 +1948,7 @@
      *   <li>ACaptureRequest</li>
      * </ul></p>
      *
+     * <p>This tag is also used for HEIC image capture.</p>
      */
     ACAMERA_JPEG_GPS_TIMESTAMP =                                // int64
             ACAMERA_JPEG_START + 2,
@@ -1986,6 +1993,10 @@
      * </code></pre>
      * <p>For EXTERNAL cameras the sensor orientation will always be set to 0 and the facing will
      * also be set to EXTERNAL. The above code is not relevant in such case.</p>
+     * <p>This tag is also used to describe the orientation of the HEIC image capture, in which
+     * case the rotation is reflected by
+     * <a href="https://developer.android.com/reference/android/media/ExifInterface.html#TAG_ORIENTATION">EXIF orientation flag</a>, and not by
+     * rotating the image data itself.</p>
      *
      * @see ACAMERA_SENSOR_ORIENTATION
      */
@@ -2003,7 +2014,8 @@
      *   <li>ACaptureRequest</li>
      * </ul></p>
      *
-     * <p>85-95 is typical usage range.</p>
+     * <p>85-95 is the typical usage range. This tag is also used to describe the quality
+     * of the HEIC image capture.</p>
      */
     ACAMERA_JPEG_QUALITY =                                      // byte
             ACAMERA_JPEG_START + 4,
@@ -2019,6 +2031,7 @@
      *   <li>ACaptureRequest</li>
      * </ul></p>
      *
+     * <p>This tag is also used to describe the quality of the HEIC image capture.</p>
      */
     ACAMERA_JPEG_THUMBNAIL_QUALITY =                            // byte
             ACAMERA_JPEG_START + 5,
@@ -2055,6 +2068,10 @@
      *   orientation is requested. LEGACY device will always report unrotated thumbnail
      *   size.</li>
      * </ul>
+     * <p>The tag is also used as thumbnail size for HEIC image format capture, in which case the
+     * thumbnail rotation is reflected by
+     * <a href="https://developer.android.com/reference/android/media/ExifInterface.html#TAG_ORIENTATION">EXIF orientation flag</a>, and not by
+     * rotating the thumbnail data itself.</p>
      *
      * @see ACAMERA_JPEG_ORIENTATION
      */
@@ -2088,6 +2105,7 @@
      * and vice versa.</li>
      * <li>All non-<code>(0, 0)</code> sizes will have non-zero widths and heights.</li>
      * </ul>
+     * <p>This list is also used as supported thumbnail sizes for HEIC image format capture.</p>
      *
      * @see ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS
      */
@@ -5757,6 +5775,80 @@
             ACAMERA_DISTORTION_CORRECTION_START + 1,
     ACAMERA_DISTORTION_CORRECTION_END,
 
+    /**
+     * <p>The available HEIC (ISO/IEC 23008-12) stream
+     * configurations that this camera device supports
+     * (i.e. format, width, height, output/input stream).</p>
+     *
+     * <p>Type: int32[n*4] (acamera_metadata_enum_android_heic_available_heic_stream_configurations_t)</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>The configurations are listed as <code>(format, width, height, input?)</code> tuples.</p>
+     * <p>If the camera device supports HEIC image format, it will support an identical set of stream
+     * combinations involving HEIC image format, compared to the combinations involving JPEG
+     * image format as required by the device's hardware level and capabilities.</p>
+     * <p>All the static, control, and dynamic metadata tags related to JPEG apply to HEIC formats.
+     * Configuring JPEG and HEIC streams at the same time is not supported.</p>
+     * <p>All the configuration tuples <code>(format, width, height, input?)</code> will contain
+     * AIMAGE_FORMAT_HEIC format as OUTPUT only.</p>
+     */
+    ACAMERA_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS =         // int32[n*4] (acamera_metadata_enum_android_heic_available_heic_stream_configurations_t)
+            ACAMERA_HEIC_START,
+    /**
+     * <p>This lists the minimum frame duration for each
+     * format/size combination for HEIC output formats.</p>
+     *
+     * <p>Type: int64[4*n]</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>This should correspond to the frame duration when only that
+     * stream is active, with all processing (typically in android.*.mode)
+     * set to either OFF or FAST.</p>
+     * <p>When multiple streams are used in a request, the minimum frame
+     * duration will be max(individual stream min durations).</p>
+     * <p>See ACAMERA_SENSOR_FRAME_DURATION and
+     * ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS for more details about
+     * calculating the max frame rate.</p>
+     *
+     * @see ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS
+     * @see ACAMERA_SENSOR_FRAME_DURATION
+     */
+    ACAMERA_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS =           // int64[4*n]
+            ACAMERA_HEIC_START + 1,
+    /**
+     * <p>This lists the maximum stall duration for each
+     * output format/size combination for HEIC streams.</p>
+     *
+     * <p>Type: int64[4*n]</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+     * </ul></p>
+     *
+     * <p>A stall duration is how much extra time would get added
+     * to the normal minimum frame duration for a repeating request
+     * that has streams with non-zero stall.</p>
+     * <p>This functions similarly to
+     * ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS for HEIC
+     * streams.</p>
+     * <p>All HEIC output stream formats may have a nonzero stall
+     * duration.</p>
+     *
+     * @see ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS
+     */
+    ACAMERA_HEIC_AVAILABLE_HEIC_STALL_DURATIONS =               // int64[4*n]
+            ACAMERA_HEIC_START + 2,
+    ACAMERA_HEIC_END,
+
 } acamera_metadata_tag_t;
 
 /**
@@ -7608,6 +7700,13 @@
      */
     ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME                = 12,
 
+    /**
+     * <p>The camera device is capable of writing image data into a region of memory
+     * inaccessible to Android userspace or the Android kernel, and only accessible to
+     * trusted execution environments (TEE).</p>
+     */
+    ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_SECURE_IMAGE_DATA         = 13,
+
 } acamera_metadata_enum_android_request_available_capabilities_t;
 
 
@@ -8366,6 +8465,16 @@
 } acamera_metadata_enum_android_distortion_correction_mode_t;
 
 
+// ACAMERA_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS
+typedef enum acamera_metadata_enum_acamera_heic_available_heic_stream_configurations {
+    ACAMERA_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_OUTPUT         = 0,
+
+    ACAMERA_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_INPUT          = 1,
+
+} acamera_metadata_enum_android_heic_available_heic_stream_configurations_t;
+
+
+
 #endif /* __ANDROID_API__ >= 24 */
 
 __END_DECLS
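
Illustrative note (not part of the patch): the new HEIC tags are read through the existing NDK metadata API. A minimal sketch of enumerating the advertised HEIC output sizes is shown below; it assumes a characteristics object obtained from ACameraManager_getCameraCharacteristics(), and the function name printHeicOutputSizes is illustrative.

    #include <cstdio>
    #include <camera/NdkCameraMetadata.h>
    #include <camera/NdkCameraMetadataTags.h>
    #include <media/NdkImage.h>

    // Sketch: list the HEIC output sizes advertised via
    // ACAMERA_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS.
    void printHeicOutputSizes(const ACameraMetadata* chars) {
        ACameraMetadata_const_entry entry = {};
        if (ACameraMetadata_getConstEntry(chars,
                ACAMERA_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS, &entry) != ACAMERA_OK) {
            return;  // device does not advertise HEIC streams
        }
        // Entries are (format, width, height, input?) tuples, as documented above.
        for (uint32_t i = 0; i + 3 < entry.count; i += 4) {
            int32_t format  = entry.data.i32[i + 0];
            int32_t width   = entry.data.i32[i + 1];
            int32_t height  = entry.data.i32[i + 2];
            int32_t isInput = entry.data.i32[i + 3];
            if (format == AIMAGE_FORMAT_HEIC &&
                    isInput == ACAMERA_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_OUTPUT) {
                printf("HEIC output: %dx%d\n", width, height);
            }
        }
    }
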
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index f7863a5..9aafcd3 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -289,7 +289,7 @@
 }
 
 camera_status_t
-CameraDevice::allocateCaptureRequest(
+CameraDevice::allocateCaptureRequestLocked(
         const ACaptureRequest* request, /*out*/sp<CaptureRequest> &outReq) {
     sp<CaptureRequest> req(new CaptureRequest());
     req->mCaptureRequest.physicalCameraSettings.resize(1);
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index c63b97f..d571585 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -169,7 +169,12 @@
 
     camera_status_t updateOutputConfigurationLocked(ACaptureSessionOutput *output);
 
-    camera_status_t allocateCaptureRequest(
+    // Since this writes to ICameraDeviceUser's fmq, clients must take care that:
+    //   a) This function is called serially.
+    //   b) This function is called in accordance with ICameraDeviceUser.submitRequestList;
+    //      otherwise, a capture request might end up associated with the wrong settings
+    //      metadata.
+    camera_status_t allocateCaptureRequestLocked(
             const ACaptureRequest* request, sp<CaptureRequest>& outReq);
 
     static ACaptureRequest* allocateACaptureRequest(sp<CaptureRequest>& req);
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc b/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc
index 7d2304e..8bd5a52 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc
+++ b/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc
@@ -73,7 +73,7 @@
     requestsV.setCapacity(numRequests);
     for (int i = 0; i < numRequests; i++) {
         sp<CaptureRequest> req;
-        ret = allocateCaptureRequest(requests[i], req);
+        ret = allocateCaptureRequestLocked(requests[i], req);
         // We need to call this method since after submitRequestList is called,
         // the request metadata queue might have removed the capture request
         // metadata. Therefore we simply add the metadata to its wrapper class,
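
Illustrative note (not part of the patch): the rename to allocateCaptureRequestLocked documents an invariant rather than changing behavior. Settings written into the shared request-metadata queue must be serialized and must stay in the same order as the requests handed to ICameraDeviceUser.submitRequestList, so that each request remains paired with its own settings. The self-contained C++ sketch below shows that general pattern with stand-in types; it is not the NDK implementation.

    #include <cstddef>
    #include <cstdint>
    #include <deque>
    #include <mutex>
    #include <vector>

    struct Settings { int64_t exposureNs; };  // stand-in for per-request settings metadata
    struct Request  { int id; };              // stand-in for a capture request

    class RequestQueue {
    public:
        // Serializes the whole batch so mRequests[i] always pairs with mSettingsFmq[i].
        void submitBatch(const std::vector<Request>& reqs,
                         const std::vector<Settings>& settings) {
            std::lock_guard<std::mutex> guard(mLock);
            for (size_t i = 0; i < reqs.size(); ++i) {
                enqueueLocked(reqs[i], settings[i]);
            }
        }

    private:
        // Must only be called with mLock held, once per request, in submission order.
        void enqueueLocked(const Request& req, const Settings& s) {
            mRequests.push_back(req);
            mSettingsFmq.push_back(s);  // settings travel in a parallel queue (the "fmq")
        }

        std::mutex mLock;
        std::deque<Request>  mRequests;
        std::deque<Settings> mSettingsFmq;
    };
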
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 7803ccc..c361690 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -86,6 +86,7 @@
 using android::INFO_FORMAT_CHANGED;
 using android::INFO_OUTPUT_BUFFERS_CHANGED;
 using android::INVALID_OPERATION;
+using android::NAME_NOT_FOUND;
 using android::NO_ERROR;
 using android::UNKNOWN_ERROR;
 
@@ -585,8 +586,12 @@
     self->startThreadPool();
 
     // Get main display parameters.
-    sp<IBinder> mainDpy = SurfaceComposerClient::getBuiltInDisplay(
-            ISurfaceComposer::eDisplayIdMain);
+    const sp<IBinder> mainDpy = SurfaceComposerClient::getInternalDisplayToken();
+    if (mainDpy == nullptr) {
+        fprintf(stderr, "ERROR: no display\n");
+        return NAME_NOT_FOUND;
+    }
+
     DisplayInfo mainDpyInfo;
     err = SurfaceComposerClient::getDisplayInfo(mainDpy, &mainDpyInfo);
     if (err != NO_ERROR) {
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index a463ec5..e5a4337 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -411,10 +411,12 @@
         composerClient = new SurfaceComposerClient;
         CHECK_EQ(composerClient->initCheck(), (status_t)OK);
 
-        sp<IBinder> display(SurfaceComposerClient::getBuiltInDisplay(
-                ISurfaceComposer::eDisplayIdMain));
+        const sp<IBinder> display = SurfaceComposerClient::getInternalDisplayToken();
+        CHECK(display != nullptr);
+
         DisplayInfo info;
-        SurfaceComposerClient::getDisplayInfo(display, &info);
+        CHECK_EQ(SurfaceComposerClient::getDisplayInfo(display, &info), NO_ERROR);
+
         ssize_t displayWidth = info.w;
         ssize_t displayHeight = info.h;
 
diff --git a/cmds/stagefright/mediafilter.cpp b/cmds/stagefright/mediafilter.cpp
index f0ee0e1..2cf6955 100644
--- a/cmds/stagefright/mediafilter.cpp
+++ b/cmds/stagefright/mediafilter.cpp
@@ -748,10 +748,12 @@
         composerClient = new SurfaceComposerClient;
         CHECK_EQ((status_t)OK, composerClient->initCheck());
 
-        android::sp<IBinder> display(SurfaceComposerClient::getBuiltInDisplay(
-                ISurfaceComposer::eDisplayIdMain));
+        const android::sp<IBinder> display = SurfaceComposerClient::getInternalDisplayToken();
+        CHECK(display != nullptr);
+
         DisplayInfo info;
-        SurfaceComposerClient::getDisplayInfo(display, &info);
+        CHECK_EQ(SurfaceComposerClient::getDisplayInfo(display, &info), NO_ERROR);
+
         ssize_t displayWidth = info.w;
         ssize_t displayHeight = info.h;
 
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index b2f39dc..35bdbc0 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -318,10 +318,12 @@
     sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
     CHECK_EQ(composerClient->initCheck(), (status_t)OK);
 
-    sp<IBinder> display(SurfaceComposerClient::getBuiltInDisplay(
-            ISurfaceComposer::eDisplayIdMain));
+    const sp<IBinder> display = SurfaceComposerClient::getInternalDisplayToken();
+    CHECK(display != nullptr);
+
     DisplayInfo info;
-    SurfaceComposerClient::getDisplayInfo(display, &info);
+    CHECK_EQ(SurfaceComposerClient::getDisplayInfo(display, &info), NO_ERROR);
+
     ssize_t displayWidth = info.w;
     ssize_t displayHeight = info.h;
 
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
index 27bd631..bf35224 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
@@ -97,13 +97,13 @@
 ///////////////////////////////////////////////////////////////////////////////
 ClearKeyCasPlugin::ClearKeyCasPlugin(
         void *appData, CasPluginCallback callback)
-    : mCallback(callback), mAppData(appData) {
+    : mCallback(callback), mCallbackExt(NULL), mAppData(appData) {
     ALOGV("CTOR");
 }
 
 ClearKeyCasPlugin::ClearKeyCasPlugin(
         void *appData, CasPluginCallbackExt callback)
-    : mCallbackExt(callback), mAppData(appData) {
+    : mCallback(NULL), mCallbackExt(callback), mAppData(appData) {
     ALOGV("CTOR");
 }
 
diff --git a/include/media/AudioAttributes.h b/include/media/AudioAttributes.h
new file mode 120000
index 0000000..27ba471
--- /dev/null
+++ b/include/media/AudioAttributes.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioAttributes.h
\ No newline at end of file
diff --git a/include/media/AudioCommonTypes.h b/include/media/AudioCommonTypes.h
new file mode 120000
index 0000000..ae7c99a
--- /dev/null
+++ b/include/media/AudioCommonTypes.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioCommonTypes.h
\ No newline at end of file
diff --git a/include/media/AudioPolicyHelper.h b/include/media/AudioPolicyHelper.h
deleted file mode 120000
index 558657e..0000000
--- a/include/media/AudioPolicyHelper.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioPolicyHelper.h
\ No newline at end of file
diff --git a/include/media/AudioProductStrategy.h b/include/media/AudioProductStrategy.h
new file mode 120000
index 0000000..6bfaf11
--- /dev/null
+++ b/include/media/AudioProductStrategy.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioProductStrategy.h
\ No newline at end of file
diff --git a/media/codec2/components/aac/C2SoftAacDec.cpp b/media/codec2/components/aac/C2SoftAacDec.cpp
index c7c8442..4d00d35 100644
--- a/media/codec2/components/aac/C2SoftAacDec.cpp
+++ b/media/codec2/components/aac/C2SoftAacDec.cpp
@@ -52,37 +52,30 @@
 
 namespace android {
 
-class C2SoftAacDec::IntfImpl : public C2InterfaceHelper {
+constexpr char COMPONENT_NAME[] = "c2.android.aac.decoder";
+
+class C2SoftAacDec::IntfImpl : public SimpleInterface<void>::BaseParams {
 public:
     explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper> &helper)
-        : C2InterfaceHelper(helper) {
-
-        setDerivedInstance(this);
+        : SimpleInterface<void>::BaseParams(
+                helper,
+                COMPONENT_NAME,
+                C2Component::KIND_DECODER,
+                C2Component::DOMAIN_AUDIO,
+                MEDIA_MIMETYPE_AUDIO_AAC) {
+        noPrivateBuffers();
+        noInputReferences();
+        noOutputReferences();
+        noInputLatency();
+        noTimeStretch();
 
         addParameter(
-                DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+                DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
+                .withConstValue(new C2PortActualDelayTuning::output(2u))
                 .build());
 
         addParameter(
-                DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
-                .build());
-
-        addParameter(
-                DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
-                        MEDIA_MIMETYPE_AUDIO_AAC))
-                .build());
-
-        addParameter(
-                DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
-                        MEDIA_MIMETYPE_AUDIO_RAW))
-                .build());
-
-        addParameter(
-                DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+                DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
                 .withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
                 .withFields({C2F(mSampleRate, value).oneOf({
                     7350, 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
@@ -91,15 +84,15 @@
                 .build());
 
         addParameter(
-                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::output(0u, 1))
                 .withFields({C2F(mChannelCount, value).inRange(1, 8)})
                 .withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
                 .build());
 
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::input(0u, 64000))
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
                 .withFields({C2F(mBitrate, value).inRange(8000, 960000)})
                 .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
                 .build());
@@ -110,10 +103,10 @@
                 .build());
 
         addParameter(
-                DefineParam(mAacFormat, C2_NAME_STREAM_AAC_FORMAT_SETTING)
-                .withDefault(new C2StreamAacFormatInfo::input(0u, C2AacStreamFormatRaw))
+                DefineParam(mAacFormat, C2_PARAMKEY_AAC_PACKAGING)
+                .withDefault(new C2StreamAacFormatInfo::input(0u, C2Config::AAC_PACKAGING_RAW))
                 .withFields({C2F(mAacFormat, value).oneOf({
-                    C2AacStreamFormatRaw, C2AacStreamFormatAdts
+                    C2Config::AAC_PACKAGING_RAW, C2Config::AAC_PACKAGING_ADTS
                 })})
                 .withSetter(Setter<decltype(*mAacFormat)>::StrictValueWithNoDeps)
                 .build());
@@ -198,7 +191,7 @@
                 .build());
     }
 
-    bool isAdts() const { return mAacFormat->value == C2AacStreamFormatAdts; }
+    bool isAdts() const { return mAacFormat->value == C2Config::AAC_PACKAGING_ADTS; }
     static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::input> &me) {
         (void)mayBlock;
         (void)me;  // TODO: validate
@@ -212,13 +205,13 @@
     int32_t getDrcEffectType() const { return mDrcEffectType->value; }
 
 private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
     std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
-    std::shared_ptr<C2BitrateTuning::input> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
     std::shared_ptr<C2StreamAacFormatInfo::input> mAacFormat;
     std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
@@ -231,8 +224,6 @@
     // TODO Add : C2StreamAacSbrModeTuning
 };
 
-constexpr char COMPONENT_NAME[] = "c2.android.aac.decoder";
-
 C2SoftAacDec::C2SoftAacDec(
         const char *name,
         c2_node_id_t id,
diff --git a/media/codec2/components/aac/C2SoftAacEnc.cpp b/media/codec2/components/aac/C2SoftAacEnc.cpp
index d1bdf0d..137e775 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.cpp
+++ b/media/codec2/components/aac/C2SoftAacEnc.cpp
@@ -37,29 +37,29 @@
         setDerivedInstance(this);
 
         addParameter(
-                DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatAudio))
+                DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+                DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+                DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
                         MEDIA_MIMETYPE_AUDIO_RAW))
                 .build());
 
         addParameter(
-                DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+                DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
                         MEDIA_MIMETYPE_AUDIO_AAC))
                 .build());
 
         addParameter(
-                DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+                DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
                 .withDefault(new C2StreamSampleRateInfo::input(0u, 44100))
                 .withFields({C2F(mSampleRate, value).oneOf({
                     8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
@@ -68,15 +68,15 @@
                 .build());
 
         addParameter(
-                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::input(0u, 1))
                 .withFields({C2F(mChannelCount, value).inRange(1, 6)})
                 .withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
                 .build());
 
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::output(0u, 64000))
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
                 .withFields({C2F(mBitrate, value).inRange(8000, 960000)})
                 .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
                 .build());
@@ -125,13 +125,13 @@
     }
 
 private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
     std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
-    std::shared_ptr<C2BitrateTuning::output> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
     std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
 };
@@ -323,8 +323,8 @@
             return;
         }
 
-        std::unique_ptr<C2StreamCsdInfo::output> csd =
-            C2StreamCsdInfo::output::AllocUnique(encInfo.confSize, 0u);
+        std::unique_ptr<C2StreamInitDataInfo::output> csd =
+            C2StreamInitDataInfo::output::AllocUnique(encInfo.confSize, 0u);
         if (!csd) {
             ALOGE("CSD allocation failed");
             mSignalledError = true;
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp
index c591e21..edad75a 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp
@@ -47,18 +47,18 @@
         setDerivedInstance(this);
 
         addParameter(
-                DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+                DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+                DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+                DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
 #ifdef AMRNB
                         MEDIA_MIMETYPE_AUDIO_AMR_NB
 #else
@@ -67,13 +67,13 @@
                 )).build());
 
         addParameter(
-                DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+                DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
                         MEDIA_MIMETYPE_AUDIO_RAW))
                 .build());
 
         addParameter(
-                DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+                DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
 #ifdef AMRNB
                 .withDefault(new C2StreamSampleRateInfo::output(0u, 8000))
                 .withFields({C2F(mSampleRate, value).equalTo(8000)})
@@ -85,19 +85,19 @@
                 .build());
 
         addParameter(
-                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::output(0u, 1))
                 .withFields({C2F(mChannelCount, value).equalTo(1)})
                 .withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
                 .build());
 
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
 #ifdef AMRNB
-                .withDefault(new C2BitrateTuning::input(0u, 4750))
+                .withDefault(new C2StreamBitrateInfo::input(0u, 4750))
                 .withFields({C2F(mBitrate, value).inRange(4750, 12200)})
 #else
-                .withDefault(new C2BitrateTuning::input(0u, 6600))
+                .withDefault(new C2StreamBitrateInfo::input(0u, 6600))
                 .withFields({C2F(mBitrate, value).inRange(6600, 23850)})
 #endif
                 .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
@@ -110,13 +110,13 @@
     }
 
 private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
     std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
-    std::shared_ptr<C2BitrateTuning::input> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
 };
 
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
index 8c03257..3d3aa7d 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
@@ -36,38 +36,38 @@
         setDerivedInstance(this);
 
         addParameter(
-            DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+            DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
                 .withConstValue(
-                    new C2StreamFormatConfig::input(0u, C2FormatAudio))
+                    new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-            DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+            DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
                 .withConstValue(
-                    new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+                    new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-            DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+            DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
                     MEDIA_MIMETYPE_AUDIO_RAW))
                 .build());
 
         addParameter(
-            DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+            DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
                     MEDIA_MIMETYPE_AUDIO_AMR_NB))
                 .build());
 
         addParameter(
-                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::input(0u, 1))
                 .withFields({C2F(mChannelCount, value).equalTo(1)})
                 .withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
                 .build());
 
         addParameter(
-            DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+            DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
                 .withDefault(new C2StreamSampleRateInfo::input(0u, 8000))
                 .withFields({C2F(mSampleRate, value).equalTo(8000)})
                 .withSetter(
@@ -75,8 +75,8 @@
                 .build());
 
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::output(0u, 4750))
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::output(0u, 4750))
                 .withFields({C2F(mBitrate, value).inRange(4750, 12200)})
                 .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
                 .build());
@@ -92,13 +92,13 @@
     uint32_t getBitrate() const { return mBitrate->value; }
 
    private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
     std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
-    std::shared_ptr<C2BitrateTuning::output> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
 };
 
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
index 074493c..379cb32 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
@@ -38,38 +38,38 @@
         setDerivedInstance(this);
 
         addParameter(
-            DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+            DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
                 .withConstValue(
-                    new C2StreamFormatConfig::input(0u, C2FormatAudio))
+                    new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-            DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+            DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
                 .withConstValue(
-                    new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+                    new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-            DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+            DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
                     MEDIA_MIMETYPE_AUDIO_RAW))
                 .build());
 
         addParameter(
-            DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+            DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
                     MEDIA_MIMETYPE_AUDIO_AMR_WB))
                 .build());
 
         addParameter(
-                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::input(0u, 1))
                 .withFields({C2F(mChannelCount, value).equalTo(1)})
                 .withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
                 .build());
 
         addParameter(
-            DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+            DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
                 .withDefault(new C2StreamSampleRateInfo::input(0u, 16000))
                 .withFields({C2F(mSampleRate, value).equalTo(16000)})
                 .withSetter(
@@ -77,8 +77,8 @@
                 .build());
 
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::output(0u, 6600))
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::output(0u, 6600))
                 .withFields({C2F(mBitrate, value).inRange(6600, 23850)})
                 .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
                 .build());
@@ -94,13 +94,13 @@
     uint32_t getBitrate() const { return mBitrate->value; }
 
    private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
     std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
-    std::shared_ptr<C2BitrateTuning::output> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
 };
 
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index 6be1807..4bcc2c6 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -141,7 +141,7 @@
 
     static C2R SizeSetter(bool mayBlock,
                           const C2P<C2StreamPictureSizeInfo::output>& oldMe,
-                          C2P<C2VideoSizeStreamInfo::output>& me) {
+                          C2P<C2StreamPictureSizeInfo::output>& me) {
         (void)mayBlock;
         C2R res = C2R::Ok();
         if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -586,7 +586,7 @@
         mWidth = img->d_w;
         mHeight = img->d_h;
 
-        C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+        C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
         std::vector<std::unique_ptr<C2SettingResult>> failures;
         c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
         if (err == C2_OK) {
diff --git a/media/codec2/components/avc/C2SoftAvcDec.cpp b/media/codec2/components/avc/C2SoftAvcDec.cpp
index 3e62744..9290d74 100644
--- a/media/codec2/components/avc/C2SoftAvcDec.cpp
+++ b/media/codec2/components/avc/C2SoftAvcDec.cpp
@@ -51,6 +51,12 @@
         noInputLatency();
         noTimeStretch();
 
+        // TODO: Proper support for reorder depth.
+        addParameter(
+                DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
+                .withConstValue(new C2PortActualDelayTuning::output(8u))
+                .build());
+
         // TODO: output latency and reordering
 
         addParameter(
@@ -192,7 +198,7 @@
     }
 
     static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
-                          C2P<C2VideoSizeStreamInfo::output> &me) {
+                          C2P<C2StreamPictureSizeInfo::output> &me) {
         (void)mayBlock;
         C2R res = C2R::Ok();
         if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -839,7 +845,7 @@
                 mHeight = s_decode_op.u4_pic_ht;
                 CHECK_EQ(0u, s_decode_op.u4_output_present);
 
-                C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+                C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
                 std::vector<std::unique_ptr<C2SettingResult>> failures;
                 c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
                 if (err == OK) {
@@ -877,6 +883,8 @@
     } else if (!hasPicture) {
         fillEmptyWork(work);
     }
+
+    work->input.buffers.clear();
 }
 
 c2_status_t C2SoftAvcDec::drainInternal(
diff --git a/media/codec2/components/avc/C2SoftAvcEnc.cpp b/media/codec2/components/avc/C2SoftAvcEnc.cpp
index 6ddb9ff..b851908 100644
--- a/media/codec2/components/avc/C2SoftAvcEnc.cpp
+++ b/media/codec2/components/avc/C2SoftAvcEnc.cpp
@@ -45,36 +45,36 @@
         setDerivedInstance(this);
 
         addParameter(
-                DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatVideo))
+                DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
                 .build());
 
         addParameter(
-                DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+                DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+                DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
                         MEDIA_MIMETYPE_VIDEO_RAW))
                 .build());
 
         addParameter(
-                DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+                DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
                         MEDIA_MIMETYPE_VIDEO_AVC))
                 .build());
 
         addParameter(
-                DefineParam(mUsage, C2_NAME_INPUT_STREAM_USAGE_SETTING)
+                DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
                 .withConstValue(new C2StreamUsageTuning::input(
                         0u, (uint64_t)C2MemoryUsage::CPU_READ))
                 .build());
 
         addParameter(
-                DefineParam(mSize, C2_NAME_STREAM_VIDEO_SIZE_SETTING)
-                .withDefault(new C2VideoSizeStreamTuning::input(0u, 320, 240))
+                DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+                .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
                 .withFields({
                     C2F(mSize, width).inRange(2, 2560, 2),
                     C2F(mSize, height).inRange(2, 2560, 2),
@@ -83,7 +83,7 @@
                 .build());
 
         addParameter(
-                DefineParam(mFrameRate, C2_NAME_STREAM_FRAME_RATE_SETTING)
+                DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
                 .withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
                 // TODO: More restriction?
                 .withFields({C2F(mFrameRate, value).greaterThan(0.)})
@@ -91,8 +91,8 @@
                 .build());
 
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::output(0u, 64000))
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
                 .withFields({C2F(mBitrate, value).inRange(4096, 12000000)})
                 .withSetter(BitrateSetter)
                 .build());
@@ -182,9 +182,9 @@
     static C2R ProfileLevelSetter(
             bool mayBlock,
             C2P<C2StreamProfileLevelInfo::output> &me,
-            const C2P<C2VideoSizeStreamTuning::input> &size,
+            const C2P<C2StreamPictureSizeInfo::input> &size,
             const C2P<C2StreamFrameRateInfo::output> &frameRate,
-            const C2P<C2BitrateTuning::output> &bitrate) {
+            const C2P<C2StreamBitrateInfo::output> &bitrate) {
         (void)mayBlock;
         if (!me.F(me.v.profile).supportsAtAll(me.v.profile)) {
             me.set().profile = PROFILE_AVC_CONSTRAINED_BASELINE;
@@ -325,16 +325,16 @@
     std::shared_ptr<C2StreamRequestSyncFrameTuning::output> getRequestSync_l() const { return mRequestSync; }
 
 private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamUsageTuning::input> mUsage;
-    std::shared_ptr<C2VideoSizeStreamTuning::input> mSize;
+    std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
     std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
     std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
     std::shared_ptr<C2StreamIntraRefreshTuning::output> mIntraRefresh;
-    std::shared_ptr<C2BitrateTuning::output> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
     std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
     std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
 };
@@ -1332,8 +1332,8 @@
 
         mSpsPpsHeaderReceived = true;
 
-        std::unique_ptr<C2StreamCsdInfo::output> csd =
-            C2StreamCsdInfo::output::AllocUnique(s_encode_op.s_out_buf.u4_bytes, 0u);
+        std::unique_ptr<C2StreamInitDataInfo::output> csd =
+            C2StreamInitDataInfo::output::AllocUnique(s_encode_op.s_out_buf.u4_bytes, 0u);
         if (!csd) {
             ALOGE("CSD allocation failed");
             mSignalledError = true;
@@ -1492,7 +1492,7 @@
         if (IV_IDR_FRAME == s_encode_op.u4_encoded_frame_type) {
             ALOGV("IDR frame produced");
             buffer->setInfo(std::make_shared<C2StreamPictureTypeMaskInfo::output>(
-                    0u /* stream id */, C2PictureTypeKeyFrame));
+                    0u /* stream id */, C2Config::SYNC_FRAME));
         }
         work->worklets.front()->output.buffers.push_back(buffer);
     }
diff --git a/media/codec2/components/base/Android.bp b/media/codec2/components/base/Android.bp
index d02f541..78a444b 100644
--- a/media/codec2/components/base/Android.bp
+++ b/media/codec2/components/base/Android.bp
@@ -74,9 +74,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
 
@@ -90,9 +87,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 }
 
@@ -128,9 +122,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     ldflags: ["-Wl,-Bsymbolic"],
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index b8baec8..44f1fe0 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -151,7 +151,7 @@
         c2_status_t status;
         do {
             status = mBase->fetchLinearBlock(capacity, usage, block);
-        } while (status == C2_TIMED_OUT);
+        } while (status == C2_BLOCKING);
         return status;
     }
 
@@ -162,7 +162,7 @@
         c2_status_t status;
         do {
             status = mBase->fetchCircularBlock(capacity, usage, block);
-        } while (status == C2_TIMED_OUT);
+        } while (status == C2_BLOCKING);
         return status;
     }
 
@@ -174,7 +174,7 @@
         do {
             status = mBase->fetchGraphicBlock(width, height, format, usage,
                                               block);
-        } while (status == C2_TIMED_OUT);
+        } while (status == C2_BLOCKING);
         return status;
     }
 
@@ -473,7 +473,7 @@
     if (!mOutputBlockPool) {
         c2_status_t err = [this] {
             // TODO: don't use query_vb
-            C2StreamFormatConfig::output outputFormat(0u);
+            C2StreamBufferTypeSetting::output outputFormat(0u);
             std::vector<std::unique_ptr<C2Param>> params;
             c2_status_t err = intf()->query_vb(
                     { &outputFormat },
@@ -485,7 +485,7 @@
                 return err;
             }
             C2BlockPool::local_id_t poolId =
-                outputFormat.value == C2FormatVideo
+                outputFormat.value == C2BufferData::GRAPHIC
                         ? C2BlockPool::BASIC_GRAPHIC
                         : C2BlockPool::BASIC_LINEAR;
             if (params.size()) {
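
The SimpleC2Component change above also switches the retry condition in the blocking block-pool fetch helpers from C2_TIMED_OUT to C2_BLOCKING: the helper keeps re-issuing the fetch only while the pool reports it would block, and returns any other status to the caller. A minimal standalone sketch of that idiom (the free-function wrapper here is illustrative; the real code lives in the wrapper methods shown above):

    // Retry a linear-block fetch while the pool reports C2_BLOCKING.
    c2_status_t fetchLinearBlockBlocking(
            const std::shared_ptr<C2BlockPool> &pool,
            uint32_t capacity, C2MemoryUsage usage,
            std::shared_ptr<C2LinearBlock> *block) {
        c2_status_t status;
        do {
            status = pool->fetchLinearBlock(capacity, usage, block);
        } while (status == C2_BLOCKING);
        return status;  // C2_OK on success; any other error is propagated unchanged
    }
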
diff --git a/media/codec2/components/flac/C2SoftFlacDec.cpp b/media/codec2/components/flac/C2SoftFlacDec.cpp
index 86b16e8..10b14ce 100644
--- a/media/codec2/components/flac/C2SoftFlacDec.cpp
+++ b/media/codec2/components/flac/C2SoftFlacDec.cpp
@@ -37,44 +37,44 @@
         setDerivedInstance(this);
 
         addParameter(
-                DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+                DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+                DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+                DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
                         MEDIA_MIMETYPE_AUDIO_FLAC))
                 .build());
 
         addParameter(
-                DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+                DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
                         MEDIA_MIMETYPE_AUDIO_RAW))
                 .build());
 
         addParameter(
-                DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+                DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
                 .withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
                 .withFields({C2F(mSampleRate, value).inRange(1, 655350)})
                 .withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
                 .build());
 
         addParameter(
-                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::output(0u, 1))
                 .withFields({C2F(mChannelCount, value).inRange(1, 8)})
                 .withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
                 .build());
 
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::input(0u, 768000))
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::input(0u, 768000))
                 .withFields({C2F(mBitrate, value).inRange(1, 21000000)})
                 .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
                 .build());
@@ -99,13 +99,13 @@
     int32_t getPcmEncodingInfo() const { return mPcmEncodingInfo->value; }
 
 private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
     std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
-    std::shared_ptr<C2BitrateTuning::input> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
     std::shared_ptr<C2StreamPcmEncodingInfo::output> mPcmEncodingInfo;
 };
diff --git a/media/codec2/components/flac/C2SoftFlacEnc.cpp b/media/codec2/components/flac/C2SoftFlacEnc.cpp
index 4ea35c2..0ce2543 100644
--- a/media/codec2/components/flac/C2SoftFlacEnc.cpp
+++ b/media/codec2/components/flac/C2SoftFlacEnc.cpp
@@ -34,38 +34,38 @@
         : C2InterfaceHelper(helper) {
         setDerivedInstance(this);
         addParameter(
-                DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatAudio))
+                DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
                 .build());
         addParameter(
-                DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+                DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
         addParameter(
-                DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+                DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
                         MEDIA_MIMETYPE_AUDIO_RAW))
                 .build());
         addParameter(
-                DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+                DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
                         MEDIA_MIMETYPE_AUDIO_FLAC))
                 .build());
         addParameter(
-                DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+                DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
                 .withDefault(new C2StreamSampleRateInfo::input(0u, 44100))
                 .withFields({C2F(mSampleRate, value).inRange(1, 655350)})
                 .withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
                 .build());
         addParameter(
-                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::input(0u, 1))
                 .withFields({C2F(mChannelCount, value).inRange(1, 2)})
                 .withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
                 .build());
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::output(0u, 768000))
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::output(0u, 768000))
                 .withFields({C2F(mBitrate, value).inRange(1, 21000000)})
                 .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
                 .build());
@@ -92,13 +92,13 @@
     int32_t getPcmEncodingInfo() const { return mPcmEncodingInfo->value; }
 
 private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
     std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
-    std::shared_ptr<C2BitrateTuning::output> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
     std::shared_ptr<C2StreamPcmEncodingInfo::input> mPcmEncodingInfo;
 };
@@ -223,8 +223,8 @@
     }
 
     if (!mWroteHeader) {
-        std::unique_ptr<C2StreamCsdInfo::output> csd =
-            C2StreamCsdInfo::output::AllocUnique(mHeaderOffset, 0u);
+        std::unique_ptr<C2StreamInitDataInfo::output> csd =
+            C2StreamInitDataInfo::output::AllocUnique(mHeaderOffset, 0u);
         if (!csd) {
             ALOGE("CSD allocation failed");
             mSignalledError = true;
diff --git a/media/codec2/components/g711/C2SoftG711Dec.cpp b/media/codec2/components/g711/C2SoftG711Dec.cpp
index 1c71d45..504ca78 100644
--- a/media/codec2/components/g711/C2SoftG711Dec.cpp
+++ b/media/codec2/components/g711/C2SoftG711Dec.cpp
@@ -41,18 +41,18 @@
         setDerivedInstance(this);
 
         addParameter(
-                DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+                DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+                DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+                DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
 #ifdef ALAW
                         MEDIA_MIMETYPE_AUDIO_G711_ALAW
 #else
@@ -61,28 +61,28 @@
                 )).build());
 
         addParameter(
-                DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+                DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
                         MEDIA_MIMETYPE_AUDIO_RAW))
                 .build());
 
         addParameter(
-                DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+                DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
                 .withDefault(new C2StreamSampleRateInfo::output(0u, 8000))
                 .withFields({C2F(mSampleRate, value).inRange(8000, 48000)})
                 .withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
                 .build());
 
         addParameter(
-                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::output(0u, 1))
                 .withFields({C2F(mChannelCount, value).equalTo(1)})
                 .withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
                 .build());
 
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::input(0u, 64000))
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
                 .withFields({C2F(mBitrate, value).equalTo(64000)})
                 .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
                 .build());
@@ -94,13 +94,13 @@
     }
 
 private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
     std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
-    std::shared_ptr<C2BitrateTuning::input> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
 };
 
diff --git a/media/codec2/components/gsm/C2SoftGsmDec.cpp b/media/codec2/components/gsm/C2SoftGsmDec.cpp
index 7101c79..69d4885 100644
--- a/media/codec2/components/gsm/C2SoftGsmDec.cpp
+++ b/media/codec2/components/gsm/C2SoftGsmDec.cpp
@@ -36,44 +36,44 @@
         setDerivedInstance(this);
 
         addParameter(
-                DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+                DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+                DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+                DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
                         MEDIA_MIMETYPE_AUDIO_MSGSM))
                 .build());
 
         addParameter(
-                DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+                DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
                         MEDIA_MIMETYPE_AUDIO_RAW))
                 .build());
 
         addParameter(
-                DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+                DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
                 .withDefault(new C2StreamSampleRateInfo::output(0u, 8000))
                 .withFields({C2F(mSampleRate, value).equalTo(8000)})
                 .withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
                 .build());
 
         addParameter(
-                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::output(0u, 1))
                 .withFields({C2F(mChannelCount, value).equalTo(1)})
                 .withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
                 .build());
 
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::input(0u, 13200))
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::input(0u, 13200))
                 .withFields({C2F(mBitrate, value).equalTo(13200)})
                 .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
                 .build());
@@ -85,13 +85,13 @@
     }
 
    private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
     std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
-    std::shared_ptr<C2BitrateTuning::input> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
 };
 
diff --git a/media/codec2/components/hevc/Android.bp b/media/codec2/components/hevc/Android.bp
index 2a045e1..369bd78 100644
--- a/media/codec2/components/hevc/Android.bp
+++ b/media/codec2/components/hevc/Android.bp
@@ -9,8 +9,17 @@
 
     static_libs: ["libhevcdec"],
 
-    include_dirs: [
-        "external/libhevc/decoder",
-        "external/libhevc/common",
+}
+
+cc_library_shared {
+    name: "libcodec2_soft_hevcenc",
+    defaults: [
+        "libcodec2_soft-defaults",
+        "libcodec2_soft_sanitize_signed-defaults",
     ],
+
+    srcs: ["C2SoftHevcEnc.cpp"],
+
+    static_libs: ["libhevcenc"],
+
 }
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.cpp b/media/codec2/components/hevc/C2SoftHevcDec.cpp
index 99892ce..bb8dda0 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcDec.cpp
@@ -51,7 +51,11 @@
         noInputLatency();
         noTimeStretch();
 
-        // TODO: output latency and reordering
+        // TODO: Proper support for reorder depth.
+        addParameter(
+                DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
+                .withConstValue(new C2PortActualDelayTuning::output(8u))
+                .build());
 
         addParameter(
                 DefineParam(mAttrib, C2_PARAMKEY_COMPONENT_ATTRIBUTES)
@@ -188,7 +192,7 @@
     }
 
     static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
-                          C2P<C2VideoSizeStreamInfo::output> &me) {
+                          C2P<C2StreamPictureSizeInfo::output> &me) {
         (void)mayBlock;
         C2R res = C2R::Ok();
         if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -835,7 +839,7 @@
                 mHeight = s_decode_op.u4_pic_ht;
                 CHECK_EQ(0u, s_decode_op.u4_output_present);
 
-                C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+                C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
                 std::vector<std::unique_ptr<C2SettingResult>> failures;
                 c2_status_t err =
                     mIntf->config({&size}, C2_MAY_BLOCK, &failures);
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.cpp b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
new file mode 100644
index 0000000..2c0a7a0
--- /dev/null
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
@@ -0,0 +1,802 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2SoftHevcEnc"
+#include <log/log.h>
+
+#include <media/hardware/VideoAPI.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/foundation/AUtils.h>
+
+#include <C2Debug.h>
+#include <C2PlatformSupport.h>
+#include <Codec2BufferUtils.h>
+#include <SimpleC2Interface.h>
+#include <util/C2InterfaceHelper.h>
+
+#include "ihevc_typedefs.h"
+#include "itt_video_api.h"
+#include "ihevce_api.h"
+#include "ihevce_plugin.h"
+#include "C2SoftHevcEnc.h"
+
+namespace android {
+
+class C2SoftHevcEnc::IntfImpl : public C2InterfaceHelper {
+   public:
+    explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper>& helper)
+        : C2InterfaceHelper(helper) {
+        setDerivedInstance(this);
+
+        addParameter(
+            DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(
+                    new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
+                .build());
+
+        addParameter(
+            DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(
+                    new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
+                .build());
+
+        addParameter(
+            DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
+                    MEDIA_MIMETYPE_VIDEO_RAW))
+                .build());
+
+        addParameter(
+            DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
+                    MEDIA_MIMETYPE_VIDEO_HEVC))
+                .build());
+
+        addParameter(DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
+                         .withConstValue(new C2StreamUsageTuning::input(
+                             0u, (uint64_t)C2MemoryUsage::CPU_READ))
+                         .build());
+
+        addParameter(
+            DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+                .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
+                .withFields({
+                    C2F(mSize, width).inRange(320, 1920, 2),
+                    C2F(mSize, height).inRange(128, 1088, 2),
+                })
+                .withSetter(SizeSetter)
+                .build());
+
+        addParameter(
+            DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
+                .withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
+                .withFields({C2F(mFrameRate, value).greaterThan(0.)})
+                .withSetter(
+                    Setter<decltype(*mFrameRate)>::StrictValueWithNoDeps)
+                .build());
+
+        addParameter(
+            DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
+                .withFields({C2F(mBitrate, value).inRange(4096, 12000000)})
+                .withSetter(BitrateSetter)
+                .build());
+
+        addParameter(
+            DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+                .withDefault(new C2StreamProfileLevelInfo::output(
+                    0u, PROFILE_HEVC_MAIN, LEVEL_HEVC_MAIN_1))
+                .withFields({
+                    C2F(mProfileLevel, profile)
+                        .oneOf({C2Config::PROFILE_HEVC_MAIN,
+                                C2Config::PROFILE_HEVC_MAIN_STILL}),
+                    C2F(mProfileLevel, level)
+                        .oneOf({LEVEL_HEVC_MAIN_1, LEVEL_HEVC_MAIN_2,
+                                LEVEL_HEVC_MAIN_2_1, LEVEL_HEVC_MAIN_3,
+                                LEVEL_HEVC_MAIN_3_1, LEVEL_HEVC_MAIN_4,
+                                LEVEL_HEVC_MAIN_4_1, LEVEL_HEVC_MAIN_5,
+                                LEVEL_HEVC_MAIN_5_1, LEVEL_HEVC_MAIN_5_2}),
+                })
+                .withSetter(ProfileLevelSetter, mSize, mFrameRate, mBitrate)
+                .build());
+
+        addParameter(
+                DefineParam(mRequestSync, C2_PARAMKEY_REQUEST_SYNC_FRAME)
+                .withDefault(new C2StreamRequestSyncFrameTuning::output(0u, C2_FALSE))
+                .withFields({C2F(mRequestSync, value).oneOf({ C2_FALSE, C2_TRUE }) })
+                .withSetter(Setter<decltype(*mRequestSync)>::NonStrictValueWithNoDeps)
+                .build());
+
+        addParameter(
+            DefineParam(mSyncFramePeriod, C2_PARAMKEY_SYNC_FRAME_INTERVAL)
+                .withDefault(
+                    new C2StreamSyncFrameIntervalTuning::output(0u, 1000000))
+                .withFields({C2F(mSyncFramePeriod, value).any()})
+                .withSetter(
+                    Setter<decltype(*mSyncFramePeriod)>::StrictValueWithNoDeps)
+                .build());
+    }
+
+    static C2R BitrateSetter(bool mayBlock,
+                             C2P<C2StreamBitrateInfo::output>& me) {
+        (void)mayBlock;
+        C2R res = C2R::Ok();
+        if (me.v.value <= 4096) {
+            me.set().value = 4096;
+        }
+        return res;
+    }
+
+    static C2R SizeSetter(bool mayBlock,
+                          const C2P<C2StreamPictureSizeInfo::input>& oldMe,
+                          C2P<C2StreamPictureSizeInfo::input>& me) {
+        (void)mayBlock;
+        C2R res = C2R::Ok();
+        if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
+            res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.width)));
+            me.set().width = oldMe.v.width;
+        }
+        if (!me.F(me.v.height).supportsAtAll(me.v.height)) {
+            res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.height)));
+            me.set().height = oldMe.v.height;
+        }
+        return res;
+    }
+
+    static C2R ProfileLevelSetter(
+            bool mayBlock,
+            C2P<C2StreamProfileLevelInfo::output> &me,
+            const C2P<C2StreamPictureSizeInfo::input> &size,
+            const C2P<C2StreamFrameRateInfo::output> &frameRate,
+            const C2P<C2StreamBitrateInfo::output> &bitrate) {
+        (void)mayBlock;
+        if (!me.F(me.v.profile).supportsAtAll(me.v.profile)) {
+            me.set().profile = PROFILE_HEVC_MAIN;
+        }
+
+        struct LevelLimits {
+            C2Config::level_t level;
+            uint64_t samplesPerSec;
+            uint64_t samples;
+            uint32_t bitrate;
+        };
+
+        constexpr LevelLimits kLimits[] = {
+            { LEVEL_HEVC_MAIN_1,       552960,    36864,    128000 },
+            { LEVEL_HEVC_MAIN_2,      3686400,   122880,   1500000 },
+            { LEVEL_HEVC_MAIN_2_1,    7372800,   245760,   3000000 },
+            { LEVEL_HEVC_MAIN_3,     16588800,   552960,   6000000 },
+            { LEVEL_HEVC_MAIN_3_1,   33177600,   983040,  10000000 },
+            { LEVEL_HEVC_MAIN_4,     66846720,  2228224,  12000000 },
+            { LEVEL_HEVC_MAIN_4_1,  133693440,  2228224,  20000000 },
+            { LEVEL_HEVC_MAIN_5,    267386880,  8912896,  25000000 },
+            { LEVEL_HEVC_MAIN_5_1,  534773760,  8912896,  40000000 },
+            { LEVEL_HEVC_MAIN_5_2, 1069547520,  8912896,  60000000 },
+            { LEVEL_HEVC_MAIN_6,   1069547520, 35651584,  60000000 },
+            { LEVEL_HEVC_MAIN_6_1, 2139095040, 35651584, 120000000 },
+            { LEVEL_HEVC_MAIN_6_2, 4278190080, 35651584, 240000000 },
+        };
+
+        uint64_t samples = size.v.width * size.v.height;
+        uint64_t samplesPerSec = samples * frameRate.v.value;
+
+        // Check if the supplied level meets the sample / bitrate requirements.
+        // If not, update the level to the lowest level meeting the requirements.
+
+        bool found = false;
+        // By default needsUpdate = false in case the supplied level does meet
+        // the requirements.
+        bool needsUpdate = false;
+        for (const LevelLimits &limit : kLimits) {
+            if (samples <= limit.samples && samplesPerSec <= limit.samplesPerSec &&
+                    bitrate.v.value <= limit.bitrate) {
+                // This is the lowest level that meets the requirements, and if
+                // we haven't seen the supplied level yet, that means we don't
+                // need the update.
+                if (needsUpdate) {
+                    ALOGD("Given level %x does not cover current configuration: "
+                          "adjusting to %x", me.v.level, limit.level);
+                    me.set().level = limit.level;
+                }
+                found = true;
+                break;
+            }
+            if (me.v.level == limit.level) {
+                // We break out of the loop when the lowest feasible level is
+                // found. The fact that we're here means that our level doesn't
+                // meet the requirement and needs to be updated.
+                needsUpdate = true;
+            }
+        }
+        if (!found) {
+            // We set to the highest supported level.
+            me.set().level = LEVEL_HEVC_MAIN_5_2;
+        }
+        return C2R::Ok();
+    }
+
+    UWORD32 getProfile_l() const {
+        switch (mProfileLevel->profile) {
+        case PROFILE_HEVC_MAIN:  [[fallthrough]];
+        case PROFILE_HEVC_MAIN_STILL: return 1;
+        default:
+            ALOGD("Unrecognized profile: %x", mProfileLevel->profile);
+            return 1;
+        }
+    }
+
+    UWORD32 getLevel_l() const {
+        struct Level {
+            C2Config::level_t c2Level;
+            UWORD32 hevcLevel;
+        };
+        constexpr Level levels[] = {
+            { LEVEL_HEVC_MAIN_1,    30 },
+            { LEVEL_HEVC_MAIN_2,    60 },
+            { LEVEL_HEVC_MAIN_2_1,  63 },
+            { LEVEL_HEVC_MAIN_3,    90 },
+            { LEVEL_HEVC_MAIN_3_1,  93 },
+            { LEVEL_HEVC_MAIN_4,   120 },
+            { LEVEL_HEVC_MAIN_4_1, 123 },
+            { LEVEL_HEVC_MAIN_5,   150 },
+            { LEVEL_HEVC_MAIN_5_1, 153 },
+            { LEVEL_HEVC_MAIN_5_2, 156 },
+            { LEVEL_HEVC_MAIN_6,   180 },
+            { LEVEL_HEVC_MAIN_6_1, 183 },
+            { LEVEL_HEVC_MAIN_6_2, 186 },
+        };
+        for (const Level &level : levels) {
+            if (mProfileLevel->level == level.c2Level) {
+                return level.hevcLevel;
+            }
+        }
+        ALOGD("Unrecognized level: %x", mProfileLevel->level);
+        return 156;
+    }
+    uint32_t getSyncFramePeriod_l() const {
+        if (mSyncFramePeriod->value < 0 ||
+            mSyncFramePeriod->value == INT64_MAX) {
+            return 0;
+        }
+        double period = mSyncFramePeriod->value / 1e6 * mFrameRate->value;
+        return (uint32_t)c2_max(c2_min(period + 0.5, double(UINT32_MAX)), 1.);
+    }
+
+   std::shared_ptr<C2StreamPictureSizeInfo::input> getSize_l() const {
+        return mSize;
+    }
+    std::shared_ptr<C2StreamFrameRateInfo::output> getFrameRate_l() const {
+        return mFrameRate;
+    }
+    std::shared_ptr<C2StreamBitrateInfo::output> getBitrate_l() const {
+        return mBitrate;
+    }
+    std::shared_ptr<C2StreamRequestSyncFrameTuning::output> getRequestSync_l() const {
+        return mRequestSync;
+    }
+
+   private:
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
+    std::shared_ptr<C2StreamUsageTuning::input> mUsage;
+    std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
+    std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
+    std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
+    std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
+    std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
+    std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
+};
+constexpr char COMPONENT_NAME[] = "c2.android.hevc.encoder";
+
+static size_t GetCPUCoreCount() {
+    long cpuCoreCount = 1;
+#if defined(_SC_NPROCESSORS_ONLN)
+    cpuCoreCount = sysconf(_SC_NPROCESSORS_ONLN);
+#else
+    // _SC_NPROC_ONLN must be defined...
+    cpuCoreCount = sysconf(_SC_NPROC_ONLN);
+#endif
+    CHECK(cpuCoreCount >= 1);
+    ALOGV("Number of CPU cores: %ld", cpuCoreCount);
+    return (size_t)cpuCoreCount;
+}
+
+C2SoftHevcEnc::C2SoftHevcEnc(const char* name, c2_node_id_t id,
+                             const std::shared_ptr<IntfImpl>& intfImpl)
+    : SimpleC2Component(
+          std::make_shared<SimpleInterface<IntfImpl>>(name, id, intfImpl)),
+      mIntf(intfImpl),
+      mIvVideoColorFormat(IV_YUV_420P),
+      mHevcEncProfile(1),
+      mHevcEncLevel(30),
+      mStarted(false),
+      mSpsPpsHeaderReceived(false),
+      mSignalledEos(false),
+      mSignalledError(false),
+      mCodecCtx(nullptr) {
+    // If dump is enabled, then create an empty file
+    GENERATE_FILE_NAMES();
+    CREATE_DUMP_FILE(mInFile);
+    CREATE_DUMP_FILE(mOutFile);
+
+    gettimeofday(&mTimeStart, nullptr);
+    gettimeofday(&mTimeEnd, nullptr);
+}
+
+C2SoftHevcEnc::~C2SoftHevcEnc() {
+    releaseEncoder();
+}
+
+c2_status_t C2SoftHevcEnc::onInit() {
+    return initEncoder();
+}
+
+c2_status_t C2SoftHevcEnc::onStop() {
+    if (!mStarted) {
+        return C2_OK;
+    }
+    return releaseEncoder();
+}
+
+void C2SoftHevcEnc::onReset() {
+    onStop();
+    initEncoder();
+}
+
+void C2SoftHevcEnc::onRelease() {
+    onStop();
+}
+
+c2_status_t C2SoftHevcEnc::onFlush_sm() {
+    return C2_OK;
+}
+
+static void fillEmptyWork(const std::unique_ptr<C2Work>& work) {
+    uint32_t flags = 0;
+    if (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) {
+        flags |= C2FrameData::FLAG_END_OF_STREAM;
+        ALOGV("Signalling EOS");
+    }
+    work->worklets.front()->output.flags = (C2FrameData::flags_t)flags;
+    work->worklets.front()->output.buffers.clear();
+    work->worklets.front()->output.ordinal = work->input.ordinal;
+    work->workletsProcessed = 1u;
+}
+
+c2_status_t C2SoftHevcEnc::initEncParams() {
+    mCodecCtx = nullptr;
+    mNumCores = MIN(GetCPUCoreCount(), CODEC_MAX_CORES);
+    memset(&mEncParams, 0, sizeof(ihevce_static_cfg_params_t));
+
+    // default configuration
+    IHEVCE_PLUGIN_STATUS_T err = ihevce_set_def_params(&mEncParams);
+    if (IHEVCE_EOK != err) {
+        ALOGE("HEVC default init failed : 0x%x", err);
+        return C2_CORRUPTED;
+    }
+
+    // update configuration
+    mEncParams.s_src_prms.i4_width = mSize->width;
+    mEncParams.s_src_prms.i4_height = mSize->height;
+    mEncParams.s_src_prms.i4_frm_rate_denom = 1000;
+    mEncParams.s_src_prms.i4_frm_rate_num = mFrameRate->value * mEncParams.s_src_prms.i4_frm_rate_denom;
+    mEncParams.s_tgt_lyr_prms.as_tgt_params[0].i4_quality_preset = IHEVCE_QUALITY_P5;
+    mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_tgt_bitrate[0] =
+        mBitrate->value;
+    mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_peak_bitrate[0] =
+        mBitrate->value << 1;
+    mEncParams.s_tgt_lyr_prms.as_tgt_params[0].i4_codec_level = mHevcEncLevel;
+    mEncParams.s_coding_tools_prms.i4_max_i_open_gop_period = mIDRInterval;
+    mEncParams.s_coding_tools_prms.i4_max_cra_open_gop_period = mIDRInterval;
+    mIvVideoColorFormat = IV_YUV_420P;
+    mEncParams.s_multi_thrd_prms.i4_max_num_cores = mNumCores;
+    mEncParams.s_out_strm_prms.i4_codec_profile = mHevcEncProfile;
+    mEncParams.s_config_prms.i4_rate_control_mode = 2;
+    mEncParams.s_lap_prms.i4_rc_look_ahead_pics = 0;
+
+    return C2_OK;
+}
+
+c2_status_t C2SoftHevcEnc::releaseEncoder() {
+    mSpsPpsHeaderReceived = false;
+    mSignalledEos = false;
+    mSignalledError = false;
+    mStarted = false;
+
+    if (mCodecCtx) {
+        IHEVCE_PLUGIN_STATUS_T err = ihevce_close(mCodecCtx);
+        if (IHEVCE_EOK != err) return C2_CORRUPTED;
+        mCodecCtx = nullptr;
+    }
+    return C2_OK;
+}
+
+c2_status_t C2SoftHevcEnc::drain(uint32_t drainMode,
+                                 const std::shared_ptr<C2BlockPool>& pool) {
+    (void)drainMode;
+    (void)pool;
+    return C2_OK;
+}
+c2_status_t C2SoftHevcEnc::initEncoder() {
+    CHECK(!mCodecCtx);
+    {
+        IntfImpl::Lock lock = mIntf->lock();
+        mSize = mIntf->getSize_l();
+        mBitrate = mIntf->getBitrate_l();
+        mFrameRate = mIntf->getFrameRate_l();
+        mHevcEncProfile = mIntf->getProfile_l();
+        mHevcEncLevel = mIntf->getLevel_l();
+        mIDRInterval = mIntf->getSyncFramePeriod_l();
+    }
+
+    c2_status_t status = initEncParams();
+
+    if (C2_OK != status) {
+        ALOGE("Failed to initialize encoder params : 0x%x", status);
+        mSignalledError = true;
+        return status;
+    }
+
+    IHEVCE_PLUGIN_STATUS_T err = IHEVCE_EOK;
+    err = ihevce_init(&mEncParams, &mCodecCtx);
+    if (IHEVCE_EOK != err) {
+        ALOGE("HEVC encoder init failed : 0x%x", err);
+        return C2_CORRUPTED;
+    }
+
+    mStarted = true;
+    return C2_OK;
+}
+
+c2_status_t C2SoftHevcEnc::setEncodeArgs(ihevce_inp_buf_t* ps_encode_ip,
+                                         const C2GraphicView* const input,
+                                         uint64_t timestamp) {
+    ihevce_static_cfg_params_t* params = &mEncParams;
+    memset(ps_encode_ip, 0, sizeof(ihevce_inp_buf_t));
+
+    if (!input) {
+        return C2_OK;
+    }
+
+    if (input->width() < mSize->width ||
+        input->height() < mSize->height) {
+        /* Expect width height to be configured */
+        ALOGW("unexpected Capacity Aspect %d(%d) x %d(%d)", input->width(),
+              mSize->width, input->height(), mSize->height);
+        return C2_BAD_VALUE;
+    }
+
+    const C2PlanarLayout& layout = input->layout();
+    uint8_t* yPlane =
+        const_cast<uint8_t *>(input->data()[C2PlanarLayout::PLANE_Y]);
+    uint8_t* uPlane =
+        const_cast<uint8_t *>(input->data()[C2PlanarLayout::PLANE_U]);
+    uint8_t* vPlane =
+        const_cast<uint8_t *>(input->data()[C2PlanarLayout::PLANE_V]);
+    int32_t yStride = layout.planes[C2PlanarLayout::PLANE_Y].rowInc;
+    int32_t uStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
+    int32_t vStride = layout.planes[C2PlanarLayout::PLANE_V].rowInc;
+
+    uint32_t width = mSize->width;
+    uint32_t height = mSize->height;
+
+    // width and height are always even (as block size is 16x16)
+    CHECK_EQ((width & 1u), 0u);
+    CHECK_EQ((height & 1u), 0u);
+
+    size_t yPlaneSize = width * height;
+
+    switch (layout.type) {
+        case C2PlanarLayout::TYPE_RGB:
+            [[fallthrough]];
+        case C2PlanarLayout::TYPE_RGBA: {
+            MemoryBlock conversionBuffer =
+                mConversionBuffers.fetch(yPlaneSize * 3 / 2);
+            mConversionBuffersInUse.emplace(conversionBuffer.data(),
+                                            conversionBuffer);
+            yPlane = conversionBuffer.data();
+            uPlane = yPlane + yPlaneSize;
+            vPlane = uPlane + yPlaneSize / 4;
+            yStride = width;
+            uStride = vStride = yStride / 2;
+            ConvertRGBToPlanarYUV(yPlane, yStride, height,
+                                  conversionBuffer.size(), *input);
+            break;
+        }
+        case C2PlanarLayout::TYPE_YUV: {
+            if (!IsYUV420(*input)) {
+                ALOGE("input is not YUV420");
+                return C2_BAD_VALUE;
+            }
+
+            if (layout.planes[layout.PLANE_Y].colInc == 1 &&
+                layout.planes[layout.PLANE_U].colInc == 1 &&
+                layout.planes[layout.PLANE_V].colInc == 1 &&
+                uStride == vStride && yStride == 2 * vStride) {
+                // I420 compatible - already set up above
+                break;
+            }
+
+            // copy to I420
+            yStride = width;
+            uStride = vStride = yStride / 2;
+            MemoryBlock conversionBuffer =
+                mConversionBuffers.fetch(yPlaneSize * 3 / 2);
+            mConversionBuffersInUse.emplace(conversionBuffer.data(),
+                                            conversionBuffer);
+            MediaImage2 img =
+                CreateYUV420PlanarMediaImage2(width, height, yStride, height);
+            status_t err = ImageCopy(conversionBuffer.data(), &img, *input);
+            if (err != OK) {
+                ALOGE("Buffer conversion failed: %d", err);
+                return C2_BAD_VALUE;
+            }
+            yPlane = conversionBuffer.data();
+            uPlane = yPlane + yPlaneSize;
+            vPlane = uPlane + yPlaneSize / 4;
+            break;
+        }
+
+        case C2PlanarLayout::TYPE_YUVA:
+            ALOGE("YUVA plane type is not supported");
+            return C2_BAD_VALUE;
+
+        default:
+            ALOGE("Unrecognized plane type: %d", layout.type);
+            return C2_BAD_VALUE;
+    }
+
+    switch (mIvVideoColorFormat) {
+        case IV_YUV_420P: {
+            // input buffer is supposed to be const but Ittiam API wants bare
+            // pointer.
+            ps_encode_ip->apv_inp_planes[0] = yPlane;
+            ps_encode_ip->apv_inp_planes[1] = uPlane;
+            ps_encode_ip->apv_inp_planes[2] = vPlane;
+
+            ps_encode_ip->ai4_inp_strd[0] = yStride;
+            ps_encode_ip->ai4_inp_strd[1] = uStride;
+            ps_encode_ip->ai4_inp_strd[2] = vStride;
+
+            ps_encode_ip->ai4_inp_size[0] = yStride * height;
+            ps_encode_ip->ai4_inp_size[1] = uStride * height >> 1;
+            ps_encode_ip->ai4_inp_size[2] = vStride * height >> 1;
+            break;
+        }
+
+        case IV_YUV_422ILE: {
+            // TODO
+            break;
+        }
+
+        case IV_YUV_420SP_UV:
+        case IV_YUV_420SP_VU:
+        default: {
+            ps_encode_ip->apv_inp_planes[0] = yPlane;
+            ps_encode_ip->apv_inp_planes[1] = uPlane;
+            ps_encode_ip->apv_inp_planes[2] = nullptr;
+
+            ps_encode_ip->ai4_inp_strd[0] = yStride;
+            ps_encode_ip->ai4_inp_strd[1] = uStride;
+            ps_encode_ip->ai4_inp_strd[2] = 0;
+
+            ps_encode_ip->ai4_inp_size[0] = yStride * height;
+            ps_encode_ip->ai4_inp_size[1] = uStride * height >> 1;
+            ps_encode_ip->ai4_inp_size[2] = 0;
+            break;
+        }
+    }
+
+    ps_encode_ip->i4_curr_bitrate =
+        params->s_tgt_lyr_prms.as_tgt_params[0].ai4_tgt_bitrate[0];
+    ps_encode_ip->i4_curr_peak_bitrate =
+        params->s_tgt_lyr_prms.as_tgt_params[0].ai4_peak_bitrate[0];
+    ps_encode_ip->i4_curr_rate_factor = params->s_config_prms.i4_rate_factor;
+    ps_encode_ip->u8_pts = timestamp;
+    return C2_OK;
+}
+
+void C2SoftHevcEnc::process(const std::unique_ptr<C2Work>& work,
+                            const std::shared_ptr<C2BlockPool>& pool) {
+    // Initialize output work
+    work->result = C2_OK;
+    work->workletsProcessed = 1u;
+    work->worklets.front()->output.flags = work->input.flags;
+
+    if (mSignalledError || mSignalledEos) {
+        work->result = C2_BAD_VALUE;
+        ALOGD("Signalled Error / Signalled Eos");
+        return;
+    }
+    c2_status_t status = C2_OK;
+
+    // Initialize encoder if not already initialized
+    if (!mStarted) {
+        status = initEncoder();
+        if (C2_OK != status) {
+            ALOGE("Failed to initialize encoder : 0x%x", status);
+            mSignalledError = true;
+            work->result = status;
+            return;
+        }
+    }
+
+    std::shared_ptr<const C2GraphicView> view;
+    std::shared_ptr<C2Buffer> inputBuffer = nullptr;
+    bool eos = ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) != 0);
+    if (!work->input.buffers.empty()) {
+        inputBuffer = work->input.buffers[0];
+        view = std::make_shared<const C2GraphicView>(
+            inputBuffer->data().graphicBlocks().front().map().get());
+        if (view->error() != C2_OK) {
+            ALOGE("graphic view map err = %d", view->error());
+            mSignalledError = true;
+            return;
+        }
+    }
+
+    IHEVCE_PLUGIN_STATUS_T err = IHEVCE_EOK;
+
+    fillEmptyWork(work);
+    if (!mSpsPpsHeaderReceived) {
+        ihevce_out_buf_t s_header_op{};
+        err = ihevce_encode_header(mCodecCtx, &s_header_op);
+        if (err == IHEVCE_EOK && s_header_op.i4_bytes_generated) {
+            std::unique_ptr<C2StreamInitDataInfo::output> csd =
+                C2StreamInitDataInfo::output::AllocUnique(
+                    s_header_op.i4_bytes_generated, 0u);
+            if (!csd) {
+                ALOGE("CSD allocation failed");
+                mSignalledError = true;
+                work->result = C2_NO_MEMORY;
+                return;
+            }
+            memcpy(csd->m.value, s_header_op.pu1_output_buf,
+                   s_header_op.i4_bytes_generated);
+            DUMP_TO_FILE(mOutFile, csd->m.value, csd->flexCount());
+            work->worklets.front()->output.configUpdate.push_back(
+                std::move(csd));
+            mSpsPpsHeaderReceived = true;
+        }
+        if (!inputBuffer) {
+            return;
+        }
+    }
+    ihevce_inp_buf_t s_encode_ip{};
+    ihevce_out_buf_t s_encode_op{};
+    uint64_t timestamp = work->input.ordinal.timestamp.peekull();
+
+    status = setEncodeArgs(&s_encode_ip, view.get(), timestamp);
+    if (C2_OK != status) {
+        mSignalledError = true;
+        ALOGE("setEncodeArgs failed : 0x%x", status);
+        work->result = status;
+        return;
+    }
+
+    uint64_t timeDelay = 0;
+    uint64_t timeTaken = 0;
+    GETTIME(&mTimeStart, nullptr);
+    TIME_DIFF(mTimeEnd, mTimeStart, timeDelay);
+
+    ihevce_inp_buf_t* ps_encode_ip = (inputBuffer) ? &s_encode_ip : nullptr;
+
+    err = ihevce_encode(mCodecCtx, ps_encode_ip, &s_encode_op);
+    if (IHEVCE_EOK != err) {
+        ALOGE("Encode Frame failed : 0x%x", err);
+        mSignalledError = true;
+        work->result = C2_CORRUPTED;
+        return;
+    }
+
+    GETTIME(&mTimeEnd, nullptr);
+    /* Compute time taken for encode() */
+    TIME_DIFF(mTimeStart, mTimeEnd, timeTaken);
+
+    ALOGV("timeTaken=%6d delay=%6d numBytes=%6d", (int)timeTaken,
+          (int)timeDelay, s_encode_op.i4_bytes_generated);
+
+    if (s_encode_op.i4_bytes_generated) {
+        std::shared_ptr<C2LinearBlock> block;
+        C2MemoryUsage usage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
+        status = pool->fetchLinearBlock(s_encode_op.i4_bytes_generated, usage, &block);
+        if (C2_OK != status) {
+            ALOGE("fetchLinearBlock for Output failed with status 0x%x", status);
+            work->result = C2_NO_MEMORY;
+            mSignalledError = true;
+            return;
+        }
+        C2WriteView wView = block->map().get();
+        if (C2_OK != wView.error()) {
+            ALOGE("write view map failed with status 0x%x", wView.error());
+            work->result = wView.error();
+            mSignalledError = true;
+            return;
+        }
+        memcpy(wView.data(), s_encode_op.pu1_output_buf,
+               s_encode_op.i4_bytes_generated);
+
+        std::shared_ptr<C2Buffer> buffer =
+            createLinearBuffer(block, 0, s_encode_op.i4_bytes_generated);
+
+        DUMP_TO_FILE(mOutFile, s_encode_op.pu1_output_buf,
+                     s_encode_op.i4_bytes_generated);
+
+        work->worklets.front()->output.ordinal.timestamp = s_encode_op.u8_pts;
+        if (s_encode_op.i4_is_key_frame) {
+            ALOGV("IDR frame produced");
+            buffer->setInfo(
+                std::make_shared<C2StreamPictureTypeMaskInfo::output>(
+                    0u /* stream id */, C2Config::SYNC_FRAME));
+        }
+        work->worklets.front()->output.buffers.push_back(buffer);
+    }
+    if (eos) {
+        mSignalledEos = true;
+    }
+}
+
+class C2SoftHevcEncFactory : public C2ComponentFactory {
+   public:
+    C2SoftHevcEncFactory()
+        : mHelper(std::static_pointer_cast<C2ReflectorHelper>(
+              GetCodec2PlatformComponentStore()->getParamReflector())) {}
+
+    virtual c2_status_t createComponent(
+        c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+        std::function<void(C2Component*)> deleter) override {
+        *component = std::shared_ptr<C2Component>(
+            new C2SoftHevcEnc(
+                COMPONENT_NAME, id,
+                std::make_shared<C2SoftHevcEnc::IntfImpl>(mHelper)),
+            deleter);
+        return C2_OK;
+    }
+
+    virtual c2_status_t createInterface(
+        c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+        std::function<void(C2ComponentInterface*)> deleter) override {
+        *interface = std::shared_ptr<C2ComponentInterface>(
+            new SimpleInterface<C2SoftHevcEnc::IntfImpl>(
+                COMPONENT_NAME, id,
+                std::make_shared<C2SoftHevcEnc::IntfImpl>(mHelper)),
+            deleter);
+        return C2_OK;
+    }
+
+    virtual ~C2SoftHevcEncFactory() override = default;
+
+   private:
+    std::shared_ptr<C2ReflectorHelper> mHelper;
+};
+
+}  // namespace android
+
+extern "C" ::C2ComponentFactory* CreateCodec2Factory() {
+    ALOGV("in %s", __func__);
+    return new ::android::C2SoftHevcEncFactory();
+}
+
+extern "C" void DestroyCodec2Factory(::C2ComponentFactory* factory) {
+    ALOGV("in %s", __func__);
+    delete factory;
+}
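
The two exported C symbols above are the plugin's only public entry points; everything else stays internal to the component. As a rough sketch of how a host process could resolve and pair them at runtime — the shared-object name below is purely hypothetical and the real Codec2 component store has its own loader — one might write:

    // Sketch only: resolve the CreateCodec2Factory/DestroyCodec2Factory entry
    // points from an assumed plugin library name via plain dlfcn.
    #include <dlfcn.h>
    #include <memory>
    #include <C2Component.h>   // declares C2ComponentFactory (header name assumed)

    std::shared_ptr<C2ComponentFactory> loadFactory(const char* soName) {
        void* lib = dlopen(soName, RTLD_NOW | RTLD_LOCAL);
        if (lib == nullptr) return nullptr;

        using CreateFn  = ::C2ComponentFactory* (*)();
        using DestroyFn = void (*)(::C2ComponentFactory*);
        auto create  = reinterpret_cast<CreateFn>(dlsym(lib, "CreateCodec2Factory"));
        auto destroy = reinterpret_cast<DestroyFn>(dlsym(lib, "DestroyCodec2Factory"));
        if (create == nullptr || destroy == nullptr) {
            dlclose(lib);
            return nullptr;
        }
        // Tie destruction of the factory to the matching exported deleter.
        // The dlopen handle is deliberately left open in this short sketch.
        return std::shared_ptr<C2ComponentFactory>(create(), destroy);
    }

    // Usage (library name is an assumption, not taken from this change):
    //   auto factory = loadFactory("libcodec2_soft_hevcenc.so");
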
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.h b/media/codec2/components/hevc/C2SoftHevcEnc.h
new file mode 100644
index 0000000..c22fea2
--- /dev/null
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_C2_SOFT_HEVC_ENC_H_
+#define ANDROID_C2_SOFT_HEVC_ENC_H_
+
+#include <map>
+#include <utils/Vector.h>
+#include <media/stagefright/foundation/ColorUtils.h>
+#include <SimpleC2Component.h>
+
+#include "ihevc_typedefs.h"
+
+namespace android {
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+/** Get time */
+#define GETTIME(a, b) gettimeofday(a, b);
+
+/** Compute difference between start and end */
+#define TIME_DIFF(start, end, diff)                      \
+    diff = (((end).tv_sec - (start).tv_sec) * 1000000) + \
+           ((end).tv_usec - (start).tv_usec);
+
+#define CODEC_MAX_CORES 4
+
+struct C2SoftHevcEnc : public SimpleC2Component {
+    class IntfImpl;
+
+    C2SoftHevcEnc(const char* name, c2_node_id_t id,
+                  const std::shared_ptr<IntfImpl>& intfImpl);
+
+    // From SimpleC2Component
+    c2_status_t onInit() override;
+    c2_status_t onStop() override;
+    void onReset() override;
+    void onRelease() override;
+    c2_status_t onFlush_sm() override;
+    void process(const std::unique_ptr<C2Work>& work,
+                 const std::shared_ptr<C2BlockPool>& pool) override;
+    c2_status_t drain(uint32_t drainMode,
+                      const std::shared_ptr<C2BlockPool>& pool) override;
+
+   protected:
+    virtual ~C2SoftHevcEnc();
+
+   private:
+    std::shared_ptr<IntfImpl> mIntf;
+    ihevce_static_cfg_params_t mEncParams;
+    size_t mNumCores;
+    UWORD32 mIDRInterval;
+    IV_COLOR_FORMAT_T mIvVideoColorFormat;
+    UWORD32 mHevcEncProfile;
+    UWORD32 mHevcEncLevel;
+    bool mStarted;
+    bool mSpsPpsHeaderReceived;
+    bool mSignalledEos;
+    bool mSignalledError;
+    void* mCodecCtx;
+    MemoryBlockPool mConversionBuffers;
+    std::map<void*, MemoryBlock> mConversionBuffersInUse;
+    // configurations used by component in process
+    // (TODO: keep this in intf but make them internal only)
+    std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
+    std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
+    std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
+
+#ifdef FILE_DUMP_ENABLE
+    char mInFile[200];
+    char mOutFile[200];
+#endif /* FILE_DUMP_ENABLE */
+
+    // profile
+    struct timeval mTimeStart;
+    struct timeval mTimeEnd;
+
+    c2_status_t initEncParams();
+    c2_status_t initEncoder();
+    c2_status_t releaseEncoder();
+    c2_status_t setEncodeArgs(ihevce_inp_buf_t* ps_encode_ip,
+                              const C2GraphicView* const input,
+                              uint64_t timestamp);
+    C2_DO_NOT_COPY(C2SoftHevcEnc);
+};
+
+#ifdef FILE_DUMP_ENABLE
+
+#define INPUT_DUMP_PATH "/data/local/tmp/hevc"
+#define INPUT_DUMP_EXT "yuv"
+#define OUTPUT_DUMP_PATH "/data/local/tmp/hevc"
+#define OUTPUT_DUMP_EXT "h265"
+#define GENERATE_FILE_NAMES()                                             \
+{                                                                         \
+    GETTIME(&mTimeStart, NULL);                                           \
+    strcpy(mInFile, "");                                                  \
+    ALOGD("GENERATE_FILE_NAMES");                                         \
+    sprintf(mInFile, "%s_%ld.%ld.%s", INPUT_DUMP_PATH, mTimeStart.tv_sec, \
+            mTimeStart.tv_usec, INPUT_DUMP_EXT);                          \
+    strcpy(mOutFile, "");                                                 \
+    sprintf(mOutFile, "%s_%ld.%ld.%s", OUTPUT_DUMP_PATH,                  \
+            mTimeStart.tv_sec, mTimeStart.tv_usec, OUTPUT_DUMP_EXT);      \
+}
+
+#define CREATE_DUMP_FILE(m_filename)                 \
+{                                                    \
+    FILE* fp = fopen(m_filename, "wb");              \
+    if (fp != NULL) {                                \
+        ALOGD("Opened file %s", m_filename);         \
+        fclose(fp);                                  \
+    } else {                                         \
+        ALOGD("Could not open file %s", m_filename); \
+    }                                                \
+}
+#define DUMP_TO_FILE(m_filename, m_buf, m_size)          \
+{                                                        \
+    FILE* fp = fopen(m_filename, "ab");                  \
+    if (fp != NULL && m_buf != NULL) {                   \
+        int i;                                           \
+        ALOGD("Dump to file!");                          \
+        i = fwrite(m_buf, 1, m_size, fp);                \
+        if (i != (int)m_size) {                          \
+            ALOGD("Error in fwrite, returned %d", i);    \
+            perror("Error in write to file");            \
+        }                                                \
+        fclose(fp);                                      \
+    } else {                                             \
+        ALOGD("Could not write to file %s", m_filename); \
+        if (fp != NULL) fclose(fp);                      \
+    }                                                    \
+}
+#else /* FILE_DUMP_ENABLE */
+#define INPUT_DUMP_PATH
+#define INPUT_DUMP_EXT
+#define OUTPUT_DUMP_PATH
+#define OUTPUT_DUMP_EXT
+#define GENERATE_FILE_NAMES()
+#define CREATE_DUMP_FILE(m_filename)
+#define DUMP_TO_FILE(m_filename, m_buf, m_size)
+#endif /* FILE_DUMP_ENABLE */
+
+}  // namespace android
+
+#endif  // ANDROID_C2_SOFT_HEVC_ENC_H_
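
The GETTIME/TIME_DIFF helpers above are thin gettimeofday wrappers that process() uses to log per-frame encode time and inter-frame delay. A minimal usage sketch, assuming a caller that only wants the elapsed microseconds around one encoder call (all names here are illustrative):

    // Sketch: elapsed-time measurement with the macros defined in this header.
    struct timeval start = {}, end = {};
    uint64_t elapsedUs = 0;

    GETTIME(&start, nullptr);
    // ... the call being profiled, e.g. ihevce_encode(...) ...
    GETTIME(&end, nullptr);
    TIME_DIFF(start, end, elapsedUs);   // (end - start) in microseconds

    ALOGV("call took %lld us", (long long)elapsedUs);
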
diff --git a/media/codec2/components/mp3/C2SoftMp3Dec.cpp b/media/codec2/components/mp3/C2SoftMp3Dec.cpp
index c8b8397..9db6d8f 100644
--- a/media/codec2/components/mp3/C2SoftMp3Dec.cpp
+++ b/media/codec2/components/mp3/C2SoftMp3Dec.cpp
@@ -40,29 +40,29 @@
         setDerivedInstance(this);
 
         addParameter(
-                DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+                DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+                DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+                DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
                         MEDIA_MIMETYPE_AUDIO_MPEG))
                 .build());
 
         addParameter(
-                DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+                DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
                         MEDIA_MIMETYPE_AUDIO_RAW))
                 .build());
 
         addParameter(
-                DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+                DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
                 .withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
                 .withFields({C2F(mSampleRate, value).oneOf({8000, 11025, 12000, 16000,
                     22050, 24000, 32000, 44100, 48000})})
@@ -70,15 +70,15 @@
                 .build());
 
         addParameter(
-                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::output(0u, 2))
                 .withFields({C2F(mChannelCount, value).inRange(1, 2)})
                 .withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
                 .build());
 
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::input(0u, 64000))
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
                 .withFields({C2F(mBitrate, value).inRange(8000, 320000)})
                 .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
                 .build());
@@ -90,13 +90,13 @@
     }
 
 private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
     std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
-    std::shared_ptr<C2BitrateTuning::input> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
 };
 
@@ -555,4 +555,3 @@
     ALOGV("in %s", __func__);
     delete factory;
 }
-
diff --git a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
index da32ec0..290677e 100644
--- a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
+++ b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
@@ -180,7 +180,7 @@
     }
 
     static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
-                          C2P<C2VideoSizeStreamInfo::output> &me) {
+                          C2P<C2StreamPictureSizeInfo::output> &me) {
         (void)mayBlock;
         C2R res = C2R::Ok();
         if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -892,7 +892,7 @@
 
             ALOGI("Configuring decoder: mWidth %d , mHeight %d ",
                    mWidth, mHeight);
-            C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+            C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
             std::vector<std::unique_ptr<C2SettingResult>> failures;
             c2_status_t err =
                 mIntf->config({&size}, C2_MAY_BLOCK, &failures);
@@ -931,7 +931,7 @@
 
                 ALOGI("Configuring decoder out: mWidth %d , mHeight %d ",
                        mWidth, mHeight);
-                C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+                C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
                 std::vector<std::unique_ptr<C2SettingResult>> failures;
                 c2_status_t err =
                     mIntf->config({&size}, C2_MAY_BLOCK, &failures);
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
index 901f5ed..3d4a733 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
@@ -60,7 +60,11 @@
         noInputLatency();
         noTimeStretch();
 
-        // TODO: output latency and reordering
+        // TODO: Proper support for reorder depth.
+        addParameter(
+                DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
+                .withConstValue(new C2PortActualDelayTuning::output(1u))
+                .build());
 
         addParameter(
                 DefineParam(mAttrib, C2_PARAMKEY_COMPONENT_ATTRIBUTES)
@@ -182,7 +186,7 @@
     }
 
     static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
-                          C2P<C2VideoSizeStreamInfo::output> &me) {
+                          C2P<C2StreamPictureSizeInfo::output> &me) {
         (void)mayBlock;
         C2R res = C2R::Ok();
         if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -570,7 +574,7 @@
         PVSetPostProcType(mDecHandle, 0);
         if (handleResChange(work)) {
             ALOGI("Setting width and height");
-            C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+            C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
             std::vector<std::unique_ptr<C2SettingResult>> failures;
             c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
             if (err == OK) {
@@ -642,7 +646,7 @@
             return;
         } else if (resChange) {
             ALOGI("Setting width and height");
-            C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+            C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
             std::vector<std::unique_ptr<C2SettingResult>> failures;
             c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
             if (err == OK) {
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
index c8796f3..89fa59d 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
@@ -52,26 +52,26 @@
         setDerivedInstance(this);
 
         addParameter(
-            DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+            DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
                 .withConstValue(
-                    new C2StreamFormatConfig::input(0u, C2FormatVideo))
+                    new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
                 .build());
 
         addParameter(
-            DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+            DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
                 .withConstValue(
-                    new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+                    new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-            DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+            DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
                     MEDIA_MIMETYPE_VIDEO_RAW))
                 .build());
 
         addParameter(
-            DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+            DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
 #ifdef MPEG4
                     MEDIA_MIMETYPE_VIDEO_MPEG4
 #else
@@ -80,14 +80,14 @@
                     ))
                 .build());
 
-        addParameter(DefineParam(mUsage, C2_NAME_INPUT_STREAM_USAGE_SETTING)
+        addParameter(DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
                          .withConstValue(new C2StreamUsageTuning::input(
                              0u, (uint64_t)C2MemoryUsage::CPU_READ))
                          .build());
 
         addParameter(
-            DefineParam(mSize, C2_NAME_STREAM_VIDEO_SIZE_SETTING)
-                .withDefault(new C2VideoSizeStreamTuning::input(0u, 176, 144))
+            DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+                .withDefault(new C2StreamPictureSizeInfo::input(0u, 176, 144))
                 .withFields({
 #ifdef MPEG4
                     C2F(mSize, width).inRange(16, 176, 16),
@@ -101,7 +101,7 @@
                 .build());
 
         addParameter(
-            DefineParam(mFrameRate, C2_NAME_STREAM_FRAME_RATE_SETTING)
+            DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
                 .withDefault(new C2StreamFrameRateInfo::output(0u, 17.))
                 // TODO: More restriction?
                 .withFields({C2F(mFrameRate, value).greaterThan(0.)})
@@ -110,8 +110,8 @@
                 .build());
 
         addParameter(
-            DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::output(0u, 64000))
+            DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
                 .withFields({C2F(mBitrate, value).inRange(4096, 12000000)})
                 .withSetter(BitrateSetter)
                 .build());
@@ -217,14 +217,14 @@
     }
 
    private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamUsageTuning::input> mUsage;
-    std::shared_ptr<C2VideoSizeStreamTuning::input> mSize;
+    std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
     std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
-    std::shared_ptr<C2BitrateTuning::output> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
     std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
     std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
 };
@@ -446,8 +446,8 @@
         }
 
         ++mNumInputFrames;
-        std::unique_ptr<C2StreamCsdInfo::output> csd =
-            C2StreamCsdInfo::output::AllocUnique(outputSize, 0u);
+        std::unique_ptr<C2StreamInitDataInfo::output> csd =
+            C2StreamInitDataInfo::output::AllocUnique(outputSize, 0u);
         if (!csd) {
             ALOGE("CSD allocation failed");
             mSignalledError = true;
@@ -595,7 +595,7 @@
         work->worklets.front()->output.ordinal.timestamp = inputTimeStamp;
         if (hintTrack.CodeType == 0) {
             buffer->setInfo(std::make_shared<C2StreamPictureTypeMaskInfo::output>(
-                    0u /* stream id */, C2PictureTypeKeyFrame));
+                    0u /* stream id */, C2Config::SYNC_FRAME));
         }
         work->worklets.front()->output.buffers.push_back(buffer);
     }
diff --git a/media/codec2/components/opus/C2SoftOpusDec.cpp b/media/codec2/components/opus/C2SoftOpusDec.cpp
index 3ce1fd6..680712e 100644
--- a/media/codec2/components/opus/C2SoftOpusDec.cpp
+++ b/media/codec2/components/opus/C2SoftOpusDec.cpp
@@ -40,44 +40,44 @@
         setDerivedInstance(this);
 
         addParameter(
-                DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+                DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+                DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+                DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
                         MEDIA_MIMETYPE_AUDIO_OPUS))
                 .build());
 
         addParameter(
-                DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+                DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
                         MEDIA_MIMETYPE_AUDIO_RAW))
                 .build());
 
         addParameter(
-                DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+                DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
                 .withDefault(new C2StreamSampleRateInfo::output(0u, 48000))
                 .withFields({C2F(mSampleRate, value).equalTo(48000)})
                 .withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
                 .build());
 
         addParameter(
-                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::output(0u, 1))
                 .withFields({C2F(mChannelCount, value).inRange(1, 8)})
                 .withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
                 .build());
 
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::input(0u, 6000))
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::input(0u, 6000))
                 .withFields({C2F(mBitrate, value).inRange(6000, 510000)})
                 .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
                 .build());
@@ -89,13 +89,13 @@
     }
 
    private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
     std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
-    std::shared_ptr<C2BitrateTuning::input> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
 };
 
diff --git a/media/codec2/components/opus/C2SoftOpusEnc.cpp b/media/codec2/components/opus/C2SoftOpusEnc.cpp
index 68fcea1..a0b2443 100644
--- a/media/codec2/components/opus/C2SoftOpusEnc.cpp
+++ b/media/codec2/components/opus/C2SoftOpusEnc.cpp
@@ -42,29 +42,29 @@
         setDerivedInstance(this);
 
         addParameter(
-                DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatAudio))
+                DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+                DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+                DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
                         MEDIA_MIMETYPE_AUDIO_RAW))
                 .build());
 
         addParameter(
-                DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+                DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
                         MEDIA_MIMETYPE_AUDIO_OPUS))
                 .build());
 
         addParameter(
-                DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+                DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
                 .withDefault(new C2StreamSampleRateInfo::input(0u, 48000))
                 .withFields({C2F(mSampleRate, value).oneOf({
                     8000, 12000, 16000, 24000, 48000})})
@@ -72,15 +72,15 @@
                 .build());
 
         addParameter(
-                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::input(0u, 1))
                 .withFields({C2F(mChannelCount, value).inRange(1, 8)})
                 .withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
                 .build());
 
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::output(0u, 128000))
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::output(0u, 128000))
                 .withFields({C2F(mBitrate, value).inRange(500, 512000)})
                 .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
                 .build());
@@ -104,13 +104,13 @@
     uint32_t getComplexity() const { return mComplexity->value; }
 
 private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
     std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
-    std::shared_ptr<C2BitrateTuning::output> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
     std::shared_ptr<C2StreamComplexityTuning::output> mComplexity;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
 };
@@ -423,8 +423,8 @@
         int headerLen = WriteOpusHeaders(opusHeader, mSampleRate, header,
             sizeof(header), mCodecDelay, mSeekPreRoll);
 
-        std::unique_ptr<C2StreamCsdInfo::output> csd =
-            C2StreamCsdInfo::output::AllocUnique(headerLen, 0u);
+        std::unique_ptr<C2StreamInitDataInfo::output> csd =
+            C2StreamInitDataInfo::output::AllocUnique(headerLen, 0u);
         if (!csd) {
             ALOGE("CSD allocation failed");
             mSignalledError = true;
diff --git a/media/codec2/components/raw/C2SoftRawDec.cpp b/media/codec2/components/raw/C2SoftRawDec.cpp
index 5c83481..802caa4 100644
--- a/media/codec2/components/raw/C2SoftRawDec.cpp
+++ b/media/codec2/components/raw/C2SoftRawDec.cpp
@@ -37,44 +37,44 @@
         setDerivedInstance(this);
 
         addParameter(
-                DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+                DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+                DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+                DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
                         MEDIA_MIMETYPE_AUDIO_RAW))
                 .build());
 
         addParameter(
-                DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+                DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
                         MEDIA_MIMETYPE_AUDIO_RAW))
                 .build());
 
         addParameter(
-                DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+                DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
                 .withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
                 .withFields({C2F(mSampleRate, value).inRange(8000, 192000)})
                 .withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
                 .build());
 
         addParameter(
-                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::output(0u, 2))
                 .withFields({C2F(mChannelCount, value).inRange(1, 8)})
                 .withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
                 .build());
 
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::input(0u, 64000))
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
                 .withFields({C2F(mBitrate, value).inRange(1, 10000000)})
                 .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
                 .build());
@@ -98,13 +98,13 @@
     }
 
 private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
     std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
-    std::shared_ptr<C2BitrateTuning::input> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
     std::shared_ptr<C2StreamPcmEncodingInfo::output> mPcmEncodingInfo;
 };
diff --git a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
index 48825e4..e7393ee 100644
--- a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
+++ b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
@@ -45,44 +45,44 @@
         setDerivedInstance(this);
 
         addParameter(
-                DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+                DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+                DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+                DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
                         MEDIA_MIMETYPE_AUDIO_VORBIS))
                 .build());
 
         addParameter(
-                DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+                DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
                         MEDIA_MIMETYPE_AUDIO_RAW))
                 .build());
 
         addParameter(
-                DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+                DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
                 .withDefault(new C2StreamSampleRateInfo::output(0u, 48000))
                 .withFields({C2F(mSampleRate, value).inRange(8000, 96000)})
                 .withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
                 .build());
 
         addParameter(
-                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::output(0u, 1))
                 .withFields({C2F(mChannelCount, value).inRange(1, 8)})
                 .withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
                 .build());
 
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::input(0u, 64000))
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
                 .withFields({C2F(mBitrate, value).inRange(32000, 500000)})
                 .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
                 .build());
@@ -94,13 +94,13 @@
     }
 
 private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
     std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
-    std::shared_ptr<C2BitrateTuning::input> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
 };
 
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 9ba2362..3120f7a 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -215,7 +215,7 @@
     }
 
     static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
-                          C2P<C2VideoSizeStreamInfo::output> &me) {
+                          C2P<C2StreamPictureSizeInfo::output> &me) {
         (void)mayBlock;
         C2R res = C2R::Ok();
         if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -700,7 +700,7 @@
         mWidth = img->d_w;
         mHeight = img->d_h;
 
-        C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+        C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
         std::vector<std::unique_ptr<C2SettingResult>> failures;
         c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
         if (err == C2_OK) {
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.cpp b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
index 155a84f..6509a88 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
@@ -633,7 +633,7 @@
             std::shared_ptr<C2Buffer> buffer = createLinearBuffer(block);
             if (encoded_packet->data.frame.flags & VPX_FRAME_IS_KEY) {
                 buffer->setInfo(std::make_shared<C2StreamPictureTypeMaskInfo::output>(
-                        0u /* stream id */, C2PictureTypeKeyFrame));
+                        0u /* stream id */, C2Config::SYNC_FRAME));
             }
             work->worklets.front()->output.buffers.push_back(buffer);
             work->worklets.front()->output.ordinal = work->input.ordinal;
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.h b/media/codec2/components/vpx/C2SoftVpxEnc.h
index 87ed1a9..5591a49 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.h
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.h
@@ -229,26 +229,26 @@
         setDerivedInstance(this);
 
         addParameter(
-            DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+            DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
                 .withConstValue(
-                    new C2StreamFormatConfig::input(0u, C2FormatVideo))
+                    new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
                 .build());
 
         addParameter(
-            DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+            DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
                 .withConstValue(
-                    new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+                    new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-            DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+            DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
                     MEDIA_MIMETYPE_VIDEO_RAW))
                 .build());
 
         addParameter(
-            DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+            DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
 #ifdef VP9
                     MEDIA_MIMETYPE_VIDEO_VP9
 #else
@@ -257,14 +257,14 @@
                     ))
                 .build());
 
-        addParameter(DefineParam(mUsage, C2_NAME_INPUT_STREAM_USAGE_SETTING)
+        addParameter(DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
                          .withConstValue(new C2StreamUsageTuning::input(
                              0u, (uint64_t)C2MemoryUsage::CPU_READ))
                          .build());
 
         addParameter(
-            DefineParam(mSize, C2_NAME_STREAM_VIDEO_SIZE_SETTING)
-                .withDefault(new C2VideoSizeStreamTuning::input(0u, 320, 240))
+            DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+                .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
                 .withFields({
                     C2F(mSize, width).inRange(2, 2048, 2),
                     C2F(mSize, height).inRange(2, 2048, 2),
@@ -285,7 +285,7 @@
                 .build());
 
         addParameter(
-            DefineParam(mFrameRate, C2_NAME_STREAM_FRAME_RATE_SETTING)
+            DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
                 .withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
                 // TODO: More restriction?
                 .withFields({C2F(mFrameRate, value).greaterThan(0.)})
@@ -312,8 +312,8 @@
                 .build());
 
         addParameter(
-            DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::output(0u, 64000))
+            DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
                 .withFields({C2F(mBitrate, value).inRange(4096, 40000000)})
                 .withSetter(BitrateSetter)
                 .build());
@@ -416,18 +416,18 @@
     }
 
    private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamUsageTuning::input> mUsage;
-    std::shared_ptr<C2VideoSizeStreamTuning::input> mSize;
+    std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
     std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
     std::shared_ptr<C2StreamTemporalLayeringTuning::output> mLayering;
     std::shared_ptr<C2StreamIntraRefreshTuning::output> mIntraRefresh;
     std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
     std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
-    std::shared_ptr<C2BitrateTuning::output> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
     std::shared_ptr<C2StreamBitrateModeTuning::output> mBitrateMode;
     std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
 };
diff --git a/media/codec2/components/xaac/C2SoftXaacDec.cpp b/media/codec2/components/xaac/C2SoftXaacDec.cpp
index c0ad992..ed730c3 100644
--- a/media/codec2/components/xaac/C2SoftXaacDec.cpp
+++ b/media/codec2/components/xaac/C2SoftXaacDec.cpp
@@ -66,29 +66,29 @@
         setDerivedInstance(this);
 
         addParameter(
-                DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+                DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
-                .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+                DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+                .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
                 .build());
 
         addParameter(
-                DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+                DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
                         MEDIA_MIMETYPE_AUDIO_AAC))
                 .build());
 
         addParameter(
-                DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
-                .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+                DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+                .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
                         MEDIA_MIMETYPE_AUDIO_RAW))
                 .build());
 
         addParameter(
-                DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+                DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
                 .withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
                 .withFields({C2F(mSampleRate, value).oneOf({
                     7350, 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
@@ -97,15 +97,15 @@
                 .build());
 
         addParameter(
-                DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+                DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
                 .withDefault(new C2StreamChannelCountInfo::output(0u, 1))
                 .withFields({C2F(mChannelCount, value).inRange(1, 8)})
                 .withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
                 .build());
 
         addParameter(
-                DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
-                .withDefault(new C2BitrateTuning::input(0u, 64000))
+                DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+                .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
                 .withFields({C2F(mBitrate, value).inRange(8000, 960000)})
                 .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
                 .build());
@@ -116,10 +116,10 @@
                 .build());
 
         addParameter(
-                DefineParam(mAacFormat, C2_NAME_STREAM_AAC_FORMAT_SETTING)
-                .withDefault(new C2StreamAacFormatInfo::input(0u, C2AacStreamFormatRaw))
+                DefineParam(mAacFormat, C2_PARAMKEY_AAC_PACKAGING)
+                .withDefault(new C2StreamAacFormatInfo::input(0u, C2Config::AAC_PACKAGING_RAW))
                 .withFields({C2F(mAacFormat, value).oneOf({
-                    C2AacStreamFormatRaw, C2AacStreamFormatAdts
+                    C2Config::AAC_PACKAGING_RAW, C2Config::AAC_PACKAGING_ADTS
                 })})
                 .withSetter(Setter<decltype(*mAacFormat)>::StrictValueWithNoDeps)
                 .build());
@@ -203,7 +203,7 @@
                 .build());
     }
 
-    bool isAdts() const { return mAacFormat->value == C2AacStreamFormatAdts; }
+    bool isAdts() const { return mAacFormat->value == C2Config::AAC_PACKAGING_ADTS; }
     uint32_t getBitrate() const { return mBitrate->value; }
     static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::input> &me) {
         (void)mayBlock;
@@ -218,13 +218,13 @@
     int32_t getDrcEffectType() const { return mDrcEffectType->value; }
 
 private:
-    std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
-    std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
-    std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
-    std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+    std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+    std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+    std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+    std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
     std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
     std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
-    std::shared_ptr<C2BitrateTuning::input> mBitrate;
+    std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
     std::shared_ptr<C2StreamAacFormatInfo::input> mAacFormat;
     std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
diff --git a/media/codec2/core/include/C2Buffer.h b/media/codec2/core/include/C2Buffer.h
index 2997f6e..3d3587c 100644
--- a/media/codec2/core/include/C2Buffer.h
+++ b/media/codec2/core/include/C2Buffer.h
@@ -888,6 +888,7 @@
      * \retval C2_OK        the operation was successful
      * \retval C2_NO_MEMORY not enough memory to complete any required allocation
      * \retval C2_TIMED_OUT the operation timed out
+     * \retval C2_BLOCKING  the operation is blocked
      * \retval C2_REFUSED   no permission to complete any required allocation
      * \retval C2_BAD_VALUE capacity or usage are not supported (invalid) (caller error)
      * \retval C2_OMITTED   this pool does not support linear blocks
@@ -916,6 +917,7 @@
      * \retval C2_OK        the operation was successful
      * \retval C2_NO_MEMORY not enough memory to complete any required allocation
      * \retval C2_TIMED_OUT the operation timed out
+     * \retval C2_BLOCKING  the operation is blocked
      * \retval C2_REFUSED   no permission to complete any required allocation
      * \retval C2_BAD_VALUE capacity or usage are not supported (invalid) (caller error)
      * \retval C2_OMITTED   this pool does not support circular blocks
@@ -946,6 +948,7 @@
      * \retval C2_OK        the operation was successful
      * \retval C2_NO_MEMORY not enough memory to complete any required allocation
      * \retval C2_TIMED_OUT the operation timed out
+     * \retval C2_BLOCKING  the operation is blocked
      * \retval C2_REFUSED   no permission to complete any required allocation
      * \retval C2_BAD_VALUE width, height, format or usage are not supported (invalid) (caller
      *                      error)
@@ -1991,7 +1994,6 @@
         GRAPHIC,            ///< the buffer contains a single graphic block
         GRAPHIC_CHUNKS,     ///< the buffer contains one or more graphic blocks
     };
-    typedef type_t Type; // deprecated
 
     /**
      * Gets the type of this buffer (data).
@@ -2039,23 +2041,6 @@
      */
     const C2BufferData data() const;
 
-    /**
-     * These will still work if used in onDeathNotify.
-     */
-#if 0
-    inline std::shared_ptr<C2LinearBuffer> asLinearBuffer() const {
-        return mType == LINEAR ? std::shared_ptr::reinterpret_cast<C2LinearBuffer>(this) : nullptr;
-    }
-
-    inline std::shared_ptr<C2GraphicBuffer> asGraphicBuffer() const {
-        return mType == GRAPHIC ? std::shared_ptr::reinterpret_cast<C2GraphicBuffer>(this) : nullptr;
-    }
-
-    inline std::shared_ptr<C2CircularBuffer> asCircularBuffer() const {
-        return mType == CIRCULAR ? std::shared_ptr::reinterpret_cast<C2CircularBuffer>(this) : nullptr;
-    }
-#endif
-
     ///@name Pre-destroy notification handling
     ///@{
 
@@ -2160,8 +2145,6 @@
      */
     static std::shared_ptr<C2Buffer> CreateGraphicBuffer(const C2ConstGraphicBlock &block);
 
-
-
 protected:
     // no public constructor
     explicit C2Buffer(const std::vector<C2ConstLinearBlock> &blocks);
@@ -2170,7 +2153,6 @@
 private:
     class Impl;
     std::shared_ptr<Impl> mImpl;
-//    Type _mType;
 };
 
 /**
@@ -2197,109 +2179,6 @@
 
 /// @}
 
-/// \cond INTERNAL
-
-/// \todo These are no longer used
-
-/// \addtogroup linear
-/// @{
-
-/** \deprecated */
-class C2LinearBuffer
-    : public C2Buffer, public _C2LinearRangeAspect,
-      public std::enable_shared_from_this<C2LinearBuffer> {
-public:
-    /** \todo what is this? */
-    const C2Handle *handle() const;
-
-protected:
-    inline C2LinearBuffer(const C2ConstLinearBlock &block);
-
-private:
-    class Impl;
-    Impl *mImpl;
-};
-
-class C2ReadCursor;
-
-class C2WriteCursor {
-public:
-    uint32_t remaining() const; // remaining data to be read
-    void commit(); // commits the current position. discard data before current position
-    void reset() const;  // resets position to the last committed position
-    // slices off at most |size| bytes, and moves cursor ahead by the number of bytes
-    // sliced off.
-    C2ReadCursor slice(uint32_t size) const;
-    // slices off at most |size| bytes, and moves cursor ahead by the number of bytes
-    // sliced off.
-    C2WriteCursor reserve(uint32_t size);
-    // bool read(T&);
-    // bool write(T&);
-    C2Fence waitForSpace(uint32_t size);
-};
-
-/// @}
-
-/// \addtogroup graphic
-/// @{
-
-struct C2ColorSpace {
-//public:
-    enum Standard {
-        BT601,
-        BT709,
-        BT2020,
-        // TODO
-    };
-
-    enum Range {
-        LIMITED,
-        FULL,
-        // TODO
-    };
-
-    enum TransferFunction {
-        BT709Transfer,
-        BT2020Transfer,
-        HybridLogGamma2,
-        HybridLogGamma4,
-        // TODO
-    };
-};
-
-/** \deprecated */
-class C2GraphicBuffer : public C2Buffer {
-public:
-    // constant attributes
-    inline uint32_t width() const  { return mWidth; }
-    inline uint32_t height() const { return mHeight; }
-    inline uint32_t format() const { return mFormat; }
-    inline const C2MemoryUsage usage() const { return mUsage; }
-
-    // modifiable attributes
-
-
-    virtual const C2ColorSpace colorSpace() const = 0;
-    // best effort
-    virtual void setColorSpace_be(const C2ColorSpace &colorSpace) = 0;
-    virtual bool setColorSpace(const C2ColorSpace &colorSpace) = 0;
-
-    const C2Handle *handle() const;
-
-protected:
-    uint32_t mWidth;
-    uint32_t mHeight;
-    uint32_t mFormat;
-    C2MemoryUsage mUsage;
-
-    class Impl;
-    Impl *mImpl;
-};
-
-/// @}
-
-/// \endcond
-
 /// @}
 
 #endif  // C2BUFFER_H_
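With the #if 0 casts and the deprecated C2LinearBuffer/C2GraphicBuffer wrappers removed above, a C2Buffer is reached only through its static factories and the data() accessor. A minimal caller-side sketch (hypothetical code, not part of this change; `block` is assumed to be an existing C2ConstGraphicBlock):

    std::shared_ptr<C2Buffer> buffer = C2Buffer::CreateGraphicBuffer(block);
    const C2BufferData bufData = buffer->data();
    if (bufData.type() == C2BufferData::GRAPHIC) {
        // a single graphic block backs this buffer
        const C2ConstGraphicBlock &gb = bufData.graphicBlocks().front();
        (void)gb;
    }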
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 0357115..9545c45 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -240,19 +240,6 @@
     kParamIndexTimestampGapAdjustment, // input-surface, struct
 
     kParamIndexSurfaceAllocator, // u32
-
-    // deprecated indices due to renaming
-    kParamIndexAacStreamFormat = kParamIndexAacPackaging,
-    kParamIndexCsd = kParamIndexInitData,
-    kParamIndexMaxVideoSizeHint = kParamIndexMaxPictureSize,
-    kParamIndexMime = kParamIndexMediaType,
-    kParamIndexRequestedInfos = kParamIndexSubscribedParamIndices,
-
-
-    // deprecated indices due to removal
-    kParamIndexSupportedParams = 0xDEAD0000,
-    kParamIndexReadOnlyParams,
-    kParamIndexTemporal,
 };
 
 }
@@ -337,14 +324,8 @@
 // read-only
 typedef C2GlobalParam<C2Setting, C2SimpleValueStruct<C2Component::domain_t>, kParamIndexDomain>
         C2ComponentDomainSetting;
-typedef C2ComponentDomainSetting C2ComponentDomainInfo; // deprecated
-typedef C2Component::domain_t C2DomainKind; // deprecated
 constexpr char C2_PARAMKEY_COMPONENT_DOMAIN[]  = "component.domain";
 
-constexpr C2Component::domain_t C2DomainAudio = C2Component::DOMAIN_AUDIO; // deprecated
-constexpr C2Component::domain_t C2DomainOther = C2Component::DOMAIN_OTHER; // deprecate
-constexpr C2Component::domain_t C2DomainVideo = C2Component::DOMAIN_VIDEO; // deprecate
-
 /**
  * Component attributes.
  *
@@ -359,9 +340,6 @@
         C2ComponentAttributesSetting;
 constexpr char C2_PARAMKEY_COMPONENT_ATTRIBUTES[] = "component.attributes";
 
-// deprecated
-typedef C2ComponentAttributesSetting C2ComponentTemporalInfo;
-
 /**
  * Time stretching.
  *
@@ -597,6 +575,9 @@
     LEVEL_AVC_5,                                ///< AVC (H.264) Level 5
     LEVEL_AVC_5_1,                              ///< AVC (H.264) Level 5.1
     LEVEL_AVC_5_2,                              ///< AVC (H.264) Level 5.2
+    LEVEL_AVC_6,                                ///< AVC (H.264) Level 6
+    LEVEL_AVC_6_1,                              ///< AVC (H.264) Level 6.1
+    LEVEL_AVC_6_2,                              ///< AVC (H.264) Level 6.2
 
     // HEVC (H.265) tiers and levels
     LEVEL_HEVC_MAIN_1 = _C2_PL_HEVC_BASE,       ///< HEVC (H.265) Main Tier Level 1
@@ -704,7 +685,6 @@
 typedef C2StreamParam<C2Info, C2ProfileLevelStruct, kParamIndexProfileLevel>
         C2StreamProfileLevelInfo;
 constexpr char C2_PARAMKEY_PROFILE_LEVEL[] = "coded.pl";
-#define C2_PARAMKEY_STREAM_PROFILE_LEVEL C2_PARAMKEY_PROFILE_LEVEL
 
 /**
  * Codec-specific initialization data.
@@ -716,9 +696,7 @@
  * TODO: define for other codecs.
  */
 typedef C2StreamParam<C2Info, C2BlobValue, kParamIndexInitData> C2StreamInitDataInfo;
-typedef C2StreamInitDataInfo C2StreamCsdInfo; // deprecated
 constexpr char C2_PARAMKEY_INIT_DATA[] = "coded.init-data";
-#define C2_PARAMKEY_STREAM_INIT_DATA C2_PARAMKEY_INIT_DATA
 
 /**
  * Supplemental Data.
@@ -778,11 +756,8 @@
  * port media type.
  */
 typedef C2PortParam<C2Setting, C2StringValue, kParamIndexMediaType> C2PortMediaTypeSetting;
-typedef C2PortMediaTypeSetting C2PortMimeConfig; // deprecated
 constexpr char C2_PARAMKEY_INPUT_MEDIA_TYPE[] = "input.media-type";
 constexpr char C2_PARAMKEY_OUTPUT_MEDIA_TYPE[] = "output.media-type";
-#define C2_NAME_INPUT_PORT_MIME_SETTING C2_PARAMKEY_INPUT_MEDIA_TYPE
-#define C2_NAME_OUTPUT_PORT_MIME_SETTING C2_PARAMKEY_OUTPUT_MEDIA_TYPE
 
 typedef C2StreamParam<C2Setting, C2StringValue, kParamIndexMediaType> C2StreamMediaTypeSetting;
 
@@ -805,24 +780,20 @@
  */
 
 typedef C2PortParam<C2Tuning, C2Uint32Value, kParamIndexDelayRequest> C2PortRequestedDelayTuning;
-typedef C2PortRequestedDelayTuning C2PortRequestedLatencyTuning; // deprecated
 constexpr char C2_PARAMKEY_INPUT_DELAY_REQUEST[] = "input.delay.requested";
 constexpr char C2_PARAMKEY_OUTPUT_DELAY_REQUEST[] = "output.delay.requested";
 
 typedef C2GlobalParam<C2Tuning, C2Uint32Value, kParamIndexDelayRequest>
         C2RequestedPipelineDelayTuning;
-typedef C2RequestedPipelineDelayTuning C2ComponentRequestedLatencyTuning; // deprecated
 constexpr char C2_PARAMKEY_PIPELINE_DELAY_REQUEST[] = "pipeline-delay.requested";
 
 // read-only
 typedef C2PortParam<C2Tuning, C2Uint32Value, kParamIndexDelay> C2PortActualDelayTuning;
-typedef C2PortActualDelayTuning C2PortLatencyInfo; // deprecated
 constexpr char C2_PARAMKEY_INPUT_DELAY[] = "input.delay.actual";
 constexpr char C2_PARAMKEY_OUTPUT_DELAY[] = "output.delay.actual";
 
 // read-only
 typedef C2GlobalParam<C2Tuning, C2Uint32Value, kParamIndexDelay> C2ActualPipelineDelayTuning;
-typedef C2ActualPipelineDelayTuning C2ComponentLatencyInfo; // deprecated
 constexpr char C2_PARAMKEY_PIPELINE_DELAY[] = "algo.delay.actual";
 
 /**
@@ -872,7 +843,6 @@
  */
 // private
 typedef C2PortParam<C2Tuning, C2Uint32Value, kParamIndexStreamCount> C2PortStreamCountTuning;
-typedef C2PortStreamCountTuning C2PortStreamCountConfig; // deprecated
 constexpr char C2_PARAMKEY_INPUT_STREAM_COUNT[] = "input.stream-count";
 constexpr char C2_PARAMKEY_OUTPUT_STREAM_COUNT[] = "output.stream-count";
 
@@ -982,20 +952,9 @@
 typedef C2StreamParam<C2Setting, C2SimpleValueStruct<C2EasyEnum<C2BufferData::type_t>>,
                 kParamIndexBufferType>
         C2StreamBufferTypeSetting;
-
-constexpr C2BufferData::type_t C2FormatAudio      = C2BufferData::LINEAR; // deprecated
-constexpr C2BufferData::type_t C2FormatCompressed = C2BufferData::LINEAR; // deprecated
-constexpr C2BufferData::type_t C2FormatVideo      = C2BufferData::GRAPHIC; // deprecated
-typedef C2BufferData::type_t C2FormatKind; // deprecated
-
-typedef C2StreamBufferTypeSetting C2StreamFormatConfig; // deprecated
 constexpr char C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE[] = "input.buffers.type";
 constexpr char C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE[] = "output.buffers.type";
 
-// deprecated
-#define C2_NAME_INPUT_STREAM_FORMAT_SETTING C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE
-#define C2_NAME_OUTPUT_STREAM_FORMAT_SETTING C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE
-
 /**
  * Memory usage.
  *
@@ -1004,8 +963,6 @@
 typedef C2StreamParam<C2Tuning, C2Uint64Value, kParamIndexUsage> C2StreamUsageTuning;
 constexpr char C2_PARAMKEY_INPUT_STREAM_USAGE[] = "input.buffers.usage";
 constexpr char C2_PARAMKEY_OUTPUT_STREAM_USAGE[] = "output.buffers.usage";
-// deprecated
-#define C2_NAME_INPUT_STREAM_USAGE_SETTING C2_PARAMKEY_INPUT_STREAM_USAGE
 
 /**
  * Picture (video or image frame) size.
@@ -1065,8 +1022,6 @@
 constexpr char C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE[] = "input.buffers.max-size";
 constexpr char C2_PARAMKEY_OUTPUT_MAX_BUFFER_SIZE[] = "output.buffers.max-size";
 
-#define C2_NAME_STREAM_MAX_BUFFER_SIZE_SETTING C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE
-
 /* ---------------------------------------- misc. state ---------------------------------------- */
 
 /**
@@ -1167,9 +1122,7 @@
  * Bitrate
  */
 typedef C2StreamParam<C2Info, C2Uint32Value, kParamIndexBitrate> C2StreamBitrateInfo;
-typedef C2StreamBitrateInfo C2BitrateTuning; // deprecated
 constexpr char C2_PARAMKEY_BITRATE[] = "coded.bitrate";
-#define C2_NAME_STREAM_BITRATE_SETTING C2_PARAMKEY_BITRATE
 
 /**
  * Bitrate mode.
@@ -1258,15 +1211,8 @@
  *
  * This is used for the output of the video decoder, and the input of the video encoder.
  */
-typedef C2PictureSizeStruct C2VideoSizeStruct; // deprecated
-
 typedef C2StreamParam<C2Info, C2PictureSizeStruct, kParamIndexPictureSize> C2StreamPictureSizeInfo;
 constexpr char C2_PARAMKEY_PICTURE_SIZE[] = "raw.size";
-#define C2_PARAMKEY_STREAM_PICTURE_SIZE C2_PARAMKEY_PICTURE_SIZE
-#define C2_NAME_STREAM_VIDEO_SIZE_INFO C2_PARAMKEY_PICTURE_SIZE
-typedef C2StreamPictureSizeInfo C2VideoSizeStreamInfo; // deprecated
-typedef C2StreamPictureSizeInfo C2VideoSizeStreamTuning; // deprecated
-#define C2_NAME_STREAM_VIDEO_SIZE_SETTING C2_PARAMKEY_PICTURE_SIZE
 
 /**
  * Crop rectangle.
@@ -1341,12 +1287,10 @@
                 kParamIndexScalingMethod>
         C2StreamScalingMethodTuning;
 constexpr char C2_PARAMKEY_SCALING_MODE[] = "raw.scaling-method";
-#define C2_PARAMKEY_STREAM_SCALING_MODE C2_PARAMKEY_SCALING_MODE
 
 typedef C2StreamParam<C2Tuning, C2PictureSizeStruct, kParamIndexScaledPictureSize>
         C2StreamScaledPictureSizeTuning;
 constexpr char C2_PARAMKEY_SCALED_PICTURE_SIZE[] = "raw.scaled-size";
-#define C2_PARAMKEY_STREAM_SCALED_PICTURE_SIZE C2_PARAMKEY_SCALED_PICTURE_SIZE
 
 typedef C2StreamParam<C2Tuning, C2RectStruct, kParamIndexScaledCropRect>
         C2StreamScaledCropRectTuning;
@@ -1501,15 +1445,8 @@
     MATRIX_BT2020_CONSTANT,         ///< Rec.ITU-R BT.2020 constant luminance
     MATRIX_VENDOR_START = 0x80,     ///< vendor-specific matrix coefficient values start here
     MATRIX_OTHER = 0xff,            ///< max value, reserved for undefined values
-
-    MATRIX_SMPTE240M = MATRIX_240M, // deprecated
-    MATRIX_BT2020CONSTANT = MATRIX_BT2020_CONSTANT, // deprecated
 )
 
-constexpr C2Color::matrix_t MATRIX_BT470_6M = MATRIX_FCC47_73_682; // deprecated
-constexpr C2Color::matrix_t MATRIX_BT709_5 = MATRIX_BT709; // deprecated
-constexpr C2Color::matrix_t MATRIX_BT601_6 = MATRIX_BT601; // deprecated
-
 struct C2ColorAspectsStruct {
     C2Color::range_t range;
     C2Color::primaries_t primaries;
@@ -1632,7 +1569,6 @@
  */
 typedef C2StreamParam<C2Info, C2FloatValue, kParamIndexFrameRate> C2StreamFrameRateInfo;
 constexpr char C2_PARAMKEY_FRAME_RATE[] = "coded.frame-rate";
-#define C2_NAME_STREAM_FRAME_RATE_SETTING C2_PARAMKEY_FRAME_RATE
 
 typedef C2PortParam<C2Info, C2FloatValue, kParamIndexFrameRate> C2PortFrameRateInfo;
 constexpr char C2_PARAMKEY_INPUT_FRAME_RATE[] = "input.frame-rate";
@@ -1665,9 +1601,6 @@
     B_FRAME    = (1 << 3),  ///< backward predicted (out-of-order) frame
 )
 
-typedef C2Config::picture_type_t C2PictureTypeMask; // deprecated
-constexpr C2Config::picture_type_t C2PictureTypeKeyFrame = C2Config::SYNC_FRAME; // deprecated
-
 /**
  * Allowed picture types.
  */
@@ -1747,8 +1680,6 @@
 typedef C2StreamParam<C2Tuning, C2Int64Value, kParamIndexSyncFrameInterval>
         C2StreamSyncFrameIntervalTuning;
 constexpr char C2_PARAMKEY_SYNC_FRAME_INTERVAL[] = "coding.sync-frame-interval";
-// deprecated
-#define C2_PARAMKEY_SYNC_FRAME_PERIOD C2_PARAMKEY_SYNC_FRAME_INTERVAL
 
 /**
  * Temporal layering
@@ -1882,8 +1813,6 @@
 typedef C2StreamParam<C2Info, C2Uint32Value, kParamIndexSampleRate> C2StreamSampleRateInfo;
 constexpr char C2_PARAMKEY_SAMPLE_RATE[] = "raw.sample-rate";
 constexpr char C2_PARAMKEY_CODED_SAMPLE_RATE[] = "coded.sample-rate";
-// deprecated
-#define C2_NAME_STREAM_SAMPLE_RATE_SETTING C2_PARAMKEY_SAMPLE_RATE
 
 /**
  * Channel count.
@@ -1891,8 +1820,6 @@
 typedef C2StreamParam<C2Info, C2Uint32Value, kParamIndexChannelCount> C2StreamChannelCountInfo;
 constexpr char C2_PARAMKEY_CHANNEL_COUNT[] = "raw.channel-count";
 constexpr char C2_PARAMKEY_CODED_CHANNEL_COUNT[] = "coded.channel-count";
-// deprecated
-#define C2_NAME_STREAM_CHANNEL_COUNT_SETTING C2_PARAMKEY_CHANNEL_COUNT
 
 /**
  * Max channel count. Used to limit the number of coded or decoded channels.
@@ -2002,16 +1929,10 @@
     AAC_PACKAGING_ADTS
 )
 
-typedef C2Config::aac_packaging_t C2AacStreamFormatKind; // deprecated
-// deprecated
-constexpr C2Config::aac_packaging_t C2AacStreamFormatRaw = C2Config::AAC_PACKAGING_RAW;
-constexpr C2Config::aac_packaging_t C2AacStreamFormatAdts = C2Config::AAC_PACKAGING_ADTS;
-
 typedef C2StreamParam<C2Info, C2SimpleValueStruct<C2EasyEnum<C2Config::aac_packaging_t>>,
         kParamIndexAacPackaging> C2StreamAacPackagingInfo;
 typedef C2StreamAacPackagingInfo C2StreamAacFormatInfo;
 constexpr char C2_PARAMKEY_AAC_PACKAGING[] = "coded.aac-packaging";
-#define C2_NAME_STREAM_AAC_FORMAT_SETTING C2_PARAMKEY_AAC_PACKAGING
 
 /* ================================ PLATFORM-DEFINED PARAMETERS ================================ */
 
@@ -2131,7 +2052,6 @@
 typedef C2GlobalParam<C2Tuning, C2EasyBoolValue, kParamIndexInputSurfaceEos>
         C2InputSurfaceEosTuning;
 constexpr char C2_PARAMKEY_INPUT_SURFACE_EOS[] = "input-surface.eos";
-#define C2_NAME_INPUT_SURFACE_EOS_TUNING C2_PARAMKEY_INPUT_SURFACE_EOS
 
 /**
  * Start/suspend/resume/stop controls and timestamps for input surface.
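The alias removals in C2Config.h above mean configuration code now spells out the current parameter types and keys; picture size, for example, is configured as C2StreamPictureSizeInfo under C2_PARAMKEY_PICTURE_SIZE. A small illustrative sketch of the replacement spelling (hypothetical snippet; `component` and `failures` are assumed to exist as in the VTS tests below):

    // was: C2VideoSizeStreamTuning::input inputSize(0u, w, h);
    C2StreamPictureSizeInfo::input inputSize(0u, 1280, 720);
    std::vector<C2Param*> configParam{ &inputSize };
    c2_status_t status = component->config(configParam, C2_DONT_BLOCK, &failures);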
diff --git a/media/codec2/core/include/C2Param.h b/media/codec2/core/include/C2Param.h
index efc5c89..d264bf3 100644
--- a/media/codec2/core/include/C2Param.h
+++ b/media/codec2/core/include/C2Param.h
@@ -1012,15 +1012,6 @@
           _mNamedValues(_NamedValuesGetter<B>::getNamedValues()),
           _mFieldId(offset) {}
 
-/*
-    template<typename T, typename B=typename std::remove_extent<T>::type>
-    inline C2FieldDescriptor<T, B, false>(T* offset, const char *name)
-        : _mType(this->GetType((B*)nullptr)),
-          _mExtent(std::is_array<T>::value ? std::extent<T>::value : 1),
-          _mName(name),
-          _mFieldId(offset) {}
-*/
-
     /// \deprecated
     template<typename T, typename S, class B=typename std::remove_extent<T>::type>
     inline C2FieldDescriptor(S*, T S::* field, const char *name)
diff --git a/media/codec2/hidl/1.0/utils/Configurable.cpp b/media/codec2/hidl/1.0/utils/Configurable.cpp
index a35b74c..ec9c170 100644
--- a/media/codec2/hidl/1.0/utils/Configurable.cpp
+++ b/media/codec2/hidl/1.0/utils/Configurable.cpp
@@ -171,17 +171,15 @@
             c2fields,
             mayBlock ? C2_MAY_BLOCK : C2_DONT_BLOCK);
     hidl_vec<FieldSupportedValuesQueryResult> outFields(inFields.size());
-    {
-        size_t ix = 0;
-        for (const C2FieldSupportedValuesQuery &result : c2fields) {
-            if (!objcpy(&outFields[ix], result)) {
-                ++ix;
-            } else {
-                outFields.resize(ix);
-                c2res = C2_CORRUPTED;
-                LOG(WARNING) << "querySupportedValues -- invalid output params.";
-                break;
-            }
+    size_t dstIx = 0;
+    for (const C2FieldSupportedValuesQuery &result : c2fields) {
+        if (objcpy(&outFields[dstIx], result)) {
+            ++dstIx;
+        } else {
+            outFields.resize(dstIx);
+            c2res = C2_CORRUPTED;
+            LOG(WARNING) << "querySupportedValues -- invalid output params.";
+            break;
         }
     }
     _hidl_cb((Status)c2res, outFields);
diff --git a/media/codec2/hidl/1.0/utils/InputSurface.cpp b/media/codec2/hidl/1.0/utils/InputSurface.cpp
index 2cbe64b..85c44c3 100644
--- a/media/codec2/hidl/1.0/utils/InputSurface.cpp
+++ b/media/codec2/hidl/1.0/utils/InputSurface.cpp
@@ -45,7 +45,7 @@
         setDerivedInstance(this);
 
         addParameter(
-                DefineParam(mEos, C2_NAME_INPUT_SURFACE_EOS_TUNING)
+                DefineParam(mEos, C2_PARAMKEY_INPUT_SURFACE_EOS)
                 .withDefault(new C2InputSurfaceEosTuning(false))
                 .withFields({C2F(mEos, value).oneOf({true, false})})
                 .withSetter(EosSetter)
diff --git a/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp b/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
index 1024f50..c9932ef 100644
--- a/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
+++ b/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
@@ -124,7 +124,7 @@
         }
 
         // TODO: read settings properly from the interface
-        C2VideoSizeStreamTuning::input inputSize;
+        C2StreamPictureSizeInfo::input inputSize;
         C2StreamUsageTuning::input usage;
         c2_status_t c2Status = queryFromSink({ &inputSize, &usage },
                                          {},
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h
index c38e674..b9f3aa8 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h
@@ -120,11 +120,9 @@
         IComponentStore::ComponentTraits* d,
         const C2Component::Traits& s);
 
-// ComponentTraits -> C2Component::Traits, std::unique_ptr<std::vector<std::string>>
-// Note: The output d is only valid as long as aliasesBuffer remains alive.
+// ComponentTraits -> C2Component::Traits
 bool objcpy(
         C2Component::Traits* d,
-        std::unique_ptr<std::vector<std::string>>* aliasesBuffer,
         const IComponentStore::ComponentTraits& s);
 
 // C2StructDescriptor -> StructDescriptor
diff --git a/media/codec2/hidl/1.0/utils/types.cpp b/media/codec2/hidl/1.0/utils/types.cpp
index caed839..343bcb5 100644
--- a/media/codec2/hidl/1.0/utils/types.cpp
+++ b/media/codec2/hidl/1.0/utils/types.cpp
@@ -351,7 +351,6 @@
-// ComponentTraits -> C2Component::Traits, std::unique_ptr<std::vector<std::string>>
+// ComponentTraits -> C2Component::Traits
 bool objcpy(
         C2Component::Traits* d,
-        std::unique_ptr<std::vector<std::string>>* aliasesBuffer,
         const IComponentStore::ComponentTraits& s) {
     d->name = s.name.c_str();
 
@@ -394,15 +393,9 @@
 
     d->rank = static_cast<C2Component::rank_t>(s.rank);
     d->mediaType = s.mediaType.c_str();
-
-    // aliasesBuffer must not be resized after this.
-    *aliasesBuffer = std::make_unique<std::vector<std::string>>(
-            s.aliases.size());
-    (*aliasesBuffer)->resize(s.aliases.size());
-    std::vector<C2StringLiteral> dAliases(s.aliases.size());
+    d->aliases.resize(s.aliases.size());
     for (size_t i = 0; i < s.aliases.size(); ++i) {
-        (**aliasesBuffer)[i] = s.aliases[i].c_str();
-        d->aliases[i] = (**aliasesBuffer)[i].c_str();
+        d->aliases[i] = s.aliases[i];
     }
     return true;
 }
@@ -1810,7 +1803,8 @@
 }
 
 sp<HGraphicBufferProducer> getHgbp(const sp<IGraphicBufferProducer>& igbp) {
-    sp<HGraphicBufferProducer> hgbp = igbp->getHalInterface();
+    sp<HGraphicBufferProducer> hgbp =
+            igbp->getHalInterface<HGraphicBufferProducer>();
     return hgbp ? hgbp :
             new TWGraphicBufferProducer<HGraphicBufferProducer>(igbp);
 }
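Because the ComponentTraits conversion now writes the alias strings straight into C2Component::Traits::aliases, callers of objcpy() no longer keep a separate aliases buffer alive. A hedged caller-side sketch of the simplified signature (hypothetical variable names):

    C2Component::Traits traits;
    if (!objcpy(&traits, hidlTraits)) {   // hidlTraits: IComponentStore::ComponentTraits
        LOG(ERROR) << "listComponents -- corrupted output.";
    }
    // traits.aliases now owns its strings; no external buffer must outlive it.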
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
index d4b973f..d3b37d7 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
@@ -272,7 +272,7 @@
 }
 
 // Set Default config param.
-void setupConfigParam(
+bool setupConfigParam(
     const std::shared_ptr<android::Codec2Client::Component>& component,
     int32_t* bitStreamInfo) {
     std::vector<std::unique_ptr<C2SettingResult>> failures;
@@ -282,8 +282,8 @@
     std::vector<C2Param*> configParam{&sampleRateInfo, &channelCountInfo};
     c2_status_t status =
         component->config(configParam, C2_DONT_BLOCK, &failures);
-    ASSERT_EQ(failures.size(), 0u);
-    ASSERT_EQ(status, C2_OK);
+    if (status == C2_OK && failures.size() == 0u) return true;
+    return false;
 }
 
 // In decoder components, often the input parameters get updated upon
@@ -557,7 +557,11 @@
         ASSERT_NO_FATAL_FAILURE(
             getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
     }
-    setupConfigParam(mComponent, bitStreamInfo);
+    if (!setupConfigParam(mComponent, bitStreamInfo)) {
+        std::cout << "[   WARN   ] Test Skipped \n";
+        return;
+    }
+    ASSERT_EQ(mComponent->start(), C2_OK);
     ALOGV("mURL : %s", mURL);
     eleStream.open(mURL, std::ifstream::binary);
     ASSERT_EQ(eleStream.is_open(), true);
@@ -613,7 +617,6 @@
     description("Test Request for thumbnail");
     if (mDisableTest) return;
 
-    ASSERT_EQ(mComponent->start(), C2_OK);
     char mURL[512], info[512];
     std::ifstream eleStream, eleInfo;
 
@@ -642,7 +645,11 @@
         ASSERT_NO_FATAL_FAILURE(
             getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
     }
-    setupConfigParam(mComponent, bitStreamInfo);
+    if (!setupConfigParam(mComponent, bitStreamInfo)) {
+        std::cout << "[   WARN   ] Test Skipped \n";
+        return;
+    }
+    ASSERT_EQ(mComponent->start(), C2_OK);
     ALOGV("mURL : %s", mURL);
 
     // request EOS for thumbnail
@@ -711,7 +718,6 @@
     description("Tests Flush calls");
     if (mDisableTest) return;
     typedef std::unique_lock<std::mutex> ULock;
-    ASSERT_EQ(mComponent->start(), C2_OK);
     char mURL[512], info[512];
     std::ifstream eleStream, eleInfo;
 
@@ -741,7 +747,11 @@
         ASSERT_NO_FATAL_FAILURE(
             getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
     }
-    setupConfigParam(mComponent, bitStreamInfo);
+    if (!setupConfigParam(mComponent, bitStreamInfo)) {
+        std::cout << "[   WARN   ] Test Skipped \n";
+        return;
+    }
+    ASSERT_EQ(mComponent->start(), C2_OK);
     ALOGV("mURL : %s", mURL);
     eleStream.open(mURL, std::ifstream::binary);
     ASSERT_EQ(eleStream.is_open(), true);
@@ -833,8 +843,6 @@
     description("Decode with multiple empty input frames");
     if (mDisableTest) return;
 
-    ASSERT_EQ(mComponent->start(), C2_OK);
-
     char mURL[512], info[512];
     std::ifstream eleStream, eleInfo;
 
@@ -868,7 +876,19 @@
         frameId++;
     }
     eleInfo.close();
-
+    int32_t bitStreamInfo[2] = {0};
+    if (mCompName == raw) {
+        bitStreamInfo[0] = 8000;
+        bitStreamInfo[1] = 1;
+    } else {
+        ASSERT_NO_FATAL_FAILURE(
+            getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
+    }
+    if (!setupConfigParam(mComponent, bitStreamInfo)) {
+        std::cout << "[   WARN   ] Test Skipped \n";
+        return;
+    }
+    ASSERT_EQ(mComponent->start(), C2_OK);
     ALOGV("mURL : %s", mURL);
     eleStream.open(mURL, std::ifstream::binary);
     ASSERT_EQ(eleStream.is_open(), true);
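The VTS decoder and encoder tests above all move to the same pattern: setupConfigParam() now reports success instead of asserting, the test is skipped when the component rejects the configuration, and start() is issued only afterwards. In condensed form (mirrors the hunks above):

    if (!setupConfigParam(mComponent, bitStreamInfo)) {
        std::cout << "[   WARN   ] Test Skipped \n";
        return;                               // unsupported config: skip, don't fail
    }
    ASSERT_EQ(mComponent->start(), C2_OK);    // start only after config succeeds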
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
index 5d66ee5..a74d43e 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
@@ -219,7 +219,7 @@
 }
 
 // Set Default config param.
-void setupConfigParam(
+bool setupConfigParam(
     const std::shared_ptr<android::Codec2Client::Component>& component,
     int32_t nChannels, int32_t nSampleRate) {
     std::vector<std::unique_ptr<C2SettingResult>> failures;
@@ -229,8 +229,8 @@
     std::vector<C2Param*> configParam{&sampleRateInfo, &channelCountInfo};
     c2_status_t status =
         component->config(configParam, C2_DONT_BLOCK, &failures);
-    ASSERT_EQ(failures.size(), 0u);
-    ASSERT_EQ(status, C2_OK);
+    if (status == C2_OK && failures.size() == 0u) return true;
+    return false;
 }
 
 // LookUpTable of clips and metadata for component testing
@@ -358,7 +358,6 @@
 TEST_F(Codec2AudioEncHidlTest, EncodeTest) {
     ALOGV("EncodeTest");
     if (mDisableTest) return;
-    ASSERT_EQ(mComponent->start(), C2_OK);
     char mURL[512];
     strcpy(mURL, gEnv->getRes().c_str());
     GetURLForComponent(mCompName, mURL);
@@ -396,7 +395,11 @@
         default:
             ASSERT_TRUE(false);
     }
-    setupConfigParam(mComponent, nChannels, nSampleRate);
+    if (!setupConfigParam(mComponent, nChannels, nSampleRate)) {
+        std::cout << "[   WARN   ] Test Skipped \n";
+        return;
+    }
+    ASSERT_EQ(mComponent->start(), C2_OK);
     std::ifstream eleStream;
     uint32_t numFrames = 128;
     eleStream.open(mURL, std::ifstream::binary);
@@ -469,7 +472,6 @@
 TEST_F(Codec2AudioEncHidlTest, FlushTest) {
     description("Test Request for flush");
     if (mDisableTest) return;
-    ASSERT_EQ(mComponent->start(), C2_OK);
 
     typedef std::unique_lock<std::mutex> ULock;
     char mURL[512];
@@ -510,7 +512,13 @@
         default:
             ASSERT_TRUE(false);
     }
-    setupConfigParam(mComponent, nChannels, nSampleRate);
+
+    if (!setupConfigParam(mComponent, nChannels, nSampleRate)) {
+        std::cout << "[   WARN   ] Test Skipped \n";
+        return;
+    }
+    ASSERT_EQ(mComponent->start(), C2_OK);
+
     std::ifstream eleStream;
     uint32_t numFramesFlushed = 30;
     uint32_t numFrames = 128;
diff --git a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
index 64a458c..1f36270 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
@@ -93,36 +93,38 @@
         std::vector<std::unique_ptr<C2SettingResult>> failures;
         for (size_t i = 0; i < updates.size(); ++i) {
             C2Param* param = updates[i].get();
-            if (param->index() == C2StreamCsdInfo::output::PARAM_TYPE) {
+            if (param->index() == C2StreamInitDataInfo::output::PARAM_TYPE) {
                 csd = true;
             } else if ((param->index() ==
                         C2StreamSampleRateInfo::output::PARAM_TYPE) ||
                        (param->index() ==
                         C2StreamChannelCountInfo::output::PARAM_TYPE) ||
                        (param->index() ==
-                        C2VideoSizeStreamInfo::output::PARAM_TYPE)) {
+                        C2StreamPictureSizeInfo::output::PARAM_TYPE)) {
                 configParam.push_back(param);
             }
         }
         component->config(configParam, C2_DONT_BLOCK, &failures);
         ASSERT_EQ(failures.size(), 0u);
     }
-    framesReceived++;
-    eos = (work->worklets.front()->output.flags &
-           C2FrameData::FLAG_END_OF_STREAM) != 0;
-    auto frameIndexIt = std::find(flushedIndices.begin(), flushedIndices.end(),
-                                  work->input.ordinal.frameIndex.peeku());
-    ALOGV("WorkDone: frameID received %d",
-          (int)work->worklets.front()->output.ordinal.frameIndex.peeku());
-    work->input.buffers.clear();
-    work->worklets.clear();
-    {
-        typedef std::unique_lock<std::mutex> ULock;
-        ULock l(queueLock);
-        workQueue.push_back(std::move(work));
-        if (!flushedIndices.empty()) {
-            flushedIndices.erase(frameIndexIt);
+    if (work->worklets.front()->output.flags != C2FrameData::FLAG_INCOMPLETE) {
+        framesReceived++;
+        eos = (work->worklets.front()->output.flags &
+               C2FrameData::FLAG_END_OF_STREAM) != 0;
+        auto frameIndexIt = std::find(flushedIndices.begin(), flushedIndices.end(),
+                                      work->input.ordinal.frameIndex.peeku());
+        ALOGV("WorkDone: frameID received %d",
+              (int)work->worklets.front()->output.ordinal.frameIndex.peeku());
+        work->input.buffers.clear();
+        work->worklets.clear();
+        {
+            typedef std::unique_lock<std::mutex> ULock;
+            ULock l(queueLock);
+            workQueue.push_back(std::move(work));
+            if (!flushedIndices.empty()) {
+                flushedIndices.erase(frameIndexIt);
+            }
+            queueCondition.notify_all();
         }
-        queueCondition.notify_all();
     }
 }
\ No newline at end of file
diff --git a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
index d1557cb..fca2902 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
@@ -55,12 +55,10 @@
         : callBack(fn) {}
     virtual void onWorkDone(
         const std::weak_ptr<android::Codec2Client::Component>& comp,
-        std::list<std::unique_ptr<C2Work>>& workItems,
-        size_t numDiscardedInputBuffers) override {
+        std::list<std::unique_ptr<C2Work>>& workItems) override {
         /* TODO */
         ALOGD("onWorkDone called");
         (void)comp;
-        (void)numDiscardedInputBuffers;
         if (callBack) callBack(workItems);
     }
 
@@ -89,9 +87,10 @@
     }
 
     virtual void onInputBufferDone(
-        const std::shared_ptr<C2Buffer>& buffer) override {
+        uint64_t frameIndex, size_t arrayIndex) override {
         /* TODO */
-        (void)buffer;
+        (void)frameIndex;
+        (void)arrayIndex;
     }
 
     virtual void onFrameRendered(
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
index 8585c87..7db41c0 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
@@ -118,7 +118,6 @@
         }
         mEos = false;
         mCsd = false;
-        mConfig = false;
         mFramesReceived = 0;
         mFailedWorkReceived = 0;
         if (mCompName == unknown_comp) mDisableTest = true;
@@ -134,7 +133,7 @@
         Super::TearDown();
     }
 
-    void setupConfigParam(int32_t nWidth, int32_t nHeight);
+    bool setupConfigParam(int32_t nWidth, int32_t nHeight);
 
     // callback function to process onWorkDone received by Listener
     void handleWorkDone(std::list<std::unique_ptr<C2Work>>& workItems) {
@@ -228,14 +227,14 @@
 }
 
 // Set Default config param.
-void Codec2VideoEncHidlTest::setupConfigParam(int32_t nWidth, int32_t nHeight) {
+bool Codec2VideoEncHidlTest::setupConfigParam(int32_t nWidth, int32_t nHeight) {
     std::vector<std::unique_ptr<C2SettingResult>> failures;
-    C2VideoSizeStreamTuning::input inputSize(0u, nWidth, nHeight);
+    C2StreamPictureSizeInfo::input inputSize(0u, nWidth, nHeight);
     std::vector<C2Param*> configParam{&inputSize};
     c2_status_t status =
         mComponent->config(configParam, C2_DONT_BLOCK, &failures);
-    if (failures.size() == 0u ) mConfig = true;
-    ASSERT_EQ(status, C2_OK);
+    if (status == C2_OK && failures.size() == 0u) return true;
+    return false;
 }
 
 // LookUpTable of clips for component testing
@@ -360,8 +359,7 @@
     ASSERT_EQ(eleStream.is_open(), true) << mURL << " file not found";
     ALOGV("mURL : %s", mURL);
 
-    setupConfigParam(nWidth, nHeight);
-    if (!mConfig) {
+    if (!setupConfigParam(nWidth, nHeight)) {
         std::cout << "[   WARN   ] Test Skipped \n";
         return;
     }
@@ -439,7 +437,6 @@
 TEST_F(Codec2VideoEncHidlTest, FlushTest) {
     description("Test Request for flush");
     if (mDisableTest) return;
-    ASSERT_EQ(mComponent->start(), C2_OK);
 
     typedef std::unique_lock<std::mutex> ULock;
     char mURL[512];
@@ -447,7 +444,12 @@
     int32_t nHeight = ENC_DEFAULT_FRAME_HEIGHT;
     strcpy(mURL, gEnv->getRes().c_str());
     GetURLForComponent(mURL);
-    setupConfigParam(nWidth, nHeight);
+
+    if (!setupConfigParam(nWidth, nHeight)) {
+        std::cout << "[   WARN   ] Test Skipped \n";
+        return;
+    }
+    ASSERT_EQ(mComponent->start(), C2_OK);
 
     // Setting default configuration
     mFlushedIndices.clear();
@@ -522,12 +524,16 @@
 TEST_F(Codec2VideoEncHidlTest, InvalidBufferTest) {
     description("Tests feeding larger/smaller input buffer");
     if (mDisableTest) return;
-    ASSERT_EQ(mComponent->start(), C2_OK);
 
     std::ifstream eleStream;
     int32_t nWidth = ENC_DEFAULT_FRAME_WIDTH / 2;
     int32_t nHeight = ENC_DEFAULT_FRAME_HEIGHT / 2;
-    setupConfigParam(nWidth, nHeight);
+
+    if (!setupConfigParam(nWidth, nHeight)) {
+        std::cout << "[   WARN   ] Test Skipped \n";
+        return;
+    }
+    ASSERT_EQ(mComponent->start(), C2_OK);
 
     ASSERT_NO_FATAL_FAILURE(
         encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
@@ -579,10 +585,12 @@
     int32_t nWidth = GetParam().first;
     int32_t nHeight = GetParam().second;
     ALOGD("Trying encode for width %d height %d", nWidth, nHeight);
-    mConfig = false;
     mEos = false;
-    setupConfigParam(nWidth, nHeight);
-    if (!mConfig) return;
+
+    if (!setupConfigParam(nWidth, nHeight)) {
+        std::cout << "[   WARN   ] Test Skipped \n";
+        return;
+    }
     ASSERT_EQ(mComponent->start(), C2_OK);
 
     ASSERT_NO_FATAL_FAILURE(
diff --git a/media/codec2/hidl/client/client.cpp b/media/codec2/hidl/client/client.cpp
index 5b52fcd..7a2e549 100644
--- a/media/codec2/hidl/client/client.cpp
+++ b/media/codec2/hidl/client/client.cpp
@@ -344,17 +344,13 @@
             return Void();
         }
         // release input buffers potentially held by the component from queue
-        size_t numDiscardedInputBuffers = 0;
         std::shared_ptr<Codec2Client::Component> strongComponent =
                 component.lock();
         if (strongComponent) {
-            numDiscardedInputBuffers =
-                    strongComponent->handleOnWorkDone(workItems);
+            strongComponent->handleOnWorkDone(workItems);
         }
         if (std::shared_ptr<Codec2Client::Listener> listener = base.lock()) {
-            listener->onWorkDone(component,
-                                 workItems,
-                                 numDiscardedInputBuffers);
+            listener->onWorkDone(component, workItems);
         } else {
             LOG(DEBUG) << "onWorkDone -- listener died.";
         }
@@ -418,26 +414,15 @@
             LOG(DEBUG) << "onInputBuffersReleased -- listener died.";
             return Void();
         }
-        std::shared_ptr<Codec2Client::Component> strongComponent =
-                component.lock();
-        if (!strongComponent) {
-            LOG(DEBUG) << "onInputBuffersReleased -- component died.";
-            return Void();
-        }
         for (const InputBuffer& inputBuffer : inputBuffers) {
-            std::shared_ptr<C2Buffer> buffer =
-                    strongComponent->freeInputBuffer(
-                        inputBuffer.frameIndex,
-                        inputBuffer.arrayIndex);
             LOG(VERBOSE) << "onInputBuffersReleased --"
                             " received death notification of"
                             " input buffer:"
                             " frameIndex = " << inputBuffer.frameIndex
                          << ", bufferIndex = " << inputBuffer.arrayIndex
                          << ".";
-            if (buffer) {
-                listener->onInputBufferDone(buffer);
-            }
+            listener->onInputBufferDone(
+                    inputBuffer.frameIndex, inputBuffer.arrayIndex);
         }
         return Void();
     }
@@ -579,9 +564,8 @@
                     return;
                 }
                 mTraitsList.resize(t.size());
-                mAliasesBuffer.resize(t.size());
                 for (size_t i = 0; i < t.size(); ++i) {
-                    if (!objcpy(&mTraitsList[i], &mAliasesBuffer[i], t[i])) {
+                    if (!objcpy(&mTraitsList[i], t[i])) {
                         LOG(ERROR) << "listComponents -- corrupted output.";
                         return;
                     }
@@ -918,43 +902,8 @@
     return static_cast<c2_status_t>(static_cast<Status>(transResult));
 }
 
-size_t Codec2Client::Component::handleOnWorkDone(
+void Codec2Client::Component::handleOnWorkDone(
         const std::list<std::unique_ptr<C2Work>> &workItems) {
-    // Input buffers' lifetime management
-    std::vector<uint64_t> inputDone;
-    for (const std::unique_ptr<C2Work> &work : workItems) {
-        if (work) {
-            if (work->worklets.empty()
-                    || !work->worklets.back()
-                    || (work->worklets.back()->output.flags &
-                        C2FrameData::FLAG_INCOMPLETE) == 0) {
-                // input is complete
-                inputDone.emplace_back(work->input.ordinal.frameIndex.peeku());
-            }
-        }
-    }
-
-    size_t numDiscardedInputBuffers = 0;
-    {
-        std::lock_guard<std::mutex> lock(mInputBuffersMutex);
-        for (uint64_t inputIndex : inputDone) {
-            auto it = mInputBuffers.find(inputIndex);
-            if (it == mInputBuffers.end()) {
-                LOG(VERBOSE) << "onWorkDone -- returned consumed/unknown "
-                                "input frame: index = "
-                             << inputIndex << ".";
-            } else {
-                LOG(VERBOSE) << "onWorkDone -- processed input frame: "
-                             << inputIndex
-                             << " (containing " << it->second.size()
-                                 << " buffers).";
-                mInputBuffers.erase(it);
-                mInputBufferCount.erase(inputIndex);
-                ++numDiscardedInputBuffers;
-            }
-        }
-    }
-
     // Output bufferqueue-based blocks' lifetime management
     mOutputBufferQueueMutex.lock();
     sp<IGraphicBufferProducer> igbp = mOutputIgbp;
@@ -965,72 +914,10 @@
     if (igbp) {
         holdBufferQueueBlocks(workItems, igbp, bqId, generation);
     }
-    return numDiscardedInputBuffers;
-}
-
-std::shared_ptr<C2Buffer> Codec2Client::Component::freeInputBuffer(
-        uint64_t frameIndex,
-        size_t bufferIndex) {
-    std::shared_ptr<C2Buffer> buffer;
-    std::lock_guard<std::mutex> lock(mInputBuffersMutex);
-    auto it = mInputBuffers.find(frameIndex);
-    if (it == mInputBuffers.end()) {
-        LOG(INFO) << "freeInputBuffer -- Unrecognized input frame index "
-                  << frameIndex << ".";
-        return nullptr;
-    }
-    if (bufferIndex >= it->second.size()) {
-        LOG(INFO) << "freeInputBuffer -- Input buffer number " << bufferIndex
-                  << " is not valid in input with frame index " << frameIndex
-                  << ".";
-        return nullptr;
-    }
-    buffer = it->second[bufferIndex];
-    if (!buffer) {
-        LOG(INFO) << "freeInputBuffer -- Input buffer number " << bufferIndex
-                  << " in input with frame index " << frameIndex
-                  << " has already been freed.";
-        return nullptr;
-    }
-    it->second[bufferIndex] = nullptr;
-    if (--mInputBufferCount[frameIndex] == 0) {
-        mInputBuffers.erase(it);
-        mInputBufferCount.erase(frameIndex);
-    }
-    return buffer;
 }
 
 c2_status_t Codec2Client::Component::queue(
         std::list<std::unique_ptr<C2Work>>* const items) {
-    // remember input buffers queued to hold reference to them
-    {
-        std::lock_guard<std::mutex> lock(mInputBuffersMutex);
-        for (const std::unique_ptr<C2Work> &work : *items) {
-            if (!work) {
-                continue;
-            }
-            if (work->input.buffers.size() == 0) {
-                continue;
-            }
-
-            uint64_t inputIndex = work->input.ordinal.frameIndex.peeku();
-            auto res = mInputBuffers.emplace(inputIndex, work->input.buffers);
-            if (!res.second) {
-                // TODO: append? - for now we are replacing
-                res.first->second = work->input.buffers;
-                LOG(INFO) << "queue -- duplicate input frame index: "
-                          << inputIndex
-                          << ". Discarding the old input frame...";
-            }
-            mInputBufferCount[inputIndex] = work->input.buffers.size();
-            LOG(VERBOSE) << "queue -- queuing input frame: "
-                         << "index = " << inputIndex
-                         << ", number of buffers = "
-                             << work->input.buffers.size()
-                         << ".";
-        }
-    }
-
     WorkBundle workBundle;
     if (!objcpy(&workBundle, *items, &mBufferPoolSender)) {
         LOG(ERROR) << "queue -- bad input.";
@@ -1088,24 +975,6 @@
         }
     }
 
-    // Input buffers' lifetime management
-    for (uint64_t flushedIndex : flushedIndices) {
-        std::lock_guard<std::mutex> lock(mInputBuffersMutex);
-        auto it = mInputBuffers.find(flushedIndex);
-        if (it == mInputBuffers.end()) {
-            LOG(VERBOSE) << "flush -- returned consumed/unknown input frame: "
-                            "index = " << flushedIndex << ".";
-        } else {
-            LOG(VERBOSE) << "flush -- returned unprocessed input frame: "
-                            "index = " << flushedIndex
-                         << ", number of buffers = "
-                             << mInputBufferCount[flushedIndex]
-                         << ".";
-            mInputBuffers.erase(it);
-            mInputBufferCount.erase(flushedIndex);
-        }
-    }
-
     // Output bufferqueue-based blocks' lifetime management
     mOutputBufferQueueMutex.lock();
     sp<IGraphicBufferProducer> igbp = mOutputIgbp;
@@ -1160,10 +1029,6 @@
     if (status != C2_OK) {
         LOG(DEBUG) << "stop -- call failed: " << status << ".";
     }
-    mInputBuffersMutex.lock();
-    mInputBuffers.clear();
-    mInputBufferCount.clear();
-    mInputBuffersMutex.unlock();
     return status;
 }
 
@@ -1178,10 +1043,6 @@
     if (status != C2_OK) {
         LOG(DEBUG) << "reset -- call failed: " << status << ".";
     }
-    mInputBuffersMutex.lock();
-    mInputBuffers.clear();
-    mInputBufferCount.clear();
-    mInputBuffersMutex.unlock();
     return status;
 }
 
@@ -1196,10 +1057,6 @@
     if (status != C2_OK) {
         LOG(DEBUG) << "release -- call failed: " << status << ".";
     }
-    mInputBuffersMutex.lock();
-    mInputBuffers.clear();
-    mInputBufferCount.clear();
-    mInputBuffersMutex.unlock();
     return status;
 }
 
@@ -1207,7 +1064,9 @@
         C2BlockPool::local_id_t blockPoolId,
         const sp<IGraphicBufferProducer>& surface,
         uint32_t generation) {
-    sp<HGraphicBufferProducer> igbp = surface->getHalInterface();
+    sp<HGraphicBufferProducer> igbp =
+            surface->getHalInterface<HGraphicBufferProducer>();
+
     if (!igbp) {
         igbp = new TWGraphicBufferProducer<HGraphicBufferProducer>(surface);
     }
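With the input-buffer bookkeeping dropped from Codec2Client::Component, the listener callbacks shrink accordingly: onWorkDone() loses numDiscardedInputBuffers, and onInputBufferDone() reports (frameIndex, arrayIndex) rather than the C2Buffer itself, so the client looks up and releases the buffer it queued. A partial override sketch (only the two changed callbacks shown; processWork and releaseQueuedInput are hypothetical client-side helpers):

    void onWorkDone(const std::weak_ptr<Codec2Client::Component>& comp,
                    std::list<std::unique_ptr<C2Work>>& workItems) override {
        (void)comp;
        processWork(workItems);   // hypothetical helper owned by the client
    }
    void onInputBufferDone(uint64_t frameIndex, size_t arrayIndex) override {
        // release the buffer the client queued at input.buffers[arrayIndex]
        // of the work item with this frameIndex
        releaseQueuedInput(frameIndex, arrayIndex);   // hypothetical helper
    }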
diff --git a/media/codec2/hidl/client/include/codec2/hidl/client.h b/media/codec2/hidl/client/include/codec2/hidl/client.h
index f320ef3..478ce6e 100644
--- a/media/codec2/hidl/client/include/codec2/hidl/client.h
+++ b/media/codec2/hidl/client/include/codec2/hidl/client.h
@@ -232,8 +232,6 @@
     mutable bool mListed;
     std::string mServiceName;
     mutable std::vector<C2Component::Traits> mTraitsList;
-    mutable std::vector<std::unique_ptr<std::vector<std::string>>>
-            mAliasesBuffer;
 
     sp<::android::hardware::media::bufferpool::V2_0::IClientManager>
             mHostPoolManager;
@@ -252,16 +250,9 @@
 struct Codec2Client::Listener {
 
     // This is called when the component produces some output.
-    //
-    // numDiscardedInputBuffers is the number of input buffers contained in
-    // workItems that have just become unused. Note that workItems may contain
-    // more input buffers than numDiscardedInputBuffers because buffers that
-    // have been previously reported by onInputBufferDone() are not counted
-    // towards numDiscardedInputBuffers, but may still show up in workItems.
     virtual void onWorkDone(
             const std::weak_ptr<Component>& comp,
-            std::list<std::unique_ptr<C2Work>>& workItems,
-            size_t numDiscardedInputBuffers) = 0;
+            std::list<std::unique_ptr<C2Work>>& workItems) = 0;
 
     // This is called when the component goes into a tripped state.
     virtual void onTripped(
@@ -283,7 +274,7 @@
     // Input buffers that have been returned by onWorkDone() or flush() will not
     // trigger a call to this function.
     virtual void onInputBufferDone(
-            const std::shared_ptr<C2Buffer>& buffer) = 0;
+            uint64_t frameIndex, size_t arrayIndex) = 0;
 
     // This is called when the component becomes aware of a frame being
     // rendered.
@@ -385,24 +376,6 @@
 protected:
     sp<Base> mBase;
 
-    // Mutex for mInputBuffers and mInputBufferCount.
-    mutable std::mutex mInputBuffersMutex;
-
-    // Map: frameIndex -> vector of bufferIndices
-    //
-    // mInputBuffers[frameIndex][bufferIndex] may be null if the buffer in that
-    // slot has been freed.
-    mutable std::map<uint64_t, std::vector<std::shared_ptr<C2Buffer>>>
-            mInputBuffers;
-
-    // Map: frameIndex -> number of bufferIndices that have not been freed
-    //
-    // mInputBufferCount[frameIndex] keeps track of the number of non-null
-    // elements in mInputBuffers[frameIndex]. When mInputBufferCount[frameIndex]
-    // decreases to 0, frameIndex can be removed from both mInputBuffers and
-    // mInputBufferCount.
-    mutable std::map<uint64_t, size_t> mInputBufferCount;
-
     ::android::hardware::media::c2::V1_0::utils::DefaultBufferPoolSender
             mBufferPoolSender;
 
@@ -419,10 +392,7 @@
     friend struct Codec2Client;
 
     struct HidlListener;
-    // Return the number of input buffers that should be discarded.
-    size_t handleOnWorkDone(const std::list<std::unique_ptr<C2Work>> &workItems);
-    // Remove an input buffer from mInputBuffers and return it.
-    std::shared_ptr<C2Buffer> freeInputBuffer(uint64_t frameIndex, size_t bufferIndex);
+    void handleOnWorkDone(const std::list<std::unique_ptr<C2Work>> &workItems);
 
 };
 
diff --git a/media/codec2/sfplugin/Android.bp b/media/codec2/sfplugin/Android.bp
index 2870d39..a212651 100644
--- a/media/codec2/sfplugin/Android.bp
+++ b/media/codec2/sfplugin/Android.bp
@@ -8,6 +8,7 @@
         "CCodecConfig.cpp",
         "Codec2Buffer.cpp",
         "Codec2InfoBuilder.cpp",
+        "PipelineWatcher.cpp",
         "ReflectedParamUpdater.cpp",
         "SkipCutBuffer.cpp",
     ],
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 10263de..dce3222 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -448,14 +448,13 @@
 
     virtual void onWorkDone(
             const std::weak_ptr<Codec2Client::Component>& component,
-            std::list<std::unique_ptr<C2Work>>& workItems,
-            size_t numDiscardedInputBuffers) override {
+            std::list<std::unique_ptr<C2Work>>& workItems) override {
         (void)component;
         sp<CCodec> codec(mCodec.promote());
         if (!codec) {
             return;
         }
-        codec->onWorkDone(workItems, numDiscardedInputBuffers);
+        codec->onWorkDone(workItems);
     }
 
     virtual void onTripped(
@@ -504,10 +503,10 @@
     }
 
     virtual void onInputBufferDone(
-            const std::shared_ptr<C2Buffer>& buffer) override {
+            uint64_t frameIndex, size_t arrayIndex) override {
         sp<CCodec> codec(mCodec.promote());
         if (codec) {
-            codec->onInputBufferDone(buffer);
+            codec->onInputBufferDone(frameIndex, arrayIndex);
         }
     }
 
@@ -531,10 +530,6 @@
                 {RenderedFrameInfo(mediaTimeUs, renderTimeNs)});
     }
 
-    void onWorkQueued(bool eos) override {
-        mCodec->onWorkQueued(eos);
-    }
-
     void onOutputBuffersChanged() override {
         mCodec->mCallback->onOutputBuffersChanged();
     }
@@ -546,8 +541,7 @@
 // CCodec
 
 CCodec::CCodec()
-    : mChannel(new CCodecBufferChannel(std::make_shared<CCodecCallbackImpl>(this))),
-      mQueuedWorkCount(0) {
+    : mChannel(new CCodecBufferChannel(std::make_shared<CCodecCallbackImpl>(this))) {
 }
 
 CCodec::~CCodec() {
@@ -778,8 +772,16 @@
         }
 
         std::vector<std::unique_ptr<C2Param>> configUpdate;
+        // NOTE: We used to ignore "video-bitrate" at configure; replicate
+        //       the behavior here.
+        sp<AMessage> sdkParams = msg;
+        int32_t videoBitrate;
+        if (sdkParams->findInt32(PARAMETER_KEY_VIDEO_BITRATE, &videoBitrate)) {
+            sdkParams = msg->dup();
+            sdkParams->removeEntryAt(sdkParams->findEntryByName(PARAMETER_KEY_VIDEO_BITRATE));
+        }
         status_t err = config->getConfigUpdateFromSdkParams(
-                comp, msg, Config::IS_CONFIG, C2_DONT_BLOCK, &configUpdate);
+                comp, sdkParams, Config::IS_CONFIG, C2_DONT_BLOCK, &configUpdate);
         if (err != OK) {
             ALOGW("failed to convert configuration to c2 params");
         }
@@ -943,6 +945,47 @@
     (new AMessage(kWhatCreateInputSurface, this))->post();
 }
 
+sp<PersistentSurface> CCodec::CreateOmxInputSurface() {
+    using namespace android::hardware::media::omx::V1_0;
+    using namespace android::hardware::media::omx::V1_0::utils;
+    using namespace android::hardware::graphics::bufferqueue::V1_0::utils;
+    typedef android::hardware::media::omx::V1_0::Status OmxStatus;
+    android::sp<IOmx> omx = IOmx::getService();
+    typedef android::hardware::graphics::bufferqueue::V1_0::
+            IGraphicBufferProducer HGraphicBufferProducer;
+    typedef android::hardware::media::omx::V1_0::
+            IGraphicBufferSource HGraphicBufferSource;
+    OmxStatus s;
+    android::sp<HGraphicBufferProducer> gbp;
+    android::sp<HGraphicBufferSource> gbs;
+    android::Return<void> transStatus = omx->createInputSurface(
+            [&s, &gbp, &gbs](
+                    OmxStatus status,
+                    const android::sp<HGraphicBufferProducer>& producer,
+                    const android::sp<HGraphicBufferSource>& source) {
+                s = status;
+                gbp = producer;
+                gbs = source;
+            });
+    if (transStatus.isOk() && s == OmxStatus::OK) {
+        return new PersistentSurface(
+                new H2BGraphicBufferProducer(gbp),
+                sp<::android::IGraphicBufferSource>(new LWGraphicBufferSource(gbs)));
+    }
+
+    return nullptr;
+}
+
+sp<PersistentSurface> CCodec::CreateCompatibleInputSurface() {
+    sp<PersistentSurface> surface(CreateInputSurface());
+
+    if (surface == nullptr) {
+        surface = CreateOmxInputSurface();
+    }
+
+    return surface;
+}
+
 void CCodec::createInputSurface() {
     status_t err;
     sp<IGraphicBufferProducer> bufferProducer;
@@ -955,7 +998,7 @@
         outputFormat = config->mOutputFormat;
     }
 
-    std::shared_ptr<PersistentSurface> persistentSurface(CreateInputSurface());
+    sp<PersistentSurface> persistentSurface = CreateCompatibleInputSurface();
 
     if (persistentSurface->getHidlTarget()) {
         sp<IInputSurface> hidlInputSurface = IInputSurface::castFrom(
@@ -1343,7 +1386,6 @@
     }
 
     mChannel->flush(flushedWork);
-    subQueuedWorkCount(flushedWork.size());
 
     {
         Mutexed<State>::Locked state(mState);
@@ -1381,11 +1423,7 @@
     (void)mChannel->requestInitialInputBuffers();
 }
 
-void CCodec::signalSetParameters(const sp<AMessage> &params) {
-    setParameters(params);
-}
-
-void CCodec::setParameters(const sp<AMessage> &params) {
+void CCodec::signalSetParameters(const sp<AMessage> &msg) {
     std::shared_ptr<Codec2Client::Component> comp;
     auto checkState = [this, &comp] {
         Mutexed<State>::Locked state(mState);
@@ -1399,6 +1437,15 @@
         return;
     }
 
+    // NOTE: We used to ignore "bitrate" at setParameters; replicate
+    //       the behavior here.
+    sp<AMessage> params = msg;
+    int32_t bitrate;
+    if (params->findInt32(KEY_BIT_RATE, &bitrate)) {
+        params = msg->dup();
+        params->removeEntryAt(params->findEntryByName(KEY_BIT_RATE));
+    }
+
     Mutexed<Config>::Locked config(mConfig);
 
     /**
@@ -1465,28 +1512,16 @@
     config->setParameters(comp, params, C2_MAY_BLOCK);
 }
 
-void CCodec::onWorkDone(std::list<std::unique_ptr<C2Work>> &workItems,
-                        size_t numDiscardedInputBuffers) {
+void CCodec::onWorkDone(std::list<std::unique_ptr<C2Work>> &workItems) {
     if (!workItems.empty()) {
-        {
-            Mutexed<std::list<size_t>>::Locked numDiscardedInputBuffersQueue(
-                    mNumDiscardedInputBuffersQueue);
-            numDiscardedInputBuffersQueue->insert(
-                    numDiscardedInputBuffersQueue->end(),
-                    workItems.size() - 1, 0);
-            numDiscardedInputBuffersQueue->emplace_back(
-                    numDiscardedInputBuffers);
-        }
-        {
-            Mutexed<std::list<std::unique_ptr<C2Work>>>::Locked queue(mWorkDoneQueue);
-            queue->splice(queue->end(), workItems);
-        }
+        Mutexed<std::list<std::unique_ptr<C2Work>>>::Locked queue(mWorkDoneQueue);
+        queue->splice(queue->end(), workItems);
     }
     (new AMessage(kWhatWorkDone, this))->post();
 }
 
-void CCodec::onInputBufferDone(const std::shared_ptr<C2Buffer>& buffer) {
-    mChannel->onInputBufferDone(buffer);
+void CCodec::onInputBufferDone(uint64_t frameIndex, size_t arrayIndex) {
+    mChannel->onInputBufferDone(frameIndex, arrayIndex);
 }
 
 void CCodec::onMessageReceived(const sp<AMessage> &msg) {
@@ -1512,7 +1547,6 @@
         case kWhatStart: {
             // C2Component::start() should return within 500ms.
             setDeadline(now, 550ms, "start");
-            mQueuedWorkCount = 0;
             start();
             break;
         }
@@ -1520,10 +1554,6 @@
             // C2Component::stop() should return within 500ms.
             setDeadline(now, 550ms, "stop");
             stop();
-
-            mQueuedWorkCount = 0;
-            Mutexed<NamedTimePoint>::Locked deadline(mQueueDeadline);
-            deadline->set(TimePoint::max(), "none");
             break;
         }
         case kWhatFlush: {
@@ -1549,7 +1579,6 @@
         }
         case kWhatWorkDone: {
             std::unique_ptr<C2Work> work;
-            size_t numDiscardedInputBuffers;
             bool shouldPost = false;
             {
                 Mutexed<std::list<std::unique_ptr<C2Work>>>::Locked queue(mWorkDoneQueue);
@@ -1560,24 +1589,10 @@
                 queue->pop_front();
                 shouldPost = !queue->empty();
             }
-            {
-                Mutexed<std::list<size_t>>::Locked numDiscardedInputBuffersQueue(
-                        mNumDiscardedInputBuffersQueue);
-                if (numDiscardedInputBuffersQueue->empty()) {
-                    numDiscardedInputBuffers = 0;
-                } else {
-                    numDiscardedInputBuffers = numDiscardedInputBuffersQueue->front();
-                    numDiscardedInputBuffersQueue->pop_front();
-                }
-            }
             if (shouldPost) {
                 (new AMessage(kWhatWorkDone, this))->post();
             }
 
-            if (work->worklets.empty()
-                    || !(work->worklets.front()->output.flags & C2FrameData::FLAG_INCOMPLETE)) {
-                subQueuedWorkCount(1);
-            }
             // handle configuration changes in work done
             Mutexed<Config>::Locked config(mConfig);
             bool changed = false;
@@ -1641,8 +1656,7 @@
             }
             mChannel->onWorkDone(
                     std::move(work), changed ? config->mOutputFormat : nullptr,
-                    initData.hasChanged() ? initData.update().get() : nullptr,
-                    numDiscardedInputBuffers);
+                    initData.hasChanged() ? initData.update().get() : nullptr);
             break;
         }
         case kWhatWatch: {
@@ -1669,17 +1683,26 @@
 void CCodec::initiateReleaseIfStuck() {
     std::string name;
     bool pendingDeadline = false;
-    for (Mutexed<NamedTimePoint> *deadlinePtr : { &mDeadline, &mQueueDeadline, &mEosDeadline }) {
-        Mutexed<NamedTimePoint>::Locked deadline(*deadlinePtr);
+    {
+        Mutexed<NamedTimePoint>::Locked deadline(mDeadline);
         if (deadline->get() < std::chrono::steady_clock::now()) {
             name = deadline->getName();
-            break;
         }
         if (deadline->get() != TimePoint::max()) {
             pendingDeadline = true;
         }
     }
     if (name.empty()) {
+        constexpr std::chrono::steady_clock::duration kWorkDurationThreshold = 3s;
+        std::chrono::steady_clock::duration elapsed = mChannel->elapsed();
+        if (elapsed >= kWorkDurationThreshold) {
+            name = "queue";
+        }
+        if (elapsed > 0s) {
+            pendingDeadline = true;
+        }
+    }
+    if (name.empty()) {
         // We're not stuck.
         if (pendingDeadline) {
             // If we are not stuck yet but still has deadline coming up,
@@ -1694,79 +1717,23 @@
     mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
 }
 
-void CCodec::onWorkQueued(bool eos) {
-    ALOGV("queued work count +1 from %d", mQueuedWorkCount.load());
-    int32_t count = ++mQueuedWorkCount;
-    if (eos) {
-        CCodecWatchdog::getInstance()->watch(this);
-        Mutexed<NamedTimePoint>::Locked deadline(mEosDeadline);
-        deadline->set(std::chrono::steady_clock::now() + 3s, "eos");
-    }
-    // TODO: query and use input/pipeline/output delay combined
-    if (count >= 4) {
-        CCodecWatchdog::getInstance()->watch(this);
-        Mutexed<NamedTimePoint>::Locked deadline(mQueueDeadline);
-        deadline->set(std::chrono::steady_clock::now() + 3s, "queue");
-    }
-}
-
-void CCodec::subQueuedWorkCount(uint32_t count) {
-    ALOGV("queued work count -%u from %d", count, mQueuedWorkCount.load());
-    int32_t currentCount = (mQueuedWorkCount -= count);
-    if (currentCount == 0) {
-        Mutexed<NamedTimePoint>::Locked deadline(mEosDeadline);
-        deadline->set(TimePoint::max(), "none");
-    }
-    Mutexed<NamedTimePoint>::Locked deadline(mQueueDeadline);
-    deadline->set(TimePoint::max(), "none");
-}
-
 }  // namespace android
 
 extern "C" android::CodecBase *CreateCodec() {
     return new android::CCodec;
 }
 
+// Create Codec 2.0 input surface
 extern "C" android::PersistentSurface *CreateInputSurface() {
     // Attempt to create a Codec2's input surface.
     std::shared_ptr<android::Codec2Client::InputSurface> inputSurface =
             android::Codec2Client::CreateInputSurface();
-    if (inputSurface) {
-        return new android::PersistentSurface(
-                inputSurface->getGraphicBufferProducer(),
-                static_cast<android::sp<android::hidl::base::V1_0::IBase>>(
-                inputSurface->getHalInterface()));
+    if (!inputSurface) {
+        return nullptr;
     }
-
-    // Fall back to OMX.
-    using namespace android::hardware::media::omx::V1_0;
-    using namespace android::hardware::media::omx::V1_0::utils;
-    using namespace android::hardware::graphics::bufferqueue::V1_0::utils;
-    typedef android::hardware::media::omx::V1_0::Status OmxStatus;
-    android::sp<IOmx> omx = IOmx::getService();
-    typedef android::hardware::graphics::bufferqueue::V1_0::
-            IGraphicBufferProducer HGraphicBufferProducer;
-    typedef android::hardware::media::omx::V1_0::
-            IGraphicBufferSource HGraphicBufferSource;
-    OmxStatus s;
-    android::sp<HGraphicBufferProducer> gbp;
-    android::sp<HGraphicBufferSource> gbs;
-    android::Return<void> transStatus = omx->createInputSurface(
-            [&s, &gbp, &gbs](
-                    OmxStatus status,
-                    const android::sp<HGraphicBufferProducer>& producer,
-                    const android::sp<HGraphicBufferSource>& source) {
-                s = status;
-                gbp = producer;
-                gbs = source;
-            });
-    if (transStatus.isOk() && s == OmxStatus::OK) {
-        return new android::PersistentSurface(
-                new H2BGraphicBufferProducer(gbp),
-                sp<::android::IGraphicBufferSource>(
-                    new LWGraphicBufferSource(gbs)));
-    }
-
-    return nullptr;
+    return new android::PersistentSurface(
+            inputSurface->getGraphicBufferProducer(),
+            static_cast<android::sp<android::hidl::base::V1_0::IBase>>(
+            inputSurface->getHalInterface()));
 }
 
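The hunks above drop the queued-work counters (mQueuedWorkCount, mQueueDeadline, mEosDeadline) and instead ask the buffer channel how long the oldest work item has been in flight. A minimal, self-contained sketch of that check follows; elapsedOfOldestWork() is a stand-in for CCodecBufferChannel::elapsed() and the 4s sample age is made up, while the 3s threshold matches kWorkDurationThreshold above:

    // Stand-alone sketch; compile with a C++14 (or later) compiler.
    #include <chrono>
    #include <iostream>
    #include <string>

    using Clock = std::chrono::steady_clock;
    using namespace std::chrono_literals;

    // Stand-in for CCodecBufferChannel::elapsed(); pretend the oldest queued
    // work item has been in the pipeline for 4 seconds.
    static Clock::duration elapsedOfOldestWork() { return 4s; }

    int main() {
        constexpr Clock::duration kWorkDurationThreshold = 3s;  // same threshold as above
        Clock::duration elapsed = elapsedOfOldestWork();
        std::string stuckName;
        bool pendingDeadline = false;
        if (elapsed >= kWorkDurationThreshold) {
            stuckName = "queue";     // CCodec would release the codec and report an error
        }
        if (elapsed > 0s) {
            pendingDeadline = true;  // otherwise CCodec re-arms the watchdog
        }
        std::cout << (stuckName.empty() ? std::string("not stuck") : "stuck on " + stuckName)
                  << ", pendingDeadline=" << pendingDeadline << "\n";
        return 0;
    }
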
diff --git a/media/codec2/sfplugin/CCodec.h b/media/codec2/sfplugin/CCodec.h
index 78b009e..b0b3c4f 100644
--- a/media/codec2/sfplugin/CCodec.h
+++ b/media/codec2/sfplugin/CCodec.h
@@ -66,9 +66,8 @@
     virtual void signalRequestIDRFrame() override;
 
     void initiateReleaseIfStuck();
-    void onWorkDone(std::list<std::unique_ptr<C2Work>> &workItems,
-                    size_t numDiscardedInputBuffers);
-    void onInputBufferDone(const std::shared_ptr<C2Buffer>& buffer);
+    void onWorkDone(std::list<std::unique_ptr<C2Work>> &workItems);
+    void onInputBufferDone(uint64_t frameIndex, size_t arrayIndex);
 
 protected:
     virtual ~CCodec();
@@ -76,7 +75,7 @@
     virtual void onMessageReceived(const sp<AMessage> &msg) override;
 
 private:
-    typedef std::chrono::time_point<std::chrono::steady_clock> TimePoint;
+    typedef std::chrono::steady_clock::time_point TimePoint;
 
     status_t tryAndReportOnError(std::function<status_t()> job);
 
@@ -90,19 +89,25 @@
     void flush();
     void release(bool sendCallback);
 
+    /**
+     * Creates an input surface for the current device configuration compatible with CCodec.
+     * This could be backed by the C2 HAL or the OMX HAL.
+     */
+    static sp<PersistentSurface> CreateCompatibleInputSurface();
+
+    /// Creates an input surface backed by the OMX HAL.
+    static sp<PersistentSurface> CreateOmxInputSurface();
+
+    /// Handles a createInputSurface() call.
     void createInputSurface();
     void setInputSurface(const sp<PersistentSurface> &surface);
     status_t setupInputSurface(const std::shared_ptr<InputSurfaceWrapper> &surface);
-    void setParameters(const sp<AMessage> &params);
 
     void setDeadline(
             const TimePoint &now,
             const std::chrono::milliseconds &timeout,
             const char *name);
 
-    void onWorkQueued(bool eos);
-    void subQueuedWorkCount(uint32_t count);
-
     enum {
         kWhatAllocate,
         kWhatConfigure,
@@ -167,13 +172,9 @@
     struct ClientListener;
 
     Mutexed<NamedTimePoint> mDeadline;
-    std::atomic_int32_t mQueuedWorkCount;
-    Mutexed<NamedTimePoint> mQueueDeadline;
-    Mutexed<NamedTimePoint> mEosDeadline;
     typedef CCodecConfig Config;
     Mutexed<Config> mConfig;
     Mutexed<std::list<std::unique_ptr<C2Work>>> mWorkDoneQueue;
-    Mutexed<std::list<size_t>> mNumDiscardedInputBuffersQueue;
 
     friend class CCodecCallbackImpl;
 
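The new declarations split input-surface creation into a Codec 2.0 path with an OMX fallback ("This could be backed by the C2 HAL or the OMX HAL"). Their implementations are not part of this hunk; the sketch below only illustrates the intended fallback shape, with Surface, tryCreateCodec2Surface() and tryCreateOmxSurface() as hypothetical placeholders for PersistentSurface and the real creation calls:

    #include <memory>

    struct Surface {};  // placeholder for android::PersistentSurface

    // Stand-ins for the Codec 2.0 and OMX creation paths declared above.
    static std::shared_ptr<Surface> tryCreateCodec2Surface() { return nullptr; }
    static std::shared_ptr<Surface> tryCreateOmxSurface() { return std::make_shared<Surface>(); }

    static std::shared_ptr<Surface> createCompatibleInputSurface() {
        // Prefer a Codec 2.0 input surface when the HAL provides one...
        if (std::shared_ptr<Surface> surface = tryCreateCodec2Surface()) {
            return surface;
        }
        // ...otherwise fall back to an OMX-backed surface.
        return tryCreateOmxSurface();
    }

    int main() {
        return createCompatibleInputSurface() ? 0 : 1;  // 0: a surface was created
    }
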
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 8e6a3f8..7a444a3 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -94,6 +94,11 @@
      */
     virtual void getArray(Vector<sp<MediaCodecBuffer>> *) const {}
 
+    /**
+     * Return number of buffers the client owns.
+     */
+    virtual size_t numClientBuffers() const = 0;
+
 protected:
     std::string mComponentName; ///< name of component for debugging
     std::string mChannelName; ///< name of channel for debugging
@@ -181,7 +186,7 @@
      * MediaCodec behavior.
      */
     virtual status_t registerCsd(
-            const C2StreamCsdInfo::output * /* csd */,
+            const C2StreamInitDataInfo::output * /* csd */,
             size_t * /* index */,
             sp<MediaCodecBuffer> * /* clientBuffer */) = 0;
 
@@ -250,6 +255,34 @@
         mSkipCutBuffer = scb;
     }
 
+    void handleImageData(const sp<Codec2Buffer> &buffer) {
+        sp<ABuffer> imageDataCandidate = buffer->getImageData();
+        if (imageDataCandidate == nullptr) {
+            return;
+        }
+        sp<ABuffer> imageData;
+        if (!mFormat->findBuffer("image-data", &imageData)
+                || imageDataCandidate->size() != imageData->size()
+                || memcmp(imageDataCandidate->data(), imageData->data(), imageData->size()) != 0) {
+            ALOGD("[%s] updating image-data", mName);
+            sp<AMessage> newFormat = dupFormat();
+            newFormat->setBuffer("image-data", imageDataCandidate);
+            MediaImage2 *img = (MediaImage2*)imageDataCandidate->data();
+            if (img->mNumPlanes > 0 && img->mType != img->MEDIA_IMAGE_TYPE_UNKNOWN) {
+                int32_t stride = img->mPlane[0].mRowInc;
+                newFormat->setInt32(KEY_STRIDE, stride);
+                ALOGD("[%s] updating stride = %d", mName, stride);
+                if (img->mNumPlanes > 1 && stride > 0) {
+                    int32_t vstride = (img->mPlane[1].mOffset - img->mPlane[0].mOffset) / stride;
+                    newFormat->setInt32(KEY_SLICE_HEIGHT, vstride);
+                    ALOGD("[%s] updating vstride = %d", mName, vstride);
+                }
+            }
+            setFormat(newFormat);
+            buffer->setFormat(newFormat);
+        }
+    }
+
 protected:
     sp<SkipCutBuffer> mSkipCutBuffer;
 
@@ -508,6 +541,14 @@
         mBuffers.clear();
     }
 
+    size_t numClientBuffers() const {
+        return std::count_if(
+                mBuffers.begin(), mBuffers.end(),
+                [](const Entry &entry) {
+                    return (entry.clientBuffer != nullptr);
+                });
+    }
+
 private:
     friend class BuffersArrayImpl;
 
@@ -693,6 +734,14 @@
         }
     }
 
+    size_t numClientBuffers() const {
+        return std::count_if(
+                mBuffers.begin(), mBuffers.end(),
+                [](const Entry &entry) {
+                    return entry.ownedByClient;
+                });
+    }
+
 private:
     std::string mImplName; ///< name for debugging
     const char *mName; ///< C-string version of name
@@ -756,6 +805,10 @@
         mImpl.flush();
     }
 
+    size_t numClientBuffers() const final {
+        return mImpl.numClientBuffers();
+    }
+
 private:
     BuffersArrayImpl mImpl;
 };
@@ -823,6 +876,10 @@
         return std::move(array);
     }
 
+    size_t numClientBuffers() const final {
+        return mImpl.numClientBuffers();
+    }
+
     virtual sp<Codec2Buffer> alloc(size_t size) {
         C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
         std::shared_ptr<C2LinearBlock> block;
@@ -967,6 +1024,10 @@
         return std::move(array);
     }
 
+    size_t numClientBuffers() const final {
+        return mImpl.numClientBuffers();
+    }
+
 private:
     FlexBuffersImpl mImpl;
     std::shared_ptr<C2AllocatorStore> mStore;
@@ -1030,6 +1091,10 @@
         return std::move(array);
     }
 
+    size_t numClientBuffers() const final {
+        return mImpl.numClientBuffers();
+    }
+
 private:
     FlexBuffersImpl mImpl;
     std::shared_ptr<LocalBufferPool> mLocalBufferPool;
@@ -1065,6 +1130,10 @@
     void getArray(Vector<sp<MediaCodecBuffer>> *array) const final {
         array->clear();
     }
+
+    size_t numClientBuffers() const final {
+        return 0u;
+    }
 };
 
 class OutputBuffersArray : public CCodecBufferChannel::OutputBuffers {
@@ -1111,13 +1180,14 @@
             return WOULD_BLOCK;
         }
         submit(c2Buffer);
+        handleImageData(c2Buffer);
         *clientBuffer = c2Buffer;
         ALOGV("[%s] grabbed buffer %zu", mName, *index);
         return OK;
     }
 
     status_t registerCsd(
-            const C2StreamCsdInfo::output *csd,
+            const C2StreamInitDataInfo::output *csd,
             size_t *index,
             sp<MediaCodecBuffer> *clientBuffer) final {
         sp<Codec2Buffer> c2Buffer;
@@ -1185,6 +1255,10 @@
         mImpl.realloc(alloc);
     }
 
+    size_t numClientBuffers() const final {
+        return mImpl.numClientBuffers();
+    }
+
 private:
     BuffersArrayImpl mImpl;
 };
@@ -1205,13 +1279,14 @@
         }
         newBuffer->setFormat(mFormat);
         *index = mImpl.assignSlot(newBuffer);
+        handleImageData(newBuffer);
         *clientBuffer = newBuffer;
         ALOGV("[%s] registered buffer %zu", mName, *index);
         return OK;
     }
 
     status_t registerCsd(
-            const C2StreamCsdInfo::output *csd,
+            const C2StreamInitDataInfo::output *csd,
             size_t *index,
             sp<MediaCodecBuffer> *clientBuffer) final {
         sp<Codec2Buffer> newBuffer = new LocalLinearBuffer(
@@ -1246,6 +1321,10 @@
         return std::move(array);
     }
 
+    size_t numClientBuffers() const final {
+        return mImpl.numClientBuffers();
+    }
+
     /**
      * Return an appropriate Codec2Buffer object for the type of buffers.
      *
@@ -1422,90 +1501,6 @@
     count->value = -1;
 }
 
-// CCodecBufferChannel::PipelineCapacity
-
-CCodecBufferChannel::PipelineCapacity::PipelineCapacity()
-      : input(0), component(0),
-        mName("<UNKNOWN COMPONENT>") {
-}
-
-void CCodecBufferChannel::PipelineCapacity::initialize(
-        int newInput,
-        int newComponent,
-        const char* newName,
-        const char* callerTag) {
-    input.store(newInput, std::memory_order_relaxed);
-    component.store(newComponent, std::memory_order_relaxed);
-    mName = newName;
-    ALOGV("[%s] %s -- PipelineCapacity::initialize(): "
-          "pipeline availability initialized ==> "
-          "input = %d, component = %d",
-            mName, callerTag ? callerTag : "*",
-            newInput, newComponent);
-}
-
-bool CCodecBufferChannel::PipelineCapacity::allocate(const char* callerTag) {
-    int prevInput = input.fetch_sub(1, std::memory_order_relaxed);
-    int prevComponent = component.fetch_sub(1, std::memory_order_relaxed);
-    if (prevInput > 0 && prevComponent > 0) {
-        ALOGV("[%s] %s -- PipelineCapacity::allocate() returns true: "
-              "pipeline availability -1 all ==> "
-              "input = %d, component = %d",
-                mName, callerTag ? callerTag : "*",
-                prevInput - 1,
-                prevComponent - 1);
-        return true;
-    }
-    input.fetch_add(1, std::memory_order_relaxed);
-    component.fetch_add(1, std::memory_order_relaxed);
-    ALOGV("[%s] %s -- PipelineCapacity::allocate() returns false: "
-          "pipeline availability unchanged ==> "
-          "input = %d, component = %d",
-            mName, callerTag ? callerTag : "*",
-            prevInput,
-            prevComponent);
-    return false;
-}
-
-void CCodecBufferChannel::PipelineCapacity::free(const char* callerTag) {
-    int prevInput = input.fetch_add(1, std::memory_order_relaxed);
-    int prevComponent = component.fetch_add(1, std::memory_order_relaxed);
-    ALOGV("[%s] %s -- PipelineCapacity::free(): "
-          "pipeline availability +1 all ==> "
-          "input = %d, component = %d",
-            mName, callerTag ? callerTag : "*",
-            prevInput + 1,
-            prevComponent + 1);
-}
-
-int CCodecBufferChannel::PipelineCapacity::freeInputSlots(
-        size_t numDiscardedInputBuffers,
-        const char* callerTag) {
-    int prevInput = input.fetch_add(numDiscardedInputBuffers,
-                                    std::memory_order_relaxed);
-    ALOGV("[%s] %s -- PipelineCapacity::freeInputSlots(%zu): "
-          "pipeline availability +%zu input ==> "
-          "input = %d, component = %d",
-            mName, callerTag ? callerTag : "*",
-            numDiscardedInputBuffers,
-            numDiscardedInputBuffers,
-            prevInput + static_cast<int>(numDiscardedInputBuffers),
-            component.load(std::memory_order_relaxed));
-    return prevInput + static_cast<int>(numDiscardedInputBuffers);
-}
-
-int CCodecBufferChannel::PipelineCapacity::freeComponentSlot(
-        const char* callerTag) {
-    int prevComponent = component.fetch_add(1, std::memory_order_relaxed);
-    ALOGV("[%s] %s -- PipelineCapacity::freeComponentSlot(): "
-          "pipeline availability +1 component ==> "
-          "input = %d, component = %d",
-            mName, callerTag ? callerTag : "*",
-            input.load(std::memory_order_relaxed),
-            prevComponent + 1);
-    return prevComponent + 1;
-}
-
 // CCodecBufferChannel::ReorderStash
 
 CCodecBufferChannel::ReorderStash::ReorderStash() {
@@ -1592,11 +1587,12 @@
       mCCodecCallback(callback),
       mNumInputSlots(kSmoothnessFactor),
       mNumOutputSlots(kSmoothnessFactor),
+      mDelay(0),
       mFrameIndex(0u),
       mFirstValidFrameIndex(0u),
       mMetaMode(MODE_NONE),
-      mAvailablePipelineCapacity(),
       mInputMetEos(false) {
+    mOutputSurface.lock()->maxDequeueBuffers = kSmoothnessFactor + kRenderingDepth;
     Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
     buffers->reset(new DummyInputBuffers(""));
 }
@@ -1658,6 +1654,9 @@
     work->input.ordinal.customOrdinal = timeUs;
     work->input.buffers.clear();
 
+    uint64_t queuedFrameIndex = work->input.ordinal.frameIndex.peeku();
+    std::vector<std::shared_ptr<C2Buffer>> queuedBuffers;
+
     if (buffer->size() > 0u) {
         Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
         std::shared_ptr<C2Buffer> c2buffer;
@@ -1665,11 +1664,9 @@
             return -ENOENT;
         }
         work->input.buffers.push_back(c2buffer);
-    } else {
-        mAvailablePipelineCapacity.freeInputSlots(1, "queueInputBufferInternal");
-        if (eos) {
-            flags |= C2FrameData::FLAG_END_OF_STREAM;
-        }
+        queuedBuffers.push_back(c2buffer);
+    } else if (eos) {
+        flags |= C2FrameData::FLAG_END_OF_STREAM;
     }
     work->input.flags = (C2FrameData::flags_t)flags;
     // TODO: fill info's
@@ -1680,10 +1677,16 @@
 
     std::list<std::unique_ptr<C2Work>> items;
     items.push_back(std::move(work));
+    mPipelineWatcher.lock()->onWorkQueued(
+            queuedFrameIndex,
+            std::move(queuedBuffers),
+            PipelineWatcher::Clock::now());
     c2_status_t err = mComponent->queue(&items);
+    if (err != C2_OK) {
+        mPipelineWatcher.lock()->onWorkDone(queuedFrameIndex);
+    }
 
     if (err == C2_OK && eos && buffer->size() > 0u) {
-        mCCodecCallback->onWorkQueued(false);
         work.reset(new C2Work);
         work->input.ordinal.timestamp = timeUs;
         work->input.ordinal.frameIndex = mFrameIndex++;
@@ -1693,13 +1696,22 @@
         work->input.flags = C2FrameData::FLAG_END_OF_STREAM;
         work->worklets.emplace_back(new C2Worklet);
 
+        queuedFrameIndex = work->input.ordinal.frameIndex.peeku();
+        queuedBuffers.clear();
+
         items.clear();
         items.push_back(std::move(work));
+
+        mPipelineWatcher.lock()->onWorkQueued(
+                queuedFrameIndex,
+                std::move(queuedBuffers),
+                PipelineWatcher::Clock::now());
         err = mComponent->queue(&items);
+        if (err != C2_OK) {
+            mPipelineWatcher.lock()->onWorkDone(queuedFrameIndex);
+        }
     }
     if (err == C2_OK) {
-        mCCodecCallback->onWorkQueued(eos);
-
         Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
         bool released = (*buffers)->releaseBuffer(buffer, nullptr, true);
         ALOGV("[%s] queueInputBuffer: buffer %sreleased", mName, released ? "" : "not ");
@@ -1844,16 +1856,26 @@
 }
 
 void CCodecBufferChannel::feedInputBufferIfAvailableInternal() {
-    while (!mInputMetEos &&
-           !mReorderStash.lock()->hasPending() &&
-           mAvailablePipelineCapacity.allocate("feedInputBufferIfAvailable")) {
+    if (mInputMetEos ||
+           mReorderStash.lock()->hasPending() ||
+           mPipelineWatcher.lock()->pipelineFull()) {
+        return;
+    } else {
+        Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+        if ((*buffers)->numClientBuffers() >= mNumOutputSlots) {
+            return;
+        }
+    }
+    for (size_t i = 0; i < mNumInputSlots; ++i) {
         sp<MediaCodecBuffer> inBuffer;
         size_t index;
         {
             Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+            if ((*buffers)->numClientBuffers() >= mNumInputSlots) {
+                return;
+            }
             if (!(*buffers)->requestNewBuffer(&index, &inBuffer)) {
                 ALOGV("[%s] no new buffer available", mName);
-                mAvailablePipelineCapacity.free("feedInputBufferIfAvailable");
                 break;
             }
         }
@@ -2032,15 +2054,12 @@
     {
         Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
         if (*buffers && (*buffers)->releaseBuffer(buffer, nullptr, true)) {
-            buffers.unlock();
             released = true;
-            mAvailablePipelineCapacity.freeInputSlots(1, "discardBuffer");
         }
     }
     {
         Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
         if (*buffers && (*buffers)->releaseBuffer(buffer, nullptr)) {
-            buffers.unlock();
             released = true;
         }
     }
@@ -2117,11 +2136,13 @@
         }
     }
 
-    mNumInputSlots =
-        (inputDelay ? inputDelay.value : 0) +
-        (pipelineDelay ? pipelineDelay.value : 0) +
-        kSmoothnessFactor;
-    mNumOutputSlots = (outputDelay ? outputDelay.value : 0) + kSmoothnessFactor;
+    uint32_t inputDelayValue = inputDelay ? inputDelay.value : 0;
+    uint32_t pipelineDelayValue = pipelineDelay ? pipelineDelay.value : 0;
+    uint32_t outputDelayValue = outputDelay ? outputDelay.value : 0;
+
+    mNumInputSlots = inputDelayValue + pipelineDelayValue + kSmoothnessFactor;
+    mNumOutputSlots = outputDelayValue + kSmoothnessFactor;
+    mDelay = inputDelayValue + pipelineDelayValue + outputDelayValue;
 
     // TODO: get this from input format
     bool secure = mComponent->getName().find(".secure") != std::string::npos;
@@ -2133,7 +2154,7 @@
             1 << C2PlatformAllocatorStore::BUFFERQUEUE);
 
     if (inputFormat != nullptr) {
-        bool graphic = (iStreamFormat.value == C2FormatVideo);
+        bool graphic = (iStreamFormat.value == C2BufferData::GRAPHIC);
         std::shared_ptr<C2BlockPool> pool;
         {
             Mutexed<BlockPools>::Locked pools(mBlockPools);
@@ -2249,12 +2270,16 @@
         uint32_t outputGeneration;
         {
             Mutexed<OutputSurface>::Locked output(mOutputSurface);
+            output->maxDequeueBuffers = mNumOutputSlots + reorderDepth.value + kRenderingDepth;
             outputSurface = output->surface ?
                     output->surface->getIGraphicBufferProducer() : nullptr;
+            if (outputSurface) {
+                output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
+            }
             outputGeneration = output->generation;
         }
 
-        bool graphic = (oStreamFormat.value == C2FormatVideo);
+        bool graphic = (oStreamFormat.value == C2BufferData::GRAPHIC);
         C2BlockPool::local_id_t outputPoolId_;
 
         {
@@ -2408,10 +2433,14 @@
     // about buffers from the previous generation do not interfere with the
     // newly initialized pipeline capacity.
 
-    mAvailablePipelineCapacity.initialize(
-            mNumInputSlots,
-            mNumInputSlots + mNumOutputSlots,
-            mName);
+    {
+        Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
+        watcher->inputDelay(inputDelayValue)
+                .pipelineDelay(pipelineDelayValue)
+                .outputDelay(outputDelayValue)
+                .smoothnessFactor(kSmoothnessFactor);
+        watcher->flush();
+    }
 
     mInputMetEos = false;
     mSync.start();
@@ -2423,7 +2452,7 @@
         return OK;
     }
 
-    C2StreamFormatConfig::output oStreamFormat(0u);
+    C2StreamBufferTypeSetting::output oStreamFormat(0u);
     c2_status_t err = mComponent->query({ &oStreamFormat }, {}, C2_DONT_BLOCK, nullptr);
     if (err != C2_OK) {
         return UNKNOWN_ERROR;
@@ -2472,21 +2501,16 @@
                 buffer->meta()->setInt64("timeUs", 0);
                 post = false;
             }
-            if (mAvailablePipelineCapacity.allocate("requestInitialInputBuffers")) {
-                if (post) {
-                    mCallback->onInputBufferAvailable(index, buffer);
-                } else {
-                    toBeQueued.emplace_back(buffer);
-                }
+            if (post) {
+                mCallback->onInputBufferAvailable(index, buffer);
             } else {
-                ALOGD("[%s] pipeline is full while requesting %zu-th input buffer",
-                        mName, i);
+                toBeQueued.emplace_back(buffer);
             }
         }
     }
     for (const sp<MediaCodecBuffer> &buffer : toBeQueued) {
         if (queueInputBufferInternal(buffer) != OK) {
-            mAvailablePipelineCapacity.freeComponentSlot("requestInitialInputBuffers");
+            ALOGV("[%s] Error while queueing initial buffers", mName);
         }
     }
     return OK;
@@ -2532,28 +2556,25 @@
         (*buffers)->flush(flushedWork);
     }
     mReorderStash.lock()->flush();
+    mPipelineWatcher.lock()->flush();
 }
 
 void CCodecBufferChannel::onWorkDone(
         std::unique_ptr<C2Work> work, const sp<AMessage> &outputFormat,
-        const C2StreamInitDataInfo::output *initData,
-        size_t numDiscardedInputBuffers) {
+        const C2StreamInitDataInfo::output *initData) {
     if (handleWork(std::move(work), outputFormat, initData)) {
-        mAvailablePipelineCapacity.freeInputSlots(numDiscardedInputBuffers,
-                                                  "onWorkDone");
         feedInputBufferIfAvailable();
     }
 }
 
 void CCodecBufferChannel::onInputBufferDone(
-        const std::shared_ptr<C2Buffer>& buffer) {
+        uint64_t frameIndex, size_t arrayIndex) {
+    std::shared_ptr<C2Buffer> buffer =
+            mPipelineWatcher.lock()->onInputBufferReleased(frameIndex, arrayIndex);
     bool newInputSlotAvailable;
     {
         Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
         newInputSlotAvailable = (*buffers)->expireComponentBuffer(buffer);
-        if (newInputSlotAvailable) {
-            mAvailablePipelineCapacity.freeInputSlots(1, "onInputBufferDone");
-        }
     }
     if (newInputSlotAvailable) {
         feedInputBufferIfAvailable();
@@ -2573,7 +2594,7 @@
     if (work->worklets.size() != 1u
             || !work->worklets.front()
             || !(work->worklets.front()->output.flags & C2FrameData::FLAG_INCOMPLETE)) {
-        mAvailablePipelineCapacity.freeComponentSlot("handleWork");
+        mPipelineWatcher.lock()->onWorkDone(work->input.ordinal.frameIndex.peeku());
     }
 
     if (work->result == C2_NOT_FOUND) {
@@ -2622,6 +2643,11 @@
                     mReorderStash.lock()->setDepth(reorderDepth.value);
                     ALOGV("[%s] onWorkDone: updated reorder depth to %u",
                           mName, reorderDepth.value);
+                    Mutexed<OutputSurface>::Locked output(mOutputSurface);
+                    output->maxDequeueBuffers = mNumOutputSlots + reorderDepth.value + kRenderingDepth;
+                    if (output->surface) {
+                        output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
+                    }
                 } else {
                     ALOGD("[%s] onWorkDone: failed to read reorder depth", mName);
                 }
@@ -2718,7 +2744,7 @@
             // TODO: properly translate these to metadata
             switch (info->coreIndex().coreIndex()) {
                 case C2StreamPictureTypeMaskInfo::CORE_INDEX:
-                    if (((C2StreamPictureTypeMaskInfo *)info.get())->value & C2PictureTypeKeyFrame) {
+                    if (((C2StreamPictureTypeMaskInfo *)info.get())->value & C2Config::SYNC_FRAME) {
                         flags |= MediaCodec::BUFFER_FLAG_SYNCFRAME;
                     }
                     break;
@@ -2797,7 +2823,6 @@
     sp<IGraphicBufferProducer> producer;
     if (newSurface) {
         newSurface->setScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
-        newSurface->setMaxDequeuedBufferCount(mNumOutputSlots + kRenderingDepth);
         producer = newSurface->getIGraphicBufferProducer();
         producer->setGenerationNumber(generation);
     } else {
@@ -2825,6 +2850,7 @@
 
     {
         Mutexed<OutputSurface>::Locked output(mOutputSurface);
+        newSurface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
         output->surface = newSurface;
         output->generation = generation;
     }
@@ -2832,6 +2858,14 @@
     return OK;
 }
 
+PipelineWatcher::Clock::duration CCodecBufferChannel::elapsed() {
+    // When client pushed EOS, we want all the work to be done quickly.
+    // Otherwise, component may have stalled work due to input starvation up to
+    // the sum of the delay in the pipeline.
+    size_t n = mInputMetEos ? 0 : mDelay;
+    return mPipelineWatcher.lock()->elapsed(PipelineWatcher::Clock::now(), n);
+}
+
 void CCodecBufferChannel::setMetaMode(MetaMode mode) {
     mMetaMode = mode;
 }
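
With PipelineCapacity gone, feedInputBufferIfAvailableInternal() above gates new input on the pipeline watcher and on how many buffers the client still owns (numClientBuffers() on both the input and output side). A self-contained sketch of that control flow, with simplified stand-in types and counters:

    #include <cstddef>
    #include <iostream>

    struct FakePipelineWatcher {
        bool full = false;
        bool pipelineFull() const { return full; }
    };

    int main() {
        const size_t kNumInputSlots = 4, kNumOutputSlots = 4;  // made-up slot counts
        size_t clientInputBuffers = 1, clientOutputBuffers = 0;
        bool inputMetEos = false, reorderPending = false;
        FakePipelineWatcher watcher;

        if (inputMetEos || reorderPending || watcher.pipelineFull()
                || clientOutputBuffers >= kNumOutputSlots) {
            return 0;  // nothing should be fed right now
        }
        for (size_t i = 0; i < kNumInputSlots; ++i) {
            if (clientInputBuffers >= kNumInputSlots) {
                break;  // the client already owns all input slots
            }
            ++clientInputBuffers;  // stands in for requestNewBuffer() + onInputBufferAvailable()
            std::cout << "fed input buffer " << i << "\n";
        }
        return 0;
    }
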
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index ebc1491..1ea29b4 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -34,6 +34,7 @@
 #include <media/ICrypto.h>
 
 #include "InputSurfaceWrapper.h"
+#include "PipelineWatcher.h"
 
 namespace android {
 
@@ -44,7 +45,6 @@
     virtual ~CCodecCallback() = default;
     virtual void onError(status_t err, enum ActionCode actionCode) = 0;
     virtual void onOutputFramesRendered(int64_t mediaTimeUs, nsecs_t renderTimeNs) = 0;
-    virtual void onWorkQueued(bool eos) = 0;
     virtual void onOutputBuffersChanged() = 0;
 };
 
@@ -128,22 +128,21 @@
      * @param work        finished work item.
      * @param outputFormat new output format if it has changed, otherwise nullptr
      * @param initData    new init data (CSD) if it has changed, otherwise nullptr
-     * @param numDiscardedInputBuffers the number of input buffers that are
-     *                    returned for the first time (not previously returned by
-     *                    onInputBufferDone()).
      */
     void onWorkDone(
             std::unique_ptr<C2Work> work, const sp<AMessage> &outputFormat,
-            const C2StreamInitDataInfo::output *initData,
-            size_t numDiscardedInputBuffers);
+            const C2StreamInitDataInfo::output *initData);
 
     /**
      * Make an input buffer available for the client as it is no longer needed
      * by the codec.
      *
-     * @param buffer The buffer that becomes unused.
+     * @param frameIndex The index of the input work.
+     * @param arrayIndex The index of the buffer within the input work's buffer list.
      */
-    void onInputBufferDone(const std::shared_ptr<C2Buffer>& buffer);
+    void onInputBufferDone(uint64_t frameIndex, size_t arrayIndex);
+
+    PipelineWatcher::Clock::duration elapsed();
 
     enum MetaMode {
         MODE_NONE,
@@ -237,6 +236,7 @@
 
     size_t mNumInputSlots;
     size_t mNumOutputSlots;
+    size_t mDelay;
 
     Mutexed<std::unique_ptr<InputBuffers>> mInputBuffers;
     Mutexed<std::list<sp<ABuffer>>> mFlushedConfigs;
@@ -250,6 +250,7 @@
     struct OutputSurface {
         sp<Surface> surface;
         uint32_t generation;
+        int maxDequeueBuffers;
     };
     Mutexed<OutputSurface> mOutputSurface;
 
@@ -266,79 +267,7 @@
 
     MetaMode mMetaMode;
 
-    // PipelineCapacity is used in the input buffer gating logic.
-    //
-    // There are two criteria that need to be met before
-    // onInputBufferAvailable() is called:
-    // 1. The number of input buffers that have been received by
-    //    CCodecBufferChannel but not returned via onWorkDone() or
-    //    onInputBufferDone() does not exceed a certain limit. (Let us call this
-    //    number the "input" capacity.)
-    // 2. The number of work items that have been received by
-    //    CCodecBufferChannel whose outputs have not been returned from the
-    //    component (by calling onWorkDone()) does not exceed a certain limit.
-    //    (Let us call this the "component" capacity.)
-    //
-    // These two criteria guarantee that a new input buffer that arrives from
-    // the invocation of onInputBufferAvailable() will not
-    // 1. overload CCodecBufferChannel's input buffers; or
-    // 2. overload the component.
-    //
-    struct PipelineCapacity {
-        // The number of available input capacity.
-        std::atomic_int input;
-        // The number of available component capacity.
-        std::atomic_int component;
-
-        PipelineCapacity();
-        // Set the values of #input and #component.
-        void initialize(int newInput, int newComponent,
-                        const char* newName = "<UNKNOWN COMPONENT>",
-                        const char* callerTag = nullptr);
-
-        // Return true and decrease #input and #component by one if
-        // they are all greater than zero; return false otherwise.
-        //
-        // callerTag is used for logging only.
-        //
-        // allocate() is called by CCodecBufferChannel to check whether it can
-        // receive another input buffer. If the return value is true,
-        // onInputBufferAvailable() and onOutputBufferAvailable() can be called
-        // afterwards.
-        bool allocate(const char* callerTag = nullptr);
-
-        // Increase #input and #component by one.
-        //
-        // callerTag is used for logging only.
-        //
-        // free() is called by CCodecBufferChannel after allocate() returns true
-        // but onInputBufferAvailable() cannot be called for any reasons. It
-        // essentially undoes an allocate() call.
-        void free(const char* callerTag = nullptr);
-
-        // Increase #input by @p numDiscardedInputBuffers.
-        //
-        // callerTag is used for logging only.
-        //
-        // freeInputSlots() is called by CCodecBufferChannel when onWorkDone()
-        // or onInputBufferDone() is called. @p numDiscardedInputBuffers is
-        // provided in onWorkDone(), and is 1 in onInputBufferDone().
-        int freeInputSlots(size_t numDiscardedInputBuffers,
-                           const char* callerTag = nullptr);
-
-        // Increase #component by one and return the updated value.
-        //
-        // callerTag is used for logging only.
-        //
-        // freeComponentSlot() is called by CCodecBufferChannel when
-        // onWorkDone() is called.
-        int freeComponentSlot(const char* callerTag = nullptr);
-
-    private:
-        // Component name. Used for logging.
-        const char* mName;
-    };
-    PipelineCapacity mAvailablePipelineCapacity;
+    Mutexed<PipelineWatcher> mPipelineWatcher;
 
     class ReorderStash {
     public:
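
CCodecBufferChannel now keeps a Mutexed<PipelineWatcher> in place of the atomic PipelineCapacity counters. PipelineWatcher itself lives in PipelineWatcher.h and is not shown in this change; the sketch below is only an illustration of the bookkeeping contract the channel relies on (record the queue time per frame index, drop it on work done, report the age of pending work beyond a tolerated backlog). The real watcher also tracks the queued C2Buffers so that onInputBufferReleased() can hand them back; that part is omitted here:

    #include <algorithm>
    #include <chrono>
    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <map>
    #include <vector>

    class WatcherSketch {
    public:
        using Clock = std::chrono::steady_clock;

        void onWorkQueued(uint64_t frameIndex, Clock::time_point at) { mQueued[frameIndex] = at; }
        void onWorkDone(uint64_t frameIndex) { mQueued.erase(frameIndex); }

        // Age of the oldest pending work after skipping the n longest-pending
        // entries; the pipeline may legitimately hold up to n works (the
        // combined input/pipeline/output delay) without being stuck.
        Clock::duration elapsed(Clock::time_point now, size_t n) const {
            std::vector<Clock::duration> ages;
            for (const auto &entry : mQueued) {
                ages.push_back(now - entry.second);
            }
            if (ages.size() <= n) {
                return Clock::duration::zero();
            }
            std::sort(ages.begin(), ages.end(), std::greater<>());
            return ages[n];
        }

    private:
        std::map<uint64_t, Clock::time_point> mQueued;  // frameIndex -> time queued
    };

    int main() {
        WatcherSketch watcher;
        watcher.onWorkQueued(0, WatcherSketch::Clock::now());
        watcher.onWorkDone(0);
        return watcher.elapsed(WatcherSketch::Clock::now(), 4)
                == WatcherSketch::Clock::duration::zero() ? 0 : 1;
    }
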
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 2dec42e..0fd5731 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -84,17 +84,7 @@
 }
 
 void Codec2Buffer::setImageData(const sp<ABuffer> &imageData) {
-    meta()->setBuffer("image-data", imageData);
-    format()->setBuffer("image-data", imageData);
-    MediaImage2 *img = (MediaImage2*)imageData->data();
-    if (img->mNumPlanes > 0 && img->mType != img->MEDIA_IMAGE_TYPE_UNKNOWN) {
-        int32_t stride = img->mPlane[0].mRowInc;
-        format()->setInt32(KEY_STRIDE, stride);
-        if (img->mNumPlanes > 1 && stride > 0) {
-            int32_t vstride = (img->mPlane[1].mOffset - img->mPlane[0].mOffset) / stride;
-            format()->setInt32(KEY_SLICE_HEIGHT, vstride);
-        }
-    }
+    mImageData = imageData;
 }
 
 // LocalLinearBuffer
@@ -234,6 +224,7 @@
             mInitCheck = BAD_VALUE;
             return;
         }
+        memset(mediaImage, 0, sizeof(*mediaImage));
         mAllocatedDepth = layout.planes[0].allocatedDepth;
         uint32_t bitDepth = layout.planes[0].bitDepth;
 
@@ -546,7 +537,6 @@
     : Codec2Buffer(format, buffer),
       mView(view),
       mBlock(block),
-      mImageData(imageData),
       mWrapped(wrapped) {
     setImageData(imageData);
 }
@@ -683,9 +673,7 @@
       mView(std::move(view)),
       mBufferRef(buffer),
       mWrapped(wrapped) {
-    if (imageData != nullptr) {
-        setImageData(imageData);
-    }
+    setImageData(imageData);
 }
 
 std::shared_ptr<C2Buffer> ConstGraphicBlockBuffer::asC2Buffer() {
diff --git a/media/codec2/sfplugin/Codec2Buffer.h b/media/codec2/sfplugin/Codec2Buffer.h
index 481975f..dd618aa 100644
--- a/media/codec2/sfplugin/Codec2Buffer.h
+++ b/media/codec2/sfplugin/Codec2Buffer.h
@@ -23,6 +23,7 @@
 #include <android/hardware/cas/native/1.0/types.h>
 #include <binder/IMemory.h>
 #include <media/hardware/VideoAPI.h>
+#include <media/stagefright/foundation/ABuffer.h>
 #include <media/MediaCodecBuffer.h>
 #include <media/ICrypto.h>
 
@@ -85,6 +86,8 @@
         return false;
     }
 
+    sp<ABuffer> getImageData() const { return mImageData; }
+
 protected:
     /**
      * canCopy() implementation for linear buffers.
@@ -100,6 +103,8 @@
      * sets MediaImage data for flexible graphic buffers
      */
     void setImageData(const sp<ABuffer> &imageData);
+
+    sp<ABuffer> mImageData;
 };
 
 /**
@@ -239,7 +244,6 @@
 
     C2GraphicView mView;
     std::shared_ptr<C2GraphicBlock> mBlock;
-    sp<ABuffer> mImageData;
     const bool mWrapped;
 };
 
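setImageData() now merely stores the image data; publishing it happens later in handleImageData() (CCodecBufferChannel.cpp above), which derives KEY_STRIDE from plane 0's row increment and KEY_SLICE_HEIGHT from the plane 0 / plane 1 offset distance divided by that stride. A standalone sketch of that arithmetic, with trimmed stand-ins for MediaImage2 and made-up plane values:

    #include <cstdint>
    #include <iostream>

    struct Plane { uint32_t offset; int32_t rowInc; };   // trimmed MediaImage2 plane
    struct Image { uint32_t numPlanes; Plane plane[3]; };

    int main() {
        // e.g. a 1280x720 YUV420 layout with 1280-byte luma rows (made-up values)
        Image img = { 3, { {0, 1280}, {1280 * 720, 640}, {1280 * 720 * 5 / 4, 640} } };
        if (img.numPlanes > 0) {
            int32_t stride = img.plane[0].rowInc;                       // -> KEY_STRIDE
            std::cout << "stride = " << stride << "\n";
            if (img.numPlanes > 1 && stride > 0) {
                int32_t vstride = (img.plane[1].offset - img.plane[0].offset) / stride;
                std::cout << "slice-height = " << vstride << "\n";      // -> KEY_SLICE_HEIGHT
            }
        }
        return 0;
    }
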
diff --git a/media/codec2/sfplugin/Codec2InfoBuilder.cpp b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
index 5f0dd0b..ead0a9b 100644
--- a/media/codec2/sfplugin/Codec2InfoBuilder.cpp
+++ b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
@@ -68,262 +68,146 @@
             s.compare(s.size() - suffixLen, suffixLen, suffix) == 0;
 }
 
-// Constants from ACodec
-constexpr OMX_U32 kPortIndexInput = 0;
-constexpr OMX_U32 kPortIndexOutput = 1;
-constexpr OMX_U32 kMaxIndicesToCheck = 32;
+void addSupportedProfileLevels(
+        std::shared_ptr<Codec2Client::Interface> intf,
+        MediaCodecInfo::CapabilitiesWriter *caps,
+        const Traits& trait, const std::string &mediaType) {
+    std::shared_ptr<C2Mapper::ProfileLevelMapper> mapper =
+        C2Mapper::GetProfileLevelMapper(trait.mediaType);
+    // if we don't know the media type, pass through all values unmapped
 
-status_t queryOmxCapabilities(
-        const char* name, const char* mediaType, bool isEncoder,
-        MediaCodecInfo::CapabilitiesWriter* caps) {
-
-    const char *role = GetComponentRole(isEncoder, mediaType);
-    if (role == nullptr) {
-        return BAD_VALUE;
-    }
-
-    using namespace ::android::hardware::media::omx::V1_0;
-    using ::android::hardware::Return;
-    using ::android::hardware::Void;
-    using ::android::hardware::hidl_vec;
-    using ::android::hardware::media::omx::V1_0::utils::LWOmxNode;
-
-    sp<IOmx> omx = IOmx::getService();
-    if (!omx) {
-        ALOGW("Could not obtain IOmx service.");
-        return NO_INIT;
-    }
-
-    struct Observer : IOmxObserver {
-        virtual Return<void> onMessages(const hidl_vec<Message>&) override {
-            return Void();
-        }
+    // TODO: we cannot find levels that are local 'maxima' without knowing the coding
+    // standard; e.g. for H.263, level 45 and level 30 could both be a highest level,
+    // as neither includes the other. For now we use the last supported value.
+    bool encoder = trait.kind == C2Component::KIND_ENCODER;
+    C2StreamProfileLevelInfo pl(encoder /* output */, 0u);
+    std::vector<C2FieldSupportedValuesQuery> profileQuery = {
+        C2FieldSupportedValuesQuery::Possible(C2ParamField(&pl, &pl.profile))
     };
 
-    sp<Observer> observer = new Observer();
-    Status status;
-    sp<IOmxNode> tOmxNode;
-    Return<void> transStatus = omx->allocateNode(
-            name, observer,
-            [&status, &tOmxNode](Status s, const sp<IOmxNode>& n) {
-                status = s;
-                tOmxNode = n;
-            });
-    if (!transStatus.isOk()) {
-        ALOGW("IOmx::allocateNode -- transaction failed.");
-        return NO_INIT;
-    }
-    if (status != Status::OK) {
-        ALOGW("IOmx::allocateNode -- error returned: %d.",
-                static_cast<int>(status));
-        return NO_INIT;
+    c2_status_t err = intf->querySupportedValues(profileQuery, C2_DONT_BLOCK);
+    ALOGV("query supported profiles -> %s | %s", asString(err), asString(profileQuery[0].status));
+    if (err != C2_OK || profileQuery[0].status != C2_OK) {
+        return;
     }
 
-    sp<LWOmxNode> omxNode = new LWOmxNode(tOmxNode);
-
-    status_t err = SetComponentRole(omxNode, role);
-    if (err != OK) {
-        omxNode->freeNode();
-        ALOGW("Failed to SetComponentRole: component = %s, role = %s.",
-                name, role);
-        return err;
+    // we only handle enumerated values
+    if (profileQuery[0].values.type != C2FieldSupportedValues::VALUES) {
+        return;
     }
 
-    bool isVideo = hasPrefix(mediaType, "video/") == 0;
-    bool isImage = hasPrefix(mediaType, "image/") == 0;
+    // determine if codec supports HDR
+    bool supportsHdr = false;
+    bool supportsHdr10Plus = false;
 
-    if (isVideo || isImage) {
-        OMX_VIDEO_PARAM_PROFILELEVELTYPE param;
-        InitOMXParams(&param);
-        param.nPortIndex = isEncoder ? kPortIndexOutput : kPortIndexInput;
-
-        for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
-            param.nProfileIndex = index;
-            status_t err = omxNode->getParameter(
-                    OMX_IndexParamVideoProfileLevelQuerySupported,
-                    &param, sizeof(param));
-            if (err != OK) {
+    std::vector<std::shared_ptr<C2ParamDescriptor>> paramDescs;
+    c2_status_t err1 = intf->querySupportedParams(&paramDescs);
+    if (err1 == C2_OK) {
+        for (const std::shared_ptr<C2ParamDescriptor> &desc : paramDescs) {
+            switch ((uint32_t)desc->index()) {
+            case C2StreamHdr10PlusInfo::output::PARAM_TYPE:
+                supportsHdr10Plus = true;
+                break;
+            case C2StreamHdrStaticInfo::output::PARAM_TYPE:
+                supportsHdr = true;
+                break;
+            default:
                 break;
             }
-            caps->addProfileLevel(param.eProfile, param.eLevel);
-
-            // AVC components may not list the constrained profiles explicitly, but
-            // decoders that support a profile also support its constrained version.
-            // Encoders must explicitly support constrained profiles.
-            if (!isEncoder && strcasecmp(mediaType, MEDIA_MIMETYPE_VIDEO_AVC) == 0) {
-                if (param.eProfile == OMX_VIDEO_AVCProfileHigh) {
-                    caps->addProfileLevel(OMX_VIDEO_AVCProfileConstrainedHigh, param.eLevel);
-                } else if (param.eProfile == OMX_VIDEO_AVCProfileBaseline) {
-                    caps->addProfileLevel(OMX_VIDEO_AVCProfileConstrainedBaseline, param.eLevel);
-                }
-            }
-
-            if (index == kMaxIndicesToCheck) {
-                ALOGW("[%s] stopping checking profiles after %u: %x/%x",
-                        name, index,
-                        param.eProfile, param.eLevel);
-            }
-        }
-
-        // Color format query
-        // return colors in the order reported by the OMX component
-        // prefix "flexible" standard ones with the flexible equivalent
-        OMX_VIDEO_PARAM_PORTFORMATTYPE portFormat;
-        InitOMXParams(&portFormat);
-        portFormat.nPortIndex = isEncoder ? kPortIndexInput : kPortIndexOutput;
-        for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
-            portFormat.nIndex = index;
-            status_t err = omxNode->getParameter(
-                    OMX_IndexParamVideoPortFormat,
-                    &portFormat, sizeof(portFormat));
-            if (err != OK) {
-                break;
-            }
-
-            OMX_U32 flexibleEquivalent;
-            if (IsFlexibleColorFormat(
-                    omxNode, portFormat.eColorFormat, false /* usingNativeWindow */,
-                    &flexibleEquivalent)) {
-                caps->addColorFormat(flexibleEquivalent);
-            }
-            caps->addColorFormat(portFormat.eColorFormat);
-
-            if (index == kMaxIndicesToCheck) {
-                ALOGW("[%s] stopping checking formats after %u: %s(%x)",
-                        name, index,
-                        asString(portFormat.eColorFormat), portFormat.eColorFormat);
-            }
-        }
-    } else if (strcasecmp(mediaType, MEDIA_MIMETYPE_AUDIO_AAC) == 0) {
-        // More audio codecs if they have profiles.
-        OMX_AUDIO_PARAM_ANDROID_PROFILETYPE param;
-        InitOMXParams(&param);
-        param.nPortIndex = isEncoder ? kPortIndexOutput : kPortIndexInput;
-        for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
-            param.nProfileIndex = index;
-            status_t err = omxNode->getParameter(
-                    (OMX_INDEXTYPE)OMX_IndexParamAudioProfileQuerySupported,
-                    &param, sizeof(param));
-            if (err != OK) {
-                break;
-            }
-            // For audio, level is ignored.
-            caps->addProfileLevel(param.eProfile, 0 /* level */);
-
-            if (index == kMaxIndicesToCheck) {
-                ALOGW("[%s] stopping checking profiles after %u: %x",
-                        name, index,
-                        param.eProfile);
-            }
-        }
-
-        // NOTE: Without Android extensions, OMX does not provide a way to query
-        // AAC profile support
-        if (param.nProfileIndex == 0) {
-            ALOGW("component %s doesn't support profile query.", name);
         }
     }
 
-    if (isVideo && !isEncoder) {
-        native_handle_t *sidebandHandle = nullptr;
-        if (omxNode->configureVideoTunnelMode(
-                kPortIndexOutput, OMX_TRUE, 0, &sidebandHandle) == OK) {
-            // tunneled playback includes adaptive playback
-        } else {
-            // tunneled playback is not supported
-            caps->removeDetail(MediaCodecInfo::Capabilities::FEATURE_TUNNELED_PLAYBACK);
-            if (omxNode->setPortMode(
-                    kPortIndexOutput, IOMX::kPortModeDynamicANWBuffer) == OK ||
-                    omxNode->prepareForAdaptivePlayback(
-                            kPortIndexOutput, OMX_TRUE,
-                            1280 /* width */, 720 /* height */) != OK) {
-                // adaptive playback is not supported
-                caps->removeDetail(MediaCodecInfo::Capabilities::FEATURE_ADAPTIVE_PLAYBACK);
-            }
-        }
-    }
+    // For VP9, the static info is always propagated by framework.
+    supportsHdr |= (mediaType == MIMETYPE_VIDEO_VP9);
 
-    if (isVideo && isEncoder) {
-        OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE params;
-        InitOMXParams(&params);
-        params.nPortIndex = kPortIndexOutput;
-
-        OMX_VIDEO_PARAM_INTRAREFRESHTYPE fallbackParams;
-        InitOMXParams(&fallbackParams);
-        fallbackParams.nPortIndex = kPortIndexOutput;
-        fallbackParams.eRefreshMode = OMX_VIDEO_IntraRefreshCyclic;
-
-        if (omxNode->getConfig(
-                (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh,
-                &params, sizeof(params)) != OK &&
-                omxNode->getParameter(
-                    OMX_IndexParamVideoIntraRefresh, &fallbackParams,
-                    sizeof(fallbackParams)) != OK) {
-            // intra refresh is not supported
-            caps->removeDetail(MediaCodecInfo::Capabilities::FEATURE_INTRA_REFRESH);
-        }
-    }
-
-    omxNode->freeNode();
-    return OK;
-}
-
-void buildOmxInfo(const MediaCodecsXmlParser& parser,
-                  MediaCodecListWriter* writer) {
-    uint32_t omxRank = ::android::base::GetUintProperty(
-            "debug.stagefright.omx_default_rank", uint32_t(0x100));
-    for (const MediaCodecsXmlParser::Codec& codec : parser.getCodecMap()) {
-        const std::string &name = codec.first;
-        if (!hasPrefix(codec.first, "OMX.")) {
+    for (C2Value::Primitive profile : profileQuery[0].values.values) {
+        pl.profile = (C2Config::profile_t)profile.ref<uint32_t>();
+        std::vector<std::unique_ptr<C2SettingResult>> failures;
+        err = intf->config({&pl}, C2_DONT_BLOCK, &failures);
+        ALOGV("set profile to %u -> %s", pl.profile, asString(err));
+        std::vector<C2FieldSupportedValuesQuery> levelQuery = {
+            C2FieldSupportedValuesQuery::Current(C2ParamField(&pl, &pl.level))
+        };
+        err = intf->querySupportedValues(levelQuery, C2_DONT_BLOCK);
+        ALOGV("query supported levels -> %s | %s", asString(err), asString(levelQuery[0].status));
+        if (err != C2_OK || levelQuery[0].status != C2_OK
+                || levelQuery[0].values.type != C2FieldSupportedValues::VALUES
+                || levelQuery[0].values.values.size() == 0) {
             continue;
         }
-        const MediaCodecsXmlParser::CodecProperties &properties = codec.second;
-        bool encoder = properties.isEncoder;
-        std::unique_ptr<MediaCodecInfoWriter> info =
-                writer->addMediaCodecInfo();
-        info->setName(name.c_str());
-        info->setOwner("default");
-        typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs = 0;
-        if (encoder) {
-            attrs |= MediaCodecInfo::kFlagIsEncoder;
-        }
-        // NOTE: we don't support software-only codecs in OMX
-        if (!hasPrefix(name, "OMX.google.")) {
-            attrs |= MediaCodecInfo::kFlagIsVendor;
-            if (properties.quirkSet.find("attribute::software-codec")
-                    == properties.quirkSet.end()) {
-                attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
-            }
-        }
-        info->setAttributes(attrs);
-        info->setRank(omxRank);
-        // OMX components don't have aliases
-        for (const MediaCodecsXmlParser::Type &type : properties.typeMap) {
-            const std::string &mediaType = type.first;
-            std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
-                    info->addMediaType(mediaType.c_str());
-            const MediaCodecsXmlParser::AttributeMap &attrMap = type.second;
-            for (const MediaCodecsXmlParser::Attribute& attr : attrMap) {
-                const std::string &key = attr.first;
-                const std::string &value = attr.second;
-                if (hasPrefix(key, "feature-") &&
-                        !hasPrefix(key, "feature-bitrate-modes")) {
-                    caps->addDetail(key.c_str(), hasPrefix(value, "1") ? 1 : 0);
-                } else {
-                    caps->addDetail(key.c_str(), value.c_str());
+
+        C2Value::Primitive level = levelQuery[0].values.values.back();
+        pl.level = (C2Config::level_t)level.ref<uint32_t>();
+        ALOGV("supporting level: %u", pl.level);
+        int32_t sdkProfile, sdkLevel;
+        if (mapper && mapper->mapProfile(pl.profile, &sdkProfile)
+                && mapper->mapLevel(pl.level, &sdkLevel)) {
+            caps->addProfileLevel((uint32_t)sdkProfile, (uint32_t)sdkLevel);
+            // also list HDR profiles if component supports HDR
+            if (supportsHdr) {
+                auto hdrMapper = C2Mapper::GetHdrProfileLevelMapper(trait.mediaType);
+                if (hdrMapper && hdrMapper->mapProfile(pl.profile, &sdkProfile)) {
+                    caps->addProfileLevel((uint32_t)sdkProfile, (uint32_t)sdkLevel);
+                }
+                if (supportsHdr10Plus) {
+                    hdrMapper = C2Mapper::GetHdrProfileLevelMapper(
+                            trait.mediaType, true /*isHdr10Plus*/);
+                    if (hdrMapper && hdrMapper->mapProfile(pl.profile, &sdkProfile)) {
+                        caps->addProfileLevel((uint32_t)sdkProfile, (uint32_t)sdkLevel);
+                    }
                 }
             }
-            status_t err = queryOmxCapabilities(
-                    name.c_str(),
-                    mediaType.c_str(),
-                    encoder,
-                    caps.get());
-            if (err != OK) {
-                ALOGI("Failed to query capabilities for %s (media type: %s). Error: %d",
-                        name.c_str(),
-                        mediaType.c_str(),
-                        static_cast<int>(err));
+        } else if (!mapper) {
+            caps->addProfileLevel(pl.profile, pl.level);
+        }
+
+        // for H.263 also advertise the second highest level if the
+        // codec supports level 45, as level 45 only covers level 10
+        // TODO: move this to some form of a setting so it does not
+        // have to be here
+        if (mediaType == MIMETYPE_VIDEO_H263) {
+            C2Config::level_t nextLevel = C2Config::LEVEL_UNUSED;
+            for (C2Value::Primitive v : levelQuery[0].values.values) {
+                C2Config::level_t level = (C2Config::level_t)v.ref<uint32_t>();
+                if (level < C2Config::LEVEL_H263_45 && level > nextLevel) {
+                    nextLevel = level;
+                }
             }
+            if (nextLevel != C2Config::LEVEL_UNUSED
+                    && nextLevel != pl.level
+                    && mapper
+                    && mapper->mapProfile(pl.profile, &sdkProfile)
+                    && mapper->mapLevel(nextLevel, &sdkLevel)) {
+                caps->addProfileLevel(
+                        (uint32_t)sdkProfile, (uint32_t)sdkLevel);
+            }
+        }
+    }
+}
+
+void addSupportedColorFormats(
+        std::shared_ptr<Codec2Client::Interface> intf,
+        MediaCodecInfo::CapabilitiesWriter *caps,
+        const Traits& trait, const std::string &mediaType) {
+    (void)intf;
+
+    // TODO: get this from intf() as well, but how do we map them to
+    // MediaCodec color formats?
+    bool encoder = trait.kind == C2Component::KIND_ENCODER;
+    if (mediaType.find("video") != std::string::npos) {
+        // vendor video codecs prefer opaque format
+        if (trait.name.find("android") == std::string::npos) {
+            caps->addColorFormat(COLOR_FormatSurface);
+        }
+        caps->addColorFormat(COLOR_FormatYUV420Flexible);
+        caps->addColorFormat(COLOR_FormatYUV420Planar);
+        caps->addColorFormat(COLOR_FormatYUV420SemiPlanar);
+        caps->addColorFormat(COLOR_FormatYUV420PackedPlanar);
+        caps->addColorFormat(COLOR_FormatYUV420PackedSemiPlanar);
+        // framework video encoders must support surface format, though it is unclear
+        // whether they will be able to map it if it is opaque
+        if (encoder && trait.name.find("android") != std::string::npos) {
+            caps->addColorFormat(COLOR_FormatSurface);
         }
     }
 }
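
addSupportedProfileLevels() above walks the supported profiles, applies each one, queries the levels supported under it, and advertises the last (highest) level after mapping both to SDK constants. A simplified, self-contained model of that loop; the profile/level tables are made up and stand in for the C2 supported-values queries and the C2Mapper translation:

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <vector>

    int main() {
        // profile -> levels supported under that profile, ascending (made-up values)
        std::map<uint32_t, std::vector<uint32_t>> supported = {
            { 1 /* profile A */, { 10, 20, 30 } },
            { 2 /* profile B */, { 10, 20 } },
        };
        for (const auto &entry : supported) {
            if (entry.second.empty()) {
                continue;                           // no levels reported; skip this profile
            }
            uint32_t profile = entry.first;
            uint32_t level = entry.second.back();   // "last supported value" heuristic
            // A real implementation maps profile/level through C2Mapper before
            // calling MediaCodecInfo::CapabilitiesWriter::addProfileLevel().
            std::cout << "addProfileLevel(" << profile << ", " << level << ")\n";
        }
        return 0;
    }
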
@@ -335,7 +219,7 @@
     // properly. (Assume "full" behavior eventually.)
     //
     // debug.stagefright.ccodec supports 5 values.
-    //   0 - Only OMX components are available.
+    //   0 - No Codec 2.0 components are available.
     //   1 - Audio decoders and encoders with prefix "c2.android." are available
     //       and ranked first.
     //       All other components with prefix "c2.android." are available with
@@ -366,306 +250,156 @@
 
     MediaCodecsXmlParser parser(
             MediaCodecsXmlParser::defaultSearchDirs,
-            option == 0 ? "media_codecs.xml" :
-                          "media_codecs_c2.xml",
-            option == 0 ? "media_codecs_performance.xml" :
-                          "media_codecs_performance_c2.xml");
+            "media_codecs_c2.xml",
+            "media_codecs_performance_c2.xml");
     if (parser.getParsingStatus() != OK) {
         ALOGD("XML parser no good");
         return OK;
     }
 
-    bool surfaceTest(Codec2Client::CreateInputSurface());
-    if (option == 0 || (option != 4 && !surfaceTest)) {
-        buildOmxInfo(parser, writer);
-    }
-
     for (const Traits& trait : traits) {
         C2Component::rank_t rank = trait.rank;
 
-        std::shared_ptr<Codec2Client::Interface> intf =
-            Codec2Client::CreateInterfaceByName(trait.name.c_str());
-        if (!intf || parser.getCodecMap().count(intf->getName()) == 0) {
-            ALOGD("%s not found in xml", trait.name.c_str());
-            continue;
-        }
-        std::string canonName = intf->getName();
-
-        // TODO: Remove this block once all codecs are enabled by default.
-        switch (option) {
-        case 0:
-            continue;
-        case 1:
-            if (hasPrefix(canonName, "c2.vda.")) {
-                break;
+        // The interface must be accessible for us to list the component, and there
+        // must also be an XML entry for the codec. Codec aliases listed in the traits
+        // allow additional XML entries to be specified for each alias. These will
+        // be listed as separate codecs. If no XML entry is specified for an alias,
+        // it is treated as an additional alias of the XML entry for the interface
+        // name.
+        std::vector<std::string> nameAndAliases = trait.aliases;
+        nameAndAliases.insert(nameAndAliases.begin(), trait.name);
+        for (const std::string &nameOrAlias : nameAndAliases) {
+            bool isAlias = trait.name != nameOrAlias;
+            std::shared_ptr<Codec2Client::Interface> intf =
+                Codec2Client::CreateInterfaceByName(nameOrAlias.c_str());
+            if (!intf) {
+                ALOGD("could not create interface for %s'%s'",
+                        isAlias ? "alias " : "",
+                        nameOrAlias.c_str());
+                continue;
             }
-            if (hasPrefix(canonName, "c2.android.")) {
-                if (trait.domain == C2Component::DOMAIN_AUDIO) {
+            if (parser.getCodecMap().count(nameOrAlias) == 0) {
+                if (isAlias) {
+                    std::unique_ptr<MediaCodecInfoWriter> baseCodecInfo =
+                        writer->findMediaCodecInfo(trait.name.c_str());
+                    if (!baseCodecInfo) {
+                        ALOGD("alias '%s' not found in xml but canonical codec info '%s' missing",
+                                nameOrAlias.c_str(),
+                                trait.name.c_str());
+                    } else {
+                        ALOGD("alias '%s' not found in xml; use an XML <Alias> tag for this",
+                                nameOrAlias.c_str());
+                        // merge alias into existing codec
+                        baseCodecInfo->addAlias(nameOrAlias.c_str());
+                    }
+                } else {
+                    ALOGD("component '%s' not found in xml", trait.name.c_str());
+                }
+                continue;
+            }
+            std::string canonName = trait.name;
+
+            // TODO: Remove this block once all codecs are enabled by default.
+            switch (option) {
+            case 0:
+                continue;
+            case 1:
+                if (hasPrefix(canonName, "c2.vda.")) {
+                    break;
+                }
+                if (hasPrefix(canonName, "c2.android.")) {
+                    if (trait.domain == C2Component::DOMAIN_AUDIO) {
+                        rank = 1;
+                        break;
+                    }
+                    break;
+                }
+                if (hasSuffix(canonName, ".avc.decoder") ||
+                        hasSuffix(canonName, ".avc.encoder")) {
+                    rank = std::numeric_limits<decltype(rank)>::max();
+                    break;
+                }
+                continue;
+            case 2:
+                if (hasPrefix(canonName, "c2.vda.")) {
+                    break;
+                }
+                if (hasPrefix(canonName, "c2.android.")) {
                     rank = 1;
                     break;
                 }
+                if (hasSuffix(canonName, ".avc.decoder") ||
+                        hasSuffix(canonName, ".avc.encoder")) {
+                    rank = std::numeric_limits<decltype(rank)>::max();
+                    break;
+                }
+                continue;
+            case 3:
+                if (hasPrefix(canonName, "c2.android.")) {
+                    rank = 1;
+                }
                 break;
             }
-            if (hasSuffix(canonName, ".avc.decoder") ||
-                    hasSuffix(canonName, ".avc.encoder")) {
-                rank = std::numeric_limits<decltype(rank)>::max();
-                break;
-            }
-            continue;
-        case 2:
-            if (hasPrefix(canonName, "c2.vda.")) {
-                break;
-            }
-            if (hasPrefix(canonName, "c2.android.")) {
-                rank = 1;
-                break;
-            }
-            if (hasSuffix(canonName, ".avc.decoder") ||
-                    hasSuffix(canonName, ".avc.encoder")) {
-                rank = std::numeric_limits<decltype(rank)>::max();
-                break;
-            }
-            continue;
-        case 3:
-            if (hasPrefix(canonName, "c2.android.")) {
-                rank = 1;
-            }
-            break;
-        }
 
-        ALOGV("canonName = %s", canonName.c_str());
-        std::unique_ptr<MediaCodecInfoWriter> codecInfo = writer->addMediaCodecInfo();
-        codecInfo->setName(trait.name.c_str());
-        codecInfo->setOwner(("codec2::" + trait.owner).c_str());
-        const MediaCodecsXmlParser::CodecProperties &codec = parser.getCodecMap().at(canonName);
+            ALOGV("adding codec entry for '%s'", nameOrAlias.c_str());
+            std::unique_ptr<MediaCodecInfoWriter> codecInfo = writer->addMediaCodecInfo();
+            codecInfo->setName(nameOrAlias.c_str());
+            codecInfo->setOwner(("codec2::" + trait.owner).c_str());
+            const MediaCodecsXmlParser::CodecProperties &codec =
+                parser.getCodecMap().at(nameOrAlias);
 
-        bool encoder = trait.kind == C2Component::KIND_ENCODER;
-        typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs = 0;
+            bool encoder = trait.kind == C2Component::KIND_ENCODER;
+            typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs = 0;
 
-        if (encoder) {
-            attrs |= MediaCodecInfo::kFlagIsEncoder;
-        }
-        if (trait.owner == "software") {
-            attrs |= MediaCodecInfo::kFlagIsSoftwareOnly;
-        } else {
-            attrs |= MediaCodecInfo::kFlagIsVendor;
-            if (trait.owner == "vendor-software") {
+            if (encoder) {
+                attrs |= MediaCodecInfo::kFlagIsEncoder;
+            }
+            if (trait.owner == "software") {
                 attrs |= MediaCodecInfo::kFlagIsSoftwareOnly;
-            } else if (codec.quirkSet.find("attribute::software-codec") == codec.quirkSet.end()) {
-                attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
-            }
-        }
-        codecInfo->setAttributes(attrs);
-        codecInfo->setRank(rank);
-
-        for (const std::string &alias : codec.aliases) {
-            codecInfo->addAlias(alias.c_str());
-        }
-
-        for (auto typeIt = codec.typeMap.begin(); typeIt != codec.typeMap.end(); ++typeIt) {
-            const std::string &mediaType = typeIt->first;
-            const MediaCodecsXmlParser::AttributeMap &attrMap = typeIt->second;
-            std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
-                codecInfo->addMediaType(mediaType.c_str());
-            for (auto attrIt = attrMap.begin(); attrIt != attrMap.end(); ++attrIt) {
-                std::string key, value;
-                std::tie(key, value) = *attrIt;
-                if (key.find("feature-") == 0 && key.find("feature-bitrate-modes") != 0) {
-                    caps->addDetail(key.c_str(), std::stoi(value));
-                } else {
-                    caps->addDetail(key.c_str(), value.c_str());
+            } else {
+                attrs |= MediaCodecInfo::kFlagIsVendor;
+                if (trait.owner == "vendor-software") {
+                    attrs |= MediaCodecInfo::kFlagIsSoftwareOnly;
+                } else if (codec.quirkSet.find("attribute::software-codec")
+                        == codec.quirkSet.end()) {
+                    attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
                 }
             }
-
-            bool gotProfileLevels = false;
-            if (intf) {
-                std::shared_ptr<C2Mapper::ProfileLevelMapper> mapper =
-                    C2Mapper::GetProfileLevelMapper(trait.mediaType);
-                // if we don't know the media type, pass through all values unmapped
-
-                // TODO: we cannot find levels that are local 'maxima' without knowing the coding
-                // e.g. H.263 level 45 and level 30 could be two values for highest level as
-                // they don't include one another. For now we use the last supported value.
-                C2StreamProfileLevelInfo pl(encoder /* output */, 0u);
-                std::vector<C2FieldSupportedValuesQuery> profileQuery = {
-                    C2FieldSupportedValuesQuery::Possible(C2ParamField(&pl, &pl.profile))
-                };
-
-                c2_status_t err = intf->querySupportedValues(profileQuery, C2_DONT_BLOCK);
-                ALOGV("query supported profiles -> %s | %s",
-                        asString(err), asString(profileQuery[0].status));
-                if (err == C2_OK && profileQuery[0].status == C2_OK) {
-                    if (profileQuery[0].values.type == C2FieldSupportedValues::VALUES) {
-                        std::vector<std::shared_ptr<C2ParamDescriptor>> paramDescs;
-                        c2_status_t err1 = intf->querySupportedParams(&paramDescs);
-                        bool isHdr = false, isHdr10Plus = false;
-                        if (err1 == C2_OK) {
-                            for (const std::shared_ptr<C2ParamDescriptor> &desc : paramDescs) {
-                                if ((uint32_t)desc->index() ==
-                                        C2StreamHdr10PlusInfo::output::PARAM_TYPE) {
-                                    isHdr10Plus = true;
-                                } else if ((uint32_t)desc->index() ==
-                                        C2StreamHdrStaticInfo::output::PARAM_TYPE) {
-                                    isHdr = true;
-                                }
-                            }
-                        }
-                        // For VP9, the static info is always propagated by framework.
-                        isHdr |= (mediaType == MIMETYPE_VIDEO_VP9);
-
-                        for (C2Value::Primitive profile : profileQuery[0].values.values) {
-                            pl.profile = (C2Config::profile_t)profile.ref<uint32_t>();
-                            std::vector<std::unique_ptr<C2SettingResult>> failures;
-                            err = intf->config({&pl}, C2_DONT_BLOCK, &failures);
-                            ALOGV("set profile to %u -> %s", pl.profile, asString(err));
-                            std::vector<C2FieldSupportedValuesQuery> levelQuery = {
-                                C2FieldSupportedValuesQuery::Current(C2ParamField(&pl, &pl.level))
-                            };
-                            err = intf->querySupportedValues(levelQuery, C2_DONT_BLOCK);
-                            ALOGV("query supported levels -> %s | %s",
-                                    asString(err), asString(levelQuery[0].status));
-                            if (err == C2_OK && levelQuery[0].status == C2_OK) {
-                                if (levelQuery[0].values.type == C2FieldSupportedValues::VALUES
-                                        && levelQuery[0].values.values.size() > 0) {
-                                    C2Value::Primitive level = levelQuery[0].values.values.back();
-                                    pl.level = (C2Config::level_t)level.ref<uint32_t>();
-                                    ALOGV("supporting level: %u", pl.level);
-                                    int32_t sdkProfile, sdkLevel;
-                                    if (mapper && mapper->mapProfile(pl.profile, &sdkProfile)
-                                            && mapper->mapLevel(pl.level, &sdkLevel)) {
-                                        caps->addProfileLevel(
-                                                (uint32_t)sdkProfile, (uint32_t)sdkLevel);
-                                        gotProfileLevels = true;
-                                        if (isHdr) {
-                                            auto hdrMapper = C2Mapper::GetHdrProfileLevelMapper(
-                                                    trait.mediaType);
-                                            if (hdrMapper && hdrMapper->mapProfile(
-                                                    pl.profile, &sdkProfile)) {
-                                                caps->addProfileLevel(
-                                                        (uint32_t)sdkProfile,
-                                                        (uint32_t)sdkLevel);
-                                            }
-                                            if (isHdr10Plus) {
-                                                hdrMapper = C2Mapper::GetHdrProfileLevelMapper(
-                                                        trait.mediaType, true /*isHdr10Plus*/);
-                                                if (hdrMapper && hdrMapper->mapProfile(
-                                                        pl.profile, &sdkProfile)) {
-                                                    caps->addProfileLevel(
-                                                            (uint32_t)sdkProfile,
-                                                            (uint32_t)sdkLevel);
-                                                }
-                                            }
-                                        }
-                                    } else if (!mapper) {
-                                        caps->addProfileLevel(pl.profile, pl.level);
-                                        gotProfileLevels = true;
-                                    }
-
-                                    // for H.263 also advertise the second highest level if the
-                                    // codec supports level 45, as level 45 only covers level 10
-                                    // TODO: move this to some form of a setting so it does not
-                                    // have to be here
-                                    if (mediaType == MIMETYPE_VIDEO_H263) {
-                                        C2Config::level_t nextLevel = C2Config::LEVEL_UNUSED;
-                                        for (C2Value::Primitive v : levelQuery[0].values.values) {
-                                            C2Config::level_t level =
-                                                (C2Config::level_t)v.ref<uint32_t>();
-                                            if (level < C2Config::LEVEL_H263_45
-                                                    && level > nextLevel) {
-                                                nextLevel = level;
-                                            }
-                                        }
-                                        if (nextLevel != C2Config::LEVEL_UNUSED
-                                                && nextLevel != pl.level
-                                                && mapper
-                                                && mapper->mapProfile(pl.profile, &sdkProfile)
-                                                && mapper->mapLevel(nextLevel, &sdkLevel)) {
-                                            caps->addProfileLevel(
-                                                    (uint32_t)sdkProfile, (uint32_t)sdkLevel);
-                                        }
-                                    }
-                                }
-                            }
-                        }
-                    }
+            codecInfo->setAttributes(attrs);
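+            // A rank specified in the XML entry overrides the rank from the component
+            // traits; "%u%c" ensures the value is a single unsigned integer with no
+            // trailing characters.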
+            if (!codec.rank.empty()) {
+                uint32_t xmlRank;
+                char dummy;
+                if (sscanf(codec.rank.c_str(), "%u%c", &xmlRank, &dummy) == 1) {
+                    rank = xmlRank;
                 }
             }
+            codecInfo->setRank(rank);
 
-            if (!gotProfileLevels) {
-                if (mediaType == MIMETYPE_VIDEO_VP9) {
-                    if (encoder) {
-                        caps->addProfileLevel(VP9Profile0,    VP9Level41);
-                    } else {
-                        caps->addProfileLevel(VP9Profile0,    VP9Level5);
-                        caps->addProfileLevel(VP9Profile2,    VP9Level5);
-                        caps->addProfileLevel(VP9Profile2HDR, VP9Level5);
-                    }
-                } else if (mediaType == MIMETYPE_VIDEO_AV1 && !encoder) {
-                    caps->addProfileLevel(AV1Profile0,      AV1Level2);
-                    caps->addProfileLevel(AV1Profile0,      AV1Level21);
-                    caps->addProfileLevel(AV1Profile1,      AV1Level22);
-                    caps->addProfileLevel(AV1Profile1,      AV1Level3);
-                    caps->addProfileLevel(AV1Profile2,      AV1Level31);
-                    caps->addProfileLevel(AV1Profile2,      AV1Level32);
-                } else if (mediaType == MIMETYPE_VIDEO_HEVC && !encoder) {
-                    caps->addProfileLevel(HEVCProfileMain,      HEVCMainTierLevel51);
-                    caps->addProfileLevel(HEVCProfileMainStill, HEVCMainTierLevel51);
-                } else if (mediaType == MIMETYPE_VIDEO_VP8) {
-                    if (encoder) {
-                        caps->addProfileLevel(VP8ProfileMain, VP8Level_Version0);
-                    } else {
-                        caps->addProfileLevel(VP8ProfileMain, VP8Level_Version0);
-                    }
-                } else if (mediaType == MIMETYPE_VIDEO_AVC) {
-                    if (encoder) {
-                        caps->addProfileLevel(AVCProfileBaseline,            AVCLevel41);
-//                      caps->addProfileLevel(AVCProfileConstrainedBaseline, AVCLevel41);
-                        caps->addProfileLevel(AVCProfileMain,                AVCLevel41);
-                    } else {
-                        caps->addProfileLevel(AVCProfileBaseline,            AVCLevel52);
-                        caps->addProfileLevel(AVCProfileConstrainedBaseline, AVCLevel52);
-                        caps->addProfileLevel(AVCProfileMain,                AVCLevel52);
-                        caps->addProfileLevel(AVCProfileConstrainedHigh,     AVCLevel52);
-                        caps->addProfileLevel(AVCProfileHigh,                AVCLevel52);
-                    }
-                } else if (mediaType == MIMETYPE_VIDEO_MPEG4) {
-                    if (encoder) {
-                        caps->addProfileLevel(MPEG4ProfileSimple,  MPEG4Level2);
-                    } else {
-                        caps->addProfileLevel(MPEG4ProfileSimple,  MPEG4Level3);
-                    }
-                } else if (mediaType == MIMETYPE_VIDEO_H263) {
-                    if (encoder) {
-                        caps->addProfileLevel(H263ProfileBaseline, H263Level45);
-                    } else {
-                        caps->addProfileLevel(H263ProfileBaseline, H263Level30);
-                        caps->addProfileLevel(H263ProfileBaseline, H263Level45);
-                        caps->addProfileLevel(H263ProfileISWV2,    H263Level30);
-                        caps->addProfileLevel(H263ProfileISWV2,    H263Level45);
-                    }
-                } else if (mediaType == MIMETYPE_VIDEO_MPEG2 && !encoder) {
-                    caps->addProfileLevel(MPEG2ProfileSimple, MPEG2LevelHL);
-                    caps->addProfileLevel(MPEG2ProfileMain,   MPEG2LevelHL);
-                }
+            for (const std::string &alias : codec.aliases) {
+                ALOGV("adding alias '%s'", alias.c_str());
+                codecInfo->addAlias(alias.c_str());
             }
 
-            // TODO: get this from intf() as well, but how do we map them to
-            // MediaCodec color formats?
-            if (mediaType.find("video") != std::string::npos) {
-                // vendor video codecs prefer opaque format
-                if (trait.name.find("android") == std::string::npos) {
-                    caps->addColorFormat(COLOR_FormatSurface);
+            for (auto typeIt = codec.typeMap.begin(); typeIt != codec.typeMap.end(); ++typeIt) {
+                const std::string &mediaType = typeIt->first;
+                const MediaCodecsXmlParser::AttributeMap &attrMap = typeIt->second;
+                std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
+                    codecInfo->addMediaType(mediaType.c_str());
+                for (auto attrIt = attrMap.begin(); attrIt != attrMap.end(); ++attrIt) {
+                    std::string key, value;
+                    std::tie(key, value) = *attrIt;
+                    if (key.find("feature-") == 0 && key.find("feature-bitrate-modes") != 0) {
+                        int32_t intValue = 0;
+                        // Ignore trailing bad characters and default to 0.
+                        (void)sscanf(value.c_str(), "%d", &intValue);
+                        caps->addDetail(key.c_str(), intValue);
+                    } else {
+                        caps->addDetail(key.c_str(), value.c_str());
+                    }
                 }
-                caps->addColorFormat(COLOR_FormatYUV420Flexible);
-                caps->addColorFormat(COLOR_FormatYUV420Planar);
-                caps->addColorFormat(COLOR_FormatYUV420SemiPlanar);
-                caps->addColorFormat(COLOR_FormatYUV420PackedPlanar);
-                caps->addColorFormat(COLOR_FormatYUV420PackedSemiPlanar);
-                // framework video encoders must support surface format, though it is unclear
-                // that they will be able to map it if it is opaque
-                if (encoder && trait.name.find("android") != std::string::npos) {
-                    caps->addColorFormat(COLOR_FormatSurface);
-                }
+
+                addSupportedProfileLevels(intf, caps.get(), trait, mediaType);
+                addSupportedColorFormats(intf, caps.get(), trait, mediaType);
             }
         }
     }
@@ -677,4 +411,3 @@
 extern "C" android::MediaCodecListBuilderBase *CreateBuilder() {
     return new android::Codec2InfoBuilder;
 }
-
diff --git a/media/codec2/sfplugin/PipelineWatcher.cpp b/media/codec2/sfplugin/PipelineWatcher.cpp
new file mode 100644
index 0000000..df81d49
--- /dev/null
+++ b/media/codec2/sfplugin/PipelineWatcher.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "PipelineWatcher"
+
+#include <algorithm>
+#include <numeric>
+
+#include <log/log.h>
+
+#include "PipelineWatcher.h"
+
+namespace android {
+
+PipelineWatcher &PipelineWatcher::inputDelay(uint32_t value) {
+    mInputDelay = value;
+    return *this;
+}
+
+PipelineWatcher &PipelineWatcher::pipelineDelay(uint32_t value) {
+    mPipelineDelay = value;
+    return *this;
+}
+
+PipelineWatcher &PipelineWatcher::outputDelay(uint32_t value) {
+    mOutputDelay = value;
+    return *this;
+}
+
+PipelineWatcher &PipelineWatcher::smoothnessFactor(uint32_t value) {
+    mSmoothnessFactor = value;
+    return *this;
+}
+
+void PipelineWatcher::onWorkQueued(
+        uint64_t frameIndex,
+        std::vector<std::shared_ptr<C2Buffer>> &&buffers,
+        const Clock::time_point &queuedAt) {
+    ALOGV("onWorkQueued(frameIndex=%llu, buffers(size=%zu), queuedAt=%lld)",
+          (unsigned long long)frameIndex,
+          buffers.size(),
+          (long long)queuedAt.time_since_epoch().count());
+    auto it = mFramesInPipeline.find(frameIndex);
+    if (it != mFramesInPipeline.end()) {
+        ALOGD("onWorkQueued: Duplicate frame index (%llu); previous entry removed",
+              (unsigned long long)frameIndex);
+        (void)mFramesInPipeline.erase(it);
+    }
+    (void)mFramesInPipeline.try_emplace(frameIndex, std::move(buffers), queuedAt);
+}
+
+std::shared_ptr<C2Buffer> PipelineWatcher::onInputBufferReleased(
+        uint64_t frameIndex, size_t arrayIndex) {
+    ALOGV("onInputBufferReleased(frameIndex=%llu, arrayIndex=%zu)",
+          (unsigned long long)frameIndex, arrayIndex);
+    auto it = mFramesInPipeline.find(frameIndex);
+    if (it == mFramesInPipeline.end()) {
+        ALOGD("onInputBufferReleased: frameIndex not found (%llu); ignored",
+              (unsigned long long)frameIndex);
+        return nullptr;
+    }
+    if (it->second.buffers.size() <= arrayIndex) {
+        ALOGD("onInputBufferReleased: buffers at %llu: size %zu, requested index: %zu",
+              (unsigned long long)frameIndex, it->second.buffers.size(), arrayIndex);
+        return nullptr;
+    }
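+    // Moving the buffer out leaves a null entry behind, marking this input as released.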
+    std::shared_ptr<C2Buffer> buffer(std::move(it->second.buffers[arrayIndex]));
+    ALOGD_IF(!buffer, "onInputBufferReleased: buffer already released (%llu:%zu)",
+             (unsigned long long)frameIndex, arrayIndex);
+    return buffer;
+}
+
+void PipelineWatcher::onWorkDone(uint64_t frameIndex) {
+    ALOGV("onWorkDone(frameIndex=%llu)", (unsigned long long)frameIndex);
+    auto it = mFramesInPipeline.find(frameIndex);
+    if (it == mFramesInPipeline.end()) {
+        ALOGD("onWorkDone: frameIndex not found (%llu); ignored",
+              (unsigned long long)frameIndex);
+        return;
+    }
+    (void)mFramesInPipeline.erase(it);
+}
+
+void PipelineWatcher::flush() {
+    mFramesInPipeline.clear();
+}
+
+bool PipelineWatcher::pipelineFull() const {
+    if (mFramesInPipeline.size() >=
+            mInputDelay + mPipelineDelay + mOutputDelay + mSmoothnessFactor) {
+        ALOGV("pipelineFull: too many frames in pipeline (%zu)", mFramesInPipeline.size());
+        return true;
+    }
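+    // Frames whose input buffers have all been released no longer count against the
+    // input delay; check them against the remaining pipeline and output budget only.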
+    size_t sizeWithInputReleased = std::count_if(
+            mFramesInPipeline.begin(),
+            mFramesInPipeline.end(),
+            [](const decltype(mFramesInPipeline)::value_type &value) {
+                for (const std::shared_ptr<C2Buffer> &buffer : value.second.buffers) {
+                    if (buffer) {
+                        return false;
+                    }
+                }
+                return true;
+            });
+    if (sizeWithInputReleased >=
+            mPipelineDelay + mOutputDelay + mSmoothnessFactor) {
+        ALOGV("pipelineFull: too many frames in pipeline, with input released (%zu)",
+              sizeWithInputReleased);
+        return true;
+    }
+    ALOGV("pipeline has room (total: %zu, input released: %zu)",
+          mFramesInPipeline.size(), sizeWithInputReleased);
+    return false;
+}
+
+PipelineWatcher::Clock::duration PipelineWatcher::elapsed(
+        const PipelineWatcher::Clock::time_point &now, size_t n) const {
+    if (mFramesInPipeline.size() <= n) {
+        return Clock::duration::zero();
+    }
+    std::vector<Clock::duration> durations;
+    for (const decltype(mFramesInPipeline)::value_type &value : mFramesInPipeline) {
+        Clock::duration elapsed = now - value.second.queuedAt;
+        ALOGV("elapsed: frameIndex = %llu elapsed = %lldms",
+              (unsigned long long)value.first,
+              std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count());
+        durations.push_back(elapsed);
+    }
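+    // Partially sort in descending order so durations[n] holds the (n+1)-th longest
+    // elapsed time (n is 0-based).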
+    std::nth_element(durations.begin(), durations.begin() + n, durations.end(),
+                     std::greater<Clock::duration>());
+    return durations[n];
+}
+
+}  // namespace android
diff --git a/media/codec2/sfplugin/PipelineWatcher.h b/media/codec2/sfplugin/PipelineWatcher.h
new file mode 100644
index 0000000..1e23147
--- /dev/null
+++ b/media/codec2/sfplugin/PipelineWatcher.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PIPELINE_WATCHER_H_
+#define PIPELINE_WATCHER_H_
+
+#include <chrono>
+#include <map>
+#include <memory>
+
+#include <C2Work.h>
+
+namespace android {
+
+/**
+ * PipelineWatcher watches the pipeline and infers the status of work items from
+ * events.
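+ *
+ * A rough usage sketch (illustrative values; the CCodec-side wiring is assumed):
+ *   PipelineWatcher watcher;
+ *   watcher.inputDelay(4).pipelineDelay(0).outputDelay(8).smoothnessFactor(4);
+ *   watcher.onWorkQueued(index, std::move(inputBuffers), PipelineWatcher::Clock::now());
+ *   // later: onInputBufferReleased(index, 0), then onWorkDone(index) when finished
+ *   bool full = watcher.pipelineFull();  // true when no more work should be queued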
+ */
+class PipelineWatcher {
+public:
+    typedef std::chrono::steady_clock Clock;
+
+    PipelineWatcher()
+        : mInputDelay(0),
+          mPipelineDelay(0),
+          mOutputDelay(0),
+          mSmoothnessFactor(0) {}
+    ~PipelineWatcher() = default;
+
+    /**
+     * \param value the new input delay value
+     * \return  this object
+     */
+    PipelineWatcher &inputDelay(uint32_t value);
+
+    /**
+     * \param value the new pipeline delay value
+     * \return  this object
+     */
+    PipelineWatcher &pipelineDelay(uint32_t value);
+
+    /**
+     * \param value the new output delay value
+     * \return  this object
+     */
+    PipelineWatcher &outputDelay(uint32_t value);
+
+    /**
+     * \param value the new smoothness factor value
+     * \return  this object
+     */
+    PipelineWatcher &smoothnessFactor(uint32_t value);
+
+    /**
+     * Client queued a work item to the component.
+     *
+     * \param frameIndex  input frame index of this work
+     * \param buffers     input buffers of the queued work item
+     * \param queuedAt    time when the client queued the buffer
+     */
+    void onWorkQueued(
+            uint64_t frameIndex,
+            std::vector<std::shared_ptr<C2Buffer>> &&buffers,
+            const Clock::time_point &queuedAt);
+
+    /**
+     * The component released input buffers from a work item.
+     *
+     * \param frameIndex  input frame index
+     * \param arrayIndex  index of the buffer at the original |buffers| in
+     *                    onWorkQueued().
+     * \return  buffers[arrayIndex]
+     */
+    std::shared_ptr<C2Buffer> onInputBufferReleased(
+            uint64_t frameIndex, size_t arrayIndex);
+
+    /**
+     * The component finished processing a work item.
+     *
+     * \param frameIndex  input frame index
+     */
+    void onWorkDone(uint64_t frameIndex);
+
+    /**
+     * Flush the pipeline.
+     */
+    void flush();
+
+    /**
+     * \return  true  if pipeline does not need more work items to proceed
+     *                smoothly, considering delays and smoothness factor;
+     *          false otherwise.
+     */
+    bool pipelineFull() const;
+
+    /**
+     * Return elapsed processing time of a work item, nth from the longest
+     * processing time to the shortest.
+     *
+     * \param now current timestamp
+     * \param n   nth work item, from the longest processing time to the
+     *            shortest. It's a 0-based index.
+     * \return  elapsed processing time of nth work item.
+     */
+    Clock::duration elapsed(const Clock::time_point &now, size_t n) const;
+
+private:
+    uint32_t mInputDelay;
+    uint32_t mPipelineDelay;
+    uint32_t mOutputDelay;
+    uint32_t mSmoothnessFactor;
+
+    struct Frame {
+        Frame(std::vector<std::shared_ptr<C2Buffer>> &&b,
+              const Clock::time_point &q)
+            : buffers(b),
+              queuedAt(q) {}
+        std::vector<std::shared_ptr<C2Buffer>> buffers;
+        const Clock::time_point queuedAt;
+    };
+    std::map<uint64_t, Frame> mFramesInPipeline;
+};
+
+}  // namespace android
+
+#endif  // PIPELINE_WATCHER_H_
diff --git a/media/codec2/sfplugin/SkipCutBuffer.cpp b/media/codec2/sfplugin/SkipCutBuffer.cpp
index 5762440..8d1de65 100644
--- a/media/codec2/sfplugin/SkipCutBuffer.cpp
+++ b/media/codec2/sfplugin/SkipCutBuffer.cpp
@@ -20,7 +20,7 @@
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/SkipCutBuffer.h>
+#include "SkipCutBuffer.h"
 
 namespace android {
 
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index c369e16..6da131f 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -65,7 +65,9 @@
     { C2Config::LEVEL_AVC_5,    AVCLevel5 },
     { C2Config::LEVEL_AVC_5_1,  AVCLevel51 },
     { C2Config::LEVEL_AVC_5_2,  AVCLevel52 },
-
+    { C2Config::LEVEL_AVC_6,    AVCLevel6 },
+    { C2Config::LEVEL_AVC_6_1,  AVCLevel61 },
+    { C2Config::LEVEL_AVC_6_2,  AVCLevel62 },
 };
 
 ALookup<C2Config::profile_t, int32_t> sAvcProfiles = {
@@ -99,7 +101,7 @@
     { C2Color::MATRIX_BT709,           ColorAspects::MatrixBT709_5 },
     { C2Color::MATRIX_FCC47_73_682,    ColorAspects::MatrixBT470_6M },
     { C2Color::MATRIX_BT601,           ColorAspects::MatrixBT601_6 },
-    { C2Color::MATRIX_SMPTE240M,       ColorAspects::MatrixSMPTE240M },
+    { C2Color::MATRIX_240M,            ColorAspects::MatrixSMPTE240M },
     { C2Color::MATRIX_BT2020,          ColorAspects::MatrixBT2020 },
     { C2Color::MATRIX_BT2020_CONSTANT, ColorAspects::MatrixBT2020Constant },
     { C2Color::MATRIX_OTHER,           ColorAspects::MatrixOther },
@@ -853,19 +855,19 @@
 
     switch (primaries) {
         case C2Color::PRIMARIES_BT601_525:
-            *dataSpace |= (matrix == C2Color::MATRIX_SMPTE240M
+            *dataSpace |= (matrix == C2Color::MATRIX_240M
                             || matrix == C2Color::MATRIX_BT709)
                     ? HAL_DATASPACE_STANDARD_BT601_525_UNADJUSTED
                     : HAL_DATASPACE_STANDARD_BT601_525;
             break;
         case C2Color::PRIMARIES_BT601_625:
-            *dataSpace |= (matrix == C2Color::MATRIX_SMPTE240M
+            *dataSpace |= (matrix == C2Color::MATRIX_240M
                             || matrix == C2Color::MATRIX_BT709)
                     ? HAL_DATASPACE_STANDARD_BT601_625_UNADJUSTED
                     : HAL_DATASPACE_STANDARD_BT601_625;
             break;
         case C2Color::PRIMARIES_BT2020:
-            *dataSpace |= (matrix == C2Color::MATRIX_BT2020CONSTANT
+            *dataSpace |= (matrix == C2Color::MATRIX_BT2020_CONSTANT
                     ? HAL_DATASPACE_STANDARD_BT2020_CONSTANT_LUMINANCE
                     : HAL_DATASPACE_STANDARD_BT2020);
             break;
diff --git a/media/codec2/tests/C2ComponentInterface_test.cpp b/media/codec2/tests/C2ComponentInterface_test.cpp
index e907964..67f733d 100644
--- a/media/codec2/tests/C2ComponentInterface_test.cpp
+++ b/media/codec2/tests/C2ComponentInterface_test.cpp
@@ -182,9 +182,9 @@
     return std::make_unique<T>();
 }
 
-template <> std::unique_ptr<C2PortMimeConfig::input> makeParam() {
+template <> std::unique_ptr<C2PortMediaTypeSetting::input> makeParam() {
     // TODO(hiroh): Set more precise length.
-    return C2PortMimeConfig::input::AllocUnique(100);
+    return C2PortMediaTypeSetting::input::AllocUnique(100);
 }
 
 #define TRACED_FAILURE(func)                            \
@@ -323,17 +323,17 @@
     EXPECT_EQ(C2SettingResult::BAD_VALUE, failures[0]->failure);
 }
 
-// There is only used enum type for the field type, that is C2DomainKind.
+// Only one enum type is used for the field type, namely C2Component::domain_t.
 // If another field type is added, it is necessary to add function for that.
 template <>
 void C2CompIntfTest::getTestValues(
         const C2FieldSupportedValues &validValueInfos,
-        std::vector<C2DomainKind> *const validValues,
-        std::vector<C2DomainKind> *const invalidValues) {
+        std::vector<C2Component::domain_t> *const validValues,
+        std::vector<C2Component::domain_t> *const invalidValues) {
     UNUSED(validValueInfos);
-    validValues->emplace_back(C2DomainVideo);
-    validValues->emplace_back(C2DomainAudio);
-    validValues->emplace_back(C2DomainOther);
+    validValues->emplace_back(C2Component::DOMAIN_VIDEO);
+    validValues->emplace_back(C2Component::DOMAIN_AUDIO);
+    validValues->emplace_back(C2Component::DOMAIN_OTHER);
 
     // There is no invalid value.
     UNUSED(invalidValues);
@@ -634,20 +634,20 @@
     std::vector<std::shared_ptr<C2ParamDescriptor>> supportedParams;
     ASSERT_EQ(C2_OK, mIntf->querySupportedParams_nb(&supportedParams));
 
-    EACH_TEST_SELF(C2ComponentLatencyInfo, TEST_U32_WRITABLE_FIELD);
-    EACH_TEST_SELF(C2ComponentTemporalInfo, TEST_U32_WRITABLE_FIELD);
-    EACH_TEST_INPUT(C2PortLatencyInfo, TEST_U32_WRITABLE_FIELD);
-    EACH_TEST_OUTPUT(C2PortLatencyInfo, TEST_U32_WRITABLE_FIELD);
-    EACH_TEST_INPUT(C2StreamFormatConfig, TEST_U32_WRITABLE_FIELD);
-    EACH_TEST_OUTPUT(C2StreamFormatConfig, TEST_U32_WRITABLE_FIELD);
-    EACH_TEST_INPUT(C2PortStreamCountConfig, TEST_U32_WRITABLE_FIELD);
-    EACH_TEST_OUTPUT(C2PortStreamCountConfig, TEST_U32_WRITABLE_FIELD);
+    EACH_TEST_SELF(C2ActualPipelineDelayTuning, TEST_U32_WRITABLE_FIELD);
+    EACH_TEST_SELF(C2ComponentAttributesSetting, TEST_U32_WRITABLE_FIELD);
+    EACH_TEST_INPUT(C2PortActualDelayTuning, TEST_U32_WRITABLE_FIELD);
+    EACH_TEST_OUTPUT(C2PortActualDelayTuning, TEST_U32_WRITABLE_FIELD);
+    EACH_TEST_INPUT(C2StreamBufferTypeSetting, TEST_U32_WRITABLE_FIELD);
+    EACH_TEST_OUTPUT(C2StreamBufferTypeSetting, TEST_U32_WRITABLE_FIELD);
+    EACH_TEST_INPUT(C2PortStreamCountTuning, TEST_U32_WRITABLE_FIELD);
+    EACH_TEST_OUTPUT(C2PortStreamCountTuning, TEST_U32_WRITABLE_FIELD);
 
-    EACH_TEST_SELF(C2ComponentDomainInfo, TEST_ENUM_WRITABLE_FIELD);
+    EACH_TEST_SELF(C2ComponentDomainSetting, TEST_ENUM_WRITABLE_FIELD);
 
     // TODO(hiroh): Support parameters based on uint32_t[] and char[].
-    // EACH_TEST_INPUT(C2PortMimeConfig, TEST_STRING_WRITABLE_FIELD);
-    // EACH_TEST_OUTPUT(C2PortMimeConfig, TEST_STRING_WRITABLE_FIELD);
+    // EACH_TEST_INPUT(C2PortMediaTypeSetting, TEST_STRING_WRITABLE_FIELD);
+    // EACH_TEST_OUTPUT(C2PortMediaTypeSetting, TEST_STRING_WRITABLE_FIELD);
     // EACH_TEST_INPUT(C2StreamMimeConfig, TEST_STRING_WRITABLE_FIELD);
     // EACH_TEST_OUTPUT(C2StreamMimeConfig, TEST_STRING_WRITABLE_FIELD);
 
@@ -656,10 +656,10 @@
     // EACH_TEST_SELF(C2ReadOnlyParamsInfo, TEST_U32ARRAY_WRITABLE_FIELD);
     // EACH_TEST_SELF(C2RequestedInfosInfo, TEST_U32ARRAY_WRITABLE_FIELD);
 
-    EACH_TEST_INPUT(C2VideoSizeStreamInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
-    EACH_TEST_OUTPUT(C2VideoSizeStreamInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
-    EACH_TEST_INPUT(C2VideoSizeStreamTuning, TEST_VSSTRUCT_WRITABLE_FIELD);
-    EACH_TEST_OUTPUT(C2VideoSizeStreamTuning, TEST_VSSTRUCT_WRITABLE_FIELD);
+    EACH_TEST_INPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
+    EACH_TEST_OUTPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
+    EACH_TEST_INPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
+    EACH_TEST_OUTPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
     EACH_TEST_INPUT(C2MaxVideoSizeHintPortSetting, TEST_VSSTRUCT_WRITABLE_FIELD);
     EACH_TEST_OUTPUT(C2MaxVideoSizeHintPortSetting, TEST_VSSTRUCT_WRITABLE_FIELD);
 
diff --git a/media/codec2/tests/C2SampleComponent_test.cpp b/media/codec2/tests/C2SampleComponent_test.cpp
index cd354ad..9956834 100644
--- a/media/codec2/tests/C2SampleComponent_test.cpp
+++ b/media/codec2/tests/C2SampleComponent_test.cpp
@@ -152,7 +152,7 @@
 
     std::unordered_map<uint32_t, C2Param &> mMyParams;
 
-    C2ComponentDomainInfo mDomainInfo;
+    C2ComponentDomainSetting mDomainInfo;
 
     MyComponentInstance() {
         mMyParams.insert({mDomainInfo.index(), mDomainInfo});
@@ -187,12 +187,12 @@
             c2_blocking_t mayBlock) const override {
         (void)mayBlock;
         for (C2FieldSupportedValuesQuery &query : fields) {
-            if (query.field() == C2ParamField(&mDomainInfo, &C2ComponentDomainInfo::value)) {
+            if (query.field() == C2ParamField(&mDomainInfo, &C2ComponentDomainSetting::value)) {
                 query.values = C2FieldSupportedValues(
                     false /* flag */,
                     &mDomainInfo.value
                     //,
-                    //{(int32_t)C2DomainVideo}
+                    //{(int32_t)C2Component::DOMAIN_VIDEO}
                 );
                 query.status = C2_OK;
             } else {
@@ -391,20 +391,20 @@
 }
 
 TEST_F(C2SampleComponentTest, ReflectorTest) {
-    C2ComponentDomainInfo domainInfo;
+    C2ComponentDomainSetting domainInfo;
     std::shared_ptr<MyComponentInstance> myComp(new MyComponentInstance);
     std::shared_ptr<C2ComponentInterface> comp = myComp;
 
     std::unique_ptr<C2StructDescriptor> desc{
-        myComp->getParamReflector()->describe(C2ComponentDomainInfo::CORE_INDEX)};
+        myComp->getParamReflector()->describe(C2ComponentDomainSetting::CORE_INDEX)};
     dumpStruct(*desc);
 
     std::vector<C2FieldSupportedValuesQuery> query = {
-        { C2ParamField(&domainInfo, &C2ComponentDomainInfo::value),
+        { C2ParamField(&domainInfo, &C2ComponentDomainSetting::value),
           C2FieldSupportedValuesQuery::CURRENT },
-        C2FieldSupportedValuesQuery(C2ParamField(&domainInfo, &C2ComponentDomainInfo::value),
+        C2FieldSupportedValuesQuery(C2ParamField(&domainInfo, &C2ComponentDomainSetting::value),
           C2FieldSupportedValuesQuery::CURRENT),
-        C2FieldSupportedValuesQuery::Current(C2ParamField(&domainInfo, &C2ComponentDomainInfo::value)),
+        C2FieldSupportedValuesQuery::Current(C2ParamField(&domainInfo, &C2ComponentDomainSetting::value)),
     };
 
     EXPECT_EQ(C2_OK, comp->querySupportedValues_vb(query, C2_DONT_BLOCK));
diff --git a/media/codec2/vndk/C2Config.cpp b/media/codec2/vndk/C2Config.cpp
index 8a27088..34680a7 100644
--- a/media/codec2/vndk/C2Config.cpp
+++ b/media/codec2/vndk/C2Config.cpp
@@ -186,6 +186,9 @@
         { "avc-5", C2Config::LEVEL_AVC_5 },
         { "avc-5.1", C2Config::LEVEL_AVC_5_1 },
         { "avc-5.2", C2Config::LEVEL_AVC_5_2 },
+        { "avc-6", C2Config::LEVEL_AVC_6 },
+        { "avc-6.1", C2Config::LEVEL_AVC_6_1 },
+        { "avc-6.2", C2Config::LEVEL_AVC_6_2 },
         { "hevc-main-1", C2Config::LEVEL_HEVC_MAIN_1 },
         { "hevc-main-2", C2Config::LEVEL_HEVC_MAIN_2 },
         { "hevc-main-2.1", C2Config::LEVEL_HEVC_MAIN_2_1 },
diff --git a/media/codec2/vndk/C2Store.cpp b/media/codec2/vndk/C2Store.cpp
index dc7e89c..e075849 100644
--- a/media/codec2/vndk/C2Store.cpp
+++ b/media/codec2/vndk/C2Store.cpp
@@ -517,7 +517,6 @@
          *
          * \note Only used by ComponentLoader.
          *
-         * \param alias[in]   module alias
          * \param libPath[in] library path
          *
          * \retval C2_OK        the component module has been successfully loaded
@@ -527,7 +526,7 @@
          * \retval C2_REFUSED   permission denied to load the component module (unexpected)
          * \retval C2_TIMED_OUT could not load the module within the time limit (unexpected)
          */
-        c2_status_t init(std::string alias, std::string libPath);
+        c2_status_t init(std::string libPath);
 
         virtual ~ComponentModule() override;
 
@@ -570,7 +569,7 @@
             std::shared_ptr<ComponentModule> localModule = mModule.lock();
             if (localModule == nullptr) {
                 localModule = std::make_shared<ComponentModule>();
-                res = localModule->init(mAlias, mLibPath);
+                res = localModule->init(mLibPath);
                 if (res == C2_OK) {
                     mModule = localModule;
                 }
@@ -582,13 +581,12 @@
         /**
          * Creates a component loader for a specific library path (or name).
          */
-        ComponentLoader(std::string alias, std::string libPath)
-            : mAlias(alias), mLibPath(libPath) {}
+        ComponentLoader(std::string libPath)
+            : mLibPath(libPath) {}
 
     private:
         std::mutex mMutex; ///< mutex guarding the module
         std::weak_ptr<ComponentModule> mModule; ///< weak reference to the loaded module
-        std::string mAlias; ///< component alias
         std::string mLibPath; ///< library path
     };
 
@@ -624,9 +622,10 @@
     };
 
     /**
-     * Retrieves the component loader for a component.
+     * Retrieves the component module for a component.
      *
-     * \return a non-ref-holding pointer to the component loader.
+     * \param module pointer to a shared_ptr where the component module will be stored on
+     *               success.
      *
      * \retval C2_OK        the component loader has been successfully retrieved
      * \retval C2_NO_MEMORY not enough memory to locate the component loader
@@ -640,16 +639,25 @@
      *                      component but some components could not be loaded due to lack of
      *                      permissions)
      */
-    c2_status_t findComponent(C2String name, ComponentLoader **loader);
+    c2_status_t findComponent(C2String name, std::shared_ptr<ComponentModule> *module);
 
-    std::map<C2String, ComponentLoader> mComponents; ///< map of name -> components
-    std::vector<C2String> mComponentsList; ///< list of components
+    /**
+     * Loads each component module and discovers its contents.
+     */
+    void visitComponents();
+
+    std::mutex mMutex; ///< mutex guarding the component lists during construction
+    bool mVisited; ///< component modules visited
+    std::map<C2String, ComponentLoader> mComponents; ///< path -> component module
+    std::map<C2String, C2String> mComponentNameToPath; ///< name -> path
+    std::vector<std::shared_ptr<const C2Component::Traits>> mComponentList;
+
     std::shared_ptr<C2ReflectorHelper> mReflector;
     Interface mInterface;
 };
 
 c2_status_t C2PlatformComponentStore::ComponentModule::init(
-        std::string alias, std::string libPath) {
+        std::string libPath) {
     ALOGV("in %s", __func__);
     ALOGV("loading dll");
     mLibHandle = dlopen(libPath.c_str(), RTLD_NOW|RTLD_NODELETE);
@@ -684,14 +692,28 @@
 
     std::shared_ptr<C2Component::Traits> traits(new (std::nothrow) C2Component::Traits);
     if (traits) {
-        if (alias != intf->getName()) {
-            ALOGV("%s is alias to %s", alias.c_str(), intf->getName().c_str());
+        traits->name = intf->getName();
+
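+        // Prefer the kind and domain reported by the interface itself; fall back to
+        // name- and media-type-based heuristics below if the query is not supported.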
+        C2ComponentKindSetting kind;
+        C2ComponentDomainSetting domain;
+        res = intf->query_vb({ &kind, &domain }, {}, C2_MAY_BLOCK, nullptr);
+        bool fixDomain = res != C2_OK;
+        if (res == C2_OK) {
+            traits->kind = kind.value;
+            traits->domain = domain.value;
+        } else {
+            // TODO: remove this fall-back
+            ALOGD("failed to query interface for kind and domain: %d", res);
+
+            traits->kind =
+                (traits->name.find("encoder") != std::string::npos) ? C2Component::KIND_ENCODER :
+                (traits->name.find("decoder") != std::string::npos) ? C2Component::KIND_DECODER :
+                C2Component::KIND_OTHER;
         }
-        traits->name = alias;
-        // TODO: get this from interface properly.
-        bool encoder = (traits->name.find("encoder") != std::string::npos);
-        uint32_t mediaTypeIndex = encoder ? C2PortMimeConfig::output::PARAM_TYPE
-                : C2PortMimeConfig::input::PARAM_TYPE;
+
+        uint32_t mediaTypeIndex =
+                traits->kind == C2Component::KIND_ENCODER ? C2PortMediaTypeSetting::output::PARAM_TYPE
+                : C2PortMediaTypeSetting::input::PARAM_TYPE;
         std::vector<std::unique_ptr<C2Param>> params;
         res = intf->query_vb({}, { mediaTypeIndex }, C2_MAY_BLOCK, &params);
         if (res != C2_OK) {
@@ -702,29 +724,54 @@
             ALOGD("failed to query interface: unexpected vector size: %zu", params.size());
             return mInit;
         }
-        C2PortMimeConfig *mediaTypeConfig = (C2PortMimeConfig *)(params[0].get());
+        C2PortMediaTypeSetting *mediaTypeConfig = C2PortMediaTypeSetting::From(params[0].get());
         if (mediaTypeConfig == nullptr) {
             ALOGD("failed to query media type");
             return mInit;
         }
-        traits->mediaType = mediaTypeConfig->m.value;
-        // TODO: get this properly.
-        traits->rank = 0x200;
+        traits->mediaType =
+            std::string(mediaTypeConfig->m.value,
+                        strnlen(mediaTypeConfig->m.value, mediaTypeConfig->flexCount()));
 
-        // TODO: define these values properly
-        bool decoder = (traits->name.find("decoder") != std::string::npos);
-        traits->kind =
-                decoder ? C2Component::KIND_DECODER :
-                encoder ? C2Component::KIND_ENCODER :
-                C2Component::KIND_OTHER;
-        if (strncmp(traits->mediaType.c_str(), "audio/", 6) == 0) {
-            traits->domain = C2Component::DOMAIN_AUDIO;
-        } else if (strncmp(traits->mediaType.c_str(), "video/", 6) == 0) {
-            traits->domain = C2Component::DOMAIN_VIDEO;
-        } else if (strncmp(traits->mediaType.c_str(), "image/", 6) == 0) {
-            traits->domain = C2Component::DOMAIN_IMAGE;
-        } else {
-            traits->domain = C2Component::DOMAIN_OTHER;
+        if (fixDomain) {
+            if (strncmp(traits->mediaType.c_str(), "audio/", 6) == 0) {
+                traits->domain = C2Component::DOMAIN_AUDIO;
+            } else if (strncmp(traits->mediaType.c_str(), "video/", 6) == 0) {
+                traits->domain = C2Component::DOMAIN_VIDEO;
+            } else if (strncmp(traits->mediaType.c_str(), "image/", 6) == 0) {
+                traits->domain = C2Component::DOMAIN_IMAGE;
+            } else {
+                traits->domain = C2Component::DOMAIN_OTHER;
+            }
+        }
+
+        // TODO: get this properly from the store during emplace
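+        // Lower rank values are listed earlier (preferred) when building the codec list.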
+        switch (traits->domain) {
+        case C2Component::DOMAIN_AUDIO:
+            traits->rank = 8;
+            break;
+        default:
+            traits->rank = 512;
+        }
+
+        params.clear();
+        res = intf->query_vb({}, { C2ComponentAliasesSetting::PARAM_TYPE }, C2_MAY_BLOCK, &params);
+        if (res == C2_OK && params.size() == 1u) {
+            C2ComponentAliasesSetting *aliasesSetting =
+                C2ComponentAliasesSetting::From(params[0].get());
+            if (aliasesSetting) {
+                // Split aliases on ','
+                // This looks simpler in plain C and even std::string would still make a copy.
+                char *aliases = ::strndup(aliasesSetting->m.value, aliasesSetting->flexCount());
+                ALOGD("'%s' has aliases: '%s'", intf->getName().c_str(), aliases);
+
+                for (char *tok, *ptr, *str = aliases; (tok = ::strtok_r(str, ",", &ptr));
+                        str = nullptr) {
+                    traits->aliases.push_back(tok);
+                    ALOGD("adding alias: '%s'", tok);
+                }
+                free(aliases);
+            }
         }
     }
     mTraits = traits;
@@ -783,82 +830,46 @@
 }
 
 C2PlatformComponentStore::C2PlatformComponentStore()
-    : mReflector(std::make_shared<C2ReflectorHelper>()),
+    : mVisited(false),
+      mReflector(std::make_shared<C2ReflectorHelper>()),
       mInterface(mReflector) {
 
-    auto emplace = [this](const char *alias, const char *libPath) {
-        // ComponentLoader is neither copiable nor movable, so it must be
-        // constructed in-place. Now ComponentLoader takes two arguments in
-        // constructor, so we need to use piecewise_construct to achieve this
-        // behavior.
-        mComponents.emplace(
-                std::piecewise_construct,
-                std::forward_as_tuple(alias),
-                std::forward_as_tuple(alias, libPath));
-        mComponentsList.emplace_back(alias);
+    auto emplace = [this](const char *libPath) {
+        mComponents.emplace(libPath, libPath);
     };
-    // TODO: move this also into a .so so it can be updated
-    emplace("c2.android.avc.decoder", "libcodec2_soft_avcdec.so");
-    emplace("c2.android.avc.encoder", "libcodec2_soft_avcenc.so");
-    emplace("c2.android.aac.decoder", "libcodec2_soft_aacdec.so");
-    emplace("c2.android.aac.encoder", "libcodec2_soft_aacenc.so");
-    emplace("c2.android.amrnb.decoder", "libcodec2_soft_amrnbdec.so");
-    emplace("c2.android.amrnb.encoder", "libcodec2_soft_amrnbenc.so");
-    emplace("c2.android.amrwb.decoder", "libcodec2_soft_amrwbdec.so");
-    emplace("c2.android.amrwb.encoder", "libcodec2_soft_amrwbenc.so");
-    emplace("c2.android.hevc.decoder", "libcodec2_soft_hevcdec.so");
-    emplace("c2.android.g711.alaw.decoder", "libcodec2_soft_g711alawdec.so");
-    emplace("c2.android.g711.mlaw.decoder", "libcodec2_soft_g711mlawdec.so");
-    emplace("c2.android.mpeg2.decoder", "libcodec2_soft_mpeg2dec.so");
-    emplace("c2.android.h263.decoder", "libcodec2_soft_h263dec.so");
-    emplace("c2.android.h263.encoder", "libcodec2_soft_h263enc.so");
-    emplace("c2.android.mpeg4.decoder", "libcodec2_soft_mpeg4dec.so");
-    emplace("c2.android.mpeg4.encoder", "libcodec2_soft_mpeg4enc.so");
-    emplace("c2.android.mp3.decoder", "libcodec2_soft_mp3dec.so");
-    emplace("c2.android.vorbis.decoder", "libcodec2_soft_vorbisdec.so");
-    emplace("c2.android.opus.decoder", "libcodec2_soft_opusdec.so");
-    emplace("c2.android.opus.encoder", "libcodec2_soft_opusenc.so");
-    emplace("c2.android.vp8.decoder", "libcodec2_soft_vp8dec.so");
-    emplace("c2.android.vp9.decoder", "libcodec2_soft_vp9dec.so");
-    emplace("c2.android.vp8.encoder", "libcodec2_soft_vp8enc.so");
-    emplace("c2.android.vp9.encoder", "libcodec2_soft_vp9enc.so");
-    emplace("c2.android.av1.decoder", "libcodec2_soft_av1dec.so");
-    emplace("c2.android.raw.decoder", "libcodec2_soft_rawdec.so");
-    emplace("c2.android.flac.decoder", "libcodec2_soft_flacdec.so");
-    emplace("c2.android.flac.encoder", "libcodec2_soft_flacenc.so");
-    emplace("c2.android.gsm.decoder", "libcodec2_soft_gsmdec.so");
-    emplace("c2.android.xaac.decoder", "libcodec2_soft_xaacdec.so");
 
-    // "Aliases"
-    // TODO: use aliases proper from C2Component::Traits
-    emplace("OMX.google.h264.decoder", "libcodec2_soft_avcdec.so");
-    emplace("OMX.google.h264.encoder", "libcodec2_soft_avcenc.so");
-    emplace("OMX.google.aac.decoder", "libcodec2_soft_aacdec.so");
-    emplace("OMX.google.aac.encoder", "libcodec2_soft_aacenc.so");
-    emplace("OMX.google.amrnb.decoder", "libcodec2_soft_amrnbdec.so");
-    emplace("OMX.google.amrnb.encoder", "libcodec2_soft_amrnbenc.so");
-    emplace("OMX.google.amrwb.decoder", "libcodec2_soft_amrwbdec.so");
-    emplace("OMX.google.amrwb.encoder", "libcodec2_soft_amrwbenc.so");
-    emplace("OMX.google.hevc.decoder", "libcodec2_soft_hevcdec.so");
-    emplace("OMX.google.g711.alaw.decoder", "libcodec2_soft_g711alawdec.so");
-    emplace("OMX.google.g711.mlaw.decoder", "libcodec2_soft_g711mlawdec.so");
-    emplace("OMX.google.mpeg2.decoder", "libcodec2_soft_mpeg2dec.so");
-    emplace("OMX.google.h263.decoder", "libcodec2_soft_h263dec.so");
-    emplace("OMX.google.h263.encoder", "libcodec2_soft_h263enc.so");
-    emplace("OMX.google.mpeg4.decoder", "libcodec2_soft_mpeg4dec.so");
-    emplace("OMX.google.mpeg4.encoder", "libcodec2_soft_mpeg4enc.so");
-    emplace("OMX.google.mp3.decoder", "libcodec2_soft_mp3dec.so");
-    emplace("OMX.google.vorbis.decoder", "libcodec2_soft_vorbisdec.so");
-    emplace("OMX.google.opus.decoder", "libcodec2_soft_opusdec.so");
-    emplace("OMX.google.vp8.decoder", "libcodec2_soft_vp8dec.so");
-    emplace("OMX.google.vp9.decoder", "libcodec2_soft_vp9dec.so");
-    emplace("OMX.google.vp8.encoder", "libcodec2_soft_vp8enc.so");
-    emplace("OMX.google.vp9.encoder", "libcodec2_soft_vp9enc.so");
-    emplace("OMX.google.raw.decoder", "libcodec2_soft_rawdec.so");
-    emplace("OMX.google.flac.decoder", "libcodec2_soft_flacdec.so");
-    emplace("OMX.google.flac.encoder", "libcodec2_soft_flacenc.so");
-    emplace("OMX.google.gsm.decoder", "libcodec2_soft_gsmdec.so");
-    emplace("OMX.google.xaac.decoder", "libcodec2_soft_xaacdec.so");
+    // TODO: move this also into a .so so it can be updated
+    emplace("libcodec2_soft_aacdec.so");
+    emplace("libcodec2_soft_aacenc.so");
+    emplace("libcodec2_soft_amrnbdec.so");
+    emplace("libcodec2_soft_amrnbenc.so");
+    emplace("libcodec2_soft_amrwbdec.so");
+    emplace("libcodec2_soft_amrwbenc.so");
+    emplace("libcodec2_soft_av1dec.so");
+    emplace("libcodec2_soft_avcdec.so");
+    emplace("libcodec2_soft_avcenc.so");
+    emplace("libcodec2_soft_flacdec.so");
+    emplace("libcodec2_soft_flacenc.so");
+    emplace("libcodec2_soft_g711alawdec.so");
+    emplace("libcodec2_soft_g711mlawdec.so");
+    emplace("libcodec2_soft_gsmdec.so");
+    emplace("libcodec2_soft_h263dec.so");
+    emplace("libcodec2_soft_h263enc.so");
+    emplace("libcodec2_soft_hevcdec.so");
+    emplace("libcodec2_soft_hevcenc.so");
+    emplace("libcodec2_soft_mp3dec.so");
+    emplace("libcodec2_soft_mpeg2dec.so");
+    emplace("libcodec2_soft_mpeg4dec.so");
+    emplace("libcodec2_soft_mpeg4enc.so");
+    emplace("libcodec2_soft_opusdec.so");
+    emplace("libcodec2_soft_opusenc.so");
+    emplace("libcodec2_soft_rawdec.so");
+    emplace("libcodec2_soft_vorbisdec.so");
+    emplace("libcodec2_soft_vp8dec.so");
+    emplace("libcodec2_soft_vp8enc.so");
+    emplace("libcodec2_soft_vp9dec.so");
+    emplace("libcodec2_soft_vp9enc.so");
+    emplace("libcodec2_soft_xaacdec.so");
 }
 
 c2_status_t C2PlatformComponentStore::copyBuffer(
@@ -881,47 +892,56 @@
     return mInterface.config(params, C2_MAY_BLOCK, failures);
 }
 
-std::vector<std::shared_ptr<const C2Component::Traits>> C2PlatformComponentStore::listComponents() {
-    // This method SHALL return within 500ms.
-    std::vector<std::shared_ptr<const C2Component::Traits>> list;
-    for (const C2String &alias : mComponentsList) {
-        ComponentLoader &loader = mComponents.at(alias);
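+// Enumerates all registered component libraries once, caching their traits and name/alias-to-path mappings.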
+void C2PlatformComponentStore::visitComponents() {
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (mVisited) {
+        return;
+    }
+    for (auto &pathAndLoader : mComponents) {
+        const C2String &path = pathAndLoader.first;
+        ComponentLoader &loader = pathAndLoader.second;
         std::shared_ptr<ComponentModule> module;
-        c2_status_t res = loader.fetchModule(&module);
-        if (res == C2_OK) {
+        if (loader.fetchModule(&module) == C2_OK) {
             std::shared_ptr<const C2Component::Traits> traits = module->getTraits();
             if (traits) {
-                list.push_back(traits);
+                mComponentList.push_back(traits);
+                mComponentNameToPath.emplace(traits->name, path);
+                for (const C2String &alias : traits->aliases) {
+                    mComponentNameToPath.emplace(alias, path);
+                }
             }
         }
     }
-    return list;
+    mVisited = true;
 }
 
-c2_status_t C2PlatformComponentStore::findComponent(C2String name, ComponentLoader **loader) {
-    *loader = nullptr;
-    auto pos = mComponents.find(name);
-    // TODO: check aliases
-    if (pos == mComponents.end()) {
-        return C2_NOT_FOUND;
+std::vector<std::shared_ptr<const C2Component::Traits>> C2PlatformComponentStore::listComponents() {
+    // This method SHALL return within 500ms.
+    visitComponents();
+    return mComponentList;
+}
+
+c2_status_t C2PlatformComponentStore::findComponent(
+        C2String name, std::shared_ptr<ComponentModule> *module) {
+    (*module).reset();
+    visitComponents();
+
+    auto pos = mComponentNameToPath.find(name);
+    if (pos != mComponentNameToPath.end()) {
+        return mComponents.at(pos->second).fetchModule(module);
     }
-    *loader = &pos->second;
-    return C2_OK;
+    return C2_NOT_FOUND;
 }
 
 c2_status_t C2PlatformComponentStore::createComponent(
         C2String name, std::shared_ptr<C2Component> *const component) {
     // This method SHALL return within 100ms.
     component->reset();
-    ComponentLoader *loader;
-    c2_status_t res = findComponent(name, &loader);
+    std::shared_ptr<ComponentModule> module;
+    c2_status_t res = findComponent(name, &module);
     if (res == C2_OK) {
-        std::shared_ptr<ComponentModule> module;
-        res = loader->fetchModule(&module);
-        if (res == C2_OK) {
-            // TODO: get a unique node ID
-            res = module->createComponent(0, component);
-        }
+        // TODO: get a unique node ID
+        res = module->createComponent(0, component);
     }
     return res;
 }
@@ -930,15 +950,11 @@
         C2String name, std::shared_ptr<C2ComponentInterface> *const interface) {
     // This method SHALL return within 100ms.
     interface->reset();
-    ComponentLoader *loader;
-    c2_status_t res = findComponent(name, &loader);
+    std::shared_ptr<ComponentModule> module;
+    c2_status_t res = findComponent(name, &module);
     if (res == C2_OK) {
-        std::shared_ptr<ComponentModule> module;
-        res = loader->fetchModule(&module);
-        if (res == C2_OK) {
-            // TODO: get a unique node ID
-            res = module->createInterface(0, interface);
-        }
+        // TODO: get a unique node ID
+        res = module->createInterface(0, interface);
     }
     return res;
 }
diff --git a/media/codec2/vndk/platform/C2BqBuffer.cpp b/media/codec2/vndk/platform/C2BqBuffer.cpp
index 7bf3d64..41a5b3f 100644
--- a/media/codec2/vndk/platform/C2BqBuffer.cpp
+++ b/media/codec2/vndk/platform/C2BqBuffer.cpp
@@ -207,12 +207,16 @@
         // dequeueBuffer returns flag.
         if (!transStatus.isOk() || status < android::OK) {
             ALOGD("cannot dequeue buffer %d", status);
-            if (transStatus.isOk() && status == android::INVALID_OPERATION) {
-              // Too many buffer dequeued. retrying after some time is required.
-              return C2_TIMED_OUT;
-            } else {
-              return C2_BAD_VALUE;
+            if (transStatus.isOk()) {
+                if (status == android::INVALID_OPERATION ||
+                    status == android::TIMED_OUT ||
+                    status == android::WOULD_BLOCK) {
+                    // dequeueBuffer() is temporarily blocked. Retrying is
+                    // required.
+                    return C2_BLOCKING;
+                }
             }
+            return C2_BAD_VALUE;
         }
         ALOGV("dequeued a buffer successfully");
         native_handle_t* nh = nullptr;
@@ -227,7 +231,7 @@
             if (status == -ETIME) {
                 // fence is not signalled yet.
                 (void)mProducer->cancelBuffer(slot, fenceHandle).isOk();
-                return C2_TIMED_OUT;
+                return C2_BLOCKING;
             }
             if (status != android::NO_ERROR) {
                 ALOGD("buffer fence wait error %d", status);
@@ -353,14 +357,14 @@
                 return C2_OK;
             }
             c2_status_t status = fetchFromIgbp_l(width, height, format, usage, block);
-            if (status == C2_TIMED_OUT) {
+            if (status == C2_BLOCKING) {
                 lock.unlock();
                 ::usleep(kMaxIgbpRetryDelayUs);
                 continue;
             }
             return status;
         }
-        return C2_TIMED_OUT;
+        return C2_BLOCKING;
     }
 
     void setRenderCallback(const OnRenderCallback &renderCallback) {
diff --git a/media/extractors/aac/AACExtractor.cpp b/media/extractors/aac/AACExtractor.cpp
index beddad0..9d183d4 100644
--- a/media/extractors/aac/AACExtractor.cpp
+++ b/media/extractors/aac/AACExtractor.cpp
@@ -150,6 +150,7 @@
 
     mMeta = AMediaFormat_new();
     MakeAACCodecSpecificData(mMeta, profile, sf_index, channel);
+    AMediaFormat_setInt32(mMeta, AMEDIAFORMAT_KEY_AAC_PROFILE, profile + 1);
 
     off64_t streamSize, numFrames = 0;
     size_t frameSize = 0;
diff --git a/media/extractors/mp3/MP3Extractor.cpp b/media/extractors/mp3/MP3Extractor.cpp
index 61838f6..a838ae6 100644
--- a/media/extractors/mp3/MP3Extractor.cpp
+++ b/media/extractors/mp3/MP3Extractor.cpp
@@ -708,6 +708,7 @@
 }
 
 static const char *extensions[] = {
+    "mp2",
     "mp3",
     "mpeg",
     "mpg",
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index ac54116..4b4d767 100755
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -2236,7 +2236,29 @@
             *offset += chunk_size;
             break;
         }
+        case FOURCC("av1C"):
+        {
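+            // 'av1C' (AV1CodecConfigurationBox): store its payload as the track's csd-0.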
+            auto buffer = heapbuffer<uint8_t>(chunk_data_size);
 
+            if (buffer.get() == NULL) {
+                ALOGE("b/28471206");
+                return NO_MEMORY;
+            }
+
+            if (mDataSource->readAt(
+                        data_offset, buffer.get(), chunk_data_size) < chunk_data_size) {
+                return ERROR_IO;
+            }
+
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
+            AMediaFormat_setBuffer(mLastTrack->meta,
+                   AMEDIAFORMAT_KEY_CSD_0, buffer.get(), chunk_data_size);
+
+            *offset += chunk_size;
+            break;
+        }
         case FOURCC("d263"):
         {
             *offset += chunk_size;
@@ -2663,19 +2685,40 @@
         case FOURCC("ac-3"):
         {
             *offset += chunk_size;
-            return parseAC3SpecificBox(data_offset);
+            // bypass ac-3 if parsing fails
+            if (parseAC3SpecificBox(data_offset) != OK) {
+                if (mLastTrack != NULL) {
+                    ALOGW("Failed to parse ac-3");
+                    mLastTrack->skipTrack = true;
+                }
+            }
+            return OK;
         }
 
         case FOURCC("ec-3"):
         {
             *offset += chunk_size;
-            return parseEAC3SpecificBox(data_offset);
+            // bypass ec-3 if parsing fails
+            if (parseEAC3SpecificBox(data_offset) != OK) {
+                if (mLastTrack != NULL) {
+                    ALOGW("Failed to parse ec-3");
+                    mLastTrack->skipTrack = true;
+                }
+            }
+            return OK;
         }
 
         case FOURCC("ac-4"):
         {
             *offset += chunk_size;
-            return parseAC4SpecificBox(data_offset);
+            // bypass ac-4 if parsing fails
+            if (parseAC4SpecificBox(data_offset) != OK) {
+                if (mLastTrack != NULL) {
+                    ALOGW("Failed to parse ac-4");
+                    mLastTrack->skipTrack = true;
+                }
+            }
+            return OK;
         }
 
         case FOURCC("ftyp"):
@@ -3972,6 +4015,18 @@
         if (!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
             itemTable = mItemTable;
         }
+    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)) {
+        void *data;
+        size_t size;
+        if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_0, &data, &size)) {
+            return NULL;
+        }
+
+        const uint8_t *ptr = (const uint8_t *)data;
+
+        if (size < 5 || ptr[0] != 0x81) {  // configurationVersion == 1
+            return NULL;
+        }
     }
 
     if (track->has_elst and !strncasecmp("video/", mime, 6) and track->elst_media_time > 0) {
@@ -4005,6 +4060,10 @@
         if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_HEVC, &data, &size)) {
             return ERROR_MALFORMED;
         }
+    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)) {
+        if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_0, &data, &size)) {
+            return ERROR_MALFORMED;
+        }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)
             || !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG2)
             || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
@@ -5621,10 +5680,10 @@
             }
 
             if (isMalFormed) {
-                ALOGE("Video is malformed");
-                mBuffer->release();
-                mBuffer = NULL;
-                return AMEDIA_ERROR_MALFORMED;
+                // If the NAL length is abnormal, ignore this NAL.
+                ALOGW("abnormal NAL length, ignoring this NAL");
+                srcOffset = size;
+                break;
             }
 
             if (nalLength == 0) {
@@ -6214,6 +6273,7 @@
 
 static const char *extensions[] = {
     "3g2",
+    "3ga",
     "3gp",
     "3gpp",
     "3gpp2",
@@ -6222,6 +6282,7 @@
     "m4v",
     "mov",
     "mp4",
+    "qt",
     NULL
 };
 
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
index ba40690..d99493d 100644
--- a/media/extractors/ogg/OggExtractor.cpp
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -364,7 +364,13 @@
             return OK;
         }
 
-        ++*pageOffset;
+        // see how far ahead to skip; avoid some fruitless comparisons
+        unsigned int i;
+        for (i = 1; i < 4 ; i++) {
+            if (signature[i] == 'O')
+                break;
+        }
+        *pageOffset += i;
     }
 }
 
@@ -1382,6 +1388,7 @@
 static const char *extensions[] = {
     "oga",
     "ogg",
+    "opus",
     NULL
 };
 
diff --git a/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h b/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
index 9711b86..8eb70b1 100644
--- a/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
+++ b/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
@@ -310,7 +310,7 @@
     }
 
     // Write SHORT data from the first channel.
-    int write(int16_t *inputData, int inputChannelCount, int numFrames) {
+    int32_t write(int16_t *inputData, int32_t inputChannelCount, int32_t numFrames) {
         // stop at end of buffer
         if ((mFrameCounter + numFrames) > mMaxFrames) {
             numFrames = mMaxFrames - mFrameCounter;
@@ -322,7 +322,7 @@
     }
 
     // Write FLOAT data from the first channel.
-    int write(float *inputData, int inputChannelCount, int numFrames) {
+    int32_t write(float *inputData, int32_t inputChannelCount, int32_t numFrames) {
         // stop at end of buffer
         if ((mFrameCounter + numFrames) > mMaxFrames) {
             numFrames = mMaxFrames - mFrameCounter;
@@ -333,7 +333,7 @@
         return numFrames;
     }
 
-    int size() {
+    int32_t size() {
         return mFrameCounter;
     }
 
@@ -443,9 +443,14 @@
     virtual ~LoopbackProcessor() = default;
 
 
+    enum process_result {
+        PROCESS_RESULT_OK,
+        PROCESS_RESULT_GLITCH
+    };
+
     virtual void reset() {}
 
-    virtual void process(float *inputData, int inputChannelCount,
+    virtual process_result process(float *inputData, int inputChannelCount,
                  float *outputData, int outputChannelCount,
                  int numFrames) = 0;
 
@@ -639,7 +644,7 @@
         return getSampleRate() / 8;
     }
 
-    void process(float *inputData, int inputChannelCount,
+    process_result process(float *inputData, int inputChannelCount,
                  float *outputData, int outputChannelCount,
                  int numFrames) override {
         int channelsValid = std::min(inputChannelCount, outputChannelCount);
@@ -750,6 +755,7 @@
 
         mState = nextState;
         mLoopCounter++;
+        return PROCESS_RESULT_OK;
     }
 
     int save(const char *fileName) override {
@@ -896,9 +902,10 @@
      * @param inputData contains microphone data with sine signal feedback
      * @param outputData contains the reference sine wave
      */
-    void process(float *inputData, int inputChannelCount,
+    process_result process(float *inputData, int inputChannelCount,
                  float *outputData, int outputChannelCount,
                  int numFrames) override {
+        process_result result = PROCESS_RESULT_OK;
         mProcessCount++;
 
         float peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
@@ -978,6 +985,7 @@
                     mMaxGlitchDelta = std::max(mMaxGlitchDelta, absDiff);
                     if (absDiff > mTolerance) {
                         mGlitchCount++;
+                        result = PROCESS_RESULT_GLITCH;
                         //printf("%5d: Got a glitch # %d, predicted = %f, actual = %f\n",
                         //       mFrameCounter, mGlitchCount, predicted, sample);
                         mState = STATE_IMMUNE;
@@ -1018,6 +1026,7 @@
 
             mFrameCounter++;
         }
+        return result;
     }
 
     void resetAccumulator() {
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 3de1514..6578156 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -34,9 +34,13 @@
 #include "AAudioSimpleRecorder.h"
 #include "AAudioExampleUtils.h"
 #include "LoopbackAnalyzer.h"
+#include "../../utils/AAudioExampleUtils.h"
 
-// V0.4.00 = rectify and low-pass filter the echos, use auto-correlation on entire echo
-#define APP_VERSION             "0.4.00"
+// V0.4.00 = rectify and low-pass filter the echoes, auto-correlate entire echo
+// V0.4.01 = add -h hang option
+//           fix -n option to set output buffer for -tm
+//           plot first glitch
+#define APP_VERSION             "0.4.01"
 
 // Tag for machine readable results as property = value pairs
 #define RESULT_TAG              "RESULT: "
@@ -47,10 +51,14 @@
 constexpr int kLogPeriodMillis       = 1000;
 constexpr int kNumInputChannels      = 1;
 constexpr int kNumCallbacksToDrain   = 20;
+constexpr int kNumCallbacksToNotRead = 0; // let input fill back up
 constexpr int kNumCallbacksToDiscard = 20;
+constexpr int kDefaultHangTimeMillis = 50;
+constexpr int kMaxGlitchEventsToSave = 32;
 
 struct LoopbackData {
     AAudioStream      *inputStream = nullptr;
+    AAudioStream      *outputStream = nullptr;
     int32_t            inputFramesMaximum = 0;
     int16_t           *inputShortData = nullptr;
     float             *inputFloatData = nullptr;
@@ -58,6 +66,7 @@
     int32_t            actualInputChannelCount = 0;
     int32_t            actualOutputChannelCount = 0;
     int32_t            numCallbacksToDrain = kNumCallbacksToDrain;
+    int32_t            numCallbacksToNotRead = kNumCallbacksToNotRead;
     int32_t            numCallbacksToDiscard = kNumCallbacksToDiscard;
     int32_t            minNumFrames = INT32_MAX;
     int32_t            maxNumFrames = 0;
@@ -65,6 +74,9 @@
     int32_t            insufficientReadFrames = 0;
     int32_t            framesReadTotal = 0;
     int32_t            framesWrittenTotal = 0;
+    int32_t            hangPeriodMillis = 5 * 1000; // time between hangs
+    int32_t            hangCountdownFrames = 5 * 48000; // frames til next hang
+    int32_t            hangTimeMillis = 0; // 0 for no hang
     bool               isDone = false;
 
     aaudio_result_t    inputError = AAUDIO_OK;
@@ -74,6 +86,29 @@
     EchoAnalyzer       echoAnalyzer;
     AudioRecording     audioRecording;
     LoopbackProcessor *loopbackProcessor;
+
+    int32_t            glitchFrames[kMaxGlitchEventsToSave];
+    int32_t            numGlitchEvents = 0;
+
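+    // Occasionally sleep in the data callback for hangTimeMillis to simulate a hung app and provoke glitches.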
+    void hangIfRequested(int32_t numFrames) {
+        if (hangTimeMillis > 0) {
+            hangCountdownFrames -= numFrames;
+            if (hangCountdownFrames <= 0) {
+                const int64_t startNanos = getNanoseconds();
+                usleep(hangTimeMillis * 1000);
+                const int64_t endNanos = getNanoseconds();
+                const int32_t elapsedMicros = (int32_t)
+                        ((endNanos - startNanos) / 1000);
+                printf("callback hanging for %d millis, actual = %d micros\n",
+                       hangTimeMillis, elapsedMicros);
+                hangCountdownFrames = (int64_t) hangPeriodMillis
+                        * AAudioStream_getSampleRate(outputStream)
+                        / 1000;
+            }
+        }
+    }
 };
 
 static void convertPcm16ToFloat(const int16_t *source,
@@ -166,6 +201,9 @@
             myData->numCallbacksToDrain--;
         }
 
+    } else if (myData->numCallbacksToNotRead > 0) {
+        // Let the input fill up a bit so we are not so close to the write pointer.
+        myData->numCallbacksToNotRead--;
     } else if (myData->numCallbacksToDiscard > 0) {
         // Ignore. Allow the input to fill back up to equilibrium with the output.
         actualFramesRead = readFormattedData(myData, numFrames);
@@ -175,6 +213,7 @@
         myData->numCallbacksToDiscard--;
 
     } else {
+        myData->hangIfRequested(numFrames);
 
         int32_t numInputBytes = numFrames * myData->actualInputChannelCount * sizeof(float);
         memset(myData->inputFloatData, 0 /* value */, numInputBytes);
@@ -191,7 +230,7 @@
 
             if (actualFramesRead < numFrames) {
                 if(actualFramesRead < (int32_t) framesAvailable) {
-                    printf("insufficient but numFrames = %d"
+                    printf("insufficient for no reason, numFrames = %d"
                                    ", actualFramesRead = %d"
                                    ", inputFramesWritten = %d"
                                    ", inputFramesRead = %d"
@@ -212,16 +251,25 @@
             if (myData->actualInputFormat == AAUDIO_FORMAT_PCM_I16) {
                 convertPcm16ToFloat(myData->inputShortData, myData->inputFloatData, numSamples);
             }
-            // Save for later.
-            myData->audioRecording.write(myData->inputFloatData,
-                                         myData->actualInputChannelCount,
-                                         numFrames);
+
             // Analyze the data.
-            myData->loopbackProcessor->process(myData->inputFloatData,
+            LoopbackProcessor::process_result procResult = myData->loopbackProcessor->process(myData->inputFloatData,
                                                myData->actualInputChannelCount,
                                                outputData,
                                                myData->actualOutputChannelCount,
                                                numFrames);
+
+            if (procResult == LoopbackProcessor::PROCESS_RESULT_GLITCH) {
+                if (myData->numGlitchEvents < kMaxGlitchEventsToSave) {
+                    myData->glitchFrames[myData->numGlitchEvents++] = myData->audioRecording.size();
+                }
+            }
+
+            // Save for later.
+            myData->audioRecording.write(myData->inputFloatData,
+                                         myData->actualInputChannelCount,
+                                         actualFramesRead);
+
             myData->isDone = myData->loopbackProcessor->isDone();
             if (myData->isDone) {
                 result = AAUDIO_CALLBACK_RESULT_STOP;
@@ -249,6 +297,7 @@
     printf("      -C{channels}      number of input channels\n");
     printf("      -F{0,1,2}         input format, 1=I16, 2=FLOAT\n");
     printf("      -g{gain}          recirculating loopback gain\n");
+    printf("      -h{hangMillis}    occasionally hang in the callback\n");
     printf("      -P{inPerf}        set input AAUDIO_PERFORMANCE_MODE*\n");
     printf("          n for _NONE\n");
     printf("          l for _LATENCY\n");
@@ -307,9 +356,7 @@
     return testMode;
 }
 
-void printAudioGraph(AudioRecording &recording, int numSamples) {
-    int32_t start = recording.size() / 2;
-    int32_t end = start + numSamples;
+void printAudioGraphRegion(AudioRecording &recording, int32_t start, int32_t end) {
     if (end >= recording.size()) {
         end = recording.size() - 1;
     }
@@ -352,7 +399,7 @@
     int32_t               requestedInputCapacity     = AAUDIO_UNSPECIFIED;
     aaudio_performance_mode_t inputPerformanceLevel  = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
 
-    int32_t               outputFramesPerBurst = 0;
+    int32_t               outputFramesPerBurst       = 0;
 
     aaudio_format_t       actualOutputFormat         = AAUDIO_FORMAT_INVALID;
     int32_t               actualSampleRate           = 0;
@@ -360,6 +407,7 @@
 
     int                   testMode                   = TEST_ECHO_LATENCY;
     double                gain                       = 1.0;
+    int                   hangTimeMillis             = 0;
 
     // Make printf print immediately so that debug info is not stuck
     // in a buffer if we hang or crash.
@@ -389,6 +437,15 @@
                     case 'g':
                         gain = atof(&arg[2]);
                         break;
+                    case 'h':
+                        // Was there a number after the "-h"?
+                        if (arg[2]) {
+                            hangTimeMillis = atoi(&arg[2]);
+                        } else {
+                            // If no number then use the default.
+                            hangTimeMillis = kDefaultHangTimeMillis;
+                        }
+                        break;
                     case 'P':
                         inputPerformanceLevel = parsePerformanceMode(arg[2]);
                         break;
@@ -422,6 +479,8 @@
     int32_t timeMillis = 0;
     int32_t recordingDuration = std::min(60 * 5, requestedDuration);
 
+    int32_t requestedOutputBursts = argParser.getNumberOfBursts();
+
     switch(testMode) {
         case TEST_SINE_MAGNITUDE:
             loopbackData.loopbackProcessor = &loopbackData.sineAnalyzer;
@@ -453,7 +512,7 @@
         fprintf(stderr, "ERROR -  player.open() returned %d\n", result);
         exit(1);
     }
-    outputStream = player.getStream();
+    outputStream = loopbackData.outputStream = player.getStream();
 
     actualOutputFormat = AAudioStream_getFormat(outputStream);
     if (actualOutputFormat != AAUDIO_FORMAT_PCM_FLOAT) {
@@ -489,24 +548,29 @@
 
     {
         int32_t actualCapacity = AAudioStream_getBufferCapacityInFrames(inputStream);
-        result = AAudioStream_setBufferSizeInFrames(inputStream, actualCapacity);
-        if (result < 0) {
-            fprintf(stderr, "ERROR -  AAudioStream_setBufferSizeInFrames() returned %d\n", result);
-            goto finish;
-        } else {}
-    }
+        (void) AAudioStream_setBufferSizeInFrames(inputStream, actualCapacity);
 
-    argParser.compareWithStream(inputStream);
+        if (testMode == TEST_SINE_MAGNITUDE
+                && requestedOutputBursts == AAUDIO_UNSPECIFIED) {
+            result = AAudioStream_setBufferSizeInFrames(outputStream, actualCapacity);
+            if (result < 0) {
+                fprintf(stderr, "ERROR -  AAudioStream_setBufferSizeInFrames(output) returned %d\n",
+                        result);
+                goto finish;
+            } else {
+                printf("Output buffer size set to match input capacity = %d frames!\n", result);
+            }
+        }
 
-    // If the input stream is too small then we cannot satisfy the output callback.
-    {
-        int32_t actualCapacity = AAudioStream_getBufferCapacityInFrames(inputStream);
+        // If the input stream is too small then we cannot satisfy the output callback.
         if (actualCapacity < 2 * outputFramesPerBurst) {
             fprintf(stderr, "ERROR - input capacity < 2 * outputFramesPerBurst\n");
             goto finish;
         }
     }
 
+    argParser.compareWithStream(inputStream);
+
     // ------- Setup loopbackData -----------------------------
     loopbackData.actualInputFormat = AAudioStream_getFormat(inputStream);
 
@@ -525,6 +589,8 @@
 
     loopbackData.loopbackProcessor->reset();
 
+    loopbackData.hangTimeMillis = hangTimeMillis;
+
     // Start OUTPUT first so INPUT does not overflow.
     result = player.start();
     if (result != AAUDIO_OK) {
@@ -611,7 +677,17 @@
 
     if (loopbackData.inputError == AAUDIO_OK) {
         if (testMode == TEST_SINE_MAGNITUDE) {
-            printAudioGraph(loopbackData.audioRecording, 200);
+            if (loopbackData.numGlitchEvents > 0) {
+                // Graph around the first glitch if there is one.
+                const int32_t start = loopbackData.glitchFrames[0] - 8;
+                const int32_t end = start + outputFramesPerBurst + 8 + 8;
+                printAudioGraphRegion(loopbackData.audioRecording, start, end);
+            } else {
+                // Or graph the middle of the signal.
+                const int32_t start = loopbackData.audioRecording.size() / 2;
+                const int32_t end = start + 200;
+                printAudioGraphRegion(loopbackData.audioRecording, start, end);
+            }
         }
 
         loopbackData.loopbackProcessor->report();
@@ -661,6 +737,11 @@
     delete[] loopbackData.inputShortData;
 
 report_result:
+
+    for (int i = 0; i < loopbackData.numGlitchEvents; i++) {
+        printf("  glitch at frame %d\n", loopbackData.glitchFrames[i]);
+    }
+
     written = loopbackData.loopbackProcessor->save(FILENAME_PROCESSED);
     if (written > 0) {
         printf("main() wrote %8d processed samples to \"%s\" on Android device\n",
diff --git a/media/libaaudio/examples/utils/AAudioArgsParser.h b/media/libaaudio/examples/utils/AAudioArgsParser.h
index a5dc55f..f5ed7aa 100644
--- a/media/libaaudio/examples/utils/AAudioArgsParser.h
+++ b/media/libaaudio/examples/utils/AAudioArgsParser.h
@@ -130,12 +130,10 @@
     }
 
     int32_t getBufferCapacity() const {
-        printf("%s() returns %d\n", __func__, mBufferCapacity);
         return mBufferCapacity;
     }
 
     void setBufferCapacity(int32_t frames) {
-        printf("%s(%d)\n", __func__, frames);
         mBufferCapacity = frames;
     }
 
diff --git a/media/libaaudio/examples/utils/AAudioSimplePlayer.h b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
index 1645986..4373fa9 100644
--- a/media/libaaudio/examples/utils/AAudioSimplePlayer.h
+++ b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
@@ -32,8 +32,6 @@
 
 // Arbitrary period for glitches
 #define FORCED_UNDERRUN_PERIOD_FRAMES    (2 * 48000)
-// How long to sleep in a callback to cause an intentional glitch. For testing.
-#define FORCED_UNDERRUN_SLEEP_MICROS     (10 * 1000)
 
 #define MAX_TIMESTAMPS                   16
 
@@ -275,7 +273,7 @@
 
     int                scheduler = 0;
     bool               schedulerChecked = false;
-    bool               forceUnderruns = false;
+    int32_t            hangTimeMSec = 0;
 
     AAudioSimplePlayer simplePlayer;
     int32_t            callbackCount = 0;
@@ -327,10 +325,12 @@
         sineData->setupSineSweeps();
     }
 
-    if (sineData->forceUnderruns) {
+    if (sineData->hangTimeMSec > 0) {
         if (sineData->framesTotal > sineData->nextFrameToGlitch) {
-            usleep(FORCED_UNDERRUN_SLEEP_MICROS);
-            printf("Simulate glitch at %lld\n", (long long) sineData->framesTotal);
+            usleep(sineData->hangTimeMSec * 1000);
+            printf("Hang callback at %lld frames for %d msec\n",
+                    (long long) sineData->framesTotal,
+                   sineData->hangTimeMSec);
             sineData->nextFrameToGlitch += FORCED_UNDERRUN_PERIOD_FRAMES;
         }
     }
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index 7a48153..2b05f10 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -26,11 +26,14 @@
 #include <string.h>
 #include <time.h>
 #include <aaudio/AAudio.h>
+
 #include "AAudioExampleUtils.h"
 #include "AAudioSimplePlayer.h"
 #include "AAudioArgsParser.h"
 
-#define APP_VERSION  "0.1.5"
+#define APP_VERSION  "0.1.6"
+
+constexpr int32_t kDefaultHangTimeMSec = 10;
 
 /**
  * Open stream, play some sine waves, then close the stream.
@@ -41,7 +44,7 @@
 static aaudio_result_t testOpenPlayClose(AAudioArgsParser &argParser,
                                          int32_t loopCount,
                                          int32_t prefixToneMsec,
-                                         bool forceUnderruns)
+                                         int32_t hangTimeMSec)
 {
     SineThreadedData_t myData;
     AAudioSimplePlayer &player = myData.simplePlayer;
@@ -53,10 +56,12 @@
     printf("----------------------- run complete test --------------------------\n");
     myData.schedulerChecked = false;
     myData.callbackCount = 0;
-    myData.forceUnderruns = forceUnderruns; // test AAudioStream_getXRunCount()
+    myData.hangTimeMSec = hangTimeMSec; // test AAudioStream_getXRunCount()
 
     result = player.open(argParser,
-                         SimplePlayerDataCallbackProc, SimplePlayerErrorCallbackProc, &myData);
+                         SimplePlayerDataCallbackProc,
+                         SimplePlayerErrorCallbackProc,
+                         &myData);
     if (result != AAUDIO_OK) {
         fprintf(stderr, "ERROR -  player.open() returned %s\n",
                 AAudio_convertResultToText(result));
@@ -115,12 +120,17 @@
             int64_t millis =
                     (getNanoseconds(CLOCK_MONOTONIC) - startedAtNanos) / NANOS_PER_MILLISECOND;
             result = myData.waker.get();
+            const int32_t framesWritten = (int32_t) AAudioStream_getFramesWritten(player.getStream());
+            const int32_t framesRead = (int32_t) AAudioStream_getFramesRead(player.getStream());
+            const int32_t xruns = AAudioStream_getXRunCount(player.getStream());
             printf(" waker result = %d, at %6d millis"
-                           ", second = %3d, framesWritten = %8d, underruns = %d\n",
+                           ", second = %3d, frames written %8d - read %8d = %8d, underruns = %d\n",
                    result, (int) millis,
                    second,
-                   (int) AAudioStream_getFramesWritten(player.getStream()),
-                   (int) AAudioStream_getXRunCount(player.getStream()));
+                   framesWritten,
+                   framesRead,
+                   framesWritten - framesRead,
+                   xruns);
             if (result != AAUDIO_OK) {
                 disconnected = (result == AAUDIO_ERROR_DISCONNECTED);
                 bailOut = true;
@@ -210,7 +220,9 @@
     AAudioArgsParser::usage();
     printf("      -l{count} loopCount start/stop, every other one is silent\n");
     printf("      -t{msec}  play a high pitched tone at the beginning\n");
-    printf("      -z        force periodic underruns by sleeping in callback\n");
+    printf("      -h{msec}  force periodic underruns by hanging in callback\n");
+    printf("                If no value is specified then %d is used.\n",
+            kDefaultHangTimeMSec);
 }
 
 int main(int argc, const char **argv)
@@ -219,13 +231,14 @@
     aaudio_result_t    result;
     int32_t            loopCount = 1;
     int32_t            prefixToneMsec = 0;
-    bool               forceUnderruns = false;
+    int32_t            hangTimeMSec = 0;
 
     // Make printf print immediately so that debug info is not stuck
     // in a buffer if we hang or crash.
     setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
 
-    printf("%s - Play a sine sweep using an AAudio callback V%s\n", argv[0], APP_VERSION);
+    printf("%s - Play a sine sweep using an AAudio callback V%s\n",
+        argv[0], APP_VERSION);
 
     for (int i = 1; i < argc; i++) {
         const char *arg = argv[i];
@@ -240,8 +253,10 @@
                     case 't':
                         prefixToneMsec = atoi(&arg[2]);
                         break;
-                    case 'z':
-                        forceUnderruns = true;  // Zzzzzzz
+                    case 'h':
+                        hangTimeMSec = (arg[2]) // value specified?
+                                ? atoi(&arg[2])
+                                : kDefaultHangTimeMSec;
                         break;
                     default:
                         usage();
@@ -257,7 +272,8 @@
     }
 
     // Keep looping until we can complete the test without disconnecting.
-    while((result = testOpenPlayClose(argParser, loopCount, prefixToneMsec, forceUnderruns))
+    while((result = testOpenPlayClose(argParser, loopCount,
+            prefixToneMsec, hangTimeMSec))
             == AAUDIO_ERROR_DISCONNECTED);
 
     return (result) ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 1417aaf..b111b78 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -5,6 +5,28 @@
 }
 
 cc_library_shared {
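+    // Client-side audio policy data types (attributes, policy config, product strategies) used by libaudioclient.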
+    name: "libaudiopolicy",
+    srcs: [
+        "AudioAttributes.cpp",
+        "AudioPolicy.cpp",
+        "AudioProductStrategy.cpp",
+    ],
+    shared_libs: [
+        "libaudioutils",
+        "libbinder",
+        "libcutils",
+        "liblog",
+        "libutils",
+    ],
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+    include_dirs: ["system/media/audio_utils/include"],
+    export_include_dirs: ["include"],
+}
+
+cc_library_shared {
     name: "libaudioclient",
 
     aidl: {
@@ -23,7 +45,6 @@
         ":libaudioclient_aidl",
 
         "AudioEffect.cpp",
-        "AudioPolicy.cpp",
         "AudioRecord.cpp",
         "AudioSystem.cpp",
         "AudioTrack.cpp",
@@ -41,6 +62,7 @@
     ],
     shared_libs: [
         "libaudioutils",
+        "libaudiopolicy",
         "libaudiomanager",
         "libbinder",
         "libcutils",
@@ -52,6 +74,7 @@
         "libnblog",
         "libprocessgroup",
         "libutils",
+        "libvibrator",
     ],
     export_shared_lib_headers: ["libbinder"],
 
diff --git a/media/libaudioclient/AudioAttributes.cpp b/media/libaudioclient/AudioAttributes.cpp
new file mode 100644
index 0000000..0f327cf
--- /dev/null
+++ b/media/libaudioclient/AudioAttributes.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioAttributes"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <binder/Parcel.h>
+
+#include <media/AudioAttributes.h>
+
+namespace android {
+
+status_t AudioAttributes::readFromParcel(const Parcel *parcel)
+{
+    status_t ret = NO_ERROR;
+    mAttributes.content_type = static_cast<audio_content_type_t>(parcel->readInt32());
+    mAttributes.usage = static_cast<audio_usage_t>(parcel->readInt32());
+    mAttributes.source = static_cast<audio_source_t>(parcel->readInt32());
+    mAttributes.flags = static_cast<audio_flags_mask_t>(parcel->readInt32());
+    const bool hasFlattenedTag = (parcel->readInt32() == 1);
+    if (hasFlattenedTag) {
+        std::string tags;
+        ret = parcel->readUtf8FromUtf16(&tags);
+        if (ret != NO_ERROR) {
+            return ret;
+        }
+        std::strncpy(mAttributes.tags, tags.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
+    } else {
+        strcpy(mAttributes.tags, "");
+    }
+    mStreamType = static_cast<audio_stream_type_t>(parcel->readInt32());
+    mGroupId = parcel->readUint32();
+    return NO_ERROR;
+}
+
+status_t AudioAttributes::writeToParcel(Parcel *parcel) const
+{
+    parcel->writeInt32(static_cast<int32_t>(mAttributes.content_type));
+    parcel->writeInt32(static_cast<int32_t>(mAttributes.usage));
+    parcel->writeInt32(static_cast<int32_t>(mAttributes.source));
+    parcel->writeInt32(static_cast<int32_t>(mAttributes.flags));
+    if (strlen(mAttributes.tags) == 0) {
+        parcel->writeInt32(0);
+    } else {
+        parcel->writeInt32(1);
+        parcel->writeUtf8AsUtf16(mAttributes.tags);
+    }
+    parcel->writeInt32(static_cast<int32_t>(mStreamType));
+    parcel->writeUint32(mGroupId);
+    return NO_ERROR;
+}
+
+} // namespace android
diff --git a/media/libaudioclient/AudioProductStrategy.cpp b/media/libaudioclient/AudioProductStrategy.cpp
new file mode 100644
index 0000000..1da1114
--- /dev/null
+++ b/media/libaudioclient/AudioProductStrategy.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioProductStrategy"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+#include <media/AudioProductStrategy.h>
+#include <media/AudioAttributes.h>
+#include <media/AudioSystem.h>
+
+namespace android {
+
+status_t AudioProductStrategy::readFromParcel(const Parcel *parcel)
+{
+    mId = static_cast<product_strategy_t>(parcel->readInt32());
+    status_t ret = parcel->readUtf8FromUtf16(&mName);
+    if (ret != NO_ERROR) {
+        return ret;
+    }
+    size_t size = static_cast<size_t>(parcel->readInt32());
+    for (size_t i = 0; i < size; i++) {
+        AudioAttributes attribute;
+        ret = attribute.readFromParcel(parcel);
+        if (ret != NO_ERROR) {
+            mAudioAttributes.clear();
+            return ret;
+        }
+        mAudioAttributes.push_back(attribute);
+    }
+    return NO_ERROR;
+}
+
+status_t AudioProductStrategy::writeToParcel(Parcel *parcel) const
+{
+    parcel->writeInt32(static_cast<int32_t>(mId));
+    parcel->writeUtf8AsUtf16(mName);
+    size_t size = mAudioAttributes.size();
+    size_t sizePosition = parcel->dataPosition();
+    parcel->writeInt32(size);
+    size_t finalSize = size;
+
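+    // Flatten each attribute; if one fails to write, rewind the parcel and drop it from the final count.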
+    for (size_t i = 0; i < size; i++) {
+        size_t position = parcel->dataPosition();
+        AudioAttributes attribute(mAudioAttributes[i]);
+        status_t ret = attribute.writeToParcel(parcel);
+        if (ret != NO_ERROR) {
+            parcel->setDataPosition(position);
+            finalSize--;
+        }
+    }
+    if (size != finalSize) {
+        size_t position = parcel->dataPosition();
+        parcel->setDataPosition(sizePosition);
+        parcel->writeInt32(finalSize);
+        parcel->setDataPosition(position);
+    }
+    return NO_ERROR;
+}
+
+bool AudioProductStrategy::attributesMatches(const audio_attributes_t refAttributes,
+                                        const audio_attributes_t clientAttributes)
+{
+    if (refAttributes == AUDIO_ATTRIBUTES_INITIALIZER) {
+        // The default product strategy is the strategy that holds default attributes by convention.
+        // All attributes that fail to match will follow the default strategy for routing.
+        // Choosing the default must be done as a fallback; the attributes match shall not
+        // select the default.
+        return false;
+    }
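+    // A reference field left at its default (unknown usage/content type, no flags, empty tags) acts as a wildcard.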
+    return ((refAttributes.usage == AUDIO_USAGE_UNKNOWN) ||
+            (clientAttributes.usage == refAttributes.usage)) &&
+            ((refAttributes.content_type == AUDIO_CONTENT_TYPE_UNKNOWN) ||
+             (clientAttributes.content_type == refAttributes.content_type)) &&
+            ((refAttributes.flags == AUDIO_FLAG_NONE) ||
+             (clientAttributes.flags != AUDIO_FLAG_NONE &&
+            (clientAttributes.flags & refAttributes.flags) == clientAttributes.flags)) &&
+            ((strlen(refAttributes.tags) == 0) ||
+             (std::strcmp(clientAttributes.tags, refAttributes.tags) == 0));
+}
+
+} // namespace android
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 72a23e3..baa1469 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -355,7 +355,10 @@
     }
 
     // create the IAudioRecord
-    status = createRecord_l(0 /*epoch*/, mOpPackageName);
+    {
+        AutoMutex lock(mLock);
+        status = createRecord_l(0 /*epoch*/, mOpPackageName);
+    }
 
     ALOGV("%s(%d): status %d", __func__, mPortId, status);
 
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 896198b..01d9b3d 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -25,6 +25,7 @@
 #include <media/AudioSystem.h>
 #include <media/IAudioFlinger.h>
 #include <media/IAudioPolicyService.h>
+#include <media/TypeConverter.h>
 #include <math.h>
 
 #include <system/audio.h>
@@ -521,10 +522,12 @@
     if (ioDesc == 0 || ioDesc->mIoHandle == AUDIO_IO_HANDLE_NONE) return;
 
     audio_port_handle_t deviceId = AUDIO_PORT_HANDLE_NONE;
-    Vector < wp<AudioDeviceCallback> > callbacks;
-
+    Vector<sp<AudioDeviceCallback>> callbacksToCall;
     {
         Mutex::Autolock _l(mLock);
+        bool deviceValidOrChanged = false;
+        bool sendCallbacks = false;
+        ssize_t ioIndex = -1;
 
         switch (event) {
         case AUDIO_OUTPUT_OPENED:
@@ -542,11 +545,17 @@
             if (ioDesc->getDeviceId() != AUDIO_PORT_HANDLE_NONE) {
                 deviceId = ioDesc->getDeviceId();
                 if (event == AUDIO_OUTPUT_OPENED || event == AUDIO_INPUT_OPENED) {
-                    ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(ioDesc->mIoHandle);
+                    ioIndex = mAudioDeviceCallbackProxies.indexOfKey(ioDesc->mIoHandle);
                     if (ioIndex >= 0) {
-                        callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
+                        sendCallbacks = true;
+                        deviceValidOrChanged = true;
                     }
                 }
+                if (event == AUDIO_OUTPUT_REGISTERED || event == AUDIO_INPUT_REGISTERED) {
+                    ioIndex = mAudioDeviceCallbackProxies.indexOfKey(ioDesc->mIoHandle);
+                    sendCallbacks = (ioIndex >= 0)
+                            && !mAudioDeviceCallbackProxies.valueAt(ioIndex).notifiedOnce();
+                }
             }
             ALOGV("ioConfigChanged() new %s %s %d samplingRate %u, format %#x channel mask %#x "
                     "frameCount %zu deviceId %d",
@@ -568,7 +577,7 @@
                   event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->mIoHandle);
 
             mIoDescriptors.removeItem(ioDesc->mIoHandle);
-            mAudioDeviceCallbacks.removeItem(ioDesc->mIoHandle);
+            mAudioDeviceCallbackProxies.removeItem(ioDesc->mIoHandle);
             } break;
 
         case AUDIO_OUTPUT_CONFIG_CHANGED:
@@ -583,11 +592,10 @@
             mIoDescriptors.replaceValueFor(ioDesc->mIoHandle, ioDesc);
 
             if (deviceId != ioDesc->getDeviceId()) {
+                deviceValidOrChanged = true;
                 deviceId = ioDesc->getDeviceId();
-                ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(ioDesc->mIoHandle);
-                if (ioIndex >= 0) {
-                    callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
-                }
+                ioIndex = mAudioDeviceCallbackProxies.indexOfKey(ioDesc->mIoHandle);
+                sendCallbacks = ioIndex >= 0;
             }
             ALOGV("ioConfigChanged() new config for %s %d samplingRate %u, format %#x "
                     "channel mask %#x frameCount %zu frameCountHAL %zu deviceId %d",
@@ -598,24 +606,34 @@
 
         } break;
         }
-    }
-    bool callbackRemoved = false;
-    // callbacks.size() != 0 =>  ioDesc->mIoHandle and deviceId are valid
-    for (size_t i = 0; i < callbacks.size(); ) {
-        sp<AudioDeviceCallback> callback = callbacks[i].promote();
-        if (callback.get() != nullptr) {
-            callback->onAudioDeviceUpdate(ioDesc->mIoHandle, deviceId);
-            i++;
-        } else {
-            callbacks.removeAt(i);
-            callbackRemoved = true;
+
+        // sendCallbacks true =>  ioDesc->mIoHandle and deviceId are valid
+        if (sendCallbacks) {
+            AudioDeviceCallbackProxies &callbackProxies =
+                mAudioDeviceCallbackProxies.editValueAt(ioIndex);
+            for (size_t i = 0; i < callbackProxies.size(); ) {
+                sp<AudioDeviceCallback> callback = callbackProxies[i]->callback();
+                if (callback.get() != nullptr) {
+                    // Call the callback only if the device actually changed, the input or output
+                    // was opened or closed, or the client was newly registered and the callback
+                    // was never called.
+                    if (!callbackProxies[i]->notifiedOnce() || deviceValidOrChanged) {
+                        callbacksToCall.add(callback);
+                        callbackProxies[i]->setNotifiedOnce();
+                    }
+                    i++;
+                } else {
+                    callbackProxies.removeAt(i);
+                }
+            }
+            callbackProxies.setNotifiedOnce();
         }
     }
-    // clean up callback list while we are here if some clients have disappeared without
-    // unregistering their callback
-    if (callbackRemoved) {
-        Mutex::Autolock _l(mLock);
-        mAudioDeviceCallbacks.replaceValueFor(ioDesc->mIoHandle, callbacks);
+
+    // Callbacks must be called without mLock held. Holding it may lead to a deadlock if, for
+    // example, a callback calls getRoutedDevice, which updates the device and tries to acquire mLock.
+    for (size_t i = 0; i < callbacksToCall.size(); i++) {
+        callbacksToCall[i]->onAudioDeviceUpdate(ioDesc->mIoHandle, deviceId);
     }
 }
 
@@ -671,20 +689,21 @@
         const wp<AudioDeviceCallback>& callback, audio_io_handle_t audioIo)
 {
     Mutex::Autolock _l(mLock);
-    Vector < wp<AudioDeviceCallback> > callbacks;
-    ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(audioIo);
+    AudioDeviceCallbackProxies callbackProxies;
+    ssize_t ioIndex = mAudioDeviceCallbackProxies.indexOfKey(audioIo);
     if (ioIndex >= 0) {
-        callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
+        callbackProxies = mAudioDeviceCallbackProxies.valueAt(ioIndex);
     }
 
-    for (size_t cbIndex = 0; cbIndex < callbacks.size(); cbIndex++) {
-        if (callbacks[cbIndex].unsafe_get() == callback.unsafe_get()) {
+    for (size_t cbIndex = 0; cbIndex < callbackProxies.size(); cbIndex++) {
+        sp<AudioDeviceCallback> cbk = callbackProxies[cbIndex]->callback();
+        if (cbk.get() == callback.unsafe_get()) {
             return INVALID_OPERATION;
         }
     }
-    callbacks.add(callback);
-
-    mAudioDeviceCallbacks.replaceValueFor(audioIo, callbacks);
+    callbackProxies.add(new AudioDeviceCallbackProxy(callback));
+    callbackProxies.resetNotifiedOnce();
+    mAudioDeviceCallbackProxies.replaceValueFor(audioIo, callbackProxies);
     return NO_ERROR;
 }
 
@@ -692,26 +711,26 @@
         const wp<AudioDeviceCallback>& callback, audio_io_handle_t audioIo)
 {
     Mutex::Autolock _l(mLock);
-    ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(audioIo);
+    ssize_t ioIndex = mAudioDeviceCallbackProxies.indexOfKey(audioIo);
     if (ioIndex < 0) {
         return INVALID_OPERATION;
     }
-    Vector < wp<AudioDeviceCallback> > callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
-
+    AudioDeviceCallbackProxies callbackProxies = mAudioDeviceCallbackProxies.valueAt(ioIndex);
     size_t cbIndex;
-    for (cbIndex = 0; cbIndex < callbacks.size(); cbIndex++) {
-        if (callbacks[cbIndex].unsafe_get() == callback.unsafe_get()) {
+    for (cbIndex = 0; cbIndex < callbackProxies.size(); cbIndex++) {
+        sp<AudioDeviceCallback> cbk = callbackProxies[cbIndex]->callback();
+        if (cbk.get() == callback.unsafe_get()) {
             break;
         }
     }
-    if (cbIndex == callbacks.size()) {
+    if (cbIndex == callbackProxies.size()) {
         return INVALID_OPERATION;
     }
-    callbacks.removeAt(cbIndex);
-    if (callbacks.size() != 0) {
-        mAudioDeviceCallbacks.replaceValueFor(audioIo, callbacks);
+    callbackProxies.removeAt(cbIndex);
+    if (callbackProxies.size() != 0) {
+        mAudioDeviceCallbackProxies.replaceValueFor(audioIo, callbackProxies);
     } else {
-        mAudioDeviceCallbacks.removeItem(audioIo);
+        mAudioDeviceCallbackProxies.removeItem(audioIo);
     }
     return NO_ERROR;
 }
@@ -871,13 +890,14 @@
                                         const audio_config_t *config,
                                         audio_output_flags_t flags,
                                         audio_port_handle_t *selectedDeviceId,
-                                        audio_port_handle_t *portId)
+                                        audio_port_handle_t *portId,
+                                        std::vector<audio_io_handle_t> *secondaryOutputs)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return NO_INIT;
     return aps->getOutputForAttr(attr, output, session, stream, pid, uid,
                                  config,
-                                 flags, selectedDeviceId, portId);
+                                 flags, selectedDeviceId, portId, secondaryOutputs);
 }
 
 status_t AudioSystem::startOutput(audio_port_handle_t portId)
@@ -970,7 +990,7 @@
 uint32_t AudioSystem::getStrategyForStream(audio_stream_type_t stream)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
-    if (aps == 0) return 0;
+    if (aps == 0) return PRODUCT_STRATEGY_NONE;
     return aps->getStrategyForStream(stream);
 }
 
@@ -1327,7 +1347,6 @@
     return aps->setSurroundFormatEnabled(audioFormat, enabled);
 }
 
-
 status_t AudioSystem::setAssistantUid(uid_t uid)
 {
     const sp <IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
@@ -1352,11 +1371,62 @@
 }
 
 status_t AudioSystem::getHwOffloadEncodingFormatsSupportedForA2DP(
-                                std::vector<audio_format_t> *formats)
+                                std::vector<audio_format_t> *formats) {
+    const sp <IAudioPolicyService>
+        & aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+    return aps->getHwOffloadEncodingFormatsSupportedForA2DP(formats);
+}
+
+status_t AudioSystem::listAudioProductStrategies(AudioProductStrategyVector &strategies)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
-    return aps->getHwOffloadEncodingFormatsSupportedForA2DP(formats);
+    return aps->listAudioProductStrategies(strategies);
+}
+
+audio_attributes_t AudioSystem::streamTypeToAttributes(audio_stream_type_t stream)
+{
+    AudioProductStrategyVector strategies;
+    listAudioProductStrategies(strategies);
+    for (const auto &strategy : strategies) {
+        auto attrVect = strategy.getAudioAttributes();
+        auto iter = std::find_if(begin(attrVect), end(attrVect), [&stream](const auto &attributes) {
+                         return attributes.getStreamType() == stream; });
+        if (iter != end(attrVect)) {
+            return iter->getAttributes();
+        }
+    }
+    ALOGE("invalid stream type %s when converting to attributes",  toString(stream).c_str());
+    return AUDIO_ATTRIBUTES_INITIALIZER;
+}
+
+audio_stream_type_t AudioSystem::attributesToStreamType(const audio_attributes_t &attr)
+{
+    product_strategy_t strategyId =
+            AudioSystem::getProductStrategyFromAudioAttributes(AudioAttributes(attr));
+    AudioProductStrategyVector strategies;
+    listAudioProductStrategies(strategies);
+    for (const auto &strategy : strategies) {
+        if (strategy.getId() == strategyId) {
+            auto attrVect = strategy.getAudioAttributes();
+            auto iter = std::find_if(begin(attrVect), end(attrVect), [&attr](const auto &refAttr) {
+                             return AudioProductStrategy::attributesMatches(
+                                 refAttr.getAttributes(), attr); });
+            if (iter != end(attrVect)) {
+                return iter->getStreamType();
+            }
+        }
+    }
+    ALOGE("invalid attributes %s when converting to stream",  toString(attr).c_str());
+    return AUDIO_STREAM_MUSIC;
+}
+
+product_strategy_t AudioSystem::getProductStrategyFromAudioAttributes(const AudioAttributes &aa)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PRODUCT_STRATEGY_NONE;
+    return aps->getProductStrategyFromAudioAttributes(aa);
 }
 
 // ---------------------------------------------------------------------------
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index c2ee2ee..7881bb8 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -33,7 +33,6 @@
 #include <media/IAudioFlinger.h>
 #include <media/IAudioPolicyService.h>
 #include <media/AudioParameter.h>
-#include <media/AudioPolicyHelper.h>
 #include <media/AudioResamplerPublic.h>
 #include <media/AudioSystem.h>
 #include <media/MediaAnalyticsItem.h>
@@ -487,7 +486,7 @@
                 __func__,
                  mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
         mStreamType = AUDIO_STREAM_DEFAULT;
-        audio_attributes_flags_to_audio_output_flags(mAttributes.flags, flags);
+        audio_flags_to_audio_output_flags(mAttributes.flags, &flags);
     }
 
     // these below should probably come from the audioFlinger too...
@@ -622,8 +621,10 @@
     }
 
     // create the IAudioTrack
-    status = createTrack_l();
-
+    {
+        AutoMutex lock(mLock);
+        status = createTrack_l();
+    }
     if (status != NO_ERROR) {
         if (mAudioTrackThread != 0) {
             mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
@@ -1390,7 +1391,7 @@
 audio_stream_type_t AudioTrack::streamType() const
 {
     if (mStreamType == AUDIO_STREAM_DEFAULT) {
-        return audio_attributes_to_stream_type(&mAttributes);
+        return AudioSystem::attributesToStreamType(mAttributes);
     }
     return mStreamType;
 }
@@ -1473,7 +1474,7 @@
 
     IAudioFlinger::CreateTrackInput input;
     if (mStreamType != AUDIO_STREAM_DEFAULT) {
-        stream_type_to_audio_attributes(mStreamType, &input.attr);
+        input.attr = AudioSystem::streamTypeToAttributes(mStreamType);
     } else {
         input.attr = mAttributes;
     }
@@ -2891,7 +2892,8 @@
                         mPortId, mStatus, mState, mSessionId, mFlags);
     result.appendFormat("  stream type(%d), left - right volume(%f, %f)\n",
                         (mStreamType == AUDIO_STREAM_DEFAULT) ?
-                                audio_attributes_to_stream_type(&mAttributes) : mStreamType,
+                            AudioSystem::attributesToStreamType(mAttributes) :
+                            mStreamType,
                         mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
     result.appendFormat("  format(%#x), channel mask(%#x), channel count(%u)\n",
                   mFormat, mChannelMask, mChannelCount);
@@ -2959,7 +2961,7 @@
     }
     AutoMutex lock(mLock);
     if (mDeviceCallback.unsafe_get() != callback.get()) {
-        ALOGW("%s(%d): removing different callback!", __func__, mPortId);
+        ALOGW("%s removing different callback!", __FUNCTION__);
         return INVALID_OPERATION;
     }
     mDeviceCallback.clear();
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 8c7fac5..1bce16f 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -92,7 +92,9 @@
     IS_HAPTIC_PLAYBACK_SUPPORTED,
     SET_UID_DEVICE_AFFINITY,
     REMOVE_UID_DEVICE_AFFINITY,
-    GET_OFFLOAD_FORMATS_A2DP
+    GET_OFFLOAD_FORMATS_A2DP,
+    LIST_AUDIO_PRODUCT_STRATEGIES,
+    GET_STRATEGY_FOR_ATTRIBUTES,
 };
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -187,16 +189,17 @@
         return static_cast <audio_io_handle_t> (reply.readInt32());
     }
 
-    virtual status_t getOutputForAttr(const audio_attributes_t *attr,
-                                        audio_io_handle_t *output,
-                                        audio_session_t session,
-                                        audio_stream_type_t *stream,
-                                        pid_t pid,
-                                        uid_t uid,
-                                        const audio_config_t *config,
-                                        audio_output_flags_t flags,
-                                        audio_port_handle_t *selectedDeviceId,
-                                        audio_port_handle_t *portId)
+    status_t getOutputForAttr(const audio_attributes_t *attr,
+                              audio_io_handle_t *output,
+                              audio_session_t session,
+                              audio_stream_type_t *stream,
+                              pid_t pid,
+                              uid_t uid,
+                              const audio_config_t *config,
+                              audio_output_flags_t flags,
+                              audio_port_handle_t *selectedDeviceId,
+                              audio_port_handle_t *portId,
+                              std::vector<audio_io_handle_t> *secondaryOutputs) override
         {
             Parcel data, reply;
             data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -222,6 +225,10 @@
                 ALOGE("getOutputForAttr NULL portId - shouldn't happen");
                 return BAD_VALUE;
             }
+            if (secondaryOutputs == NULL) {
+                ALOGE("getOutputForAttr NULL secondaryOutputs - shouldn't happen");
+                return BAD_VALUE;
+            }
             if (attr == NULL) {
                 data.writeInt32(0);
             } else {
@@ -256,7 +263,9 @@
             }
             *selectedDeviceId = (audio_port_handle_t)reply.readInt32();
             *portId = (audio_port_handle_t)reply.readInt32();
-            return status;
+            secondaryOutputs->resize(reply.readInt32());
+            return reply.read(secondaryOutputs->data(),
+                              secondaryOutputs->size() * sizeof(audio_io_handle_t));
         }
 
     virtual status_t startOutput(audio_port_handle_t portId)
@@ -412,7 +421,7 @@
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(static_cast <uint32_t>(stream));
         remote()->transact(GET_STRATEGY_FOR_STREAM, data, &reply);
-        return reply.readInt32();
+        return reply.readUint32();
     }
 
     virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream)
@@ -1051,19 +1060,61 @@
         return status;
     }
 
-    virtual status_t removeUidDeviceAffinities(uid_t uid)
-    {
+    virtual status_t removeUidDeviceAffinities(uid_t uid) {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
 
         data.writeInt32((int32_t) uid);
 
-        status_t status = remote()->transact(REMOVE_UID_DEVICE_AFFINITY, data, &reply);
+        status_t status =
+            remote()->transact(REMOVE_UID_DEVICE_AFFINITY, data, &reply);
         if (status == NO_ERROR) {
-            status = (status_t)reply.readInt32();
+            status = (status_t) reply.readInt32();
         }
         return status;
     }
+
+    virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+
+        status_t status = remote()->transact(LIST_AUDIO_PRODUCT_STRATEGIES, data, &reply);
+        if (status != NO_ERROR) {
+            ALOGE("%s: permission denied", __func__);
+            return status;
+        }
+        status = static_cast<status_t>(reply.readInt32());
+        if (status == NO_ERROR) {
+            uint32_t numStrategies = static_cast<uint32_t>(reply.readInt32());
+            for (size_t i = 0; i < numStrategies; i++) {
+                AudioProductStrategy strategy;
+                status = strategy.readFromParcel(&reply);
+                if (status != NO_ERROR) {
+                    ALOGE("%s: failed to read strategies", __FUNCTION__);
+                    strategies.clear();
+                    return status;
+                }
+                strategies.push_back(strategy);
+            }
+        }
+        return status;
+    }
+
+    virtual product_strategy_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        status_t status = aa.writeToParcel(&data);
+        if (status != NO_ERROR) {
+            return PRODUCT_STRATEGY_NONE;
+        }
+        status = remote()->transact(GET_STRATEGY_FOR_ATTRIBUTES, data, &reply);
+        if (status == NO_ERROR) {
+            return static_cast<product_strategy_t>(reply.readInt32());
+        }
+        return PRODUCT_STRATEGY_NONE;
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -1082,7 +1133,6 @@
         case START_INPUT:
         case STOP_INPUT:
         case RELEASE_INPUT:
-        case GET_STRATEGY_FOR_STREAM:
         case GET_OUTPUT_FOR_EFFECT:
         case REGISTER_EFFECT:
         case UNREGISTER_EFFECT:
@@ -1117,8 +1167,6 @@
         case SET_STREAM_VOLUME:
         case REGISTER_POLICY_MIXES:
         case SET_MASTER_MONO:
-        case START_AUDIO_SOURCE:
-        case STOP_AUDIO_SOURCE:
         case GET_SURROUND_FORMATS:
         case SET_SURROUND_FORMAT_ENABLED:
         case SET_ASSISTANT_UID:
@@ -1257,16 +1305,19 @@
             audio_port_handle_t selectedDeviceId = data.readInt32();
             audio_port_handle_t portId = (audio_port_handle_t)data.readInt32();
             audio_io_handle_t output = 0;
+            std::vector<audio_io_handle_t> secondaryOutputs;
             status_t status = getOutputForAttr(hasAttributes ? &attr : NULL,
                     &output, session, &stream, pid, uid,
                     &config,
-                    flags, &selectedDeviceId, &portId);
+                    flags, &selectedDeviceId, &portId, &secondaryOutputs);
             reply->writeInt32(status);
             reply->writeInt32(output);
             reply->writeInt32(stream);
             reply->writeInt32(selectedDeviceId);
             reply->writeInt32(portId);
-            return NO_ERROR;
+            reply->writeInt32(secondaryOutputs.size());
+            return reply->write(secondaryOutputs.data(),
+                                secondaryOutputs.size() * sizeof(audio_io_handle_t));
         } break;
 
         case START_OUTPUT: {
@@ -1378,7 +1429,7 @@
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_stream_type_t stream =
                     static_cast <audio_stream_type_t>(data.readInt32());
-            reply->writeInt32(getStrategyForStream(stream));
+            reply->writeUint32(getStrategyForStream(stream));
             return NO_ERROR;
         } break;
 
@@ -1935,6 +1986,46 @@
             return NO_ERROR;
         }
 
+        case LIST_AUDIO_PRODUCT_STRATEGIES: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            AudioProductStrategyVector strategies;
+            status_t status = listAudioProductStrategies(strategies);
+            reply->writeInt32(status);
+            if (status != NO_ERROR) {
+                return status;
+            }
+            size_t size = strategies.size();
+            size_t sizePosition = reply->dataPosition();
+            reply->writeInt32(size);
+            size_t finalSize = size;
+            for (size_t i = 0; i < size; i++) {
+                size_t position = reply->dataPosition();
+                if (strategies[i].writeToParcel(reply) != NO_ERROR) {
+                    reply->setDataPosition(position);
+                    finalSize--;
+                }
+            }
+            if (size != finalSize) {
+                size_t position = reply->dataPosition();
+                reply->setDataPosition(sizePosition);
+                reply->writeInt32(finalSize);
+                reply->setDataPosition(position);
+            }
+            return NO_ERROR;
+        }
+
+        case GET_STRATEGY_FOR_ATTRIBUTES: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            AudioAttributes attributes;
+            status_t status = attributes.readFromParcel(&data);
+            if (status != NO_ERROR) {
+                return status;
+            }
+            product_strategy_t strategy = getProductStrategyFromAudioAttributes(attributes);
+            reply->writeUint32(static_cast<int>(strategy));
+            return NO_ERROR;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
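For reference, a caller-side sketch of the new LIST_AUDIO_PRODUCT_STRATEGIES path as exposed through AudioSystem (illustrative only; the reply carries a status, a strategy count, then one parcelled AudioProductStrategy per entry, with failed entries rolled back as shown above):

    // Dump the product strategies reported by audio policy.
    AudioProductStrategyVector strategies;
    if (AudioSystem::listAudioProductStrategies(strategies) == NO_ERROR) {
        for (const auto &strategy : strategies) {
            ALOGI("strategy %u (%s) carries %zu attributes",
                  static_cast<uint32_t>(strategy.getId()),
                  strategy.getName().c_str(),
                  strategy.getAudioAttributes().size());
        }
    }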
diff --git a/media/libaudioclient/ToneGenerator.cpp b/media/libaudioclient/ToneGenerator.cpp
index 5c5dbd6..536b00d 100644
--- a/media/libaudioclient/ToneGenerator.cpp
+++ b/media/libaudioclient/ToneGenerator.cpp
@@ -20,7 +20,6 @@
 #include <math.h>
 #include <utils/Log.h>
 #include <cutils/properties.h>
-#include <media/AudioPolicyHelper.h>
 #include "media/ToneGenerator.h"
 
 
@@ -1242,7 +1241,7 @@
     if (mStreamType == AUDIO_STREAM_VOICE_CALL) {
         streamType = AUDIO_STREAM_DTMF;
     }
-    stream_type_to_audio_attributes(streamType, &attr);
+    attr = AudioSystem::streamTypeToAttributes(streamType);
 
     const size_t frameCount = mProcessSize;
     status_t status = mpAudioTrack->set(
diff --git a/media/libaudioclient/include/media/AudioAttributes.h b/media/libaudioclient/include/media/AudioAttributes.h
new file mode 100644
index 0000000..edf26eb
--- /dev/null
+++ b/media/libaudioclient/include/media/AudioAttributes.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#pragma once
+
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <binder/Parcelable.h>
+
+namespace android {
+
+class AudioAttributes : public Parcelable
+{
+public:
+    AudioAttributes() = default;
+    AudioAttributes(const audio_attributes_t &attributes) : mAttributes(attributes) {}
+    AudioAttributes(uint32_t groupId,
+                    audio_stream_type_t stream,
+                    const audio_attributes_t &attributes) :
+         mAttributes(attributes), mStreamType(stream), mGroupId(groupId) {}
+
+    audio_attributes_t getAttributes() const { return mAttributes; }
+
+    status_t readFromParcel(const Parcel *parcel) override;
+    status_t writeToParcel(Parcel *parcel) const override;
+
+    audio_stream_type_t getStreamType() const { return mStreamType; }
+    uint32_t getGroupId() const { return mGroupId; }
+
+private:
+    audio_attributes_t mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
+    /**
+     * @brief mStreamType: for legacy volume management, we need to be able to convert an attribute
+     * to a given stream type.
+     */
+    audio_stream_type_t mStreamType = AUDIO_STREAM_DEFAULT;
+
+    /**
+     * @brief mGroupId: for future volume management, define groups within a strategy that follows
+     * the same curves of volume (extension of stream types to manage volume)
+     */
+    uint32_t mGroupId = 0;
+};
+
+} // namespace android
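For reference, a minimal sketch of how the new AudioAttributes wrapper travels over binder (illustrative only):

    // Wrap a raw audio_attributes_t for transport to audio policy.
    audio_attributes_t raw = AUDIO_ATTRIBUTES_INITIALIZER;
    raw.usage = AUDIO_USAGE_ALARM;
    AudioAttributes aa(raw);        // stream type and group id keep their defaults
    Parcel parcel;
    aa.writeToParcel(&parcel);      // same serialization used by GET_STRATEGY_FOR_ATTRIBUTES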
diff --git a/media/libaudioclient/include/media/AudioCommonTypes.h b/media/libaudioclient/include/media/AudioCommonTypes.h
new file mode 100644
index 0000000..5188da1
--- /dev/null
+++ b/media/libaudioclient/include/media/AudioCommonTypes.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#pragma once
+
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <binder/Parcelable.h>
+
+namespace android {
+
+enum product_strategy_t : uint32_t;
+const product_strategy_t PRODUCT_STRATEGY_NONE = static_cast<product_strategy_t>(-1);
+
+using AttributesVector = std::vector<audio_attributes_t>;
+using StreamTypes = std::vector<audio_stream_type_t>;
+
+constexpr bool operator==(const audio_attributes_t &lhs, const audio_attributes_t &rhs)
+{
+    return lhs.usage == rhs.usage && lhs.content_type == rhs.content_type &&
+            lhs.flags == rhs.flags && (std::strcmp(lhs.tags, rhs.tags) == 0);
+}
+constexpr bool operator!=(const audio_attributes_t &lhs, const audio_attributes_t &rhs)
+{
+    return !(lhs==rhs);
+}
+} // namespace android
+
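The comparison operators above treat two attribute sets as equal only when usage, content type, flags and tags all match. For instance (illustrative only):

    audio_attributes_t a = AUDIO_ATTRIBUTES_INITIALIZER;
    audio_attributes_t b = a;
    b.usage = AUDIO_USAGE_GAME;
    // a == b is false: usage differs even though content type, flags and tags match.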
diff --git a/media/libaudioclient/include/media/AudioMixer.h b/media/libaudioclient/include/media/AudioMixer.h
index fbbbd11..41b425f 100644
--- a/media/libaudioclient/include/media/AudioMixer.h
+++ b/media/libaudioclient/include/media/AudioMixer.h
@@ -26,6 +26,7 @@
 #include <unordered_map>
 #include <vector>
 
+#include <android/os/IExternalVibratorService.h>
 #include <media/AudioBufferProvider.h>
 #include <media/AudioResampler.h>
 #include <media/AudioResamplerPublic.h>
@@ -103,20 +104,21 @@
                                   // parameter 'value' is a pointer to the new playback rate.
     };
 
-    enum { // Haptic intensity, should keep consistent with VibratorService
-        HAPTIC_SCALE_VERY_LOW = -2,
-        HAPTIC_SCALE_LOW = -1,
-        HAPTIC_SCALE_NONE = 0,
-        HAPTIC_SCALE_HIGH = 1,
-        HAPTIC_SCALE_VERY_HIGH = 2,
-    };
-    typedef int32_t haptic_intensity_t;
-    static constexpr float HAPTIC_SCALE_VERY_LOW_RATIO = 2 / 3;
-    static constexpr float HAPTIC_SCALE_LOW_RATIO = 3 / 4;
-    static const CONSTEXPR float HAPTIC_MAX_AMPLITUDE_FLOAT = 1.0f;
+    typedef enum { // Haptic intensity, should keep consistent with VibratorService
+        HAPTIC_SCALE_MUTE = os::IExternalVibratorService::SCALE_MUTE,
+        HAPTIC_SCALE_VERY_LOW = os::IExternalVibratorService::SCALE_VERY_LOW,
+        HAPTIC_SCALE_LOW = os::IExternalVibratorService::SCALE_LOW,
+        HAPTIC_SCALE_NONE = os::IExternalVibratorService::SCALE_NONE,
+        HAPTIC_SCALE_HIGH = os::IExternalVibratorService::SCALE_HIGH,
+        HAPTIC_SCALE_VERY_HIGH = os::IExternalVibratorService::SCALE_VERY_HIGH,
+    } haptic_intensity_t;
+    static constexpr float HAPTIC_SCALE_VERY_LOW_RATIO = 2.0f / 3.0f;
+    static constexpr float HAPTIC_SCALE_LOW_RATIO = 3.0f / 4.0f;
+    static const constexpr float HAPTIC_MAX_AMPLITUDE_FLOAT = 1.0f;
 
     static inline bool isValidHapticIntensity(haptic_intensity_t hapticIntensity) {
         switch (hapticIntensity) {
+        case HAPTIC_SCALE_MUTE:
         case HAPTIC_SCALE_VERY_LOW:
         case HAPTIC_SCALE_LOW:
         case HAPTIC_SCALE_NONE:
@@ -428,8 +430,9 @@
         case HAPTIC_SCALE_NONE:
         case HAPTIC_SCALE_HIGH:
         case HAPTIC_SCALE_VERY_HIGH:
-        default:
             return 1.0f;
+        default:
+            return 0.0f;
         }
         }
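The haptic intensity values now mirror the IExternalVibratorService scale constants, and a muted intensity is handled explicitly. A short sketch of the intended effect (illustrative only; behavior inferred from the hunks above):

    // HAPTIC_SCALE_MUTE is now a valid intensity; the amplitude helper above
    // appears to return 0.0f for it, silencing the haptic channel instead of
    // passing it through at full scale.
    AudioMixer::haptic_intensity_t intensity = AudioMixer::HAPTIC_SCALE_MUTE;
    bool valid = AudioMixer::isValidHapticIntensity(intensity);   // expected: true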
 
diff --git a/media/libaudioclient/include/media/AudioPolicy.h b/media/libaudioclient/include/media/AudioPolicy.h
index 786fb9a..bf8d627 100644
--- a/media/libaudioclient/include/media/AudioPolicy.h
+++ b/media/libaudioclient/include/media/AudioPolicy.h
@@ -49,8 +49,12 @@
 #define MIX_STATE_IDLE 0
 #define MIX_STATE_MIXING 1
 
+/** Control to which device some audio is rendered */
 #define MIX_ROUTE_FLAG_RENDER 0x1
+/** Loop back some audio instead of rendering it */
 #define MIX_ROUTE_FLAG_LOOP_BACK (0x1 << 1)
+/** Loop back some audio while it is rendered */
+#define MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER (MIX_ROUTE_FLAG_RENDER | MIX_ROUTE_FLAG_LOOP_BACK)
 #define MIX_ROUTE_FLAG_ALL (MIX_ROUTE_FLAG_RENDER | MIX_ROUTE_FLAG_LOOP_BACK)
 
 #define MAX_MIXES_PER_POLICY 10
@@ -119,6 +123,11 @@
 #define RECORD_CONFIG_EVENT_START 1
 #define RECORD_CONFIG_EVENT_STOP  0
 
+static inline bool is_mix_loopback_render(uint32_t routeFlags) {
+    return (routeFlags & MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER)
+           == MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER;
+}
+
 }; // namespace android
 
 #endif  // ANDROID_AUDIO_POLICY_H
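The new helper simply checks that both route bits are set. For reference (illustrative only):

    uint32_t routeFlags = MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER;
    bool both = is_mix_loopback_render(routeFlags);                        // true
    bool loopbackOnly = is_mix_loopback_render(MIX_ROUTE_FLAG_LOOP_BACK);  // false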
diff --git a/media/libaudioclient/include/media/AudioPolicyHelper.h b/media/libaudioclient/include/media/AudioPolicyHelper.h
deleted file mode 100644
index 46de6b3..0000000
--- a/media/libaudioclient/include/media/AudioPolicyHelper.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef AUDIO_POLICY_HELPER_H_
-#define AUDIO_POLICY_HELPER_H_
-
-#include <android-base/macros.h>
-#include <system/audio.h>
-
-static inline
-audio_stream_type_t audio_usage_to_stream_type(const audio_usage_t usage)
-{
-    switch(usage) {
-        case AUDIO_USAGE_MEDIA:
-        case AUDIO_USAGE_GAME:
-        case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
-        case AUDIO_USAGE_ASSISTANT:
-            return AUDIO_STREAM_MUSIC;
-        case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
-            return AUDIO_STREAM_ACCESSIBILITY;
-        case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
-            return AUDIO_STREAM_SYSTEM;
-        case AUDIO_USAGE_VOICE_COMMUNICATION:
-            return AUDIO_STREAM_VOICE_CALL;
-
-        case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
-            return AUDIO_STREAM_DTMF;
-
-        case AUDIO_USAGE_ALARM:
-            return AUDIO_STREAM_ALARM;
-        case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
-            return AUDIO_STREAM_RING;
-
-        case AUDIO_USAGE_NOTIFICATION:
-        case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
-        case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
-        case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
-        case AUDIO_USAGE_NOTIFICATION_EVENT:
-            return AUDIO_STREAM_NOTIFICATION;
-
-        case AUDIO_USAGE_UNKNOWN:
-        default:
-            return AUDIO_STREAM_MUSIC;
-    }
-}
-
-static inline
-audio_stream_type_t audio_attributes_to_stream_type(const audio_attributes_t *attr)
-{
-    // flags to stream type mapping
-    if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
-        return AUDIO_STREAM_ENFORCED_AUDIBLE;
-    }
-    if ((attr->flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
-        return AUDIO_STREAM_BLUETOOTH_SCO;
-    }
-
-    // usage to stream type mapping
-    return audio_usage_to_stream_type(attr->usage);
-}
-
-static inline
-void stream_type_to_audio_attributes(audio_stream_type_t streamType,
-                                     audio_attributes_t *attr) {
-    memset(attr, 0, sizeof(audio_attributes_t));
-
-    switch (streamType) {
-    case AUDIO_STREAM_DEFAULT:
-    case AUDIO_STREAM_MUSIC:
-        attr->content_type = AUDIO_CONTENT_TYPE_MUSIC;
-        attr->usage = AUDIO_USAGE_MEDIA;
-        break;
-    case AUDIO_STREAM_VOICE_CALL:
-        attr->content_type = AUDIO_CONTENT_TYPE_SPEECH;
-        attr->usage = AUDIO_USAGE_VOICE_COMMUNICATION;
-        break;
-    case AUDIO_STREAM_ENFORCED_AUDIBLE:
-        attr->flags  |= AUDIO_FLAG_AUDIBILITY_ENFORCED;
-        FALLTHROUGH_INTENDED; // attributes in common with STREAM_SYSTEM
-    case AUDIO_STREAM_SYSTEM:
-        attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
-        attr->usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
-        break;
-    case AUDIO_STREAM_RING:
-        attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
-        attr->usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
-        break;
-    case AUDIO_STREAM_ALARM:
-        attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
-        attr->usage = AUDIO_USAGE_ALARM;
-        break;
-    case AUDIO_STREAM_NOTIFICATION:
-        attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
-        attr->usage = AUDIO_USAGE_NOTIFICATION;
-        break;
-    case AUDIO_STREAM_BLUETOOTH_SCO:
-        attr->content_type = AUDIO_CONTENT_TYPE_SPEECH;
-        attr->usage = AUDIO_USAGE_VOICE_COMMUNICATION;
-        attr->flags |= AUDIO_FLAG_SCO;
-        break;
-    case AUDIO_STREAM_DTMF:
-        attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
-        attr->usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
-        break;
-    case AUDIO_STREAM_TTS:
-        attr->content_type = AUDIO_CONTENT_TYPE_SPEECH;
-        attr->usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
-        break;
-    default:
-        ALOGE("invalid stream type %d when converting to attributes", streamType);
-    }
-}
-
-// Convert flags sent from Java AudioAttributes.getFlags() method to audio_output_flags_t
-static inline
-void audio_attributes_flags_to_audio_output_flags(const audio_flags_mask_t audioAttributeFlags,
-            audio_output_flags_t &flags) {
-    if ((audioAttributeFlags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
-        flags = static_cast<audio_output_flags_t>(flags |
-            AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_DIRECT);
-    }
-    if ((audioAttributeFlags & AUDIO_FLAG_LOW_LATENCY) != 0) {
-        flags = static_cast<audio_output_flags_t>(flags | AUDIO_OUTPUT_FLAG_FAST);
-    }
-    // check deep buffer after flags have been modified above
-    if (flags == AUDIO_OUTPUT_FLAG_NONE && (audioAttributeFlags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
-        flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
-    }
-}
-
-#endif //AUDIO_POLICY_HELPER_H_
diff --git a/media/libaudioclient/include/media/AudioProductStrategy.h b/media/libaudioclient/include/media/AudioProductStrategy.h
new file mode 100644
index 0000000..7441095
--- /dev/null
+++ b/media/libaudioclient/include/media/AudioProductStrategy.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#pragma once
+
+#include <media/AudioCommonTypes.h>
+#include <media/AudioAttributes.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <binder/Parcelable.h>
+
+namespace android {
+
+class AudioProductStrategy : public Parcelable
+{
+public:
+    AudioProductStrategy() {}
+    AudioProductStrategy(const std::string &name, const std::vector<AudioAttributes> &attributes,
+                         product_strategy_t id) :
+        mName(name), mAudioAttributes(attributes), mId(id) {}
+
+    const std::string &getName() const { return mName; }
+    std::vector<AudioAttributes> getAudioAttributes() const { return mAudioAttributes; }
+    product_strategy_t getId() const { return mId; }
+
+    status_t readFromParcel(const Parcel *parcel) override;
+    status_t writeToParcel(Parcel *parcel) const override;
+
+    /**
+     * @brief attributesMatches: checks if client attributes matches with a reference attributes
+     * "matching" means the usage shall match if reference attributes has a defined usage, AND
+     * content type shall match if reference attributes has a defined content type AND
+     * flags shall match if reference attributes has defined flags AND
+     * tags shall match if reference attributes has defined tags.
+     * Reference attributes "default" shall not be considered as a "true" case. This convention
+     * is used to identify the default strategy.
+     * @param refAttributes to be considered
+     * @param clientAttributes to be considered
+     * @return true if matching, false otherwise
+     */
+    static bool attributesMatches(const audio_attributes_t refAttributes,
+                                  const audio_attributes_t clientAttributes);
+private:
+    std::string mName;
+    std::vector<AudioAttributes> mAudioAttributes;
+    product_strategy_t mId;
+};
+
+using AudioProductStrategyVector = std::vector<AudioProductStrategy>;
+
+} // namespace android
+
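For reference, a sketch of the matching rule documented above (illustrative only): a reference that only pins the usage matches any client attributes sharing that usage, while an all-default reference matches nothing.

    audio_attributes_t ref = AUDIO_ATTRIBUTES_INITIALIZER;
    ref.usage = AUDIO_USAGE_MEDIA;
    audio_attributes_t client = AUDIO_ATTRIBUTES_INITIALIZER;
    client.usage = AUDIO_USAGE_MEDIA;
    client.content_type = AUDIO_CONTENT_TYPE_MUSIC;
    bool matched = AudioProductStrategy::attributesMatches(ref, client);  // expected: true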
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 1f71844..4707c4a 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -677,7 +677,7 @@
     sp<IMemory>             mCblkMemory;
     audio_track_cblk_t*     mCblk;              // re-load after mLock.unlock()
     sp<IMemory>             mBufferMemory;
-    audio_io_handle_t       mInput;             // returned by AudioSystem::getInput()
+    audio_io_handle_t       mInput = AUDIO_IO_HANDLE_NONE; // from AudioSystem::getInputForAttr()
 
     int                     mPreviousPriority;  // before start()
     SchedPolicy             mPreviousSchedulingGroup;
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 1fb7add..b9ee24a 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -20,6 +20,7 @@
 #include <sys/types.h>
 
 #include <media/AudioPolicy.h>
+#include <media/AudioProductStrategy.h>
 #include <media/AudioIoDescriptor.h>
 #include <media/IAudioFlingerClient.h>
 #include <media/IAudioPolicyServiceClient.h>
@@ -230,7 +231,8 @@
                                      const audio_config_t *config,
                                      audio_output_flags_t flags,
                                      audio_port_handle_t *selectedDeviceId,
-                                     audio_port_handle_t *portId);
+                                     audio_port_handle_t *portId,
+                                     std::vector<audio_io_handle_t> *secondaryOutputs);
     static status_t startOutput(audio_port_handle_t portId);
     static status_t stopOutput(audio_port_handle_t portId);
     static void releaseOutput(audio_port_handle_t portId);
@@ -364,6 +366,12 @@
 
     static bool     isHapticPlaybackSupported();
 
+    static status_t listAudioProductStrategies(AudioProductStrategyVector &strategies);
+    static product_strategy_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa);
+
+    static audio_attributes_t streamTypeToAttributes(audio_stream_type_t stream);
+    static audio_stream_type_t attributesToStreamType(const audio_attributes_t &attr);
+
     // ----------------------------------------------------------------------------
 
     class AudioPortCallback : public RefBase
@@ -393,6 +401,28 @@
                                          audio_port_handle_t deviceId) = 0;
     };
 
+    class AudioDeviceCallbackProxy : public RefBase
+    {
+    public:
+
+          AudioDeviceCallbackProxy(wp<AudioDeviceCallback> callback)
+              : mCallback(callback) {}
+          ~AudioDeviceCallbackProxy() override {}
+
+          sp<AudioDeviceCallback> callback() const { return mCallback.promote(); };
+
+          bool notifiedOnce() const { return mNotifiedOnce; }
+          void setNotifiedOnce() { mNotifiedOnce = true; }
+    private:
+        /**
+         * @brief mNotifiedOnce forces the callback to be called at least once when it is
+         * registered with a valid AudioDevice, and avoids flooding the other listeners
+         * on this io handle that already know the valid device.
+         */
+         bool mNotifiedOnce = false;
+         wp<AudioDeviceCallback> mCallback;
+    };
+
     static status_t addAudioDeviceCallback(const wp<AudioDeviceCallback>& callback,
                                            audio_io_handle_t audioIo);
     static status_t removeAudioDeviceCallback(const wp<AudioDeviceCallback>& callback,
@@ -436,8 +466,27 @@
     private:
         Mutex                               mLock;
         DefaultKeyedVector<audio_io_handle_t, sp<AudioIoDescriptor> >   mIoDescriptors;
-        DefaultKeyedVector<audio_io_handle_t, Vector < wp<AudioDeviceCallback> > >
-                                                                        mAudioDeviceCallbacks;
+
+        class AudioDeviceCallbackProxies : public Vector<sp<AudioDeviceCallbackProxy>>
+        {
+        public:
+            /**
+             * @brief notifiedOnce ensures that if a client adds a callback, it is called at
+             * least once with the device to which it will be routed.
+             * @return true if already notified or nobody waits for a callback, false otherwise.
+             */
+            bool notifiedOnce() const { return (size() == 0) || mNotifiedOnce; }
+            void setNotifiedOnce() { mNotifiedOnce = true; }
+            void resetNotifiedOnce() { mNotifiedOnce = false; }
+        private:
+            /**
+             * @brief mNotifiedOnce forces each callback to be called at least once when it is
+             * registered with a valid AudioDevice
+             */
+            bool mNotifiedOnce = false;
+        };
+        DefaultKeyedVector<audio_io_handle_t, AudioDeviceCallbackProxies>
+                mAudioDeviceCallbackProxies;
         // cached values for recording getInputBufferSize() queries
         size_t                              mInBuffSize;    // zero indicates cache is invalid
         uint32_t                            mInSamplingRate;
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index cbb750f..12f5d71 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -1021,7 +1021,7 @@
     sp<IAudioTrack>         mAudioTrack;
     sp<IMemory>             mCblkMemory;
     audio_track_cblk_t*     mCblk;                  // re-load after mLock.unlock()
-    audio_io_handle_t       mOutput;                // returned by AudioSystem::getOutputForAttr()
+    audio_io_handle_t       mOutput = AUDIO_IO_HANDLE_NONE; // from AudioSystem::getOutputForAttr()
 
     sp<AudioTrackThread>    mAudioTrackThread;
     bool                    mThreadCanCallJava;
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index 177adc2..e89a55d 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -66,7 +66,8 @@
                                       const audio_config_t *config,
                                       audio_output_flags_t flags,
                                       audio_port_handle_t *selectedDeviceId,
-                                      audio_port_handle_t *portId) = 0;
+                                      audio_port_handle_t *portId,
+                                      std::vector<audio_io_handle_t> *secondaryOutputs) = 0;
     virtual status_t startOutput(audio_port_handle_t portId) = 0;
     virtual status_t stopOutput(audio_port_handle_t portId) = 0;
     virtual void releaseOutput(audio_port_handle_t portId) = 0;
@@ -196,6 +197,8 @@
     virtual status_t setA11yServicesUids(const std::vector<uid_t>& uids) = 0;
 
     virtual bool     isHapticPlaybackSupported() = 0;
+    virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) = 0;
+    virtual product_strategy_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa) = 0;
 };
 
 
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index a1e869f..b25f82e 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -123,15 +123,13 @@
 
 status_t DeviceHalHidl::setMasterVolume(float volume) {
     if (mDevice == 0) return NO_INIT;
-    if (mPrimaryDevice == 0) return INVALID_OPERATION;
-    return processReturn("setMasterVolume", mPrimaryDevice->setMasterVolume(volume));
+    return processReturn("setMasterVolume", mDevice->setMasterVolume(volume));
 }
 
 status_t DeviceHalHidl::getMasterVolume(float *volume) {
     if (mDevice == 0) return NO_INIT;
-    if (mPrimaryDevice == 0) return INVALID_OPERATION;
     Result retval;
-    Return<void> ret = mPrimaryDevice->getMasterVolume(
+    Return<void> ret = mDevice->getMasterVolume(
             [&](Result r, float v) {
                 retval = r;
                 if (retval == Result::OK) {
diff --git a/media/libaudioprocessing/Android.bp b/media/libaudioprocessing/Android.bp
index 817fb0b..cb78063 100644
--- a/media/libaudioprocessing/Android.bp
+++ b/media/libaudioprocessing/Android.bp
@@ -12,6 +12,11 @@
         "libnblog",
         "libsonic",
         "libutils",
+        "libvibrator",
+    ],
+
+    header_libs: [
+        "libbase_headers",
     ],
 
     cflags: [
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index 86777d6..2c57db7 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -113,10 +113,10 @@
         // Integer volume.
         // Currently integer volume is kept for the legacy integer mixer.
         // Will be removed when the legacy mixer path is removed.
-        t->volume[0] = UNITY_GAIN_INT;
-        t->volume[1] = UNITY_GAIN_INT;
-        t->prevVolume[0] = UNITY_GAIN_INT << 16;
-        t->prevVolume[1] = UNITY_GAIN_INT << 16;
+        t->volume[0] = 0;
+        t->volume[1] = 0;
+        t->prevVolume[0] = 0 << 16;
+        t->prevVolume[1] = 0 << 16;
         t->volumeInc[0] = 0;
         t->volumeInc[1] = 0;
         t->auxLevel = 0;
@@ -124,10 +124,10 @@
         t->prevAuxLevel = 0;
 
         // Floating point volume.
-        t->mVolume[0] = UNITY_GAIN_FLOAT;
-        t->mVolume[1] = UNITY_GAIN_FLOAT;
-        t->mPrevVolume[0] = UNITY_GAIN_FLOAT;
-        t->mPrevVolume[1] = UNITY_GAIN_FLOAT;
+        t->mVolume[0] = 0.f;
+        t->mVolume[1] = 0.f;
+        t->mPrevVolume[0] = 0.f;
+        t->mPrevVolume[1] = 0.f;
         t->mVolumeInc[0] = 0.;
         t->mVolumeInc[1] = 0.;
         t->mAuxLevel = 0.;
diff --git a/media/libaudioprocessing/tests/Android.bp b/media/libaudioprocessing/tests/Android.bp
index 811c16b..0c8e5bb 100644
--- a/media/libaudioprocessing/tests/Android.bp
+++ b/media/libaudioprocessing/tests/Android.bp
@@ -10,6 +10,7 @@
         "libcutils",
         "liblog",
         "libutils",
+        "libvibrator",
     ],
 
     cflags: [
diff --git a/media/libeffects/downmix/tests/Android.bp b/media/libeffects/downmix/tests/Android.bp
new file mode 100644
index 0000000..e2e7dbd
--- /dev/null
+++ b/media/libeffects/downmix/tests/Android.bp
@@ -0,0 +1,31 @@
+// Build testbench for downmix module.
+cc_test {
+    name:"downmixtest",
+    host_supported: false,
+    proprietary: true,
+    include_dirs: [
+        "frameworks/av/media/libeffects/downmix",
+    ],
+
+    header_libs: [
+        "libaudioeffects",
+    ],
+
+    shared_libs: [
+        "libaudioutils",
+        "libdownmix",
+        "liblog",
+    ],
+
+    relative_install_path: "soundfx",
+
+    srcs: [
+        "downmixtest.cpp",
+    ],
+
+    cflags: [
+        "-v",
+        "-Werror",
+        "-Wextra",
+    ],
+}
diff --git a/media/libeffects/downmix/tests/build_and_run_all_unit_tests.sh b/media/libeffects/downmix/tests/build_and_run_all_unit_tests.sh
new file mode 100755
index 0000000..d0faebe
--- /dev/null
+++ b/media/libeffects/downmix/tests/build_and_run_all_unit_tests.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+#
+#Run tests in this directory.
+#
+
+if [ -z "$ANDROID_BUILD_TOP" ]; then
+    echo "Android build environment not set"
+    exit -1
+fi
+#ensure we have mm
+. $ANDROID_BUILD_TOP/build/envsetup.sh
+
+mm -j
+
+echo "waiting for device"
+
+adb root && adb wait-for-device remount
+
+#location of test files
+testdir="/data/local/tmp/downmixtest"
+
+fs_arr=(
+    8000
+    11025
+    12000
+    16000
+    22050
+    24000
+    32000
+    44100
+    48000
+    88200
+    96000
+    176400
+    192000
+)
+
+echo "========================================"
+echo "testing Downmix"
+adb shell mkdir $testdir
+
+adb push $ANDROID_BUILD_TOP/cts/tests/tests/media/res/raw/sinesweepraw.raw \
+$testdir
+adb push $OUT/testcases/downmixtest/arm64/downmixtest $testdir
+
+#run the downmix test application for test.
+for fs in ${fs_arr[*]}
+do
+    for f_ch in {1..8}
+    do
+        for ch_fmt in {0..4}
+        do
+            adb shell  LD_LIBRARY_PATH=/vendor/lib64/soundfx \
+            $testdir/downmixtest $testdir/sinesweepraw.raw \
+            $testdir/sinesweep_fmt_$((ch_fmt))_fch_$((f_ch))_$((fs)).raw \
+            -ch_fmt:$ch_fmt -fch:$f_ch -fs:$fs
+
+            # Implementation dependent test:
+            # check that higher frequencies match 8 kHz result.
+            if [ $fs != 8000 ]
+            then
+                adb shell cmp \
+                $testdir/sinesweep_fmt_$((ch_fmt))_fch_$((f_ch))_8000.raw \
+                $testdir/sinesweep_fmt_$((ch_fmt))_fch_$((f_ch))_$((fs)).raw
+            fi
+        done
+    done
+done
+adb shell rm -r $testdir
diff --git a/media/libeffects/downmix/tests/downmixtest.cpp b/media/libeffects/downmix/tests/downmixtest.cpp
new file mode 100644
index 0000000..71f83e5
--- /dev/null
+++ b/media/libeffects/downmix/tests/downmixtest.cpp
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <vector>
+
+#include <audio_effects/effect_downmix.h>
+#include <audio_utils/channels.h>
+#include <audio_utils/primitives.h>
+#include <log/log.h>
+#include <system/audio.h>
+
+#include "EffectDownmix.h"
+#define FRAME_LENGTH 256
+#define MAX_NUM_CHANNELS 8
+
+struct downmix_cntxt_s {
+  effect_descriptor_t desc;
+  effect_handle_t handle;
+  effect_config_t config;
+
+  int numFileChannels;
+  int numProcessChannels;
+};
+
+extern audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM;
+
+void printUsage() {
+  printf("\nUsage:");
+  printf("\n     downmixtest <input_file> <out_file> [options]\n");
+  printf("\nwhere,");
+  printf("\n     <input_file>  is the input file name");
+  printf("\n                  on which the downmix effect is applied");
+  printf("\n     <output_file> processed output file");
+  printf("\n     and options are mentioned below");
+  printf("\n");
+  printf("\n      -h");
+  printf("\n           Prints this usage information");
+  printf("\n");
+  printf("\n     -ch_fmt:<format_of_input_audio>");
+  printf("\n         0:AUDIO_CHANNEL_OUT_7POINT1(default)");
+  printf("\n         1:AUDIO_CHANNEL_OUT_5POINT1_SIDE");
+  printf("\n         2:AUDIO_CHANNEL_OUT_5POINT1_BACK");
+  printf("\n         3:AUDIO_CHANNEL_OUT_QUAD_SIDE");
+  printf("\n         4:AUDIO_CHANNEL_OUT_QUAD_BACK");
+  printf("\n");
+  printf("\n     -fch:<file_channels> (1 through 8)");
+  printf("\n");
+}
+
+int32_t DownmixDefaultConfig(effect_config_t *pConfig) {
+  pConfig->inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+  pConfig->inputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+  pConfig->inputCfg.channels = AUDIO_CHANNEL_OUT_7POINT1;
+  pConfig->inputCfg.bufferProvider.getBuffer = nullptr;
+  pConfig->inputCfg.bufferProvider.releaseBuffer = nullptr;
+  pConfig->inputCfg.bufferProvider.cookie = nullptr;
+  pConfig->inputCfg.mask = EFFECT_CONFIG_ALL;
+
+  pConfig->inputCfg.samplingRate = 44100;
+  pConfig->outputCfg.samplingRate = pConfig->inputCfg.samplingRate;
+
+  // set a default value for the access mode, but should be overwritten by caller
+  pConfig->outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
+  pConfig->outputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+  pConfig->outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+  pConfig->outputCfg.bufferProvider.getBuffer = nullptr;
+  pConfig->outputCfg.bufferProvider.releaseBuffer = nullptr;
+  pConfig->outputCfg.bufferProvider.cookie = nullptr;
+  pConfig->outputCfg.mask = EFFECT_CONFIG_ALL;
+
+  return 0;
+}
+
+int32_t DownmixConfigureAndEnable(downmix_cntxt_s *pDescriptor) {
+  effect_handle_t *effectHandle = &pDescriptor->handle;
+  downmix_module_t *downmixEffectHandle = (downmix_module_t *)*effectHandle;
+  const struct effect_interface_s *Downmix_api = downmixEffectHandle->itfe;
+  int32_t err = 0;
+  uint32_t replySize = (uint32_t)sizeof(err);
+
+  err = (Downmix_api->command)(*effectHandle, EFFECT_CMD_SET_CONFIG,
+                               sizeof(effect_config_t), &(pDescriptor->config),
+                               &replySize, &err);
+  if (err != 0) {
+    ALOGE("Downmix command to configure returned an error %d", err);
+    return err;
+  }
+
+  err = ((Downmix_api->command))(*effectHandle, EFFECT_CMD_ENABLE, 0, nullptr,
+                                 &replySize, &err);
+  if (err != 0) {
+    ALOGE("Downmix command to enable effect returned an error %d", err);
+    return err;
+  }
+  return 0;
+}
+
+int32_t DownmixExecute(downmix_cntxt_s *pDescriptor, FILE *finp,
+                       FILE *fout) {
+  effect_handle_t *effectHandle = &pDescriptor->handle;
+  downmix_module_t *downmixEffectHandle = (downmix_module_t *)*effectHandle;
+  const struct effect_interface_s *Downmix_api = downmixEffectHandle->itfe;
+
+  const int numFileChannels = pDescriptor->numFileChannels;
+  const int numProcessChannels = pDescriptor->numProcessChannels;
+  const int fileFrameSize = numFileChannels * sizeof(short);
+  const unsigned int outputChannels =
+      audio_channel_count_from_out_mask(AUDIO_CHANNEL_OUT_STEREO);
+
+  std::vector<float> outFloat(FRAME_LENGTH * MAX_NUM_CHANNELS);
+  std::vector<float> inFloat(FRAME_LENGTH * MAX_NUM_CHANNELS);
+
+  audio_buffer_t inbuffer, outbuffer;
+  inbuffer.f32 = inFloat.data();
+  outbuffer.f32 = outFloat.data();
+  inbuffer.frameCount = FRAME_LENGTH;
+  outbuffer.frameCount = FRAME_LENGTH;
+
+  audio_buffer_t *pinbuf, *poutbuf;
+  pinbuf = &inbuffer;
+  poutbuf = &outbuffer;
+
+  int frameCounter = 0;
+  std::vector<short> inS16(FRAME_LENGTH * MAX_NUM_CHANNELS);
+  std::vector<short> outS16(FRAME_LENGTH * MAX_NUM_CHANNELS);
+
+  while (fread(inS16.data(), fileFrameSize, FRAME_LENGTH, finp) ==
+         FRAME_LENGTH) {
+    if (numFileChannels != numProcessChannels) {
+      adjust_channels(inS16.data(), numFileChannels, inS16.data(),
+                      numProcessChannels, sizeof(short),
+                      FRAME_LENGTH * fileFrameSize);
+    }
+
+    memcpy_to_float_from_i16(inFloat.data(), inS16.data(),
+                             FRAME_LENGTH * numProcessChannels);
+
+    const int32_t err = (Downmix_api->process)(*effectHandle, pinbuf, poutbuf);
+    if (err != 0) {
+      ALOGE("DownmixProcess returned an error %d", err);
+      return -1;
+    }
+
+    memcpy_to_i16_from_float(outS16.data(), outFloat.data(),
+                             FRAME_LENGTH * outputChannels);
+    fwrite(outS16.data(), sizeof(short), (FRAME_LENGTH * outputChannels),
+           fout);
+    frameCounter++;
+  }
+  printf("frameCounter: [%d]\n", frameCounter);
+  return 0;
+}
+
+int32_t DownmixMainProcess(downmix_cntxt_s *pDescriptor, FILE *finp,
+                          FILE *fout) {
+  effect_handle_t *effectHandle = &pDescriptor->handle;
+  int32_t sessionId = 0, ioId = 0;
+  const effect_uuid_t downmix_uuid = {
+      0x93f04452, 0xe4fe, 0x41cc, 0x91f9, {0xe4, 0x75, 0xb6, 0xd1, 0xd6, 0x9f}};
+
+  int32_t err = AUDIO_EFFECT_LIBRARY_INFO_SYM.create_effect(
+      &downmix_uuid, sessionId, ioId,
+      effectHandle);
+  if (err != 0) {
+    ALOGE("DownmixLib_Create returned an error %d", err);
+    return -1;
+  }
+
+  // Passing the init config for the time being.
+  err = DownmixConfigureAndEnable(pDescriptor);
+  if (err != 0) {
+    ALOGE("DownmixConfigureAndEnable returned an error %d", err);
+    return -1;
+  }
+  // execute call for downmix.
+  err = DownmixExecute(pDescriptor, finp, fout);
+  if (err != 0) {
+    ALOGE("DownmixExecute returned an error %d", err);
+    return -1;
+  }
+  // Release the library function.
+  err = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(*effectHandle);
+  if (err != 0) {
+    ALOGE("DownmixRelease returned an error %d", err);
+    return -1;
+  }
+  return 0;
+}
+
+int main(int argc, const char *argv[]) {
+  int numFileChannels = 1, numProcessChannels = 8;
+  downmix_cntxt_s descriptor = {};
+  DownmixDefaultConfig(&(descriptor.config));
+
+  const char *infile = nullptr;
+  const char *outfile = nullptr;
+  for (int i = 1; i < argc; i++) {
+    printf("%s ", argv[i]);
+    if (argv[i][0] != '-') {
+      if (infile == nullptr) {
+        infile = argv[i];
+      } else if (outfile == nullptr) {
+        outfile = argv[i];
+      } else {
+        printUsage();
+        return -1;
+      }
+    } else if (!strncmp(argv[i], "-fs:", 4)) {
+      // Add a check for all the supported streams.
+      const int samplingFreq = atoi(argv[i] + 4);
+      if (samplingFreq != 8000 && samplingFreq != 11025 &&
+          samplingFreq != 12000 && samplingFreq != 16000 &&
+          samplingFreq != 22050 && samplingFreq != 24000 &&
+          samplingFreq != 32000 && samplingFreq != 44100 &&
+          samplingFreq != 48000 && samplingFreq != 88200 &&
+          samplingFreq != 96000 && samplingFreq != 176400 &&
+          samplingFreq != 192000) {
+        printf("Unsupported Sampling Frequency : %d", samplingFreq);
+        printUsage();
+        return -1;
+      }
+
+      descriptor.config.inputCfg.samplingRate = samplingFreq;
+      descriptor.config.outputCfg.samplingRate = samplingFreq;
+    } else if (!strncmp(argv[i], "-ch_fmt:", 8)) {
+      const int format = atoi(argv[i] + 8);
+      uint32_t *audioType = &descriptor.config.inputCfg.channels;
+      switch (format) {
+        case 0:
+          *audioType = AUDIO_CHANNEL_OUT_7POINT1;
+          break;
+        case 1:
+          *audioType = AUDIO_CHANNEL_OUT_5POINT1_SIDE;
+          break;
+        case 2:
+          *audioType = AUDIO_CHANNEL_OUT_5POINT1_BACK;
+          break;
+        case 3:
+          *audioType = AUDIO_CHANNEL_OUT_QUAD_SIDE;
+          break;
+        case 4:
+          *audioType = AUDIO_CHANNEL_OUT_QUAD_BACK;
+          break;
+        default:
+          *audioType = AUDIO_CHANNEL_OUT_7POINT1;
+          break;
+      }
+      descriptor.numProcessChannels =
+          audio_channel_count_from_out_mask(*audioType);
+    } else if (!strncmp(argv[i], "-fch:", 5)) {
+      const int fChannels = atoi(argv[i] + 5);
+      if (fChannels > 8 || fChannels < 1) {
+        printf("Unsupported number of file channels : %d", fChannels);
+        printUsage();
+        return -1;
+      }
+      descriptor.numFileChannels = fChannels;
+
+    } else if (!strncmp(argv[i], "-h", 2)) {
+      printUsage();
+      return 0;
+    }
+  }
+
+  if (infile == nullptr || outfile == nullptr) {
+    printUsage();
+    return -1;
+  }
+
+  FILE *finp = fopen(infile, "rb");
+  if (finp == nullptr) {
+    printf("Cannot open input file %s", infile);
+    return -1;
+  }
+  FILE *fout = fopen(outfile, "wb");
+  if (fout == nullptr) {
+    printf("Cannot open output file %s", outfile);
+    fclose(finp);
+    return -1;
+  }
+
+  const int err = DownmixMainProcess(&descriptor, finp, fout);
+  // close input and output files.
+  fclose(finp);
+  fclose(fout);
+  if (err != 0) {
+    printf("Error: %d\n", err);
+  }
+  return err;
+}
diff --git a/media/libeffects/lvm/lib/Common/src/Copy_16.c b/media/libeffects/lvm/lib/Common/src/Copy_16.c
index 1f9f659..3858450 100644
--- a/media/libeffects/lvm/lib/Common/src/Copy_16.c
+++ b/media/libeffects/lvm/lib/Common/src/Copy_16.c
@@ -132,8 +132,8 @@
     src += NrChannels * (NrFrames - 1);
     for (ii = NrFrames; ii != 0; ii--)
     {
-        dst[0] = src_st[0];
         dst[1] = src_st[1];
+        dst[0] = src_st[0]; // copy 1 before 0 is required for NrChannels == 3.
         for (jj = 2; jj < NrChannels; jj++)
         {
             dst[jj] = src[jj];
diff --git a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
index 41a4f04..1a874a3 100755
--- a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
+++ b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
@@ -54,21 +54,13 @@
     192000
 )
 
-ch_arr=(
-    1
-    2
-    4
-    6
-    8
-)
-
 # run multichannel effects at different configs, saving only the stereo channel
 # pair.
 for flags in "${flags_arr[@]}"
 do
     for fs in ${fs_arr[*]}
     do
-        for ch in ${ch_arr[*]}
+        for ch in {1..8}
         do
             adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw \
                 -o:$testdir/sinesweep_$((ch))_$((fs)).raw -ch:$ch -fs:$fs $flags
diff --git a/media/libeffects/lvm/tests/lvmtest.cpp b/media/libeffects/lvm/tests/lvmtest.cpp
index 43271d2..fe47d0b 100644
--- a/media/libeffects/lvm/tests/lvmtest.cpp
+++ b/media/libeffects/lvm/tests/lvmtest.cpp
@@ -571,17 +571,12 @@
                   0);                      /* Audio Time */
 }
 
-int lvmMainProcess(lvmConfigParams_t *plvmConfigParams, FILE *finp, FILE *fout) {
-  struct EffectContext context;
-  LVM_ControlParams_t params;
-
-  int errCode = lvmCreate(&context, plvmConfigParams, &params);
-  if (errCode) {
-    ALOGE("Error: lvmCreate returned with %d\n", errCode);
-    return errCode;
-  }
-
-  errCode = lvmControl(&context, plvmConfigParams, &params);
+int lvmMainProcess(EffectContext *pContext,
+                   LVM_ControlParams_t *pParams,
+                   lvmConfigParams_t *plvmConfigParams,
+                   FILE *finp,
+                   FILE *fout) {
+  int errCode = lvmControl(pContext, plvmConfigParams, pParams);
   if (errCode) {
     ALOGE("Error: lvmControl returned with %d\n", errCode);
     return errCode;
@@ -625,7 +620,7 @@
         }
     }
 #if 1
-    errCode = lvmExecute(floatIn.data(), floatOut.data(), &context, plvmConfigParams);
+    errCode = lvmExecute(floatIn.data(), floatOut.data(), pContext, plvmConfigParams);
     if (errCode) {
       printf("\nError: lvmExecute returned with %d\n", errCode);
       return errCode;
@@ -654,14 +649,15 @@
   }
 
   lvmConfigParams_t lvmConfigParams{}; // default initialize
-  FILE *finp = nullptr, *fout = nullptr;
+  const char *infile = nullptr;
+  const char *outfile = nullptr;
 
   for (int i = 1; i < argc; i++) {
     printf("%s ", argv[i]);
     if (!strncmp(argv[i], "-i:", 3)) {
-      finp = fopen(argv[i] + 3, "rb");
+      infile = argv[i] + 3;
     } else if (!strncmp(argv[i], "-o:", 3)) {
-      fout = fopen(argv[i] + 3, "wb");
+      outfile = argv[i] + 3;
     } else if (!strncmp(argv[i], "-fs:", 4)) {
       const int samplingFreq = atoi(argv[i] + 4);
       if (samplingFreq != 8000 && samplingFreq != 11025 &&
@@ -671,21 +667,21 @@
           samplingFreq != 48000 && samplingFreq != 88200 &&
           samplingFreq != 96000 && samplingFreq != 176400 &&
           samplingFreq != 192000) {
-        ALOGE("\nError: Unsupported Sampling Frequency : %d\n", samplingFreq);
+        printf("Error: Unsupported Sampling Frequency : %d\n", samplingFreq);
         return -1;
       }
       lvmConfigParams.samplingFreq = samplingFreq;
     } else if (!strncmp(argv[i], "-ch:", 4)) {
       const int nrChannels = atoi(argv[i] + 4);
       if (nrChannels > 8 || nrChannels < 1) {
-        ALOGE("\nError: Unsupported number of channels : %d\n", nrChannels);
+        printf("Error: Unsupported number of channels : %d\n", nrChannels);
         return -1;
       }
       lvmConfigParams.nrChannels = nrChannels;
     } else if (!strncmp(argv[i], "-fch:", 5)) {
       const int fChannels = atoi(argv[i] + 5);
       if (fChannels > 8 || fChannels < 1) {
-             ALOGE("\nError: Unsupported number of file channels : %d\n", fChannels);
+             printf("Error: Unsupported number of file channels : %d\n", fChannels);
              return -1;
            }
            lvmConfigParams.fChannels = fChannels;
@@ -694,7 +690,7 @@
     } else if (!strncmp(argv[i], "-basslvl:", 9)) {
       const int bassEffectLevel = atoi(argv[i] + 9);
       if (bassEffectLevel > 15 || bassEffectLevel < 0) {
-        ALOGE("\nError: Unsupported Bass Effect Level : %d\n",
+        printf("Error: Unsupported Bass Effect Level : %d\n",
                bassEffectLevel);
         printUsage();
         return -1;
@@ -703,7 +699,7 @@
     } else if (!strncmp(argv[i], "-eqPreset:", 10)) {
       const int eqPresetLevel = atoi(argv[i] + 10);
       if (eqPresetLevel > 9 || eqPresetLevel < 0) {
-        ALOGE("\nError: Unsupported Equalizer Preset : %d\n", eqPresetLevel);
+        printf("Error: Unsupported Equalizer Preset : %d\n", eqPresetLevel);
         printUsage();
         return -1;
       }
@@ -722,19 +718,47 @@
     }
   }
 
-  if (finp == nullptr || fout == nullptr) {
-    ALOGE("\nError: missing input/output files\n");
+  if (infile == nullptr || outfile == nullptr) {
+    printf("Error: missing input/output files\n");
     printUsage();
-    // ok not to close.
     return -1;
   }
 
-  const int errCode = lvmMainProcess(&lvmConfigParams, finp, fout);
+  FILE *finp = fopen(infile, "rb");
+  if (finp == nullptr) {
+    printf("Cannot open input file %s", infile);
+    return -1;
+  }
+
+  FILE *fout = fopen(outfile, "wb");
+  if (fout == nullptr) {
+    printf("Cannot open output file %s", outfile);
+    fclose(finp);
+    return -1;
+  }
+
+  EffectContext context;
+  LVM_ControlParams_t params;
+  int errCode = lvmCreate(&context, &lvmConfigParams, &params);
+  if (errCode == 0) {
+    errCode = lvmMainProcess(&context, &params, &lvmConfigParams, finp, fout);
+    if (errCode != 0) {
+      printf("Error: lvmMainProcess returned with the error: %d",errCode);
+    }
+  } else {
+    printf("Error: lvmCreate returned with the error: %d", errCode);
+  }
   fclose(finp);
   fclose(fout);
+  /* Free the allocated buffers */
+  if (context.pBundledContext != nullptr) {
+    if (context.pBundledContext->hInstance != nullptr) {
+      LvmEffect_free(&context);
+    }
+    free(context.pBundledContext);
+  }
 
   if (errCode) {
-    ALOGE("Error: lvmMainProcess returns with the error: %d \n", errCode);
     return -1;
   }
   return 0;
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 68dae56..5853e4b 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -21,6 +21,7 @@
     vndk: {
         enabled: true,
     },
+    double_loadable: true,
     srcs: ["AudioParameter.cpp", "TypeConverter.cpp"],
     cflags: [
         "-Werror",
@@ -144,18 +145,10 @@
     },
 }
 
-filegroup {
-    name: "mediaupdateservice_aidl",
-    srcs: [
-        "aidl/android/media/IMediaUpdateService.aidl",
-    ],
-}
-
 cc_library {
     name: "libmedia",
 
     srcs: [
-        ":mediaupdateservice_aidl",
         "IDataSource.cpp",
         "BufferingSettings.cpp",
         "mediaplayer.cpp",
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
index e7da488..4dece96 100644
--- a/media/libmedia/IMediaSource.cpp
+++ b/media/libmedia/IMediaSource.cpp
@@ -107,6 +107,7 @@
         data.writeInterfaceToken(BpMediaSource::getInterfaceDescriptor());
         status_t ret = remote()->transact(GETFORMAT, data, &reply);
         if (ret == NO_ERROR) {
+            AutoMutex _l(mLock);
             mMetaData = MetaData::createFromParcel(reply);
             return mMetaData;
         }
@@ -222,6 +223,8 @@
     // NuPlayer passes pointers-to-metadata around, so we use this to keep the metadata alive
     // XXX: could we use this for caching, or does metadata change on the fly?
     sp<MetaData> mMetaData;
+    // ensure synchronized access to mMetaData
+    Mutex mLock;
 
     // Cache all IMemory objects received from MediaExtractor.
     // We gc IMemory objects that are no longer active (referenced by a MediaBuffer).
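For context on the hunk above: assigning the cached sp<MetaData> is not an atomic operation, and the added comment implies getFormat() can be reached from more than one thread, so the new mLock serializes the writers. A minimal standalone sketch of the same pattern follows (hypothetical MetaCache type, shown only for illustration; AutoMutex is the scoped lock from utils/Mutex.h):

    #include <utils/Mutex.h>
    #include <utils/RefBase.h>
    #include <utils/StrongPointer.h>

    using namespace android;

    struct MetaCache {
        sp<RefBase> mCached;        // stands in for sp<MetaData> mMetaData
        Mutex mLock;                // serializes writers, as in the hunk above

        sp<RefBase> update(const sp<RefBase> &fresh) {
            AutoMutex _l(mLock);    // sp<> assignment is not atomic; hold the lock across it
            mCached = fresh;
            return mCached;         // the returned sp<> keeps the object alive after unlock
        }
    };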
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index a073081..747b88f 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -552,7 +552,7 @@
 };
 
 IMPLEMENT_META_INTERFACE(OMX, "android.hardware.IOMX");
-IMPLEMENT_HYBRID_META_INTERFACE(OMXNode, IOmxNode, "android.hardware.IOMXNode");
+IMPLEMENT_HYBRID_META_INTERFACE(OMXNode, "android.hardware.IOMXNode");
 
 ////////////////////////////////////////////////////////////////////////////////
 
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index 08c6a50..98c5497 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -92,6 +92,19 @@
     {"highspeed720p", CAMCORDER_QUALITY_HIGH_SPEED_720P},
     {"highspeed1080p", CAMCORDER_QUALITY_HIGH_SPEED_1080P},
     {"highspeed2160p", CAMCORDER_QUALITY_HIGH_SPEED_2160P},
+
+    // Vendor-specific profiles
+    {"vga", CAMCORDER_QUALITY_VGA},
+    {"4kdci", CAMCORDER_QUALITY_4KDCI},
+    {"timelapsevga", CAMCORDER_QUALITY_TIME_LAPSE_VGA},
+    {"timelapse4kdci", CAMCORDER_QUALITY_TIME_LAPSE_4KDCI},
+    {"highspeedcif", CAMCORDER_QUALITY_HIGH_SPEED_CIF},
+    {"highspeedvga", CAMCORDER_QUALITY_HIGH_SPEED_VGA},
+    {"highspeed4kdci", CAMCORDER_QUALITY_HIGH_SPEED_4KDCI},
+    {"qhd", CAMCORDER_QUALITY_QHD},
+    {"2k", CAMCORDER_QUALITY_2k},
+    {"timelapseqhd", CAMCORDER_QUALITY_TIME_LAPSE_QHD},
+    {"timelapse2k", CAMCORDER_QUALITY_TIME_LAPSE_2k},
 };
 
 #if LOG_NDEBUG
diff --git a/media/libmedia/NdkWrapper.cpp b/media/libmedia/NdkWrapper.cpp
index 6dbc9b8..cbd64bb 100644
--- a/media/libmedia/NdkWrapper.cpp
+++ b/media/libmedia/NdkWrapper.cpp
@@ -57,6 +57,7 @@
     AMEDIAFORMAT_KEY_COLOR_STANDARD,
     AMEDIAFORMAT_KEY_COLOR_TRANSFER,
     AMEDIAFORMAT_KEY_COMPLEXITY,
+    AMEDIAFORMAT_KEY_CREATE_INPUT_SURFACE_SUSPENDED,
     AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE,
     AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_BYTE_BLOCK,
     AMEDIAFORMAT_KEY_CRYPTO_MODE,
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index aa77cd3..8dac91a 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -228,6 +228,9 @@
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_2POINT1),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_2POINT0POINT2),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_2POINT1POINT2),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_TRI),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_TRI_BACK),
+    MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_3POINT1),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_3POINT0POINT2),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_3POINT1POINT2),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_QUAD),
@@ -388,6 +391,7 @@
     MAKE_STRING_FROM_ENUM(AUDIO_FLAG_BYPASS_MUTE),
     MAKE_STRING_FROM_ENUM(AUDIO_FLAG_LOW_LATENCY),
     MAKE_STRING_FROM_ENUM(AUDIO_FLAG_DEEP_BUFFER),
+    MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NO_CAPTURE),
     TERMINATOR
 };
 
diff --git a/media/libmedia/aidl/android/media/IMediaUpdateService.aidl b/media/libmedia/aidl/android/media/IMediaUpdateService.aidl
deleted file mode 100644
index 4777969..0000000
--- a/media/libmedia/aidl/android/media/IMediaUpdateService.aidl
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * Service to reload media component plugins when update package is installed/uninstalled.
- * @hide
- */
-interface IMediaUpdateService {
-    void loadPlugins(@utf8InCpp String apkPath);
-}
diff --git a/media/libmedia/include/media/MediaProfiles.h b/media/libmedia/include/media/MediaProfiles.h
index 0feb4f3..3e8e7c8 100644
--- a/media/libmedia/include/media/MediaProfiles.h
+++ b/media/libmedia/include/media/MediaProfiles.h
@@ -34,7 +34,11 @@
     CAMCORDER_QUALITY_1080P = 6,
     CAMCORDER_QUALITY_QVGA = 7,
     CAMCORDER_QUALITY_2160P = 8,
-    CAMCORDER_QUALITY_LIST_END = 8,
+    CAMCORDER_QUALITY_VGA = 9,
+    CAMCORDER_QUALITY_4KDCI = 10,
+    CAMCORDER_QUALITY_QHD = 11,
+    CAMCORDER_QUALITY_2k = 12,
+    CAMCORDER_QUALITY_LIST_END = 12,
 
     CAMCORDER_QUALITY_TIME_LAPSE_LIST_START = 1000,
     CAMCORDER_QUALITY_TIME_LAPSE_LOW  = 1000,
@@ -46,7 +50,11 @@
     CAMCORDER_QUALITY_TIME_LAPSE_1080P = 1006,
     CAMCORDER_QUALITY_TIME_LAPSE_QVGA = 1007,
     CAMCORDER_QUALITY_TIME_LAPSE_2160P = 1008,
-    CAMCORDER_QUALITY_TIME_LAPSE_LIST_END = 1008,
+    CAMCORDER_QUALITY_TIME_LAPSE_VGA = 1009,
+    CAMCORDER_QUALITY_TIME_LAPSE_4KDCI = 1010,
+    CAMCORDER_QUALITY_TIME_LAPSE_QHD = 1011,
+    CAMCORDER_QUALITY_TIME_LAPSE_2k = 1012,
+    CAMCORDER_QUALITY_TIME_LAPSE_LIST_END = 1012,
 
     CAMCORDER_QUALITY_HIGH_SPEED_LIST_START = 2000,
     CAMCORDER_QUALITY_HIGH_SPEED_LOW  = 2000,
@@ -55,7 +63,10 @@
     CAMCORDER_QUALITY_HIGH_SPEED_720P = 2003,
     CAMCORDER_QUALITY_HIGH_SPEED_1080P = 2004,
     CAMCORDER_QUALITY_HIGH_SPEED_2160P = 2005,
-    CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2005,
+    CAMCORDER_QUALITY_HIGH_SPEED_CIF = 2006,
+    CAMCORDER_QUALITY_HIGH_SPEED_VGA = 2007,
+    CAMCORDER_QUALITY_HIGH_SPEED_4KDCI = 2008,
+    CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2008,
 };
 
 enum video_decoder {
diff --git a/media/libmedia/include/media/omx/1.0/WOmxNode.h b/media/libmedia/include/media/omx/1.0/WOmxNode.h
index eebc8c6..1db4248 100644
--- a/media/libmedia/include/media/omx/1.0/WOmxNode.h
+++ b/media/libmedia/include/media/omx/1.0/WOmxNode.h
@@ -59,7 +59,7 @@
  * - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
  */
 
-struct LWOmxNode : public H2BConverter<IOmxNode, IOMXNode, BnOMXNode> {
+struct LWOmxNode : public H2BConverter<IOmxNode, BnOMXNode> {
     LWOmxNode(sp<IOmxNode> const& base) : CBase(base) {}
     status_t freeNode() override;
     status_t sendCommand(
diff --git a/media/libmedia/omx/1.0/WGraphicBufferSource.cpp b/media/libmedia/omx/1.0/WGraphicBufferSource.cpp
index 31d1df9..1ed1d07 100644
--- a/media/libmedia/omx/1.0/WGraphicBufferSource.cpp
+++ b/media/libmedia/omx/1.0/WGraphicBufferSource.cpp
@@ -32,7 +32,7 @@
 
 BnStatus LWGraphicBufferSource::configure(
         const sp<IOMXNode>& omxNode, int32_t dataSpace) {
-    sp<IOmxNode> hOmxNode = omxNode->getHalInterface();
+    sp<IOmxNode> hOmxNode = omxNode->getHalInterface<IOmxNode>();
     return toBinderStatus(mBase->configure(
             hOmxNode == nullptr ? new TWOmxNode(omxNode) : hOmxNode,
             toHardwareDataspace(dataSpace)));
diff --git a/media/libmediaplayer2/Android.bp b/media/libmediaplayer2/Android.bp
index 00f537d..08519cd 100644
--- a/media/libmediaplayer2/Android.bp
+++ b/media/libmediaplayer2/Android.bp
@@ -123,9 +123,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
 }
diff --git a/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp b/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
index 98a3e75..4de92ad 100644
--- a/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
+++ b/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
@@ -22,7 +22,6 @@
 #include <cutils/properties.h> // for property_get
 #include <utils/Log.h>
 
-#include <media/AudioPolicyHelper.h>
 #include <media/stagefright/foundation/ADebug.h>
 
 namespace {
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
index 5da6e24..d608d4a 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
@@ -1289,6 +1289,7 @@
             } else if (what == DecoderBase::kWhatShutdownCompleted) {
                 ALOGV("%s shutdown completed", audio ? "audio" : "video");
                 if (audio) {
+                    Mutex::Autolock autoLock(mDecoderLock);
                     mAudioDecoder.clear();
                     mAudioDecoderError = false;
                     ++mAudioDecoderGeneration;
@@ -1296,6 +1297,7 @@
                     CHECK_EQ((int)mFlushingAudio, (int)SHUTTING_DOWN_DECODER);
                     mFlushingAudio = SHUT_DOWN;
                 } else {
+                    Mutex::Autolock autoLock(mDecoderLock);
                     mVideoDecoder.clear();
                     mVideoDecoderError = false;
                     ++mVideoDecoderGeneration;
@@ -1967,6 +1969,7 @@
         int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder) {
     if (mAudioDecoder != NULL) {
         mAudioDecoder->pause();
+        Mutex::Autolock autoLock(mDecoderLock);
         mAudioDecoder.clear();
         mAudioDecoderError = false;
         ++mAudioDecoderGeneration;
@@ -1988,11 +1991,21 @@
     closeAudioSink();
     mRenderer->flush(true /* audio */, false /* notifyComplete */);
     if (mVideoDecoder != NULL) {
-        mRenderer->flush(false /* audio */, false /* notifyComplete */);
+        mDeferredActions.push_back(
+                new FlushDecoderAction(FLUSH_CMD_NONE /* audio */,
+                                       FLUSH_CMD_FLUSH /* video */));
+        mDeferredActions.push_back(
+                new SeekAction(currentPositionUs,
+                MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */));
+        // After a flush without shutdown, decoder is paused.
+        // Don't resume it until source seek is done, otherwise it could
+        // start pulling stale data too soon.
+        mDeferredActions.push_back(new ResumeDecoderAction(false));
+        processDeferredActions();
+    } else {
+        performSeek(currentPositionUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */);
     }
 
-    performSeek(currentPositionUs, MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC /* mode */);
-
     if (forceNonOffload) {
         mRenderer->signalDisableOffloadAudio();
         mOffloadAudio = false;
@@ -2085,6 +2098,8 @@
         }
     }
 
+    Mutex::Autolock autoLock(mDecoderLock);
+
     if (audio) {
         sp<AMessage> notify = new AMessage(kWhatAudioNotify, this);
         ++mAudioDecoderGeneration;
@@ -2395,6 +2410,8 @@
     CHECK(mTrackStats != NULL);
 
     mTrackStats->clear();
+
+    Mutex::Autolock autoLock(mDecoderLock);
     if (mVideoDecoder != NULL) {
         mTrackStats->push_back(mVideoDecoder->getStats());
     }
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.h b/media/libmediaplayer2/nuplayer2/NuPlayer2.h
index 798c725..b8fb988 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.h
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.h
@@ -197,6 +197,7 @@
     sp<DecoderBase> mVideoDecoder;
     bool mOffloadAudio;
     sp<DecoderBase> mAudioDecoder;
+    Mutex mDecoderLock;  // guard |mAudioDecoder| and |mVideoDecoder|.
     sp<CCDecoder> mCCDecoder;
     sp<Renderer> mRenderer;
     sp<ALooper> mRendererLooper;
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
index 9729d86..66bfae5 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
@@ -109,7 +109,10 @@
     mStats->setInt64("frames-dropped-output", mNumOutputFramesDropped);
     mStats->setFloat("frame-rate-total", mFrameRateTotal);
 
-    return mStats;
+    // We are holding the mutex here.
+    // Return our own copy, so the caller isn't affected by any later changes.
+    sp<AMessage> copiedStats = mStats->dup();
+    return copiedStats;
 }
 
 status_t NuPlayer2::Decoder::setVideoSurface(const sp<ANativeWindowWrapper> &nww) {
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
index 1b661f2..1876496 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
@@ -81,7 +81,7 @@
 };
 
 // key for media statistics
-static const char *kKeyPlayer = "nuplayer";
+static const char *kKeyPlayer = "nuplayer2";
 // attrs for media statistics
     // NB: these are matched with public Java API constants defined
     // in frameworks/base/media/java/android/media/MediaPlayer2.java
@@ -108,6 +108,8 @@
 static const char *kPlayerRebufferingCount = "android.media.mediaplayer.rebuffers";
 static const char *kPlayerRebufferingAtExit = "android.media.mediaplayer.rebufferExit";
 
+static const char *kPlayerVersion = "android.media.mediaplayer.version";
+
 
 NuPlayer2Driver::NuPlayer2Driver(pid_t pid, uid_t uid, const sp<JObjectHolder> &context)
     : mState(STATE_IDLE),
@@ -127,6 +129,7 @@
       mPlayer(new NuPlayer2(pid, uid, mMediaClock, context)),
       mPlayerFlags(0),
       mMetricsHandle(0),
+      mPlayerVersion(0),
       mClientUid(uid),
       mAtEOS(false),
       mLooping(false),
@@ -137,9 +140,13 @@
 
     mMediaClock->init();
 
+    // XXX: what version are we?
+    // Ideally, this should track the apk version info for the APEX packaging
+
     // set up media metrics record
     mMetricsHandle = mediametrics_create(kKeyPlayer);
     mediametrics_setUid(mMetricsHandle, mClientUid);
+    mediametrics_setInt64(mMetricsHandle, kPlayerVersion, mPlayerVersion);
 
     mNuPlayer2Looper->start(
             false, /* runOnCallingThread */
@@ -473,7 +480,7 @@
                 float frameRate = 0;
                 if (stats->findFloat("frame-rate-output", &frameRate)) {
                     mediametrics_setInt64(mMetricsHandle, kPlayerFrameRate, frameRate);
-		}
+                }
 
             } else if (mime.startsWith("audio/")) {
                 mediametrics_setCString(mMetricsHandle, kPlayerAMime, mime.c_str());
@@ -524,6 +531,7 @@
         mediametrics_delete(mMetricsHandle);
         mMetricsHandle = mediametrics_create(kKeyPlayer);
         mediametrics_setUid(mMetricsHandle, mClientUid);
+        mediametrics_setInt64(mMetricsHandle, kPlayerVersion, mPlayerVersion);
     } else {
         ALOGV("did not have anything to record");
     }
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
index 3d299f3..c97e247 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
@@ -134,6 +134,7 @@
     uint32_t mPlayerFlags;
 
     mediametrics_handle_t mMetricsHandle;
+    int64_t mPlayerVersion;
     uid_t mClientUid;
 
     bool mAtEOS;
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
index 3be7e36..a8c9932 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
@@ -1148,8 +1148,7 @@
         ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
         return 0;
     }
-    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
-    return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
+    return (int64_t)(numFrames * 1000000LL / sampleRate);
 }
 
 // Calculate duration of pending samples if played at normal rate (i.e., 1.0).
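A quick sanity check on the 12.4-hour figure mentioned in the removed TODO (assuming numFrames is a 64-bit frame count and a 48 kHz stream): the old (int32_t) cast truncates once the frame count exceeds INT32_MAX, which at 48 kHz happens after roughly half a day of audio.

    INT32_MAX frames            = 2147483647
    2147483647 / 48000 frames/s ≈ 44739 s ≈ 12.4 hours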
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
index 55867a5..22fa495 100644
--- a/media/libmediaplayerservice/Android.bp
+++ b/media/libmediaplayerservice/Android.bp
@@ -12,6 +12,7 @@
 
     shared_libs: [
         "android.hardware.media.omx@1.0",
+        "libbase",
         "libaudioclient",
         "libbinder",
         "libcamera_client",
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 96f79e0..da95817 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -45,7 +45,6 @@
 #include <utils/Timers.h>
 #include <utils/Vector.h>
 
-#include <media/AudioPolicyHelper.h>
 #include <media/IMediaHTTPService.h>
 #include <media/IRemoteDisplay.h>
 #include <media/IRemoteDisplayClient.h>
@@ -1627,7 +1626,7 @@
         mAttributes = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
         if (mAttributes != NULL) {
             memcpy(mAttributes, attr, sizeof(audio_attributes_t));
-            mStreamType = audio_attributes_to_stream_type(attr);
+            mStreamType = AudioSystem::attributesToStreamType(*attr);
         }
     } else {
         mAttributes = NULL;
@@ -1816,7 +1815,7 @@
             mAttributes = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
         }
         memcpy(mAttributes, attributes, sizeof(audio_attributes_t));
-        mStreamType = audio_attributes_to_stream_type(attributes);
+        mStreamType = AudioSystem::attributesToStreamType(*attributes);
     }
 }
 
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 37b13f0..d111313 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -24,6 +24,7 @@
 
 #include <algorithm>
 
+#include <android-base/properties.h>
 #include <android/hardware/ICamera.h>
 
 #include <binder/IPCThreadState.h>
@@ -1761,13 +1762,26 @@
         }
     }
 
+    // Enable temporal layering if the expected (max) playback frame rate is more than ~11% above
+    // the minimum display refresh rate on a typical device. Add layers until the base layer falls
+    // under this limit. Allow device manufacturers to override this limit.
+
+    // TODO: make this configurable by the application
+    std::string maxBaseLayerFpsProperty =
+        ::android::base::GetProperty("ro.media.recorder-max-base-layer-fps", "");
+    float maxBaseLayerFps = (float)::atof(maxBaseLayerFpsProperty.c_str());
+    // TRICKY: use !(>=) so that a NaN value also falls back to the default
+    if (!(maxBaseLayerFps >= kMinTypicalDisplayRefreshingRate / 0.9)) {
+        maxBaseLayerFps = kMinTypicalDisplayRefreshingRate / 0.9;
+    }
+
     for (uint32_t tryLayers = 1; tryLayers <= kMaxNumVideoTemporalLayers; ++tryLayers) {
         if (tryLayers > tsLayers) {
             tsLayers = tryLayers;
         }
         // keep going until the base layer fps falls below the typical display refresh rate
         float baseLayerFps = maxPlaybackFps / (1 << (tryLayers - 1));
-        if (baseLayerFps < kMinTypicalDisplayRefreshingRate / 0.9) {
+        if (baseLayerFps < maxBaseLayerFps) {
             break;
         }
     }
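The negated comparison in the hunk above is what makes the override robust: every ordered comparison against NaN is false, so !(x >= floor) is true both for values below the floor and for NaN, and both fall back to the default. A simplified standalone sketch of that clamp (the property name comes from the hunk; the helper name is hypothetical):

    #include <cstdlib>
    #include <string>

    // Read a float property value and clamp unparseable / NaN / too-small values to a floor.
    float clampedMaxBaseLayerFps(const std::string &propertyValue, float floorFps) {
        // An empty or non-numeric string parses to 0.0, which is below any sane floor.
        float fps = static_cast<float>(::atof(propertyValue.c_str()));
        // NaN compares false with >=, so the negated form also catches "nan" inputs.
        if (!(fps >= floorFps)) {
            fps = floorFps;
        }
        return fps;
    }

    // Usage, mirroring the default in the hunk above (kMinTypicalDisplayRefreshingRate / 0.9):
    // float maxBaseLayerFps = clampedMaxBaseLayerFps(
    //         ::android::base::GetProperty("ro.media.recorder-max-base-layer-fps", ""),
    //         kMinTypicalDisplayRefreshingRate / 0.9);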
diff --git a/media/libmediaplayerservice/include/MediaPlayerInterface.h b/media/libmediaplayerservice/include/MediaPlayerInterface.h
index 3119950..0ad4d04 100644
--- a/media/libmediaplayerservice/include/MediaPlayerInterface.h
+++ b/media/libmediaplayerservice/include/MediaPlayerInterface.h
@@ -151,13 +151,13 @@
 
         virtual media::VolumeShaper::Status applyVolumeShaper(
                                     const sp<media::VolumeShaper::Configuration>& configuration,
-                                    const sp<media::VolumeShaper::Operation>& operation);
-        virtual sp<media::VolumeShaper::State> getVolumeShaperState(int id);
+                                    const sp<media::VolumeShaper::Operation>& operation) = 0;
+        virtual sp<media::VolumeShaper::State> getVolumeShaperState(int id) = 0;
 
         // AudioRouting
-        virtual status_t    setOutputDevice(audio_port_handle_t deviceId);
-        virtual status_t    getRoutedDeviceId(audio_port_handle_t* deviceId);
-        virtual status_t    enableAudioDeviceCallback(bool enabled);
+        virtual status_t    setOutputDevice(audio_port_handle_t deviceId) = 0;
+        virtual status_t    getRoutedDeviceId(audio_port_handle_t* deviceId) = 0;
+        virtual status_t    enableAudioDeviceCallback(bool enabled) = 0;
     };
 
                         MediaPlayerBase() {}
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 1e85804..5a58aa0 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -94,6 +94,7 @@
     mDisconnected = false;
     mUri.clear();
     mUriHeaders.clear();
+    mSources.clear();
     if (mFd >= 0) {
         close(mFd);
         mFd = -1;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 5cf6bbd..3388097 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -1829,11 +1829,21 @@
     closeAudioSink();
     mRenderer->flush(true /* audio */, false /* notifyComplete */);
     if (mVideoDecoder != NULL) {
-        mRenderer->flush(false /* audio */, false /* notifyComplete */);
+        mDeferredActions.push_back(
+                new FlushDecoderAction(FLUSH_CMD_NONE /* audio */,
+                                       FLUSH_CMD_FLUSH /* video */));
+        mDeferredActions.push_back(
+                new SeekAction(currentPositionUs,
+                MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */));
+        // After a flush without shutdown, decoder is paused.
+        // Don't resume it until source seek is done, otherwise it could
+        // start pulling stale data too soon.
+        mDeferredActions.push_back(new ResumeDecoderAction(false));
+        processDeferredActions();
+    } else {
+        performSeek(currentPositionUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */);
     }
 
-    performSeek(currentPositionUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */);
-
     if (forceNonOffload) {
         mRenderer->signalDisableOffloadAudio();
         mOffloadAudio = false;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 6d69d50..2f0da2d 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -107,11 +107,16 @@
 }
 
 sp<AMessage> NuPlayer::Decoder::getStats() const {
+
     mStats->setInt64("frames-total", mNumFramesTotal);
     mStats->setInt64("frames-dropped-input", mNumInputFramesDropped);
     mStats->setInt64("frames-dropped-output", mNumOutputFramesDropped);
     mStats->setFloat("frame-rate-total", mFrameRateTotal);
-    return mStats;
+
+    // We are holding the mutex here.
+    // Return our own copy, so the caller isn't affected by any later changes.
+    sp<AMessage> copiedStats = mStats->dup();
+    return copiedStats;
 }
 
 status_t NuPlayer::Decoder::setVideoSurface(const sp<Surface> &surface) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 1b396c0..2b813e7 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -778,7 +778,7 @@
 
 status_t NuPlayerDriver::getParameter(int key, Parcel *reply) {
 
-    if (key == FOURCC('m','t','r','X')) {
+    if (key == FOURCC('m','t','r','X') && mAnalyticsItem != NULL) {
         // mtrX -- a play on 'metrics' (not matrix)
         // gather current info all together, parcel it, and send it back
         updateMetrics("api");
@@ -1006,7 +1006,7 @@
             // when we have an error, add it to the analytics for this playback.
             // ext1 is our primary 'error type' value. Only add ext2 when non-zero.
             // [test against msg is due to fall through from previous switch value]
-            if (msg == MEDIA_ERROR) {
+            if (msg == MEDIA_ERROR && mAnalyticsItem != NULL) {
                 mAnalyticsItem->setInt32(kPlayerError, ext1);
                 if (ext2 != 0) {
                     mAnalyticsItem->setInt32(kPlayerErrorCode, ext2);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index c990b2a..65d6d61 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -1161,8 +1161,8 @@
         ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
         return 0;
     }
-    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
-    return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
+
+    return (int64_t)(numFrames * 1000000LL / sampleRate);
 }
 
 // Calculate duration of pending samples if played at normal rate (i.e., 1.0).
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index a1a2660..9d3338b 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -171,11 +171,7 @@
 }
 
 struct CodecObserver : public BnOMXObserver {
-    CodecObserver() {}
-
-    void setNotificationMessage(const sp<AMessage> &msg) {
-        mNotify = msg;
-    }
+    explicit CodecObserver(const sp<AMessage> &msg) : mNotify(msg) {}
 
     // from IOMXObserver
     virtual void onMessages(const std::list<omx_message> &messages) {
@@ -251,7 +247,7 @@
     virtual ~CodecObserver() {}
 
 private:
-    sp<AMessage> mNotify;
+    const sp<AMessage> mNotify;
 
     DISALLOW_EVIL_CONSTRUCTORS(CodecObserver);
 };
@@ -1248,6 +1244,7 @@
         info.mRenderInfo = NULL;
         info.mGraphicBuffer = graphicBuffer;
         info.mNewGraphicBuffer = false;
+        info.mDequeuedAt = mDequeueCounter;
 
         // TODO: We shouldn't need to create MediaCodecBuffer. In metadata mode
         //       OMX doesn't use the shared memory buffer, but some code still
@@ -2092,7 +2089,8 @@
         if (usingSwRenderer) {
             outputFormat->setInt32("using-sw-renderer", 1);
         }
-    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
+    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG) ||
+        !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II)) {
         int32_t numChannels, sampleRate;
         if (!msg->findInt32("channel-count", &numChannels)
                 || !msg->findInt32("sample-rate", &sampleRate)) {
@@ -4297,24 +4295,27 @@
     int maxDimension = max(width, height);
 
     static const int limits[][5] = {
-        /*   MBps     MB   dim  bitrate        level */
-        {    1485,    99,  28,     64, OMX_VIDEO_AVCLevel1  },
-        {    1485,    99,  28,    128, OMX_VIDEO_AVCLevel1b },
-        {    3000,   396,  56,    192, OMX_VIDEO_AVCLevel11 },
-        {    6000,   396,  56,    384, OMX_VIDEO_AVCLevel12 },
-        {   11880,   396,  56,    768, OMX_VIDEO_AVCLevel13 },
-        {   11880,   396,  56,   2000, OMX_VIDEO_AVCLevel2  },
-        {   19800,   792,  79,   4000, OMX_VIDEO_AVCLevel21 },
-        {   20250,  1620, 113,   4000, OMX_VIDEO_AVCLevel22 },
-        {   40500,  1620, 113,  10000, OMX_VIDEO_AVCLevel3  },
-        {  108000,  3600, 169,  14000, OMX_VIDEO_AVCLevel31 },
-        {  216000,  5120, 202,  20000, OMX_VIDEO_AVCLevel32 },
-        {  245760,  8192, 256,  20000, OMX_VIDEO_AVCLevel4  },
-        {  245760,  8192, 256,  50000, OMX_VIDEO_AVCLevel41 },
-        {  522240,  8704, 263,  50000, OMX_VIDEO_AVCLevel42 },
-        {  589824, 22080, 420, 135000, OMX_VIDEO_AVCLevel5  },
-        {  983040, 36864, 543, 240000, OMX_VIDEO_AVCLevel51 },
-        { 2073600, 36864, 543, 240000, OMX_VIDEO_AVCLevel52 },
+        /*    MBps      MB   dim  bitrate        level */
+        {     1485,     99,   28,     64, OMX_VIDEO_AVCLevel1  },
+        {     1485,     99,   28,    128, OMX_VIDEO_AVCLevel1b },
+        {     3000,    396,   56,    192, OMX_VIDEO_AVCLevel11 },
+        {     6000,    396,   56,    384, OMX_VIDEO_AVCLevel12 },
+        {    11880,    396,   56,    768, OMX_VIDEO_AVCLevel13 },
+        {    11880,    396,   56,   2000, OMX_VIDEO_AVCLevel2  },
+        {    19800,    792,   79,   4000, OMX_VIDEO_AVCLevel21 },
+        {    20250,   1620,  113,   4000, OMX_VIDEO_AVCLevel22 },
+        {    40500,   1620,  113,  10000, OMX_VIDEO_AVCLevel3  },
+        {   108000,   3600,  169,  14000, OMX_VIDEO_AVCLevel31 },
+        {   216000,   5120,  202,  20000, OMX_VIDEO_AVCLevel32 },
+        {   245760,   8192,  256,  20000, OMX_VIDEO_AVCLevel4  },
+        {   245760,   8192,  256,  50000, OMX_VIDEO_AVCLevel41 },
+        {   522240,   8704,  263,  50000, OMX_VIDEO_AVCLevel42 },
+        {   589824,  22080,  420, 135000, OMX_VIDEO_AVCLevel5  },
+        {   983040,  36864,  543, 240000, OMX_VIDEO_AVCLevel51 },
+        {  2073600,  36864,  543, 240000, OMX_VIDEO_AVCLevel52 },
+        {  4177920, 139264, 1055, 240000, OMX_VIDEO_AVCLevel6  },
+        {  8355840, 139264, 1055, 480000, OMX_VIDEO_AVCLevel61 },
+        { 16711680, 139264, 1055, 800000, OMX_VIDEO_AVCLevel62 },
     };
 
     for (size_t i = 0; i < ARRAY_SIZE(limits); i++) {
@@ -4427,9 +4428,9 @@
         h264type.nRefFrames = 2;
         h264type.nBFrames = mLatency == 0 ? 1 : std::min(1U, mLatency - 1);
 
-        // disable B-frames until MPEG4Writer can guarantee finalizing files with B-frames
-        // h264type.nRefFrames = 1;
-        // h264type.nBFrames = 0;
+        // disable B-frames until we have explicit settings for enabling the feature.
+        h264type.nRefFrames = 1;
+        h264type.nBFrames = 0;
 
         h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate, h264type.nBFrames);
         h264type.nAllowedPictureTypes =
@@ -6535,8 +6536,10 @@
 
     if (mDeathNotifier != NULL) {
         if (mCodec->mOMXNode != NULL) {
-            auto tOmxNode = mCodec->mOMXNode->getHalInterface();
-            tOmxNode->unlinkToDeath(mDeathNotifier);
+            auto tOmxNode = mCodec->mOMXNode->getHalInterface<IOmxNode>();
+            if (tOmxNode) {
+                tOmxNode->unlinkToDeath(mDeathNotifier);
+            }
         }
         mDeathNotifier.clear();
     }
@@ -6623,7 +6626,8 @@
 
     CHECK(mCodec->mOMXNode == NULL);
 
-    sp<AMessage> notify = new AMessage(kWhatOMXDied, mCodec);
+    sp<AMessage> notify = new AMessage(kWhatOMXMessageList, mCodec);
+    notify->setInt32("generation", mCodec->mNodeGeneration + 1);
 
     sp<RefBase> obj;
     CHECK(msg->findObject("codecInfo", &obj));
@@ -6638,7 +6642,7 @@
     AString componentName;
     CHECK(msg->findString("componentName", &componentName));
 
-    sp<CodecObserver> observer = new CodecObserver;
+    sp<CodecObserver> observer = new CodecObserver(notify);
     sp<IOMX> omx;
     sp<IOMXNode> omxNode;
 
@@ -6664,14 +6668,12 @@
     }
 
     mDeathNotifier = new DeathNotifier(notify);
-    auto tOmxNode = omxNode->getHalInterface();
-    if (!tOmxNode->linkToDeath(mDeathNotifier, 0)) {
+    auto tOmxNode = omxNode->getHalInterface<IOmxNode>();
+    if (tOmxNode && !tOmxNode->linkToDeath(mDeathNotifier, 0)) {
         mDeathNotifier.clear();
     }
 
-    notify = new AMessage(kWhatOMXMessageList, mCodec);
-    notify->setInt32("generation", ++mCodec->mNodeGeneration);
-    observer->setNotificationMessage(notify);
+    ++mCodec->mNodeGeneration;
 
     mCodec->mComponentName = componentName;
     mCodec->mRenderTracker.setComponentName(componentName);
@@ -8161,6 +8163,10 @@
                             OMX_CommandPortEnable, kPortIndexOutput);
                 }
 
+                // Clear the RenderQueue in which queued GraphicBuffers hold the
+                // actual buffer references in order to free them early.
+                mCodec->mRenderTracker.clear(systemTime(CLOCK_MONOTONIC));
+
                 if (err == OK) {
                     err = mCodec->allocateBuffersOnPort(kPortIndexOutput);
                     ALOGE_IF(err != OK, "Failed to allocate output port buffers after port "
@@ -8566,7 +8572,7 @@
     }
 
     sp<IOMX> omx = client.interface();
-    sp<CodecObserver> observer = new CodecObserver;
+    sp<CodecObserver> observer = new CodecObserver(new AMessage);
     sp<IOMXNode> omxNode;
 
     err = omx->allocateNode(name, observer, &omxNode);
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 26464b8..488890d 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -220,6 +220,7 @@
     ],
 
     header_libs:[
+        "libnativeloader-dummy-headers",
         "libstagefright_xmlparser_headers",
         "media_ndk_headers",
     ],
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 42b98b1..18a6bd8 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -431,7 +431,7 @@
             || !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
 
     if (frameTimeUs < 0) {
-        int64_t thumbNailTime;
+        int64_t thumbNailTime = -1ll;
         if (!trackMeta()->findInt64(kKeyThumbnailTime, &thumbNailTime)
                 || thumbNailTime < 0) {
             thumbNailTime = 0;
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index c4015fb..6259b15 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -3595,7 +3595,7 @@
 }
 
 int64_t MPEG4Writer::Track::getDurationUs() const {
-    return mTrackDurationUs + getStartTimeOffsetTimeUs();
+    return mTrackDurationUs + getStartTimeOffsetTimeUs() + mOwner->getStartTimeOffsetBFramesUs();
 }
 
 int64_t MPEG4Writer::Track::getEstimatedTrackSizeBytes() const {
@@ -4059,7 +4059,7 @@
     // Prepone video playback.
     if (mMinCttsOffsetTicks != mMaxCttsOffsetTicks) {
         int32_t mvhdTimeScale = mOwner->getTimeScale();
-        uint32_t tkhdDuration = (mTrackDurationUs * mvhdTimeScale + 5E5) / 1E6;
+        uint32_t tkhdDuration = (getDurationUs() * mvhdTimeScale + 5E5) / 1E6;
         int64_t mediaTime = ((kMaxCttsOffsetTimeUs - getMinCttsOffsetTimeUs())
             * mTimeScale + 5E5) / 1E6;
         if (tkhdDuration > 0 && mediaTime > 0) {
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 93478e9..3d58d4b 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -77,7 +77,8 @@
     return profilingNeeded;
 }
 
-OmxInfoBuilder sOmxInfoBuilder;
+OmxInfoBuilder sOmxInfoBuilder{true /* allowSurfaceEncoders */};
+OmxInfoBuilder sOmxNoSurfaceEncoderInfoBuilder{false /* allowSurfaceEncoders */};
 
 Mutex sCodec2InfoBuilderMutex;
 std::unique_ptr<MediaCodecListBuilderBase> sCodec2InfoBuilder;
@@ -98,7 +99,11 @@
     sp<PersistentSurface> surfaceTest =
         StagefrightPluginLoader::GetCCodecInstance()->createInputSurface();
     if (surfaceTest == nullptr) {
+        ALOGD("Allowing all OMX codecs");
         builders.push_back(&sOmxInfoBuilder);
+    } else {
+        ALOGD("Allowing only non-surface-encoder OMX codecs");
+        builders.push_back(&sOmxNoSurfaceEncoderInfoBuilder);
     }
     builders.push_back(GetCodec2InfoBuilder());
     return builders;
@@ -219,6 +224,21 @@
                 return info1 == nullptr
                         || (info2 != nullptr && info1->getRank() < info2->getRank());
             });
+
+    // remove duplicate entries
+    bool dedupe = property_get_bool("debug.stagefright.dedupe-codecs", true);
+    if (dedupe) {
+        std::set<std::string> codecsSeen;
+        for (auto it = mCodecInfos.begin(); it != mCodecInfos.end(); ) {
+            std::string codecName = (*it)->getCodecName();
+            if (codecsSeen.count(codecName) == 0) {
+                codecsSeen.emplace(codecName);
+                it++;
+            } else {
+                it = mCodecInfos.erase(it);
+            }
+        }
+    }
 }
 
 MediaCodecList::~MediaCodecList() {
@@ -268,10 +288,17 @@
 }
 
 ssize_t MediaCodecList::findCodecByName(const char *name) const {
+    Vector<AString> aliases;
     for (size_t i = 0; i < mCodecInfos.size(); ++i) {
         if (strcmp(mCodecInfos[i]->getCodecName(), name) == 0) {
             return i;
         }
+        mCodecInfos[i]->getAliases(&aliases);
+        for (const AString &alias : aliases) {
+            if (alias == name) {
+                return i;
+            }
+        }
     }
 
     return -ENOENT;
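The dedupe loop added above uses the usual erase-while-iterating idiom. A simplified standalone version of the same idea (std::vector<std::string> stands in for the real list of MediaCodecInfo entries; names are hypothetical):

    #include <set>
    #include <string>
    #include <vector>

    // Keep the first occurrence of each name, drop later duplicates.
    void dedupeByName(std::vector<std::string> &names) {
        std::set<std::string> seen;
        for (auto it = names.begin(); it != names.end(); ) {
            if (seen.insert(*it).second) {
                ++it;                  // first time we see this name: keep it
            } else {
                it = names.erase(it);  // duplicate: erase() returns the next valid iterator
            }
        }
    }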
diff --git a/media/libstagefright/MediaCodecListWriter.cpp b/media/libstagefright/MediaCodecListWriter.cpp
index b32e470..c4fb199 100644
--- a/media/libstagefright/MediaCodecListWriter.cpp
+++ b/media/libstagefright/MediaCodecListWriter.cpp
@@ -37,6 +37,16 @@
             new MediaCodecInfoWriter(info.get()));
 }
 
+std::unique_ptr<MediaCodecInfoWriter>
+        MediaCodecListWriter::findMediaCodecInfo(const char *name) {
+    for (const sp<MediaCodecInfo> &info : mCodecInfos) {
+        if (!strcmp(info->getCodecName(), name)) {
+            return std::unique_ptr<MediaCodecInfoWriter>(new MediaCodecInfoWriter(info.get()));
+        }
+    }
+    return nullptr;
+}
+
 void MediaCodecListWriter::writeGlobalSettings(
         const sp<AMessage> &globalSettings) const {
     for (const std::pair<std::string, std::string> &kv : mGlobalSettings) {
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 0f75822..5d2291f 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -643,6 +643,10 @@
             output->mBufferQueue.clear();
             output->mEncoderReachedEOS = true;
             output->mErrorCode = err;
+            if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
+                mStopping = true;
+                mPuller->stop();
+            }
             output->mCond.signal();
 
             reachedEOS = true;
@@ -761,8 +765,8 @@
 }
 
 status_t MediaCodecSource::onStart(MetaData *params) {
-    if (mStopping) {
-        ALOGE("Failed to start while we're stopping");
+    if (mStopping || mOutput.lock()->mEncoderReachedEOS) {
+        ALOGE("Failed to start while we're stopping or encoder already stopped due to EOS error");
         return INVALID_OPERATION;
     }
     int64_t startTimeUs;
diff --git a/media/libstagefright/MediaExtractorFactory.cpp b/media/libstagefright/MediaExtractorFactory.cpp
index 19b174f..a938d51 100644
--- a/media/libstagefright/MediaExtractorFactory.cpp
+++ b/media/libstagefright/MediaExtractorFactory.cpp
@@ -30,6 +30,7 @@
 #include <media/stagefright/MediaExtractorFactory.h>
 #include <media/IMediaExtractor.h>
 #include <media/IMediaExtractorService.h>
+#include <nativeloader/dlext_namespaces.h>
 #include <private/android_filesystem_config.h>
 #include <cutils/properties.h>
 #include <utils/String8.h>
@@ -38,23 +39,6 @@
 #include <dirent.h>
 #include <dlfcn.h>
 
-// Copied from GraphicsEnv.cpp
-// TODO(b/37049319) Get this from a header once one exists
-extern "C" {
-  android_namespace_t* android_create_namespace(const char* name,
-                                                const char* ld_library_path,
-                                                const char* default_library_path,
-                                                uint64_t type,
-                                                const char* permitted_when_isolated_path,
-                                                android_namespace_t* parent);
-  bool android_link_namespaces(android_namespace_t* from,
-                               android_namespace_t* to,
-                               const char* shared_libs_sonames);
-  enum {
-     ANDROID_NAMESPACE_TYPE_ISOLATED = 1,
-  };
-}
-
 namespace android {
 
 // static
@@ -89,7 +73,7 @@
 
     ALOGV("MediaExtractorFactory::CreateFromService %s", mime);
 
-    UpdateExtractors(nullptr);
+    UpdateExtractors();
 
     // initialize source decryption if needed
     source->DrmInitialization(nullptr /* mime */);
@@ -122,13 +106,6 @@
     return CreateIMediaExtractorFromMediaExtractor(ex, source, plugin);
 }
 
-//static
-void MediaExtractorFactory::LoadPlugins(const ::std::string& apkPath) {
-    // TODO: Verify apk path with package manager in extractor process.
-    ALOGV("Load plugins from: %s", apkPath.c_str());
-    UpdateExtractors(apkPath.empty() ? nullptr : apkPath.c_str());
-}
-
 struct ExtractorPlugin : public RefBase {
     ExtractorDef def;
     void *libHandle;
@@ -258,54 +235,6 @@
 }
 
 //static
-void MediaExtractorFactory::RegisterExtractorsInApk(
-        const char *apkPath, std::list<sp<ExtractorPlugin>> &pluginList) {
-    ALOGV("search for plugins at %s", apkPath);
-    ZipArchiveHandle zipHandle;
-    int32_t ret = OpenArchive(apkPath, &zipHandle);
-    if (ret == 0) {
-        char abi[PROPERTY_VALUE_MAX];
-        property_get("ro.product.cpu.abi", abi, "arm64-v8a");
-        String8 prefix8 = String8::format("lib/%s/", abi);
-        ZipString prefix(prefix8.c_str());
-        ZipString suffix("extractor.so");
-        void* cookie;
-        ret = StartIteration(zipHandle, &cookie, &prefix, &suffix);
-        if (ret == 0) {
-            ZipEntry entry;
-            ZipString name;
-            while (Next(cookie, &entry, &name) == 0) {
-                String8 libPath = String8(apkPath) + "!/" +
-                    String8(reinterpret_cast<const char*>(name.name), name.name_length);
-                // TODO: Open with a linker namespace so that it can be linked with sub-libraries
-                // within the apk instead of system libraries already loaded.
-                void *libHandle = dlopen(libPath.string(), RTLD_NOW | RTLD_LOCAL);
-                if (libHandle) {
-                    GetExtractorDef getDef =
-                        (GetExtractorDef) dlsym(libHandle, "GETEXTRACTORDEF");
-                    if (getDef) {
-                        ALOGV("registering sniffer for %s", libPath.string());
-                        RegisterExtractor(
-                                new ExtractorPlugin(getDef(), libHandle, libPath), pluginList);
-                    } else {
-                        ALOGW("%s does not contain sniffer", libPath.string());
-                        dlclose(libHandle);
-                    }
-                } else {
-                    ALOGW("couldn't dlopen(%s) %s", libPath.string(), strerror(errno));
-                }
-            }
-            EndIteration(cookie);
-        } else {
-            ALOGW("couldn't find plugins from %s, %d", apkPath, ret);
-        }
-        CloseArchive(zipHandle);
-    } else {
-        ALOGW("couldn't open(%s) %d", apkPath, ret);
-    }
-}
-
-//static
 void MediaExtractorFactory::RegisterExtractorsInSystem(
         const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList) {
     ALOGV("search for plugins at %s", libDirPath);
@@ -412,11 +341,9 @@
 static std::unordered_set<std::string> gSupportedExtensions;
 
 // static
-void MediaExtractorFactory::UpdateExtractors(const char *newUpdateApkPath) {
+void MediaExtractorFactory::UpdateExtractors() {
     Mutex::Autolock autoLock(gPluginMutex);
-    if (newUpdateApkPath != nullptr) {
-        gPluginsRegistered = false;
-    }
+
     if (gPluginsRegistered) {
         return;
     }
@@ -437,10 +364,6 @@
 #endif
             "/extractors", *newList);
 
-    if (newUpdateApkPath != nullptr) {
-        RegisterExtractorsInApk(newUpdateApkPath, *newList);
-    }
-
     newList->sort(compareFunc);
     gPlugins = newList;
 
diff --git a/media/libstagefright/OmxInfoBuilder.cpp b/media/libstagefright/OmxInfoBuilder.cpp
index 382c947..8910463 100644
--- a/media/libstagefright/OmxInfoBuilder.cpp
+++ b/media/libstagefright/OmxInfoBuilder.cpp
@@ -21,8 +21,8 @@
 #define OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
 #endif
 
+#include <android-base/properties.h>
 #include <utils/Log.h>
-#include <cutils/properties.h>
 
 #include <media/stagefright/foundation/MediaDefs.h>
 #include <media/stagefright/OmxInfoBuilder.h>
@@ -53,7 +53,7 @@
 namespace /* unnamed */ {
 
 bool hasPrefix(const hidl_string& s, const char* prefix) {
-    return strncmp(s.c_str(), prefix, strlen(prefix)) == 0;
+    return strncasecmp(s.c_str(), prefix, strlen(prefix)) == 0;
 }
 
 status_t queryCapabilities(
@@ -87,7 +87,8 @@
 
 }  // unnamed namespace
 
-OmxInfoBuilder::OmxInfoBuilder() {
+OmxInfoBuilder::OmxInfoBuilder(bool allowSurfaceEncoders)
+    : mAllowSurfaceEncoders(allowSurfaceEncoders) {
 }
 
 status_t OmxInfoBuilder::buildMediaCodecList(MediaCodecListWriter* writer) {
@@ -135,81 +136,80 @@
     // Convert roles to lists of codecs
 
     // codec name -> index into swCodecs/hwCodecs
-    std::map<hidl_string, std::unique_ptr<MediaCodecInfoWriter>>
-            swCodecName2Info, hwCodecName2Info;
+    std::map<hidl_string, std::unique_ptr<MediaCodecInfoWriter>> codecName2Info;
 
-    char rank[PROPERTY_VALUE_MAX];
-    uint32_t defaultRank = 0x100;
-    if (property_get("debug.stagefright.omx_default_rank", rank, nullptr)) {
-        defaultRank = std::strtoul(rank, nullptr, 10);
-    }
+    uint32_t defaultRank =
+        ::android::base::GetUintProperty("debug.stagefright.omx_default_rank", 0x100u);
+    uint32_t defaultSwAudioRank =
+        ::android::base::GetUintProperty("debug.stagefright.omx_default_rank.sw-audio", 0x10u);
+    uint32_t defaultSwOtherRank =
+        ::android::base::GetUintProperty("debug.stagefright.omx_default_rank.sw-other", 0x210u);
+
     for (const IOmxStore::RoleInfo& role : roles) {
         const hidl_string& typeName = role.type;
         bool isEncoder = role.isEncoder;
-        bool preferPlatformNodes = role.preferPlatformNodes;
-        // If preferPlatformNodes is true, hardware nodes must be added after
-        // platform (software) nodes. hwCodecs is used to hold hardware nodes
-        // that need to be added after software nodes for the same role.
-        std::vector<const IOmxStore::NodeInfo*> hwCodecs;
-        for (const IOmxStore::NodeInfo& node : role.nodes) {
+        bool isAudio = hasPrefix(role.type, "audio/");
+        bool isVideoOrImage = hasPrefix(role.type, "video/") || hasPrefix(role.type, "image/");
+
+        for (const IOmxStore::NodeInfo &node : role.nodes) {
             const hidl_string& nodeName = node.name;
+
+            // currently image and video encoders use surface input
+            if (!mAllowSurfaceEncoders && isVideoOrImage && isEncoder) {
+                ALOGD("disabling %s for media type %s because we are not using OMX input surface",
+                        nodeName.c_str(), role.type.c_str());
+                continue;
+            }
+
             bool isSoftware = hasPrefix(nodeName, "OMX.google");
-            MediaCodecInfoWriter* info;
-            if (isSoftware) {
-                auto c2i = swCodecName2Info.find(nodeName);
-                if (c2i == swCodecName2Info.end()) {
-                    // Create a new MediaCodecInfo for a new node.
-                    c2i = swCodecName2Info.insert(std::make_pair(
-                            nodeName, writer->addMediaCodecInfo())).first;
-                    info = c2i->second.get();
-                    info->setName(nodeName.c_str());
-                    info->setOwner(node.owner.c_str());
-                    info->setAttributes(
-                            // all OMX codecs are vendor codecs (in the vendor partition), but
-                            // treat OMX.google codecs as non-hardware-accelerated and  non-vendor
-                            (isEncoder ? MediaCodecInfo::kFlagIsEncoder : 0));
-                    info->setRank(defaultRank);
-                } else {
-                    // The node has been seen before. Simply retrieve the
-                    // existing MediaCodecInfoWriter.
-                    info = c2i->second.get();
-                }
-            } else {
-                auto c2i = hwCodecName2Info.find(nodeName);
-                if (c2i == hwCodecName2Info.end()) {
-                    // Create a new MediaCodecInfo for a new node.
-                    if (!preferPlatformNodes) {
-                        c2i = hwCodecName2Info.insert(std::make_pair(
-                                nodeName, writer->addMediaCodecInfo())).first;
-                        info = c2i->second.get();
-                        info->setName(nodeName.c_str());
-                        info->setOwner(node.owner.c_str());
-                        typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs =
-                            MediaCodecInfo::kFlagIsVendor;
-                        if (isEncoder) {
-                            attrs |= MediaCodecInfo::kFlagIsEncoder;
-                        }
-                        if (std::count_if(
-                                node.attributes.begin(), node.attributes.end(),
-                                [](const IOmxStore::Attribute &i) -> bool {
-                                    return i.key == "attribute::software-codec";
-                                                                          })) {
-                            attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
-                        }
-                        info->setAttributes(attrs);
-                        info->setRank(defaultRank);
-                    } else {
-                        // If preferPlatformNodes is true, this node must be
-                        // added after all software nodes.
-                        hwCodecs.push_back(&node);
-                        continue;
+            uint32_t rank = isSoftware
+                    ? (isAudio ? defaultSwAudioRank : defaultSwOtherRank)
+                    : defaultRank;
+            // get rank from IOmxStore via attribute
+            for (const IOmxStore::Attribute& attribute : node.attributes) {
+                if (attribute.key == "rank") {
+                    uint32_t oldRank = rank;
+                    char dummy;
+                    if (sscanf(attribute.value.c_str(), "%u%c", &rank, &dummy) != 1) {
+                        rank = oldRank;
                     }
-                } else {
-                    // The node has been seen before. Simply retrieve the
-                    // existing MediaCodecInfoWriter.
-                    info = c2i->second.get();
+                    break;
                 }
             }
+
+            MediaCodecInfoWriter* info;
+            auto c2i = codecName2Info.find(nodeName);
+            if (c2i == codecName2Info.end()) {
+                // Create a new MediaCodecInfo for a new node.
+                c2i = codecName2Info.insert(std::make_pair(
+                        nodeName, writer->addMediaCodecInfo())).first;
+                info = c2i->second.get();
+                info->setName(nodeName.c_str());
+                info->setOwner(node.owner.c_str());
+                info->setRank(rank);
+
+                typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs = 0;
+                // all OMX codecs are vendor codecs (in the vendor partition), but
+                // treat OMX.google codecs as non-hardware-accelerated and non-vendor
+                if (!isSoftware) {
+                    attrs |= MediaCodecInfo::kFlagIsVendor;
+                    if (std::count_if(
+                            node.attributes.begin(), node.attributes.end(),
+                            [](const IOmxStore::Attribute &i) -> bool {
+                                return i.key == "attribute::software-codec";
+                                                                      })) {
+                        attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
+                    }
+                }
+                if (isEncoder) {
+                    attrs |= MediaCodecInfo::kFlagIsEncoder;
+                }
+                info->setAttributes(attrs);
+            } else {
+                // The node has been seen before. Simply retrieve the
+                // existing MediaCodecInfoWriter.
+                info = c2i->second.get();
+            }
             std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
                     info->addMediaType(typeName.c_str());
             if (queryCapabilities(
@@ -219,54 +219,8 @@
                 info->removeMediaType(typeName.c_str());
             }
         }
-
-        // If preferPlatformNodes is true, hardware nodes will not have been
-        // added in the loop above, but rather saved in hwCodecs. They are
-        // going to be added here.
-        if (preferPlatformNodes) {
-            for (const IOmxStore::NodeInfo *node : hwCodecs) {
-                MediaCodecInfoWriter* info;
-                const hidl_string& nodeName = node->name;
-                auto c2i = hwCodecName2Info.find(nodeName);
-                if (c2i == hwCodecName2Info.end()) {
-                    // Create a new MediaCodecInfo for a new node.
-                    c2i = hwCodecName2Info.insert(std::make_pair(
-                            nodeName, writer->addMediaCodecInfo())).first;
-                    info = c2i->second.get();
-                    info->setName(nodeName.c_str());
-                    info->setOwner(node->owner.c_str());
-                    typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs =
-                        MediaCodecInfo::kFlagIsVendor;
-                    if (isEncoder) {
-                        attrs |= MediaCodecInfo::kFlagIsEncoder;
-                    }
-                    if (std::count_if(
-                            node->attributes.begin(), node->attributes.end(),
-                            [](const IOmxStore::Attribute &i) -> bool {
-                                return i.key == "attribute::software-codec";
-                                                                      })) {
-                        attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
-                    }
-                    info->setRank(defaultRank);
-                } else {
-                    // The node has been seen before. Simply retrieve the
-                    // existing MediaCodecInfoWriter.
-                    info = c2i->second.get();
-                }
-                std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
-                        info->addMediaType(typeName.c_str());
-                if (queryCapabilities(
-                        *node, typeName.c_str(), isEncoder, caps.get()) != OK) {
-                    ALOGW("Fail to add media type %s to codec %s "
-                          "after software codecs",
-                          typeName.c_str(), nodeName.c_str());
-                    info->removeMediaType(typeName.c_str());
-                }
-            }
-        }
     }
     return OK;
 }
 
 }  // namespace android
-
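A standalone sketch of the rank handling introduced above, assuming default-rank constants along the lines of defaultSwAudioRank/defaultRank; the sscanf pattern mirrors the new OmxInfoBuilder loop:

    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Parse a numeric "rank" attribute; keep the default when the value is not
    // a clean unsigned integer (a trailing character such as "12x" makes sscanf
    // match two items, so the previously chosen default is restored).
    uint32_t parseRank(const std::string &value, uint32_t defaultRank) {
        uint32_t rank = defaultRank;
        char dummy;
        if (sscanf(value.c_str(), "%u%c", &rank, &dummy) != 1) {
            rank = defaultRank;
        }
        return rank;
    }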
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index f34d54c..fa3d372 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -165,6 +165,9 @@
 
     for (i = 0; i < n; ++i) {
         sp<MetaData> meta = mExtractor->getTrackMetaData(i);
+        if (!meta) {
+            continue;
+        }
         ALOGV("getting track %zu of %zu, meta=%s", i, n, meta->toString().c_str());
 
         const char *mime;
@@ -186,6 +189,9 @@
     }
 
     sp<MetaData> trackMeta = mExtractor->getTrackMetaData(i);
+    if (!trackMeta) {
+        return NULL;
+    }
 
     if (metaOnly) {
         return FrameDecoder::getMetadataOnly(trackMeta, colorFormat, thumbnail);
@@ -280,6 +286,9 @@
     size_t i;
     for (i = 0; i < n; ++i) {
         sp<MetaData> meta = mExtractor->getTrackMetaData(i);
+        if (!meta) {
+            continue;
+        }
 
         const char *mime;
         CHECK(meta->findCString(kKeyMIMEType, &mime));
@@ -296,6 +305,9 @@
 
     sp<MetaData> trackMeta = mExtractor->getTrackMetaData(
             i, MediaExtractor::kIncludeExtensiveMetaData);
+    if (!trackMeta) {
+        return UNKNOWN_ERROR;
+    }
 
     if (metaOnly) {
         if (outFrame != NULL) {
@@ -529,6 +541,9 @@
     String8 timedTextLang;
     for (size_t i = 0; i < numTracks; ++i) {
         sp<MetaData> trackMeta = mExtractor->getTrackMetaData(i);
+        if (!trackMeta) {
+            continue;
+        }
 
         int64_t durationUs;
         if (trackMeta->findInt64(kKeyDuration, &durationUs)) {
@@ -667,8 +682,9 @@
                 !strcasecmp(fileMIME, "video/x-matroska")) {
             sp<MetaData> trackMeta = mExtractor->getTrackMetaData(0);
             const char *trackMIME;
-            CHECK(trackMeta->findCString(kKeyMIMEType, &trackMIME));
-
+            if (trackMeta != nullptr) {
+                CHECK(trackMeta->findCString(kKeyMIMEType, &trackMIME));
+            }
             if (!strncasecmp("audio/", trackMIME, 6)) {
                 // The matroska file only contains a single audio track,
                 // rewrite its mime type.
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 2e7da01..16b3319 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -192,6 +192,9 @@
         { 50, OMX_VIDEO_AVCLevel5  },
         { 51, OMX_VIDEO_AVCLevel51 },
         { 52, OMX_VIDEO_AVCLevel52 },
+        { 60, OMX_VIDEO_AVCLevel6  },
+        { 61, OMX_VIDEO_AVCLevel61 },
+        { 62, OMX_VIDEO_AVCLevel62 },
     };
     const static ALookup<uint8_t, OMX_VIDEO_AVCPROFILETYPE> profiles {
         { 66, OMX_VIDEO_AVCProfileBaseline },
@@ -585,6 +588,7 @@
         { "genre", kKeyGenre },
         { "location", kKeyLocation },
         { "lyricist", kKeyWriter },
+        { "manufacturer", kKeyManufacturer },
         { "title", kKeyTitle },
         { "year", kKeyYear },
     }
@@ -1184,6 +1188,16 @@
         }
 
         parseHevcProfileLevelFromHvcc((const uint8_t *)data, dataSize, msg);
+    } else if (meta->findData(kKeyAV1C, &type, &data, &size)) {
+        sp<ABuffer> buffer = new (std::nothrow) ABuffer(size);
+        if (buffer.get() == NULL || buffer->base() == NULL) {
+            return NO_MEMORY;
+        }
+        memcpy(buffer->data(), data, size);
+
+        buffer->meta()->setInt32("csd", true);
+        buffer->meta()->setInt64("timeUs", 0);
+        msg->setBuffer("csd-0", buffer);
     } else if (meta->findData(kKeyESDS, &type, &data, &size)) {
         ESDS esds((const char *)data, size);
         if (esds.InitCheck() != (status_t)OK) {
@@ -1690,6 +1704,11 @@
             meta->setInt32(kKeyIsADTS, isADTS);
         }
 
+        int32_t aacProfile = -1;
+        if (msg->findInt32("aac-profile", &aacProfile)) {
+            meta->setInt32(kKeyAACAOT, aacProfile);
+        }
+
         int32_t pcmEncoding;
         if (msg->findInt32("pcm-encoding", &pcmEncoding)) {
             meta->setInt32(kKeyPcmEncoding, pcmEncoding);
@@ -1743,6 +1762,8 @@
             std::vector<uint8_t> hvcc(csd0size + 1024);
             size_t outsize = reassembleHVCC(csd0, hvcc.data(), hvcc.size(), 4);
             meta->setData(kKeyHVCC, kTypeHVCC, hvcc.data(), outsize);
+        } else if (mime == MEDIA_MIMETYPE_VIDEO_AV1) {
+            meta->setData(kKeyAV1C, 0, csd0->data(), csd0->size());
         } else if (mime == MEDIA_MIMETYPE_VIDEO_VP9) {
             meta->setData(kKeyVp9CodecPrivate, 0, csd0->data(), csd0->size());
         } else if (mime == MEDIA_MIMETYPE_AUDIO_OPUS) {
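For reference, a minimal sketch of the csd-0 plumbing used by the AV1 branch above, built from the same foundation classes (ABuffer/AMessage); kKeyAV1C is the new metadata key added in MetaDataBase.h further down:

    #include <string.h>
    #include <utils/Errors.h>
    #include <media/stagefright/foundation/ABuffer.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    // Wrap a raw av1C configuration record into the csd-0 buffer of a format message.
    static status_t setAv1Csd(const sp<AMessage> &msg, const void *data, size_t size) {
        sp<ABuffer> buffer = new (std::nothrow) ABuffer(size);
        if (buffer.get() == NULL || buffer->base() == NULL) {
            return NO_MEMORY;
        }
        memcpy(buffer->data(), data, size);
        buffer->meta()->setInt32("csd", true);
        buffer->meta()->setInt64("timeUs", 0);
        msg->setBuffer("csd-0", buffer);
        return OK;
    }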
diff --git a/media/libstagefright/data/media_codecs_google_c2_audio.xml b/media/libstagefright/data/media_codecs_google_c2_audio.xml
index 88cd08d..47a9715 100644
--- a/media/libstagefright/data/media_codecs_google_c2_audio.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_audio.xml
@@ -17,51 +17,61 @@
 <Included>
     <Decoders>
         <MediaCodec name="c2.android.mp3.decoder" type="audio/mpeg">
+            <Alias name="OMX.google.mp3.decoder" />
             <Limit name="channel-count" max="2" />
             <Limit name="sample-rate" ranges="8000,11025,12000,16000,22050,24000,32000,44100,48000" />
             <Limit name="bitrate" range="8000-320000" />
         </MediaCodec>
         <MediaCodec name="c2.android.amrnb.decoder" type="audio/3gpp">
+            <Alias name="OMX.google.amrnb.decoder" />
             <Limit name="channel-count" max="1" />
             <Limit name="sample-rate" ranges="8000" />
             <Limit name="bitrate" range="4750-12200" />
         </MediaCodec>
         <MediaCodec name="c2.android.amrwb.decoder" type="audio/amr-wb">
+            <Alias name="OMX.google.amrwb.decoder" />
             <Limit name="channel-count" max="1" />
             <Limit name="sample-rate" ranges="16000" />
             <Limit name="bitrate" range="6600-23850" />
         </MediaCodec>
         <MediaCodec name="c2.android.aac.decoder" type="audio/mp4a-latm">
+            <Alias name="OMX.google.aac.decoder" />
             <Limit name="channel-count" max="8" />
             <Limit name="sample-rate" ranges="7350,8000,11025,12000,16000,22050,24000,32000,44100,48000" />
             <Limit name="bitrate" range="8000-960000" />
         </MediaCodec>
         <MediaCodec name="c2.android.g711.alaw.decoder" type="audio/g711-alaw">
+            <Alias name="OMX.google.g711.alaw.decoder" />
             <Limit name="channel-count" max="1" />
             <Limit name="sample-rate" ranges="8000-48000" />
             <Limit name="bitrate" range="64000" />
         </MediaCodec>
         <MediaCodec name="c2.android.g711.mlaw.decoder" type="audio/g711-mlaw">
+            <Alias name="OMX.google.g711.mlaw.decoder" />
             <Limit name="channel-count" max="1" />
             <Limit name="sample-rate" ranges="8000-48000" />
             <Limit name="bitrate" range="64000" />
         </MediaCodec>
         <MediaCodec name="c2.android.vorbis.decoder" type="audio/vorbis">
+            <Alias name="OMX.google.vorbis.decoder" />
             <Limit name="channel-count" max="8" />
             <Limit name="sample-rate" ranges="8000-96000" />
             <Limit name="bitrate" range="32000-500000" />
         </MediaCodec>
         <MediaCodec name="c2.android.opus.decoder" type="audio/opus">
+            <Alias name="OMX.google.opus.decoder" />
             <Limit name="channel-count" max="8" />
             <Limit name="sample-rate" ranges="48000" />
             <Limit name="bitrate" range="6000-510000" />
         </MediaCodec>
         <MediaCodec name="c2.android.raw.decoder" type="audio/raw">
+            <Alias name="OMX.google.raw.decoder" />
             <Limit name="channel-count" max="8" />
             <Limit name="sample-rate" ranges="8000-96000" />
             <Limit name="bitrate" range="1-10000000" />
         </MediaCodec>
         <MediaCodec name="c2.android.flac.decoder" type="audio/flac">
+            <Alias name="OMX.google.flac.decoder" />
             <Limit name="channel-count" max="8" />
             <Limit name="sample-rate" ranges="1-655350" />
             <Limit name="bitrate" range="1-21000000" />
@@ -69,24 +79,28 @@
     </Decoders>
     <Encoders>
         <MediaCodec name="c2.android.aac.encoder" type="audio/mp4a-latm">
+            <Alias name="OMX.google.aac.encoder" />
             <Limit name="channel-count" max="6" />
             <Limit name="sample-rate" ranges="8000,11025,12000,16000,22050,24000,32000,44100,48000" />
             <!-- also may support 64000, 88200  and 96000 Hz -->
             <Limit name="bitrate" range="8000-960000" />
         </MediaCodec>
         <MediaCodec name="c2.android.amrnb.encoder" type="audio/3gpp">
+            <Alias name="OMX.google.amrnb.encoder" />
             <Limit name="channel-count" max="1" />
             <Limit name="sample-rate" ranges="8000" />
             <Limit name="bitrate" range="4750-12200" />
             <Feature name="bitrate-modes" value="CBR" />
         </MediaCodec>
         <MediaCodec name="c2.android.amrwb.encoder" type="audio/amr-wb">
+            <Alias name="OMX.google.amrwb.encoder" />
             <Limit name="channel-count" max="1" />
             <Limit name="sample-rate" ranges="16000" />
             <Limit name="bitrate" range="6600-23850" />
             <Feature name="bitrate-modes" value="CBR" />
         </MediaCodec>
         <MediaCodec name="c2.android.flac.encoder" type="audio/flac">
+            <Alias name="OMX.google.flac.encoder" />
             <Limit name="channel-count" max="2" />
             <Limit name="sample-rate" ranges="1-655350" />
             <Limit name="bitrate" range="1-21000000" />
diff --git a/media/libstagefright/data/media_codecs_google_c2_telephony.xml b/media/libstagefright/data/media_codecs_google_c2_telephony.xml
index d1055b3..950b092 100644
--- a/media/libstagefright/data/media_codecs_google_c2_telephony.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_telephony.xml
@@ -17,6 +17,7 @@
 <Included>
     <Decoders>
         <MediaCodec name="c2.android.gsm.decoder" type="audio/gsm">
+            <Alias name="OMX.google.gsm.decoder" />
             <Limit name="channel-count" max="1" />
             <Limit name="sample-rate" ranges="8000" />
             <Limit name="bitrate" range="13000" />
diff --git a/media/libstagefright/data/media_codecs_google_c2_tv.xml b/media/libstagefright/data/media_codecs_google_c2_tv.xml
index fa082c7..1b00dc9 100644
--- a/media/libstagefright/data/media_codecs_google_c2_tv.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_tv.xml
@@ -17,6 +17,7 @@
 <Included>
     <Decoders>
         <MediaCodec name="c2.android.mpeg2.decoder" type="video/mpeg2">
+            <Alias name="OMX.google.mpeg2.decoder" />
             <!-- profiles and levels:  ProfileMain : LevelHL -->
             <Limit name="size" min="16x16" max="1920x1088" />
             <Limit name="alignment" value="2x2" />
diff --git a/media/libstagefright/data/media_codecs_google_c2_video.xml b/media/libstagefright/data/media_codecs_google_c2_video.xml
index c49789e..e20174f 100644
--- a/media/libstagefright/data/media_codecs_google_c2_video.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_video.xml
@@ -17,6 +17,7 @@
 <Included>
     <Decoders>
         <MediaCodec name="c2.android.mpeg4.decoder" type="video/mp4v-es">
+            <Alias name="OMX.google.mpeg4.decoder" />
             <!-- profiles and levels:  ProfileSimple : Level3 -->
             <Limit name="size" min="2x2" max="352x288" />
             <Limit name="alignment" value="2x2" />
@@ -26,6 +27,7 @@
             <Feature name="adaptive-playback" />
         </MediaCodec>
         <MediaCodec name="c2.android.h263.decoder" type="video/3gpp">
+            <Alias name="OMX.google.h263.decoder" />
             <!-- profiles and levels:  ProfileBaseline : Level30, ProfileBaseline : Level45
                     ProfileISWV2 : Level30, ProfileISWV2 : Level45 -->
             <Limit name="size" min="2x2" max="352x288" />
@@ -34,6 +36,7 @@
             <Feature name="adaptive-playback" />
         </MediaCodec>
         <MediaCodec name="c2.android.avc.decoder" type="video/avc">
+            <Alias name="OMX.google.h264.decoder" />
             <!-- profiles and levels:  ProfileHigh : Level52 -->
             <Limit name="size" min="2x2" max="4080x4080" />
             <Limit name="alignment" value="2x2" />
@@ -44,6 +47,7 @@
             <Feature name="adaptive-playback" />
         </MediaCodec>
         <MediaCodec name="c2.android.hevc.decoder" type="video/hevc">
+            <Alias name="OMX.google.hevc.decoder" />
             <!-- profiles and levels:  ProfileMain : MainTierLevel51 -->
             <Limit name="size" min="2x2" max="4096x4096" />
             <Limit name="alignment" value="2x2" />
@@ -54,6 +58,7 @@
             <Feature name="adaptive-playback" />
         </MediaCodec>
         <MediaCodec name="c2.android.vp8.decoder" type="video/x-vnd.on2.vp8">
+            <Alias name="OMX.google.vp8.decoder" />
             <Limit name="size" min="2x2" max="2048x2048" />
             <Limit name="alignment" value="2x2" />
             <Limit name="block-size" value="16x16" />
@@ -63,6 +68,7 @@
             <Feature name="adaptive-playback" />
         </MediaCodec>
         <MediaCodec name="c2.android.vp9.decoder" type="video/x-vnd.on2.vp9">
+            <Alias name="OMX.google.vp9.decoder" />
             <Limit name="size" min="2x2" max="2048x2048" />
             <Limit name="alignment" value="2x2" />
             <Limit name="block-size" value="16x16" />
@@ -84,12 +90,14 @@
 
     <Encoders>
         <MediaCodec name="c2.android.h263.encoder" type="video/3gpp">
+            <Alias name="OMX.google.h263.encoder" />
             <!-- profiles and levels:  ProfileBaseline : Level45 -->
             <Limit name="size" min="176x144" max="176x144" />
             <Limit name="alignment" value="16x16" />
             <Limit name="bitrate" range="1-128000" />
         </MediaCodec>
         <MediaCodec name="c2.android.avc.encoder" type="video/avc">
+            <Alias name="OMX.google.h264.encoder" />
             <!-- profiles and levels:  ProfileBaseline : Level41 -->
             <Limit name="size" min="16x16" max="2048x2048" />
             <Limit name="alignment" value="2x2" />
@@ -99,7 +107,17 @@
             <Limit name="bitrate" range="1-12000000" />
             <Feature name="intra-refresh" />
         </MediaCodec>
+        <MediaCodec name="c2.android.hevc.encoder" type="video/hevc">
+            <!-- profiles and levels:  ProfileMain : MainTierLevel51 -->
+            <Limit name="size" min="320x128" max="512x512" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="8x8" />
+            <Limit name="block-count" range="1-4096" /> <!-- max 512x512 -->
+            <Limit name="blocks-per-second" range="1-122880" />
+            <Limit name="bitrate" range="1-10000000" />
+        </MediaCodec>
         <MediaCodec name="c2.android.mpeg4.encoder" type="video/mp4v-es">
+            <Alias name="OMX.google.mpeg4.encoder" />
             <!-- profiles and levels:  ProfileCore : Level2 -->
             <Limit name="size" min="16x16" max="176x144" />
             <Limit name="alignment" value="16x16" />
@@ -108,6 +126,7 @@
             <Limit name="bitrate" range="1-64000" />
         </MediaCodec>
         <MediaCodec name="c2.android.vp8.encoder" type="video/x-vnd.on2.vp8">
+            <Alias name="OMX.google.vp8.encoder" />
             <!-- profiles and levels:  ProfileMain : Level_Version0-3 -->
             <Limit name="size" min="2x2" max="2048x2048" />
             <Limit name="alignment" value="2x2" />
@@ -118,6 +137,7 @@
             <Feature name="bitrate-modes" value="VBR,CBR" />
         </MediaCodec>
         <MediaCodec name="c2.android.vp9.encoder" type="video/x-vnd.on2.vp9">
+            <Alias name="OMX.google.vp9.encoder" />
             <!-- profiles and levels:  ProfileMain : Level_Version0-3 -->
             <Limit name="size" min="2x2" max="2048x2048" />
             <Limit name="alignment" value="2x2" />
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 2ecfa43..5e7f90a 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -1234,7 +1234,7 @@
         const AString &uri, uint32_t streamMask, int64_t timeUs, bool newUri) {
     ssize_t index = mFetcherInfos.indexOfKey(uri);
     if (index < 0) {
-        ALOGE("did not find fetcher for uri: %s", uri.c_str());
+        ALOGE("did not find fetcher for uri: %s", uriDebugString(uri).c_str());
         return false;
     }
 
@@ -2005,7 +2005,7 @@
 
             if ((mNewStreamMask & stream) && mStreams[idx].mNewUri.empty()) {
                 ALOGW("swapping stream type %d %s to empty stream",
-                        stream, mStreams[idx].mUri.c_str());
+                        stream, uriDebugString(mStreams[idx].mUri).c_str());
             }
             mStreams[idx].mUri = mStreams[idx].mNewUri;
             mStreams[idx].mNewUri.clear();
@@ -2033,7 +2033,7 @@
         CHECK(idx >= 0);
         if (mStreams[idx].mNewUri.empty()) {
             ALOGW("swapping extra stream type %d %s to empty stream",
-                    stream, mStreams[idx].mUri.c_str());
+                    stream, uriDebugString(mStreams[idx].mUri).c_str());
         }
         mStreams[idx].mUri = mStreams[idx].mNewUri;
         mStreams[idx].mNewUri.clear();
@@ -2138,7 +2138,7 @@
             ALOGV("stopping newUri = %s", newUri.c_str());
             ssize_t index = mFetcherInfos.indexOfKey(newUri);
             if (index < 0) {
-                ALOGE("did not find fetcher for newUri: %s", newUri.c_str());
+                ALOGE("did not find fetcher for newUri: %s", uriDebugString(newUri).c_str());
                 continue;
             }
             FetcherInfo &info = mFetcherInfos.editValueAt(index);
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 4392799..b2361b8 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -1205,8 +1205,7 @@
             if (val.size() < 2
                     || val.c_str()[0] != '"'
                     || val.c_str()[val.size() - 1] != '"') {
-                ALOGE("Expected quoted string for URI, got '%s' instead.",
-                      val.c_str());
+                ALOGE("Expected quoted string for URI.");
 
                 return ERROR_MALFORMED;
             }
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 562c625..d153598 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -365,10 +365,10 @@
         if (err == ERROR_NOT_CONNECTED) {
             return ERROR_NOT_CONNECTED;
         } else if (err < 0) {
-            ALOGE("failed to fetch cipher key from '%s'.", keyURI.c_str());
+            ALOGE("failed to fetch cipher key from '%s'.", uriDebugString(keyURI).c_str());
             return ERROR_IO;
         } else if (key->size() != 16) {
-            ALOGE("key file '%s' wasn't 16 bytes in size.", keyURI.c_str());
+            ALOGE("key file '%s' wasn't 16 bytes in size.", uriDebugString(keyURI).c_str());
             return ERROR_MALFORMED;
         }
 
@@ -1366,7 +1366,7 @@
         }
         if (bytesRead < 0) {
             status_t err = bytesRead;
-            ALOGE("failed to fetch .ts segment at url '%s'", uri.c_str());
+            ALOGE("failed to fetch .ts segment at url '%s'", uriDebugString(uri).c_str());
             notifyError(err);
             return;
         }
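uriDebugString() comes from the stagefright foundation library; as a rough, hypothetical illustration of the kind of redaction such a helper performs (the real implementation may differ), it keeps only the scheme so full stream and key URIs stay out of the logs:

    #include <string>

    // Hypothetical URI-redacting helper: keep the scheme, suppress the rest.
    static std::string redactUri(const std::string &uri) {
        const size_t pos = uri.find("://");
        if (pos == std::string::npos) {
            return "<URI suppressed>";
        }
        return uri.substr(0, pos) + "://<suppressed>";
    }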
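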
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index c06c288..2dca5c3 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -752,6 +752,7 @@
 constexpr char KEY_COLOR_STANDARD[] = "color-standard";
 constexpr char KEY_COLOR_TRANSFER[] = "color-transfer";
 constexpr char KEY_COMPLEXITY[] = "complexity";
+constexpr char KEY_CREATE_INPUT_SURFACE_SUSPENDED[] = "create-input-buffers-suspended";
 constexpr char KEY_DURATION[] = "durationUs";
 constexpr char KEY_FEATURE_[] = "feature-";
 constexpr char KEY_FLAC_COMPRESSION_LEVEL[] = "flac-compression-level";
@@ -772,8 +773,10 @@
 constexpr char KEY_LATENCY[] = "latency";
 constexpr char KEY_LEVEL[] = "level";
 constexpr char KEY_MAX_BIT_RATE[] = "max-bitrate";
+constexpr char KEY_MAX_FPS_TO_ENCODER[] = "max-fps-to-encoder";
 constexpr char KEY_MAX_HEIGHT[] = "max-height";
 constexpr char KEY_MAX_INPUT_SIZE[] = "max-input-size";
+constexpr char KEY_MAX_PTS_GAP_TO_ENCODER[] = "max-pts-gap-to-encoder";
 constexpr char KEY_MAX_WIDTH[] = "max-width";
 constexpr char KEY_MIME[] = "mime";
 constexpr char KEY_OPERATING_RATE[] = "operating-rate";
@@ -828,8 +831,10 @@
 constexpr int32_t INFO_TRY_AGAIN_LATER        = -1;
 constexpr int32_t VIDEO_SCALING_MODE_SCALE_TO_FIT               = 1;
 constexpr int32_t VIDEO_SCALING_MODE_SCALE_TO_FIT_WITH_CROPPING = 2;
+constexpr char PARAMETER_KEY_OFFSET_TIME[] = "time-offset-us";
 constexpr char PARAMETER_KEY_REQUEST_SYNC_FRAME[] = "request-sync";
 constexpr char PARAMETER_KEY_SUSPEND[] = "drop-input-frames";
+constexpr char PARAMETER_KEY_SUSPEND_TIME[] = "drop-start-time-us";
 constexpr char PARAMETER_KEY_VIDEO_BITRATE[] = "video-bitrate";
 
 }
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecListWriter.h b/media/libstagefright/include/media/stagefright/MediaCodecListWriter.h
index 59f57c7..f53b23e 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecListWriter.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecListWriter.h
@@ -48,6 +48,13 @@
      * added `MediaCodecInfo` object.
      */
     std::unique_ptr<MediaCodecInfoWriter> addMediaCodecInfo();
+    /**
+     * Find an existing `MediaCodecInfo` object for a codec name and return a
+     * `MediaCodecInfoWriter` object associated with that `MediaCodecInfo`.
+     *
+     * @return The `MediaCodecInfoWriter` object if found, or nullptr if not found.
+     */
+    std::unique_ptr<MediaCodecInfoWriter> findMediaCodecInfo(const char *codecName);
 private:
     MediaCodecListWriter() = default;
 
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
index ba6631c..4358aac 100644
--- a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
+++ b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
@@ -34,7 +34,6 @@
             const sp<DataSource> &source, const char *mime = NULL);
     static sp<IMediaExtractor> CreateFromService(
             const sp<DataSource> &source, const char *mime = NULL);
-    static void LoadPlugins(const ::std::string& apkPath);
     static status_t dump(int fd, const Vector<String16>& args);
     static std::unordered_set<std::string> getSupportedTypes();
     static void SetLinkedLibraries(const std::string& linkedLibraries);
@@ -46,8 +45,6 @@
     static bool gIgnoreVersion;
     static std::string gLinkedLibraries;
 
-    static void RegisterExtractorsInApk(
-            const char *apkPath, std::list<sp<ExtractorPlugin>> &pluginList);
     static void RegisterExtractorsInSystem(
             const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList);
     static void RegisterExtractorsInApex(
@@ -59,7 +56,7 @@
             float *confidence, void **meta, FreeMetaFunc *freeMeta,
             sp<ExtractorPlugin> &plugin, uint32_t *creatorVersion);
 
-    static void UpdateExtractors(const char *newUpdateApkPath);
+    static void UpdateExtractors();
 };
 
 }  // namespace android
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index 2910bd3..a0407af 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -59,6 +59,7 @@
     kKeyAACProfile        = 'aacp',  // int32_t
     kKeyAVCC              = 'avcc',  // raw data
     kKeyHVCC              = 'hvcc',  // raw data
+    kKeyAV1C              = 'av1c',  // raw data
     kKeyThumbnailHVCC     = 'thvc',  // raw data
     kKeyD263              = 'd263',  // raw data
     kKeyVorbisInfo        = 'vinf',  // raw data
@@ -143,6 +144,9 @@
     // The language code for this media
     kKeyMediaLanguage     = 'lang',  // cstring
 
+    // The manufacturer code for this media
+    kKeyManufacturer  = 'manu',  // cstring
+
     // To store the timed text format data
     kKeyTextFormatData    = 'text',  // raw data
 
@@ -236,6 +240,7 @@
     kTypeESDS        = 'esds',
     kTypeAVCC        = 'avcc',
     kTypeHVCC        = 'hvcc',
+    kTypeAV1C        = 'av1c',
     kTypeD263        = 'd263',
 };
 
diff --git a/media/libstagefright/include/media/stagefright/OmxInfoBuilder.h b/media/libstagefright/include/media/stagefright/OmxInfoBuilder.h
index 28f6094..1410a16 100644
--- a/media/libstagefright/include/media/stagefright/OmxInfoBuilder.h
+++ b/media/libstagefright/include/media/stagefright/OmxInfoBuilder.h
@@ -23,8 +23,11 @@
 namespace android {
 
 class OmxInfoBuilder : public MediaCodecListBuilderBase {
+private:
+    bool mAllowSurfaceEncoders; // allow surface encoders
+
 public:
-    OmxInfoBuilder();
+    explicit OmxInfoBuilder(bool allowSurfaceEncoders);
     ~OmxInfoBuilder() override = default;
     status_t buildMediaCodecList(MediaCodecListWriter* writer) override;
 };
diff --git a/media/libstagefright/omx/1.0/OmxStore.cpp b/media/libstagefright/omx/1.0/OmxStore.cpp
index 447af6f..2e041e3 100644
--- a/media/libstagefright/omx/1.0/OmxStore.cpp
+++ b/media/libstagefright/omx/1.0/OmxStore.cpp
@@ -61,10 +61,7 @@
         role.role = rolePair.first;
         role.type = rolePair.second.type;
         role.isEncoder = rolePair.second.isEncoder;
-        // TODO: Currently, preferPlatformNodes information is not available in
-        // the xml file. Once we have a way to provide this information, it
-        // should be parsed properly.
-        role.preferPlatformNodes = rolePair.first.compare(0, 5, "audio") == 0;
+        role.preferPlatformNodes = false; // deprecated and ignored, using rank instead
         hidl_vec<NodeInfo>& nodeList = role.nodes;
         nodeList.resize(rolePair.second.nodeList.size());
         size_t j = 0;
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index 362b7f5..4383004 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -87,6 +87,7 @@
     vndk: {
         enabled: true,
     },
+    double_loadable: true,
     srcs: ["OMXUtils.cpp"],
     export_include_dirs: [
         "include",
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index 20cb415..789e62a 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -255,7 +255,7 @@
 
     struct hostent *ent = gethostbyname(host.c_str());
     if (ent == NULL) {
-        ALOGE("Unknown host %s", host.c_str());
+        ALOGE("Unknown host %s", uriDebugString(host).c_str());
 
         reply->setInt32("result", -ENOENT);
         reply->post();
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index c581e9d..9263565 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -80,7 +80,7 @@
             return false;
         }
 
-        ALOGI("%s", line.c_str());
+        ALOGV("%s", line.c_str());
 
         switch (line.c_str()[0]) {
             case 'v':
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 5d993db..b4515e4 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -345,8 +345,7 @@
 
             struct hostent *ent = gethostbyname(mSessionHost.c_str());
             if (ent == NULL) {
-                ALOGE("Failed to look up address of session host '%s'",
-                     mSessionHost.c_str());
+                ALOGE("Failed to look up address of session host");
 
                 return false;
             }
@@ -531,7 +530,7 @@
                             mSessionURL.append(AStringPrintf("%u", port));
                             mSessionURL.append(path);
 
-                            ALOGI("rewritten session url: '%s'", mSessionURL.c_str());
+                            ALOGV("rewritten session url: '%s'", mSessionURL.c_str());
                         }
 
                         sp<AMessage> reply = new AMessage('conn', this);
@@ -1913,7 +1912,7 @@
             mLastMediaTimeUs = mediaTimeUs;
         }
 
-        if (mediaTimeUs < 0) {
+        if (mediaTimeUs < 0 && !mSeekable) {
             ALOGV("dropping early accessUnit.");
             return false;
         }
diff --git a/media/libstagefright/timedtext/Android.bp b/media/libstagefright/timedtext/Android.bp
index 97e1ec6..6935655 100644
--- a/media/libstagefright/timedtext/Android.bp
+++ b/media/libstagefright/timedtext/Android.bp
@@ -44,9 +44,6 @@
             "signed-integer-overflow",
         ],
         cfi: true,
-        diag: {
-            cfi: true,
-        },
     },
 
     include_dirs: [
diff --git a/media/libstagefright/xmlparser/Android.bp b/media/libstagefright/xmlparser/Android.bp
index bebfb3b..819058c 100644
--- a/media/libstagefright/xmlparser/Android.bp
+++ b/media/libstagefright/xmlparser/Android.bp
@@ -10,6 +10,7 @@
     vndk: {
         enabled: true,
     },
+    double_loadable: true,
 
     srcs: [
         "MediaCodecsXmlParser.cpp",
diff --git a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
index 6e541ba..7046f61 100644
--- a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
+++ b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
@@ -502,6 +502,7 @@
     const char *name = nullptr;
     const char *type = nullptr;
     const char *update = nullptr;
+    const char *rank = nullptr;
 
     size_t i = 0;
     while (attrs[i] != nullptr) {
@@ -523,6 +524,12 @@
                 return BAD_VALUE;
             }
             update = attrs[i];
+        } else if (strEq(attrs[i], "rank")) {
+            if (attrs[++i] == nullptr) {
+                ALOGE("addMediaCodecFromAttributes: rank is null");
+                return BAD_VALUE;
+            }
+            rank = attrs[i];
         } else {
             ALOGE("addMediaCodecFromAttributes: unrecognized attribute: %s", attrs[i]);
             return BAD_VALUE;
@@ -579,6 +586,15 @@
         }
     }
 
+    if (rank != nullptr) {
+        if (!mCurrentCodec->second.rank.empty() && mCurrentCodec->second.rank != rank) {
+            ALOGE("addMediaCodecFromAttributes: code \"%s\" rank changed from \"%s\" to \"%s\"",
+                    name, mCurrentCodec->second.rank.c_str(), rank);
+            return BAD_VALUE;
+        }
+        mCurrentCodec->second.rank = rank;
+    }
+
     return OK;
 }
 
@@ -1035,6 +1051,7 @@
         const auto& codecName = codec.first;
         bool isEncoder = codec.second.isEncoder;
         size_t order = codec.second.order;
+        std::string rank = codec.second.rank;
         const auto& typeMap = codec.second.typeMap;
         for (const auto& type : typeMap) {
             const auto& typeName = type.first;
@@ -1090,6 +1107,9 @@
                     nodeInfo.attributeList.push_back(Attribute{quirk, "present"});
                 }
             }
+            if (!rank.empty()) {
+                nodeInfo.attributeList.push_back(Attribute{"rank", rank});
+            }
             nodeList->insert(std::make_pair(
                     std::move(order), std::move(nodeInfo)));
         }
diff --git a/media/libstagefright/xmlparser/include/media/stagefright/xmlparser/MediaCodecsXmlParser.h b/media/libstagefright/xmlparser/include/media/stagefright/xmlparser/MediaCodecsXmlParser.h
index fd949da..7a986b7 100644
--- a/media/libstagefright/xmlparser/include/media/stagefright/xmlparser/MediaCodecsXmlParser.h
+++ b/media/libstagefright/xmlparser/include/media/stagefright/xmlparser/MediaCodecsXmlParser.h
@@ -66,6 +66,7 @@
         QuirkSet quirkSet; ///< Set of quirks requested by this codec
         TypeMap typeMap;   ///< Map of types supported by this codec
         std::vector<std::string> aliases; ///< Name aliases for this codec
+        std::string rank;  ///< Rank of this codec. This is a numeric string.
     };
 
     typedef std::pair<std::string, CodecProperties> Codec;
diff --git a/media/mtp/IMtpDatabase.h b/media/mtp/IMtpDatabase.h
index 1245092..81fa60c 100644
--- a/media/mtp/IMtpDatabase.h
+++ b/media/mtp/IMtpDatabase.h
@@ -112,8 +112,8 @@
                                             MtpObjectHandle handle, bool succeeded) = 0;
 
     virtual MtpResponseCode         beginCopyObject(MtpObjectHandle handle, MtpObjectHandle newParent,
-                                            MtpStorageID newStorage);
-    virtual void                    endCopyObject(MtpObjectHandle handle, bool succeeded);
+                                            MtpStorageID newStorage) = 0;
+    virtual void                    endCopyObject(MtpObjectHandle handle, bool succeeded) = 0;
 };
 
 }; // namespace android
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 74754ea..339f622 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -106,6 +106,10 @@
         symbol_file: "libmediandk.map.txt",
         versions: ["29"],
     },
+
+    // Bug: http://b/124522995 libmediandk has linker errors when built with
+    // coverage
+    native_coverage: false,
 }
 
 llndk_library {
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index 010c1aa..c3eb437 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -69,6 +69,7 @@
         case AIMAGE_FORMAT_DEPTH16:
         case AIMAGE_FORMAT_DEPTH_POINT_CLOUD:
         case AIMAGE_FORMAT_Y8:
+        case AIMAGE_FORMAT_HEIC:
             return true;
         case AIMAGE_FORMAT_PRIVATE:
             // For private format, cpu usage is prohibited.
@@ -96,6 +97,7 @@
         case AIMAGE_FORMAT_DEPTH16:
         case AIMAGE_FORMAT_DEPTH_POINT_CLOUD:
         case AIMAGE_FORMAT_Y8:
+        case AIMAGE_FORMAT_HEIC:
             return 1;
         case AIMAGE_FORMAT_PRIVATE:
             return 0;
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index fcb706d..26a6238 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -292,6 +292,7 @@
 EXPORT const char* AMEDIAFORMAT_KEY_COMPILATION = "compilation";
 EXPORT const char* AMEDIAFORMAT_KEY_COMPLEXITY = "complexity";
 EXPORT const char* AMEDIAFORMAT_KEY_COMPOSER = "composer";
+EXPORT const char* AMEDIAFORMAT_KEY_CREATE_INPUT_SURFACE_SUSPENDED = "create-input-buffers-suspended";
 EXPORT const char* AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE = "crypto-default-iv-size";
 EXPORT const char* AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_BYTE_BLOCK = "crypto-encrypted-byte-block";
 EXPORT const char* AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES = "crypto-encrypted-sizes";
@@ -341,6 +342,7 @@
 EXPORT const char* AMEDIAFORMAT_KEY_LOCATION = "location";
 EXPORT const char* AMEDIAFORMAT_KEY_LOOP = "loop";
 EXPORT const char* AMEDIAFORMAT_KEY_LYRICIST = "lyricist";
+EXPORT const char* AMEDIAFORMAT_KEY_MANUFACTURER = "manufacturer";
 EXPORT const char* AMEDIAFORMAT_KEY_MAX_BIT_RATE = "max-bitrate";
 EXPORT const char* AMEDIAFORMAT_KEY_MAX_HEIGHT = "max-height";
 EXPORT const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE = "max-input-size";
diff --git a/media/ndk/include/media/NdkImage.h b/media/ndk/include/media/NdkImage.h
index 15b340c..14d88cb 100644
--- a/media/ndk/include/media/NdkImage.h
+++ b/media/ndk/include/media/NdkImage.h
@@ -526,7 +526,15 @@
      * (in bytes) between adjacent rows.</p>
      *
      */
-    AIMAGE_FORMAT_Y8 = 0x20203859
+    AIMAGE_FORMAT_Y8 = 0x20203859,
+
+    /**
+     * Compressed HEIC format.
+     *
+     * <p>This format defines the HEIC brand of High Efficiency Image File
+     * Format as described in ISO/IEC 23008-12.</p>
+     */
+    AIMAGE_FORMAT_HEIC = 0x48454946,
 };
 
 /**
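A short usage sketch for the new pixel format constant; AImageReader_new is the existing NDK entry point, and the NdkImageReader change above reports a single plane per HEIC image (error handling trimmed):

    #include <media/NdkImage.h>
    #include <media/NdkImageReader.h>
    #include <media/NdkMediaError.h>

    // Create an AImageReader that accepts compressed HEIC images.
    static AImageReader *makeHeicReader(int32_t width, int32_t height) {
        AImageReader *reader = nullptr;
        media_status_t status = AImageReader_new(
                width, height, AIMAGE_FORMAT_HEIC, /*maxImages=*/2, &reader);
        return (status == AMEDIA_OK) ? reader : nullptr;
    }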
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 2551228..ddf5291 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -188,6 +188,7 @@
 extern const char* AMEDIAFORMAT_KEY_CDTRACKNUMBER __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_COMPILATION __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_COMPOSER __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_CREATE_INPUT_SURFACE_SUSPENDED __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_BYTE_BLOCK __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES __INTRODUCED_IN(29);
@@ -213,6 +214,7 @@
 extern const char* AMEDIAFORMAT_KEY_LOCATION __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_LOOP __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_LYRICIST __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_MANUFACTURER __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_MAX_BIT_RATE __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_MPEG2_STREAM_HEADER __INTRODUCED_IN(29);
 extern const char* AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN __INTRODUCED_IN(29);
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index c50084e..7bdd3ad 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -56,6 +56,7 @@
     AMEDIAFORMAT_KEY_COMPILATION; # var introduced=29
     AMEDIAFORMAT_KEY_COMPLEXITY; # var introduced=28
     AMEDIAFORMAT_KEY_COMPOSER; # var introduced=29
+    AMEDIAFORMAT_KEY_CREATE_INPUT_SURFACE_SUSPENDED; # var introduced=29
     AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE; # var introduced=29
     AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_BYTE_BLOCK; # var introduced=29
     AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES; # var introduced=29
@@ -104,6 +105,7 @@
     AMEDIAFORMAT_KEY_LOCATION; # var introduced=29
     AMEDIAFORMAT_KEY_LOOP; # var introduced=29
     AMEDIAFORMAT_KEY_LYRICIST; # var introduced=29
+    AMEDIAFORMAT_KEY_MANUFACTURER; # var introduced=29
     AMEDIAFORMAT_KEY_MAX_BIT_RATE; # var introduced=29
     AMEDIAFORMAT_KEY_MAX_HEIGHT; # var introduced=21
     AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var introduced=21
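A short usage sketch for the newly exported AMEDIAFORMAT_KEY_MANUFACTURER, assuming an AMediaFormat obtained from an extractor track; the returned string remains owned by the format:

    #include <media/NdkMediaFormat.h>

    // Read the manufacturer tag, if the container provided one.
    static const char *readManufacturer(AMediaFormat *format) {
        const char *value = nullptr;
        if (AMediaFormat_getString(format, AMEDIAFORMAT_KEY_MANUFACTURER, &value)) {
            return value;  // valid until the format is modified or deleted
        }
        return nullptr;
    }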
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 1c54aec..599c446 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -85,7 +85,7 @@
             return false;
         }
     } else {
-        if (appOps.noteOp(op, uid, resolvedOpPackageName) != AppOpsManager::MODE_ALLOWED) {
+        if (appOps.checkOp(op, uid, resolvedOpPackageName) != AppOpsManager::MODE_ALLOWED) {
             ALOGE("Request denied by app op: %d", op);
             return false;
         }
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index bc99099..4033247 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -292,13 +292,16 @@
         fullConfig.sample_rate = config->sample_rate;
         fullConfig.channel_mask = config->channel_mask;
         fullConfig.format = config->format;
+        std::vector<audio_io_handle_t> secondaryOutputs;
         ret = AudioSystem::getOutputForAttr(attr, &io,
                                             actualSessionId,
                                             &streamType, client.clientPid, client.clientUid,
                                             &fullConfig,
                                             (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ |
                                                     AUDIO_OUTPUT_FLAG_DIRECT),
-                                            deviceId, &portId);
+                                            deviceId, &portId, &secondaryOutputs);
+        ALOGW_IF(!secondaryOutputs.empty(),
+                 "%s does not support secondary outputs, ignoring them", __func__);
     } else {
         ret = AudioSystem::getInputForAttr(attr, &io,
                                               actualSessionId,
@@ -344,7 +347,7 @@
             return ret;
         }
     }
-    return AudioMixer::HAPTIC_SCALE_NONE;
+    return AudioMixer::HAPTIC_SCALE_MUTE;
 }
 
 /* static */
@@ -678,6 +681,7 @@
     status_t lStatus;
     audio_stream_type_t streamType;
     audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+    std::vector<audio_io_handle_t> secondaryOutputs;
 
     bool updatePid = (input.clientInfo.clientPid == -1);
     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
@@ -712,7 +716,7 @@
 
     lStatus = AudioSystem::getOutputForAttr(&input.attr, &output.outputId, sessionId, &streamType,
                                             clientPid, clientUid, &input.config, input.flags,
-                                            &output.selectedDeviceId, &portId);
+                                            &output.selectedDeviceId, &portId, &secondaryOutputs);
 
     if (lStatus != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
         ALOGE("createTrack() getOutputForAttr() return error %d or invalid output handle", lStatus);
@@ -785,6 +789,59 @@
         output.afLatencyMs = thread->latency();
         output.portId = portId;
 
+        if (lStatus == NO_ERROR) {
+            // Connect secondary outputs. Failure on a secondary output must not impede the primary.
+            // Any secondary output setup failure will lead to a desync between the AP and AF until
+            // the track is destroyed.
+            TeePatches teePatches;
+            for (audio_io_handle_t secondaryOutput : secondaryOutputs) {
+                PlaybackThread *secondaryThread = checkPlaybackThread_l(secondaryOutput);
+                if (secondaryThread == NULL) {
+                    ALOGE("no playback thread found for secondary output %d", output.outputId);
+                    continue;
+                }
+
+                size_t frameCount = std::lcm(thread->frameCount(), secondaryThread->frameCount());
+
+                using namespace std::chrono_literals;
+                auto inChannelMask = audio_channel_mask_out_to_in(input.config.channel_mask);
+                sp patchRecord = new RecordThread::PatchRecord(nullptr /* thread */,
+                                                               output.sampleRate,
+                                                               inChannelMask,
+                                                               input.config.format,
+                                                               frameCount,
+                                                               NULL /* buffer */,
+                                                               (size_t)0 /* bufferSize */,
+                                                               AUDIO_INPUT_FLAG_DIRECT,
+                                                               0ns /* timeout */);
+                status_t status = patchRecord->initCheck();
+                if (status != NO_ERROR) {
+                    ALOGE("Secondary output patchRecord init failed: %d", status);
+                    continue;
+                }
+                sp patchTrack = new PlaybackThread::PatchTrack(secondaryThread,
+                                                               streamType,
+                                                               output.sampleRate,
+                                                               input.config.channel_mask,
+                                                               input.config.format,
+                                                               frameCount,
+                                                               patchRecord->buffer(),
+                                                               patchRecord->bufferSize(),
+                                                               output.flags,
+                                                               0ns /* timeout */);
+                status = patchTrack->initCheck();
+                if (status != NO_ERROR) {
+                    ALOGE("Secondary output patchTrack init failed: %d", status);
+                    continue;
+                }
+                teePatches.push_back({patchRecord, patchTrack});
+                secondaryThread->addPatchTrack(patchTrack);
+                patchTrack->setPeerProxy(patchRecord.get());
+                patchRecord->setPeerProxy(patchTrack.get());
+            }
+            track->setTeePatches(std::move(teePatches));
+        }
+
         // move effect chain to this output thread if an effect on same session was waiting
         // for a track to be created
         if (lStatus == NO_ERROR && effectThread != NULL) {
@@ -3220,9 +3277,13 @@
             }
             // look for the thread where the specified audio session is present
             for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
-                if (mPlaybackThreads.valueAt(i)->hasAudioSession(sessionId) != 0) {
+                uint32_t sessionType = mPlaybackThreads.valueAt(i)->hasAudioSession(sessionId);
+                if (sessionType != 0) {
                     io = mPlaybackThreads.keyAt(i);
-                    break;
+                    // thread with same effect session is preferable
+                    if ((sessionType & ThreadBase::EFFECT_SESSION) != 0) {
+                        break;
+                    }
                 }
             }
             if (io == AUDIO_IO_HANDLE_NONE) {
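A small sketch of the buffer-sizing choice in the tee-patch loop above: the intermediate buffer shared by each PatchRecord/PatchTrack pair is sized to the least common multiple of the two threads' frame counts, so both sides always transfer whole periods (this is why the AudioFlinger.h change below pulls in <numeric>):

    #include <cstddef>
    #include <numeric>  // std::lcm, C++17

    // Frame count for a buffer bridging two threads with different period sizes.
    static size_t teeFrameCount(size_t primaryFrames, size_t secondaryFrames) {
        return std::lcm(primaryFrames, secondaryFrames);
    }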
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index d8c0da5..1441e15 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -21,8 +21,11 @@
 #include "Configuration.h"
 #include <atomic>
 #include <mutex>
+#include <chrono>
 #include <deque>
 #include <map>
+#include <numeric>
+#include <optional>
 #include <set>
 #include <string>
 #include <vector>
@@ -526,6 +529,9 @@
     class EffectChain;
 
     struct AudioStreamIn;
+    struct TeePatch;
+    using TeePatches = std::vector<TeePatch>;
+
 
     struct  stream_type_t {
         stream_type_t()
@@ -725,6 +731,11 @@
             audioHwDev(dev), stream(in), flags(flags) {}
     };
 
+    struct TeePatch {
+        sp<RecordThread::PatchRecord> patchRecord;
+        sp<PlaybackThread::PatchTrack> patchTrack;
+    };
+
     // for mAudioSessionRefs only
     struct AudioSessionRef {
         AudioSessionRef(audio_session_t sessionid, pid_t pid) :
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index e78c98b..c5b9953 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -139,6 +139,75 @@
     }
 }
 
+void FastMixer::updateMixerTrack(int index, Reason reason) {
+    const FastMixerState * const current = (const FastMixerState *) mCurrent;
+    const FastTrack * const fastTrack = &current->mFastTracks[index];
+
+    // check and update generation
+    if (reason == REASON_MODIFY && mGenerations[index] == fastTrack->mGeneration) {
+        return; // no change on an already configured track.
+    }
+    mGenerations[index] = fastTrack->mGeneration;
+
+    // mMixer == nullptr on configuration failure (check done after generation update).
+    if (mMixer == nullptr) {
+        return;
+    }
+
+    switch (reason) {
+    case REASON_REMOVE:
+        mMixer->destroy(index);
+        break;
+    case REASON_ADD: {
+        const status_t status = mMixer->create(
+                index, fastTrack->mChannelMask, fastTrack->mFormat, AUDIO_SESSION_OUTPUT_MIX);
+        LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+                "%s: cannot create fast track index"
+                " %d, mask %#x, format %#x in AudioMixer",
+                __func__, index, fastTrack->mChannelMask, fastTrack->mFormat);
+    }
+        [[fallthrough]];  // now fallthrough to update the newly created track.
+    case REASON_MODIFY:
+        mMixer->setBufferProvider(index, fastTrack->mBufferProvider);
+
+        float vlf, vrf;
+        if (fastTrack->mVolumeProvider != nullptr) {
+            const gain_minifloat_packed_t vlr = fastTrack->mVolumeProvider->getVolumeLR();
+            vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
+            vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
+        } else {
+            vlf = vrf = AudioMixer::UNITY_GAIN_FLOAT;
+        }
+
+        // set volume to avoid ramp whenever the track is updated (or created).
+        // Note: this does not distinguish from starting fresh or
+        // resuming from a paused state.
+        mMixer->setParameter(index, AudioMixer::VOLUME, AudioMixer::VOLUME0, &vlf);
+        mMixer->setParameter(index, AudioMixer::VOLUME, AudioMixer::VOLUME1, &vrf);
+
+        mMixer->setParameter(index, AudioMixer::RESAMPLE, AudioMixer::REMOVE, nullptr);
+        mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
+                (void *)mMixerBuffer);
+        mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::MIXER_FORMAT,
+                (void *)(uintptr_t)mMixerBufferFormat);
+        mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::FORMAT,
+                (void *)(uintptr_t)fastTrack->mFormat);
+        mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
+                (void *)(uintptr_t)fastTrack->mChannelMask);
+        mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
+                (void *)(uintptr_t)mSinkChannelMask);
+        mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_ENABLED,
+                (void *)(uintptr_t)fastTrack->mHapticPlaybackEnabled);
+        mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_INTENSITY,
+                (void *)(uintptr_t)fastTrack->mHapticIntensity);
+
+        mMixer->enable(index);
+        break;
+    default:
+        LOG_ALWAYS_FATAL("%s: invalid update reason %d", __func__, reason);
+    }
+}
+
 void FastMixer::onStateChange()
 {
     const FastMixerState * const current = (const FastMixerState *) mCurrent;
@@ -240,21 +309,16 @@
     // check for change in active track set
     const unsigned currentTrackMask = current->mTrackMask;
     dumpState->mTrackMask = currentTrackMask;
+    dumpState->mNumTracks = popcount(currentTrackMask);
     if (current->mFastTracksGen != mFastTracksGen) {
-        ALOG_ASSERT(mMixerBuffer != NULL);
 
         // process removed tracks first to avoid running out of track names
         unsigned removedTracks = previousTrackMask & ~currentTrackMask;
         while (removedTracks != 0) {
             int i = __builtin_ctz(removedTracks);
             removedTracks &= ~(1 << i);
-            const FastTrack* fastTrack = &current->mFastTracks[i];
-            ALOG_ASSERT(fastTrack->mBufferProvider == NULL);
-            if (mMixer != NULL) {
-                mMixer->destroy(i);
-            }
+            updateMixerTrack(i, REASON_REMOVE);
             // don't reset track dump state, since other side is ignoring it
-            mGenerations[i] = fastTrack->mGeneration;
         }
 
         // now process added tracks
@@ -262,40 +326,7 @@
         while (addedTracks != 0) {
             int i = __builtin_ctz(addedTracks);
             addedTracks &= ~(1 << i);
-            const FastTrack* fastTrack = &current->mFastTracks[i];
-            AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
-            if (mMixer != NULL) {
-                const int name = i; // for clarity, choose name as fast track index.
-                status_t status = mMixer->create(
-                        name,
-                        fastTrack->mChannelMask,
-                        fastTrack->mFormat, AUDIO_SESSION_OUTPUT_MIX);
-                LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
-                        "%s: cannot create track name"
-                        " %d, mask %#x, format %#x, sessionId %d in AudioMixer",
-                        __func__, name,
-                        fastTrack->mChannelMask, fastTrack->mFormat, AUDIO_SESSION_OUTPUT_MIX);
-                mMixer->setBufferProvider(name, bufferProvider);
-                mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
-                        (void *)mMixerBuffer);
-                // newly allocated track names default to full scale volume
-                mMixer->setParameter(
-                        name,
-                        AudioMixer::TRACK,
-                        AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
-                mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT,
-                        (void *)(uintptr_t)fastTrack->mFormat);
-                mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
-                        (void *)(uintptr_t)fastTrack->mChannelMask);
-                mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
-                        (void *)(uintptr_t)mSinkChannelMask);
-                mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::HAPTIC_ENABLED,
-                        (void *)(uintptr_t)fastTrack->mHapticPlaybackEnabled);
-                mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::HAPTIC_INTENSITY,
-                        (void *)(uintptr_t)fastTrack->mHapticIntensity);
-                mMixer->enable(name);
-            }
-            mGenerations[i] = fastTrack->mGeneration;
+            updateMixerTrack(i, REASON_ADD);
         }
 
         // finally process (potentially) modified tracks; these use the same slot
@@ -304,44 +335,10 @@
         while (modifiedTracks != 0) {
             int i = __builtin_ctz(modifiedTracks);
             modifiedTracks &= ~(1 << i);
-            const FastTrack* fastTrack = &current->mFastTracks[i];
-            if (fastTrack->mGeneration != mGenerations[i]) {
-                // this track was actually modified
-                AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
-                ALOG_ASSERT(bufferProvider != NULL);
-                if (mMixer != NULL) {
-                    const int name = i;
-                    mMixer->setBufferProvider(name, bufferProvider);
-                    if (fastTrack->mVolumeProvider == NULL) {
-                        float f = AudioMixer::UNITY_GAIN_FLOAT;
-                        mMixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f);
-                        mMixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f);
-                    }
-                    mMixer->setParameter(name, AudioMixer::RESAMPLE,
-                            AudioMixer::REMOVE, NULL);
-                    mMixer->setParameter(
-                            name,
-                            AudioMixer::TRACK,
-                            AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
-                    mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT,
-                            (void *)(uintptr_t)fastTrack->mFormat);
-                    mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
-                            (void *)(uintptr_t)fastTrack->mChannelMask);
-                    mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
-                            (void *)(uintptr_t)mSinkChannelMask);
-                    mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::HAPTIC_ENABLED,
-                            (void *)(uintptr_t)fastTrack->mHapticPlaybackEnabled);
-                    mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::HAPTIC_INTENSITY,
-                            (void *)(uintptr_t)fastTrack->mHapticIntensity);
-                    // already enabled
-                }
-                mGenerations[i] = fastTrack->mGeneration;
-            }
+            updateMixerTrack(i, REASON_MODIFY);
         }
 
         mFastTracksGen = current->mFastTracksGen;
-
-        dumpState->mNumTracks = popcount(currentTrackMask);
     }
 }
 
@@ -408,8 +405,8 @@
                 float vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
                 float vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
 
-                mMixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &vlf);
-                mMixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &vrf);
+                mMixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME0, &vlf);
+                mMixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME1, &vrf);
             }
             // FIXME The current implementation of framesReady() for fast tracks
             // takes a tryLock, which can block
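The per-track add/remove/modify blocks deleted above now funnel through FastMixer::updateMixerTrack(index, reason), whose Reason enum is declared in the FastMixer.h hunk that follows; the helper's body is not part of this change. A minimal, hypothetical sketch of the dispatch it presumably centralizes (the sketch assumes the types and includes already present in FastMixer.cpp; all names outside the removed blocks are placeholders):

    // Hypothetical, self-contained sketch of the consolidated per-track handling.
    enum Reason { REASON_REMOVE, REASON_ADD, REASON_MODIFY };

    static void updateTrackSketch(AudioMixer* mixer, const FastMixerState* current,
                                  int* generations, int index, Reason reason) {
        const FastTrack* fastTrack = &current->mFastTracks[index];
        switch (reason) {
        case REASON_REMOVE:
            if (mixer != NULL) {
                mixer->destroy(index);            // free the mixer track slot
            }
            break;
        case REASON_ADD:
        case REASON_MODIFY:
            if (mixer != NULL) {
                // (re)attach the buffer provider and push format/channel/haptic parameters,
                // as in the blocks removed from the add and modify paths above.
                mixer->setBufferProvider(index, fastTrack->mBufferProvider);
            }
            break;
        }
        generations[index] = fastTrack->mGeneration;  // remember the applied generation
    }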
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index c31d476..97ab635 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -59,6 +59,14 @@
     virtual void onStateChange();
     virtual void onWork();
 
+    enum Reason {
+        REASON_REMOVE,
+        REASON_ADD,
+        REASON_MODIFY,
+    };
+    // called when the fast track at index has been removed, added, or modified

+    void updateMixerTrack(int index, Reason reason);
+
     // FIXME these former local variables need comments
     static const FastMixerState sInitial;
 
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index c27f2b7..396c797 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -49,7 +49,7 @@
     audio_format_t          mFormat;         // track format
     int                     mGeneration;     // increment when any field is assigned
     bool                    mHapticPlaybackEnabled = false; // haptic playback is enabled or not
-    AudioMixer::haptic_intensity_t mHapticIntensity = AudioMixer::HAPTIC_SCALE_NONE; // intensity of
+    AudioMixer::haptic_intensity_t mHapticIntensity = AudioMixer::HAPTIC_SCALE_MUTE; // intensity of
                                                                                      // haptic data
 };
 
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 3381e4d..676a575 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -211,8 +211,8 @@
                 ((patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) &&
                  ((patch->sinks[0].ext.device.hw_module != srcModule) ||
                   !audioHwDevice->supportsAudioPatches()))) {
-                audio_devices_t outputDevice = AUDIO_DEVICE_NONE;
-                String8 outputDeviceAddress;
+                audio_devices_t outputDevice = patch->sinks[0].ext.device.type;
+                String8 outputDeviceAddress = String8(patch->sinks[0].ext.device.address);
                 if (patch->num_sources == 2) {
                     if (patch->sources[1].type != AUDIO_PORT_TYPE_MIX ||
                             (patch->num_sinks != 0 && patch->sinks[0].ext.device.hw_module !=
@@ -234,8 +234,6 @@
                             reinterpret_cast<PlaybackThread*>(thread.get()), false /*closeThread*/);
                 } else {
                     audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-                    audio_devices_t device = patch->sinks[0].ext.device.type;
-                    String8 address = String8(patch->sinks[0].ext.device.address);
                     audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
                     if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
@@ -254,8 +252,8 @@
                                                             patch->sinks[0].ext.device.hw_module,
                                                             &output,
                                                             &config,
-                                                            device,
-                                                            address,
+                                                            outputDevice,
+                                                            outputDeviceAddress,
                                                             flags);
                     ALOGV("mAudioFlinger.openOutput_l() returned %p", thread.get());
                     if (thread == 0) {
@@ -263,8 +261,6 @@
                         goto exit;
                     }
                     newPatch.mPlayback.setThread(reinterpret_cast<PlaybackThread*>(thread.get()));
-                    outputDevice = device;
-                    outputDeviceAddress = address;
                 }
                 audio_devices_t device = patch->sources[0].ext.device.type;
                 String8 address = String8(patch->sources[0].ext.device.address);
diff --git a/services/audioflinger/PatchPanel.h b/services/audioflinger/PatchPanel.h
index 2d9bd8e..612855f 100644
--- a/services/audioflinger/PatchPanel.h
+++ b/services/audioflinger/PatchPanel.h
@@ -81,13 +81,16 @@
     class Endpoint {
     public:
         Endpoint() = default;
-        Endpoint(Endpoint&& other) { *this = std::move(other); }
-        Endpoint& operator=(Endpoint&& other) {
+        Endpoint(const Endpoint&) = delete;
+        Endpoint& operator=(const Endpoint&) = delete;
+        Endpoint(Endpoint&& other) noexcept { swap(other); }
+        Endpoint& operator=(Endpoint&& other) noexcept {
+            swap(other);
+            return *this;
+        }
+        ~Endpoint() {
             ALOGE_IF(mHandle != AUDIO_PATCH_HANDLE_NONE,
                     "A non empty Patch Endpoint leaked, handle %d", mHandle);
-            *this = other;
-            other.mHandle = AUDIO_PATCH_HANDLE_NONE;
-            return *this;
         }
 
         status_t checkTrack(TrackType *trackOrNull) const {
@@ -127,10 +130,19 @@
         }
         void stopTrack() { if (mTrack) mTrack->stop(); }
 
-    private:
-        Endpoint(const Endpoint&) = default;
-        Endpoint& operator=(const Endpoint&) = default;
+        void swap(Endpoint &other) noexcept {
+            using std::swap;
+            swap(mThread, other.mThread);
+            swap(mCloseThread, other.mCloseThread);
+            swap(mHandle, other.mHandle);
+            swap(mTrack, other.mTrack);
+        }
 
+        friend void swap(Endpoint &a, Endpoint &b) noexcept {
+            a.swap(b);
+        }
+
+    private:
         sp<ThreadType> mThread;
         bool mCloseThread = true;
         audio_patch_handle_t mHandle = AUDIO_PATCH_HANDLE_NONE;
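Endpoint now deletes copy, implements move as a member-wise swap, and keeps the leak check in the destructor. A standalone sketch of the same move-via-swap idiom on a generic resource holder (placeholder type, not an AudioFlinger class):

    #include <utility>

    class ResourceHolder {
    public:
        ResourceHolder() = default;
        ResourceHolder(const ResourceHolder&) = delete;
        ResourceHolder& operator=(const ResourceHolder&) = delete;
        ResourceHolder(ResourceHolder&& other) noexcept { swap(other); }
        ResourceHolder& operator=(ResourceHolder&& other) noexcept {
            swap(other);      // our previous state now lives in 'other'
            return *this;     // ...and is released (or leak-checked) by other's destructor
        }
        ~ResourceHolder() { /* release or leak-check mHandle here, as Endpoint does */ }

        void swap(ResourceHolder& other) noexcept {
            using std::swap;
            swap(mHandle, other.mHandle);
        }
        friend void swap(ResourceHolder& a, ResourceHolder& b) noexcept { a.swap(b); }

    private:
        int mHandle = -1;     // placeholder resource
    };

One consequence of move-assignment-by-swap: the destination's previous resource is not dropped silently, it ends up in the moved-from object, whose destructor performs the cleanup (in Endpoint's case, the ALOGE_IF leak check).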
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index bad3ca8..357370e 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -43,9 +43,8 @@
 
             void        appendDumpHeader(String8& result);
             void        appendDump(String8& result, bool active);
-    virtual status_t    start(AudioSystem::sync_event_t event =
-                                    AudioSystem::SYNC_EVENT_NONE,
-                             audio_session_t triggerSession = AUDIO_SESSION_NONE);
+    virtual status_t    start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
+                              audio_session_t triggerSession = AUDIO_SESSION_NONE);
     virtual void        stop();
             void        pause();
 
@@ -125,10 +124,13 @@
             void    setHapticIntensity(AudioMixer::haptic_intensity_t hapticIntensity) {
                 if (AudioMixer::isValidHapticIntensity(hapticIntensity)) {
                     mHapticIntensity = hapticIntensity;
+                    setHapticPlaybackEnabled(mHapticIntensity != AudioMixer::HAPTIC_SCALE_MUTE);
                 }
             }
             sp<os::ExternalVibration> getExternalVibration() const { return mExternalVibration; }
 
+            void    setTeePatches(TeePatches teePatches);
+
 protected:
     // for numerous
     friend class PlaybackThread;
@@ -139,8 +141,8 @@
     DISALLOW_COPY_AND_ASSIGN(Track);
 
     // AudioBufferProvider interface
-    virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
-    // releaseBuffer() not overridden
+    status_t getNextBuffer(AudioBufferProvider::Buffer* buffer) override;
+    void releaseBuffer(AudioBufferProvider::Buffer* buffer) override;
 
     // ExtendedAudioBufferProvider interface
     virtual size_t framesReady() const;
@@ -207,7 +209,7 @@
 
     bool                mHapticPlaybackEnabled = false; // indicates haptic playback enabled or not
     // intensity to play haptic data
-    AudioMixer::haptic_intensity_t mHapticIntensity = AudioMixer::HAPTIC_SCALE_NONE;
+    AudioMixer::haptic_intensity_t mHapticIntensity = AudioMixer::HAPTIC_SCALE_MUTE;
     class AudioVibrationController : public os::BnExternalVibrationController {
     public:
         explicit AudioVibrationController(Track* track) : mTrack(track) {}
@@ -220,6 +222,14 @@
     sp<os::ExternalVibration>    mExternalVibration;
 
 private:
+    void                interceptBuffer(const AudioBufferProvider::Buffer& buffer);
+    /** Write the source data in the buffer provider. @return written frame count. */
+    size_t              writeFrames(AudioBufferProvider* dest, const void* src, size_t frameCount);
+    template <class F>
+    void                forEachTeePatchTrack(F f) {
+        for (auto& tp : mTeePatches) { f(tp.patchTrack); }
+    };
+
     // The following fields are only for fast tracks, and should be in a subclass
     int                 mFastIndex; // index within FastMixerState::mFastTracks[];
                                     // either mFastIndex == -1 if not isFastTrack()
@@ -239,6 +249,7 @@
     audio_output_flags_t mFlags;
     // If the last track change was notified to the client with readAndClearHasChanged
     std::atomic_flag     mChangeNotified = ATOMIC_FLAG_INIT;
+    TeePatches  mTeePatches;
 };  // end of Track
 
 
@@ -318,7 +329,7 @@
 };  // end of OutputTrack
 
 // playback track, used by PatchPanel
-class PatchTrack : public Track, public PatchProxyBufferProvider {
+class PatchTrack : public Track, public PatchTrackBase {
 public:
 
                         PatchTrack(PlaybackThread *playbackThread,
@@ -329,7 +340,8 @@
                                    size_t frameCount,
                                    void *buffer,
                                    size_t bufferSize,
-                                   audio_output_flags_t flags);
+                                   audio_output_flags_t flags,
+                                   const Timeout& timeout = {});
     virtual             ~PatchTrack();
 
     virtual status_t    start(AudioSystem::sync_event_t event =
@@ -345,12 +357,7 @@
                                      const struct timespec *timeOut = NULL);
     virtual void        releaseBuffer(Proxy::Buffer* buffer);
 
-            void setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; }
-
 private:
             void restartIfDisabled();
 
-    sp<ClientProxy>             mProxy;
-    PatchProxyBufferProvider*   mPeerProxy;
-    struct timespec             mPeerTimeout;
 };  // end of PatchTrack
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 32af7d5..ab4af33 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -113,7 +113,7 @@
 };
 
 // playback track, used by PatchPanel
-class PatchRecord : virtual public RecordTrack, public PatchProxyBufferProvider {
+class PatchRecord : public RecordTrack, public PatchTrackBase {
 public:
 
     PatchRecord(RecordThread *recordThread,
@@ -123,7 +123,8 @@
                 size_t frameCount,
                 void *buffer,
                 size_t bufferSize,
-                audio_input_flags_t flags);
+                audio_input_flags_t flags,
+                const Timeout& timeout = {});
     virtual             ~PatchRecord();
 
     // AudioBufferProvider interface
@@ -134,11 +135,4 @@
     virtual status_t    obtainBuffer(Proxy::Buffer *buffer,
                                      const struct timespec *timeOut = NULL);
     virtual void        releaseBuffer(Proxy::Buffer *buffer);
-
-    void setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; }
-
-private:
-    sp<ClientProxy>             mProxy;
-    PatchProxyBufferProvider*   mPeerProxy;
-    struct timespec             mPeerTimeout;
 };  // end of PatchRecord
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 5a70864..a8c4bd1 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2372,6 +2372,7 @@
             const int intensity = AudioFlinger::onExternalVibrationStart(
                     track->getExternalVibration());
             mLock.lock();
+            track->setHapticIntensity(static_cast<AudioMixer::haptic_intensity_t>(intensity));
             // Haptic playback should be enabled by vibrator service.
             if (track->getHapticPlaybackEnabled()) {
                 // Disable haptic playback of all active track to ensure only
@@ -2380,7 +2381,6 @@
                     t->setHapticPlaybackEnabled(false);
                 }
             }
-            track->setHapticIntensity(intensity);
         }
 
         track->mResetDone = false;
@@ -4784,7 +4784,10 @@
                 track->mFillingUpStatus = Track::FS_ACTIVE;
                 if (track->mState == TrackBase::RESUMING) {
                     track->mState = TrackBase::ACTIVE;
-                    param = AudioMixer::RAMP_VOLUME;
+                    // If a new track is paused immediately after start, do not ramp on resume.
+                    if (cblk->mServer != 0) {
+                        param = AudioMixer::RAMP_VOLUME;
+                    }
                 }
                 mAudioMixer->setParameter(trackId, AudioMixer::RESAMPLE, AudioMixer::RESET, NULL);
                 mLeftVolFloat = -1.0;
@@ -5462,6 +5465,11 @@
                 mFlushPending = true;
             }
         }
+    } else if (previousTrack == 0) {
+        // An old track could be added back during a track transition for direct output,
+        // so always issue a flush to drop the previous track's data; if that track was
+        // already destroyed with the HAL paused, the flush also lets playback resume.
+        mFlushPending = true;
     }
     PlaybackThread::onAddNewTrack_l();
 }
@@ -5500,7 +5508,6 @@
                 doHwPause = true;
                 mHwPaused = true;
             }
-            tracksToRemove->add(track);
         } else if (track->isFlushPending()) {
             track->flushAck();
             if (last) {
@@ -5597,7 +5604,8 @@
 
                 int64_t framesWritten = mBytesWritten / mFrameSize;
                 if (mStandby || !last ||
-                        track->presentationComplete(framesWritten, audioHALFrames)) {
+                        track->presentationComplete(framesWritten, audioHALFrames) ||
+                        track->isPaused()) {
                     if (track->isStopping_2()) {
                         track->mState = TrackBase::STOPPED;
                     }
@@ -8464,6 +8472,7 @@
         audio_output_flags_t flags =
                 (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ | AUDIO_OUTPUT_FLAG_DIRECT);
         audio_port_handle_t deviceId = mDeviceId;
+        std::vector<audio_io_handle_t> secondaryOutputs;
         ret = AudioSystem::getOutputForAttr(&mAttr, &io,
                                             mSessionId,
                                             &stream,
@@ -8472,7 +8481,10 @@
                                             &config,
                                             flags,
                                             &deviceId,
-                                            &portId);
+                                            &portId,
+                                            &secondaryOutputs);
+        ALOGD_IF(!secondaryOutputs.empty(),
+                 "MmapThread::start does not support secondary outputs, ignoring them");
     } else {
         audio_config_base_t config;
         config.sample_rate = mSampleRate;
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index c94639b..0ba0ab4 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -329,3 +329,19 @@
                                      const struct timespec *requested = NULL) = 0;
     virtual void        releaseBuffer(Proxy::Buffer* buffer) = 0;
 };
+
+class PatchTrackBase : public PatchProxyBufferProvider
+{
+public:
+    using Timeout = std::optional<std::chrono::nanoseconds>;
+                        PatchTrackBase(sp<ClientProxy> proxy, const ThreadBase& thread,
+                                       const Timeout& timeout);
+            void        setPeerTimeout(std::chrono::nanoseconds timeout);
+            void        setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; }
+
+protected:
+    const sp<ClientProxy>       mProxy;
+    PatchProxyBufferProvider*   mPeerProxy = nullptr;
+    struct timespec             mPeerTimeout{};
+
+};
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 22d34b2..65f799e 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -99,7 +99,7 @@
         mId(android_atomic_inc(&nextTrackId)),
         mTerminated(false),
         mType(type),
-        mThreadIoHandle(thread->id()),
+        mThreadIoHandle(thread ? thread->id() : AUDIO_IO_HANDLE_NONE),
         mPortId(portId),
         mIsInvalid(false)
 {
@@ -277,6 +277,27 @@
     return NO_ERROR;
 }
 
+AudioFlinger::ThreadBase::PatchTrackBase::PatchTrackBase(sp<ClientProxy> proxy,
+                                                         const ThreadBase& thread,
+                                                         const Timeout& timeout)
+    : mProxy(proxy)
+{
+    if (timeout) {
+        setPeerTimeout(*timeout);
+    } else {
+        // Double buffer mixer
+        uint64_t mixBufferNs = ((uint64_t)2 * thread.frameCount() * 1000000000) /
+                                              thread.sampleRate();
+        setPeerTimeout(std::chrono::nanoseconds{mixBufferNs});
+    }
+}
+
+void AudioFlinger::ThreadBase::PatchTrackBase::setPeerTimeout(std::chrono::nanoseconds timeout) {
+    mPeerTimeout.tv_sec = timeout.count() / std::nano::den;
+    mPeerTimeout.tv_nsec = timeout.count() % std::nano::den;
+}
+
+
 // ----------------------------------------------------------------------------
 //      Playback
 // ----------------------------------------------------------------------------
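When no explicit timeout is given, PatchTrackBase sizes the peer timeout to two mixer buffers. A worked example with hypothetical thread parameters (960 frames at 48 kHz), including the nanoseconds-to-timespec split done by setPeerTimeout():

    #include <chrono>
    #include <cstdint>
    #include <ctime>

    int main() {
        const uint64_t frameCount = 960;     // hypothetical thread frame count
        const uint64_t sampleRate = 48000;   // hypothetical thread sample rate
        // Duration of two mixer buffers: 2 * 960 / 48000 s = 40 ms = 40,000,000 ns.
        const uint64_t mixBufferNs = (2 * frameCount * 1000000000ULL) / sampleRate;

        const std::chrono::nanoseconds timeout{mixBufferNs};
        struct timespec peerTimeout;
        peerTimeout.tv_sec  = timeout.count() / std::nano::den;   // 0 s
        peerTimeout.tv_nsec = timeout.count() % std::nano::den;   // 40,000,000 ns
        return peerTimeout.tv_nsec == 40000000 ? 0 : 1;
    }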
@@ -449,7 +470,7 @@
             || thread->type() == ThreadBase::DUPLICATING;
 #ifdef TEE_SINK
     mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
-            + "_" + std::to_string(mId));
+            + "_" + std::to_string(mId) + "_T");
 #endif
 
     if (channelMask & AUDIO_CHANNEL_HAPTIC_ALL) {
@@ -504,6 +525,7 @@
             AudioSystem::releaseOutput(mPortId);
         }
     }
+    forEachTeePatchTrack([](auto patchTrack) { patchTrack->destroy(); });
 }
 
 void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
@@ -649,8 +671,7 @@
 }
 
 // AudioBufferProvider interface
-status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
-        AudioBufferProvider::Buffer* buffer)
+status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
 {
     ServerProxy::Buffer buf;
     size_t desiredFrames = buffer->frameCount;
@@ -665,10 +686,61 @@
     } else {
         mAudioTrackServerProxy->tallyUnderrunFrames(0);
     }
-
     return status;
 }
 
+void AudioFlinger::PlaybackThread::Track::releaseBuffer(AudioBufferProvider::Buffer* buffer)
+{
+    interceptBuffer(*buffer);
+    TrackBase::releaseBuffer(buffer);
+}
+
+// TODO: compensate for time shift between HW modules.
+void AudioFlinger::PlaybackThread::Track::interceptBuffer(
+        const AudioBufferProvider::Buffer& sourceBuffer) {
+    auto start = std::chrono::steady_clock::now();
+    const size_t frameCount = sourceBuffer.frameCount;
+    for (auto& sink : mTeePatches) {
+        RecordThread::PatchRecord* patchRecord = sink.patchRecord.get();
+
+        size_t framesWritten = writeFrames(patchRecord, sourceBuffer.i8, frameCount);
+        // On buffer wrap, the buffer frame count will be less than requested,
+        // when this happens a second buffer needs to be used to write the leftover audio
+        size_t framesLeft = frameCount - framesWritten;
+        if (framesWritten != 0 && framesLeft != 0) {
+            framesWritten +=
+                writeFrames(patchRecord, sourceBuffer.i8 + framesWritten * mFrameSize, framesLeft);
+            framesLeft = frameCount - framesWritten;
+        }
+        ALOGW_IF(framesLeft != 0, "%s(%d) PatchRecord %d cannot provide a big enough "
+                 "buffer %zu/%zu, dropping %zu frames", __func__, mId, patchRecord->mId,
+                 framesWritten, frameCount, framesLeft);
+    }
+    auto spent = ceil<std::chrono::microseconds>(std::chrono::steady_clock::now() - start);
+    using namespace std::chrono_literals;
+    // Average is ~20us per track, this should virtually never be logged (Logging takes >200us)
+    ALOGD_IF(spent > 200us, "%s: took %lldus to intercept %zu tracks", __func__,
+             spent.count(), mTeePatches.size());
+}
+
+size_t AudioFlinger::PlaybackThread::Track::writeFrames(AudioBufferProvider* dest,
+                                                        const void* src,
+                                                        size_t frameCount) {
+    AudioBufferProvider::Buffer patchBuffer;
+    patchBuffer.frameCount = frameCount;
+    auto status = dest->getNextBuffer(&patchBuffer);
+    if (status != NO_ERROR) {
+       ALOGW("%s PatchRecord getNextBuffer failed with error %d: %s",
+             __func__, status, strerror(-status));
+       return 0;
+    }
+    ALOG_ASSERT(patchBuffer.frameCount <= frameCount);
+    memcpy(patchBuffer.raw, src, patchBuffer.frameCount * mFrameSize);
+    auto framesWritten = patchBuffer.frameCount;
+    dest->releaseBuffer(&patchBuffer);
+    return framesWritten;
+}
+
 // releaseBuffer() is not overridden
 
 // ExtendedAudioBufferProvider interface
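writeFrames() copies at most one contiguous region of the destination provider, which is why interceptBuffer() retries once when the sink wraps mid-copy. A condensed, standalone sketch of that pattern (the Sink type and its write() method are placeholders, not AudioFlinger APIs):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Placeholder for an AudioBufferProvider-like sink exposing one contiguous region at a time.
    struct Sink {
        int8_t buf[256];
        size_t pos = 0;
        // Stores at most the frames that fit before the internal buffer wraps;
        // returns how many frames were actually written.
        size_t write(const int8_t* src, size_t frameCount, size_t frameSize) {
            const size_t framesToEnd = (sizeof(buf) - pos) / frameSize;
            const size_t frames = frameCount < framesToEnd ? frameCount : framesToEnd;
            std::memcpy(buf + pos, src, frames * frameSize);
            pos = (pos + frames * frameSize) % sizeof(buf);
            return frames;
        }
    };

    size_t copyWithWrap(Sink& sink, const int8_t* src, size_t frameCount, size_t frameSize) {
        size_t written = sink.write(src, frameCount, frameSize);      // first contiguous chunk
        if (written != 0 && written < frameCount) {
            // The sink wrapped: push the leftover frames starting right after the copied ones.
            written += sink.write(src + written * frameSize, frameCount - written, frameSize);
        }
        return written;  // caller logs/drops any frames still missing, as interceptBuffer() does
    }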
@@ -816,6 +888,9 @@
     } else {
         status = BAD_VALUE;
     }
+    if (status == NO_ERROR) {
+        forEachTeePatchTrack([](auto patchTrack) { patchTrack->start(); });
+    }
     return status;
 }
 
@@ -849,6 +924,7 @@
                     __func__, mId, (int)mThreadIoHandle);
         }
     }
+    forEachTeePatchTrack([](auto patchTrack) { patchTrack->stop(); });
 }
 
 void AudioFlinger::PlaybackThread::Track::pause()
@@ -881,6 +957,8 @@
             break;
         }
     }
+    // Pausing the TeePatch to avoid a glitch on underrun, at the cost of buffered audio loss.
+    forEachTeePatchTrack([](auto patchTrack) { patchTrack->pause(); });
 }
 
 void AudioFlinger::PlaybackThread::Track::flush()
@@ -942,6 +1020,8 @@
         // because the hardware buffer could hold a large amount of audio
         playbackThread->broadcast_l();
     }
+    // Flush the Tee so that on resume we do not replay old data or glitch on the transition to new data
+    forEachTeePatchTrack([](auto patchTrack) { patchTrack->flush(); });
 }
 
 // must be called with thread lock held
@@ -1060,6 +1140,11 @@
     };
 }
 
+void AudioFlinger::PlaybackThread::Track::setTeePatches(TeePatches teePatches) {
+    forEachTeePatchTrack([](auto patchTrack) { patchTrack->destroy(); });
+    mTeePatches = std::move(teePatches);
+}
+
 status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
 {
     if (!isOffloaded() && !isDirect()) {
@@ -1615,19 +1700,16 @@
                                                      size_t frameCount,
                                                      void *buffer,
                                                      size_t bufferSize,
-                                                     audio_output_flags_t flags)
+                                                     audio_output_flags_t flags,
+                                                     const Timeout& timeout)
     :   Track(playbackThread, NULL, streamType,
               audio_attributes_t{} /* currently unused for patch track */,
               sampleRate, format, channelMask, frameCount,
               buffer, bufferSize, nullptr /* sharedBuffer */,
               AUDIO_SESSION_NONE, AID_AUDIOSERVER, flags, TYPE_PATCH),
-              mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true))
+        PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true),
+                       *playbackThread, timeout)
 {
-    uint64_t mixBufferNs = ((uint64_t)2 * playbackThread->frameCount() * 1000000000) /
-                                                                    playbackThread->sampleRate();
-    mPeerTimeout.tv_sec = mixBufferNs / 1000000000;
-    mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000);
-
     ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
                                       __func__, mId, sampleRate,
                                       (int)mPeerTimeout.tv_sec,
@@ -2088,19 +2170,16 @@
                                                      size_t frameCount,
                                                      void *buffer,
                                                      size_t bufferSize,
-                                                     audio_input_flags_t flags)
+                                                     audio_input_flags_t flags,
+                                                     const Timeout& timeout)
     :   RecordTrack(recordThread, NULL,
                 audio_attributes_t{} /* currently unused for patch track */,
                 sampleRate, format, channelMask, frameCount,
                 buffer, bufferSize, AUDIO_SESSION_NONE, AID_AUDIOSERVER,
                 flags, TYPE_PATCH),
-                mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
+        PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true),
+                       *recordThread, timeout)
 {
-    uint64_t mixBufferNs = ((uint64_t)2 * recordThread->frameCount() * 1000000000) /
-                                                                recordThread->sampleRate();
-    mPeerTimeout.tv_sec = mixBufferNs / 1000000000;
-    mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000);
-
     ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
                                       __func__, mId, sampleRate,
                                       (int)mPeerTimeout.tv_sec,
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index ebb4f3b..f72f44a 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -11,8 +11,10 @@
 LOCAL_C_INCLUDES := \
     frameworks/av/services/audioflinger \
     $(call include-path-for, audio-utils) \
-    frameworks/av/services/audiopolicy/common/include \
-    frameworks/av/services/audiopolicy/engine/interface \
+
+LOCAL_HEADER_LIBRARIES := \
+    libaudiopolicycommon \
+    libaudiopolicyengine_interface_headers \
 
 LOCAL_SHARED_LIBRARIES := \
     libcutils \
@@ -51,7 +53,7 @@
     libcutils \
     libutils \
     liblog \
-    libaudioclient \
+    libaudiopolicy \
     libsoundtrigger
 
 ifeq ($(USE_CONFIGURABLE_AUDIO_POLICY), 1)
@@ -76,10 +78,12 @@
 endif # ifeq ($(USE_CONFIGURABLE_AUDIO_POLICY), 1)
 
 LOCAL_C_INCLUDES += \
-    frameworks/av/services/audiopolicy/common/include \
-    frameworks/av/services/audiopolicy/engine/interface \
     $(call include-path-for, audio-utils) \
 
+LOCAL_HEADER_LIBRARIES := \
+    libaudiopolicycommon \
+    libaudiopolicyengine_interface_headers
+
 LOCAL_STATIC_LIBRARIES := \
     libaudiopolicycomponents
 
@@ -114,10 +118,12 @@
     libaudiopolicycomponents
 
 LOCAL_C_INCLUDES += \
-    frameworks/av/services/audiopolicy/common/include \
-    frameworks/av/services/audiopolicy/engine/interface \
     $(call include-path-for, audio-utils) \
 
+LOCAL_HEADER_LIBRARIES := \
+    libaudiopolicycommon \
+    libaudiopolicyengine_interface_headers
+
 LOCAL_CFLAGS := -Wall -Werror
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index cf2ce99..bb5441d 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -110,7 +110,8 @@
                                         const audio_config_t *config,
                                         audio_output_flags_t *flags,
                                         audio_port_handle_t *selectedDeviceId,
-                                        audio_port_handle_t *portId) = 0;
+                                        audio_port_handle_t *portId,
+                                        std::vector<audio_io_handle_t> *secondaryOutputs) = 0;
     // indicates to the audio policy manager that the output starts being used by corresponding stream.
     virtual status_t startOutput(audio_port_handle_t portId) = 0;
     // indicates to the audio policy manager that the output stops being used by corresponding stream.
@@ -240,6 +241,10 @@
                 std::vector<audio_format_t> *formats) = 0;
 
     virtual void     setAppState(uid_t uid, app_state_t state);
+
+    virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) = 0;
+
+    virtual product_strategy_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa) = 0;
 };
 
 
diff --git a/services/audiopolicy/common/include/RoutingStrategy.h b/services/audiopolicy/common/include/RoutingStrategy.h
deleted file mode 100644
index f8a1cd6..0000000
--- a/services/audiopolicy/common/include/RoutingStrategy.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-namespace android {
-
-// Time in milliseconds after media stopped playing during which we consider that the
-// sonification should be as unobtrusive as during the time media was playing.
-#define SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY 5000
-
-enum routing_strategy {
-    STRATEGY_NONE = -1,
-    STRATEGY_MEDIA,
-    STRATEGY_PHONE,
-    STRATEGY_SONIFICATION,
-    STRATEGY_SONIFICATION_RESPECTFUL,
-    STRATEGY_DTMF,
-    STRATEGY_ENFORCED_AUDIBLE,
-    STRATEGY_TRANSMITTED_THROUGH_SPEAKER,
-    STRATEGY_ACCESSIBILITY,
-    STRATEGY_REROUTING,
-    NUM_STRATEGIES
-};
-
-}; //namespace android
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
index 5ccc8fd..a3b6b36 100644
--- a/services/audiopolicy/common/include/Volume.h
+++ b/services/audiopolicy/common/include/Volume.h
@@ -20,6 +20,22 @@
 #include <utils/Log.h>
 #include <math.h>
 
+namespace android {
+/**
+ * VolumeSource is the discriminant for volume management on an output.
+ * Historically it was the stream type; it may instead host a volume group, or a volume curve
+ * if we allow more than one curve per volume group.
+ */
+enum VolumeSource : std::underlying_type<audio_stream_type_t>::type;
+static const VolumeSource VOLUME_SOURCE_NONE = static_cast<VolumeSource>(AUDIO_STREAM_DEFAULT);
+
+static inline VolumeSource streamToVolumeSource(audio_stream_type_t stream) {
+    return static_cast<VolumeSource>(stream);
+}
+
+
+} // namespace android
+
 // Absolute min volume in dB (can be represented in single precision normal float value)
 #define VOLUME_MIN_DB (-758)
 
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 837ca47..605fc1c 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -17,9 +17,20 @@
 #pragma once
 
 #include <system/audio.h>
+#include <vector>
+
+namespace android {
+
+using StreamTypeVector = std::vector<audio_stream_type_t>;
+
+static const audio_attributes_t defaultAttr = AUDIO_ATTRIBUTES_INITIALIZER;
+
+} // namespace android
 
 static const audio_format_t gDynamicFormat = AUDIO_FORMAT_DEFAULT;
 
+static const uint32_t SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY = 5000;
+
 // For mixed output and inputs, the policy will use max mixer sampling rates.
 // Do not limit sampling rate otherwise
 #define SAMPLE_RATE_HZ_MAX 192000
@@ -151,3 +162,25 @@
     }
     return format1 == format2;
 }
+
+/**
+ * @brief hasStream checks if a given stream type is found in the list of streams
+ * @param streams collection of stream types to consider.
+ * @param streamType to consider
+ * @return true if the given stream type is found in the given streams, false otherwise
+ */
+static inline bool hasStream(const android::StreamTypeVector &streams,
+                             audio_stream_type_t streamType)
+{
+    return std::find(begin(streams), end(streams), streamType) != end(streams);
+}
+
+/**
+ * @brief hasVoiceStream checks if a voice stream is found in the list of streams
+ * @param streams collection to consider.
+ * @return true if voice stream is found in the given streams, false otherwise
+ */
+static inline bool hasVoiceStream(const android::StreamTypeVector &streams)
+{
+    return hasStream(streams, AUDIO_STREAM_VOICE_CALL);
+}
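A short, hypothetical call site for the two helpers above (header names as in this change; the function and variable names are placeholders):

    #include <system/audio.h>
    #include "policy.h"

    // Hypothetical usage: decide whether a set of streams carries a voice stream.
    static bool needsInCallRouting() {
        android::StreamTypeVector streams = { AUDIO_STREAM_MUSIC, AUDIO_STREAM_VOICE_CALL };
        return hasVoiceStream(streams);  // same as hasStream(streams, AUDIO_STREAM_VOICE_CALL)
    }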
diff --git a/services/audiopolicy/common/managerdefinitions/Android.bp b/services/audiopolicy/common/managerdefinitions/Android.bp
index e5ebab7..c9037a1 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.bp
+++ b/services/audiopolicy/common/managerdefinitions/Android.bp
@@ -19,7 +19,6 @@
         "src/Serializer.cpp",
         "src/SoundTriggerSession.cpp",
         "src/TypeConverter.cpp",
-        "src/VolumeCurve.cpp",
     ],
     shared_libs: [
         "libcutils",
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h b/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
index 555412e..6e29632 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
@@ -16,6 +16,8 @@
 
 #pragma once
 
+#include "DeviceDescriptor.h"
+
 namespace android {
 
 /**
@@ -34,4 +36,36 @@
     virtual void setPatchHandle(audio_patch_handle_t handle) = 0;
 };
 
+template <class IoDescriptor, class Filter>
+sp<DeviceDescriptor> findPreferredDevice(
+        IoDescriptor& desc, Filter filter, bool& active, const DeviceVector& devices)
+{
+    auto activeClients = desc->clientsList(true /*activeOnly*/);
+    auto activeClientsWithRoute =
+        desc->clientsList(true /*activeOnly*/, filter, true /*preferredDevice*/);
+    active = activeClients.size() > 0;
+    if (active && activeClients.size() == activeClientsWithRoute.size()) {
+        return devices.getDeviceFromId(activeClientsWithRoute[0]->preferredDeviceId());
+    }
+    return nullptr;
+}
+
+template <class IoCollection, class Filter>
+sp<DeviceDescriptor> findPreferredDevice(
+        IoCollection& ioCollection, Filter filter, const DeviceVector& devices)
+{
+    sp<DeviceDescriptor> device;
+    for (size_t i = 0; i < ioCollection.size(); i++) {
+        auto desc = ioCollection.valueAt(i);
+        bool active;
+        sp<DeviceDescriptor> curDevice = findPreferredDevice(desc, filter, active, devices);
+        if (active && curDevice == nullptr) {
+            return nullptr;
+        } else if (curDevice != nullptr) {
+            device = curDevice;
+        }
+    }
+    return device;
+}
+
 } // namespace android
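The two templates above report a unanimous preferred device: the per-descriptor overload returns a device only when every active client of that descriptor has set a preferred route, and the collection overload returns nullptr as soon as one active descriptor lacks such a consensus. A hypothetical call site (the collection, strategy, and device-vector names are assumptions):

    // Hypothetical: resolve the device preferred by all active clients following 'strategy'
    // across every opened output, before falling back to the engine's default routing.
    sp<DeviceDescriptor> preferred =
            findPreferredDevice(mOutputs, strategy, mAvailableOutputDevices);
    if (preferred == nullptr) {
        // either nothing is active, or some active output has no unanimous preferred device
    }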
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index d4cfd1e..803cfac 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -143,6 +143,16 @@
 
     void trackEffectEnabled(const sp<EffectDescriptor> &effect, bool enabled);
 
+    /**
+    * @brief clearSessionRoutesForDevice: when a device is disconnected, and if this device has
+    * been chosen as the preferred device by any client, the policy manager shall
+    * prevent any further use of this device by clearing all the session routes involving it.
+    * In other words, the preferred device port id of these clients will be reset to NONE.
+    * @param disconnectedDevice device to be disconnected
+    */
+    void clearSessionRoutesForDevice(const sp<DeviceDescriptor> &disconnectedDevice);
+
     void dump(String8 *dst) const;
 };
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index e1ecc61..cf9519b 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -16,18 +16,20 @@
 
 #pragma once
 
+#define __STDC_LIMIT_MACROS
+#include <inttypes.h>
+
 #include <sys/types.h>
 
 #include <utils/Errors.h>
 #include <utils/Timers.h>
 #include <utils/KeyedVector.h>
 #include <system/audio.h>
-#include <RoutingStrategy.h>
 #include "AudioIODescriptorInterface.h"
 #include "AudioPort.h"
 #include "ClientDescriptor.h"
 #include "DeviceDescriptor.h"
-#include <map>
+#include <vector>
 
 namespace android {
 
@@ -35,6 +37,105 @@
 class AudioMix;
 class AudioPolicyClientInterface;
 
+class ActivityTracking
+{
+public:
+    virtual ~ActivityTracking() = default;
+    bool isActive(uint32_t inPastMs = 0, nsecs_t sysTime = 0) const
+    {
+        if (mActivityCount > 0) {
+            return true;
+        }
+        if (inPastMs == 0) {
+            return false;
+        }
+        if (sysTime == 0) {
+            sysTime = systemTime();
+        }
+        if (ns2ms(sysTime - mStopTime) < inPastMs) {
+            return true;
+        }
+        return false;
+    }
+    void changeActivityCount(int delta)
+    {
+        if ((delta + (int)mActivityCount) < 0) {
+            LOG_ALWAYS_FATAL("%s: invalid delta %d, refCount %d", __func__, delta, mActivityCount);
+        }
+        mActivityCount += delta;
+        if (!mActivityCount) {
+            setStopTime(systemTime());
+        }
+    }
+    uint32_t getActivityCount() const { return mActivityCount; }
+    nsecs_t getStopTime() const { return mStopTime; }
+    void setStopTime(nsecs_t stopTime) { mStopTime = stopTime; }
+
+    virtual void dump(String8 *dst, int spaces) const
+    {
+        dst->appendFormat("%*s- ActivityCount: %d, StopTime: %" PRId64 ", ", spaces, "",
+                          getActivityCount(), getStopTime());
+    }
+private:
+    uint32_t mActivityCount = 0;
+    nsecs_t mStopTime = 0;
+};
+
+/**
+ * @brief VolumeActivity: tracks the activity for volume policy (volume index, mute count,
+ * time of the previous stop, and mute state when the device is incompatible with another
+ * strategy).
+ */
+class VolumeActivity : public ActivityTracking
+{
+public:
+    bool isMuted() const { return mMuteCount > 0; }
+    int getMuteCount() const { return mMuteCount; }
+    int incMuteCount() { return ++mMuteCount; }
+    int decMuteCount() { return mMuteCount > 0 ? --mMuteCount : -1; }
+
+    void dump(String8 *dst, int spaces) const override
+    {
+        ActivityTracking::dump(dst, spaces);
+        dst->appendFormat(", Volume: %.03f, MuteCount: %02d\n", mCurVolumeDb, mMuteCount);
+    }
+    void setVolume(float volume) { mCurVolumeDb = volume; }
+    float getVolume() const { return mCurVolumeDb; }
+
+private:
+    int mMuteCount = 0; /**< mute request counter */
+    float mCurVolumeDb = NAN; /**< current volume in dB. */
+};
+/**
+ * Note: volume activities shall be indexed by CurvesId if we want to allow multiple
+ * curves per volume group, implying that mute management or volume balancing between HW and SW
+ * is done per curve.
+ */
+using VolumeActivities = std::map<VolumeSource, VolumeActivity>;
+
+/**
+ * @brief RoutingActivity: tracks the routing activity of a product strategy (activity count,
+ * time of the previous stop, and whether it is muted because of an incompatible device
+ * selection with another strategy).
+ * Having this class avoids looping over all attributes (legacy streams) of the strategy.
+ */
+class RoutingActivity : public ActivityTracking
+{
+public:
+    void setMutedByDevice(bool isMuted) { mIsMutedByDevice = isMuted; }
+    bool isMutedByDevice() const { return mIsMutedByDevice; }
+
+    void dump(String8 *dst, int spaces) const override {
+        ActivityTracking::dump(dst, spaces);
+        dst->appendFormat("\n");
+    }
+private:
+    /**
+     * strategies muted because of incompatible device selection.
+     * See AudioPolicyManager::checkDeviceMuteStrategies()
+     */
+    bool mIsMutedByDevice = false;
+};
+using RoutingActivities = std::map<product_strategy_t, RoutingActivity>;
+
 // descriptor for audio outputs. Used to maintain current configuration of each opened audio output
 // and keep track of the usage of this output by each audio stream type.
 class AudioOutputDescriptor: public AudioPortConfig, public AudioIODescriptorInterface
@@ -62,25 +163,87 @@
                            bool force);
 
     /**
-     * Changes the stream active count and mActiveClients only.
-     * This does not change the client->active() state or the output descriptor's
-     * global active count.
+     * @brief setStopTime sets the stop time when the given client stops or is rerouted
+     * @param client to be considered
+     * @param sysTime when the client stopped/was rerouted
      */
-    virtual void changeStreamActiveCount(const sp<TrackClientDescriptor>& client, int delta);
-            uint32_t streamActiveCount(audio_stream_type_t stream) const
-                            { return mActiveCount[stream]; }
+    void setStopTime(const sp<TrackClientDescriptor>& client, nsecs_t sysTime);
 
     /**
      * Changes the client->active() state and the output descriptor's global active count,
      * along with the stream active count and mActiveClients.
      * The client must be previously added by the base class addClient().
+     * In case of a duplicating thread, the client shall be added on the duplicated thread, not
+     * on the involved outputs, but setClientActive will be called on all outputs to track the
+     * strategy and active clients for a given output.
+     * The active ref count of the client is incremented/decremented through the setActive API.
      */
-            void setClientActive(const sp<TrackClientDescriptor>& client, bool active);
+    virtual void setClientActive(const sp<TrackClientDescriptor>& client, bool active);
 
-    bool isActive(uint32_t inPastMs = 0) const;
-    bool isStreamActive(audio_stream_type_t stream,
-                        uint32_t inPastMs = 0,
-                        nsecs_t sysTime = 0) const;
+    bool isActive(uint32_t inPastMs) const;
+    bool isActive(VolumeSource volumeSource = VOLUME_SOURCE_NONE,
+                  uint32_t inPastMs = 0,
+                  nsecs_t sysTime = 0) const;
+    bool isAnyActive(VolumeSource volumeSourceToIgnore) const;
+
+    std::vector<VolumeSource> getActiveVolumeSources() const {
+        std::vector<VolumeSource> activeList;
+        for (const auto &iter : mVolumeActivities) {
+            if (iter.second.isActive()) {
+                activeList.push_back(iter.first);
+            }
+        }
+        return activeList;
+    }
+    uint32_t getActivityCount(VolumeSource vs) const
+    {
+        return mVolumeActivities.find(vs) != std::end(mVolumeActivities)?
+                    mVolumeActivities.at(vs).getActivityCount() : 0;
+    }
+    bool isMuted(VolumeSource vs) const
+    {
+        return mVolumeActivities.find(vs) != std::end(mVolumeActivities)?
+                    mVolumeActivities.at(vs).isMuted() : false;
+    }
+    int getMuteCount(VolumeSource vs) const
+    {
+        return mVolumeActivities.find(vs) != std::end(mVolumeActivities)?
+                    mVolumeActivities.at(vs).getMuteCount() : 0;
+    }
+    int incMuteCount(VolumeSource vs)
+    {
+        return mVolumeActivities[vs].incMuteCount();
+    }
+    int decMuteCount(VolumeSource vs)
+    {
+        return mVolumeActivities[vs].decMuteCount();
+    }
+    void setCurVolume(VolumeSource vs, float volume)
+    {
+        // Even if no activity is registered for this group, create the entry anyway
+        mVolumeActivities[vs].setVolume(volume);
+    }
+    float getCurVolume(VolumeSource vs) const
+    {
+        return mVolumeActivities.find(vs) != std::end(mVolumeActivities) ?
+                    mVolumeActivities.at(vs).getVolume() : NAN;
+    }
+
+    bool isStrategyActive(product_strategy_t ps, uint32_t inPastMs = 0, nsecs_t sysTime = 0) const
+    {
+        return mRoutingActivities.find(ps) != std::end(mRoutingActivities)?
+                    mRoutingActivities.at(ps).isActive(inPastMs, sysTime) : false;
+    }
+    bool isStrategyMutedByDevice(product_strategy_t ps) const
+    {
+        return mRoutingActivities.find(ps) != std::end(mRoutingActivities)?
+                    mRoutingActivities.at(ps).isMutedByDevice() : false;
+    }
+    void setStrategyMutedByDevice(product_strategy_t ps, bool isMuted)
+    {
+        mRoutingActivities[ps].setMutedByDevice(isMuted);
+    }
 
     virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
                            const struct audio_port_config *srcConfig = NULL) const;
@@ -95,7 +258,8 @@
     void setPatchHandle(audio_patch_handle_t handle) override;
 
     TrackClientVector clientsList(bool activeOnly = false,
-        routing_strategy strategy = STRATEGY_NONE, bool preferredDeviceOnly = false) const;
+                                  product_strategy_t strategy = PRODUCT_STRATEGY_NONE,
+                                  bool preferredDeviceOnly = false) const;
 
     // override ClientMapHandler to abort when removing a client when active.
     void removeClient(audio_port_handle_t portId) override {
@@ -105,40 +269,36 @@
         // it is possible that when a client is removed, we could remove its
         // associated active count by calling changeStreamActiveCount(),
         // but that would be hiding a problem, so we log fatal instead.
-        auto it2 = mActiveClients.find(client);
-        LOG_ALWAYS_FATAL_IF(it2 != mActiveClients.end(),
-                "%s(%d) removing client portId %d which is active (count %zu)",
-                __func__, mId, portId, it2->second);
+        auto clientIter = std::find(begin(mActiveClients), end(mActiveClients), client);
+        LOG_ALWAYS_FATAL_IF(clientIter != mActiveClients.end(),
+                            "%s(%d) removing client portId %d which is active (count %d)",
+                            __func__, mId, portId, client->getActivityCount());
         ClientMapHandler<TrackClientDescriptor>::removeClient(portId);
     }
 
-    using ActiveClientMap = std::map<sp<TrackClientDescriptor>, size_t /* count */>;
-    // required for duplicating thread
-    const ActiveClientMap& getActiveClients() const {
+    const TrackClientVector& getActiveClients() const {
         return mActiveClients;
     }
 
     DeviceVector mDevices; /**< current devices this output is routed to */
-    nsecs_t mStopTime[AUDIO_STREAM_CNT];
-    int mMuteCount[AUDIO_STREAM_CNT];            // mute request counter
-    bool mStrategyMutedByDevice[NUM_STRATEGIES]; // strategies muted because of incompatible
-                                        // device selection. See checkDeviceMuteStrategies()
     AudioMix *mPolicyMix = nullptr;              // non NULL when used by a dynamic policy
 
 protected:
     const sp<AudioPort> mPort;
     AudioPolicyClientInterface * const mClientInterface;
-    float mCurVolume[AUDIO_STREAM_CNT];   // current stream volume in dB
-    uint32_t mActiveCount[AUDIO_STREAM_CNT]; // number of streams of each type active on this output
     uint32_t mGlobalActiveCount = 0;  // non-client-specific active count
     audio_patch_handle_t mPatchHandle = AUDIO_PATCH_HANDLE_NONE;
     audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
 
-    // The ActiveClientMap shows the clients that contribute to the streams counts
+    // The ActiveClients shows the clients that contribute to the @VolumeSource counts
     // and may include upstream clients from a duplicating thread.
     // Compare with the ClientMap (mClients) which are external AudioTrack clients of the
     // output descriptor (and do not count internal PatchTracks).
-    ActiveClientMap mActiveClients;
+    TrackClientVector mActiveClients;
+
+    RoutingActivities mRoutingActivities; /**< track routing activity on this output. */
+
+    VolumeActivities mVolumeActivities; /**< track volume activity on this output. */
 };
 
 // Audio output driven by a software mixer in audio flinger.
@@ -160,8 +320,13 @@
     virtual bool isFixedVolume(audio_devices_t device);
     sp<SwAudioOutputDescriptor> subOutput1() { return mOutput1; }
     sp<SwAudioOutputDescriptor> subOutput2() { return mOutput2; }
-            void changeStreamActiveCount(
-                    const sp<TrackClientDescriptor>& client, int delta) override;
+    void setClientActive(const sp<TrackClientDescriptor>& client, bool active) override;
+    void setAllClientsInactive()
+    {
+        for (const auto &client : clientsList(true)) {
+            setClientActive(client, false);
+        }
+    }
     virtual bool setVolume(float volume,
                            audio_stream_type_t stream,
                            audio_devices_t device,
@@ -254,25 +419,52 @@
         public DefaultKeyedVector< audio_io_handle_t, sp<SwAudioOutputDescriptor> >
 {
 public:
-    bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+    bool isActive(VolumeSource volumeSource, uint32_t inPastMs = 0) const;
 
     /**
-     * return whether a stream is playing remotely, override to change the definition of
+     * return whether any source contributing to VolumeSource is playing remotely, override 
+     * to change the definition of
      * local/remote playback, used for instance by notification manager to not make
      * media players lose audio focus when not playing locally
      * For the base implementation, "remotely" means playing during screen mirroring which
      * uses an output for playback with a non-empty, non "0" address.
      */
-    bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+    bool isActiveRemotely(VolumeSource volumeSource, uint32_t inPastMs = 0) const;
 
     /**
-     * return whether a stream is playing, but not on a "remote" device.
+     * return whether any source contributing to VolumeSource is playing, but not on a "remote"
+     * device.
      * Override to change the definition of a local/remote playback.
      * Used for instance by policy manager to alter the speaker playback ("speaker safe" behavior)
      * when media plays or not locally.
      * For the base implementation, "remotely" means playing during screen mirroring.
      */
-    bool isStreamActiveLocally(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+    bool isActiveLocally(VolumeSource volumeSource, uint32_t inPastMs = 0) const;
+
+    /**
+     * @brief isStrategyActiveOnSameModule checks if the given strategy is active (or was active
+     * in the past) on the given output or on any other output belonging to the same HW module
+     * as the given output
+     * @param desc output descriptor to be considered
+     * @param ps product strategy to be checked upon activity status
+     * @param inPastMs if 0, check currently, otherwise, check in the past
+     * @param sysTime shall be set if request is done for the past activity.
+     * @return true if an output following the strategy is active on the same module as desc,
+     * false otherwise
+     */
+    bool isStrategyActiveOnSameModule(product_strategy_t ps,
+                                      const sp<SwAudioOutputDescriptor>& desc,
+                                      uint32_t inPastMs = 0, nsecs_t sysTime = 0) const;
+
+    /**
+     * @brief clearSessionRoutesForDevice: when a device is disconnected, and if this device has
+     * been chosen as the preferred device by any client, the policy manager shall
+     * prevent any further use of this device by clearing all the session routes involving it.
+     * In other words, the preferred device port id of these clients will be reset to NONE.
+     * @param disconnectedDevice device to be disconnected
+     */
+    void clearSessionRoutesForDevice(const sp<DeviceDescriptor> &disconnectedDevice);
 
     /**
      * returns the A2DP output handle if it is open or 0 otherwise
@@ -294,9 +486,21 @@
     sp<SwAudioOutputDescriptor> getPrimaryOutput() const;
 
     /**
-     * return true if any output is playing anything besides the stream to ignore
+     * @brief isAnyOutputActive checks if any output is active (aka playing) except the one(s) that
+     * hold the volume source to be ignored
+     * @param volumeSourceToIgnore source not considered in the activity detection
+     * @return true if any output is active for any source except the one to be ignored
      */
-    bool isAnyOutputActive(audio_stream_type_t streamToIgnore) const;
+    bool isAnyOutputActive(VolumeSource volumeSourceToIgnore) const
+    {
+        for (size_t i = 0; i < size(); i++) {
+            const sp<AudioOutputDescriptor> &outputDesc = valueAt(i);
+            if (outputDesc->isAnyActive(volumeSourceToIgnore)) {
+                return true;
+            }
+        }
+        return false;
+    }
 
     audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
 
@@ -309,12 +513,24 @@
         public DefaultKeyedVector< audio_io_handle_t, sp<HwAudioOutputDescriptor> >
 {
 public:
-    bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+    bool isActive(VolumeSource volumeSource, uint32_t inPastMs = 0) const;
 
     /**
-     * return true if any output is playing anything besides the stream to ignore
+     * @brief isAnyOutputActive checks if any output is active (i.e. playing) for any volume
+     * source other than the one to be ignored
+     * @param volumeSourceToIgnore source not considered in the activity detection
+     * @return true if any output is active for any source except the one to be ignored
      */
-    bool isAnyOutputActive(audio_stream_type_t streamToIgnore) const;
+    bool isAnyOutputActive(VolumeSource volumeSourceToIgnore) const
+    {
+        for (size_t i = 0; i < size(); i++) {
+            const sp<AudioOutputDescriptor> &outputDesc = valueAt(i);
+            if (outputDesc->isAnyActive(volumeSourceToIgnore)) {
+                return true;
+            }
+        }
+        return false;
+    }
 
     void dump(String8 *dst) const;
 };
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index d52eb3d..2264d8f 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -20,7 +20,6 @@
 #include <unordered_set>
 
 #include <AudioGain.h>
-#include <VolumeCurve.h>
 #include <AudioPort.h>
 #include <AudioPatch.h>
 #include <DeviceDescriptor.h>
@@ -40,13 +39,11 @@
     AudioPolicyConfig(HwModuleCollection &hwModules,
                       DeviceVector &availableOutputDevices,
                       DeviceVector &availableInputDevices,
-                      sp<DeviceDescriptor> &defaultOutputDevice,
-                      VolumeCurvesCollection *volumes = nullptr)
+                      sp<DeviceDescriptor> &defaultOutputDevice)
         : mHwModules(hwModules),
           mAvailableOutputDevices(availableOutputDevices),
           mAvailableInputDevices(availableInputDevices),
           mDefaultOutputDevice(defaultOutputDevice),
-          mVolumeCurves(volumes),
           mIsSpeakerDrcEnabled(false)
     {}
 
@@ -58,13 +55,6 @@
         mSource = file;
     }
 
-    void setVolumes(const VolumeCurvesCollection &volumes)
-    {
-        if (mVolumeCurves != nullptr) {
-            *mVolumeCurves = volumes;
-        }
-    }
-
     void setHwModules(const HwModuleCollection &hwModules)
     {
         mHwModules = hwModules;
@@ -182,7 +172,6 @@
     DeviceVector &mAvailableOutputDevices;
     DeviceVector &mAvailableInputDevices;
     sp<DeviceDescriptor> &mDefaultOutputDevice;
-    VolumeCurvesCollection *mVolumeCurves;
     // TODO: remove when legacy conf file is removed. true on devices that use DRC on the
     // DEVICE_CATEGORY_SPEAKER path to boost soft sounds, used to adjust volume curves accordingly.
     // Note: remove also speaker_drc_enabled from global configuration of XML config file.
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index 2932296..d6f24b2 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -43,12 +43,12 @@
 
     android::AudioMix *getMix();
 
-    void setMix(AudioMix &mix);
+    void setMix(const AudioMix &mix);
 
     void dump(String8 *dst, int spaces, int index) const;
 
 private:
-    AudioMix    mMix;                   // Audio policy mix descriptor
+    AudioMix    mMix;                     // Audio policy mix descriptor
     sp<SwAudioOutputDescriptor> mOutput;  // Corresponding output stream
 };
 
@@ -68,17 +68,16 @@
      * Try to find an output descriptor for the given attributes.
      *
      * @param[in] attributes to consider for the search of an output descriptor.
-     * @param[out] desc to return if an output could be found.
-     *
-     * @return NO_ERROR if an output was found for the given attribute (in this case, the
-     *                  descriptor output param is initialized), error code otherwise.
+     * @param[out] primaryDesc to return if a primary output could be found.
+     * @param[out] secondaryDescs other descriptors that the audio should also be routed to.
      */
-    status_t getOutputForAttr(audio_attributes_t attributes, uid_t uid,
-            sp<SwAudioOutputDescriptor> &desc);
+    status_t getOutputForAttr(const audio_attributes_t& attributes, uid_t uid,
+                sp<SwAudioOutputDescriptor> &primaryDesc,
+                std::vector<sp<SwAudioOutputDescriptor>> *secondaryDescs);
 
     sp<DeviceDescriptor> getDeviceAndMixForInputSource(audio_source_t inputSource,
                                                        const DeviceVector &availableDeviceTypes,
-                                                       AudioMix **policyMix);
+                                                       AudioMix **policyMix) const;
 
     /**
      * @brief try to find a matching mix for a given output descriptor and returns the associated
@@ -99,6 +98,11 @@
     status_t getDevicesForUid(uid_t uid, Vector<AudioDeviceTypeAddr>& devices) const;
 
     void dump(String8 *dst) const;
+
+private:
+    enum class MixMatchStatus { MATCH, NO_MATCH, INVALID_MIX };
+    MixMatchStatus mixMatch(const AudioMix* mix, size_t mixIndex,
+                            const audio_attributes_t& attributes, uid_t uid);
 };
 
 } // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index a187029..4bb225d 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -22,14 +22,15 @@
 #include <sys/types.h>
 
 #include <system/audio.h>
-#include <system/audio_policy.h>
+#include <media/AudioProductStrategy.h>
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/RefBase.h>
 #include <utils/String8.h>
+#include <policy.h>
+#include <Volume.h>
 #include "AudioPatch.h"
 #include "EffectDescriptor.h"
-#include "RoutingStrategy.h"
 
 namespace android {
 
@@ -41,10 +42,12 @@
 {
 public:
     ClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_session_t sessionId,
-                   audio_attributes_t attributes, audio_config_base_t config,
-                   audio_port_handle_t preferredDeviceId) :
+                     audio_attributes_t attributes, audio_config_base_t config,
+                     audio_port_handle_t preferredDeviceId,
+                     bool isPreferredDeviceForExclusiveUse = false) :
         mPortId(portId), mUid(uid), mSessionId(sessionId), mAttributes(attributes),
-        mConfig(config), mPreferredDeviceId(preferredDeviceId), mActive(false) {}
+        mConfig(config), mPreferredDeviceId(preferredDeviceId), mActive(false),
+        mPreferredDeviceForExclusiveUse(isPreferredDeviceForExclusiveUse){}
     ~ClientDescriptor() override = default;
 
     virtual void dump(String8 *dst, int spaces, int index) const;
@@ -58,8 +61,9 @@
     audio_port_handle_t preferredDeviceId() const { return mPreferredDeviceId; };
     void setPreferredDeviceId(audio_port_handle_t preferredDeviceId) {
         mPreferredDeviceId = preferredDeviceId;
-    };
-    void setActive(bool active) { mActive = active; }
+    }
+    bool isPreferredDeviceForExclusiveUse() const { return mPreferredDeviceForExclusiveUse; }
+    virtual void setActive(bool active) { mActive = active; }
     bool active() const { return mActive; }
     bool hasPreferredDevice(bool activeOnly = false) const {
         return mPreferredDeviceId != AUDIO_PORT_HANDLE_NONE && (!activeOnly || mActive);
@@ -73,17 +77,23 @@
     const audio_config_base_t mConfig;
           audio_port_handle_t mPreferredDeviceId;  // selected input device port ID
           bool mActive;
+          bool mPreferredDeviceForExclusiveUse = false;
 };
 
 class TrackClientDescriptor: public ClientDescriptor
 {
 public:
     TrackClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_session_t sessionId,
-                   audio_attributes_t attributes, audio_config_base_t config,
-                   audio_port_handle_t preferredDeviceId, audio_stream_type_t stream,
-                          routing_strategy strategy, audio_output_flags_t flags) :
-        ClientDescriptor(portId, uid, sessionId, attributes, config, preferredDeviceId),
-        mStream(stream), mStrategy(strategy), mFlags(flags) {}
+                          audio_attributes_t attributes, audio_config_base_t config,
+                          audio_port_handle_t preferredDeviceId, audio_stream_type_t stream,
+                          product_strategy_t strategy, VolumeSource volumeSource,
+                          audio_output_flags_t flags,
+                          bool isPreferredDeviceForExclusiveUse,
+                          std::vector<wp<SwAudioOutputDescriptor>> secondaryOutputs) :
+        ClientDescriptor(portId, uid, sessionId, attributes, config, preferredDeviceId,
+                         isPreferredDeviceForExclusiveUse),
+        mStream(stream), mStrategy(strategy), mVolumeSource(volumeSource), mFlags(flags),
+        mSecondaryOutputs(std::move(secondaryOutputs)) {}
     ~TrackClientDescriptor() override = default;
 
     using ClientDescriptor::dump;
@@ -92,12 +102,45 @@
 
     audio_output_flags_t flags() const { return mFlags; }
     audio_stream_type_t stream() const { return mStream; }
-    routing_strategy strategy() const { return mStrategy; }
+    product_strategy_t strategy() const { return mStrategy; }
+    const std::vector<wp<SwAudioOutputDescriptor>>& getSecondaryOutputs() const {
+        return mSecondaryOutputs;
+    };
+    VolumeSource volumeSource() const { return mVolumeSource; }
+
+    void setActive(bool active) override
+    {
+        int delta = active ? 1 : -1;
+        changeActivityCount(delta);
+    }
+    void changeActivityCount(int delta)
+    {
+        if (delta > 0) {
+            mActivityCount += delta;
+        } else {
+            LOG_ALWAYS_FATAL_IF(!mActivityCount, "%s(%s) invalid delta %d, inactive client",
+                                 __func__, toShortString().c_str(), delta);
+            LOG_ALWAYS_FATAL_IF(static_cast<int>(mActivityCount) < -delta,
+                                "%s(%s) invalid delta %d, active client count %d",
+                                 __func__, toShortString().c_str(), delta, mActivityCount);
+            mActivityCount += delta;
+        }
+        ClientDescriptor::setActive(mActivityCount > 0);
+    }
+    uint32_t getActivityCount() const { return mActivityCount; }
 
 private:
     const audio_stream_type_t mStream;
-    const routing_strategy mStrategy;
+    const product_strategy_t mStrategy;
+    const VolumeSource mVolumeSource;
     const audio_output_flags_t mFlags;
+    const std::vector<wp<SwAudioOutputDescriptor>> mSecondaryOutputs;
+
+    /**
+     * Required for duplicated outputs: prevents a client active on an output involved in a
+     * duplication from being reported inactive before every involved output has released it.
+     */
+    uint32_t mActivityCount = 0;
 };
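The activity count above exists because a duplicated output forwards (de)activation to both of its sub-outputs as well as to itself, so the same client can be counted several times and must stay active until every count is released. A minimal standalone sketch of that refcounting behaviour (plain C++, an assumption-only illustration, not AOSP code):

    #include <cassert>

    // Hypothetical stand-in for the per-client activity refcount.
    struct Client {
        int activityCount = 0;
        bool active = false;
        void changeActivityCount(int delta) {
            assert(activityCount + delta >= 0 && "more deactivations than activations");
            activityCount += delta;
            active = activityCount > 0;
        }
    };

    int main() {
        Client c;
        c.changeActivityCount(+1);  // duplicating output itself
        c.changeActivityCount(+1);  // sub-output 1
        c.changeActivityCount(+1);  // sub-output 2
        c.changeActivityCount(-1);  // one output stops
        assert(c.active);           // still counted as active by the others
        c.changeActivityCount(-1);
        c.changeActivityCount(-1);
        assert(!c.active);          // inactive only once all counts are released
        return 0;
    }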
 
 class RecordClientDescriptor: public ClientDescriptor
@@ -136,7 +179,8 @@
 public:
     SourceClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_attributes_t attributes,
                            const sp<AudioPatch>& patchDesc, const sp<DeviceDescriptor>& srcDevice,
-                           audio_stream_type_t stream, routing_strategy strategy);
+                           audio_stream_type_t stream, product_strategy_t strategy,
+                           VolumeSource volumeSource);
     ~SourceClientDescriptor() override = default;
 
     sp<AudioPatch> patchDesc() const { return mPatchDesc; }
diff --git a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
index 2dc33ab..7f01dc5 100644
--- a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
@@ -16,7 +16,7 @@
 
 #pragma once
 
-#include <RoutingStrategy.h>
+#include <policy.h>
 #include <system/audio_effect.h>
 #include <utils/KeyedVector.h>
 #include <utils/RefBase.h>
@@ -28,14 +28,26 @@
 class EffectDescriptor : public RefBase
 {
 public:
+    EffectDescriptor(const effect_descriptor_t *desc, bool isMusicEffect,
+                     int id, int io, int session) :
+        mId(id), mIo(io), mSession(session), mEnabled(false),
+        mIsMusicEffect(isMusicEffect)
+    {
+        memcpy (&mDesc, desc, sizeof(effect_descriptor_t));
+    }
+
     void dump(String8 *dst, int spaces = 0) const;
 
     int mId;                // effect unique ID
     int mIo;                // io the effect is attached to
-    routing_strategy mStrategy; // routing strategy the effect is associated to
     int mSession;               // audio session the effect is on
     effect_descriptor_t mDesc;  // effect descriptor
     bool mEnabled;              // enabled state: CPU load being used or not
+
+    bool isMusicEffect() const { return mIsMusicEffect; }
+
+private:
+    bool mIsMusicEffect;
 };
 
 class EffectDescriptorCollection : public KeyedVector<int, sp<EffectDescriptor> >
@@ -44,7 +56,7 @@
     EffectDescriptorCollection();
 
     status_t registerEffect(const effect_descriptor_t *desc, audio_io_handle_t io,
-                            uint32_t strategy, int session, int id);
+                            int session, int id, bool isMusicEffect);
     status_t unregisterEffect(int id);
     sp<EffectDescriptor> getEffect(int id) const;
     status_t setEffectEnabled(int id, bool enabled);
diff --git a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h
new file mode 100644
index 0000000..93022fb
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+#include <Volume.h>
+#include <utils/Errors.h>
+#include <utils/String8.h>
+#include <vector>
+
+namespace android {
+
+class IVolumeCurves
+{
+public:
+    virtual ~IVolumeCurves() = default;
+
+    virtual void clearCurrentVolumeIndex() = 0;
+    virtual void addCurrentVolumeIndex(audio_devices_t device, int index) = 0;
+    virtual bool canBeMuted() const = 0;
+    virtual int getVolumeIndexMin() const = 0;
+    virtual int getVolumeIndex(audio_devices_t device) const = 0;
+    virtual int getVolumeIndexMax() const = 0;
+    virtual float volIndexToDb(device_category device, int indexInUi) const = 0;
+    virtual bool hasVolumeIndexForDevice(audio_devices_t device) const = 0;
+    virtual status_t initVolume(int indexMin, int indexMax) = 0;
+    virtual void dump(String8 *dst, int spaces = 0, bool curvePoints = false) const = 0;
+};
+
+} // namespace android
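For context, volIndexToDb() maps a UI volume index onto an attenuation in dB by interpolating between curve points whose attenuation is stored in millibels. A minimal standalone sketch of that kind of mapping (plain C++; the CurvePoint layout, the rescaling onto a 0..100 domain and the linear interpolation are assumptions, not the exact AOSP curves):

    #include <cstddef>
    #include <vector>

    struct CurvePoint { int index; int attenuationInMb; };  // index in [0, 100]

    // Hypothetical helper: rescale indexInUi onto the curve's 0..100 domain, then
    // linearly interpolate the attenuation (millibels) between neighbouring points.
    float volIndexToDb(const std::vector<CurvePoint> &curve,
                       int indexInUi, int indexMin, int indexMax) {
        if (curve.empty() || indexMax <= indexMin) return 0.0f;
        const float pos = 100.0f * (indexInUi - indexMin) / (indexMax - indexMin);
        if (pos <= curve.front().index) return curve.front().attenuationInMb / 100.0f;
        if (pos >= curve.back().index)  return curve.back().attenuationInMb / 100.0f;
        for (std::size_t i = 1; i < curve.size(); ++i) {
            if (pos > curve[i].index) continue;
            const float span = static_cast<float>(curve[i].index - curve[i - 1].index);
            const float frac = span > 0.0f ? (pos - curve[i - 1].index) / span : 0.0f;
            const float mb = curve[i - 1].attenuationInMb +
                    frac * (curve[i].attenuationInMb - curve[i - 1].attenuationInMb);
            return mb / 100.0f;  // millibels to dB
        }
        return curve.back().attenuationInMb / 100.0f;
    }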
diff --git a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h
deleted file mode 100644
index 750da55..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <system/audio.h>
-#include <Volume.h>
-#include <utils/Errors.h>
-#include <utils/String8.h>
-
-namespace android {
-
-class IVolumeCurvesCollection
-{
-public:
-    virtual ~IVolumeCurvesCollection() = default;
-
-    virtual void clearCurrentVolumeIndex(audio_stream_type_t stream) = 0;
-    virtual void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device,
-                                       int index) = 0;
-    virtual bool canBeMuted(audio_stream_type_t stream) = 0;
-    virtual int getVolumeIndexMin(audio_stream_type_t stream) const = 0;
-    virtual int getVolumeIndex(audio_stream_type_t stream, audio_devices_t device) = 0;
-    virtual int getVolumeIndexMax(audio_stream_type_t stream) const = 0;
-    virtual float volIndexToDb(audio_stream_type_t stream, device_category device,
-                               int indexInUi) const = 0;
-    virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax) = 0;
-
-    virtual void initializeVolumeCurves(bool /*isSpeakerDrcEnabled*/) {}
-    virtual void switchVolumeCurve(audio_stream_type_t src, audio_stream_type_t dst) = 0;
-    virtual void restoreOriginVolumeCurve(audio_stream_type_t stream)
-    {
-        switchVolumeCurve(stream, stream);
-    }
-    virtual bool hasVolumeIndexForDevice(audio_stream_type_t stream,
-                                         audio_devices_t device) const = 0;
-
-    virtual void dump(String8 *dst) const = 0;
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h b/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
deleted file mode 100644
index 76ec198..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "IVolumeCurvesCollection.h"
-#include <policy.h>
-#include <utils/RefBase.h>
-#include <utils/String8.h>
-#include <utils/SortedVector.h>
-#include <utils/KeyedVector.h>
-#include <system/audio.h>
-#include <cutils/config_utils.h>
-#include <string>
-#include <utility>
-
-namespace android {
-
-struct CurvePoint
-{
-    CurvePoint() {}
-    CurvePoint(int index, int attenuationInMb) :
-        mIndex(index), mAttenuationInMb(attenuationInMb) {}
-    uint32_t mIndex;
-    int mAttenuationInMb;
-};
-
-inline bool operator< (const CurvePoint &lhs, const CurvePoint &rhs)
-{
-    return lhs.mIndex < rhs.mIndex;
-}
-
-// A volume curve for a given use case and device category
-// It contains of list of points of this curve expressing the attenuation in Millibels for
-// a given volume index from 0 to 100
-class VolumeCurve : public RefBase
-{
-public:
-    VolumeCurve(device_category device, audio_stream_type_t stream) :
-        mDeviceCategory(device), mStreamType(stream) {}
-
-    device_category getDeviceCategory() const { return mDeviceCategory; }
-    audio_stream_type_t getStreamType() const { return mStreamType; }
-
-    void add(const CurvePoint &point) { mCurvePoints.add(point); }
-
-    float volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const;
-
-    void dump(String8 *result) const;
-
-private:
-    SortedVector<CurvePoint> mCurvePoints;
-    device_category mDeviceCategory;
-    audio_stream_type_t mStreamType;
-};
-
-// Volume Curves for a given use case indexed by device category
-class VolumeCurvesForStream : public KeyedVector<device_category, sp<VolumeCurve> >
-{
-public:
-    VolumeCurvesForStream() : mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
-    {
-        mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, 0);
-    }
-
-    sp<VolumeCurve> getCurvesFor(device_category device) const
-    {
-        if (indexOfKey(device) < 0) {
-            return 0;
-        }
-        return valueFor(device);
-    }
-
-    int getVolumeIndex(audio_devices_t device) const
-    {
-        device = Volume::getDeviceForVolume(device);
-        // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME
-        if (mIndexCur.indexOfKey(device) < 0) {
-            device = AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME;
-        }
-        return mIndexCur.valueFor(device);
-    }
-
-    bool canBeMuted() const { return mCanBeMuted; }
-    void clearCurrentVolumeIndex() { mIndexCur.clear(); }
-    void addCurrentVolumeIndex(audio_devices_t device, int index) { mIndexCur.add(device, index); }
-
-    void setVolumeIndexMin(int volIndexMin) { mIndexMin = volIndexMin; }
-    int getVolumeIndexMin() const { return mIndexMin; }
-
-    void setVolumeIndexMax(int volIndexMax) { mIndexMax = volIndexMax; }
-    int getVolumeIndexMax() const { return mIndexMax; }
-
-    bool hasVolumeIndexForDevice(audio_devices_t device) const
-    {
-        device = Volume::getDeviceForVolume(device);
-        return mIndexCur.indexOfKey(device) >= 0;
-    }
-
-    const sp<VolumeCurve> getOriginVolumeCurve(device_category deviceCategory) const
-    {
-        ALOG_ASSERT(mOriginVolumeCurves.indexOfKey(deviceCategory) >= 0, "Invalid device category");
-        return mOriginVolumeCurves.valueFor(deviceCategory);
-    }
-    void setVolumeCurve(device_category deviceCategory, const sp<VolumeCurve> &volumeCurve)
-    {
-        ALOG_ASSERT(indexOfKey(deviceCategory) >= 0, "Invalid device category for Volume Curve");
-        replaceValueFor(deviceCategory, volumeCurve);
-    }
-
-    ssize_t add(const sp<VolumeCurve> &volumeCurve)
-    {
-        device_category deviceCategory = volumeCurve->getDeviceCategory();
-        ssize_t index = indexOfKey(deviceCategory);
-        if (index < 0) {
-            // Keep track of original Volume Curves per device category in order to switch curves.
-            mOriginVolumeCurves.add(deviceCategory, volumeCurve);
-            return KeyedVector::add(deviceCategory, volumeCurve);
-        }
-        return index;
-    }
-
-    float volIndexToDb(device_category deviceCat, int indexInUi) const
-    {
-        sp<VolumeCurve> vc = getCurvesFor(deviceCat);
-        if (vc != 0) {
-            return vc->volIndexToDb(indexInUi, mIndexMin, mIndexMax);
-        } else {
-            ALOGE("Invalid device category %d for Volume Curve", deviceCat);
-            return 0.0f;
-        }
-    }
-
-    void dump(String8 *dst, int spaces, bool curvePoints = false) const;
-
-private:
-    KeyedVector<device_category, sp<VolumeCurve> > mOriginVolumeCurves;
-    KeyedVector<audio_devices_t, int> mIndexCur; /**< current volume index per device. */
-    int mIndexMin; /**< min volume index. */
-    int mIndexMax; /**< max volume index. */
-    bool mCanBeMuted; /**< true is the stream can be muted. */
-};
-
-// Collection of Volume Curves indexed by use case
-class VolumeCurvesCollection : public KeyedVector<audio_stream_type_t, VolumeCurvesForStream>,
-                               public IVolumeCurvesCollection
-{
-public:
-    VolumeCurvesCollection()
-    {
-        // Create an empty collection of curves
-        for (ssize_t i = 0 ; i < AUDIO_STREAM_CNT; i++) {
-            audio_stream_type_t stream = static_cast<audio_stream_type_t>(i);
-            KeyedVector::add(stream, VolumeCurvesForStream());
-        }
-    }
-
-    // Once XML has been parsed, must be call first to sanity check table and initialize indexes
-    virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax)
-    {
-        editValueAt(stream).setVolumeIndexMin(indexMin);
-        editValueAt(stream).setVolumeIndexMax(indexMax);
-        return NO_ERROR;
-    }
-    virtual void clearCurrentVolumeIndex(audio_stream_type_t stream)
-    {
-        editCurvesFor(stream).clearCurrentVolumeIndex();
-    }
-    virtual void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device, int index)
-    {
-        editCurvesFor(stream).addCurrentVolumeIndex(device, index);
-    }
-    virtual bool canBeMuted(audio_stream_type_t stream) { return getCurvesFor(stream).canBeMuted(); }
-
-    virtual int getVolumeIndexMin(audio_stream_type_t stream) const
-    {
-        return getCurvesFor(stream).getVolumeIndexMin();
-    }
-    virtual int getVolumeIndexMax(audio_stream_type_t stream) const
-    {
-        return getCurvesFor(stream).getVolumeIndexMax();
-    }
-    virtual int getVolumeIndex(audio_stream_type_t stream, audio_devices_t device)
-    {
-        return getCurvesFor(stream).getVolumeIndex(device);
-    }
-    virtual void switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst)
-    {
-        const VolumeCurvesForStream &sourceCurves = getCurvesFor(streamSrc);
-        VolumeCurvesForStream &dstCurves = editCurvesFor(streamDst);
-        ALOG_ASSERT(sourceCurves.size() == dstCurves.size(), "device category not aligned");
-        for (size_t index = 0; index < sourceCurves.size(); index++) {
-            device_category cat = sourceCurves.keyAt(index);
-            dstCurves.setVolumeCurve(cat, sourceCurves.getOriginVolumeCurve(cat));
-        }
-    }
-    virtual float volIndexToDb(audio_stream_type_t stream, device_category cat, int indexInUi) const
-    {
-        return getCurvesFor(stream).volIndexToDb(cat, indexInUi);
-    }
-    virtual bool hasVolumeIndexForDevice(audio_stream_type_t stream,
-                                         audio_devices_t device) const
-    {
-        return getCurvesFor(stream).hasVolumeIndexForDevice(device);
-    }
-
-    void dump(String8 *dst) const override;
-
-    ssize_t add(const sp<VolumeCurve> &volumeCurve)
-    {
-        audio_stream_type_t streamType = volumeCurve->getStreamType();
-        return editCurvesFor(streamType).add(volumeCurve);
-    }
-    VolumeCurvesForStream &editCurvesFor(audio_stream_type_t stream)
-    {
-        ALOG_ASSERT(indexOfKey(stream) >= 0, "Invalid stream type for Volume Curve");
-        return editValueAt(stream);
-    }
-    const VolumeCurvesForStream &getCurvesFor(audio_stream_type_t stream) const
-    {
-        ALOG_ASSERT(indexOfKey(stream) >= 0, "Invalid stream type for Volume Curve");
-        return valueFor(stream);
-    }
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index c880e67..1fa1123 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -511,6 +511,19 @@
     }
 }
 
+void AudioInputCollection::clearSessionRoutesForDevice(
+    const sp<DeviceDescriptor> &disconnectedDevice)
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioInputDescriptor> inputDesc = valueAt(i);
+        for (const auto& client : inputDesc->getClientIterable()) {
+            if (client->preferredDeviceId() == disconnectedDevice->getId()) {
+                client->setPreferredDeviceId(AUDIO_PORT_HANDLE_NONE);
+            }
+        }
+    }
+}
+
 void AudioInputCollection::dump(String8 *dst) const
 {
     dst->append("\nInputs dump:\n");
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 78b3f45..7293bc4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -34,19 +34,8 @@
 
 AudioOutputDescriptor::AudioOutputDescriptor(const sp<AudioPort>& port,
                                              AudioPolicyClientInterface *clientInterface)
-    : mPort(port)
-    , mClientInterface(clientInterface)
+    : mPort(port), mClientInterface(clientInterface)
 {
-    // clear usage count for all stream types
-    for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
-        mActiveCount[i] = 0;
-        mCurVolume[i] = -1.0;
-        mMuteCount[i] = 0;
-        mStopTime[i] = 0;
-    }
-    for (int i = 0; i < NUM_STRATEGIES; i++) {
-        mStrategyMutedByDevice[i] = false;
-    }
     if (mPort.get() != nullptr) {
         mPort->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
         if (mPort->mGains.size() > 0) {
@@ -88,117 +77,73 @@
     return hasSameHwModuleAs(outputDesc);
 }
 
-void AudioOutputDescriptor::changeStreamActiveCount(const sp<TrackClientDescriptor>& client,
-                                                    int delta)
+void AudioOutputDescriptor::setStopTime(const sp<TrackClientDescriptor>& client, nsecs_t sysTime)
 {
-    if (delta == 0) return;
-    const audio_stream_type_t stream = client->stream();
-    if ((delta + (int)mActiveCount[stream]) < 0) {
-        // any mismatched active count will abort.
-        LOG_ALWAYS_FATAL("%s(%s) invalid delta %d, active stream count %d",
-              __func__, client->toShortString().c_str(), delta, mActiveCount[stream]);
-        // mActiveCount[stream] = 0;
-        // return;
-    }
-    mActiveCount[stream] += delta;
-
-    if (delta > 0) {
-        mActiveClients[client] += delta;
-    } else {
-        auto it = mActiveClients.find(client);
-        if (it == mActiveClients.end()) { // client not found!
-            LOG_ALWAYS_FATAL("%s(%s) invalid delta %d, inactive client",
-                    __func__, client->toShortString().c_str(), delta);
-        } else if (it->second < -delta) { // invalid delta!
-            LOG_ALWAYS_FATAL("%s(%s) invalid delta %d, active client count %zu",
-                    __func__, client->toShortString().c_str(), delta, it->second);
-        }
-        it->second += delta;
-        if (it->second == 0) {
-            (void)mActiveClients.erase(it);
-        }
-    }
-
-    ALOGV("%s stream %d, count %d", __FUNCTION__, stream, mActiveCount[stream]);
+    mVolumeActivities[client->volumeSource()].setStopTime(sysTime);
+    mRoutingActivities[client->strategy()].setStopTime(sysTime);
 }
 
 void AudioOutputDescriptor::setClientActive(const sp<TrackClientDescriptor>& client, bool active)
 {
-    LOG_ALWAYS_FATAL_IF(getClient(client->portId()) == nullptr,
-        "%s(%d) does not exist on output descriptor", __func__, client->portId());
-
-    if (active == client->active()) {
-        ALOGW("%s(%s): ignored active: %d, current stream count %d",
-                __func__, client->toShortString().c_str(),
-                active, mActiveCount[client->stream()]);
+    auto clientIter = std::find(begin(mActiveClients), end(mActiveClients), client);
+    if (active == (clientIter != end(mActiveClients))) {
+        ALOGW("%s(%s): ignored active: %d, current stream count %d", __func__,
+              client->toShortString().c_str(), active,
+              mRoutingActivities.at(client->strategy()).getActivityCount());
         return;
     }
+    if (active) {
+        mActiveClients.push_back(client);
+    } else {
+        mActiveClients.erase(clientIter);
+    }
     const int delta = active ? 1 : -1;
-    changeStreamActiveCount(client, delta);
+    // If ps is unknown, it is time to track it!
+    mRoutingActivities[client->strategy()].changeActivityCount(delta);
+    mVolumeActivities[client->volumeSource()].changeActivityCount(delta);
 
     // Handle non-client-specific activity ref count
     int32_t oldGlobalActiveCount = mGlobalActiveCount;
     if (!active && mGlobalActiveCount < 1) {
         ALOGW("%s(%s): invalid deactivation with globalRefCount %d",
-                __func__, client->toShortString().c_str(), mGlobalActiveCount);
+              __func__, client->toShortString().c_str(), mGlobalActiveCount);
         mGlobalActiveCount = 1;
     }
     mGlobalActiveCount += delta;
 
-    if ((oldGlobalActiveCount == 0) && (mGlobalActiveCount > 0)) {
-        if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
-        {
+    if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+        if ((oldGlobalActiveCount == 0) || (mGlobalActiveCount == 0)) {
             mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
-                                                            MIX_STATE_MIXING);
-        }
-    } else if ((oldGlobalActiveCount > 0) && (mGlobalActiveCount == 0)) {
-        if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
-        {
-            mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
-                                                            MIX_STATE_IDLE);
+                mGlobalActiveCount > 0 ? MIX_STATE_MIXING : MIX_STATE_IDLE);
         }
     }
-
     client->setActive(active);
 }
 
+bool AudioOutputDescriptor::isActive(VolumeSource vs, uint32_t inPastMs, nsecs_t sysTime) const
+{
+    return (vs == VOLUME_SOURCE_NONE) ?
+                isActive(inPastMs) : (mVolumeActivities.find(vs) != std::end(mVolumeActivities)?
+                mVolumeActivities.at(vs).isActive(inPastMs, sysTime) : false);
+}
+
 bool AudioOutputDescriptor::isActive(uint32_t inPastMs) const
 {
     nsecs_t sysTime = 0;
     if (inPastMs != 0) {
         sysTime = systemTime();
     }
-    for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
-        if (i == AUDIO_STREAM_PATCH) {
+    for (const auto &iter : mVolumeActivities) {
+        if (iter.first == streamToVolumeSource(AUDIO_STREAM_PATCH)) {
             continue;
         }
-        if (isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) {
+        if (iter.second.isActive(inPastMs, sysTime)) {
             return true;
         }
     }
     return false;
 }
 
-bool AudioOutputDescriptor::isStreamActive(audio_stream_type_t stream,
-                                           uint32_t inPastMs,
-                                           nsecs_t sysTime) const
-{
-    if (mActiveCount[stream] != 0) {
-        return true;
-    }
-    if (inPastMs == 0) {
-        return false;
-    }
-    if (sysTime == 0) {
-        sysTime = systemTime();
-    }
-    if (ns2ms(sysTime - mStopTime[stream]) < inPastMs) {
-        return true;
-    }
-    return false;
-}
-
-
 bool AudioOutputDescriptor::isFixedVolume(audio_devices_t device __unused)
 {
     return false;
@@ -213,9 +158,9 @@
     // We actually change the volume if:
     // - the float value returned by computeVolume() changed
     // - the force flag is set
-    if (volume != mCurVolume[stream] || force) {
+    if (volume != getCurVolume(static_cast<VolumeSource>(stream)) || force) {
         ALOGV("setVolume() for stream %d, volume %f, delay %d", stream, volume, delayMs);
-        mCurVolume[stream] = volume;
+        setCurVolume(static_cast<VolumeSource>(stream), volume);
         return true;
     }
     return false;
@@ -247,20 +192,28 @@
     port->ext.mix.hw_module = getModuleHandle();
 }
 
-TrackClientVector AudioOutputDescriptor::clientsList(bool activeOnly, routing_strategy strategy,
+TrackClientVector AudioOutputDescriptor::clientsList(bool activeOnly, product_strategy_t strategy,
                                                      bool preferredDeviceOnly) const
 {
     TrackClientVector clients;
     for (const auto &client : getClientIterable()) {
         if ((!activeOnly || client->active())
-            && (strategy == STRATEGY_NONE || strategy == client->strategy())
-            && (!preferredDeviceOnly || client->hasPreferredDevice())) {
+            && (strategy == PRODUCT_STRATEGY_NONE || strategy == client->strategy())
+            && (!preferredDeviceOnly ||
+                (client->hasPreferredDevice() && !client->isPreferredDeviceForExclusiveUse()))) {
             clients.push_back(client);
         }
     }
     return clients;
 }
 
+bool AudioOutputDescriptor::isAnyActive(VolumeSource volumeSourceToIgnore) const
+{
+    return std::find_if(begin(mActiveClients), end(mActiveClients),
+                        [&volumeSourceToIgnore](const auto &client) {
+        return client->volumeSource() != volumeSourceToIgnore; }) != end(mActiveClients);
+}
+
 void AudioOutputDescriptor::dump(String8 *dst) const
 {
     dst->appendFormat(" ID: %d\n", mId);
@@ -269,20 +222,22 @@
     dst->appendFormat(" Channels: %08x\n", mChannelMask);
     dst->appendFormat(" Devices: %s\n", devices().toString().c_str());
     dst->appendFormat(" Global active count: %u\n", mGlobalActiveCount);
-    dst->append(" Stream volume activeCount muteCount\n");
-    for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
-        dst->appendFormat(" %02d     %.03f     %02d          %02d\n",
-                 i, mCurVolume[i], streamActiveCount((audio_stream_type_t)i), mMuteCount[i]);
+    for (const auto &iter : mRoutingActivities) {
+        dst->appendFormat(" Product Strategy id: %d", iter.first);
+        iter.second.dump(dst, 4);
+    }
+    for (const auto &iter : mVolumeActivities) {
+        dst->appendFormat(" Volume Activities id: %d", iter.first);
+        iter.second.dump(dst, 4);
     }
     dst->append(" AudioTrack Clients:\n");
     ClientMapHandler<TrackClientDescriptor>::dump(dst);
     dst->append("\n");
-    if (mActiveClients.size() > 0) {
+    if (!mActiveClients.empty()) {
         dst->append(" AudioTrack active (stream) clients:\n");
         size_t index = 0;
-        for (const auto& clientPair : mActiveClients) {
-            dst->appendFormat(" Refcount: %zu", clientPair.second);
-            clientPair.first->dump(dst, 2, index++);
+        for (const auto& client : mActiveClients) {
+            client->dump(dst, 2, index++);
         }
         dst->append(" \n");
     }
@@ -383,15 +338,14 @@
     }
 }
 
-void SwAudioOutputDescriptor::changeStreamActiveCount(const sp<TrackClientDescriptor>& client,
-                                                       int delta)
+void SwAudioOutputDescriptor::setClientActive(const sp<TrackClientDescriptor>& client, bool active)
 {
     // forward usage count change to attached outputs
     if (isDuplicated()) {
-        mOutput1->changeStreamActiveCount(client, delta);
-        mOutput2->changeStreamActiveCount(client, delta);
+        mOutput1->setClientActive(client, active);
+        mOutput2->setClientActive(client, active);
     }
-    AudioOutputDescriptor::changeStreamActiveCount(client, delta);
+    AudioOutputDescriptor::setClientActive(client, active);
 }
 
 bool SwAudioOutputDescriptor::isFixedVolume(audio_devices_t device)
@@ -440,19 +394,16 @@
                                         uint32_t delayMs,
                                         bool force)
 {
-    bool changed = AudioOutputDescriptor::setVolume(volume, stream, device, delayMs, force);
-
-    if (changed) {
-        // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is
-        // enabled
-        float volume = Volume::DbToAmpl(mCurVolume[stream]);
-        if (stream == AUDIO_STREAM_BLUETOOTH_SCO) {
-            mClientInterface->setStreamVolume(
-                    AUDIO_STREAM_VOICE_CALL, volume, mIoHandle, delayMs);
-        }
-        mClientInterface->setStreamVolume(stream, volume, mIoHandle, delayMs);
+    if (!AudioOutputDescriptor::setVolume(volume, stream, device, delayMs, force)) {
+        return false;
     }
-    return changed;
+    // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is enabled
+    float volumeAmpl = Volume::DbToAmpl(getCurVolume(static_cast<VolumeSource>(stream)));
+    if (stream == AUDIO_STREAM_BLUETOOTH_SCO) {
+        mClientInterface->setStreamVolume(AUDIO_STREAM_VOICE_CALL, volumeAmpl, mIoHandle, delayMs);
+    }
+    mClientInterface->setStreamVolume(stream, volumeAmpl, mIoHandle, delayMs);
+    return true;
 }
 
 status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
@@ -655,24 +606,24 @@
 }
 
 // SwAudioOutputCollection implementation
-bool SwAudioOutputCollection::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
+bool SwAudioOutputCollection::isActive(VolumeSource volumeSource, uint32_t inPastMs) const
 {
     nsecs_t sysTime = systemTime();
     for (size_t i = 0; i < this->size(); i++) {
         const sp<SwAudioOutputDescriptor> outputDesc = this->valueAt(i);
-        if (outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
+        if (outputDesc->isActive(volumeSource, inPastMs, sysTime)) {
             return true;
         }
     }
     return false;
 }
 
-bool SwAudioOutputCollection::isStreamActiveLocally(audio_stream_type_t stream, uint32_t inPastMs) const
+bool SwAudioOutputCollection::isActiveLocally(VolumeSource volumeSource, uint32_t inPastMs) const
 {
     nsecs_t sysTime = systemTime();
     for (size_t i = 0; i < this->size(); i++) {
         const sp<SwAudioOutputDescriptor> outputDesc = this->valueAt(i);
-        if (outputDesc->isStreamActive(stream, inPastMs, sysTime)
+        if (outputDesc->isActive(volumeSource, inPastMs, sysTime)
                 && ((outputDesc->devices().types() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) == 0)) {
             return true;
         }
@@ -680,14 +631,13 @@
     return false;
 }
 
-bool SwAudioOutputCollection::isStreamActiveRemotely(audio_stream_type_t stream,
-                                                   uint32_t inPastMs) const
+bool SwAudioOutputCollection::isActiveRemotely(VolumeSource volumeSource, uint32_t inPastMs) const
 {
     nsecs_t sysTime = systemTime();
     for (size_t i = 0; i < size(); i++) {
         const sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
         if (((outputDesc->devices().types() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) != 0) &&
-                outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
+                outputDesc->isActive(volumeSource, inPastMs, sysTime)) {
             // do not consider re routing (when the output is going to a dynamic policy)
             // as "remote playback"
             if (outputDesc->mPolicyMix == NULL) {
@@ -698,6 +648,20 @@
     return false;
 }
 
+bool SwAudioOutputCollection::isStrategyActiveOnSameModule(product_strategy_t ps,
+                                                           const sp<SwAudioOutputDescriptor>& desc,
+                                                           uint32_t inPastMs, nsecs_t sysTime) const
+{
+    for (size_t i = 0; i < size(); i++) {
+        const sp<SwAudioOutputDescriptor> otherDesc = valueAt(i);
+        if (desc->sharesHwModuleWith(otherDesc) &&
+                otherDesc->isStrategyActive(ps, inPastMs, sysTime)) {
+            return true;
+        }
+    }
+    return false;
+}
+
 audio_io_handle_t SwAudioOutputCollection::getA2dpOutput() const
 {
     for (size_t i = 0; i < size(); i++) {
@@ -756,22 +720,6 @@
     return NULL;
 }
 
-bool SwAudioOutputCollection::isAnyOutputActive(audio_stream_type_t streamToIgnore) const
-{
-    for (size_t s = 0 ; s < AUDIO_STREAM_CNT ; s++) {
-        if (s == (size_t) streamToIgnore) {
-            continue;
-        }
-        for (size_t i = 0; i < size(); i++) {
-            const sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
-            if (outputDesc->streamActiveCount((audio_stream_type_t)s)!= 0) {
-                return true;
-            }
-        }
-    }
-    return false;
-}
-
 sp<SwAudioOutputDescriptor> SwAudioOutputCollection::getOutputForClient(audio_port_handle_t portId)
 {
     for (size_t i = 0; i < size(); i++) {
@@ -783,6 +731,19 @@
     return 0;
 }
 
+void SwAudioOutputCollection::clearSessionRoutesForDevice(
+        const sp<DeviceDescriptor> &disconnectedDevice)
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioOutputDescriptor> outputDesc = valueAt(i);
+        for (const auto& client : outputDesc->getClientIterable()) {
+            if (client->preferredDeviceId() == disconnectedDevice->getId()) {
+                client->setPreferredDeviceId(AUDIO_PORT_HANDLE_NONE);
+            }
+        }
+    }
+}
+
 void SwAudioOutputCollection::dump(String8 *dst) const
 {
     dst->append("\nOutputs dump:\n");
@@ -793,34 +754,18 @@
 }
 
 // HwAudioOutputCollection implementation
-bool HwAudioOutputCollection::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
+bool HwAudioOutputCollection::isActive(VolumeSource volumeSource, uint32_t inPastMs) const
 {
     nsecs_t sysTime = systemTime();
     for (size_t i = 0; i < this->size(); i++) {
         const sp<HwAudioOutputDescriptor> outputDesc = this->valueAt(i);
-        if (outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
+        if (outputDesc->isActive(volumeSource, inPastMs, sysTime)) {
             return true;
         }
     }
     return false;
 }
 
-bool HwAudioOutputCollection::isAnyOutputActive(audio_stream_type_t streamToIgnore) const
-{
-    for (size_t s = 0 ; s < AUDIO_STREAM_CNT ; s++) {
-        if (s == (size_t) streamToIgnore) {
-            continue;
-        }
-        for (size_t i = 0; i < size(); i++) {
-            const sp<HwAudioOutputDescriptor> outputDesc = valueAt(i);
-            if (outputDesc->streamActiveCount((audio_stream_type_t)s) != 0) {
-                return true;
-            }
-        }
-    }
-    return false;
-}
-
 void HwAudioOutputCollection::dump(String8 *dst) const
 {
     dst->append("\nOutputs dump:\n");
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index cd10c82..2c4695d 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -42,7 +42,7 @@
     mOutput.clear();
 }
 
-void AudioPolicyMix::setMix(AudioMix &mix)
+void AudioPolicyMix::setMix(const AudioMix &mix)
 {
     mMix = mix;
 }
@@ -156,128 +156,175 @@
     }
 }
 
-status_t AudioPolicyMixCollection::getOutputForAttr(audio_attributes_t attributes, uid_t uid,
-                                                    sp<SwAudioOutputDescriptor> &desc)
+status_t AudioPolicyMixCollection::getOutputForAttr(
+        const audio_attributes_t& attributes, uid_t uid, sp<SwAudioOutputDescriptor> &primaryDesc,
+        std::vector<sp<SwAudioOutputDescriptor>> *secondaryDescs)
 {
     ALOGV("getOutputForAttr() querying %zu mixes:", size());
-    desc = 0;
+    primaryDesc = 0;
     for (size_t i = 0; i < size(); i++) {
         sp<AudioPolicyMix> policyMix = valueAt(i);
+        sp<SwAudioOutputDescriptor> policyDesc = policyMix->getOutput();
+        if (!policyDesc) {
+            ALOGV("%s: Skipping %zu: Mix has no output", __func__, i);
+            continue;
+        }
+
         AudioMix *mix = policyMix->getMix();
+        const bool primaryOutputMix = !is_mix_loopback_render(mix->mRouteFlags);
 
-        if (mix->mMixType == MIX_TYPE_PLAYERS) {
-            // TODO if adding more player rules (currently only 2), make rule handling "generic"
-            //      as there is no difference in the treatment of usage- or uid-based rules
-            bool hasUsageMatchRules = false;
-            bool hasUsageExcludeRules = false;
-            bool usageMatchFound = false;
-            bool usageExclusionFound = false;
+        if (primaryOutputMix && primaryDesc != 0) {
+            ALOGV("%s: Skipping %zu: Primary output already found", __func__, i);
+            continue; // Primary output already found
+        }
 
-            bool hasUidMatchRules = false;
-            bool hasUidExcludeRules = false;
-            bool uidMatchFound = false;
-            bool uidExclusionFound = false;
+        switch (mixMatch(mix, i, attributes, uid)) {
+            case MixMatchStatus::INVALID_MIX: return BAD_VALUE; // TODO: Do we really want to abort?
+            case MixMatchStatus::NO_MATCH:
+                ALOGV("%s: Mix %zu: does not match", __func__, i);
+                continue; // skip the mix
+            case MixMatchStatus::MATCH:;
+        }
 
-            bool hasAddrMatch = false;
-
-            // iterate over all mix criteria to list what rules this mix contains
-            for (size_t j = 0; j < mix->mCriteria.size(); j++) {
-                ALOGV(" getOutputForAttr: mix %zu: inspecting mix criteria %zu of %zu",
-                        i, j, mix->mCriteria.size());
-
-                // if there is an address match, prioritize that match
-                if (strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
-                        strncmp(attributes.tags + strlen("addr="),
-                                mix->mDeviceAddress.string(),
-                                AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
-                    hasAddrMatch = true;
-                    break;
-                }
-
-                switch (mix->mCriteria[j].mRule) {
-                case RULE_MATCH_ATTRIBUTE_USAGE:
-                    ALOGV("\tmix has RULE_MATCH_ATTRIBUTE_USAGE for usage %d",
-                                                mix->mCriteria[j].mValue.mUsage);
-                    hasUsageMatchRules = true;
-                    if (mix->mCriteria[j].mValue.mUsage == attributes.usage) {
-                        // found one match against all allowed usages
-                        usageMatchFound = true;
-                    }
-                    break;
-                case RULE_EXCLUDE_ATTRIBUTE_USAGE:
-                    ALOGV("\tmix has RULE_EXCLUDE_ATTRIBUTE_USAGE for usage %d",
-                            mix->mCriteria[j].mValue.mUsage);
-                    hasUsageExcludeRules = true;
-                    if (mix->mCriteria[j].mValue.mUsage == attributes.usage) {
-                        // found this usage is to be excluded
-                        usageExclusionFound = true;
-                    }
-                    break;
-                case RULE_MATCH_UID:
-                    ALOGV("\tmix has RULE_MATCH_UID for uid %d", mix->mCriteria[j].mValue.mUid);
-                    hasUidMatchRules = true;
-                    if (mix->mCriteria[j].mValue.mUid == uid) {
-                        // found one UID match against all allowed UIDs
-                        uidMatchFound = true;
-                    }
-                    break;
-                case RULE_EXCLUDE_UID:
-                    ALOGV("\tmix has RULE_EXCLUDE_UID for uid %d", mix->mCriteria[j].mValue.mUid);
-                    hasUidExcludeRules = true;
-                    if (mix->mCriteria[j].mValue.mUid == uid) {
-                        // found this UID is to be excluded
-                        uidExclusionFound = true;
-                    }
-                    break;
-                default:
-                    break;
-                }
-
-                // consistency checks: for each "dimension" of rules (usage, uid...), we can
-                // only have MATCH rules, or EXCLUDE rules in each dimension, not a combination
-                if (hasUsageMatchRules && hasUsageExcludeRules) {
-                    ALOGE("getOutputForAttr: invalid combination of RULE_MATCH_ATTRIBUTE_USAGE"
-                            " and RULE_EXCLUDE_ATTRIBUTE_USAGE in mix %zu", i);
-                    return BAD_VALUE;
-                }
-                if (hasUidMatchRules && hasUidExcludeRules) {
-                    ALOGE("getOutputForAttr: invalid combination of RULE_MATCH_UID"
-                            " and RULE_EXCLUDE_UID in mix %zu", i);
-                    return BAD_VALUE;
-                }
-
-                if ((hasUsageExcludeRules && usageExclusionFound)
-                        || (hasUidExcludeRules && uidExclusionFound)) {
-                    break; // stop iterating on criteria because an exclusion was found (will fail)
-                }
-
-            }//iterate on mix criteria
-
-            // determine if exiting on success (or implicit failure as desc is 0)
-            if (hasAddrMatch ||
-                    !((hasUsageExcludeRules && usageExclusionFound) ||
-                      (hasUsageMatchRules && !usageMatchFound)  ||
-                      (hasUidExcludeRules && uidExclusionFound) ||
-                      (hasUidMatchRules && !uidMatchFound))) {
-                ALOGV("\tgetOutputForAttr will use mix %zu", i);
-                desc = policyMix->getOutput();
+        policyDesc->mPolicyMix = mix;
+        if (primaryOutputMix) {
+            primaryDesc = policyDesc;
+            ALOGV("%s: Mix %zu: set primary desc", __func__, i);
+        } else {
+            if (policyDesc->mIoHandle == AUDIO_IO_HANDLE_NONE) {
+                ALOGV("%s: Mix %zu ignored as secondaryOutput because not opened yet", __func__, i);
+            } else {
+                ALOGV("%s: Add a secondary desc %zu", __func__, i);
+                secondaryDescs->push_back(policyDesc);
             }
+        }
+    }
+    return (primaryDesc == nullptr && secondaryDescs->empty()) ? BAD_VALUE : NO_ERROR;
+}
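A minimal sketch of how a caller might consume the primary/secondary split returned above (the names mPolicyMixes, attr, uid and output are assumptions, not part of this change):

    // Hypothetical caller: the primary descriptor drives routing, while the secondary
    // (loopback/render) descriptors are kept so the same track can also be fed to them.
    sp<SwAudioOutputDescriptor> primaryDesc;
    std::vector<sp<SwAudioOutputDescriptor>> secondaryDescs;
    if (mPolicyMixes.getOutputForAttr(attr, uid, primaryDesc, &secondaryDescs) == NO_ERROR
            && primaryDesc != 0) {
        output = primaryDesc->mIoHandle;  // route the track to the matched mix output
    }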
 
-        } else if (mix->mMixType == MIX_TYPE_RECORDERS) {
-            if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE &&
-                    strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
+AudioPolicyMixCollection::MixMatchStatus AudioPolicyMixCollection::mixMatch(
+        const AudioMix* mix, size_t mixIndex, const audio_attributes_t& attributes, uid_t uid) {
+
+    if (mix->mMixType == MIX_TYPE_PLAYERS) {
+        // Loopback render mixes are created from a public API and thus restricted
+        // to non-sensitive audio that has not opted out.
+        if (is_mix_loopback_render(mix->mRouteFlags)) {
+            if ((attributes.flags & AUDIO_FLAG_NO_CAPTURE) == AUDIO_FLAG_NO_CAPTURE) {
+                return MixMatchStatus::NO_MATCH;
+            }
+            if (!(attributes.usage == AUDIO_USAGE_UNKNOWN ||
+                  attributes.usage == AUDIO_USAGE_MEDIA ||
+                  attributes.usage == AUDIO_USAGE_GAME)) {
+                return MixMatchStatus::NO_MATCH;
+            }
+        }
+        // TODO if adding more player rules (currently only 2), make rule handling "generic"
+        //      as there is no difference in the treatment of usage- or uid-based rules
+        bool hasUsageMatchRules = false;
+        bool hasUsageExcludeRules = false;
+        bool usageMatchFound = false;
+        bool usageExclusionFound = false;
+
+        bool hasUidMatchRules = false;
+        bool hasUidExcludeRules = false;
+        bool uidMatchFound = false;
+        bool uidExclusionFound = false;
+
+        bool hasAddrMatch = false;
+
+        // iterate over all mix criteria to list what rules this mix contains
+        for (size_t j = 0; j < mix->mCriteria.size(); j++) {
+            ALOGV(" getOutputForAttr: mix %zu: inspecting mix criteria %zu of %zu",
+                    mixIndex, j, mix->mCriteria.size());
+
+            // if there is an address match, prioritize that match
+            if (strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
                     strncmp(attributes.tags + strlen("addr="),
                             mix->mDeviceAddress.string(),
                             AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
-                desc = policyMix->getOutput();
+                hasAddrMatch = true;
+                break;
             }
+
+            switch (mix->mCriteria[j].mRule) {
+            case RULE_MATCH_ATTRIBUTE_USAGE:
+                ALOGV("\tmix has RULE_MATCH_ATTRIBUTE_USAGE for usage %d",
+                                            mix->mCriteria[j].mValue.mUsage);
+                hasUsageMatchRules = true;
+                if (mix->mCriteria[j].mValue.mUsage == attributes.usage) {
+                    // found one match against all allowed usages
+                    usageMatchFound = true;
+                }
+                break;
+            case RULE_EXCLUDE_ATTRIBUTE_USAGE:
+                ALOGV("\tmix has RULE_EXCLUDE_ATTRIBUTE_USAGE for usage %d",
+                        mix->mCriteria[j].mValue.mUsage);
+                hasUsageExcludeRules = true;
+                if (mix->mCriteria[j].mValue.mUsage == attributes.usage) {
+                    // found this usage is to be excluded
+                    usageExclusionFound = true;
+                }
+                break;
+            case RULE_MATCH_UID:
+                ALOGV("\tmix has RULE_MATCH_UID for uid %d", mix->mCriteria[j].mValue.mUid);
+                hasUidMatchRules = true;
+                if (mix->mCriteria[j].mValue.mUid == uid) {
+                    // found one UID match against all allowed UIDs
+                    uidMatchFound = true;
+                }
+                break;
+            case RULE_EXCLUDE_UID:
+                ALOGV("\tmix has RULE_EXCLUDE_UID for uid %d", mix->mCriteria[j].mValue.mUid);
+                hasUidExcludeRules = true;
+                if (mix->mCriteria[j].mValue.mUid == uid) {
+                    // found this UID is to be excluded
+                    uidExclusionFound = true;
+                }
+                break;
+            default:
+                break;
+            }
+
+            // consistency checks: for each "dimension" of rules (usage, uid...), we can
+            // only have MATCH rules, or EXCLUDE rules in each dimension, not a combination
+            if (hasUsageMatchRules && hasUsageExcludeRules) {
+                ALOGE("getOutputForAttr: invalid combination of RULE_MATCH_ATTRIBUTE_USAGE"
+                        " and RULE_EXCLUDE_ATTRIBUTE_USAGE in mix %zu", mixIndex);
+                return MixMatchStatus::INVALID_MIX;
+            }
+            if (hasUidMatchRules && hasUidExcludeRules) {
+                ALOGE("getOutputForAttr: invalid combination of RULE_MATCH_UID"
+                        " and RULE_EXCLUDE_UID in mix %zu", mixIndex);
+                return MixMatchStatus::INVALID_MIX;
+            }
+
+            if ((hasUsageExcludeRules && usageExclusionFound)
+                    || (hasUidExcludeRules && uidExclusionFound)) {
+                break; // stop iterating on criteria because an exclusion was found (will fail)
+            }
+
+        } // iterate on mix criteria
+
+        // determine whether to exit on success (no exclusion fired and all match rules satisfied)
+        if (hasAddrMatch ||
+                !((hasUsageExcludeRules && usageExclusionFound) ||
+                  (hasUsageMatchRules && !usageMatchFound)  ||
+                  (hasUidExcludeRules && uidExclusionFound) ||
+                  (hasUidMatchRules && !uidMatchFound))) {
+            ALOGV("\tgetOutputForAttr will use mix %zu", mixIndex);
+            return MixMatchStatus::MATCH;
         }
-        if (desc != 0) {
-            desc->mPolicyMix = mix;
-            return NO_ERROR;
+
+    } else if (mix->mMixType == MIX_TYPE_RECORDERS) {
+        if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE &&
+                strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
+                strncmp(attributes.tags + strlen("addr="),
+                        mix->mDeviceAddress.string(),
+                        AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
+            return MixMatchStatus::MATCH;
         }
     }
-    return BAD_VALUE;
+    return MixMatchStatus::NO_MATCH;
 }
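
For reference, the decision mixMatch() makes for MIX_TYPE_PLAYERS boils down to a simple predicate per rule dimension (usage, uid). The following standalone sketch is illustrative only and not part of the change; RuleOutcome and mixAccepts are hypothetical names, simplified types stand in for the AudioMix criteria, and the address short-circuit (which wins outright) is omitted.

    #include <vector>

    // Simplified model of one rule dimension (usage rules or uid rules).
    struct RuleOutcome {
        bool hasMatchRules = false;   // mix declares RULE_MATCH_* rules in this dimension
        bool matchFound = false;      // at least one of them matched the request
        bool hasExcludeRules = false; // mix declares RULE_EXCLUDE_* rules in this dimension
        bool exclusionFound = false;  // one of them matched the request
    };

    // A mix is selected when no exclusion fired and every dimension that has
    // match rules found at least one match.
    static bool mixAccepts(const std::vector<RuleOutcome>& dimensions)
    {
        for (const RuleOutcome& d : dimensions) {
            if (d.hasExcludeRules && d.exclusionFound) {
                return false;
            }
            if (d.hasMatchRules && !d.matchFound) {
                return false;
            }
        }
        return true;
    }
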
 
 sp<DeviceDescriptor> AudioPolicyMixCollection::getDeviceAndMixForOutput(
@@ -302,7 +349,7 @@
 }
 
 sp<DeviceDescriptor> AudioPolicyMixCollection::getDeviceAndMixForInputSource(
-        audio_source_t inputSource, const DeviceVector &availDevices, AudioMix **policyMix)
+        audio_source_t inputSource, const DeviceVector &availDevices, AudioMix **policyMix) const
 {
     for (size_t i = 0; i < size(); i++) {
         AudioMix *mix = valueAt(i)->getMix();
@@ -343,7 +390,7 @@
     ALOGV("getInputMixForAttr looking for address %s\n  mixes available:", address.string());
     for (size_t i = 0; i < size(); i++) {
             sp<AudioPolicyMix> policyMix = valueAt(i);
-            AudioMix *mix = policyMix->getMix();
+            const AudioMix *mix = policyMix->getMix();
             ALOGV("\tmix %zu address=%s", i, mix->mDeviceAddress.string());
     }
 #endif
@@ -400,7 +447,7 @@
     // for each player mix: remove existing rules that match or exclude this uid
     for (size_t i = 0; i < size(); i++) {
         bool foundUidRule = false;
-        AudioMix *mix = valueAt(i)->getMix();
+        const AudioMix *mix = valueAt(i)->getMix();
         if (mix->mMixType != MIX_TYPE_PLAYERS) {
             continue;
         }
@@ -428,7 +475,7 @@
     // for each player mix: find rules that don't exclude this uid, and add the device to the list
     for (size_t i = 0; i < size(); i++) {
         bool ruleAllowsUid = true;
-        AudioMix *mix = valueAt(i)->getMix();
+        const AudioMix *mix = valueAt(i)->getMix();
         if (mix->mMixType != MIX_TYPE_PLAYERS) {
             continue;
         }
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index 82d64c9..ad07ab1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -20,6 +20,7 @@
 #include <sstream>
 #include <utils/Log.h>
 #include <utils/String8.h>
+#include <TypeConverter.h>
 #include "AudioGain.h"
 #include "AudioOutputDescriptor.h"
 #include "AudioPatch.h"
@@ -45,6 +46,7 @@
              mPortId, mSessionId, mUid);
     dst->appendFormat("%*s- Format: %08x Sampling rate: %d Channels: %08x\n", spaces, "",
              mConfig.format, mConfig.sample_rate, mConfig.channel_mask);
+    dst->appendFormat("%*s- Attributes: %s\n", spaces, "", toString(mAttributes).c_str());
     dst->appendFormat("%*s- Preferred Device Id: %08x\n", spaces, "", mPreferredDeviceId);
     dst->appendFormat("%*s- State: %s\n", spaces, "", mActive ? "Active" : "Inactive");
 }
@@ -53,6 +55,7 @@
 {
     ClientDescriptor::dump(dst, spaces, index);
     dst->appendFormat("%*s- Stream: %d flags: %08x\n", spaces, "", mStream, mFlags);
+    dst->appendFormat("%*s- Refcount: %d\n", spaces, "", mActivityCount);
 }
 
 std::string TrackClientDescriptor::toShortString() const
@@ -82,10 +85,11 @@
 SourceClientDescriptor::SourceClientDescriptor(audio_port_handle_t portId, uid_t uid,
          audio_attributes_t attributes, const sp<AudioPatch>& patchDesc,
          const sp<DeviceDescriptor>& srcDevice, audio_stream_type_t stream,
-         routing_strategy strategy) :
+         product_strategy_t strategy, VolumeSource volumeSource) :
     TrackClientDescriptor::TrackClientDescriptor(portId, uid, AUDIO_SESSION_NONE, attributes,
         AUDIO_CONFIG_BASE_INITIALIZER, AUDIO_PORT_HANDLE_NONE,
-        stream, strategy, AUDIO_OUTPUT_FLAG_NONE),
+        stream, strategy, volumeSource, AUDIO_OUTPUT_FLAG_NONE, false,
+        {} /* Sources do not support secondary outputs */),
         mPatchDesc(patchDesc), mSrcDevice(srcDevice)
 {
 }
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 4cb1e17..a3121d1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -215,11 +215,12 @@
     sp<DeviceDescriptor> device;
     for (size_t i = 0; i < size(); i++) {
         if (itemAt(i)->type() == type) {
-            // Assign device if address is empty or matches and
-            // format is default or matches
+            // If a format is specified, match it and ignore the address.
+            // Otherwise, if an address is specified, match it.
+            // Otherwise always match.
             if (((address == "" || itemAt(i)->address() == address) &&
                  format == AUDIO_FORMAT_DEFAULT) ||
-                itemAt(i)->supportsFormat(format)) {
+                (itemAt(i)->supportsFormat(format) && format != AUDIO_FORMAT_DEFAULT)) {
                 device = itemAt(i);
                 if (itemAt(i)->address() == address) {
                     break;
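
The new precedence rule (a specific format first, then the address, otherwise any device of the requested type) can be read as a small predicate. The sketch below is illustrative only and not part of the change; Format and deviceMatches are hypothetical names, with plain types standing in for audio_format_t and String8.

    #include <string>

    enum class Format { Default, Specific }; // stand-in for AUDIO_FORMAT_DEFAULT vs. a real format

    // Mirrors the comment above: a specific format is matched and the address is ignored;
    // with the default format, an empty requested address acts as a wildcard.
    static bool deviceMatches(Format requestedFormat, bool deviceSupportsFormat,
                              const std::string& requestedAddress, const std::string& deviceAddress)
    {
        if (requestedFormat != Format::Default) {
            return deviceSupportsFormat;
        }
        return requestedAddress.empty() || deviceAddress == requestedAddress;
    }
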
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
index 40c49e7..89f9899 100644
--- a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -24,8 +24,9 @@
 
 void EffectDescriptor::dump(String8 *dst, int spaces) const
 {
+    dst->appendFormat("%*sID: %d\n", spaces, "", mId);
     dst->appendFormat("%*sI/O: %d\n", spaces, "", mIo);
-    dst->appendFormat("%*sStrategy: %d\n", spaces, "", mStrategy);
+    dst->appendFormat("%*sMusic Effect: %s\n", spaces, "", isMusicEffect()? "yes" : "no");
     dst->appendFormat("%*sSession: %d\n", spaces, "", mSession);
     dst->appendFormat("%*sName: %s\n", spaces, "",  mDesc.name);
     dst->appendFormat("%*s%s\n", spaces, "",  mEnabled ? "Enabled" : "Disabled");
@@ -41,9 +42,8 @@
 
 status_t EffectDescriptorCollection::registerEffect(const effect_descriptor_t *desc,
                                                     audio_io_handle_t io,
-                                                    uint32_t strategy,
                                                     int session,
-                                                    int id)
+                                                    int id, bool isMusicEffect)
 {
     if (getEffect(id) != nullptr) {
         ALOGW("%s effect %s already registered", __FUNCTION__, desc->name);
@@ -59,18 +59,11 @@
     if (mTotalEffectsMemory > mTotalEffectsMemoryMaxUsed) {
         mTotalEffectsMemoryMaxUsed = mTotalEffectsMemory;
     }
-    ALOGV("registerEffect() effect %s, io %d, strategy %d session %d id %d",
-            desc->name, io, strategy, session, id);
+    ALOGV("registerEffect() effect %s, io %d, session %d id %d",
+            desc->name, io, session, id);
     ALOGV("registerEffect() memory %d, total memory %d", desc->memoryUsage, mTotalEffectsMemory);
 
-    sp<EffectDescriptor> effectDesc = new EffectDescriptor();
-    memcpy (&effectDesc->mDesc, desc, sizeof(effect_descriptor_t));
-    effectDesc->mId = id;
-    effectDesc->mIo = io;
-    effectDesc->mStrategy = static_cast<routing_strategy>(strategy);
-    effectDesc->mSession = session;
-    effectDesc->mEnabled = false;
-
+    sp<EffectDescriptor> effectDesc = new EffectDescriptor(desc, isMusicEffect, id, io, session);
     add(id, effectDesc);
 
     return NO_ERROR;
@@ -161,7 +154,7 @@
 {
     for (size_t i = 0; i < size(); i++) {
         sp<EffectDescriptor> effectDesc = valueAt(i);
-        if (effectDesc->mEnabled && (effectDesc->mStrategy == STRATEGY_MEDIA) &&
+        if (effectDesc->mEnabled && (effectDesc->isMusicEffect()) &&
                 ((effectDesc->mDesc.flags & EFFECT_FLAG_OFFLOAD_SUPPORTED) == 0)) {
             ALOGV("isNonOffloadableEffectEnabled() non offloadable effect %s enabled on session %d",
                   effectDesc->mDesc.name, effectDesc->mSession);
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 98d375c..81d3968 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -201,25 +201,6 @@
     static status_t deserialize(const xmlNode *root, AudioPolicyConfig *config);
 };
 
-struct VolumeTraits : public AndroidCollectionTraits<VolumeCurve, VolumeCurvesCollection>
-{
-    static constexpr const char *tag = "volume";
-    static constexpr const char *collectionTag = "volumes";
-    static constexpr const char *volumePointTag = "point";
-    static constexpr const char *referenceTag = "reference";
-
-    struct Attributes
-    {
-        static constexpr const char *stream = "stream";
-        static constexpr const char *deviceCategory = "deviceCategory";
-        static constexpr const char *reference = "ref";
-        static constexpr const char *referenceName = "name";
-    };
-
-    static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
-    // No Children
-};
-
 struct SurroundSoundTraits
 {
     static constexpr const char *tag = "surroundSound";
@@ -703,67 +684,6 @@
     return NO_ERROR;
 }
 
-Return<VolumeTraits::Element> VolumeTraits::deserialize(const xmlNode *cur,
-        PtrSerializingCtx /*serializingContext*/)
-{
-    std::string streamTypeLiteral = getXmlAttribute(cur, Attributes::stream);
-    if (streamTypeLiteral.empty()) {
-        ALOGE("%s: No %s found", __func__, Attributes::stream);
-        return Status::fromStatusT(BAD_VALUE);
-    }
-    audio_stream_type_t streamType;
-    if (!StreamTypeConverter::fromString(streamTypeLiteral, streamType)) {
-        ALOGE("%s: Invalid %s", __func__, Attributes::stream);
-        return Status::fromStatusT(BAD_VALUE);
-    }
-    std::string deviceCategoryLiteral = getXmlAttribute(cur, Attributes::deviceCategory);
-    if (deviceCategoryLiteral.empty()) {
-        ALOGE("%s: No %s found", __func__, Attributes::deviceCategory);
-        return Status::fromStatusT(BAD_VALUE);
-    }
-    device_category deviceCategory;
-    if (!DeviceCategoryConverter::fromString(deviceCategoryLiteral, deviceCategory)) {
-        ALOGE("%s: Invalid %s=%s", __func__, Attributes::deviceCategory,
-              deviceCategoryLiteral.c_str());
-        return Status::fromStatusT(BAD_VALUE);
-    }
-
-    std::string referenceName = getXmlAttribute(cur, Attributes::reference);
-    const xmlNode *ref = NULL;
-    if (!referenceName.empty()) {
-        ref = getReference<VolumeTraits>(cur->parent, referenceName);
-        if (ref == NULL) {
-            ALOGE("%s: No reference Ptr found for %s", __func__, referenceName.c_str());
-            return Status::fromStatusT(BAD_VALUE);
-        }
-    }
-
-    Element volCurve = new VolumeCurve(deviceCategory, streamType);
-
-    for (const xmlNode *child = referenceName.empty() ? cur->xmlChildrenNode : ref->xmlChildrenNode;
-         child != NULL; child = child->next) {
-        if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>(volumePointTag))) {
-            auto pointDefinition = make_xmlUnique(xmlNodeListGetString(
-                            child->doc, child->xmlChildrenNode, 1));
-            if (pointDefinition == nullptr) {
-                return Status::fromStatusT(BAD_VALUE);
-            }
-            ALOGV("%s: %s=%s",
-                    __func__, tag, reinterpret_cast<const char*>(pointDefinition.get()));
-            std::vector<int32_t> point;
-            collectionFromString<DefaultTraits<int32_t>>(
-                    reinterpret_cast<const char*>(pointDefinition.get()), point, ",");
-            if (point.size() != 2) {
-                ALOGE("%s: Invalid %s: %s", __func__, volumePointTag,
-                        reinterpret_cast<const char*>(pointDefinition.get()));
-                return Status::fromStatusT(BAD_VALUE);
-            }
-            volCurve->add(CurvePoint(point[0], point[1]));
-        }
-    }
-    return volCurve;
-}
-
 status_t SurroundSoundTraits::deserialize(const xmlNode *root, AudioPolicyConfig *config)
 {
     config->setDefaultSurroundFormats();
@@ -851,14 +771,6 @@
     }
     config->setHwModules(modules);
 
-    // deserialize volume section
-    VolumeTraits::Collection volumes;
-    status = deserializeCollection<VolumeTraits>(root, &volumes, config);
-    if (status != NO_ERROR) {
-        return status;
-    }
-    config->setVolumes(volumes);
-
     // Global Configuration
     GlobalConfigTraits::deserialize(root, config);
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index 6f48eae..7c76d8a 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -45,6 +45,7 @@
 const RouteFlagTypeConverter::Table RouteFlagTypeConverter::mTable[] = {
     MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_RENDER),
     MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_LOOP_BACK),
+    MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER),
     MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_ALL),
     TERMINATOR
 };
diff --git a/services/audiopolicy/config/audio_policy_configuration.xml b/services/audiopolicy/config/audio_policy_configuration.xml
index 42c52de..b4cc1d3 100644
--- a/services/audiopolicy/config/audio_policy_configuration.xml
+++ b/services/audiopolicy/config/audio_policy_configuration.xml
@@ -191,7 +191,11 @@
     </modules>
     <!-- End of Modules section -->
 
-    <!-- Volume section -->
+    <!-- Volume section:
+        IMPORTANT NOTE: Volume tables have been moved to the engine configuration.
+                        They are kept here for legacy support.
+                        The engine falls back on these files if it does not provide its own.
+     -->
 
     <xi:include href="audio_policy_volumes.xml"/>
     <xi:include href="default_volume_tables.xml"/>
diff --git a/services/audiopolicy/config/audio_policy_configuration_generic.xml b/services/audiopolicy/config/audio_policy_configuration_generic.xml
index 40dcc22..9ad609d 100644
--- a/services/audiopolicy/config/audio_policy_configuration_generic.xml
+++ b/services/audiopolicy/config/audio_policy_configuration_generic.xml
@@ -30,7 +30,11 @@
     </modules>
     <!-- End of Modules section -->
 
-    <!-- Volume section -->
+    <!-- Volume section:
+        IMPORTANT NOTE: Volume tables have been moved to the engine configuration.
+                        They are kept here for legacy support.
+                        The engine falls back on these files if it does not provide its own.
+     -->
 
     <xi:include href="audio_policy_volumes.xml"/>
     <xi:include href="default_volume_tables.xml"/>
diff --git a/services/audiopolicy/config/hearing_aid_audio_policy_configuration.xml b/services/audiopolicy/config/hearing_aid_audio_policy_configuration.xml
index 3c48e88..e6e6bdb 100644
--- a/services/audiopolicy/config/hearing_aid_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/hearing_aid_audio_policy_configuration.xml
@@ -2,7 +2,7 @@
 <!-- Hearing aid Audio HAL Audio Policy Configuration file -->
 <module name="hearing_aid" halVersion="2.0">
     <mixPorts>
-        <mixPort name="hearing aid output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+        <mixPort name="hearing aid output" role="source">
             <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
                      samplingRates="24000,16000"
                      channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
diff --git a/services/audiopolicy/engine/Android.mk b/services/audiopolicy/engine/Android.mk
new file mode 100644
index 0000000..dcce8e3
--- /dev/null
+++ b/services/audiopolicy/engine/Android.mk
@@ -0,0 +1,9 @@
+
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+#######################################################################
+# Recursive call sub-folder Android.mk
+#
+include $(call all-makefiles-under,$(LOCAL_PATH))
+
diff --git a/services/audiopolicy/engine/common/Android.bp b/services/audiopolicy/engine/common/Android.bp
new file mode 100644
index 0000000..e6ede07
--- /dev/null
+++ b/services/audiopolicy/engine/common/Android.bp
@@ -0,0 +1,19 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_library_headers {
+    name: "libaudiopolicyengine_common_headers",
+    host_supported: true,
+    export_include_dirs: ["include"],
+}
diff --git a/services/audiopolicy/engine/common/include/EngineBase.h b/services/audiopolicy/engine/common/include/EngineBase.h
new file mode 100644
index 0000000..bc027e2
--- /dev/null
+++ b/services/audiopolicy/engine/common/include/EngineBase.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <EngineConfig.h>
+#include <AudioPolicyManagerInterface.h>
+#include <ProductStrategy.h>
+#include <StreamVolumeCurves.h>
+
+namespace android {
+namespace audio_policy {
+
+class EngineBase : public AudioPolicyManagerInterface
+{
+public:
+    ///
+    /// from AudioPolicyManagerInterface
+    ///
+    android::status_t initCheck() override;
+
+    void setObserver(AudioPolicyManagerObserver *observer) override;
+
+    status_t setPhoneState(audio_mode_t mode) override;
+
+    audio_mode_t getPhoneState() const override { return mPhoneState; }
+
+    status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) override
+    {
+        mForceUse[usage] = config;
+        return NO_ERROR;
+    }
+
+    audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const override
+    {
+        return mForceUse[usage];
+    }
+    android::status_t setDeviceConnectionState(const sp<DeviceDescriptor> /*devDesc*/,
+                                               audio_policy_dev_state_t /*state*/) override
+    {
+        return NO_ERROR;
+    }
+    product_strategy_t getProductStrategyForAttributes(
+            const audio_attributes_t &attr) const override;
+
+    audio_stream_type_t getStreamTypeForAttributes(const audio_attributes_t &attr) const override;
+
+    audio_attributes_t getAttributesForStreamType(audio_stream_type_t stream) const override;
+
+    StreamTypeVector getStreamTypesForProductStrategy(product_strategy_t ps) const override;
+
+    AttributesVector getAllAttributesForProductStrategy(product_strategy_t ps) const override;
+
+    StrategyVector getOrderedProductStrategies() const override;
+
+    status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) const override;
+
+    VolumeCurves *getVolumeCurvesForAttributes(const audio_attributes_t &attr) override;
+
+    VolumeCurves *getVolumeCurvesForStreamType(audio_stream_type_t stream) override;
+
+    void dump(String8 *dst) const override;
+
+
+    engineConfig::ParsingResult loadAudioPolicyEngineConfig();
+
+    const ProductStrategyMap &getProductStrategies() const { return mProductStrategies; }
+
+    ProductStrategyMap &getProductStrategies() { return mProductStrategies; }
+
+    product_strategy_t getProductStrategyForStream(audio_stream_type_t stream) const;
+
+    product_strategy_t getProductStrategyByName(const std::string &name) const;
+
+    AudioPolicyManagerObserver *getApmObserver() const { return mApmObserver; }
+
+    inline bool isInCall() const
+    {
+        return is_state_in_call(getPhoneState());
+    }
+
+    VolumeSource toVolumeSource(audio_stream_type_t stream) const
+    {
+        return static_cast<VolumeSource>(stream);
+    }
+
+    status_t switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst);
+
+    status_t restoreOriginVolumeCurve(audio_stream_type_t stream);
+
+ private:
+    AudioPolicyManagerObserver *mApmObserver = nullptr;
+
+    ProductStrategyMap mProductStrategies;
+    audio_mode_t mPhoneState = AUDIO_MODE_NORMAL;  /**< current phone state. */
+
+    /** current forced use configuration. */
+    audio_policy_forced_cfg_t mForceUse[AUDIO_POLICY_FORCE_USE_CNT] = {};
+
+    StreamVolumeCurves mStreamVolumeCurves;
+};
+
+} // namespace audio_policy
+} // namespace android
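
As a rough illustration of how this base class is meant to be used, a concrete engine derives from EngineBase, loads its configuration once, and reuses the common strategy and volume plumbing. The sketch below is hypothetical (SampleEngine is an invented name) and assumes the remaining pure virtuals of AudioPolicyManagerInterface, not shown in this header, are implemented elsewhere.

    #include <EngineBase.h>

    namespace android {
    namespace audio_policy {

    // Hypothetical concrete engine; only the members visible in EngineBase.h are used.
    class SampleEngine : public EngineBase {
    public:
        SampleEngine()
        {
            // Parse the engine configuration, or fall back on the hard-coded defaults.
            loadAudioPolicyEngineConfig();
        }

        status_t setForceUse(audio_policy_force_use_t usage,
                             audio_policy_forced_cfg_t config) override
        {
            // Reuse the base bookkeeping, then add any product-specific behavior here.
            return EngineBase::setForceUse(usage, config);
        }
    };

    } // namespace audio_policy
    } // namespace android
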
diff --git a/services/audiopolicy/engine/common/include/ProductStrategy.h b/services/audiopolicy/engine/common/include/ProductStrategy.h
new file mode 100644
index 0000000..72505b2
--- /dev/null
+++ b/services/audiopolicy/engine/common/include/ProductStrategy.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+#include <AudioPolicyManagerInterface.h>
+#include <utils/RefBase.h>
+#include <HandleGenerator.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <utils/Errors.h>
+#include <utils/String8.h>
+
+namespace android {
+
+/**
+ * @brief The ProductStrategy class describes, for each product_strategy_t identifier, the
+ * associated audio attributes, the device types to use and the device address to use.
+ * The identifier is intentionally not strongly typed so that it can be extended by OEMs.
+ */
+class ProductStrategy : public virtual RefBase, private HandleGenerator<uint32_t>
+{
+private:
+    struct AudioAttributes {
+        audio_stream_type_t mStream = AUDIO_STREAM_DEFAULT;
+        uint32_t mGroupId = 0;
+        audio_attributes_t mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
+    };
+
+    using AudioAttributesVector = std::vector<AudioAttributes>;
+
+public:
+    ProductStrategy(const std::string &name);
+
+    void addAttributes(const AudioAttributes &audioAttributes);
+
+    std::vector<android::AudioAttributes> listAudioAttributes() const;
+
+    std::string getName() const { return mName; }
+    AttributesVector getAudioAttributes() const;
+    product_strategy_t getId() const { return mId; }
+    StreamTypeVector getSupportedStreams() const;
+
+    /**
+     * @brief matches checks if the given audio attributes shall follow the strategy.
+     *        Order of the attributes within a strategy matters.
+     *        If only the usage is available, the check is performed on the usages of the given
+     *        attributes, otherwise all fields must match.
+     * @param attributes to consider
+     * @return true if attributes matches with the strategy, false otherwise.
+     */
+    bool matches(const audio_attributes_t attributes) const;
+
+    bool supportStreamType(const audio_stream_type_t &streamType) const;
+
+    void setDeviceAddress(const std::string &address)
+    {
+        mDeviceAddress = address;
+    }
+
+    std::string getDeviceAddress() const { return mDeviceAddress; }
+
+    void setDeviceTypes(audio_devices_t devices)
+    {
+        mApplicableDevices = devices;
+    }
+
+    audio_devices_t getDeviceTypes() const { return mApplicableDevices; }
+
+    audio_attributes_t getAttributesForStreamType(audio_stream_type_t stream) const;
+    audio_stream_type_t getStreamTypeForAttributes(const audio_attributes_t &attr) const;
+
+    bool isDefault() const;
+
+    void dump(String8 *dst, int spaces = 0) const;
+
+private:
+    std::string mName;
+
+    AudioAttributesVector mAttributesVector;
+
+    product_strategy_t mId;
+
+    std::string mDeviceAddress; /**< Device address applicable for this strategy, may be empty */
+
+    /**
+     * Applicable device(s) type mask for this strategy.
+     */
+    audio_devices_t mApplicableDevices = AUDIO_DEVICE_NONE;
+};
+
+class ProductStrategyMap : public std::map<product_strategy_t, sp<ProductStrategy> >
+{
+public:
+    /**
+     * @brief getProductStrategyForAttributes. The iteration order of the strategies matters.
+     * @param attr audio attributes to resolve
+     * @return applicable product strategy for the given attributes, default if none applies.
+     */
+    product_strategy_t getProductStrategyForAttributes(const audio_attributes_t &attr) const;
+
+    product_strategy_t getProductStrategyForStream(audio_stream_type_t stream) const;
+
+    audio_attributes_t getAttributesForStreamType(audio_stream_type_t stream) const;
+
+    audio_stream_type_t getStreamTypeForAttributes(const audio_attributes_t &attr) const;
+
+    /**
+     * @brief getAttributesForProductStrategy can be called from
+     *        AudioManager: in this case, the product strategy IS the former routing strategy
+     *        CarAudioManager: in this case, the product strategy IS the car usage
+     *                      [getAudioAttributesForCarUsage]
+     *        OemExtension: in this case, the product strategy IS the Oem usage
+     *
+     * @param strategy
+     * @return audio attributes (or at least one of the attributes) following the given strategy.
+     */
+    audio_attributes_t getAttributesForProductStrategy(product_strategy_t strategy) const;
+
+    audio_devices_t getDeviceTypesForProductStrategy(product_strategy_t strategy) const;
+
+    std::string getDeviceAddressForProductStrategy(product_strategy_t strategy) const;
+
+    product_strategy_t getDefault() const;
+
+    void dump(String8 *dst, int spaces = 0) const;
+};
+
+} // namespace android
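
For illustration, a minimal query-side sketch of ProductStrategyMap, assuming the map has already been populated (for example by EngineBase::loadAudioPolicyEngineConfig); resolveAttributes is a hypothetical name and the attribute values are arbitrary.

    #include <ProductStrategy.h>
    #include <system/audio.h>

    using namespace android;

    // Resolve arbitrary attributes to a strategy, a legacy stream type and a device mask.
    static void resolveAttributes(const ProductStrategyMap& strategies)
    {
        audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
        attr.usage = AUDIO_USAGE_ALARM;

        product_strategy_t ps = strategies.getProductStrategyForAttributes(attr);
        audio_stream_type_t stream = strategies.getStreamTypeForAttributes(attr);
        audio_devices_t devices = strategies.getDeviceTypesForProductStrategy(ps);
        (void)stream;
        (void)devices;
    }
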
diff --git a/services/audiopolicy/engine/common/include/StreamVolumeCurves.h b/services/audiopolicy/engine/common/include/StreamVolumeCurves.h
new file mode 100644
index 0000000..5b0b7d6
--- /dev/null
+++ b/services/audiopolicy/engine/common/include/StreamVolumeCurves.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <VolumeCurve.h>
+#include <map>
+
+namespace android {
+
+class StreamVolumeCurves
+{
+public:
+    StreamVolumeCurves() = default;
+
+    /**
+     * @brief switchVolumeCurve control API for the Engine, allows switching the volume curves
+     * from one stream type to another.
+     * @param streamSrc source stream type
+     * @param streamDst destination stream type
+     */
+    status_t switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst)
+    {
+        if (!hasCurvesFor(streamSrc) || !hasCurvesFor(streamDst)) {
+            ALOGE("%s: No curves defined for streams %d %d", __FUNCTION__, streamSrc, streamDst);
+            return NO_INIT;
+        }
+        const VolumeCurves &sourceCurves = getCurvesFor(streamSrc);
+        VolumeCurves &dstCurves = editCurvesFor(streamDst);
+        return dstCurves.switchCurvesFrom(sourceCurves);
+    }
+    void dump(String8 *dst, int spaces = 0) const;
+
+    void add(const VolumeCurves &curves, audio_stream_type_t streamType)
+    {
+        mCurves.emplace(streamType, curves);
+    }
+
+    bool hasCurvesFor(audio_stream_type_t stream)
+    {
+        return mCurves.find(stream) != end(mCurves);
+    }
+
+    VolumeCurves &editCurvesFor(audio_stream_type_t stream)
+    {
+        ALOG_ASSERT(mCurves.find(stream) != end(mCurves), "Invalid stream type for Volume Curve");
+        return mCurves[stream];
+    }
+    const VolumeCurves &getCurvesFor(audio_stream_type_t stream) const
+    {
+        ALOG_ASSERT(mCurves.find(stream) != end(mCurves), "Invalid stream type for Volume Curve");
+        return mCurves.at(stream);
+    }
+    /**
+     * @brief getVolumeCurvesForStream
+     * @param stream type for which the volume curves interface is requested
+     * @return the VolumeCurves for a given stream type.
+     */
+    VolumeCurves &getVolumeCurvesForStream(audio_stream_type_t stream)
+    {
+        ALOG_ASSERT(mCurves.find(stream) != end(mCurves), "Invalid stream type for Volume Curve");
+        return mCurves[stream];
+    }
+    /**
+     * @brief restoreOriginVolumeCurve helper control API for engine to restore the original volume
+     * curves for a given stream type
+     * @param stream for which the volume curves will be restored.
+     */
+    status_t restoreOriginVolumeCurve(audio_stream_type_t stream)
+    {
+        if (!hasCurvesFor(stream)) {
+            ALOGE("%s: No curves defined for streams", __FUNCTION__);
+            return NO_INIT;
+        }
+        return switchVolumeCurve(stream, stream);
+    }
+
+private:
+    std::map<audio_stream_type_t, VolumeCurves> mCurves;
+};
+
+} // namespace android
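
For illustration, the typical switch/restore sequence this class supports, the same one EngineBase::setPhoneState uses for DTMF while in call. A sketch only: inCallDtmfVolume is a hypothetical name and the curves are assumed to have been added beforehand.

    #include <StreamVolumeCurves.h>
    #include <system/audio.h>

    using namespace android;

    static void inCallDtmfVolume(StreamVolumeCurves& curves)
    {
        // While in call, DTMF tones follow the voice call volume curves...
        curves.switchVolumeCurve(AUDIO_STREAM_VOICE_CALL, AUDIO_STREAM_DTMF);

        // ...and the original DTMF curves are restored once the call ends.
        curves.restoreOriginVolumeCurve(AUDIO_STREAM_DTMF);
    }
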
diff --git a/services/audiopolicy/engine/common/include/VolumeCurve.h b/services/audiopolicy/engine/common/include/VolumeCurve.h
new file mode 100644
index 0000000..0ec63e1
--- /dev/null
+++ b/services/audiopolicy/engine/common/include/VolumeCurve.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "IVolumeCurves.h"
+#include <policy.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <utils/SortedVector.h>
+#include <utils/KeyedVector.h>
+#include <system/audio.h>
+#include <cutils/config_utils.h>
+#include <string>
+#include <map>
+#include <utility>
+
+namespace android {
+
+struct CurvePoint
+{
+    CurvePoint() {}
+    CurvePoint(int index, int attenuationInMb) :
+        mIndex(index), mAttenuationInMb(attenuationInMb) {}
+    uint32_t mIndex;
+    int mAttenuationInMb;
+};
+
+inline bool operator< (const CurvePoint &lhs, const CurvePoint &rhs)
+{
+    return lhs.mIndex < rhs.mIndex;
+}
+
+// A volume curve for a given use case and device category
+// It contains a list of points on this curve expressing the attenuation in millibels for
+// a given volume index from 0 to 100.
+class VolumeCurve : public RefBase
+{
+public:
+    VolumeCurve(device_category device) : mDeviceCategory(device) {}
+
+    void add(const CurvePoint &point) { mCurvePoints.add(point); }
+
+    float volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const;
+
+    void dump(String8 *dst, int spaces = 0, bool curvePoints = false) const;
+
+    device_category getDeviceCategory() const { return mDeviceCategory; }
+
+private:
+    const device_category mDeviceCategory;
+    SortedVector<CurvePoint> mCurvePoints;
+};
+
+// Volume Curves for a given use case indexed by device category
+class VolumeCurves : public KeyedVector<device_category, sp<VolumeCurve> >,
+                     public IVolumeCurves
+{
+public:
+    VolumeCurves(int indexMin = 0, int indexMax = 100) :
+        mIndexMin(indexMin), mIndexMax(indexMax), mStream(AUDIO_STREAM_DEFAULT)
+    {
+        addCurrentVolumeIndex(AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, 0);
+    }
+    VolumeCurves(audio_stream_type_t stream, int indexMin, int indexMax) :
+        mIndexMin(indexMin), mIndexMax(indexMax), mStream(stream)
+    {
+        addCurrentVolumeIndex(AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, 0);
+    }
+
+    // Once the XML has been parsed, this must be called first to sanity check the table and initialize the indexes
+    virtual status_t initVolume(int indexMin, int indexMax)
+    {
+        mIndexMin = indexMin;
+        mIndexMax = indexMax;
+        return NO_ERROR;
+    }
+
+    sp<VolumeCurve> getCurvesFor(device_category device) const
+    {
+        if (indexOfKey(device) < 0) {
+            return 0;
+        }
+        return valueFor(device);
+    }
+
+    virtual int getVolumeIndex(audio_devices_t device) const
+    {
+        device = Volume::getDeviceForVolume(device);
+        // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME
+        if (mIndexCur.find(device) == end(mIndexCur)) {
+            device = AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME;
+        }
+        return mIndexCur.at(device);
+    }
+
+    virtual bool canBeMuted() const { return mCanBeMuted; }
+    virtual void clearCurrentVolumeIndex() { mIndexCur.clear(); }
+    void addCurrentVolumeIndex(audio_devices_t device, int index) override
+    {
+        mIndexCur[device] = index;
+    }
+
+    int getVolumeIndexMin() const { return mIndexMin; }
+
+    int getVolumeIndexMax() const { return mIndexMax; }
+
+    bool hasVolumeIndexForDevice(audio_devices_t device) const
+    {
+        device = Volume::getDeviceForVolume(device);
+        return mIndexCur.find(device) != end(mIndexCur);
+    }
+
+    status_t switchCurvesFrom(const VolumeCurves &referenceCurves)
+    {
+        if (size() != referenceCurves.size()) {
+            ALOGE("%s! device category not aligned, cannot switch", __FUNCTION__);
+            return BAD_TYPE;
+        }
+        for (size_t index = 0; index < size(); index++) {
+            device_category cat = keyAt(index);
+            setVolumeCurve(cat, referenceCurves.getOriginVolumeCurve(cat));
+        }
+        return NO_ERROR;
+    }
+    status_t restoreOriginVolumeCurve()
+    {
+        return switchCurvesFrom(*this);
+    }
+
+    const sp<VolumeCurve> getOriginVolumeCurve(device_category deviceCategory) const
+    {
+        ALOG_ASSERT(mOriginVolumeCurves.indexOfKey(deviceCategory) >= 0, "Invalid device category");
+        return mOriginVolumeCurves.valueFor(deviceCategory);
+    }
+    void setVolumeCurve(device_category deviceCategory, const sp<VolumeCurve> &volumeCurve)
+    {
+        ALOG_ASSERT(indexOfKey(deviceCategory) >= 0, "Invalid device category for Volume Curve");
+        replaceValueFor(deviceCategory, volumeCurve);
+    }
+
+    ssize_t add(const sp<VolumeCurve> &volumeCurve)
+    {
+        device_category deviceCategory = volumeCurve->getDeviceCategory();
+        ssize_t index = indexOfKey(deviceCategory);
+        if (index < 0) {
+            // Keep track of original Volume Curves per device category in order to switch curves.
+            mOriginVolumeCurves.add(deviceCategory, volumeCurve);
+            return KeyedVector::add(deviceCategory, volumeCurve);
+        }
+        return index;
+    }
+
+    virtual float volIndexToDb(device_category deviceCat, int indexInUi) const
+    {
+        sp<VolumeCurve> vc = getCurvesFor(deviceCat);
+        if (vc != 0) {
+            return vc->volIndexToDb(indexInUi, mIndexMin, mIndexMax);
+        } else {
+            ALOGE("Invalid device category %d for Volume Curve", deviceCat);
+            return 0.0f;
+        }
+    }
+
+    audio_stream_type_t getStreamType() const { return mStream; }
+
+    void dump(String8 *dst, int spaces = 0, bool curvePoints = false) const override;
+
+private:
+    KeyedVector<device_category, sp<VolumeCurve> > mOriginVolumeCurves;
+    std::map<audio_devices_t, int> mIndexCur; /**< current volume index per device. */
+    int mIndexMin; /**< min volume index. */
+    int mIndexMax; /**< max volume index. */
+    const bool mCanBeMuted = true; /**< true if the stream can be muted. */
+
+    const audio_stream_type_t mStream; /**< Keep it for legacy. */
+};
+
+} // namespace android
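
For illustration, a sketch that builds a two-point speaker curve for the music stream and converts a UI index to an attenuation in dB; musicVolumeAt is a hypothetical name and the point values are arbitrary.

    #include <VolumeCurve.h>

    using namespace android;

    static float musicVolumeAt(int indexInUi)
    {
        VolumeCurves curves(AUDIO_STREAM_MUSIC, 0 /*indexMin*/, 100 /*indexMax*/);

        // Straight line from -49.5 dB at index 0 to 0 dB at index 100 (arbitrary values).
        sp<VolumeCurve> speaker = new VolumeCurve(DEVICE_CATEGORY_SPEAKER);
        speaker->add({0, -4950});
        speaker->add({100, 0});
        curves.add(speaker);

        return curves.volIndexToDb(DEVICE_CATEGORY_SPEAKER, indexInUi);
    }
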
diff --git a/services/audiopolicy/engine/common/src/EngineBase.cpp b/services/audiopolicy/engine/common/src/EngineBase.cpp
new file mode 100644
index 0000000..6e2ab4c
--- /dev/null
+++ b/services/audiopolicy/engine/common/src/EngineBase.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine/Base"
+//#define LOG_NDEBUG 0
+
+#include "EngineBase.h"
+#include "EngineDefaultConfig.h"
+#include <TypeConverter.h>
+
+namespace android {
+namespace audio_policy {
+
+void EngineBase::setObserver(AudioPolicyManagerObserver *observer)
+{
+    ALOG_ASSERT(observer != NULL, "Invalid Audio Policy Manager observer");
+    mApmObserver = observer;
+}
+
+status_t EngineBase::initCheck()
+{
+    return (mApmObserver != nullptr)? NO_ERROR : NO_INIT;
+}
+
+status_t EngineBase::setPhoneState(audio_mode_t state)
+{
+    ALOGV("setPhoneState() state %d", state);
+
+    if (state < 0 || state >= AUDIO_MODE_CNT) {
+        ALOGW("setPhoneState() invalid state %d", state);
+        return BAD_VALUE;
+    }
+
+    if (state == mPhoneState) {
+        ALOGW("setPhoneState() setting same state %d", state);
+        return BAD_VALUE;
+    }
+
+    // store previous phone state for the in-call volume curve switch below
+    int oldState = mPhoneState;
+    mPhoneState = state;
+
+    if (!is_state_in_call(oldState) && is_state_in_call(state)) {
+        ALOGV("  Entering call in setPhoneState()");
+        switchVolumeCurve(AUDIO_STREAM_VOICE_CALL, AUDIO_STREAM_DTMF);
+    } else if (is_state_in_call(oldState) && !is_state_in_call(state)) {
+        ALOGV("  Exiting call in setPhoneState()");
+        restoreOriginVolumeCurve(AUDIO_STREAM_DTMF);
+    }
+    return NO_ERROR;
+}
+
+product_strategy_t EngineBase::getProductStrategyForAttributes(const audio_attributes_t &attr) const
+{
+    return mProductStrategies.getProductStrategyForAttributes(attr);
+}
+
+audio_stream_type_t EngineBase::getStreamTypeForAttributes(const audio_attributes_t &attr) const
+{
+    return mProductStrategies.getStreamTypeForAttributes(attr);
+}
+
+audio_attributes_t EngineBase::getAttributesForStreamType(audio_stream_type_t stream) const
+{
+    return mProductStrategies.getAttributesForStreamType(stream);
+}
+
+product_strategy_t EngineBase::getProductStrategyForStream(audio_stream_type_t stream) const
+{
+    return mProductStrategies.getProductStrategyForStream(stream);
+}
+
+product_strategy_t EngineBase::getProductStrategyByName(const std::string &name) const
+{
+    for (const auto &iter : mProductStrategies) {
+        if (iter.second->getName() == name) {
+            return iter.second->getId();
+        }
+    }
+    return PRODUCT_STRATEGY_NONE;
+}
+
+engineConfig::ParsingResult EngineBase::loadAudioPolicyEngineConfig()
+{
+    auto loadProductStrategies =
+            [](auto& strategyConfigs, auto& productStrategies) {
+        uint32_t groupid = 0;
+        for (auto& strategyConfig : strategyConfigs) {
+            sp<ProductStrategy> strategy = new ProductStrategy(strategyConfig.name);
+            for (const auto &group : strategyConfig.attributesGroups) {
+                for (const auto &attr : group.attributesVect) {
+                    strategy->addAttributes({group.stream, groupid, attr});
+                }
+                groupid += 1;
+            }
+            product_strategy_t strategyId = strategy->getId();
+            productStrategies[strategyId] = strategy;
+        }
+    };
+    auto loadVolumeCurves = [](const auto &configVolumes, auto &streamVolumeCollection) {
+        for (auto &configVolume : configVolumes) {
+            audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT;
+            if (configVolume.stream.empty() ||
+                    !StreamTypeConverter::fromString(configVolume.stream, streamType)) {
+                ALOGE("%s: Invalid stream type", __FUNCTION__);
+                continue;
+            }
+            VolumeCurves volumeCurves(streamType, configVolume.indexMin, configVolume.indexMax);
+            for (auto &configCurve : configVolume.volumeCurves) {
+                device_category deviceCategory = DEVICE_CATEGORY_SPEAKER;
+                if (!DeviceCategoryConverter::fromString(configCurve.deviceCategory,
+                                                         deviceCategory)) {
+                    ALOGE("%s: Invalid %s", __FUNCTION__, configCurve.deviceCategory.c_str());
+                    continue;
+                }
+                sp<VolumeCurve> curve = new VolumeCurve(deviceCategory);
+                for (auto &point : configCurve.curvePoints) {
+                    curve->add({point.index, point.attenuationInMb});
+                }
+                volumeCurves.add(curve);
+            }
+            streamVolumeCollection.add(volumeCurves, streamType);
+        }
+    };
+
+    auto result = engineConfig::parse();
+    if (result.parsedConfig == nullptr) {
+        ALOGW("%s: No configuration found, using default matching phone experience.", __FUNCTION__);
+        engineConfig::Config config = gDefaultEngineConfig;
+        android::status_t ret = engineConfig::parseLegacyVolumes(config.volumeGroups);
+        result = {std::make_unique<engineConfig::Config>(config),
+                  static_cast<size_t>(ret == NO_ERROR ? 0 : 1)};
+    }
+    ALOGE_IF(result.nbSkippedElement != 0, "skipped %zu elements", result.nbSkippedElement);
+    loadProductStrategies(result.parsedConfig->productStrategies, mProductStrategies);
+    loadVolumeCurves(result.parsedConfig->volumeGroups, mStreamVolumeCurves);
+    return result;
+}
+
+StrategyVector EngineBase::getOrderedProductStrategies() const
+{
+    auto findByFlag = [](const auto &productStrategies, auto flag) {
+        return std::find_if(begin(productStrategies), end(productStrategies),
+                            [&](const auto &strategy) {
+            for (const auto &attributes : strategy.second->getAudioAttributes()) {
+                if ((attributes.flags & flag) == flag) {
+                    return true;
+                }
+            }
+            return false;
+        });
+    };
+    auto strategies = mProductStrategies;
+    auto enforcedAudibleStrategyIter = findByFlag(strategies, AUDIO_FLAG_AUDIBILITY_ENFORCED);
+
+    if (getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED &&
+            enforcedAudibleStrategyIter != strategies.end()) {
+        auto enforcedAudibleStrategy = *enforcedAudibleStrategyIter;
+        strategies.erase(enforcedAudibleStrategyIter);
+        strategies.insert(begin(strategies), enforcedAudibleStrategy);
+    }
+    StrategyVector orderedStrategies;
+    for (const auto &iter : strategies) {
+        orderedStrategies.push_back(iter.second->getId());
+    }
+    return orderedStrategies;
+}
+
+StreamTypeVector EngineBase::getStreamTypesForProductStrategy(product_strategy_t ps) const
+{
+    // @TODO default music stream to control volume if no group?
+    return (mProductStrategies.find(ps) != end(mProductStrategies)) ?
+                mProductStrategies.at(ps)->getSupportedStreams() :
+                StreamTypeVector(1, AUDIO_STREAM_MUSIC); // one-element vector, not a size-N vector
+}
+
+AttributesVector EngineBase::getAllAttributesForProductStrategy(product_strategy_t ps) const
+{
+    return (mProductStrategies.find(ps) != end(mProductStrategies)) ?
+                mProductStrategies.at(ps)->getAudioAttributes() : AttributesVector();
+}
+
+status_t EngineBase::listAudioProductStrategies(AudioProductStrategyVector &strategies) const
+{
+    for (const auto &iter : mProductStrategies) {
+        const auto &productStrategy = iter.second;
+        strategies.push_back(
+        {productStrategy->getName(), productStrategy->listAudioAttributes(),
+         productStrategy->getId()});
+    }
+    return NO_ERROR;
+}
+
+VolumeCurves *EngineBase::getVolumeCurvesForAttributes(const audio_attributes_t &attr)
+{
+    return &mStreamVolumeCurves.getVolumeCurvesForStream(getStreamTypeForAttributes(attr));
+}
+
+VolumeCurves *EngineBase::getVolumeCurvesForStreamType(audio_stream_type_t stream)
+{
+    return &mStreamVolumeCurves.getVolumeCurvesForStream(stream);
+}
+
+status_t EngineBase::switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst)
+{
+    return mStreamVolumeCurves.switchVolumeCurve(streamSrc, streamDst);
+}
+
+status_t EngineBase::restoreOriginVolumeCurve(audio_stream_type_t stream)
+{
+    return mStreamVolumeCurves.restoreOriginVolumeCurve(stream);
+}
+
+void EngineBase::dump(String8 *dst) const
+{
+    mProductStrategies.dump(dst, 2);
+    mStreamVolumeCurves.dump(dst, 2);
+}
+
+} // namespace audio_policy
+} // namespace android
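
As a usage illustration of the ordering logic above: when the SYSTEM force usage is set to ENFORCED, getOrderedProductStrategies() moves the strategy carrying AUDIO_FLAG_AUDIBILITY_ENFORCED to the front. A sketch only, against any concrete engine derived from EngineBase; enforcedAudibleFirst is a hypothetical name.

    #include <EngineBase.h>

    using namespace android;
    using namespace android::audio_policy;

    static void enforcedAudibleFirst(EngineBase& engine)
    {
        engine.setForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM, AUDIO_POLICY_FORCE_SYSTEM_ENFORCED);

        // The enforced-audible strategy (if configured) is now first in the ordered list.
        StrategyVector ordered = engine.getOrderedProductStrategies();
        (void)ordered;
    }
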
diff --git a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
new file mode 100644
index 0000000..f1642c5
--- /dev/null
+++ b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+
+namespace android {
+/**
+ * @brief AudioProductStrategies hard coded array of strategies to fill new engine API contract.
+ */
+const engineConfig::ProductStrategies gOrderedStrategies = {
+    {"STRATEGY_PHONE",
+     {
+         {"phone", AUDIO_STREAM_VOICE_CALL,
+          {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_VOICE_COMMUNICATION, AUDIO_SOURCE_DEFAULT, 0,
+            ""}},
+         },
+         {"sco", AUDIO_STREAM_BLUETOOTH_SCO,
+          {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_SCO,
+            ""}},
+         }
+     },
+    },
+    {"STRATEGY_SONIFICATION",
+     {
+         {"ring", AUDIO_STREAM_RING,
+          {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE,
+            AUDIO_SOURCE_DEFAULT, 0, ""}}
+         },
+         {"alarm", AUDIO_STREAM_ALARM,
+          {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ALARM, AUDIO_SOURCE_DEFAULT, 0, ""}},
+         }
+     },
+    },
+    {"STRATEGY_ENFORCED_AUDIBLE",
+     {
+         {"", AUDIO_STREAM_ENFORCED_AUDIBLE,
+          {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
+            AUDIO_FLAG_AUDIBILITY_ENFORCED, ""}}
+         }
+     },
+    },
+    {"STRATEGY_ACCESSIBILITY",
+     {
+         {"", AUDIO_STREAM_ACCESSIBILITY,
+          {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
+            AUDIO_SOURCE_DEFAULT, 0, ""}}
+         }
+     },
+    },
+    {"STRATEGY_SONIFICATION_RESPECTFUL",
+     {
+         {"", AUDIO_STREAM_NOTIFICATION,
+          {
+              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION, AUDIO_SOURCE_DEFAULT, 0, ""},
+              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
+               AUDIO_SOURCE_DEFAULT, 0, ""},
+              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
+               AUDIO_SOURCE_DEFAULT, 0, ""},
+              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
+               AUDIO_SOURCE_DEFAULT, 0, ""},
+              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_EVENT,
+               AUDIO_SOURCE_DEFAULT, 0, ""}
+          }
+         }
+     },
+    },
+    {"STRATEGY_MEDIA",
+     {
+         {"music", AUDIO_STREAM_MUSIC,
+          {
+              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_MEDIA, AUDIO_SOURCE_DEFAULT, 0, ""},
+              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_GAME, AUDIO_SOURCE_DEFAULT, 0, ""},
+              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANT, AUDIO_SOURCE_DEFAULT, 0, ""},
+              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+               AUDIO_SOURCE_DEFAULT, 0, ""},
+              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, 0, ""}
+          },
+         },
+         {"system", AUDIO_STREAM_SYSTEM,
+          {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANCE_SONIFICATION,
+            AUDIO_SOURCE_DEFAULT, 0, ""}}
+         }
+     },
+    },
+    {"STRATEGY_DTMF",
+     {
+         {"", AUDIO_STREAM_DTMF,
+          {
+              {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
+               AUDIO_SOURCE_DEFAULT, 0, ""}
+          }
+         }
+     },
+    },
+    {"STRATEGY_TRANSMITTED_THROUGH_SPEAKER",
+     {
+         {"", AUDIO_STREAM_TTS,
+          {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
+            AUDIO_FLAG_BEACON, ""}}
+         }
+     },
+    },
+    {"STRATEGY_REROUTING",
+     {
+         {"", AUDIO_STREAM_REROUTING,
+          {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, 0, ""}}
+         }
+     },
+    },
+    {"STRATEGY_PATCH",
+     {
+         {"", AUDIO_STREAM_PATCH,
+          {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, 0, ""}}
+         }
+     },
+    }
+};
+
+const engineConfig::Config gDefaultEngineConfig = {
+    1.0,
+    gOrderedStrategies,
+    {},
+    {},
+    {}
+};
+} // namespace android
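
For illustration, the traversal that EngineBase::loadAudioPolicyEngineConfig() applies to this table (one ProductStrategy per entry, one attributes group per stream) can be sketched as a simple walk. walkDefaultStrategies is a hypothetical name; the field names follow the engineConfig structures used elsewhere in this change.

    #include <EngineConfig.h>
    #include "EngineDefaultConfig.h"

    using namespace android;

    // Count the attributes entries contributed by each default strategy.
    static void walkDefaultStrategies()
    {
        for (const auto& strategy : gOrderedStrategies) {
            size_t attrCount = 0;
            for (const auto& group : strategy.attributesGroups) {
                attrCount += group.attributesVect.size();
            }
            // e.g. STRATEGY_SONIFICATION_RESPECTFUL contributes five attributes in one group.
            (void)attrCount;
        }
    }
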
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
new file mode 100644
index 0000000..71607d1
--- /dev/null
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine/ProductStrategy"
+//#define LOG_NDEBUG 0
+
+#include "ProductStrategy.h"
+
+#include <media/TypeConverter.h>
+#include <utils/String8.h>
+#include <cstdint>
+#include <string>
+
+#include <log/log.h>
+
+
+namespace android {
+
+ProductStrategy::ProductStrategy(const std::string &name) :
+    mName(name),
+    mId(static_cast<product_strategy_t>(HandleGenerator<uint32_t>::getNextHandle()))
+{
+}
+
+void ProductStrategy::addAttributes(const AudioAttributes &audioAttributes)
+{
+    mAttributesVector.push_back(audioAttributes);
+}
+
+std::vector<android::AudioAttributes> ProductStrategy::listAudioAttributes() const
+{
+    std::vector<android::AudioAttributes> androidAa;
+    for (const auto &attr : mAttributesVector) {
+        androidAa.push_back({attr.mGroupId, attr.mStream, attr.mAttributes});
+    }
+    return androidAa;
+}
+
+AttributesVector ProductStrategy::getAudioAttributes() const
+{
+    AttributesVector attrVector;
+    for (const auto &attrGroup : mAttributesVector) {
+        attrVector.push_back(attrGroup.mAttributes);
+    }
+    if (not attrVector.empty()) {
+        return attrVector;
+    }
+    return { AUDIO_ATTRIBUTES_INITIALIZER };
+}
+
+bool ProductStrategy::matches(const audio_attributes_t attr) const
+{
+    return std::find_if(begin(mAttributesVector), end(mAttributesVector),
+                        [&attr](const auto &supportedAttr) {
+        return AudioProductStrategy::attributesMatches(supportedAttr.mAttributes, attr);
+    }) != end(mAttributesVector);
+}
+
+audio_stream_type_t ProductStrategy::getStreamTypeForAttributes(const audio_attributes_t &attr) const
+{
+    const auto iter = std::find_if(begin(mAttributesVector), end(mAttributesVector),
+                                   [&attr](const auto &supportedAttr) {
+        return AudioProductStrategy::attributesMatches(supportedAttr.mAttributes, attr); });
+    return iter != end(mAttributesVector) ? iter->mStream : AUDIO_STREAM_DEFAULT;
+}
+
+audio_attributes_t ProductStrategy::getAttributesForStreamType(audio_stream_type_t streamType) const
+{
+    const auto iter = std::find_if(begin(mAttributesVector), end(mAttributesVector),
+                                   [&streamType](const auto &supportedAttr) {
+        return supportedAttr.mStream == streamType; });
+    return iter != end(mAttributesVector) ? iter->mAttributes : AUDIO_ATTRIBUTES_INITIALIZER;
+}
+
+bool ProductStrategy::isDefault() const
+{
+    return std::find_if(begin(mAttributesVector), end(mAttributesVector), [](const auto &attr) {
+        return attr.mAttributes == defaultAttr; }) != end(mAttributesVector);
+}
+
+StreamTypeVector ProductStrategy::getSupportedStreams() const
+{
+    StreamTypeVector streams;
+    for (const auto &supportedAttr : mAttributesVector) {
+        if (std::find(begin(streams), end(streams), supportedAttr.mStream) == end(streams) &&
+                supportedAttr.mStream != AUDIO_STREAM_DEFAULT) {
+            streams.push_back(supportedAttr.mStream);
+        }
+    }
+    return streams;
+}
+
+bool ProductStrategy::supportStreamType(const audio_stream_type_t &streamType) const
+{
+    return std::find_if(begin(mAttributesVector), end(mAttributesVector),
+                        [&streamType](const auto &supportedAttr) {
+        return supportedAttr.mStream == streamType; }) != end(mAttributesVector);
+}
+
+void ProductStrategy::dump(String8 *dst, int spaces) const
+{
+    dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
+    std::string deviceLiteral;
+    if (!OutputDeviceConverter::toString(mApplicableDevices, deviceLiteral)) {
+        ALOGE("%s: failed to convert device %d", __FUNCTION__, mApplicableDevices);
+    }
+    dst->appendFormat("%*sSelected Device: {type:%s, @:%s}\n", spaces + 2, "",
+                       deviceLiteral.c_str(), mDeviceAddress.c_str());
+
+    for (const auto &attr : mAttributesVector) {
+        dst->appendFormat("%*sGroup: %d stream: %s\n", spaces + 3, "", attr.mGroupId,
+                          android::toString(attr.mStream).c_str());
+        dst->appendFormat("%*s Attributes: ", spaces + 3, "");
+        std::string attStr =
+                attr.mAttributes == defaultAttr ? "{ Any }" : android::toString(attr.mAttributes);
+        dst->appendFormat("%s\n", attStr.c_str());
+    }
+}
+
+product_strategy_t ProductStrategyMap::getProductStrategyForAttributes(
+        const audio_attributes_t &attr) const
+{
+    for (const auto &iter : *this) {
+        if (iter.second->matches(attr)) {
+            return iter.second->getId();
+        }
+    }
+    ALOGV("%s: No matching product strategy for attributes %s, return default", __FUNCTION__,
+          toString(attr).c_str());
+    return getDefault();
+}
+
+audio_attributes_t ProductStrategyMap::getAttributesForStreamType(audio_stream_type_t stream) const
+{
+    for (const auto &iter : *this) {
+        const auto strategy = iter.second;
+        if (strategy->supportStreamType(stream)) {
+            return strategy->getAttributesForStreamType(stream);
+        }
+    }
+    ALOGV("%s: No product strategy for stream %s, using default", __FUNCTION__,
+          toString(stream).c_str());
+    return {};
+}
+
+audio_stream_type_t ProductStrategyMap::getStreamTypeForAttributes(
+        const audio_attributes_t &attr) const
+{
+    for (const auto &iter : *this) {
+        audio_stream_type_t stream = iter.second->getStreamTypeForAttributes(attr);
+        if (stream != AUDIO_STREAM_DEFAULT) {
+            return stream;
+        }
+    }
+    ALOGV("%s: No product strategy for attributes %s, using default (aka MUSIC)", __FUNCTION__,
+          toString(attr).c_str());
+    return AUDIO_STREAM_MUSIC;
+}
+
+product_strategy_t ProductStrategyMap::getDefault() const
+{
+    for (const auto &iter : *this) {
+        if (iter.second->isDefault()) {
+            ALOGV("%s: using default %s", __FUNCTION__, iter.second->getName().c_str());
+            return iter.second->getId();
+        }
+    }
+    ALOGE("%s: No default product strategy defined", __FUNCTION__);
+    return PRODUCT_STRATEGY_NONE;
+}
+
+audio_attributes_t ProductStrategyMap::getAttributesForProductStrategy(
+        product_strategy_t strategy) const
+{
+    if (find(strategy) == end()) {
+        ALOGE("Invalid %d strategy requested", strategy);
+        return AUDIO_ATTRIBUTES_INITIALIZER;
+    }
+    return at(strategy)->getAudioAttributes()[0];
+}
+
+product_strategy_t ProductStrategyMap::getProductStrategyForStream(audio_stream_type_t stream) const
+{
+    for (const auto &iter : *this) {
+        if (iter.second->supportStreamType(stream)) {
+            return iter.second->getId();
+        }
+    }
+    ALOGV("%s: No product strategy for stream %d, using default", __FUNCTION__, stream);
+    return getDefault();
+}
+
+
+audio_devices_t ProductStrategyMap::getDeviceTypesForProductStrategy(
+        product_strategy_t strategy) const
+{
+    if (find(strategy) == end()) {
+        ALOGE("Invalid %d strategy requested, returning device for default strategy", strategy);
+        product_strategy_t defaultStrategy = getDefault();
+        if (defaultStrategy == PRODUCT_STRATEGY_NONE) {
+            return AUDIO_DEVICE_NONE;
+        }
+        return at(getDefault())->getDeviceTypes();
+    }
+    return at(strategy)->getDeviceTypes();
+}
+
+std::string ProductStrategyMap::getDeviceAddressForProductStrategy(product_strategy_t psId) const
+{
+    if (find(psId) == end()) {
+        ALOGE("Invalid %d strategy requested, returning device for default strategy", psId);
+        product_strategy_t defaultStrategy = getDefault();
+        if (defaultStrategy == PRODUCT_STRATEGY_NONE) {
+            return {};
+        }
+        return at(getDefault())->getDeviceAddress();
+    }
+    return at(psId)->getDeviceAddress();
+}
+
+void ProductStrategyMap::dump(String8 *dst, int spaces) const
+{
+    dst->appendFormat("%*sProduct Strategies dump:", spaces, "");
+    for (const auto &iter : *this) {
+        iter.second->dump(dst, spaces + 2);
+    }
+}
+
+} // namespace android
+
diff --git a/services/audiopolicy/engine/common/src/StreamVolumeCurves.cpp b/services/audiopolicy/engine/common/src/StreamVolumeCurves.cpp
new file mode 100644
index 0000000..fe3b000
--- /dev/null
+++ b/services/audiopolicy/engine/common/src/StreamVolumeCurves.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::Engine::StreamVolumeCurves"
+//#define LOG_NDEBUG 0
+
+#include "StreamVolumeCurves.h"
+#include <TypeConverter.h>
+
+namespace android {
+
+void StreamVolumeCurves::dump(String8 *dst, int spaces) const
+{
+    if (mCurves.empty()) {
+        return;
+    }
+    dst->appendFormat("\n%*sStreams dump:\n", spaces, "");
+    dst->appendFormat(
+                "%*sStream  Can be muted  Index Min  Index Max  Index Cur [device : index]...\n", spaces + 2, "");
+    for (const auto &streamCurve : mCurves) {
+        streamCurve.second.dump(dst, spaces + 2, false);
+    }
+    dst->appendFormat("\n%*sVolume Curves for Use Cases (aka Stream types) dump:\n", spaces, "");
+    for (const auto &streamCurve : mCurves) {
+        std::string streamTypeLiteral;
+        StreamTypeConverter::toString(streamCurve.first, streamTypeLiteral);
+        dst->appendFormat(
+                    " %s (%02d): Curve points for device category (index, attenuation in millibel)\n",
+                    streamTypeLiteral.c_str(), streamCurve.first);
+        streamCurve.second.dump(dst, spaces + 2, true);
+    }
+}
+
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp b/services/audiopolicy/engine/common/src/VolumeCurve.cpp
similarity index 68%
rename from services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
rename to services/audiopolicy/engine/common/src/VolumeCurve.cpp
index 2625733..be2ca73 100644
--- a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
+++ b/services/audiopolicy/engine/common/src/VolumeCurve.cpp
@@ -64,8 +64,7 @@
                     ((float)(mCurvePoints[indexInUiPosition].mIndex -
                             mCurvePoints[indexInUiPosition - 1].mIndex)) );
 
-    ALOGV("VOLUME mDeviceCategory %d, mStreamType %d vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
-            mDeviceCategory, mStreamType,
+    ALOGV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
             mCurvePoints[indexInUiPosition - 1].mIndex, volIdx,
             mCurvePoints[indexInUiPosition].mIndex,
             ((float)mCurvePoints[indexInUiPosition - 1].mAttenuationInMb / 100.0f), decibels,
@@ -74,55 +73,35 @@
     return decibels;
 }
 
-void VolumeCurve::dump(String8 *dst) const
+void VolumeCurve::dump(String8 *dst, int spaces, bool curvePoints) const
 {
+    if (!curvePoints) {
+        return;
+    }
     dst->append(" {");
     for (size_t i = 0; i < mCurvePoints.size(); i++) {
-        dst->appendFormat("(%3d, %5d)",
+        dst->appendFormat("%*s (%3d, %5d)", spaces, "",
                  mCurvePoints[i].mIndex, mCurvePoints[i].mAttenuationInMb);
-        dst->append(i == (mCurvePoints.size() - 1) ? " }\n" : ", ");
+        dst->appendFormat(i == (mCurvePoints.size() - 1) ? " }\n" : ", ");
     }
 }
 
-void VolumeCurvesForStream::dump(String8 *dst, int spaces = 0, bool curvePoints) const
+void VolumeCurves::dump(String8 *dst, int spaces, bool curvePoints) const
 {
     if (!curvePoints) {
-        dst->appendFormat("%s         %02d         %02d         ",
-                 mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
-        for (size_t i = 0; i < mIndexCur.size(); i++) {
-            dst->appendFormat("%04x : %02d, ", mIndexCur.keyAt(i), mIndexCur.valueAt(i));
+        dst->appendFormat("%*s%02d      %s         %03d        %03d        ", spaces, "",
+                          mStream, mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
+        for (const auto &pair : mIndexCur) {
+            dst->appendFormat("%*s %04x : %02d, ", spaces, "", pair.first, pair.second);
         }
-        dst->append("\n");
+        dst->appendFormat("\n");
         return;
     }
-
     for (size_t i = 0; i < size(); i++) {
         std::string deviceCatLiteral;
         DeviceCategoryConverter::toString(keyAt(i), deviceCatLiteral);
-        dst->appendFormat("%*s %s :",
-                 spaces, "", deviceCatLiteral.c_str());
-        valueAt(i)->dump(dst);
-    }
-    dst->append("\n");
-}
-
-void VolumeCurvesCollection::dump(String8 *dst) const
-{
-    dst->append("\nStreams dump:\n");
-    dst->append(
-             " Stream  Can be muted  Index Min  Index Max  Index Cur [device : index]...\n");
-    for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat(" %02zu      ", i);
-        valueAt(i).dump(dst);
-    }
-    dst->append("\nVolume Curves for Use Cases (aka Stream types) dump:\n");
-    for (size_t i = 0; i < size(); i++) {
-        std::string streamTypeLiteral;
-        StreamTypeConverter::toString(keyAt(i), streamTypeLiteral);
-        dst->appendFormat(
-                 " %s (%02zu): Curve points for device category (index, attenuation in millibel)\n",
-                 streamTypeLiteral.c_str(), i);
-        valueAt(i).dump(dst, 2, true);
+        dst->appendFormat("%*s %s :", spaces, "", deviceCatLiteral.c_str());
+        valueAt(i)->dump(dst, 2, true);
     }
 }
 
diff --git a/services/audiopolicy/engine/config/Android.mk b/services/audiopolicy/engine/config/Android.mk
new file mode 100644
index 0000000..0b292a5
--- /dev/null
+++ b/services/audiopolicy/engine/config/Android.mk
@@ -0,0 +1,42 @@
+LOCAL_PATH := $(call my-dir)
+
+##################################################################
+# Component build
+##################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_EXPORT_C_INCLUDE_DIRS :=  $(LOCAL_PATH)/include
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_EXPORT_C_INCLUDE_DIRS) \
+    external/libxml2/include \
+    external/icu/icu4c/source/common
+
+LOCAL_SRC_FILES := \
+    src/EngineConfig.cpp
+
+LOCAL_CFLAGS += -Wall -Werror -Wextra
+
+LOCAL_SHARED_LIBRARIES := \
+    libmedia_helper \
+    libandroidicu \
+    libxml2 \
+    libutils \
+    liblog \
+    libcutils
+
+LOCAL_STATIC_LIBRARIES := \
+    libaudiopolicycomponents
+
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
+LOCAL_MODULE := libaudiopolicyengineconfig
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_HEADER_LIBRARIES := \
+    libaudio_system_headers \
+    libaudiopolicycommon
+
+include $(BUILD_SHARED_LIBRARY)
+
diff --git a/services/audiopolicy/engine/config/include/EngineConfig.h b/services/audiopolicy/engine/config/include/EngineConfig.h
new file mode 100644
index 0000000..a188115
--- /dev/null
+++ b/services/audiopolicy/engine/config/include/EngineConfig.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+
+#include <string>
+#include <vector>
+#include <utils/Errors.h>
+
+struct _xmlNode;
+struct _xmlDoc;
+
+namespace android {
+namespace engineConfig {
+
+/** Default path of the audio policy engine configuration file. */
+constexpr char DEFAULT_PATH[] = "/vendor/etc/audio_policy_engine_configuration.xml";
+
+/** Directories in which the audio policy engine configuration file will be searched for. */
+constexpr const char* POLICY_USAGE_LIBRARY_PATH[] = {"/odm/etc/", "/vendor/etc/", "/system/etc/"};
+
+using AttributesVector = std::vector<audio_attributes_t>;
+using StreamVector = std::vector<audio_stream_type_t>;
+
+struct AttributesGroup {
+    std::string name;
+    audio_stream_type_t stream;
+    AttributesVector attributesVect;
+};
+
+using AttributesGroups = std::vector<AttributesGroup>;
+
+struct CurvePoint {
+    int index;
+    int attenuationInMb;
+};
+using CurvePoints = std::vector<CurvePoint>;
+
+struct VolumeCurve {
+    std::string deviceCategory;
+    CurvePoints curvePoints;
+};
+using VolumeCurves = std::vector<VolumeCurve>;
+
+struct VolumeGroup {
+    std::string name;
+    std::string stream;
+    int indexMin;
+    int indexMax;
+    VolumeCurves volumeCurves;
+};
+using VolumeGroups = std::vector<VolumeGroup>;
+
+struct ProductStrategy {
+    std::string name;
+    AttributesGroups attributesGroups;
+};
+
+using ProductStrategies = std::vector<ProductStrategy>;
+
+using ValuePair = std::pair<uint32_t, std::string>;
+using ValuePairs = std::vector<ValuePair>;
+
+struct CriterionType
+{
+    std::string name;
+    bool isInclusive;
+    ValuePairs valuePairs;
+};
+
+using CriterionTypes = std::vector<CriterionType>;
+
+struct Criterion
+{
+    std::string name;
+    std::string typeName;
+    std::string defaultLiteralValue;
+};
+
+using Criteria = std::vector<Criterion>;
+
+struct Config {
+    float version;
+    ProductStrategies productStrategies;
+    Criteria criteria;
+    CriterionTypes criterionTypes;
+    VolumeGroups volumeGroups;
+};
+
+/** Result of `parse(const char*)` */
+struct ParsingResult {
+    /** Parsed config, nullptr if the xml lib could not load the file */
+    std::unique_ptr<Config> parsedConfig;
+    size_t nbSkippedElement; /**< Number of invalid elements (strategies, criteria, volumes...) skipped. */
+};
+
+/** Parses the provided audio policy usage configuration.
+ * @return audio policy usage @see Config
+ */
+ParsingResult parse(const char* path = DEFAULT_PATH);
+android::status_t parseLegacyVolumes(VolumeGroups &volumeGroups);
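+
+/* Illustrative usage only (not part of the API contract); `result` below is a
+ * hypothetical local variable:
+ *
+ *   auto result = engineConfig::parse();              // reads DEFAULT_PATH
+ *   if (result.parsedConfig != nullptr) {
+ *       // inspect result.parsedConfig->productStrategies, volumeGroups, ...
+ *       // result.nbSkippedElement counts elements that failed to parse.
+ *   }
+ */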
+
+} // namespace engineConfig
+} // namespace android
diff --git a/services/audiopolicy/engine/config/src/EngineConfig.cpp b/services/audiopolicy/engine/config/src/EngineConfig.cpp
new file mode 100644
index 0000000..00fbac4
--- /dev/null
+++ b/services/audiopolicy/engine/config/src/EngineConfig.cpp
@@ -0,0 +1,682 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine/Config"
+//#define LOG_NDEBUG 0
+
+#include "EngineConfig.h"
+#include <policy.h>
+#include <cutils/properties.h>
+#include <media/TypeConverter.h>
+#include <media/convert.h>
+#include <utils/Log.h>
+#include <libxml/parser.h>
+#include <libxml/xinclude.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <sstream>
+#include <istream>
+
+#include <cstdint>
+
+namespace android {
+
+using utilities::convertTo;
+
+namespace engineConfig {
+
+static constexpr const char *gVersionAttribute = "version";
+static const char *const gReferenceElementName = "reference";
+static const char *const gReferenceAttributeName = "name";
+
+template<typename E, typename C>
+struct BaseSerializerTraits {
+    typedef E Element;
+    typedef C Collection;
+    typedef void* PtrSerializingCtx;
+};
+
+struct AttributesGroupTraits : public BaseSerializerTraits<AttributesGroup, AttributesGroups> {
+    static constexpr const char *tag = "AttributesGroup";
+    static constexpr const char *collectionTag = "AttributesGroups";
+
+    struct Attributes {
+        static constexpr const char *name = "name";
+        static constexpr const char *streamType = "streamType";
+    };
+    static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root, Collection &ps);
+};
+
+struct ProductStrategyTraits : public BaseSerializerTraits<ProductStrategy, ProductStrategies> {
+    static constexpr const char *tag = "ProductStrategy";
+    static constexpr const char *collectionTag = "ProductStrategies";
+
+    struct Attributes {
+        static constexpr const char *name = "name";
+    };
+    static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root, Collection &ps);
+};
+struct ValueTraits : public BaseSerializerTraits<ValuePair, ValuePairs> {
+    static constexpr const char *tag = "value";
+    static constexpr const char *collectionTag = "values";
+
+    struct Attributes {
+        static constexpr const char *literal = "literal";
+        static constexpr const char *numerical = "numerical";
+    };
+
+    static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
+                                         Collection &collection);
+};
+struct CriterionTypeTraits : public BaseSerializerTraits<CriterionType, CriterionTypes> {
+    static constexpr const char *tag = "criterion_type";
+    static constexpr const char *collectionTag = "criterion_types";
+
+    struct Attributes {
+        static constexpr const char *name = "name";
+        static constexpr const char *type = "type";
+    };
+
+    static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
+                                         Collection &collection);
+};
+struct CriterionTraits : public BaseSerializerTraits<Criterion, Criteria> {
+    static constexpr const char *tag = "criterion";
+    static constexpr const char *collectionTag = "criteria";
+
+    struct Attributes {
+        static constexpr const char *name = "name";
+        static constexpr const char *type = "type";
+        static constexpr const char *defaultVal = "default";
+    };
+
+    static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
+                                         Collection &collection);
+};
+struct VolumeTraits : public BaseSerializerTraits<VolumeCurve, VolumeCurves> {
+    static constexpr const char *tag = "volume";
+    static constexpr const char *collectionTag = "volumes";
+    static constexpr const char *volumePointTag = "point";
+
+    struct Attributes {
+        static constexpr const char *deviceCategory = "deviceCategory";
+        static constexpr const char *reference = "ref"; /**< For volume curves factorization. */
+    };
+
+    static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
+                                         Collection &collection);
+};
+struct VolumeGroupTraits : public BaseSerializerTraits<VolumeGroup, VolumeGroups> {
+    static constexpr const char *tag = "volumeGroup";
+    static constexpr const char *collectionTag = "volumeGroups";
+
+    struct Attributes {
+        static constexpr const char *name = "name";
+        static constexpr const char *stream = "stream"; // For legacy volume curves
+        static constexpr const char *indexMin = "indexMin";
+        static constexpr const char *indexMax = "indexMax";
+    };
+
+    static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
+                                         Collection &collection);
+};
+
+using xmlCharUnique = std::unique_ptr<xmlChar, decltype(xmlFree)>;
+
+std::string getXmlAttribute(const xmlNode *cur, const char *attribute)
+{
+    xmlCharUnique charPtr(xmlGetProp(cur, reinterpret_cast<const xmlChar *>(attribute)), xmlFree);
+    if (charPtr == NULL) {
+        return "";
+    }
+    std::string value(reinterpret_cast<const char*>(charPtr.get()));
+    return value;
+}
+
+static void getReference(const _xmlNode *root, const _xmlNode *&refNode, const std::string &refName,
+                         const char *collectionTag)
+{
+    for (root = root->xmlChildrenNode; root != NULL; root = root->next) {
+        if (!xmlStrcmp(root->name, (const xmlChar *)collectionTag)) {
+            for (xmlNode *cur = root->xmlChildrenNode; cur != NULL; cur = cur->next) {
+                if ((!xmlStrcmp(cur->name, (const xmlChar *)gReferenceElementName))) {
+                    std::string name = getXmlAttribute(cur, gReferenceAttributeName);
+                    if (refName == name) {
+                        refNode = cur;
+                        return;
+                    }
+                }
+            }
+        }
+    }
+    return;
+}
+
+template <class Trait>
+static status_t deserializeCollection(_xmlDoc *doc, const _xmlNode *cur,
+                                      typename Trait::Collection &collection,
+                                      size_t &nbSkippedElement)
+{
+    for (cur = cur->xmlChildrenNode; cur != NULL; cur = cur->next) {
+        if (xmlStrcmp(cur->name, (const xmlChar *)Trait::collectionTag) &&
+            xmlStrcmp(cur->name, (const xmlChar *)Trait::tag)) {
+            continue;
+        }
+        const xmlNode *child = cur;
+        if (!xmlStrcmp(child->name, (const xmlChar *)Trait::collectionTag)) {
+            child = child->xmlChildrenNode;
+        }
+        for (; child != NULL; child = child->next) {
+            if (!xmlStrcmp(child->name, (const xmlChar *)Trait::tag)) {
+                status_t status = Trait::deserialize(doc, child, collection);
+                if (status != NO_ERROR) {
+                    nbSkippedElement += 1;
+                }
+            }
+        }
+        if (!xmlStrcmp(cur->name, (const xmlChar *)Trait::tag)) {
+            return NO_ERROR;
+        }
+    }
+    return NO_ERROR;
+}
+
+static constexpr const char *attributesAttributeRef = "attributesRef"; /**< for factorization. */
+
+static status_t parseAttributes(const _xmlNode *cur, audio_attributes_t &attributes)
+{
+    for (; cur != NULL; cur = cur->next) {
+        if (!xmlStrcmp(cur->name, (const xmlChar *)("ContentType"))) {
+            std::string contentTypeXml = getXmlAttribute(cur, "value");
+            audio_content_type_t contentType;
+            if (not AudioContentTypeConverter::fromString(contentTypeXml.c_str(), contentType)) {
+                ALOGE("Invalid content type %s", contentTypeXml.c_str());
+                return BAD_VALUE;
+            }
+            attributes.content_type = contentType;
+            ALOGV("%s content type %s",  __FUNCTION__, contentTypeXml.c_str());
+        }
+        if (!xmlStrcmp(cur->name, (const xmlChar *)("Usage"))) {
+            std::string usageXml = getXmlAttribute(cur, "value");
+            audio_usage_t usage;
+            if (not UsageTypeConverter::fromString(usageXml.c_str(), usage)) {
+                ALOGE("Invalid usage %s", usageXml.c_str());
+                return BAD_VALUE;
+            }
+            attributes.usage = usage;
+            ALOGV("%s usage %s",  __FUNCTION__, usageXml.c_str());
+        }
+        if (!xmlStrcmp(cur->name, (const xmlChar *)("Flags"))) {
+            std::string flags = getXmlAttribute(cur, "value");
+
+            ALOGV("%s flags %s",  __FUNCTION__, flags.c_str());
+            attributes.flags = AudioFlagConverter::maskFromString(flags, " ");
+        }
+        if (!xmlStrcmp(cur->name, (const xmlChar *)("Bundle"))) {
+            std::string bundleKey = getXmlAttribute(cur, "key");
+            std::string bundleValue = getXmlAttribute(cur, "value");
+
+            ALOGV("%s Bundle %s %s",  __FUNCTION__, bundleKey.c_str(), bundleValue.c_str());
+
+            std::string tags(bundleKey + "=" + bundleValue);
+            std::strncpy(attributes.tags, tags.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
+        }
+    }
+    return NO_ERROR;
+}
+
+static status_t deserializeAttributes(_xmlDoc *doc, const _xmlNode *cur,
+                                      audio_attributes_t &attributes) {
+    // Retrieve content type, usage, flags, and bundle from xml
+    for (; cur != NULL; cur = cur->next) {
+        if (not xmlStrcmp(cur->name, (const xmlChar *)("Attributes"))) {
+            const xmlNode *attrNode = cur;
+            std::string attrRef = getXmlAttribute(cur, attributesAttributeRef);
+            if (!attrRef.empty()) {
+                getReference(xmlDocGetRootElement(doc), attrNode, attrRef, attributesAttributeRef);
+                if (attrNode == NULL) {
+                    ALOGE("%s: No reference found for %s", __FUNCTION__, attrRef.c_str());
+                    return BAD_VALUE;
+                }
+                return deserializeAttributes(doc, attrNode->xmlChildrenNode, attributes);
+            }
+            return parseAttributes(attrNode->xmlChildrenNode, attributes);
+        }
+        if (not xmlStrcmp(cur->name, (const xmlChar *)("ContentType")) ||
+                not xmlStrcmp(cur->name, (const xmlChar *)("Usage")) ||
+                not xmlStrcmp(cur->name, (const xmlChar *)("Flags")) ||
+                not xmlStrcmp(cur->name, (const xmlChar *)("Bundle"))) {
+            return parseAttributes(cur, attributes);
+        }
+    }
+    return BAD_VALUE;
+}
+
+static status_t deserializeAttributesCollection(_xmlDoc *doc, const _xmlNode *cur,
+                                                AttributesVector &collection)
+{
+    status_t ret = BAD_VALUE;
+    // Either a single set of attributes is provided, or a collection of supported attributes.
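+    // Illustrative XML shapes accepted here (element names come from the checks below;
+    // the value literals are assumptions based on the media TypeConverters):
+    //   <Attributes> <Usage value="AUDIO_USAGE_MEDIA"/> </Attributes>
+    // or, with the enclosing "Attributes" element omitted (single set only):
+    //   <Usage value="AUDIO_USAGE_MEDIA"/>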
+    for (cur = cur->xmlChildrenNode; cur != NULL; cur = cur->next) {
+        if (not xmlStrcmp(cur->name, (const xmlChar *)("Attributes")) ||
+                not xmlStrcmp(cur->name, (const xmlChar *)("ContentType")) ||
+                not xmlStrcmp(cur->name, (const xmlChar *)("Usage")) ||
+                not xmlStrcmp(cur->name, (const xmlChar *)("Flags")) ||
+                not xmlStrcmp(cur->name, (const xmlChar *)("Bundle"))) {
+            audio_attributes_t attributes = AUDIO_ATTRIBUTES_INITIALIZER;
+            ret = deserializeAttributes(doc, cur, attributes);
+            if (ret == NO_ERROR) {
+                collection.push_back(attributes);
+                // We are done if the "Attributes" element is omitted: only one set of attributes is allowed then.
+                if (xmlStrcmp(cur->name, (const xmlChar *)("Attributes"))) {
+                    return ret;
+                }
+            }
+        }
+    }
+    return ret;
+}
+
+status_t AttributesGroupTraits::deserialize(_xmlDoc *doc, const _xmlNode *child,
+                                            Collection &attributesGroup)
+{
+    std::string name = getXmlAttribute(child, Attributes::name);
+    if (name.empty()) {
+        ALOGV("AttributesGroupTraits No attribute %s found", Attributes::name);
+    }
+    ALOGV("%s: %s = %s", __FUNCTION__, Attributes::name, name.c_str());
+
+    audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT;
+    std::string streamTypeXml = getXmlAttribute(child, Attributes::streamType);
+    if (streamTypeXml.empty()) {
+        ALOGV("%s: No attribute %s found", __FUNCTION__, Attributes::streamType);
+    } else {
+        ALOGV("%s: %s = %s", __FUNCTION__, Attributes::streamType, streamTypeXml.c_str());
+        if (not StreamTypeConverter::fromString(streamTypeXml.c_str(), streamType)) {
+            ALOGE("Invalid stream type %s", streamTypeXml.c_str());
+            return BAD_VALUE;
+        }
+    }
+    AttributesVector attributesVect;
+    deserializeAttributesCollection(doc, child, attributesVect);
+
+    attributesGroup.push_back({name, streamType, attributesVect});
+    return NO_ERROR;
+}
+
+status_t ValueTraits::deserialize(_xmlDoc */*doc*/, const _xmlNode *child, Collection &values)
+{
+    std::string literal = getXmlAttribute(child, Attributes::literal);
+    if (literal.empty()) {
+        ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::literal);
+        return BAD_VALUE;
+    }
+    uint32_t numerical = 0;
+    std::string numericalTag = getXmlAttribute(child, Attributes::numerical);
+    if (numericalTag.empty()) {
+        ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::literal);
+        return BAD_VALUE;
+    }
+    if (!convertTo(numericalTag, numerical)) {
+        ALOGE("%s: : Invalid value(%s)", __FUNCTION__, numericalTag.c_str());
+        return BAD_VALUE;
+    }
+    values.push_back({numerical, literal});
+    return NO_ERROR;
+}
+
+status_t CriterionTypeTraits::deserialize(_xmlDoc *doc, const _xmlNode *child,
+                                          Collection &criterionTypes)
+{
+    std::string name = getXmlAttribute(child, Attributes::name);
+    if (name.empty()) {
+        ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::name);
+        return BAD_VALUE;
+    }
+    ALOGV("%s: %s %s = %s", __FUNCTION__, tag, Attributes::name, name.c_str());
+
+    std::string type = getXmlAttribute(child, Attributes::type);
+    if (type.empty()) {
+        ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::type);
+        return BAD_VALUE;
+    }
+    ALOGV("%s: %s %s = %s", __FUNCTION__, tag, Attributes::type, type.c_str());
+    bool isInclusive(type == "inclusive");
+
+    ValuePairs pairs;
+    size_t nbSkippedElements = 0;
+    deserializeCollection<ValueTraits>(doc, child, pairs, nbSkippedElements);
+    criterionTypes.push_back({name, isInclusive, pairs});
+    return NO_ERROR;
+}
+
+status_t CriterionTraits::deserialize(_xmlDoc */*doc*/, const _xmlNode *child,
+                                      Collection &criteria)
+{
+    std::string name = getXmlAttribute(child, Attributes::name);
+    if (name.empty()) {
+        ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::name);
+        return BAD_VALUE;
+    }
+    ALOGV("%s: %s = %s", __FUNCTION__, Attributes::name, name.c_str());
+
+    std::string defaultValue = getXmlAttribute(child, Attributes::defaultVal);
+    if (defaultValue.empty()) {
+        // Providing a default value for a criterion is not mandatory, although it is recommended.
+        ALOGV("%s: No attribute %s found (but recommended)", __FUNCTION__, Attributes::defaultVal);
+    }
+    ALOGV("%s: %s = %s", __FUNCTION__, Attributes::defaultVal, defaultValue.c_str());
+
+    std::string typeName = getXmlAttribute(child, Attributes::type);
+    if (typeName.empty()) {
+        ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::name);
+        return BAD_VALUE;
+    }
+    ALOGV("%s: %s = %s", __FUNCTION__, Attributes::type, typeName.c_str());
+
+    criteria.push_back({name, typeName, defaultValue});
+    return NO_ERROR;
+}
+
+status_t ProductStrategyTraits::deserialize(_xmlDoc *doc, const _xmlNode *child,
+                                            Collection &strategies)
+{
+    std::string name = getXmlAttribute(child, Attributes::name);
+    if (name.empty()) {
+        ALOGE("ProductStrategyTraits No attribute %s found", Attributes::name);
+        return BAD_VALUE;
+    }
+    ALOGV("%s: %s = %s", __FUNCTION__, Attributes::name, name.c_str());
+
+    size_t skipped = 0;
+    AttributesGroups attrGroups;
+    deserializeCollection<AttributesGroupTraits>(doc, child, attrGroups, skipped);
+
+    strategies.push_back({name, attrGroups});
+    return NO_ERROR;
+}
+
+status_t VolumeTraits::deserialize(_xmlDoc *doc, const _xmlNode *root, Collection &volumes)
+{
+    std::string deviceCategory = getXmlAttribute(root, Attributes::deviceCategory);
+    if (deviceCategory.empty()) {
+        ALOGW("%s: No %s found", __FUNCTION__, Attributes::deviceCategory);
+    }
+
+    std::string referenceName = getXmlAttribute(root, Attributes::reference);
+    const _xmlNode *ref = NULL;
+    if (!referenceName.empty()) {
+        getReference(xmlDocGetRootElement(doc), ref, referenceName, collectionTag);
+        if (ref == NULL) {
+            ALOGE("%s: No reference Ptr found for %s", __FUNCTION__, referenceName.c_str());
+            return BAD_VALUE;
+        }
+    }
+    // Retrieve the curve points from the reference element if found, or directly from the current curve
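+    // Each <point> child is parsed below as two comma-separated integers,
+    // "index,attenuationInMb", e.g. <point>33,-2400</point> (illustrative values).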
+    CurvePoints curvePoints;
+    for (const xmlNode *child = referenceName.empty() ?
+         root->xmlChildrenNode : ref->xmlChildrenNode; child != NULL; child = child->next) {
+        if (!xmlStrcmp(child->name, (const xmlChar *)volumePointTag)) {
+            xmlCharUnique pointXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+            if (pointXml == NULL) {
+                return BAD_VALUE;
+            }
+            ALOGV("%s: %s=%s", __func__, tag, reinterpret_cast<const char*>(pointXml.get()));
+            std::vector<int> point;
+            collectionFromString<DefaultTraits<int>>(
+                        reinterpret_cast<const char*>(pointXml.get()), point, ",");
+            if (point.size() != 2) {
+                ALOGE("%s: Invalid %s: %s", __func__, volumePointTag,
+                      reinterpret_cast<const char*>(pointXml.get()));
+                return BAD_VALUE;
+            }
+            curvePoints.push_back({point[0], point[1]});
+        }
+    }
+    volumes.push_back({ deviceCategory, curvePoints });
+    return NO_ERROR;
+}
+
+status_t VolumeGroupTraits::deserialize(_xmlDoc *doc, const _xmlNode *root, Collection &volumes)
+{
+    std::string name;
+    std::string stream = {};
+    int indexMin = 0;
+    int indexMax = 0;
+
+    for (const xmlNode *child = root->xmlChildrenNode; child != NULL; child = child->next) {
+        if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::name)) {
+            xmlCharUnique nameXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+            if (nameXml == nullptr) {
+                return BAD_VALUE;
+            }
+            name = reinterpret_cast<const char*>(nameXml.get());
+        }
+        if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::stream)) {
+            xmlCharUnique streamXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+            if (streamXml == nullptr) {
+                return BAD_VALUE;
+            }
+            stream = reinterpret_cast<const char*>(streamXml.get());
+        }
+        if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::indexMin)) {
+            xmlCharUnique indexMinXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+            if (indexMinXml == nullptr) {
+                return BAD_VALUE;
+            }
+            std::string indexMinLiteral(reinterpret_cast<const char*>(indexMinXml.get()));
+            if (!convertTo(indexMinLiteral, indexMin)) {
+                return BAD_VALUE;
+            }
+        }
+        if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::indexMax)) {
+            xmlCharUnique indexMaxXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+            if (indexMaxXml == nullptr) {
+                return BAD_VALUE;
+            }
+            std::string indexMaxLiteral(reinterpret_cast<const char*>(indexMaxXml.get()));
+            if (!convertTo(indexMaxLiteral, indexMax)) {
+                return BAD_VALUE;
+            }
+        }
+    }
+    ALOGV("%s: group=%s stream=%s indexMin=%d, indexMax=%d",
+          __func__, name.c_str(), stream.c_str(), indexMin, indexMax);
+
+    VolumeCurves groupVolumeCurves;
+    size_t skipped = 0;
+    deserializeCollection<VolumeTraits>(doc, root, groupVolumeCurves, skipped);
+    volumes.push_back({ name, stream, indexMin, indexMax, groupVolumeCurves });
+    return NO_ERROR;
+}
+
+static constexpr const char *legacyVolumecollectionTag = "volumes";
+static constexpr const char *legacyVolumeTag = "volume";
+
+status_t deserializeLegacyVolume(_xmlDoc *doc, const _xmlNode *cur,
+                                 std::map<std::string, VolumeCurves> &legacyVolumes)
+{
+    std::string streamTypeLiteral = getXmlAttribute(cur, "stream");
+    if (streamTypeLiteral.empty()) {
+        ALOGE("%s: No attribute stream found", __func__);
+        return BAD_VALUE;
+    }
+    std::string deviceCategoryLiteral = getXmlAttribute(cur, "deviceCategory");
+    if (deviceCategoryLiteral.empty()) {
+        ALOGE("%s: No attribute deviceCategory found", __func__);
+        return BAD_VALUE;
+    }
+    std::string referenceName = getXmlAttribute(cur, "ref");
+    const xmlNode *ref = NULL;
+    if (!referenceName.empty()) {
+        getReference(xmlDocGetRootElement(doc), ref, referenceName, legacyVolumecollectionTag);
+        if (ref == NULL) {
+            ALOGE("%s: No reference Ptr found for %s", __func__, referenceName.c_str());
+            return BAD_VALUE;
+        }
+        ALOGV("%s: reference found for %s", __func__, referenceName.c_str());
+    }
+    CurvePoints curvePoints;
+    for (const xmlNode *child = referenceName.empty() ?
+         cur->xmlChildrenNode : ref->xmlChildrenNode; child != NULL; child = child->next) {
+        if (!xmlStrcmp(child->name, (const xmlChar *)VolumeTraits::volumePointTag)) {
+            xmlCharUnique pointXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+            if (pointXml == NULL) {
+                return BAD_VALUE;
+            }
+            ALOGV("%s: %s=%s", __func__, legacyVolumeTag,
+                  reinterpret_cast<const char*>(pointXml.get()));
+            std::vector<int> point;
+            collectionFromString<DefaultTraits<int>>(
+                        reinterpret_cast<const char*>(pointXml.get()), point, ",");
+            if (point.size() != 2) {
+                ALOGE("%s: Invalid %s: %s", __func__, VolumeTraits::volumePointTag,
+                      reinterpret_cast<const char*>(pointXml.get()));
+                return BAD_VALUE;
+            }
+            curvePoints.push_back({point[0], point[1]});
+        }
+    }
+    legacyVolumes[streamTypeLiteral].push_back({ deviceCategoryLiteral, curvePoints });
+    return NO_ERROR;
+}
+
+static status_t deserializeLegacyVolumeCollection(_xmlDoc *doc, const _xmlNode *cur,
+                                                  VolumeGroups &volumeGroups,
+                                                  size_t &nbSkippedElement)
+{
+    std::map<std::string, VolumeCurves> legacyVolumeMap;
+    for (cur = cur->xmlChildrenNode; cur != NULL; cur = cur->next) {
+        if (xmlStrcmp(cur->name, (const xmlChar *)legacyVolumecollectionTag)) {
+            continue;
+        }
+        const xmlNode *child = cur->xmlChildrenNode;
+        for (; child != NULL; child = child->next) {
+            if (!xmlStrcmp(child->name, (const xmlChar *)legacyVolumeTag)) {
+
+                status_t status = deserializeLegacyVolume(doc, child, legacyVolumeMap);
+                if (status != NO_ERROR) {
+                    nbSkippedElement += 1;
+                }
+            }
+        }
+    }
+    for (const auto &volumeMapIter : legacyVolumeMap) {
+        volumeGroups.push_back({ volumeMapIter.first, volumeMapIter.first, 0, 100,
+                                 volumeMapIter.second });
+    }
+    return NO_ERROR;
+}
+
+ParsingResult parse(const char* path) {
+    xmlDocPtr doc;
+    doc = xmlParseFile(path);
+    if (doc == NULL) {
+        ALOGE("%s: Could not parse document %s", __FUNCTION__, path);
+        return {nullptr, 0};
+    }
+    xmlNodePtr cur = xmlDocGetRootElement(doc);
+    if (cur == NULL) {
+        ALOGE("%s: Could not parse: empty document %s", __FUNCTION__, path);
+        xmlFreeDoc(doc);
+        return {nullptr, 0};
+    }
+    if (xmlXIncludeProcess(doc) < 0) {
+        ALOGE("%s: libxml failed to resolve XIncludes on document %s", __FUNCTION__, path);
+        xmlFreeDoc(doc);
+        return {nullptr, 0};
+    }
+    std::string version = getXmlAttribute(cur, gVersionAttribute);
+    if (version.empty()) {
+        ALOGE("%s: No version found", __func__);
+        xmlFreeDoc(doc);
+        return {nullptr, 0};
+    }
+    size_t nbSkippedElements = 0;
+    auto config = std::make_unique<Config>();
+    config->version = std::stof(version);
+    deserializeCollection<ProductStrategyTraits>(
+                doc, cur, config->productStrategies, nbSkippedElements);
+    deserializeCollection<CriterionTraits>(
+                doc, cur, config->criteria, nbSkippedElements);
+    deserializeCollection<CriterionTypeTraits>(
+                doc, cur, config->criterionTypes, nbSkippedElements);
+    deserializeCollection<VolumeGroupTraits>(
+                doc, cur, config->volumeGroups, nbSkippedElements);
+
+    return {std::move(config), nbSkippedElements};
+}
+
+android::status_t parseLegacyVolumeFile(const char* path, VolumeGroups &volumeGroups) {
+    xmlDocPtr doc;
+    doc = xmlParseFile(path);
+    if (doc == NULL) {
+        ALOGE("%s: Could not parse document %s", __FUNCTION__, path);
+        return BAD_VALUE;
+    }
+    xmlNodePtr cur = xmlDocGetRootElement(doc);
+    if (cur == NULL) {
+        ALOGE("%s: Could not parse: empty document %s", __FUNCTION__, path);
+        xmlFreeDoc(doc);
+        return BAD_VALUE;
+    }
+    if (xmlXIncludeProcess(doc) < 0) {
+        ALOGE("%s: libxml failed to resolve XIncludes on document %s", __FUNCTION__, path);
+        xmlFreeDoc(doc);
+        return BAD_VALUE;
+    }
+    size_t nbSkippedElements = 0;
+    return deserializeLegacyVolumeCollection(doc, cur, volumeGroups, nbSkippedElements);
+}
+
+static const char *kConfigLocationList[] = {"/odm/etc", "/vendor/etc", "/system/etc"};
+static const int kConfigLocationListSize =
+        (sizeof(kConfigLocationList) / sizeof(kConfigLocationList[0]));
+static const int gApmXmlConfigFilePathMaxLength = 128;
+
+static constexpr const char *apmXmlConfigFileName = "audio_policy_configuration.xml";
+static constexpr const char *apmA2dpOffloadDisabledXmlConfigFileName =
+        "audio_policy_configuration_a2dp_offload_disabled.xml";
+
+android::status_t parseLegacyVolumes(VolumeGroups &volumeGroups) {
+    char audioPolicyXmlConfigFile[gApmXmlConfigFilePathMaxLength];
+    std::vector<const char *> fileNames;
+    status_t ret;
+
+    if (property_get_bool("ro.bluetooth.a2dp_offload.supported", false) &&
+            property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+        // A2DP offload supported but disabled: try to use special XML file
+        fileNames.push_back(apmA2dpOffloadDisabledXmlConfigFileName);
+    }
+    fileNames.push_back(apmXmlConfigFileName);
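+    // With the defaults above, the files probed are, for example,
+    //   /odm/etc/audio_policy_configuration.xml, /vendor/etc/..., /system/etc/...
+    // and, when the a2dp_offload_disabled variant applies, it is tried in every
+    // location before the default file.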
+
+    for (const char* fileName : fileNames) {
+        for (int i = 0; i < kConfigLocationListSize; i++) {
+            snprintf(audioPolicyXmlConfigFile, sizeof(audioPolicyXmlConfigFile),
+                     "%s/%s", kConfigLocationList[i], fileName);
+            ret = parseLegacyVolumeFile(audioPolicyXmlConfigFile, volumeGroups);
+            if (ret == NO_ERROR) {
+                return ret;
+            }
+        }
+    }
+    return BAD_VALUE;
+}
+
+} // namespace engineConfig
+} // namespace android
diff --git a/services/audiopolicy/engine/interface/Android.bp b/services/audiopolicy/engine/interface/Android.bp
new file mode 100644
index 0000000..2ea42b6
--- /dev/null
+++ b/services/audiopolicy/engine/interface/Android.bp
@@ -0,0 +1,19 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_library_headers {
+    name: "libaudiopolicyengine_interface_headers",
+    host_supported: true,
+    export_include_dirs: ["."],
+}
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
index 04594f5..c9e9507 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
@@ -17,7 +17,9 @@
 #pragma once
 
 #include <AudioPolicyManagerObserver.h>
-#include <RoutingStrategy.h>
+#include <media/AudioProductStrategy.h>
+#include <IVolumeCurves.h>
+#include <policy.h>
 #include <Volume.h>
 #include <HwModule.h>
 #include <DeviceDescriptor.h>
@@ -28,6 +30,10 @@
 
 namespace android {
 
+using DeviceStrategyMap = std::map<product_strategy_t, DeviceVector>;
+using StrategyVector = std::vector<product_strategy_t>;
+
+
 /**
  * This interface is dedicated to the policy manager that a Policy Engine shall implement.
  */
@@ -50,42 +56,6 @@
     virtual void setObserver(AudioPolicyManagerObserver *observer) = 0;
 
     /**
-     * Get the input device selected for a given input source.
-     *
-     * @param[in] inputSource to get the selected input device associated to
-     *
-     * @return selected input device for the given input source, may be none if error.
-     */
-    virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const = 0;
-
-    /**
-     * Get the output device associated to a given strategy.
-     *
-     * @param[in] stream type for which the selected ouput device is requested.
-     *
-     * @return selected ouput device for the given strategy, may be none if error.
-     */
-    virtual audio_devices_t getDeviceForStrategy(routing_strategy stategy) const = 0;
-
-    /**
-     * Get the strategy selected for a given stream type.
-     *
-     * @param[in] stream: for which the selected strategy followed by is requested.
-     *
-     * @return strategy to be followed.
-     */
-    virtual routing_strategy getStrategyForStream(audio_stream_type_t stream) = 0;
-
-    /**
-     * Get the strategy selected for a given usage.
-     *
-     * @param[in] usage to get the selected strategy followed by.
-     *
-     * @return strategy to be followed.
-     */
-    virtual routing_strategy getStrategyForUsage(audio_usage_t usage) = 0;
-
-    /**
      * Set the Telephony Mode.
      *
      * @param[in] mode: Android Phone state (normal, ringtone, csv, in communication)
@@ -133,6 +103,155 @@
     virtual status_t setDeviceConnectionState(const android::sp<android::DeviceDescriptor> devDesc,
                                               audio_policy_dev_state_t state) = 0;
 
+    /**
+     * Get the product strategy selected for the given audio attributes.
+     *
+     * @param[in] attr audio attributes for which the selected @product_strategy_t is requested.
+     *
+     * @return @product_strategy_t to be followed.
+     */
+    virtual product_strategy_t getProductStrategyForAttributes(
+            const audio_attributes_t &attr) const = 0;
+
+    /**
+     * @brief getOutputDevicesForAttributes retrieves the devices to be used for the given
+     * audio attributes.
+     * @param attributes of the output requesting the device(s) selection
+     * @param preferedDevice valid reference if a preferred device is requested, nullptr otherwise.
+     * @param fromCache if true, the devices are returned from the internal cache,
+     *                  otherwise they are determined from the current state (device connected,
+     *                  phone state, force use, a2dp output...)
+     * @return vector of selected device descriptors, i.e. the appropriate devices for streams
+     *         handled by the specified audio attributes according to the current phone state,
+     *         forced states, connected devices...
+     *         If fromCache is true, the devices are returned from the internal cache,
+     *         otherwise they are determined from the current state (device connected, phone state,
+     *         force use, a2dp output...)
+     * This makes it possible to:
+     *      1 speed up the process when the state is stable (when starting or stopping an output)
+     *      2 access either the current device selection (fromCache == true) or the
+     *      "future" device selection (fromCache == false) when called from a context
+     *      where conditions are changing (setDeviceConnectionState(), setPhoneState()...) AND
+     *      before the manager updates its outputs.
+     */
+    virtual DeviceVector getOutputDevicesForAttributes(
+            const audio_attributes_t &attributes,
+            const sp<DeviceDescriptor> &preferedDevice = nullptr,
+            bool fromCache = false) const = 0;
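+    // Illustrative call (non-normative), assuming `engine` points to an implementation
+    // and `attr` is owned by the caller:
+    //   DeviceVector devices = engine->getOutputDevicesForAttributes(
+    //           attr, nullptr /*preferedDevice*/, true /*fromCache*/);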
+
+    /**
+     * @brief getOutputDevicesForStream Legacy function retrieving devices from a stream type.
+     * @param stream type of the output requesting the device(s) selection
+     * @param fromCache if true, the devices are returned from the internal cache,
+     *                  otherwise they are determined from the current state (device connected,
+     *                  phone state, force use, a2dp output...)
+     * @return appropriate devices for streams of the specified type according to the current
+     *         phone state, forced states, connected devices...
+     *         If fromCache is true, the devices are returned from the internal cache,
+     *         otherwise they are determined from the current state (device connected, phone state,
+     *         force use, a2dp output...)
+     * This makes it possible to:
+     *      1 speed up the process when the state is stable (when starting or stopping an output)
+     *      2 access either the current device selection (fromCache == true) or the
+     *      "future" device selection (fromCache == false) when called from a context
+     *      where conditions are changing (setDeviceConnectionState(), setPhoneState()...) AND
+     *      before the manager updates its outputs.
+     */
+    virtual DeviceVector getOutputDevicesForStream(audio_stream_type_t stream,
+                                                   bool fromCache = false) const = 0;
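+    // e.g. (illustrative): engine->getOutputDevicesForStream(AUDIO_STREAM_MUSIC, false /*fromCache*/);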
+
+    /**
+     * Get the input device selected for given audio attributes.
+     *
+     * @param[in] attr audio attributes to consider
+     * @param[out] mix to be used if a mix has been installed for the given audio attributes.
+     * @return selected input device for the audio attributes, may be null if error.
+     */
+    virtual sp<DeviceDescriptor> getInputDeviceForAttributes(
+            const audio_attributes_t &attr, AudioMix **mix = nullptr) const = 0;
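+    // e.g. (illustrative): sp<DeviceDescriptor> device = engine->getInputDeviceForAttributes(attr);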
+
+    /**
+     * Get the legacy stream type for a given audio attributes.
+     *
+     * @param[in] audio attributes to get the associated audio_stream_type_t.
+     *
+     * @return audio_stream_type_t associated to the attributes.
+     */
+    virtual audio_stream_type_t getStreamTypeForAttributes(
+            const audio_attributes_t &attr) const = 0;
+
+    /**
+     * @brief getAttributesForStream get the audio attributes from legacy stream type
+     * @param stream to consider
+     * @return audio attributes matching the legacy stream type
+     */
+    virtual audio_attributes_t getAttributesForStreamType(audio_stream_type_t stream) const = 0;
+
+    /**
+     * @brief getStreamTypesForProductStrategy retrieves the list of legacy stream types following
+     * the given product strategy
+     * @param ps product strategy to consider
+     * @return vector of legacy stream types associated with the given product strategy
+     */
+    virtual StreamTypeVector getStreamTypesForProductStrategy(product_strategy_t ps) const = 0;
+
+    /**
+     * @brief getAllAttributesForProductStrategy retrieves all the attributes following the given
+     * product strategy. Any attributes that "match" one of them will follow the product
+     * strategy.
+     * "Matching" means the usage shall match if the reference attributes have a defined usage,
+     * AND the content type shall match if the reference attributes have a defined content type,
+     * AND the flags shall match if the reference attributes have defined flags,
+     * AND the tags shall match if the reference attributes have defined tags.
+     * @param ps product strategy to consider
+     * @return vector of audio attributes, empty if the strategy is unknown.
+     */
+    virtual AttributesVector getAllAttributesForProductStrategy(product_strategy_t ps) const = 0;
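+    // Illustrative reading of the rule above (hypothetical values): a reference attribute
+    // with { usage = AUDIO_USAGE_MEDIA } and no content type, flags or tags defined matches
+    // any attributes whose usage is AUDIO_USAGE_MEDIA, whatever their other fields.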
+
+    /**
+     * @brief getOrderedProductStrategies
+     * @return priority-ordered product strategies to help the AudioPolicyManager evaluate the
+     * device selection per output according to the prioritized strategies.
+     */
+    virtual StrategyVector getOrderedProductStrategies() const = 0;
+
+    /**
+     * @brief updateDeviceSelectionCache. Device selection for AudioAttribute / Streams is cached
+     * in the engine in order to speed up the process when the audio system is stable.
+     * When a device is connected or the Android mode is changed, the engine is notified and can
+     * update the cache.
+     * When starting / stopping an output with a stream that can affect notifications, the engine
+     * needs to update the cache upon this function call.
+     */
+    virtual void updateDeviceSelectionCache() = 0;
+
+    /**
+     * @brief listAudioProductStrategies. Introspection API to retrieve a collection of
+     * AudioProductStrategies that allows building AudioAttributes according to a
+     * product_strategy, which is just an index. Each strategy also has a human readable name to
+     * help the Car/Oem/AudioManager identify the use case.
+     * @param strategies collection to be filled.
+     * @return OK if the list has been retrieved, error code otherwise
+     */
+    virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) const = 0;
+
+    /**
+     * @brief getVolumeCurvesForAttributes retrieves the Volume Curves interface for the
+     *        requested Audio Attributes.
+     * @param attr to be considered
+     * @return IVolumeCurves interface pointer if found, nullptr otherwise
+     */
+    virtual IVolumeCurves *getVolumeCurvesForAttributes(const audio_attributes_t &attr) = 0;
+
+    /**
+     * @brief getVolumeCurvesForStreamType retrieves the Volume Curves interface for the stream
+     * @param stream to be considered
+     * @return IVolumeCurves interface pointer if found, nullptr otherwise
+     */
+    virtual IVolumeCurves *getVolumeCurvesForStreamType(audio_stream_type_t stream) = 0;
+
+    virtual void dump(String8 *dst) const = 0;
+
 protected:
     virtual ~AudioPolicyManagerInterface() {}
 };
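Note: a minimal sketch of how a caller (e.g. the policy manager) might walk the interface above to
inspect the device selection per product strategy. The surrounding function and the "engine"
pointer are assumptions for illustration; only the interface methods declared above come from this
header, and StrategyVector / StreamTypeVector are assumed to be iterable containers.

    // Hedged sketch, not part of this change: resolve current output devices per strategy.
    void logDevicesPerStrategy(AudioPolicyManagerInterface *engine) {
        for (product_strategy_t ps : engine->getOrderedProductStrategies()) {
            for (audio_stream_type_t stream : engine->getStreamTypesForProductStrategy(ps)) {
                // fromCache == true reads the currently applied device selection.
                DeviceVector devices = engine->getOutputDevicesForStream(stream, true /*fromCache*/);
                // ... inspect or log 'devices' for this stream / strategy ...
            }
        }
    }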
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
index b7902cf..43ba625 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
@@ -16,7 +16,6 @@
 
 #pragma once
 
-#include <IVolumeCurvesCollection.h>
 #include <AudioGain.h>
 #include <AudioPort.h>
 #include <AudioPatch.h>
@@ -51,8 +50,6 @@
 
     virtual const DeviceVector &getAvailableInputDevices() const = 0;
 
-    virtual IVolumeCurvesCollection &getVolumeCurves() = 0;
-
     virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const = 0;
 
 protected:
diff --git a/services/audiopolicy/engineconfigurable/Android.mk b/services/audiopolicy/engineconfigurable/Android.mk
index c2105e9..2b7e4c8 100644
--- a/services/audiopolicy/engineconfigurable/Android.mk
+++ b/services/audiopolicy/engineconfigurable/Android.mk
@@ -11,14 +11,15 @@
     src/Engine.cpp \
     src/EngineInstance.cpp \
     src/Stream.cpp \
-    src/Strategy.cpp \
-    src/Usage.cpp \
     src/InputSource.cpp \
+    ../engine/common/src/VolumeCurve.cpp \
+    ../engine/common/src/StreamVolumeCurves.cpp \
+    ../engine/common/src/ProductStrategy.cpp \
+    ../engine/common/src/EngineBase.cpp
 
 audio_policy_engine_includes_common := \
     frameworks/av/services/audiopolicy/engineconfigurable/include \
-    frameworks/av/services/audiopolicy/engineconfigurable/interface \
-    frameworks/av/services/audiopolicy/engine/interface
+    frameworks/av/services/audiopolicy/engineconfigurable/interface
 
 LOCAL_CFLAGS += \
     -Wall \
@@ -32,8 +33,12 @@
     $(audio_policy_engine_includes_common) \
     $(TARGET_OUT_HEADERS)/hw \
     $(call include-path-for, frameworks-av) \
-    $(call include-path-for, audio-utils) \
-    frameworks/av/services/audiopolicy/common/include
+    $(call include-path-for, audio-utils)
+
+LOCAL_HEADER_LIBRARIES := \
+    libaudiopolicycommon \
+    libaudiopolicyengine_common_headers \
+    libaudiopolicyengine_interface_headers
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
@@ -45,13 +50,14 @@
     libaudiopolicycomponents
 
 LOCAL_SHARED_LIBRARIES := \
+    libaudiopolicyengineconfig \
     liblog \
-    libcutils \
     libutils \
     liblog \
     libaudioutils \
     libparameter \
     libmedia_helper \
+    libaudiopolicy \
     libxml2
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audiopolicy/engineconfigurable/config/Android.mk b/services/audiopolicy/engineconfigurable/config/Android.mk
new file mode 100644
index 0000000..dcce8e3
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/Android.mk
@@ -0,0 +1,9 @@
+
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+#######################################################################
+# Recursive call sub-folder Android.mk
+#
+include $(call all-makefiles-under,$(LOCAL_PATH))
+
diff --git a/services/audiopolicy/engineconfigurable/config/example/Android.mk b/services/audiopolicy/engineconfigurable/config/example/Android.mk
new file mode 100644
index 0000000..ef476f7
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/Android.mk
@@ -0,0 +1,117 @@
+LOCAL_PATH := $(call my-dir)
+
+TOOLS := frameworks/av/services/audiopolicy/engineconfigurable/tools
+PROVISION_CRITERION_TYPES := $(TOOLS)/provision_criterion_types_from_android_headers.mk
+
+##################################################################
+# CONFIGURATION TOP FILE
+##################################################################
+
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_configurable)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_configuration_phone.xml
+LOCAL_MODULE_STEM := audio_policy_engine_configuration.xml
+
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE_STEM)
+
+LOCAL_REQUIRED_MODULES := \
+    audio_policy_engine_product_strategies_phone.xml  \
+    audio_policy_engine_stream_volumes.xml \
+    audio_policy_engine_default_stream_volumes.xml \
+    audio_policy_engine_criteria.xml \
+    audio_policy_engine_criterion_types.xml
+
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_product_strategies_phone.xml
+LOCAL_MODULE_STEM := audio_policy_engine_product_strategies.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_default_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_configurable)
+
+
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), automotive_configurable)
+
+##################################################################
+# AUTOMOTIVE CONFIGURATION TOP FILE
+##################################################################
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_configuration_automotive.xml
+LOCAL_MODULE_STEM := audio_policy_engine_configuration.xml
+
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := automotive/$(LOCAL_MODULE_STEM)
+
+LOCAL_REQUIRED_MODULES := \
+    audio_policy_engine_product_strategies_automotive.xml \
+    audio_policy_engine_criteria.xml \
+    audio_policy_engine_criterion_types.xml
+
+include $(BUILD_PREBUILT)
+
+##################################################################
+# CONFIGURATION FILES
+##################################################################
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_product_strategies_automotive.xml
+LOCAL_MODULE_STEM := audio_policy_engine_product_strategies.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := automotive/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), automotive_configurable)
+
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable))
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_criteria.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := common/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_criterion_types.xml
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_ADDITIONAL_DEPENDENCIES := \
+    $(TARGET_OUT_VENDOR_ETC)/audio_policy_configuration.xml
+
+ANDROID_AUDIO_BASE_HEADER_FILE := system/media/audio/include/system/audio-base.h
+AUDIO_POLICY_CONFIGURATION_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_configuration.xml
+CRITERION_TYPES_FILE := $(LOCAL_PATH)/common/$(LOCAL_MODULE).in
+
+include $(PROVISION_CRITERION_TYPES)
+
+endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable))
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_configuration.xml
similarity index 67%
rename from services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml
rename to services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_configuration.xml
index 5d9193b..e2fb02b 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_configuration.xml
@@ -12,14 +12,13 @@
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      See the License for the specific language governing permissions and
      limitations under the License.
--->
-<!--
-    These are the minimum required criteria to be used by Audio HAL to ensure a basic
-    user experience on an Android device
--->
-<configuration name="audio_policy_wrapper_configuration" xmlns:xi="http://www.w3.org/2001/XInclude">
+     -->
 
-    <xi:include href="policy_criterion_types.xml"/>
-    <xi:include href="policy_criteria.xml"/>
+<configuration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+    <xi:include href="audio_policy_engine_product_strategies.xml"/>
+    <xi:include href="audio_policy_engine_criterion_types.xml"/>
+    <xi:include href="audio_policy_engine_criteria.xml"/>
 
 </configuration>
+
diff --git a/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_product_strategies.xml b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_product_strategies.xml
new file mode 100644
index 0000000..543a2f0
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_product_strategies.xml
@@ -0,0 +1,161 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<ProductStrategies>
+    <!-- OEM Usages -->
+    <!-- product_strategy will be defined according to this order
+    product_strategy is oem_traffic_anouncement if all the conditions are satisfied for
+    AudioAttributes aa
+
+    int type = 0;
+    if (bundle != null) {
+    type = bundle.getInt(KEY_OEM_TYPE, 0);
+    }
+    if(
+    ( aa.mContentType == AudioAttributes.AUDIO_CONTENT_TYPE_SPEECH ) &&
+    ( aa.mUsage == AudioAttributes.AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE ) &&
+    ( type == 1 ) )
+    -->
+
+    <ProductStrategy name="oem_traffic_anouncement">
+        <AttributesGroup>
+            <ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
+            <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
+            <!--  traffic_announcement = 1 -->
+            <Bundle key="oem" value="1"/>
+        </AttributesGroup>
+    </ProductStrategy>
+    <ProductStrategy name="oem_strategy_1">
+        <AttributesGroup>
+            <ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
+            <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
+            <Bundle key="oem" value="2"/>
+        </AttributesGroup>
+    </ProductStrategy>
+    <ProductStrategy name="oem_strategy_2">
+        <AttributesGroup>
+            <ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
+            <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
+            <Bundle key="oem" value="3"/>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <!-- Car Usages -->
+    <!-- Keep those lines only for car -->
+    <!-- Check car conditions if any OEM conditions matched -->
+    <!-- As defined by CarAudioAttributesUtil.java -->
+    <!-- product_strategy will be defined according to this order
+    product_strategy is radio if all the conditions are satisfied for AudioAttributes aa
+
+        int type = CAR_AUDIO_TYPE_DEFAULT;
+        if (bundle != null) {
+        type = bundle.getInt(KEY_CAR_AUDIO_TYPE, CAR_AUDIO_TYPE_DEFAULT);
+        }
+        if(
+        ( aa.mContentType == AudioAttributes.AUDIO_CONTENT_TYPE_SPEECH ) &&
+        ( aa.mUsage == AudioAttributes.AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE ) &&
+        ( type == CAR_AUDIO_TYPE_RADIO ) )
+        -->
+    <ProductStrategy name="radio">
+        <AttributesGroup>
+            <ContentType value="AUDIO_CONTENT_TYPE_MUSIC"/>
+            <Usage value="AUDIO_USAGE_MEDIA"/>
+            <Bundle key="car_audio_type" value="3"/>
+        </AttributesGroup>
+    </ProductStrategy>
+    <ProductStrategy name="ext_audio_source">
+        <AttributesGroup>
+            <ContentType value="AUDIO_CONTENT_TYPE_MUSIC"/>
+            <Usage value="AUDIO_USAGE_MEDIA"/>
+            <Bundle key="car_audio_type" value="7"/>
+        </AttributesGroup>
+    </ProductStrategy>
+    <ProductStrategy name="voice_command">
+        <AttributesGroup>
+            <Attributes>
+                <ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
+                <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
+                <!--  CAR_AUDIO_TYPE_VOICE_COMMAND = 1 -->
+                <Bundle key="car_audio_type" value="1"/>
+            </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_ASSISTANT"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+    <ProductStrategy name="safety_alert">
+        <AttributesGroup>
+            <ContentType value="AUDIO_CONTENT_TYPE_SONIFICATION"/>
+            <Usage value="AUDIO_USAGE_NOTIFICATION"/>
+            <!--  CAR_AUDIO_TYPE_SAFETY_ALERT = 2 -->
+            <Bundle key="car_audio_type" value="2"/>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <!-- To be checked
+    CAR_AUDIO_TYPE_CARSERVICE_BOTTOM
+    CAR_AUDIO_TYPE_CARSERVICE_CAR_PROXY
+    CAR_AUDIO_TYPE_CARSERVICE_MEDIA_MUTE
+    -->
+
+    <!-- Generic Usages -->
+    <ProductStrategy name="music">
+        <AttributesGroup streamType="AUDIO_STREAM_MUSIC">
+            <Attributes> <Usage value="AUDIO_USAGE_MEDIA"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_GAME"/> </Attributes>
+            <!-- Default product strategy has empty attributes -->
+            <Attributes></Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <ProductStrategy name="nav_guidance">
+        <AttributesGroup>
+            <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
+        </AttributesGroup>
+    </ProductStrategy>
+    <ProductStrategy name="voice_call">
+        <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL">
+            <Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING"/> </Attributes>
+            <Attributes> <Flags value="AUDIO_FLAG_SCO"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+    <ProductStrategy name="alarm">
+        <AttributesGroup streamType="AUDIO_STREAM_ALARM">
+            <Usage value="AUDIO_USAGE_ALARM"/>
+        </AttributesGroup>
+    </ProductStrategy>
+    <ProductStrategy name="ring">
+        <AttributesGroup streamType="AUDIO_STREAM_RING">
+            <Usage value="AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE"/>
+        </AttributesGroup>
+    </ProductStrategy>
+    <ProductStrategy name="notification">
+        <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION">
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_EVENT"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+    <ProductStrategy name="system">
+        <AttributesGroup streamType="AUDIO_STREAM_SYSTEM">
+            <Usage value="AUDIO_USAGE_ASSISTANCE_SONIFICATION"/>
+        </AttributesGroup>
+    </ProductStrategy>
+
+</ProductStrategies>
+
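Note: the ProductStrategy entries above rely on the matching rule documented in the engine
interface (usage, content type, flags and tags must all match whenever the reference attributes
define them). A hedged sketch of that predicate for illustration only; the actual engine code may
compare flags or tags differently, and <cstring> plus <system/audio.h> are assumed to be included.

    bool attributesMatch(const audio_attributes_t &ref, const audio_attributes_t &client) {
        if (ref.usage != AUDIO_USAGE_UNKNOWN && ref.usage != client.usage) return false;
        if (ref.content_type != AUDIO_CONTENT_TYPE_UNKNOWN &&
                ref.content_type != client.content_type) return false;
        if (ref.flags != AUDIO_FLAG_NONE && (client.flags & ref.flags) != ref.flags) return false;
        if (strlen(ref.tags) != 0 &&
                strncmp(ref.tags, client.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE) != 0) return false;
        return true;
    }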
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/policy_criteria.xml b/services/audiopolicy/engineconfigurable/config/example/common/audio_policy_engine_criteria.xml
similarity index 100%
rename from services/audiopolicy/engineconfigurable/wrapper/config/policy_criteria.xml
rename to services/audiopolicy/engineconfigurable/config/example/common/audio_policy_engine_criteria.xml
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/policy_criterion_types.xml.in b/services/audiopolicy/engineconfigurable/config/example/common/audio_policy_engine_criterion_types.xml.in
similarity index 100%
rename from services/audiopolicy/engineconfigurable/wrapper/config/policy_criterion_types.xml.in
rename to services/audiopolicy/engineconfigurable/config/example/common/audio_policy_engine_criterion_types.xml.in
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_configuration.xml
similarity index 67%
copy from services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml
copy to services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_configuration.xml
index 5d9193b..4ca33b4 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_configuration.xml
@@ -12,14 +12,13 @@
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      See the License for the specific language governing permissions and
      limitations under the License.
--->
-<!--
-    These are the minimum required criteria to be used by Audio HAL to ensure a basic
-    user experience on an Android device
--->
-<configuration name="audio_policy_wrapper_configuration" xmlns:xi="http://www.w3.org/2001/XInclude">
+     -->
 
-    <xi:include href="policy_criterion_types.xml"/>
-    <xi:include href="policy_criteria.xml"/>
+<configuration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+    <xi:include href="audio_policy_engine_product_strategies.xml"/>
+    <xi:include href="audio_policy_engine_stream_volumes.xml"/>
+    <xi:include href="audio_policy_engine_default_stream_volumes.xml"/>
 
 </configuration>
+
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_default_stream_volumes.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_default_stream_volumes.xml
new file mode 100644
index 0000000..21e6dd5
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_default_stream_volumes.xml
@@ -0,0 +1,136 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<!-- Default Volume Tables included by Audio Policy Configuration file -->
+<!-- Full Default Volume table for all device category -->
+<volumes>
+    <reference name="FULL_SCALE_VOLUME_CURVE">
+    <!-- Full Scale reference Volume Curve -->
+        <point>0,0</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="SILENT_VOLUME_CURVE">
+        <point>0,-9600</point>
+        <point>100,-9600</point>
+    </reference>
+    <reference name="DEFAULT_SYSTEM_VOLUME_CURVE">
+    <!-- Default System reference Volume Curve -->
+        <point>1,-2400</point>
+        <point>33,-1800</point>
+        <point>66,-1200</point>
+        <point>100,-600</point>
+    </reference>
+    <reference name="DEFAULT_MEDIA_VOLUME_CURVE">
+    <!-- Default Media reference Volume Curve -->
+        <point>1,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE">
+    <!--Default Volume Curve -->
+        <point>1,-4950</point>
+        <point>33,-3350</point>
+        <point>66,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE">
+    <!-- Default is Speaker Media Volume Curve -->
+        <point>1,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE">
+    <!-- Default is Speaker System Volume Curve -->
+        <point>1,-4680</point>
+        <point>42,-2070</point>
+        <point>85,-540</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE">
+    <!--Default Volume Curve -->
+        <point>1,-4950</point>
+        <point>33,-3350</point>
+        <point>66,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE">
+    <!-- Default is Ext Media System Volume Curve -->
+        <point>1,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-2100</point>
+        <point>100,-1000</point>
+    </reference>
+    <reference name="DEFAULT_HEARING_AID_VOLUME_CURVE">
+    <!-- Default Hearing Aid Volume Curve -->
+        <point>1,-12700</point>
+        <point>20,-8000</point>
+        <point>60,-4000</point>
+        <point>100,0</point>
+    </reference>
+    <!-- **************************************************************** -->
+    <!-- Non-mutable default volume curves:                               -->
+    <!--     * first point is always for index 0                          -->
+    <!--     * attenuation is small enough that stream can still be heard -->
+    <reference name="DEFAULT_NON_MUTABLE_VOLUME_CURVE">
+    <!-- Default non-mutable reference Volume Curve -->
+    <!--        based on DEFAULT_MEDIA_VOLUME_CURVE -->
+        <point>0,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE">
+    <!--Default non-mutable Volume Curve for headset -->
+    <!--    based on DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE -->
+        <point>0,-4950</point>
+        <point>33,-3350</point>
+        <point>66,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE">
+    <!-- Default non-mutable Speaker Volume Curve -->
+    <!--    based on DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE -->
+        <point>0,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE">
+    <!--Default non-mutable Volume Curve -->
+    <!--    based on DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE -->
+        <point>0,-4950</point>
+        <point>33,-3350</point>
+        <point>66,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE">
+    <!-- Default non-mutable Ext Media System Volume Curve -->
+    <!--     based on DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE -->
+        <point>0,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-2100</point>
+        <point>100,-1000</point>
+    </reference>
+    <reference name="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE">
+    <!-- Default non-mutable Hearing Aid Volume Curve -->
+    <!--     based on DEFAULT_HEARING_AID_VOLUME_CURVE -->
+        <point>0,-12700</point>
+        <point>20,-8000</point>
+        <point>60,-4000</point>
+        <point>100,0</point>
+    </reference>
+</volumes>
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
new file mode 100644
index 0000000..f72e379
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -0,0 +1,110 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+     -->
+
+<ProductStrategies>
+
+    <!-- "hidden strategies" like TTS, enforced audible:
+            Shall we expose them here or keep it hard coded -->
+
+    <!-- Used to identify the volume of audio streams for enforced system sounds in certain
+         countries (e.g. camera in Japan)
+         This strategy will only have higher priority than phone if force for system is set to
+         enforced. -->
+
+    <ProductStrategy name="STRATEGY_PHONE">
+        <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL">
+            <Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION"/> </Attributes>
+        </AttributesGroup>
+        <AttributesGroup streamType="AUDIO_STREAM_BLUETOOTH_SCO">
+            <Attributes> <Flags value="AUDIO_FLAG_SCO"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <ProductStrategy name="STRATEGY_SONIFICATION">
+        <AttributesGroup streamType="AUDIO_STREAM_RING">
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE"/> </Attributes>
+        </AttributesGroup>
+        <AttributesGroup streamType="AUDIO_STREAM_ALARM">
+            <Attributes> <Usage value="AUDIO_USAGE_ALARM"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <ProductStrategy name="STRATEGY_ENFORCED_AUDIBLE">
+        <AttributesGroup streamType="AUDIO_STREAM_ENFORCED_AUDIBLE">
+            <Attributes> <Flags value="AUDIO_FLAG_AUDIBILITY_ENFORCED"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <ProductStrategy name="STRATEGY_ACCESSIBILITY">
+        <AttributesGroup streamType="AUDIO_STREAM_ACCESSIBILITY">
+            <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <ProductStrategy name="STRATEGY_SONIFICATION_RESPECTFUL">
+        <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION">
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_EVENT"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <ProductStrategy name="STRATEGY_MEDIA">
+         <AttributesGroup streamType="AUDIO_STREAM_MUSIC">
+            <Attributes> <Usage value="AUDIO_USAGE_MEDIA"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_GAME"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_ASSISTANT"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/> </Attributes>
+            <Attributes></Attributes>
+        </AttributesGroup>
+        <AttributesGroup streamType="AUDIO_STREAM_SYSTEM">
+            <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_SONIFICATION"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <ProductStrategy name="STRATEGY_DTMF">
+        <AttributesGroup streamType="AUDIO_STREAM_DTMF">
+            <Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <!-- Used to identify the volume of audio streams exclusively transmitted through the speaker
+         (TTS) of the device -->
+    <ProductStrategy name="STRATEGY_TRANSMITTED_THROUGH_SPEAKER">
+        <AttributesGroup streamType="AUDIO_STREAM_TTS">
+            <Attributes> <Flags value="AUDIO_FLAG_BEACON"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <!-- Routing Strategy rerouting may be removed as following media??? -->
+    <ProductStrategy name="STRATEGY_REROUTING">
+        <AttributesGroup streamType="AUDIO_STREAM_REROUTING">
+            <Attributes></Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <!-- Default product strategy has empty attributes -->
+    <ProductStrategy name="STRATEGY_PATCH">
+        <AttributesGroup streamType="AUDIO_STREAM_PATCH">
+            <Attributes></Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+
+</ProductStrategies>
+
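Note: with the phone strategies above, the engine's getStreamTypeForAttributes() resolves the
legacy stream from the attributes groups. A hedged example; the "engine" pointer is an assumption
and only the mapping shown in this configuration is relied upon.

    audio_attributes_t attr = {};          // usage / content type default to *_UNKNOWN
    attr.usage = AUDIO_USAGE_ALARM;
    audio_stream_type_t stream = engine->getStreamTypeForAttributes(attr);
    // Expected with this configuration: AUDIO_STREAM_ALARM (STRATEGY_SONIFICATION group).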
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml
new file mode 100644
index 0000000..73bde1f
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml
@@ -0,0 +1,231 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<!-- Volume section defines a volume curve for a given use case and device category.
+It contains a list of points of this curve expressing the attenuation in Millibels for a given
+volume index from 0 to 100.
+<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="">
+<point>0,-9600</point>
+<point>100,0</point>
+</volume>
+-->
+
+<volumeGroups>
+    <volumeGroup>
+        <stream>AUDIO_STREAM_VOICE_CALL</stream>
+        <indexMin>1</indexMin>
+        <indexMax>7</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+            <point>0,-4200</point>
+            <point>33,-2800</point>
+            <point>66,-1400</point>
+            <point>100,0</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+            <point>0,-2400</point>
+            <point>33,-1600</point>
+            <point>66,-800</point>
+            <point>100,0</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+            <point>0,-2700</point>
+            <point>33,-1800</point>
+            <point>66,-900</point>
+            <point>100,0</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_SYSTEM</stream>
+        <indexMin>0</indexMin>
+        <indexMax>7</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+            <point>1,-3000</point>
+            <point>33,-2600</point>
+            <point>66,-2200</point>
+            <point>100,-1800</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+            <point>1,-5100</point>
+            <point>57,-2800</point>
+            <point>71,-2500</point>
+            <point>85,-2300</point>
+            <point>100,-2100</point>
+        </volume>
+        <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_RING</stream>
+        <indexMin>0</indexMin>
+        <indexMax>7</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_MUSIC</stream>
+        <indexMin>0</indexMin>
+        <indexMax>25</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID"  ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_ALARM</stream>
+        <indexMin>1</indexMin>
+        <indexMax>7</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_NOTIFICATION</stream>
+        <indexMin>0</indexMin>
+        <indexMax>7</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_BLUETOOTH_SCO</stream>
+        <indexMin>0</indexMin>
+        <indexMax>15</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+            <point>0,-4200</point>
+            <point>33,-2800</point>
+            <point>66,-1400</point>
+            <point>100,0</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+            <point>0,-2400</point>
+            <point>33,-1600</point>
+            <point>66,-800</point>
+            <point>100,0</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+            <point>0,-4200</point>
+            <point>33,-2800</point>
+            <point>66,-1400</point>
+            <point>100,0</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_ENFORCED_AUDIBLE</stream>
+        <indexMin>0</indexMin>
+        <indexMax>7</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+            <point>1,-3000</point>
+            <point>33,-2600</point>
+            <point>66,-2200</point>
+            <point>100,-1800</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+            <point>1,-3400</point>
+            <point>71,-2400</point>
+            <point>100,-2000</point>
+        </volume>
+        <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_DTMF</stream>
+        <indexMin>0</indexMin>
+        <indexMax>15</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+            <point>1,-3000</point>
+            <point>33,-2600</point>
+            <point>66,-2200</point>
+            <point>100,-1800</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+            <point>1,-4000</point>
+            <point>71,-2400</point>
+            <point>100,-1400</point>
+        </volume>
+        <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_TTS</stream>
+        <indexMin>0</indexMin>
+        <indexMax>15</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="SILENT_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="SILENT_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="SILENT_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="SILENT_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_ACCESSIBILITY</stream>
+        <indexMin>1</indexMin>
+        <indexMax>15</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_REROUTING</stream>
+        <indexMin>0</indexMin>
+        <indexMax>1</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_PATCH</stream>
+        <indexMin>0</indexMin>
+        <indexMax>1</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+    </volumeGroup>
+</volumeGroups>
+
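Note: each <volume> curve above lists (index, millibel) points; the attenuation for an
intermediate index is conventionally obtained by linear interpolation between the neighbouring
points. A hedged sketch, assuming that convention; the helper name and container are illustrative
only, and millibels are converted to dB by dividing by 100.

    #include <utility>
    #include <vector>

    // points: (volume index 0..100, attenuation in millibels), sorted by index.
    float attenuationDb(const std::vector<std::pair<int, int>> &points, int index) {
        if (index <= points.front().first) return points.front().second / 100.0f;
        if (index >= points.back().first)  return points.back().second / 100.0f;
        for (size_t i = 1; i < points.size(); ++i) {
            if (index <= points[i].first) {
                int x0 = points[i - 1].first,  y0 = points[i - 1].second;
                int x1 = points[i].first,      y1 = points[i].second;
                float mb = y0 + (index - x0) * float(y1 - y0) / float(x1 - x0);
                return mb / 100.0f;
            }
        }
        return points.back().second / 100.0f;  // unreachable for well-formed input
    }

For DEFAULT_MEDIA_VOLUME_CURVE ({1,-5800}, {20,-4000}, {60,-1700}, {100,0}), index 40 gives
-4000 + (40 - 20) * (-1700 + 4000) / (60 - 20) = -2850 mB, i.e. -28.5 dB.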
diff --git a/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
index 2e29a9b..1fc2264 100644
--- a/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
+++ b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
@@ -16,10 +16,11 @@
 
 #pragma once
 
-#include <RoutingStrategy.h>
+#include <policy.h>
 #include <EngineDefinition.h>
 #include <Volume.h>
 #include <system/audio.h>
+#include <media/AudioCommonTypes.h>
 #include <utils/Errors.h>
 #include <string>
 #include <vector>
@@ -36,19 +37,6 @@
 {
 public:
     /**
-     * Add a strategy to the engine
-     *
-     * @param[in] name of the strategy to add
-     * @param[in] identifier: the numerical value associated to this member. It MUST match either
-     *            system/audio.h or system/audio_policy.h enumration value in order to link the
-     *            parameter controled by the PFW and the policy manager component.
-     *
-     * @return NO_ERROR if the strategy has been added successfully, error code otherwise.
-     *
-     */
-    virtual android::status_t addStrategy(const std::string &name, routing_strategy id) = 0;
-
-    /**
      * Add a streams to the engine.
      *
      * @param[in] name of the stream to add
@@ -62,19 +50,6 @@
     virtual android::status_t addStream(const std::string &name, audio_stream_type_t id) = 0;
 
     /**
-     * Add a usage to the engine
-     *
-     * @param[in] name of the usage to add
-     * @param[in] identifier: the numerical value associated to this member. It MUST match either
-     *            system/audio.h or system/audio_policy.h enumration value in order to link the
-     *            parameter controled by the PFW and the policy manager component.
-     *
-     * @return NO_ERROR if the usage has been added successfully, error code otherwise.
-     *
-     */
-    virtual android::status_t addUsage(const std::string &name, audio_usage_t id) = 0;
-
-    /**
      * Add an input source to the engine
      *
      * @param[in] name of the input source to add
@@ -88,26 +63,6 @@
     virtual android::status_t addInputSource(const std::string &name, audio_source_t id) = 0;
 
     /**
-     * Set the device to be used by a strategy.
-     *
-     * @param[in] strategy: name of the strategy for which the device to use has to be set
-     * @param[in] devices; mask of devices to be used for the given strategy.
-     *
-     * @return true if the devices were set correclty for this strategy, false otherwise.
-     */
-    virtual bool setDeviceForStrategy(const routing_strategy &strategy, audio_devices_t devices) = 0;
-
-    /**
-     * Set the strategy to be followed by a stream.
-     *
-     * @param[in] stream: name of the stream for which the strategy to use has to be set
-     * @param[in] strategy to follow for the given stream.
-     *
-     * @return true if the strategy were set correclty for this stream, false otherwise.
-     */
-    virtual bool setStrategyForStream(const audio_stream_type_t &stream, routing_strategy strategy) = 0;
-
-    /**
      * Set the strategy to be followed by a stream.
      *
      * @param[in] stream: name of the stream for which the strategy to use has to be set
@@ -119,16 +74,6 @@
                                            const audio_stream_type_t &volumeProfile) = 0;
 
     /**
-     * Set the strategy to be followed by a usage.
-     *
-     * @param[in] usage: name of the usage for which the strategy to use has to be set
-     * @param[in] strategy to follow for the given usage.
-     *
-     * @return true if the strategy were set correclty for this usage, false otherwise.
-     */
-    virtual bool setStrategyForUsage(const audio_usage_t &usage, routing_strategy strategy) = 0;
-
-    /**
      * Set the input device to be used by an input source.
      *
      * @param[in] inputSource: name of the input source for which the device to use has to be set
@@ -139,6 +84,22 @@
     virtual bool setDeviceForInputSource(const audio_source_t &inputSource,
                                          audio_devices_t device) = 0;
 
+    virtual void setDeviceAddressForProductStrategy(product_strategy_t strategy,
+                                                    const std::string &address) = 0;
+
+    /**
+     * Set the device to be used by a product strategy.
+     *
+     * @param[in] strategy: the product strategy for which the devices to use have to be set
+     * @param[in] devices: mask of devices to be used for the given strategy.
+     *
+     * @return true if the devices were set correctly for this strategy, false otherwise.
+     */
+    virtual bool setDeviceTypesForProductStrategy(product_strategy_t strategy,
+                                                  audio_devices_t devices) = 0;
+
+    virtual product_strategy_t getProductStrategyByName(const std::string &name) = 0;
+
 protected:
     virtual ~AudioPolicyPluginInterface() {}
 };
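Note: a hedged sketch of how a parameter-framework plugin could push a routing decision through
the interface above. The function, the "plugin" pointer and the "music" strategy name (taken from
the example configuration) are assumptions; passing an empty address is only meant to illustrate
"no specific device address".

    void routeMusicToSpeaker(AudioPolicyPluginInterface *plugin) {
        product_strategy_t ps = plugin->getProductStrategyByName("music");
        plugin->setDeviceTypesForProductStrategy(ps, AUDIO_DEVICE_OUT_SPEAKER);
        plugin->setDeviceAddressForProductStrategy(ps, "" /* no specific address */);
    }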
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Android.mk
index 7631976..060830b 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Android.mk
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Android.mk
@@ -9,7 +9,7 @@
 
 LOCAL_PATH := $(call my-dir)
 
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), 1)
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable no-output_configurable no-input_configurable))
 
 PFW_CORE := external/parameter-framework
 #@TODO: upstream new domain generator
@@ -20,116 +20,79 @@
 TOOLS := frameworks/av/services/audiopolicy/engineconfigurable/tools
 BUILD_PFW_SETTINGS := $(TOOLS)/build_audio_pfw_settings.mk
 
+endif
+
 ##################################################################
 # CONFIGURATION FILES
 ##################################################################
 ######### Policy PFW top level file #########
 
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable))
+
 include $(CLEAR_VARS)
 LOCAL_MODULE := ParameterFrameworkConfigurationPolicy.xml
 LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_CLASS := ETC
 LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR_ETC)/parameter-framework
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework
 LOCAL_SRC_FILES := $(LOCAL_MODULE).in
 
+# external/parameter-framework prevents using the debug interface
 AUDIO_PATTERN = @TUNING_ALLOWED@
-ifeq ($(TARGET_BUILD_VARIANT),user)
+#ifeq ($(TARGET_BUILD_VARIANT),user)
 AUDIO_VALUE = false
-else
-AUDIO_VALUE = true
-endif
+#else
+#AUDIO_VALUE = true
+#endif
 
-LOCAL_POST_INSTALL_CMD := $(hide) sed -i -e 's|$(AUDIO_PATTERN)|$(AUDIO_VALUE)|g' $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
+LOCAL_POST_INSTALL_CMD := $(hide) sed -i -e 's|$(AUDIO_PATTERN)|$(AUDIO_VALUE)|g' $(TARGET_OUT_VENDOR_ETC)/$(LOCAL_MODULE_RELATIVE_PATH)/$(LOCAL_MODULE)
 
 include $(BUILD_PREBUILT)
 
-
-########## Policy PFW Structures #########
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := PolicyClass.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/Structure/Policy
-LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := PolicySubsystem.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_REQUIRED_MODULES := \
-    PolicySubsystem-CommonTypes.xml \
-    libpolicy-subsystem
-
-LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/Structure/Policy
-LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
+########## Policy PFW Common Structures #########
 
 include $(CLEAR_VARS)
 LOCAL_MODULE := PolicySubsystem-CommonTypes.xml
 LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_CLASS := ETC
 LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/Structure/Policy
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
 LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
 include $(BUILD_PREBUILT)
 
-######### Policy PFW Settings #########
 include $(CLEAR_VARS)
-LOCAL_MODULE := parameter-framework.policy
-LOCAL_MODULE_STEM := PolicyConfigurableDomains.xml
+LOCAL_MODULE := PolicyClass.xml
+LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_CLASS := ETC
 LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
-LOCAL_REQUIRED_MODULES := \
-    policy_criteria.xml \
-    policy_criterion_types.xml \
-    PolicySubsystem.xml \
-    PolicyClass.xml \
-    ParameterFrameworkConfigurationPolicy.xml
-
-ifeq ($(pfw_rebuild_settings),true)
-PFW_EDD_FILES := \
-        $(LOCAL_PATH)/Settings/device_for_strategy_media.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_phone.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_sonification.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_sonification_respectful.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_dtmf.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_enforced_audible.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_transmitted_through_speaker.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_accessibility.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_rerouting.pfw \
-        $(LOCAL_PATH)/Settings/strategy_for_stream.pfw \
-        $(LOCAL_PATH)/Settings/strategy_for_usage.pfw \
-        $(LOCAL_PATH)/Settings/device_for_input_source.pfw \
-        $(LOCAL_PATH)/Settings/volumes.pfw
-
-LOCAL_ADDITIONAL_DEPENDENCIES := \
-    $(PFW_EDD_FILES)
-
-
-PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/policy_criterion_types.xml
-PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/policy_criteria.xml
-
-PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
-
-PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
-
-include $(BUILD_PFW_SETTINGS)
-else
-# Use the existing file
-LOCAL_SRC_FILES := Settings/$(LOCAL_MODULE_STEM)
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
 include $(BUILD_PREBUILT)
-endif # pfw_rebuild_settings
 
-endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), 0)
+endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable))
+
+########## Policy PFW Example Structures #########
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-output_configurable no-input_configurable))
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := PolicySubsystem.xml.common
+LOCAL_MODULE_STEM := PolicySubsystem.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_REQUIRED_MODULES := \
+    PolicySubsystem-CommonTypes.xml \
+    PolicySubsystem-Volume.xml \
+    libpolicy-subsystem \
+
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-output_configurable no-input_configurable))
 
 ######### Policy PFW Settings - No Output #########
-ifeq (0, 1)
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-output_configurable)
 
 include $(CLEAR_VARS)
 LOCAL_MODULE := parameter-framework.policy.no-output
@@ -138,26 +101,24 @@
 LOCAL_VENDOR_MODULE := true
 LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
 LOCAL_REQUIRED_MODULES := \
-    policy_criteria.xml \
-    policy_criterion_types.xml \
-    PolicySubsystem.xml \
+    audio_policy_engine_criteria.xml \
+    audio_policy_engine_criterion_types.xml \
+    PolicySubsystem.xml.common \
     PolicyClass.xml \
     ParameterFrameworkConfigurationPolicy.xml
 
 PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
-PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/policy_criterion_types.xml
-PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/policy_criteria.xml
+PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criterion_types.xml
+PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criteria.xml
 PFW_EDD_FILES := \
         $(LOCAL_PATH)/SettingsNoOutput/device_for_strategies.pfw \
-        $(LOCAL_PATH)/Settings/strategy_for_stream.pfw \
-        $(LOCAL_PATH)/Settings/strategy_for_usage.pfw \
         $(LOCAL_PATH)/Settings/device_for_input_source.pfw \
         $(LOCAL_PATH)/Settings/volumes.pfw
 
 include $(BUILD_PFW_SETTINGS)
-endif # ifeq (0, 1)
+endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-output_configurable)
 ######### Policy PFW Settings - No Input #########
-ifeq (0, 1)
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-input_configurable)
 
 include $(CLEAR_VARS)
 LOCAL_MODULE := parameter-framework.policy.no-input
@@ -166,36 +127,26 @@
 LOCAL_VENDOR_MODULE := true
 LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
 LOCAL_REQUIRED_MODULES := \
-    policy_criteria.xml \
-    policy_criterion_types.xml \
-    PolicySubsystem.xml \
+    audio_policy_engine_criteria.xml \
+    audio_policy_engine_criterion_types.xml \
+    PolicySubsystem.xml.common \
     PolicyClass.xml \
     ParameterFrameworkConfigurationPolicy.xml
 
 PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
-PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/policy_criterion_types.xml
-PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/policy_criteria.xml
+PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criterion_types.xml
+PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criteria.xml
 PFW_EDD_FILES := \
-        $(LOCAL_PATH)/Settings/device_for_strategy_media.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_phone.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_sonification.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_sonification_respectful.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_dtmf.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_enforced_audible.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_transmitted_through_speaker.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_accessibility.pfw \
-        $(LOCAL_PATH)/Settings/device_for_strategy_rerouting.pfw \
-        $(LOCAL_PATH)/Settings/strategy_for_stream.pfw \
-        $(LOCAL_PATH)/Settings/strategy_for_usage.pfw \
         $(LOCAL_PATH)/SettingsNoInput/device_for_input_source.pfw \
         $(LOCAL_PATH)/Settings/volumes.pfw
 
 include $(BUILD_PFW_SETTINGS)
 
-endif # ifeq (1, 0)
-
+endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-input_configurable)
 #######################################################################
 # Recursive call sub-folder Android.mk
 #######################################################################
 
 include $(call all-makefiles-under,$(LOCAL_PATH))
+
+
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.mk
new file mode 100644
index 0000000..ea4a58f
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.mk
@@ -0,0 +1,86 @@
+################################################################################################
+#
+# @NOTE:
+# Audio Policy Engine configurable example for generic device build
+#
+# Any vendor shall have its own configuration within the corresponding device folder
+#
+################################################################################################
+
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), automotive_configurable)
+LOCAL_PATH := $(call my-dir)
+
+PFW_CORE := external/parameter-framework
+PFW_DEFAULT_SCHEMAS_DIR := $(PFW_CORE)/upstream/schemas
+PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
+
+TOOLS := frameworks/av/services/audiopolicy/engineconfigurable/tools
+BUILD_PFW_SETTINGS := $(TOOLS)/build_audio_pfw_settings.mk
+
+
+##################################################################
+# CONFIGURATION FILES
+##################################################################
+
+########## Policy PFW Structures #########
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := PolicySubsystem.xml.car
+LOCAL_MODULE_STEM := PolicySubsystem.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_REQUIRED_MODULES := \
+    ProductStrategies.xml.car \
+    PolicySubsystem-Volume.xml \
+    PolicySubsystem-CommonTypes.xml \
+    libpolicy-subsystem
+
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := ProductStrategies.xml.car
+LOCAL_MODULE_STEM := ProductStrategies.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+######### Policy PFW Settings #########
+include $(CLEAR_VARS)
+LOCAL_MODULE := parameter-framework.policy.car
+LOCAL_MODULE_STEM := PolicyConfigurableDomains.xml
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
+
+PFW_EDD_FILES := \
+        $(LOCAL_PATH)/Settings/device_for_product_strategies.pfw \
+        $(LOCAL_PATH)/../Settings/device_for_input_source.pfw \
+        $(LOCAL_PATH)/../Settings/volumes.pfw
+
+LOCAL_ADDITIONAL_DEPENDENCIES := \
+    $(PFW_EDD_FILES)
+
+LOCAL_REQUIRED_MODULES := \
+    PolicySubsystem.xml.car \
+    PolicyClass.xml \
+    audio_policy_engine_criteria.xml \
+    audio_policy_engine_criterion_types.xml \
+    ParameterFrameworkConfigurationPolicy.xml
+
+PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criterion_types.xml
+PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criteria.xml
+
+PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
+
+PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
+
+include $(BUILD_PFW_SETTINGS)
+
+endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), automotive_configurable)
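[Editor's sketch, not part of the patch] The Car example above is only built when BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION selects it. A minimal, hypothetical product-side fragment opting into the automotive example could look like the following; only the module names are taken from the Android.mk above, and where the flag is actually consumed from is an assumption.

# Illustrative sketch only: select the automotive configurable example and pull
# in the modules it defines. The location where this flag is set (BoardConfig.mk,
# product makefile, ...) is an assumption; module names come from the diff above.
BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION := automotive_configurable

PRODUCT_PACKAGES += \
    parameter-framework.policy.car \
    PolicySubsystem.xml.car \
    ProductStrategies.xml.car \
    audio_policy_engine_criteria.xml \
    audio_policy_engine_criterion_types.xml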
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Settings/device_for_product_strategies.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Settings/device_for_product_strategies.pfw
new file mode 100644
index 0000000..196d82c
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Settings/device_for_product_strategies.pfw
@@ -0,0 +1,717 @@
+supDomain: DeviceForProductStrategies
+	supDomain: OemTrafficAnouncement
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/oem_traffic_anouncement/selected_output_devices/mask
+					earpiece = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					stub = 0
+				/Policy/policy/product_strategies/oem_traffic_anouncement/device_address = BUS08_OEM1
+
+		domain: SelectedDevice
+			conf: Bus
+				AvailableOutputDevices Includes Bus
+				AvailableOutputDevicesAddresses Includes BUS08_OEM1
+
+				component: /Policy/policy/product_strategies/oem_traffic_anouncement/selected_output_devices/mask
+					bus = 1
+
+			conf: Default
+				component: /Policy/policy/product_strategies/oem_traffic_anouncement/selected_output_devices/mask
+					bus = 0
+
+	supDomain: OemStrategy1
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/oem_strategy_1/selected_output_devices/mask
+					earpiece = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					stub = 0
+				/Policy/policy/product_strategies/oem_strategy_1/device_address = BUS08_OEM1
+
+		domain: SelectedDevice
+			conf: Bus
+				AvailableOutputDevices Includes Bus
+				AvailableOutputDevicesAddresses Includes BUS08_OEM1
+
+				component: /Policy/policy/product_strategies/oem_strategy_1/selected_output_devices/mask
+					bus = 1
+
+			conf: Default
+				component: /Policy/policy/product_strategies/oem_strategy_1/selected_output_devices/mask
+					bus = 0
+
+
+
+	supDomain: OemStrategy2
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/oem_strategy_2/selected_output_devices/mask
+					earpiece = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					stub = 0
+				/Policy/policy/product_strategies/oem_strategy_2/device_address = BUS08_OEM1
+
+		domain: SelectedDevice
+			conf: Bus
+				AvailableOutputDevices Includes Bus
+				AvailableOutputDevicesAddresses Includes BUS08_OEM1
+
+				component: /Policy/policy/product_strategies/oem_strategy_2/selected_output_devices/mask
+					bus = 1
+
+			conf: Default
+				component: /Policy/policy/product_strategies/oem_strategy_2/selected_output_devices/mask
+					bus = 0
+
+
+
+	supDomain: Radio
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/radio/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					stub = 0
+				/Policy/policy/product_strategies/radio/device_address = BUS09_OEM2
+
+		domain: SelectedDevice
+			conf: Bus
+				AvailableOutputDevices Includes Bus
+				AvailableOutputDevicesAddresses Includes BUS09_OEM2
+
+				component: /Policy/policy/product_strategies/radio/selected_output_devices/mask
+					speaker = 0
+					bus = 1
+
+			conf: Speaker
+				AvailableOutputDevices Includes Speaker
+				component: /Policy/policy/product_strategies/radio/selected_output_devices/mask
+					speaker = 1
+					bus = 0
+
+			conf: Default
+				component: /Policy/policy/product_strategies/radio/selected_output_devices/mask
+					speaker = 0
+					bus = 0
+
+	supDomain: ExtAudioSource
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/ext_audio_source/selected_output_devices/mask
+					earpiece = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					stub = 0
+				/Policy/policy/product_strategies/ext_audio_source/device_address = BUS09_OEM2
+
+		domain: SelectedDevice
+			conf: Bus
+				AvailableOutputDevices Includes Bus
+				AvailableOutputDevicesAddresses Includes BUS09_OEM2
+
+				component: /Policy/policy/product_strategies/ext_audio_source/selected_output_devices/mask
+					bus = 1
+
+			conf: Default
+				component: /Policy/policy/product_strategies/ext_audio_source/selected_output_devices/mask
+					bus = 0
+
+
+
+	supDomain: VoiceCommand
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/voice_command/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					stub = 0
+				/Policy/policy/product_strategies/voice_command/device_address = BUS02_VOICE_COMMAND
+
+		domain: SelectedDevice
+			conf: Bus
+				AvailableOutputDevices Includes Bus
+				AvailableOutputDevicesAddresses Includes BUS02_VOICE_COMMAND
+
+				component: /Policy/policy/product_strategies/voice_command/selected_output_devices/mask
+					speaker = 0
+					bus = 1
+
+			conf: Speaker
+				AvailableOutputDevices Includes Speaker
+				component: /Policy/policy/product_strategies/voice_command/selected_output_devices/mask
+					speaker = 1
+					bus = 0
+
+			conf: Default
+				component: /Policy/policy/product_strategies/voice_command/selected_output_devices/mask
+					speaker = 0
+					bus = 0
+
+
+	supDomain: SafetyAlert
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/safety_alert/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					stub = 0
+				/Policy/policy/product_strategies/safety_alert/device_address = BUS00_MEDIA
+
+		domain: SelectedDevice
+			conf: Bus
+				AvailableOutputDevices Includes Bus
+				AvailableOutputDevicesAddresses Includes BUS00_MEDIA
+
+				component: /Policy/policy/product_strategies/safety_alert/selected_output_devices/mask
+					speaker = 0
+					bus = 1
+
+			conf: Speaker
+				AvailableOutputDevices Includes Speaker
+				component: /Policy/policy/product_strategies/safety_alert/selected_output_devices/mask
+					speaker = 1
+					bus = 0
+
+			conf: Default
+				component: /Policy/policy/product_strategies/safety_alert/selected_output_devices/mask
+					speaker = 0
+					bus = 0
+
+
+	supDomain: Music
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/music/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					stub = 0
+				/Policy/policy/product_strategies/music/device_address = BUS00_MEDIA
+
+		domain: SelectedDevice
+			conf: Bus
+				AvailableOutputDevices Includes Bus
+				AvailableOutputDevicesAddresses Includes BUS00_MEDIA
+
+				component: /Policy/policy/product_strategies/music/selected_output_devices/mask
+					speaker = 0
+					bus = 1
+
+			conf: Speaker
+				AvailableOutputDevices Includes Speaker
+				component: /Policy/policy/product_strategies/music/selected_output_devices/mask
+					speaker = 1
+					bus = 0
+
+			conf: Default
+				component: /Policy/policy/product_strategies/music/selected_output_devices/mask
+					speaker = 0
+					bus = 0
+
+
+
+	supDomain: NavGuidance
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/nav_guidance/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					stub = 0
+				/Policy/policy/product_strategies/nav_guidance/device_address = BUS01_NAV_GUIDANCE
+
+		domain: SelectedDevice
+			conf: Bus
+				AvailableOutputDevices Includes Bus
+				AvailableOutputDevicesAddresses Includes BUS01_NAV_GUIDANCE
+
+				component: /Policy/policy/product_strategies/nav_guidance/selected_output_devices/mask
+					speaker = 0
+					bus = 1
+
+			conf: Speaker
+				AvailableOutputDevices Includes Speaker
+				component: /Policy/policy/product_strategies/nav_guidance/selected_output_devices/mask
+					speaker = 1
+					bus = 0
+
+			conf: Default
+				component: /Policy/policy/product_strategies/nav_guidance/selected_output_devices/mask
+					speaker = 0
+					bus = 0
+
+
+	supDomain: VoiceCall
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/voice_call/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					stub = 0
+				/Policy/policy/product_strategies/voice_call/device_address = BUS04_CALL
+
+		domain: SelectedDevice
+			conf: Bus
+				AvailableOutputDevices Includes Bus
+				AvailableOutputDevicesAddresses Includes BUS04_CALL
+
+				component: /Policy/policy/product_strategies/voice_call/selected_output_devices/mask
+					speaker = 0
+					bus = 1
+
+			conf: Speaker
+				AvailableOutputDevices Includes Speaker
+				component: /Policy/policy/product_strategies/voice_call/selected_output_devices/mask
+					speaker = 1
+					bus = 0
+
+			conf: Default
+				component: /Policy/policy/product_strategies/voice_call/selected_output_devices/mask
+					speaker = 0
+					bus = 0
+
+
+	supDomain: Alarm
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/alarm/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					stub = 0
+				/Policy/policy/product_strategies/alarm/device_address = BUS05_ALARM
+
+		domain: SelectedDevice
+			conf: Bus
+				AvailableOutputDevices Includes Bus
+				AvailableOutputDevicesAddresses Includes BUS05_ALARM
+
+				component: /Policy/policy/product_strategies/alarm/selected_output_devices/mask
+					speaker = 0
+					bus = 1
+
+			conf: Speaker
+				AvailableOutputDevices Includes Speaker
+				component: /Policy/policy/product_strategies/alarm/selected_output_devices/mask
+					speaker = 1
+					bus = 0
+
+			conf: Default
+				component: /Policy/policy/product_strategies/alarm/selected_output_devices/mask
+					speaker = 0
+					bus = 0
+
+
+	supDomain: Ring
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/ring/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					stub = 0
+				/Policy/policy/product_strategies/ring/device_address = BUS03_CALL_RING
+
+		domain: SelectedDevice
+			conf: Bus
+				AvailableOutputDevices Includes Bus
+				AvailableOutputDevicesAddresses Includes BUS03_CALL_RING
+
+				component: /Policy/policy/product_strategies/ring/selected_output_devices/mask
+					speaker = 0
+					bus = 1
+
+			conf: Speaker
+				AvailableOutputDevices Includes Speaker
+				component: /Policy/policy/product_strategies/ring/selected_output_devices/mask
+					speaker = 1
+					bus = 0
+
+			conf: Default
+				component: /Policy/policy/product_strategies/ring/selected_output_devices/mask
+					speaker = 0
+					bus = 0
+
+
+	supDomain: Notification
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/notification/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					stub = 0
+				/Policy/policy/product_strategies/notification/device_address = BUS06_NOTIFICATION
+
+		domain: SelectedDevice
+			conf: Bus
+				AvailableOutputDevices Includes Bus
+				AvailableOutputDevicesAddresses Includes BUS06_NOTIFICATION
+
+				component: /Policy/policy/product_strategies/notification/selected_output_devices/mask
+					speaker = 0
+					bus = 1
+
+			conf: Speaker
+				AvailableOutputDevices Includes Speaker
+				component: /Policy/policy/product_strategies/notification/selected_output_devices/mask
+					speaker = 1
+					bus = 0
+
+			conf: Default
+				component: /Policy/policy/product_strategies/notification/selected_output_devices/mask
+					speaker = 0
+					bus = 0
+
+
+	supDomain: System
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/system/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					stub = 0
+				/Policy/policy/product_strategies/system/device_address = BUS07_SYSTEM_SOUND
+
+		domain: SelectedDevice
+			conf: Bus
+				AvailableOutputDevices Includes Bus
+				AvailableOutputDevicesAddresses Includes BUS07_SYSTEM_SOUND
+
+				component: /Policy/policy/product_strategies/system/selected_output_devices/mask
+					speaker = 0
+					bus = 1
+
+			conf: Speaker
+				AvailableOutputDevices Includes Speaker
+				component: /Policy/policy/product_strategies/system/selected_output_devices/mask
+					speaker = 1
+					bus = 0
+
+			conf: Default
+				component: /Policy/policy/product_strategies/system/selected_output_devices/mask
+					speaker = 0
+					bus = 0
+
+
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Structure/PolicySubsystem.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Structure/PolicySubsystem.xml
new file mode 100644
index 0000000..b55ce2c
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Structure/PolicySubsystem.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<Subsystem xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+           xmlns:xi="http://www.w3.org/2001/XInclude"
+           xsi:noNamespaceSchemaLocation="Schemas/Subsystem.xsd"
+           Name="policy" Type="Policy">
+
+    <ComponentLibrary>
+        <!--#################### GLOBAL COMPONENTS BEGIN ####################-->
+        <!-- Common Types definition -->
+        <xi:include href="PolicySubsystem-CommonTypes.xml"/>
+        <xi:include href="ProductStrategies.xml"/>
+
+
+        <!--#################### GLOBAL COMPONENTS END ####################-->
+
+        <!--#################### STREAM BEGIN ####################-->
+
+        <ComponentType Name="Streams" Description="associated to audio_stream_type_t definition">
+            <Component Name="voice_call" Type="Stream" Mapping="Name:AUDIO_STREAM_VOICE_CALL"/>
+            <Component Name="system" Type="Stream" Mapping="Name:AUDIO_STREAM_SYSTEM"/>
+            <Component Name="ring" Type="Stream" Mapping="Name:AUDIO_STREAM_RING"/>
+            <Component Name="music" Type="Stream" Mapping="Name:AUDIO_STREAM_MUSIC"/>
+            <Component Name="alarm" Type="Stream" Mapping="Name:AUDIO_STREAM_ALARM"/>
+            <Component Name="notification" Type="Stream" Mapping="Name:AUDIO_STREAM_NOTIFICATION"/>
+            <Component Name="bluetooth_sco" Type="Stream" Mapping="Name:AUDIO_STREAM_BLUETOOTH_SCO"/>
+            <Component Name="enforced_audible" Type="Stream" Mapping="Name:AUDIO_STREAM_ENFORCED_AUDIBLE"
+                       Description="Sounds that cannot be muted by user and must be routed to speaker"/>
+            <Component Name="dtmf" Type="Stream" Mapping="Name:AUDIO_STREAM_DTMF"/>
+            <Component Name="tts" Type="Stream" Mapping="Name:AUDIO_STREAM_TTS"
+                             Description="Transmitted Through Speaker. Plays over speaker only, silent on other devices"/>
+            <Component Name="accessibility" Type="Stream" Mapping="Name:AUDIO_STREAM_ACCESSIBILITY"
+                             Description="For accessibility talk back prompts"/>
+            <Component Name="rerouting" Type="Stream" Mapping="Name:AUDIO_STREAM_REROUTING"
+                             Description="For dynamic policy output mixes"/>
+            <Component Name="patch" Type="Stream" Mapping="Name:AUDIO_STREAM_PATCH"
+                             Description="For internal audio flinger tracks. Fixed volume"/>
+        </ComponentType>
+
+        <!--#################### STREAM END ####################-->
+
+        <!--#################### INPUT SOURCE BEGIN ####################-->
+
+        <ComponentType Name="InputSources" Description="associated to audio_source_t definition,
+                             identifier mapping must match the value of the enum">
+            <Component Name="default" Type="InputSource" Mapping="Name:AUDIO_SOURCE_DEFAULT"/>
+            <Component Name="mic" Type="InputSource" Mapping="Name:AUDIO_SOURCE_MIC"/>
+            <Component Name="voice_uplink" Type="InputSource"
+                                           Mapping="Name:AUDIO_SOURCE_VOICE_UPLINK"/>
+            <Component Name="voice_downlink" Type="InputSource"
+                                             Mapping="Name:AUDIO_SOURCE_VOICE_DOWNLINK"/>
+            <Component Name="voice_call" Type="InputSource"
+                                         Mapping="Name:AUDIO_SOURCE_VOICE_CALL"/>
+            <Component Name="camcorder" Type="InputSource" Mapping="Name:AUDIO_SOURCE_CAMCORDER"/>
+            <Component Name="voice_recognition" Type="InputSource"
+                                                Mapping="Name:AUDIO_SOURCE_VOICE_RECOGNITION"/>
+            <Component Name="voice_communication" Type="InputSource"
+                                                  Mapping="Name:AUDIO_SOURCE_VOICE_COMMUNICATION"/>
+            <Component Name="remote_submix" Type="InputSource"
+                                            Mapping="Name:AUDIO_SOURCE_REMOTE_SUBMIX"/>
+            <Component Name="unprocessed" Type="InputSource"
+                                            Mapping="Name:AUDIO_SOURCE_UNPROCESSED"/>
+            <Component Name="fm_tuner" Type="InputSource" Mapping="Name:AUDIO_SOURCE_FM_TUNER"/>
+            <Component Name="hotword" Type="InputSource" Mapping="Name:AUDIO_SOURCE_HOTWORD"/>
+        </ComponentType>
+
+        <!--#################### INPUT SOURCE END ####################-->
+    </ComponentLibrary>
+
+    <InstanceDefinition>
+        <Component Name="streams" Type="Streams"/>
+        <Component Name="input_sources" Type="InputSources"/>
+        <Component Name="product_strategies" Type="ProductStrategies"/>
+    </InstanceDefinition>
+</Subsystem>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Structure/ProductStrategies.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Structure/ProductStrategies.xml
new file mode 100644
index 0000000..53bba03
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Structure/ProductStrategies.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<ComponentTypeSet xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+              xmlns:xi="http://www.w3.org/2001/XInclude"
+              xsi:noNamespaceSchemaLocation="Schemas/ComponentTypeSet.xsd">
+
+  <ComponentType Name="ProductStrategies" Description="">
+      <Component Name="oem_traffic_anouncement" Type="ProductStrategy"/>
+      <Component Name="oem_strategy_1" Type="ProductStrategy"/>
+      <Component Name="oem_strategy_2" Type="ProductStrategy"/>
+
+      <Component Name="radio" Type="ProductStrategy"/>
+      <Component Name="ext_audio_source" Type="ProductStrategy"/>
+      <Component Name="voice_command" Type="ProductStrategy"/>
+      <Component Name="safety_alert" Type="ProductStrategy"/>
+
+      <Component Name="music" Type="ProductStrategy"/>
+      <Component Name="nav_guidance" Type="ProductStrategy"/>
+      <Component Name="voice_call" Type="ProductStrategy"/>
+      <Component Name="alarm" Type="ProductStrategy"/>
+      <Component Name="ring" Type="ProductStrategy"/>
+      <Component Name="notification" Type="ProductStrategy"/>
+      <Component Name="system" Type="ProductStrategy"/>
+  </ComponentType>
+
+</ComponentTypeSet>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.mk
new file mode 100644
index 0000000..e9d67e9
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.mk
@@ -0,0 +1,94 @@
+################################################################################################
+#
+# @NOTE:
+# Audio Policy Engine configurable example for generic device build
+#
+# Any vendor shall have its own configuration within the corresponding device folder
+#
+################################################################################################
+
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_configurable)
+
+LOCAL_PATH := $(call my-dir)
+
+PFW_CORE := external/parameter-framework
+PFW_DEFAULT_SCHEMAS_DIR := $(PFW_CORE)/upstream/schemas
+PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
+
+TOOLS := frameworks/av/services/audiopolicy/engineconfigurable/tools
+BUILD_PFW_SETTINGS := $(TOOLS)/build_audio_pfw_settings.mk
+
+##################################################################
+# CONFIGURATION FILES
+##################################################################
+########## Policy PFW Structures #########
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := PolicySubsystem.xml.phone
+LOCAL_MODULE_STEM := PolicySubsystem.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_REQUIRED_MODULES := \
+    PolicySubsystem-CommonTypes.xml \
+    ProductStrategies.xml.phone \
+    PolicySubsystem-Volume.xml \
+    libpolicy-subsystem \
+
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := ProductStrategies.xml.phone
+LOCAL_MODULE_STEM := ProductStrategies.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+######### Policy PFW Settings #########
+include $(CLEAR_VARS)
+LOCAL_MODULE := parameter-framework.policy.phone
+LOCAL_MODULE_STEM := PolicyConfigurableDomains.xml
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
+
+PFW_EDD_FILES := \
+        $(LOCAL_PATH)/../Settings/device_for_input_source.pfw \
+        $(LOCAL_PATH)/../Settings/volumes.pfw \
+        $(LOCAL_PATH)/Settings/device_for_product_strategy_media.pfw \
+        $(LOCAL_PATH)/Settings/device_for_product_strategy_accessibility.pfw \
+        $(LOCAL_PATH)/Settings/device_for_product_strategy_dtmf.pfw \
+        $(LOCAL_PATH)/Settings/device_for_product_strategy_enforced_audible.pfw \
+        $(LOCAL_PATH)/Settings/device_for_product_strategy_phone.pfw \
+        $(LOCAL_PATH)/Settings/device_for_product_strategy_sonification.pfw \
+        $(LOCAL_PATH)/Settings/device_for_product_strategy_sonification_respectful.pfw \
+        $(LOCAL_PATH)/Settings/device_for_product_strategy_rerouting.pfw \
+        $(LOCAL_PATH)/Settings/device_for_product_strategy_transmitted_through_speaker.pfw \
+        $(LOCAL_PATH)/Settings/device_for_product_strategy_unknown.pfw
+
+LOCAL_ADDITIONAL_DEPENDENCIES := \
+    $(PFW_EDD_FILES)
+
+LOCAL_REQUIRED_MODULES := \
+    PolicySubsystem.xml.phone \
+    PolicyClass.xml \
+    audio_policy_engine_criteria.xml \
+    audio_policy_engine_criterion_types.xml \
+    ParameterFrameworkConfigurationPolicy.xml
+
+PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criterion_types.xml
+PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criteria.xml
+
+PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
+
+PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
+
+include $(BUILD_PFW_SETTINGS)
+
+endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_configurable)
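[Editor's sketch, not part of the patch] As the header note in these example makefiles says, a vendor is expected to keep its own configuration in its device folder. A minimal sketch of such a vendor-side makefile, reusing the PFW_* variables and BUILD_PFW_SETTINGS exactly as the examples do; the module name and .pfw file name are hypothetical.

# Hypothetical vendor Android.mk fragment (illustrative only). Only the PFW_*
# variable names and BUILD_PFW_SETTINGS pattern come from this change; the
# module name and the vendor .pfw file are made up for illustration.
include $(CLEAR_VARS)
LOCAL_MODULE := parameter-framework.policy.myvendor
LOCAL_MODULE_STEM := PolicyConfigurableDomains.xml
LOCAL_MODULE_CLASS := ETC
LOCAL_VENDOR_MODULE := true
LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy

PFW_EDD_FILES := $(LOCAL_PATH)/Settings/my_device_for_product_strategies.pfw
PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criterion_types.xml
PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criteria.xml
PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)

include $(BUILD_PFW_SETTINGS)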
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_accessibility.pfw
similarity index 87%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_accessibility.pfw
index 7c87c80..53e93de 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_accessibility.pfw
@@ -1,4 +1,4 @@
-supDomain: DeviceForStrategy
+supDomain: DeviceForProductStrategy
 
 	supDomain: Accessibility
 		#
@@ -9,7 +9,7 @@
 		#
 		domain: UnreachableDevices
 			conf: Calibration
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					hdmi_arc = 0
 					spdif = 0
 					aux_line = 0
@@ -18,6 +18,8 @@
 					telephony_tx = 0
 					ip = 0
 					bus = 0
+					proxy = 0
+					usb_headset = 0
 					stub = 0
 
 		domain: Device
@@ -30,7 +32,7 @@
 				AvailableOutputDevices Includes RemoteSubmix
 				AvailableOutputDevicesAddresses Includes 0
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 1
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -58,7 +60,7 @@
 				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dp
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 1
@@ -86,7 +88,7 @@
 				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dpHeadphones
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -114,7 +116,7 @@
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes BluetoothA2dpSpeaker
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -142,7 +144,7 @@
 				ForceUseForMedia Is ForceSpeaker
 				AvailableOutputDevices Includes Speaker
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -171,7 +173,7 @@
 				AvailableOutputDevices Includes BluetoothScoCarkit
 				ForceUseForCommunication Is ForceBtSco
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -200,7 +202,7 @@
 				AvailableOutputDevices Includes BluetoothScoHeadset
 				ForceUseForCommunication Is ForceBtSco
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -229,7 +231,7 @@
 				AvailableOutputDevices Includes BluetoothSco
 				ForceUseForCommunication Is ForceBtSco
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -266,7 +268,7 @@
 						TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes WiredHeadphone
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -302,7 +304,7 @@
 					#
 				AvailableOutputDevices Includes Line
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -339,7 +341,7 @@
 						TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes WiredHeadset
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -379,7 +381,7 @@
 						ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes UsbDevice
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -410,7 +412,7 @@
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes UsbAccessory
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -440,7 +442,7 @@
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes DgtlDockHeadset
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -470,7 +472,7 @@
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes Hdmi
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -502,7 +504,7 @@
 				AvailableOutputDevices Includes AnlgDockHeadset
 				ForceUseForDock Is ForceAnalogDock
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -531,7 +533,7 @@
 				AvailableOutputDevices Includes Earpiece
 				ForceUseForCommunication IsNot ForceSpeaker
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 1
 					bluetooth_a2dp = 0
@@ -553,7 +555,7 @@
 			conf: Speaker
 				AvailableOutputDevices Includes Speaker
 
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
@@ -573,7 +575,7 @@
 					hdmi = 0
 
 			conf: Default
-				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					bluetooth_a2dp = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_dtmf.pfw
similarity index 86%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_dtmf.pfw
index c830c42..b8426c6 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_dtmf.pfw
@@ -1,16 +1,17 @@
-supDomain: DeviceForStrategy
-
+supDomain: DeviceForProductStrategies
 	supDomain: Dtmf
-
 		domain: UnreachableDevices
-			conf: Calibration
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+			conf: calibration
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					fm = 0
 					speaker_safe = 0
 					bluetooth_sco_carkit = 0
 					ip = 0
 					bus = 0
+					proxy = 0
+					usb_headset = 0
 					stub = 0
+				/Policy/policy/product_strategies/dtmf/device_address =
 
 		domain: Device2
 			conf: RemoteSubmix
@@ -22,7 +23,7 @@
 				AvailableOutputDevices Includes RemoteSubmix
 				AvailableOutputDevicesAddresses Includes 0
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 1
 					earpiece = 0
 					wired_headset = 0
@@ -50,7 +51,7 @@
 				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dp
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -78,7 +79,7 @@
 				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dpHeadphones
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -106,7 +107,7 @@
 				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dpSpeaker
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -135,7 +136,7 @@
 				ForceUseForHdmiSystemAudio IsNot ForceHdmiSystemEnforced
 				AvailableOutputDevices Includes Speaker
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -164,7 +165,7 @@
 				AvailableOutputDevices Includes BluetoothScoHeadset
 				ForceUseForCommunication Is ForceBtSco
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -193,7 +194,7 @@
 				AvailableOutputDevices Includes BluetoothSco
 				ForceUseForCommunication Is ForceBtSco
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -230,7 +231,7 @@
 						TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes WiredHeadphone
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -269,7 +270,7 @@
 						TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes Line
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -306,7 +307,7 @@
 						TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes WiredHeadset
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 1
@@ -346,7 +347,7 @@
 						ForceUseForCommunication Is ForceSpeaker
 				AvailableOutputDevices Includes UsbDevice
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -377,7 +378,7 @@
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes UsbAccessory
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -407,7 +408,7 @@
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes DgtlDockHeadset
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -437,7 +438,7 @@
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes Hdmi
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -469,7 +470,7 @@
 				ForceUseForDock Is ForceAnalogDock
 				AvailableOutputDevices Includes AnlgDockHeadset
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -498,7 +499,7 @@
 				AvailableOutputDevices Includes Earpiece
 				ForceUseForCommunication IsNot ForceSpeaker
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 1
 					wired_headset = 0
@@ -536,7 +537,7 @@
 						ForceUseForHdmiSystemAudio IsNot ForceHdmiSystemEnforced
 				AvailableOutputDevices Includes Speaker
 
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -556,7 +557,7 @@
 					speaker = 1
 
 			conf: Default
-				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -586,10 +587,10 @@
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes HdmiArc
 
-				/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc = 1
+				/Policy/policy/product_strategies/dtmf/selected_output_devices/mask/hdmi_arc = 1
 
 			conf: NotSelected
-				/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc = 0
+				/Policy/policy/product_strategies/dtmf/selected_output_devices/mask/hdmi_arc = 0
 
 		domain: Spdif
 			#
@@ -602,10 +603,10 @@
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes Spdif
 
-				/Policy/policy/strategies/dtmf/selected_output_devices/mask/spdif = 1
+				/Policy/policy/product_strategies/dtmf/selected_output_devices/mask/spdif = 1
 
 			conf: NotSelected
-				/Policy/policy/strategies/dtmf/selected_output_devices/mask/spdif = 0
+				/Policy/policy/product_strategies/dtmf/selected_output_devices/mask/spdif = 0
 
 		domain: AuxLine
 			#
@@ -618,7 +619,7 @@
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes AuxLine
 
-				/Policy/policy/strategies/dtmf/selected_output_devices/mask/aux_line = 1
+				/Policy/policy/product_strategies/dtmf/selected_output_devices/mask/aux_line = 1
 
 			conf: NotSelected
-				/Policy/policy/strategies/dtmf/selected_output_devices/mask/aux_line = 0
+				/Policy/policy/product_strategies/dtmf/selected_output_devices/mask/aux_line = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_enforced_audible.pfw
similarity index 83%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_enforced_audible.pfw
index c641138..2daa9ac 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_enforced_audible.pfw
@@ -1,10 +1,10 @@
-supDomain: DeviceForStrategy
+supDomain: DeviceForProductStrategy
 
 	supDomain: EnforcedAudible
 
 		domain: UnreachableDevices
 			conf: Calibration
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					# no enforced_audible on remote submix (e.g. WFD)
 					hdmi_arc = 0
 					spdif = 0
@@ -13,6 +13,8 @@
 					ip = 0
 					bus = 0
 					fm = 0
+					proxy = 0
+					usb_headset = 0
 					stub = 0
 
 		domain: Speaker
@@ -51,11 +53,11 @@
 							AvailableOutputDevices Excludes AnlgDockHeadset
 							ForceUseForDock IsNot ForceAnalogDock
 
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					speaker = 1
 
 			conf: NotSelected
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					speaker = 0
 
 		domain: Device2
@@ -63,7 +65,7 @@
 				AvailableOutputDevices Includes RemoteSubmix
 				AvailableOutputDevicesAddresses Includes 0
 
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					remote_submix = 1
 					earpiece = 0
 					wired_headset = 0
@@ -86,7 +88,7 @@
 				AvailableOutputDevices Includes BluetoothA2dp
 				ForceUseForMedia IsNot ForceNoBtA2dp
 
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -109,7 +111,7 @@
 				AvailableOutputDevices Includes BluetoothA2dpHeadphones
 				ForceUseForMedia IsNot ForceNoBtA2dp
 
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -132,7 +134,7 @@
 				AvailableOutputDevices Includes BluetoothA2dpSpeaker
 				ForceUseForMedia IsNot ForceNoBtA2dp
 
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -155,7 +157,7 @@
 				ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes WiredHeadphone
 
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -178,7 +180,7 @@
 				ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes Line
 
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -201,7 +203,7 @@
 				ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes WiredHeadset
 
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 1
@@ -224,7 +226,7 @@
 				ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes UsbAccessory
 
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -247,7 +249,7 @@
 				ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes UsbDevice
 
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -270,7 +272,7 @@
 				ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes DgtlDockHeadset
 
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -293,7 +295,7 @@
 				ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes Hdmi
 
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -317,7 +319,7 @@
 				ForceUseForDock Is ForceAnalogDock
 				AvailableOutputDevices Includes AnlgDockHeadset
 
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
@@ -337,7 +339,7 @@
 					line = 0
 
 			conf: NoDevice
-				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
 					remote_submix = 0
 					earpiece = 0
 					wired_headset = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_media.pfw
similarity index 77%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_media.pfw
index f8bab3d..d6d355c 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_media.pfw
@@ -1,10 +1,8 @@
-domainGroup: DeviceForStrategy
-
-	domainGroup: Media
-
+supDomain: DeviceForProductStrategy
+	supDomain: Media
 		domain: UnreachableDevices
-			conf: Calibration
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+			conf: calibration
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					fm = 0
 					speaker_safe = 0
 					earpiece = 0
@@ -13,15 +11,18 @@
 					bluetooth_sco_carkit = 0
 					telephony_tx = 0
 					ip = 0
+					proxy = 0
+					usb_headset = 0
 					bus = 0
 					stub = 0
+				/Policy/policy/product_strategies/media/device_address =
 
 		domain: Device2
 			conf: RemoteSubmix
 				AvailableOutputDevices Includes RemoteSubmix
 				AvailableOutputDevicesAddresses Includes 0
 
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					speaker = 0
 					hdmi = 0
 					dgtl_dock_headset = 0
@@ -41,7 +42,7 @@
 				ForceUseForCommunication IsNot ForceBtSco
 				AvailableOutputDevices Includes BluetoothA2dp
 
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					speaker = 0
 					hdmi = 0
 					dgtl_dock_headset = 0
@@ -61,7 +62,7 @@
 				ForceUseForCommunication IsNot ForceBtSco
 				AvailableOutputDevices Includes BluetoothA2dpHeadphones
 
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					speaker = 0
 					hdmi = 0
 					dgtl_dock_headset = 0
@@ -81,7 +82,7 @@
 				ForceUseForCommunication IsNot ForceBtSco
 				AvailableOutputDevices Includes BluetoothA2dpSpeaker
 
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					speaker = 0
 					hdmi = 0
 					dgtl_dock_headset = 0
@@ -104,7 +105,7 @@
 				#
 				ForceUseForHdmiSystemAudio IsNot ForceHdmiSystemEnforced
 
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					speaker = 1
 					hdmi = 0
 					dgtl_dock_headset = 0
@@ -122,7 +123,7 @@
 			conf: WiredHeadphone
 				AvailableOutputDevices Includes WiredHeadphone
 
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					speaker = 0
 					hdmi = 0
 					dgtl_dock_headset = 0
@@ -140,7 +141,7 @@
 			conf: Line
 				AvailableOutputDevices Includes Line
 
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					speaker = 0
 					hdmi = 0
 					dgtl_dock_headset = 0
@@ -158,7 +159,7 @@
 			conf: WiredHeadset
 				AvailableOutputDevices Includes WiredHeadset
 
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					speaker = 0
 					hdmi = 0
 					dgtl_dock_headset = 0
@@ -176,7 +177,7 @@
 			conf: UsbAccessory
 				AvailableOutputDevices Includes UsbAccessory
 
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					speaker = 0
 					hdmi = 0
 					dgtl_dock_headset = 0
@@ -194,7 +195,7 @@
 			conf: UsbDevice
 				AvailableOutputDevices Includes UsbDevice
 
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					speaker = 0
 					hdmi = 0
 					dgtl_dock_headset = 0
@@ -212,7 +213,7 @@
 			conf: DgtlDockHeadset
 				AvailableOutputDevices Includes DgtlDockHeadset
 
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					speaker = 0
 					hdmi = 0
 					dgtl_dock_headset = 1
@@ -230,7 +231,7 @@
 			conf: AuxDigital
 				AvailableOutputDevices Includes Hdmi
 
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					speaker = 0
 					hdmi = 1
 					dgtl_dock_headset = 0
@@ -249,7 +250,7 @@
 				AvailableOutputDevices Includes AnlgDockHeadset
 				ForceUseForDock Is ForceAnalogDock
 
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					speaker = 0
 					hdmi = 0
 					dgtl_dock_headset = 0
@@ -272,7 +273,7 @@
 				ForceUseForHdmiSystemAudio IsNot ForceHdmiSystemEnforced
 				ForceUseForCommunication IsNot ForceBtSco
 
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					speaker = 1
 					hdmi = 0
 					dgtl_dock_headset = 0
@@ -288,7 +289,7 @@
 					line = 0
 
 			conf: Default
-				component: /Policy/policy/strategies/media/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/media/selected_output_devices/mask
 					speaker = 0
 					hdmi = 0
 					dgtl_dock_headset = 0
@@ -311,10 +312,10 @@
 			conf: Selected
 				AvailableOutputDevices Includes HdmiArc
 
-				/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc = 1
+				/Policy/policy/product_strategies/media/selected_output_devices/mask/hdmi_arc = 1
 
 			conf: NotSelected
-				/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc = 0
+				/Policy/policy/product_strategies/media/selected_output_devices/mask/hdmi_arc = 0
 
 		domain: Spdif
 			#
@@ -324,16 +325,16 @@
 			conf: Selected
 				AvailableOutputDevices Includes Spdif
 
-				/Policy/policy/strategies/media/selected_output_devices/mask/spdif = 1
+				/Policy/policy/product_strategies/media/selected_output_devices/mask/spdif = 1
 
 			conf: NotSelected
-				/Policy/policy/strategies/media/selected_output_devices/mask/spdif = 0
+				/Policy/policy/product_strategies/media/selected_output_devices/mask/spdif = 0
 
 		domain: AuxLine
 			conf: Selected
 				AvailableOutputDevices Includes AuxLine
 
-				/Policy/policy/strategies/media/selected_output_devices/mask/aux_line = 1
+				/Policy/policy/product_strategies/media/selected_output_devices/mask/aux_line = 1
 
 			conf: NotSelected
-				/Policy/policy/strategies/media/selected_output_devices/mask/aux_line = 0
+				/Policy/policy/product_strategies/media/selected_output_devices/mask/aux_line = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_phone.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_phone.pfw
similarity index 86%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_phone.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_phone.pfw
index d371ad9..5693d4e 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_phone.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_phone.pfw
@@ -1,10 +1,8 @@
-supDomain: DeviceForStrategy
-
+supDomain: DeviceForProductStrategy
 	supDomain: Phone
-
 		domain: UnreachableDevices
-			conf: Calibration
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+			conf: calibration
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					# no sonification on remote submix (e.g. WFD)
 					remote_submix = 0
 					hdmi_arc = 0
@@ -12,16 +10,18 @@
 					spdif = 0
 					fm = 0
 					speaker_safe = 0
-					ip = 0
 					bus = 0
+					proxy = 0
+					usb_headset = 0
 					stub = 0
+				/Policy/policy/product_strategies/phone/device_address =
 
 		domain: Device
 			conf: ScoCarkit
 				AvailableOutputDevices Includes BluetoothScoCarkit
 				ForceUseForCommunication Is ForceBtSco
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -44,7 +44,7 @@
 				AvailableOutputDevices Includes BluetoothScoHeadset
 				ForceUseForCommunication Is ForceBtSco
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -67,7 +67,7 @@
 				AvailableOutputDevices Includes BluetoothSco
 				ForceUseForCommunication Is ForceBtSco
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -97,7 +97,7 @@
 				ForceUseForMedia IsNot ForceNoBtA2dp
 				ForceUseForCommunication Is ForceNone
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -127,7 +127,7 @@
 				ForceUseForMedia IsNot ForceNoBtA2dp
 				ForceUseForCommunication Is ForceNone
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -157,7 +157,7 @@
 				ForceUseForMedia IsNot ForceNoBtA2dp
 				ForceUseForCommunication Is ForceSpeaker
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -184,7 +184,7 @@
 				AvailableOutputDevices Includes WiredHeadphone
 				ForceUseForCommunication IsNot ForceSpeaker
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 1
@@ -211,7 +211,7 @@
 				AvailableOutputDevices Includes WiredHeadset
 				ForceUseForCommunication IsNot ForceSpeaker
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 1
 					wired_headphone = 0
@@ -234,7 +234,7 @@
 				AvailableOutputDevices Includes Line
 				ForceUseForCommunication IsNot ForceSpeaker
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -272,7 +272,7 @@
 						TelephonyMode IsNot InCall
 						TelephonyMode IsNot InCommunication
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -300,7 +300,7 @@
 				TelephonyMode IsNot InCommunication
 				TelephonyMode IsNot InCall
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -328,7 +328,7 @@
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -356,7 +356,7 @@
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -384,7 +384,7 @@
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -411,7 +411,7 @@
 				AvailableOutputDevices Includes Earpiece
 				ForceUseForCommunication IsNot ForceSpeaker
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 1
 					wired_headset = 0
 					wired_headphone = 0
@@ -438,7 +438,7 @@
 				AvailableOutputDevices Includes Speaker
 				ForceUseForCommunication Is ForceSpeaker
 
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -461,7 +461,7 @@
 				#
 				# Fallback on default output device which can be speaker for example
 				#
-				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_rerouting.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_rerouting.pfw
new file mode 100644
index 0000000..c064c18
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_rerouting.pfw
@@ -0,0 +1,43 @@
+supDomain: DeviceForProductStrategy
+	supDomain: Rerouting
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/rerouting/selected_output_devices/mask
+					earpiece = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					stub = 0
+				/Policy/policy/product_strategies/rerouting/device_address =
+
+		domain: SelectedDevice
+			conf: Bus
+				component: /Policy/policy/product_strategies/rerouting/selected_output_devices/mask
+					bus = 1
+
+			conf: Default
+				component: /Policy/policy/product_strategies/rerouting/selected_output_devices/mask
+					bus = 0
+
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_sonification.pfw
similarity index 86%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_sonification.pfw
index 70740d1..c4edeeb 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_sonification.pfw
@@ -1,11 +1,8 @@
-supDomain: DeviceForStrategy
-
+supDomain: DeviceForProductStrategy
 	supDomain: Sonification
-
 		domain: UnreachableDevices
-			conf: Calibration
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
-					# no sonification on remote submix (e.g. WFD)
+			conf: calibration
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					remote_submix = 0
 					hdmi_arc = 0
 					spdif = 0
@@ -16,9 +13,12 @@
 					# Sonification follows phone strategy if in call but HDMI is not reachable
 					#
 					hdmi = 0
-					ip = 0
 					bus = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
 					stub = 0
+				/Policy/policy/product_strategies/sonification/device_address =
 
 		domain: Speaker
 
@@ -41,11 +41,11 @@
 							TelephonyMode Is InCommunication
 						AvailableOutputDevices Excludes Line
 
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					speaker = 1
 
 			conf: NotSelected
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					speaker = 0
 
 		domain: Device2
@@ -59,7 +59,7 @@
 				TelephonyMode IsNot InCommunication
 				ForceUseForMedia IsNot ForceNoBtA2dp
 
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -85,7 +85,7 @@
 				TelephonyMode IsNot InCommunication
 				ForceUseForMedia IsNot ForceNoBtA2dp
 
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -111,7 +111,7 @@
 				TelephonyMode IsNot InCommunication
 				ForceUseForMedia IsNot ForceNoBtA2dp
 
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -138,7 +138,7 @@
 				ForceUseForCommunication Is ForceBtSco
 				AvailableOutputDevices Includes BluetoothScoCarkit
 
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -165,7 +165,7 @@
 				ForceUseForCommunication Is ForceBtSco
 				AvailableOutputDevices Includes BluetoothScoHeadset
 
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -192,7 +192,7 @@
 				ForceUseForCommunication Is ForceBtSco
 				AvailableOutputDevices Includes BluetoothSco
 
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -228,7 +228,7 @@
 							TelephonyMode Is InCommunication
 						ForceUseForCommunication IsNot ForceSpeaker
 
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 1
@@ -266,7 +266,7 @@
 						TelephonyMode IsNot InCommunication
 						ForceUseForMedia IsNot ForceSpeaker
 
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -302,7 +302,7 @@
 							TelephonyMode Is InCommunication
 						ForceUseForCommunication IsNot ForceSpeaker
 
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 1
 					wired_headphone = 0
@@ -339,7 +339,7 @@
 							TelephonyMode Is InCommunication
 						ForceUseForCommunication IsNot ForceSpeaker
 
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -368,7 +368,7 @@
 				TelephonyMode IsNot InCommunication
 				ForceUseForMedia IsNot ForceSpeaker
 
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -397,7 +397,7 @@
 				TelephonyMode IsNot InCommunication
 				ForceUseForMedia IsNot ForceSpeaker
 
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -427,7 +427,7 @@
 				ForceUseForMedia IsNot ForceSpeaker
 				ForceUseForDock Is ForceAnalogDock
 
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
@@ -454,7 +454,7 @@
 				ForceUseForCommunication IsNot ForceSpeaker
 				AvailableOutputDevices Includes Earpiece
 
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					earpiece = 1
 					wired_headset = 0
 					wired_headphone = 0
@@ -472,7 +472,7 @@
 					line = 0
 
 			conf: None
-				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
 					earpiece = 0
 					wired_headset = 0
 					wired_headphone = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_sonification_respectful.pfw
similarity index 85%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_sonification_respectful.pfw
index b673c4f..0a3dd5f 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_sonification_respectful.pfw
@@ -1,6 +1,5 @@
-domainGroup: DeviceForStrategy
-
-	domainGroup: SonificationRespectful
+supDomain: DeviceForProductStrategy
+	supDomain: SonificationRespectful
 		#
 		# Sonificiation Respectful follows:
 		#	- If in call: Strategy sonification (that follows phone strategy in call also...)
@@ -12,10 +11,9 @@
 		#	  selected.
 		#
 		# Case of stream active handled programmatically
-
 		domain: UnreachableDevices
-			conf: Calibration
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+			conf: calibration
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					remote_submix = 0
 					hdmi_arc = 0
 					aux_line = 0
@@ -23,8 +21,10 @@
 					fm = 0
 					telephony_tx = 0
 					ip = 0
-					bus = 0
+					proxy = 0
+					usb_headset = 0
 					stub = 0
+				/Policy/policy/product_strategies/sonification_respectful/device_address =
 
 		domain: Speakers
 
@@ -38,7 +38,7 @@
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					speaker_safe = 1
 					speaker = 0
 
@@ -61,12 +61,12 @@
 							TelephonyMode Is InCommunication
 						AvailableOutputDevices Excludes Line
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					speaker_safe = 0
 					speaker = 1
 
 			conf: None
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					speaker_safe = 0
 					speaker = 0
 
@@ -81,7 +81,7 @@
 				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dp
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 0
 					bluetooth_sco = 0
 					bluetooth_sco_headset = 0
@@ -108,7 +108,7 @@
 				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dpHeadphones
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 0
 					bluetooth_sco = 0
 					bluetooth_sco_headset = 0
@@ -135,7 +135,7 @@
 				ForceUseForMedia IsNot ForceNoBtA2dp
 				AvailableOutputDevices Includes BluetoothA2dpSpeaker
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 0
 					bluetooth_sco = 0
 					bluetooth_sco_headset = 0
@@ -162,7 +162,7 @@
 				ForceUseForCommunication Is ForceBtSco
 				AvailableOutputDevices Includes BluetoothScoCarkit
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 0
 					bluetooth_sco = 0
 					bluetooth_sco_headset = 0
@@ -189,7 +189,7 @@
 				ForceUseForCommunication Is ForceBtSco
 				AvailableOutputDevices Includes BluetoothScoHeadset
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 0
 					bluetooth_sco = 0
 					bluetooth_sco_headset = 1
@@ -216,7 +216,7 @@
 				ForceUseForCommunication Is ForceBtSco
 				AvailableOutputDevices Includes BluetoothSco
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 0
 					bluetooth_sco = 1
 					bluetooth_sco_headset = 0
@@ -253,7 +253,7 @@
 						ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes WiredHeadphone
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 0
 					bluetooth_sco = 0
 					bluetooth_sco_headset = 0
@@ -294,7 +294,7 @@
 						ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes Line
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 0
 					bluetooth_sco = 0
 					bluetooth_sco_headset = 0
@@ -331,7 +331,7 @@
 						ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes WiredHeadset
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 0
 					bluetooth_sco = 0
 					bluetooth_sco_headset = 0
@@ -369,7 +369,7 @@
 						AvailableOutputDevices Excludes UsbAccessory
 				AvailableOutputDevices Includes UsbDevice
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 0
 					bluetooth_sco = 0
 					bluetooth_sco_headset = 0
@@ -399,7 +399,7 @@
 				ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes UsbAccessory
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 0
 					bluetooth_sco = 0
 					bluetooth_sco_headset = 0
@@ -429,7 +429,7 @@
 				ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes DgtlDockHeadset
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 0
 					bluetooth_sco = 0
 					bluetooth_sco_headset = 0
@@ -459,7 +459,7 @@
 				ForceUseForMedia IsNot ForceSpeaker
 				AvailableOutputDevices Includes Hdmi
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 0
 					bluetooth_sco = 0
 					bluetooth_sco_headset = 0
@@ -490,7 +490,7 @@
 				ForceUseForDock Is ForceAnalogDock
 				AvailableOutputDevices Includes AnlgDockHeadset
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 0
 					bluetooth_sco = 0
 					bluetooth_sco_headset = 0
@@ -517,7 +517,7 @@
 				ForceUseForCommunication IsNot ForceSpeaker
 				AvailableOutputDevices Includes Earpiece
 
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
 					earpiece = 1
 					bluetooth_sco = 0
 					bluetooth_sco_headset = 0
@@ -534,20 +534,3 @@
 					usb_device = 0
 					hdmi = 0
 
-			conf: None
-				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
-					earpiece = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
-					bluetooth_sco_carkit = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					bluetooth_a2dp = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_transmitted_through_speaker.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_transmitted_through_speaker.pfw
similarity index 61%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_transmitted_through_speaker.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_transmitted_through_speaker.pfw
index 9f9c211..3fc7670 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_transmitted_through_speaker.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_transmitted_through_speaker.pfw
@@ -1,9 +1,8 @@
-supDomain: DeviceForStrategy
-
+supDomain: DeviceForProductStrategy
 	supDomain: TransmittedThroughSpeaker
 		domain: UnreacheableDevices
 			conf: Calibration
-				component: /Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/transmitted_through_speaker/selected_output_devices/mask
 					remote_submix = 0
 					hdmi_arc = 0
 					spdif = 0
@@ -11,7 +10,7 @@
 					fm = 0
 					speaker_safe = 0
 					earpiece = 0
-					wired_headset = 1
+					wired_headset = 0
 					wired_headphone = 0
 					bluetooth_sco = 0
 					bluetooth_sco_headset = 0
@@ -29,15 +28,16 @@
 					ip = 0
 					bus = 0
 					stub = 0
+				/Policy/policy/product_strategies/transmitted_through_speaker/device_address =
 
 		domain: Speaker
 			conf: Selected
 				AvailableOutputDevices Includes Speaker
 
-				component: /Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/transmitted_through_speaker/selected_output_devices/mask
 					speaker = 1
 
 			conf: NotSelected
-				component: /Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask
+				component: /Policy/policy/product_strategies/transmitted_through_speaker/selected_output_devices/mask
 					speaker = 0
 
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_unknown.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_unknown.pfw
new file mode 100644
index 0000000..c46cf56
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_unknown.pfw
@@ -0,0 +1,36 @@
+supDomain: DeviceForProductStrategy
+	supDomain: Unknown
+		domain: UnreachableDevices
+			conf: calibration
+				component: /Policy/policy/product_strategies/unknown/selected_output_devices/mask
+					earpiece = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					remote_submix = 0
+					telephony_tx = 0
+					line = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					aux_line = 0
+					speaker_safe = 0
+					ip = 0
+					proxy = 0
+					usb_headset = 0
+					bus = 0
+					stub = 0
+				/Policy/policy/product_strategies/unknown/device_address =
+
+
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Structure/PolicySubsystem.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Structure/PolicySubsystem.xml
new file mode 100644
index 0000000..b55ce2c
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Structure/PolicySubsystem.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<Subsystem xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+           xmlns:xi="http://www.w3.org/2001/XInclude"
+           xsi:noNamespaceSchemaLocation="Schemas/Subsystem.xsd"
+           Name="policy" Type="Policy">
+
+    <ComponentLibrary>
+        <!--#################### GLOBAL COMPONENTS BEGIN ####################-->
+        <!-- Common Types definition -->
+        <xi:include href="PolicySubsystem-CommonTypes.xml"/>
+        <xi:include href="ProductStrategies.xml"/>
+
+
+        <!--#################### GLOBAL COMPONENTS END ####################-->
+
+        <!--#################### STREAM BEGIN ####################-->
+
+        <ComponentType Name="Streams" Description="associated to audio_stream_type_t definition">
+            <Component Name="voice_call" Type="Stream" Mapping="Name:AUDIO_STREAM_VOICE_CALL"/>
+            <Component Name="system" Type="Stream" Mapping="Name:AUDIO_STREAM_SYSTEM"/>
+            <Component Name="ring" Type="Stream" Mapping="Name:AUDIO_STREAM_RING"/>
+            <Component Name="music" Type="Stream" Mapping="Name:AUDIO_STREAM_MUSIC"/>
+            <Component Name="alarm" Type="Stream" Mapping="Name:AUDIO_STREAM_ALARM"/>
+            <Component Name="notification" Type="Stream" Mapping="Name:AUDIO_STREAM_NOTIFICATION"/>
+            <Component Name="bluetooth_sco" Type="Stream" Mapping="Name:AUDIO_STREAM_BLUETOOTH_SCO"/>
+            <Component Name="enforced_audible" Type="Stream" Mapping="Name:AUDIO_STREAM_ENFORCED_AUDIBLE"
+                       Description="Sounds that cannot be muted by user and must be routed to speaker"/>
+            <Component Name="dtmf" Type="Stream" Mapping="Name:AUDIO_STREAM_DTMF"/>
+            <Component Name="tts" Type="Stream" Mapping="Name:AUDIO_STREAM_TTS"
+                             Description="Transmitted Through Speaker. Plays over speaker only, silent on other devices"/>
+            <Component Name="accessibility" Type="Stream" Mapping="Name:AUDIO_STREAM_ACCESSIBILITY"
+                             Description="For accessibility talk back prompts"/>
+            <Component Name="rerouting" Type="Stream" Mapping="Name:AUDIO_STREAM_REROUTING"
+                             Description="For dynamic policy output mixes"/>
+            <Component Name="patch" Type="Stream" Mapping="Name:AUDIO_STREAM_PATCH"
+                             Description="For internal audio flinger tracks. Fixed volume"/>
+        </ComponentType>
+
+        <!--#################### STREAM END ####################-->
+
+        <!--#################### INPUT SOURCE BEGIN ####################-->
+
+        <ComponentType Name="InputSources" Description="associated to audio_source_t definition,
+                             identifier mapping must match the value of the enum">
+            <Component Name="default" Type="InputSource" Mapping="Name:AUDIO_SOURCE_DEFAULT"/>
+            <Component Name="mic" Type="InputSource" Mapping="Name:AUDIO_SOURCE_MIC"/>
+            <Component Name="voice_uplink" Type="InputSource"
+                                           Mapping="Name:AUDIO_SOURCE_VOICE_UPLINK"/>
+            <Component Name="voice_downlink" Type="InputSource"
+                                             Mapping="Name:AUDIO_SOURCE_VOICE_DOWNLINK"/>
+            <Component Name="voice_call" Type="InputSource"
+                                         Mapping="Name:AUDIO_SOURCE_VOICE_CALL"/>
+            <Component Name="camcorder" Type="InputSource" Mapping="Name:AUDIO_SOURCE_CAMCORDER"/>
+            <Component Name="voice_recognition" Type="InputSource"
+                                                Mapping="Name:AUDIO_SOURCE_VOICE_RECOGNITION"/>
+            <Component Name="voice_communication" Type="InputSource"
+                                                  Mapping="Name:AUDIO_SOURCE_VOICE_COMMUNICATION"/>
+            <Component Name="remote_submix" Type="InputSource"
+                                            Mapping="Name:AUDIO_SOURCE_REMOTE_SUBMIX"/>
+            <Component Name="unprocessed" Type="InputSource"
+                                            Mapping="Name:AUDIO_SOURCE_UNPROCESSED"/>
+            <Component Name="fm_tuner" Type="InputSource" Mapping="Name:AUDIO_SOURCE_FM_TUNER"/>
+            <Component Name="hotword" Type="InputSource" Mapping="Name:AUDIO_SOURCE_HOTWORD"/>
+        </ComponentType>
+
+        <!--#################### INPUT SOURCE END ####################-->
+    </ComponentLibrary>
+
+    <InstanceDefinition>
+        <Component Name="streams" Type="Streams"/>
+        <Component Name="input_sources" Type="InputSources"/>
+        <Component Name="product_strategies" Type="ProductStrategies"/>
+    </InstanceDefinition>
+</Subsystem>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Structure/ProductStrategies.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Structure/ProductStrategies.xml
new file mode 100644
index 0000000..4cbb3da
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Structure/ProductStrategies.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<ComponentTypeSet xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+              xmlns:xi="http://www.w3.org/2001/XInclude"
+              xsi:noNamespaceSchemaLocation="Schemas/ComponentTypeSet.xsd">
+
+  <ComponentType Name="ProductStrategies" Description="">
+      <Component Name="accessibility" Type="ProductStrategy"/>
+      <Component Name="enforced_audible" Type="ProductStrategy"/>
+      <Component Name="transmitted_through_speaker" Type="ProductStrategy"/>
+
+      <Component Name="media" Type="ProductStrategy"/>
+      <Component Name="phone" Type="ProductStrategy"/>
+      <Component Name="dtmf" Type="ProductStrategy"/>
+
+      <Component Name="sonification" Type="ProductStrategy"/>
+      <Component Name="sonification_respectful" Type="ProductStrategy"/>
+      <Component Name="rerouting" Type="ProductStrategy"/>
+      <Component Name="unknown" Type="ProductStrategy"/>
+  </ComponentType>
+
+</ComponentTypeSet>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw
deleted file mode 100644
index 28a3629..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw
+++ /dev/null
@@ -1,301 +0,0 @@
-domainGroup: DeviceForStrategy
-
-	domainGroup: Rerouting
-		#
-		# Falls through media strategy
-		#
-		domain: UnreachableDevices
-			conf: Calibration
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					hdmi_arc = 0
-					spdif = 0
-					aux_line = 0
-					fm = 0
-					speaker_safe = 0
-					earpiece = 0
-					bluetooth_sco = 0
-					bluetooth_sco_headset = 0
-					bluetooth_sco_carkit = 0
-					telephony_tx = 0
-					ip = 0
-					bus = 0
-					stub = 0
-
-		domain: Device2
-			conf: RemoteSubmix
-				AvailableOutputDevices Includes RemoteSubmix
-				AvailableOutputDevicesAddresses Includes 0
-
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					remote_submix = 1
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: BluetoothA2dp
-				ForceUseForMedia IsNot ForceNoBtA2dp
-				AvailableOutputDevices Includes BluetoothA2dp
-
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 1
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: BluetoothA2dpHeadphone
-				ForceUseForMedia IsNot ForceNoBtA2dp
-				AvailableOutputDevices Includes BluetoothA2dpHeadphones
-
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 1
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: BluetoothA2dpSpeaker
-				ForceUseForMedia IsNot ForceNoBtA2dp
-				AvailableOutputDevices Includes BluetoothA2dpSpeaker
-
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 1
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: ForceSpeaker
-				ForceUseForMedia Is ForceSpeaker
-				AvailableOutputDevices Includes Speaker
-
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 1
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: WiredHeadphone
-				AvailableOutputDevices Includes WiredHeadphone
-
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 1
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: Line
-				AvailableOutputDevices Includes Line
-
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 1
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: WiredHeadset
-				AvailableOutputDevices Includes WiredHeadset
-
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 1
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: UsbAccessory
-				AvailableOutputDevices Includes UsbAccessory
-
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 1
-					usb_device = 0
-					hdmi = 0
-
-			conf: UsbDevice
-				AvailableOutputDevices Includes UsbDevice
-
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 1
-					hdmi = 0
-
-			conf: DgtlDockHeadset
-				AvailableOutputDevices Includes DgtlDockHeadset
-
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 1
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: AuxDigital
-				#
-				# Rerouting is similar to media and sonification (exept here: sonification is not allowed on HDMI)
-				#
-				AvailableOutputDevices Includes Hdmi
-
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 1
-
-			conf: AnlgDockHeadset
-				AvailableOutputDevices Includes AnlgDockHeadset
-				ForceUseForDock Is ForceAnalogDock
-
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 1
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: Speaker
-				AvailableOutputDevices Includes Speaker
-
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 1
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
-			conf: Default
-				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
-					remote_submix = 0
-					bluetooth_a2dp = 0
-					bluetooth_a2dp_headphones = 0
-					bluetooth_a2dp_speaker = 0
-					speaker = 0
-					wired_headset = 0
-					wired_headphone = 0
-					line = 0
-					angl_dock_headset = 0
-					dgtl_dock_headset = 0
-					usb_accessory = 0
-					usb_device = 0
-					hdmi = 0
-
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_stream.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_stream.pfw
deleted file mode 100644
index 3940b9d..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_stream.pfw
+++ /dev/null
@@ -1,20 +0,0 @@
-domain: StrategyForStream
-
-	conf: Calibration
-		/Policy/policy/streams/voice_call/applicable_strategy/strategy = phone
-		#
-		# NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
-		# while key clicks are played produces a poor result
-		#
-		/Policy/policy/streams/system/applicable_strategy/strategy = media
-		/Policy/policy/streams/ring/applicable_strategy/strategy = sonification
-		/Policy/policy/streams/music/applicable_strategy/strategy = media
-		/Policy/policy/streams/alarm/applicable_strategy/strategy = sonification
-		/Policy/policy/streams/notification/applicable_strategy/strategy = sonification_respectful
-		/Policy/policy/streams/bluetooth_sco/applicable_strategy/strategy = phone
-		/Policy/policy/streams/enforced_audible/applicable_strategy/strategy = enforced_audible
-		/Policy/policy/streams/dtmf/applicable_strategy/strategy = dtmf
-		/Policy/policy/streams/tts/applicable_strategy/strategy = transmitted_through_speaker
-		/Policy/policy/streams/accessibility/applicable_strategy/strategy = accessibility
-		/Policy/policy/streams/rerouting/applicable_strategy/strategy = rerouting
-
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem-CommonTypes.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem-CommonTypes.xml
index daa7f68..56c5ed3 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem-CommonTypes.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem-CommonTypes.xml
@@ -133,30 +133,6 @@
         </BitParameterBlock>
     </ComponentType>
 
-    <!-- Routing Strategy definition as an enumeration. Numerical value must match the value
-         of the routing strategy in policy header file. -->
-    <ComponentType Name="Strategy">
-        <EnumParameter Name="strategy" Size="32">
-            <ValuePair Literal="media" Numerical="0"/>
-            <ValuePair Literal="phone" Numerical="1"/>
-            <ValuePair Literal="sonification" Numerical="2"/>
-            <ValuePair Literal="sonification_respectful" Numerical="3"/>
-            <ValuePair Literal="dtmf" Numerical="4"/>
-            <ValuePair Literal="enforced_audible" Numerical="5"/>
-            <ValuePair Literal="transmitted_through_speaker" Numerical="6"/>
-            <ValuePair Literal="accessibility" Numerical="7"/>
-            <ValuePair Literal="rerouting" Numerical="8"/>
-        </EnumParameter>
-    </ComponentType>
-
-    <!--#################### STRATEGY COMMON TYPES BEGIN ####################-->
-
-    <ComponentType Name="StrategyConfig" Mapping="Strategy">
-        <Component Name="selected_output_devices" Type="OutputDevicesMask"/>
-    </ComponentType>
-
-    <!--#################### STRATEGY COMMON TYPES END ####################-->
-
     <!--#################### STREAM COMMON TYPES BEGIN ####################-->
 
     <ComponentType Name="VolumeProfileType">
@@ -178,21 +154,12 @@
     </ComponentType>
 
     <ComponentType Name="Stream"  Mapping="Stream">
-        <Component Name="applicable_strategy" Type="Strategy"/>
         <Component Name="applicable_volume_profile" Type="VolumeProfileType"
                    Description="Volume profile followed by a given stream type."/>
     </ComponentType>
 
     <!--#################### STREAM COMMON TYPES END ####################-->
 
-    <!--#################### USAGE COMMON TYPES BEGIN ####################-->
-
-    <ComponentType Name="Usage">
-        <Component Name="applicable_strategy" Type="Strategy" Mapping="Usage"/>
-    </ComponentType>
-
-    <!--#################### USAGE COMMON TYPES END ####################-->
-
     <!--#################### INPUT SOURCE COMMON TYPES BEGIN ####################-->
 
     <ComponentType Name="InputSource">
@@ -202,4 +169,14 @@
 
     <!--#################### INPUT SOURCE COMMON TYPES END ####################-->
 
+    <!--#################### PRODUCT STRATEGY COMMON TYPES BEGIN ####################-->
+
+    <ComponentType Name="ProductStrategy" Mapping="ProductStrategy">
+        <Component Name="selected_output_devices" Type="OutputDevicesMask"/>
+        <StringParameter Name="device_address" MaxLength="256"
+                         Description="device address associated with this strategy, if any"/>
+    </ComponentType>
+
+    <!--#################### PRODUCT STRATEGY COMMON TYPES END ####################-->
+
 </ComponentTypeSet>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
index 45d1e8a..a4e7537 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
@@ -11,22 +11,6 @@
 
         <!--#################### GLOBAL COMPONENTS END ####################-->
 
-        <!--#################### STRATEGY BEGIN ####################-->
-
-        <ComponentType Name="Strategies">
-            <Component Name="media" Type="StrategyConfig" Mapping="Name:STRATEGY_MEDIA"/>
-            <Component Name="phone" Type="StrategyConfig" Mapping="Name:STRATEGY_PHONE"/>
-            <Component Name="sonification" Type="StrategyConfig" Mapping="Name:STRATEGY_SONIFICATION"/>
-            <Component Name="sonification_respectful" Type="StrategyConfig" Mapping="Name:STRATEGY_SONIFICATION_RESPECTFUL"/>
-            <Component Name="dtmf" Type="StrategyConfig" Mapping="Name:STRATEGY_DTMF"/>
-            <Component Name="enforced_audible" Type="StrategyConfig" Mapping="Name:STRATEGY_ENFORCED_AUDIBLE"/>
-            <Component Name="transmitted_through_speaker" Type="StrategyConfig" Mapping="Name:STRATEGY_TRANSMITTED_THROUGH_SPEAKER"/>
-            <Component Name="accessibility" Type="StrategyConfig" Mapping="Name:STRATEGY_ACCESSIBILITY"/>
-            <Component Name="rerouting" Type="StrategyConfig" Mapping=",Name:STRATEGY_REROUTING"/>
-        </ComponentType>
-
-        <!--#################### STRATEGY END ####################-->
-
         <!--#################### STREAM BEGIN ####################-->
 
         <ComponentType Name="Streams" Description="associated to audio_stream_type_t definition">
@@ -52,40 +36,6 @@
 
         <!--#################### STREAM END ####################-->
 
-        <!--#################### USAGE BEGIN ####################-->
-
-        <ComponentType Name="Usages" Description="associated to audio_usage_t definition">
-            <Component Name="unknown" Type="Usage" Mapping="Name:AUDIO_USAGE_UNKNOWN"/>
-            <Component Name="media" Type="Usage" Mapping="Name:AUDIO_USAGE_MEDIA"/>
-            <Component Name="voice_communication" Type="Usage"
-                       Mapping="Name:AUDIO_USAGE_VOICE_COMMUNICATION"/>
-            <Component Name="voice_communication_signalling" Type="Usage"
-                       Mapping="Name:AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING"/>
-            <Component Name="alarm" Type="Usage" Mapping="Name:AUDIO_USAGE_ALARM"/>
-            <Component Name="notification" Type="Usage" Mapping="Name:AUDIO_USAGE_NOTIFICATION"/>
-            <Component Name="notification_telephony_ringtone" Type="Usage"
-                       Mapping="Name:AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE"/>
-            <Component Name="notification_communication_request" Type="Usage"
-                       Mapping="Name:AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/>
-            <Component Name="notification_communication_instant" Type="Usage"
-                       Mapping="Name:AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/>
-            <Component Name="notification_communication_delayed" Type="Usage"
-                       Mapping="Name:AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/>
-            <Component Name="notification_event" Type="Usage"
-                       Mapping="Name:AUDIO_USAGE_NOTIFICATION_EVENT"/>
-            <Component Name="assistance_accessibility" Type="Usage"
-                       Mapping="Name:AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY"/>
-            <Component Name="assistance_navigation_guidance" Type="Usage"
-                       Mapping="Name:AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
-            <Component Name="assistance_sonification" Type="Usage"
-                       Mapping="Name:AUDIO_USAGE_ASSISTANCE_SONIFICATION"/>
-            <Component Name="game" Type="Usage" Mapping="Name:AUDIO_USAGE_GAME"/>
-            <Component Name="virtual_source" Type="Usage" Mapping="Name:AUDIO_USAGE_VIRTUAL_SOURCE"/>
-            <Component Name="assistant" Type="Usage" Mapping="Name:AUDIO_USAGE_ASSISTANT"/>
-        </ComponentType>
-
-        <!--#################### USAGE END ####################-->
-
         <!--#################### INPUT SOURCE BEGIN ####################-->
 
         <ComponentType Name="InputSources" Description="associated to audio_source_t definition,
@@ -117,8 +67,6 @@
 
     <InstanceDefinition>
         <Component Name="streams" Type="Streams"/>
-        <Component Name="strategies" Type="Strategies"/>
         <Component Name="input_sources" Type="InputSources"/>
-        <Component Name="usages" Type="Usages"/>
     </InstanceDefinition>
 </Subsystem>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
index db1f038..65dc9af 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
@@ -8,10 +8,9 @@
 LOCAL_SRC_FILES := \
     PolicySubsystemBuilder.cpp \
     PolicySubsystem.cpp \
-    Strategy.cpp \
     InputSource.cpp \
     Stream.cpp \
-    Usage.cpp
+    ProductStrategy.cpp
 
 LOCAL_CFLAGS += \
     -Wall \
@@ -21,9 +20,8 @@
     -fvisibility=hidden
 
 LOCAL_C_INCLUDES := \
-    frameworks/av/services/audiopolicy/common/include \
     frameworks/av/services/audiopolicy/engineconfigurable/include \
-    frameworks/av/services/audiopolicy/engineconfigurable/interface \
+    frameworks/av/services/audiopolicy/engineconfigurable/interface
 
 LOCAL_SHARED_LIBRARIES := \
     libaudiopolicyengineconfigurable  \
@@ -31,6 +29,11 @@
     libmedia_helper \
     liblog \
 
+LOCAL_HEADER_LIBRARIES := \
+    libaudiopolicycommon \
+    libaudioclient_headers \
+    libbase_headers
+
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
 LOCAL_STATIC_LIBRARIES := libpfw_utility
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
index 7374fc3..bfc1bca 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
@@ -17,10 +17,9 @@
 #include "PolicySubsystem.h"
 #include "SubsystemObjectFactory.h"
 #include "PolicyMappingKeys.h"
-#include "Strategy.h"
 #include "Stream.h"
 #include "InputSource.h"
-#include "Usage.h"
+#include "ProductStrategy.h"
 #include <AudioPolicyPluginInterface.h>
 #include <AudioPolicyEngineInstance.h>
 #include <utils/Log.h>
@@ -36,9 +35,8 @@
 
 
 const char *const PolicySubsystem::mStreamComponentName = "Stream";
-const char *const PolicySubsystem::mStrategyComponentName = "Strategy";
 const char *const PolicySubsystem::mInputSourceComponentName = "InputSource";
-const char *const PolicySubsystem::mUsageComponentName = "Usage";
+const char *const PolicySubsystem::mProductStrategyComponentName = "ProductStrategy";
 
 PolicySubsystem::PolicySubsystem(const std::string &name, core::log::Logger &logger)
     : CSubsystem(name, logger),
@@ -68,20 +66,14 @@
             (1 << MappingKeyName))
         );
     addSubsystemObjectFactory(
-        new TSubsystemObjectFactory<Strategy>(
-            mStrategyComponentName,
-            0)
-        );
-    addSubsystemObjectFactory(
-        new TSubsystemObjectFactory<Usage>(
-            mUsageComponentName,
-            (1 << MappingKeyName))
-        );
-    addSubsystemObjectFactory(
         new TSubsystemObjectFactory<InputSource>(
             mInputSourceComponentName,
             (1 << MappingKeyName))
         );
+    addSubsystemObjectFactory(
+        new TSubsystemObjectFactory<ProductStrategy>(
+            mProductStrategyComponentName, 0)
+        );
 }
 
 // Retrieve Route interface
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
index 822eeb9..9bf1c23 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
@@ -53,7 +53,6 @@
     static const char *const mKeyAmend3; /**< amend3 key mapping string. */
 
     static const char *const mStreamComponentName;
-    static const char *const mStrategyComponentName;
     static const char *const mInputSourceComponentName;
-    static const char *const mUsageComponentName;
+    static const char *const mProductStrategyComponentName;
 };
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.cpp
new file mode 100644
index 0000000..bb29ef1
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ProductStrategy.h"
+#include "PolicyMappingKeys.h"
+#include "PolicySubsystem.h"
+
+using std::string;
+using android::product_strategy_t;
+
+ProductStrategy::ProductStrategy(const string &mappingValue,
+                   CInstanceConfigurableElement *instanceConfigurableElement,
+                   const CMappingContext &context,
+                   core::log::Logger& logger)
+    : CFormattedSubsystemObject(instanceConfigurableElement,
+                                logger,
+                                mappingValue,
+                                MappingKeyAmend1,
+                                (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
+                                context)
+{
+    ALOG_ASSERT(instanceConfigurableElement != nullptr, "Invalid Configurable Element");
+    mPolicySubsystem = static_cast<const PolicySubsystem *>(
+                instanceConfigurableElement->getBelongingSubsystem());
+    ALOG_ASSERT(mPolicySubsystem != nullptr, "Invalid Policy Subsystem");
+
+    mPolicyPluginInterface = mPolicySubsystem->getPolicyPluginInterface();
+    ALOG_ASSERT(mPolicyPluginInterface != nullptr, "Invalid Policy Plugin Interface");
+
+    std::string name(instanceConfigurableElement->getName());
+    mId = mPolicyPluginInterface->getProductStrategyByName(name);
+
+    ALOG_ASSERT(mId != PRODUCT_STRATEGY_INVALID, "Product Strategy %s not found", name.c_str());
+
+    ALOGE("Product Strategy %s added", name.c_str());
+}
+
+bool ProductStrategy::sendToHW(string & /*error*/)
+{
+    Device deviceParams;
+    blackboardRead(&deviceParams, sizeof(deviceParams));
+
+    mPolicyPluginInterface->setDeviceTypesForProductStrategy(mId, deviceParams.applicableDevice);
+    mPolicyPluginInterface->setDeviceAddressForProductStrategy(mId, deviceParams.deviceAddress);
+    return true;
+}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h
similarity index 68%
rename from services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h
rename to services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h
index c02b82c..244f082 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 The Android Open Source Project
+ * Copyright (C) 2018 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -20,14 +20,24 @@
 #include "InstanceConfigurableElement.h"
 #include "MappingContext.h"
 #include <AudioPolicyPluginInterface.h>
+#include <policy.h>
 #include <string>
 
 class PolicySubsystem;
 
-class Strategy : public CFormattedSubsystemObject
+class ProductStrategy : public CFormattedSubsystemObject
 {
+private:
+    static const uint32_t mMaxStringSize = 257; /**< max string size (including the terminating null). */
+
+    struct Device
+    {
+        uint32_t applicableDevice; /**< applicable device for this strategy. */
+        char deviceAddress[mMaxStringSize]; /**< device address associated with this strategy. */
+    } __attribute__((packed));
+
 public:
-    Strategy(const std::string &mappingValue,
+    ProductStrategy(const std::string &mappingValue,
              CInstanceConfigurableElement *instanceConfigurableElement,
              const CMappingContext &context,
              core::log::Logger& logger);
@@ -38,10 +48,10 @@
 private:
     const PolicySubsystem *mPolicySubsystem; /**< Route subsytem plugin. */
 
+    android::product_strategy_t mId;
+
     /**
      * Interface to communicate with Audio Policy Engine.
      */
     android::AudioPolicyPluginInterface *mPolicyPluginInterface;
-
-    android::routing_strategy mId; /**< strategy identifier to link with audio.h.*/
 };
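
The packed Device struct above is read straight from the parameter-framework blackboard in
ProductStrategy::sendToHW(), so its layout has to stay in sync with the ProductStrategy component
declared in PolicySubsystem-CommonTypes.xml: a 32-bit OutputDevicesMask followed by the
device_address string (MaxLength 256, assumed to be stored with a terminating null, hence 257
bytes). A minimal standalone sketch of that size assumption -- not part of the patch, names are
illustrative:

    #include <cstdint>

    namespace sketch {

    constexpr uint32_t kMaxStringSize = 257;  // 256 characters plus terminating null, per the XML MaxLength

    struct Device {
        uint32_t applicableDevice;            // image of the OutputDevicesMask bit block
        char deviceAddress[kMaxStringSize];   // image of the device_address StringParameter
    } __attribute__((packed));

    // If either side changes, sendToHW() would read a misaligned blackboard image.
    static_assert(sizeof(Device) == sizeof(uint32_t) + kMaxStringSize,
                  "blackboard image must stay in sync with the XML ProductStrategy component");

    }  // namespace sketch
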
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp
deleted file mode 100644
index 876bcb0..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Strategy.h"
-#include "PolicyMappingKeys.h"
-#include "PolicySubsystem.h"
-#include <RoutingStrategy.h>
-
-using std::string;
-using android::routing_strategy;
-
-namespace detail {
-
-constexpr std::pair<routing_strategy, const char*> routingStrategyMap[] = {
-    {android::STRATEGY_MEDIA, "STRATEGY_MEDIA"},
-    {android::STRATEGY_PHONE, "STRATEGY_PHONE"},
-    {android::STRATEGY_SONIFICATION, "STRATEGY_SONIFICATION"},
-    {android::STRATEGY_SONIFICATION_RESPECTFUL, "STRATEGY_SONIFICATION_RESPECTFUL"},
-    {android::STRATEGY_DTMF, "STRATEGY_DTMF"},
-    {android::STRATEGY_ENFORCED_AUDIBLE, "STRATEGY_ENFORCED_AUDIBLE"},
-    {android::STRATEGY_TRANSMITTED_THROUGH_SPEAKER, "STRATEGY_TRANSMITTED_THROUGH_SPEAKER"},
-    {android::STRATEGY_ACCESSIBILITY, "STRATEGY_ACCESSIBILITY"},
-    {android::STRATEGY_REROUTING, "STRATEGY_REROUTING"},
-};
-
-bool fromString(const char *literalName, routing_strategy &type)
-{
-    for (auto& pair : routingStrategyMap) {
-        if (strcmp(pair.second, literalName) == 0) {
-            type = pair.first;
-            return true;
-        }
-    }
-    return false;
-}
-
-}
-
-Strategy::Strategy(const string &mappingValue,
-                   CInstanceConfigurableElement *instanceConfigurableElement,
-                   const CMappingContext &context,
-                   core::log::Logger& logger)
-    : CFormattedSubsystemObject(instanceConfigurableElement,
-                                logger,
-                                mappingValue,
-                                MappingKeyAmend1,
-                                (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
-                                context),
-      mPolicySubsystem(static_cast<const PolicySubsystem *>(
-                           instanceConfigurableElement->getBelongingSubsystem())),
-      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface())
-{
-    std::string name(context.getItem(MappingKeyName));
-    if (not detail::fromString(name.c_str(), mId)) {
-        LOG_ALWAYS_FATAL("Invalid Strategy %s, invalid XML structure file", name.c_str());
-    }
-    // Declares the strategy to audio policy engine
-    mPolicyPluginInterface->addStrategy(instanceConfigurableElement->getName(), mId);
-}
-
-bool Strategy::sendToHW(string & /*error*/)
-{
-    uint32_t applicableOutputDevice;
-    blackboardRead(&applicableOutputDevice, sizeof(applicableOutputDevice));
-    return mPolicyPluginInterface->setDeviceForStrategy(mId, applicableOutputDevice);
-}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
index 46c9e1c..5230e0e 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
@@ -20,7 +20,6 @@
 #include <media/TypeConverter.h>
 
 using std::string;
-using android::routing_strategy;
 
 Stream::Stream(const string &/*mappingValue*/,
                CInstanceConfigurableElement *instanceConfigurableElement,
@@ -45,11 +44,8 @@
     Applicable params;
     blackboardRead(&params, sizeof(params));
 
-    mPolicyPluginInterface->setStrategyForStream(mId,
-                                                 static_cast<routing_strategy>(params.strategy));
-
-    mPolicyPluginInterface->setVolumeProfileForStream(mId,
-                                                      static_cast<audio_stream_type_t>(params.volumeProfile));
+    mPolicyPluginInterface->setVolumeProfileForStream(
+                mId, static_cast<audio_stream_type_t>(params.volumeProfile));
 
     return true;
 
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
index 4a875db..e0ce2fa 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
@@ -29,7 +29,6 @@
 private:
     struct Applicable
     {
-        uint32_t strategy; /**< applicable strategy for this stream. */
         uint32_t volumeProfile; /**< applicable strategy for this stream. */
     } __attribute__((packed));
 
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp
deleted file mode 100644
index 925d631..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Usage.h"
-#include "PolicyMappingKeys.h"
-#include "PolicySubsystem.h"
-#include <media/TypeConverter.h>
-
-using std::string;
-using android::routing_strategy;
-
-Usage::Usage(const string &mappingValue,
-                   CInstanceConfigurableElement *instanceConfigurableElement,
-                   const CMappingContext &context, core::log::Logger &logger)
-    : CFormattedSubsystemObject(instanceConfigurableElement,
-                                logger,
-                                mappingValue,
-                                MappingKeyAmend1,
-                                (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
-                                context),
-      mPolicySubsystem(static_cast<const PolicySubsystem *>(
-                           instanceConfigurableElement->getBelongingSubsystem())),
-      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface())
-{
-    std::string name(context.getItem(MappingKeyName));
-
-    if (not android::UsageTypeConverter::fromString(name, mId)) {
-        LOG_ALWAYS_FATAL("Invalid Usage name: %s, invalid XML structure file", name.c_str());
-    }
-    // Declares the strategy to audio policy engine
-    mPolicyPluginInterface->addUsage(name, mId);
-}
-
-bool Usage::sendToHW(string & /*error*/)
-{
-    uint32_t applicableStrategy;
-    blackboardRead(&applicableStrategy, sizeof(applicableStrategy));
-    return mPolicyPluginInterface->setStrategyForUsage(mId,
-                                              static_cast<routing_strategy>(applicableStrategy));
-}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h
deleted file mode 100644
index 860204f..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "FormattedSubsystemObject.h"
-#include "InstanceConfigurableElement.h"
-#include "MappingContext.h"
-#include <AudioPolicyPluginInterface.h>
-#include <string>
-
-class PolicySubsystem;
-
-class Usage : public CFormattedSubsystemObject
-{
-public:
-    Usage(const std::string &mappingValue,
-          CInstanceConfigurableElement *instanceConfigurableElement,
-          const CMappingContext &context,
-          core::log::Logger& logger);
-
-protected:
-    virtual bool sendToHW(std::string &error);
-
-private:
-    const PolicySubsystem *mPolicySubsystem; /**< Route subsytem plugin. */
-
-    /**
-     * Interface to communicate with Audio Policy Engine.
-     */
-    android::AudioPolicyPluginInterface *mPolicyPluginInterface;
-
-    audio_usage_t mId; /**< usage identifier to link with audio.h. */
-};
diff --git a/services/audiopolicy/engineconfigurable/src/Collection.h b/services/audiopolicy/engineconfigurable/src/Collection.h
index 1f8ed8d..02b41cb 100644
--- a/services/audiopolicy/engineconfigurable/src/Collection.h
+++ b/services/audiopolicy/engineconfigurable/src/Collection.h
@@ -18,8 +18,6 @@
 
 #include "Element.h"
 #include "Stream.h"
-#include "Strategy.h"
-#include "Usage.h"
 #include "InputSource.h"
 #include <utils/Errors.h>
 #include <system/audio.h>
@@ -147,15 +145,9 @@
 template <>
 struct Collection<std::string>::collectionSupported {};
 template <>
-struct Collection<audio_usage_t>::collectionSupported {};
-template <>
 struct Collection<audio_source_t>::collectionSupported {};
-template <>
-struct Collection<routing_strategy>::collectionSupported {};
 
-typedef Collection<routing_strategy> StrategyCollection;
 typedef Collection<audio_stream_type_t> StreamCollection;
-typedef Collection<audio_usage_t> UsageCollection;
 typedef Collection<audio_source_t> InputSourceCollection;
 
 } // namespace audio_policy
diff --git a/services/audiopolicy/engineconfigurable/src/Element.h b/services/audiopolicy/engineconfigurable/src/Element.h
index 1b55c8c..97950d8 100644
--- a/services/audiopolicy/engineconfigurable/src/Element.h
+++ b/services/audiopolicy/engineconfigurable/src/Element.h
@@ -62,7 +62,7 @@
 
     /**
      * A Policy element may implement getter/setter function for a given property.
-     * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
+     * Property may be audio_stream_type_t, audio_usage_t, audio_source_t
      * or a string.
      *
      * @tparam Property for which this policy element has setter / getter.
@@ -73,7 +73,7 @@
 
     /**
      * A Policy element may implement getter/setter function for a given property.
-     * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
+     * Property may be audio_stream_type_t, audio_usage_t, audio_source_t
      * or a string.
      *
      * @tparam Property for which this policy element has setter / getter.
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index 009cf90..89a1694 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -25,11 +25,12 @@
 #endif
 
 #include "Engine.h"
-#include "Strategy.h"
 #include "Stream.h"
 #include "InputSource.h"
-#include "Usage.h"
+
+#include <EngineConfig.h>
 #include <policy.h>
+#include <AudioIODescriptorInterface.h>
 #include <ParameterManagerWrapper.h>
 
 using std::string;
@@ -39,77 +40,48 @@
 namespace audio_policy {
 
 template <>
-StrategyCollection &Engine::getCollection<routing_strategy>()
-{
-    return mStrategyCollection;
-}
-template <>
 StreamCollection &Engine::getCollection<audio_stream_type_t>()
 {
     return mStreamCollection;
 }
 template <>
-UsageCollection &Engine::getCollection<audio_usage_t>()
-{
-    return mUsageCollection;
-}
-template <>
 InputSourceCollection &Engine::getCollection<audio_source_t>()
 {
     return mInputSourceCollection;
 }
 
 template <>
-const StrategyCollection &Engine::getCollection<routing_strategy>() const
-{
-    return mStrategyCollection;
-}
-template <>
 const StreamCollection &Engine::getCollection<audio_stream_type_t>() const
 {
     return mStreamCollection;
 }
 template <>
-const UsageCollection &Engine::getCollection<audio_usage_t>() const
-{
-    return mUsageCollection;
-}
-template <>
 const InputSourceCollection &Engine::getCollection<audio_source_t>() const
 {
     return mInputSourceCollection;
 }
 
-Engine::Engine()
-    : mManagerInterface(this),
-      mPluginInterface(this),
-      mPolicyParameterMgr(new ParameterManagerWrapper()),
-      mApmObserver(NULL)
+Engine::Engine() : mPolicyParameterMgr(new ParameterManagerWrapper())
 {
+    status_t loadResult = loadAudioPolicyEngineConfig();
+    if (loadResult < 0) {
+        ALOGE("Policy Engine configuration is invalid.");
+    }
 }
 
 Engine::~Engine()
 {
-    mStrategyCollection.clear();
     mStreamCollection.clear();
     mInputSourceCollection.clear();
-    mUsageCollection.clear();
-}
-
-
-void Engine::setObserver(AudioPolicyManagerObserver *observer)
-{
-    ALOG_ASSERT(observer != NULL, "Invalid Audio Policy Manager observer");
-    mApmObserver = observer;
 }
 
 status_t Engine::initCheck()
 {
-    if (mPolicyParameterMgr == NULL || mPolicyParameterMgr->start() != NO_ERROR) {
+    if (mPolicyParameterMgr == nullptr || mPolicyParameterMgr->start() != NO_ERROR) {
         ALOGE("%s: could not start Policy PFW", __FUNCTION__);
         return NO_INIT;
     }
-    return (mApmObserver != NULL)? NO_ERROR : NO_INIT;
+    return EngineBase::initCheck();
 }
 
 template <typename Key>
@@ -137,55 +109,16 @@
     return element->template get<Property>();
 }
 
-routing_strategy Engine::ManagerInterfaceImpl::getStrategyForUsage(audio_usage_t usage)
+bool Engine::setVolumeProfileForStream(const audio_stream_type_t &stream,
+                                       const audio_stream_type_t &profile)
 {
-    return mPolicyEngine->getPropertyForKey<routing_strategy, audio_usage_t>(usage);
-}
-
-audio_devices_t Engine::ManagerInterfaceImpl::getDeviceForStrategy(routing_strategy strategy) const
-{
-    const SwAudioOutputCollection &outputs = mPolicyEngine->mApmObserver->getOutputs();
-
-    /** This is the only case handled programmatically because the PFW is unable to know the
-     * activity of streams.
-     *
-     * -While media is playing on a remote device, use the the sonification behavior.
-     * Note that we test this usecase before testing if media is playing because
-     * the isStreamActive() method only informs about the activity of a stream, not
-     * if it's for local playback. Note also that we use the same delay between both tests
-     *
-     * -When media is not playing anymore, fall back on the sonification behavior
-     */
-    if (strategy == STRATEGY_SONIFICATION_RESPECTFUL &&
-            !is_state_in_call(getPhoneState()) &&
-            !outputs.isStreamActiveRemotely(AUDIO_STREAM_MUSIC,
-                                    SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY) &&
-            outputs.isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
-        return mPolicyEngine->getPropertyForKey<audio_devices_t, routing_strategy>(STRATEGY_MEDIA);
-    }
-    if (strategy == STRATEGY_ACCESSIBILITY &&
-        (outputs.isStreamActive(AUDIO_STREAM_RING) || outputs.isStreamActive(AUDIO_STREAM_ALARM))) {
-            // do not route accessibility prompts to a digital output currently configured with a
-            // compressed format as they would likely not be mixed and dropped.
-            // Device For Sonification conf file has HDMI, SPDIF and HDMI ARC unreacheable.
-        return mPolicyEngine->getPropertyForKey<audio_devices_t, routing_strategy>(
-                    STRATEGY_SONIFICATION);
-    }
-    return mPolicyEngine->getPropertyForKey<audio_devices_t, routing_strategy>(strategy);
-}
-
-bool Engine::PluginInterfaceImpl::setVolumeProfileForStream(const audio_stream_type_t &stream,
-                                                            const audio_stream_type_t &profile)
-{
-    if (mPolicyEngine->setPropertyForKey<audio_stream_type_t, audio_stream_type_t>(stream,
-                                                                                   profile)) {
-        mPolicyEngine->mApmObserver->getVolumeCurves().switchVolumeCurve(profile, stream);
+    if (setPropertyForKey<audio_stream_type_t, audio_stream_type_t>(stream, profile)) {
+        switchVolumeCurve(profile, stream);
         return true;
     }
     return false;
 }
 
-
 template <typename Property, typename Key>
 bool Engine::setPropertyForKey(const Property &property, const Key &key)
 {
@@ -199,7 +132,11 @@
 
 status_t Engine::setPhoneState(audio_mode_t mode)
 {
-    return mPolicyParameterMgr->setPhoneState(mode);
+    status_t status = mPolicyParameterMgr->setPhoneState(mode);
+    if (status != NO_ERROR) {
+        return status;
+    }
+    return EngineBase::setPhoneState(mode);
 }
 
 audio_mode_t Engine::getPhoneState() const
@@ -210,7 +147,11 @@
 status_t Engine::setForceUse(audio_policy_force_use_t usage,
                                       audio_policy_forced_cfg_t config)
 {
-    return mPolicyParameterMgr->setForceUse(usage, config);
+    status_t status = mPolicyParameterMgr->setForceUse(usage, config);
+    if (status != NO_ERROR) {
+        return status;
+    }
+    return EngineBase::setForceUse(usage, config);
 }
 
 audio_policy_forced_cfg_t Engine::getForceUse(audio_policy_force_use_t usage) const
@@ -225,24 +166,210 @@
 
     if (audio_is_output_device(devDesc->type())) {
         return mPolicyParameterMgr->setAvailableOutputDevices(
-                    mApmObserver->getAvailableOutputDevices().types());
+                    getApmObserver()->getAvailableOutputDevices().types());
     } else if (audio_is_input_device(devDesc->type())) {
         return mPolicyParameterMgr->setAvailableInputDevices(
-                    mApmObserver->getAvailableInputDevices().types());
+                    getApmObserver()->getAvailableInputDevices().types());
     }
     return BAD_TYPE;
 }
 
+status_t Engine::loadAudioPolicyEngineConfig()
+{
+    auto result = EngineBase::loadAudioPolicyEngineConfig();
+
+    // Custom XML Parsing
+    auto loadCriteria= [this](const auto& configCriteria, const auto& configCriterionTypes) {
+        for (auto& criterion : configCriteria) {
+            engineConfig::CriterionType criterionType;
+            for (auto &configCriterionType : configCriterionTypes) {
+                if (configCriterionType.name == criterion.typeName) {
+                    criterionType = configCriterionType;
+                    break;
+                }
+            }
+            ALOG_ASSERT(not criterionType.name.empty(), "Invalid criterion type for %s",
+                        criterion.name.c_str());
+            mPolicyParameterMgr->addCriterion(criterion.name, criterionType.isInclusive,
+                                              criterionType.valuePairs,
+                                              criterion.defaultLiteralValue);
+        }
+    };
+
+    loadCriteria(result.parsedConfig->criteria, result.parsedConfig->criterionTypes);
+    return result.nbSkippedElement == 0? NO_ERROR : BAD_VALUE;
+}
+
+DeviceVector Engine::getDevicesForProductStrategy(product_strategy_t ps) const
+{
+    const auto productStrategies = getProductStrategies();
+    if (productStrategies.find(ps) == productStrategies.end()) {
+        ALOGE("%s: Trying to get device on invalid strategy %d", __FUNCTION__, ps);
+        return {};
+    }
+    const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+    const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
+    uint32_t availableOutputDevicesType = availableOutputDevices.types();
+
+    /** This is the only case handled programmatically because the PFW is unable to know the
+     * activity of streams.
+     *
+     * -While media is playing on a remote device, use the sonification behavior.
+     * Note that we test this usecase before testing if media is playing because
+     * the isStreamActive() method only informs about the activity of a stream, not
+     * if it's for local playback. Note also that we use the same delay between both tests
+     *
+     * -When media is not playing anymore, fall back on the sonification behavior
+     */
+    audio_devices_t devices = AUDIO_DEVICE_NONE;
+    if (ps == getProductStrategyForStream(AUDIO_STREAM_NOTIFICATION) &&
+            !is_state_in_call(getPhoneState()) &&
+            !outputs.isActiveRemotely(streamToVolumeSource(AUDIO_STREAM_MUSIC),
+                                      SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY) &&
+            outputs.isActive(streamToVolumeSource(AUDIO_STREAM_MUSIC),
+                             SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
+        product_strategy_t strategyForMedia =
+                getProductStrategyForStream(AUDIO_STREAM_MUSIC);
+        devices = productStrategies.getDeviceTypesForProductStrategy(strategyForMedia);
+    } else if (ps == getProductStrategyForStream(AUDIO_STREAM_ACCESSIBILITY) &&
+        (outputs.isActive(streamToVolumeSource(AUDIO_STREAM_RING)) ||
+         outputs.isActive(streamToVolumeSource(AUDIO_STREAM_ALARM)))) {
+            // do not route accessibility prompts to a digital output currently configured with a
+            // compressed format as they would likely not be mixed and dropped.
+            // Device For Sonification conf file has HDMI, SPDIF and HDMI ARC unreachable.
+        product_strategy_t strategyNotification = getProductStrategyForStream(AUDIO_STREAM_RING);
+        devices = productStrategies.getDeviceTypesForProductStrategy(strategyNotification);
+    } else {
+        devices = productStrategies.getDeviceTypesForProductStrategy(ps);
+    }
+    if (devices == AUDIO_DEVICE_NONE ||
+            (devices & availableOutputDevicesType) == AUDIO_DEVICE_NONE) {
+        devices = getApmObserver()->getDefaultOutputDevice()->type();
+        ALOGE_IF(devices == AUDIO_DEVICE_NONE, "%s: no valid default device defined", __FUNCTION__);
+        return DeviceVector(getApmObserver()->getDefaultOutputDevice());
+    }
+    if (/*device_distinguishes_on_address(devices)*/ devices == AUDIO_DEVICE_OUT_BUS) {
+        // We expect only one device for these device types.
+        // The device address criterion guarantees that this device is available.
+        // If this criterion is not used, the device's availability must be checked here.
+        const String8 address(productStrategies.getDeviceAddressForProductStrategy(ps).c_str());
+        ALOGV("%s:device 0x%x %s %d", __FUNCTION__, devices, address.c_str(), ps);
+        return DeviceVector(availableOutputDevices.getDevice(devices,
+                                                             address,
+                                                             AUDIO_FORMAT_DEFAULT));
+    }
+    ALOGV("%s:device 0x%x %d", __FUNCTION__, devices, ps);
+    return availableOutputDevices.getDevicesFromTypeMask(devices);
+}
+
+DeviceVector Engine::getOutputDevicesForAttributes(const audio_attributes_t &attributes,
+                                                   const sp<DeviceDescriptor> &preferredDevice,
+                                                   bool fromCache) const
+{
+    // First check for explicit routing device
+    if (preferredDevice != nullptr) {
+        ALOGV("%s explicit Routing on device %s", __func__, preferredDevice->toString().c_str());
+        return DeviceVector(preferredDevice);
+    }
+    product_strategy_t strategy = getProductStrategyForAttributes(attributes);
+    const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+    const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
+    //
+    // @TODO: what is the priority of explicit routing? Shall it be considered first as it used to
+    // be by APM?
+    //
+    // Honor explicit routing requests only if all active clients have a preferred route in which
+    // case the last active client route is used
+    sp<DeviceDescriptor> device = findPreferredDevice(outputs, strategy, availableOutputDevices);
+    if (device != nullptr) {
+        return DeviceVector(device);
+    }
+
+    return fromCache? mDevicesForStrategies.at(strategy) : getDevicesForProductStrategy(strategy);
+}
+
+DeviceVector Engine::getOutputDevicesForStream(audio_stream_type_t stream, bool fromCache) const
+{
+    auto attributes = EngineBase::getAttributesForStreamType(stream);
+    return getOutputDevicesForAttributes(attributes, nullptr, fromCache);
+}
+
+sp<DeviceDescriptor> Engine::getInputDeviceForAttributes(const audio_attributes_t &attr,
+                                                         AudioMix **mix) const
+{
+    const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
+    const auto &availableInputDevices = getApmObserver()->getAvailableInputDevices();
+    const auto &inputs = getApmObserver()->getInputs();
+    std::string address;
+    //
+    // Explicit Routing: what is the priority of explicit routing? Shall it be considered
+    // first as it used to be by APM?
+    //
+    // Honor explicit routing requests only if all active clients have a preferred route in which
+    // case the last active client route is used
+    sp<DeviceDescriptor> device =
+            findPreferredDevice(inputs, attr.source, availableInputDevices);
+    if (device != nullptr) {
+        return device;
+    }
+
+    device = policyMixes.getDeviceAndMixForInputSource(attr.source, availableInputDevices, mix);
+    if (device != nullptr) {
+        return device;
+    }
+
+    audio_devices_t deviceType = getPropertyForKey<audio_devices_t, audio_source_t>(attr.source);
+
+    if (audio_is_remote_submix_device(deviceType)) {
+        address = "0";
+        std::size_t pos;
+        std::string tags { attr.tags };
+        if ((pos = tags.find("addr=")) != std::string::npos) {
+            address = tags.substr(pos + std::strlen("addr="));
+        }
+    }
+    return availableInputDevices.getDevice(deviceType, String8(address.c_str()), AUDIO_FORMAT_DEFAULT);
+}
+
+void Engine::updateDeviceSelectionCache()
+{
+    for (const auto &iter : getProductStrategies()) {
+        const auto &strategy = iter.second;
+        mDevicesForStrategies[strategy->getId()] = getDevicesForProductStrategy(strategy->getId());
+    }
+}
+
+void Engine::setDeviceAddressForProductStrategy(product_strategy_t strategy,
+                                                const std::string &address)
+{
+    if (getProductStrategies().find(strategy) == getProductStrategies().end()) {
+        ALOGE("%s: Trying to set address %s on invalid strategy %d", __FUNCTION__, address.c_str(),
+              strategy);
+        return;
+    }
+    getProductStrategies().at(strategy)->setDeviceAddress(address);
+}
+
+bool Engine::setDeviceTypesForProductStrategy(product_strategy_t strategy, audio_devices_t devices)
+{
+    if (getProductStrategies().find(strategy) == getProductStrategies().end()) {
+        ALOGE("%s: set device %d on invalid strategy %d", __FUNCTION__, devices, strategy);
+        return false;
+    }
+    getProductStrategies().at(strategy)->setDeviceTypes(devices);
+    return true;
+}
+
 template <>
 AudioPolicyManagerInterface *Engine::queryInterface()
 {
-    return &mManagerInterface;
+    return this;
 }
 
 template <>
 AudioPolicyPluginInterface *Engine::queryInterface()
 {
-    return &mPluginInterface;
+    return this;
 }
 
 } // namespace audio_policy
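
One detail worth calling out from getInputDeviceForAttributes() above: for remote submix devices
the address defaults to "0" unless the client's audio attributes carry an "addr=" tag. A small
standalone sketch of that parsing -- the helper name is hypothetical and not part of the patch:

    #include <cstring>
    #include <string>

    // Extracts the remote-submix address from an audio_attributes_t tags string.
    static std::string submixAddressFromTags(const char *tags) {
        std::string address = "0";                 // default submix address
        std::string t{tags};
        std::size_t pos = t.find("addr=");
        if (pos != std::string::npos) {
            address = t.substr(pos + std::strlen("addr="));  // keep everything after "addr="
        }
        return address;
    }

    // e.g. submixAddressFromTags("addr=1") == "1"; submixAddressFromTags("") == "0"
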
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
index ba4f889..5553994 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.h
+++ b/services/audiopolicy/engineconfigurable/src/Engine.h
@@ -16,7 +16,7 @@
 
 #pragma once
 
-
+#include "EngineBase.h"
 #include <AudioPolicyManagerInterface.h>
 #include <AudioPolicyPluginInterface.h>
 #include "Collection.h"
@@ -29,7 +29,7 @@
 class ParameterManagerWrapper;
 class VolumeProfile;
 
-class Engine
+class Engine : public EngineBase, AudioPolicyPluginInterface
 {
 public:
     Engine();
@@ -38,132 +38,69 @@
     template <class RequestedInterface>
     RequestedInterface *queryInterface();
 
-private:
-    /// Interface members
-    class ManagerInterfaceImpl : public AudioPolicyManagerInterface
+    ///
+    /// from EngineBase
+    ///
+    android::status_t initCheck() override;
+
+    status_t setPhoneState(audio_mode_t mode) override;
+
+    audio_mode_t getPhoneState() const override;
+
+    status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) override;
+
+    audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const override;
+
+    android::status_t setDeviceConnectionState(const sp<DeviceDescriptor> devDesc,
+                                               audio_policy_dev_state_t state) override;
+
+    DeviceVector getOutputDevicesForAttributes(const audio_attributes_t &attr,
+                                               const sp<DeviceDescriptor> &preferredDevice = nullptr,
+                                               bool fromCache = false) const override;
+
+    DeviceVector getOutputDevicesForStream(audio_stream_type_t stream,
+                                           bool fromCache = false) const override;
+
+    sp<DeviceDescriptor> getInputDeviceForAttributes(
+            const audio_attributes_t &attr, AudioMix **mix = nullptr) const override;
+
+    void updateDeviceSelectionCache() override;
+
+    ///
+    /// from AudioPolicyPluginInterface
+    ///
+    status_t addStream(const std::string &name, audio_stream_type_t stream) override
     {
-    public:
-        ManagerInterfaceImpl(Engine *policyEngine)
-            : mPolicyEngine(policyEngine) {}
-
-        virtual android::status_t initCheck()
-        {
-            return mPolicyEngine->initCheck();
-        }
-        virtual void setObserver(AudioPolicyManagerObserver *observer)
-        {
-            mPolicyEngine->setObserver(observer);
-        }
-        virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const
-        {
-            return mPolicyEngine->getPropertyForKey<audio_devices_t, audio_source_t>(inputSource);
-        }
-        virtual audio_devices_t getDeviceForStrategy(routing_strategy stategy) const;
-        virtual routing_strategy getStrategyForStream(audio_stream_type_t stream)
-        {
-            return mPolicyEngine->getPropertyForKey<routing_strategy, audio_stream_type_t>(stream);
-        }
-        virtual routing_strategy getStrategyForUsage(audio_usage_t usage);
-        virtual status_t setPhoneState(audio_mode_t mode)
-        {
-            return mPolicyEngine->setPhoneState(mode);
-        }
-        virtual audio_mode_t getPhoneState() const
-        {
-            return mPolicyEngine->getPhoneState();
-        }
-        virtual status_t setForceUse(audio_policy_force_use_t usage,
-                                              audio_policy_forced_cfg_t config)
-        {
-            return mPolicyEngine->setForceUse(usage, config);
-        }
-        virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const
-        {
-            return mPolicyEngine->getForceUse(usage);
-        }
-        virtual android::status_t setDeviceConnectionState(const sp<DeviceDescriptor> devDesc,
-                                                           audio_policy_dev_state_t state)
-        {
-            return mPolicyEngine->setDeviceConnectionState(devDesc, state);
-        }
-
-    private:
-        Engine *mPolicyEngine;
-    } mManagerInterface;
-
-    class PluginInterfaceImpl : public AudioPolicyPluginInterface
+        return add<audio_stream_type_t>(name, stream);
+    }
+    status_t addInputSource(const std::string &name, audio_source_t source) override
     {
-    public:
-        PluginInterfaceImpl(Engine *policyEngine)
-            : mPolicyEngine(policyEngine) {}
+        return add<audio_source_t>(name, source);
+    }
+    bool setVolumeProfileForStream(const audio_stream_type_t &stream,
+                                   const audio_stream_type_t &volumeProfile) override;
 
-        virtual status_t addStrategy(const std::string &name, routing_strategy strategy)
-        {
-            return mPolicyEngine->add<routing_strategy>(name, strategy);
-        }
-        virtual status_t addStream(const std::string &name, audio_stream_type_t stream)
-        {
-            return mPolicyEngine->add<audio_stream_type_t>(name, stream);
-        }
-        virtual status_t addUsage(const std::string &name, audio_usage_t usage)
-        {
-            return mPolicyEngine->add<audio_usage_t>(name, usage);
-        }
-        virtual status_t addInputSource(const std::string &name, audio_source_t source)
-        {
-            return mPolicyEngine->add<audio_source_t>(name, source);
-        }
-        virtual bool setDeviceForStrategy(const routing_strategy &strategy, audio_devices_t devices)
-        {
-            return mPolicyEngine->setPropertyForKey<audio_devices_t, routing_strategy>(devices,
-                                                                                       strategy);
-        }
-        virtual bool setStrategyForStream(const audio_stream_type_t &stream,
-                                          routing_strategy strategy)
-        {
-            return mPolicyEngine->setPropertyForKey<routing_strategy, audio_stream_type_t>(strategy,
-                                                                                           stream);
-        }
-        virtual bool setVolumeProfileForStream(const audio_stream_type_t &stream,
-                                               const audio_stream_type_t &volumeProfile);
+    bool setDeviceForInputSource(const audio_source_t &inputSource, audio_devices_t device) override
+    {
+        return setPropertyForKey<audio_devices_t, audio_source_t>(device, inputSource);
+    }
+    void setDeviceAddressForProductStrategy(product_strategy_t strategy,
+                                                    const std::string &address) override;
 
-        virtual bool setStrategyForUsage(const audio_usage_t &usage, routing_strategy strategy)
-        {
-            return mPolicyEngine->setPropertyForKey<routing_strategy, audio_usage_t>(strategy,
-                                                                                     usage);
-        }
-        virtual bool setDeviceForInputSource(const audio_source_t &inputSource,
-                                             audio_devices_t device)
-        {
-            return mPolicyEngine->setPropertyForKey<audio_devices_t, audio_source_t>(device,
-                                                                                     inputSource);
-        }
+    bool setDeviceTypesForProductStrategy(product_strategy_t strategy,
+                                                  audio_devices_t devices) override;
 
-    private:
-        Engine *mPolicyEngine;
-    } mPluginInterface;
+    product_strategy_t getProductStrategyByName(const std::string &name) override
+    {
+        return EngineBase::getProductStrategyByName(name);
+    }
 
 private:
     /* Copy facilities are put private to disable copy. */
     Engine(const Engine &object);
     Engine &operator=(const Engine &object);
 
-    void setObserver(AudioPolicyManagerObserver *observer);
-
-    bool setVolumeProfileForStream(const audio_stream_type_t &stream,
-                                   device_category deviceCategory,
-                                   const VolumeCurvePoints &points);
-
-    status_t initCheck();
-    status_t setPhoneState(audio_mode_t mode);
-    audio_mode_t getPhoneState() const;
-    status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
-    audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const;
-    status_t setDeviceConnectionState(const sp<DeviceDescriptor> devDesc,
-                                      audio_policy_dev_state_t state);
-    StrategyCollection mStrategyCollection; /**< Strategies indexed by their enum id. */
     StreamCollection mStreamCollection; /**< Streams indexed by their enum id.  */
-    UsageCollection mUsageCollection; /**< Usages indexed by their enum id. */
     InputSourceCollection mInputSourceCollection; /**< Input sources indexed by their enum id. */
 
     template <typename Key>
@@ -184,12 +121,16 @@
     template <typename Property, typename Key>
     bool setPropertyForKey(const Property &property, const Key &key);
 
+    status_t loadAudioPolicyEngineConfig();
+
+    DeviceVector getDevicesForProductStrategy(product_strategy_t strategy) const;
+
     /**
      * Policy Parameter Manager hidden through a wrapper.
      */
     ParameterManagerWrapper *mPolicyParameterMgr;
 
-    AudioPolicyManagerObserver *mApmObserver;
+    DeviceStrategyMap mDevicesForStrategies;
 };
 
 } // namespace audio_policy
diff --git a/services/audiopolicy/engineconfigurable/src/InputSource.h b/services/audiopolicy/engineconfigurable/src/InputSource.h
index 64b390e..e1865cc 100644
--- a/services/audiopolicy/engineconfigurable/src/InputSource.h
+++ b/services/audiopolicy/engineconfigurable/src/InputSource.h
@@ -59,7 +59,7 @@
 
     /**
      * A Policy element may implement getter/setter function for a given property.
-     * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
+     * Property may be audio_stream_type_t, audio_usage_t, audio_source_t
      * or a string.
      */
     template <typename Property>
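
A standalone sketch of the pattern the comment above describes: each policy element exposes templated get()/set() specializations for the properties it supports, and the engine forwards setPropertyForKey() to the element matching a key (as setDeviceForInputSource does in Engine.h above). Every name below (SourceElement, MiniEngine, device_mask_t) is a placeholder invented for the illustration, not a class from this tree.

    // Simplified, self-contained illustration of the templated property pattern.
    #include <cstdint>
    #include <map>
    #include <string>

    using device_mask_t = uint32_t;   // stand-in for audio_devices_t

    class SourceElement {
    public:
        explicit SourceElement(std::string name) : mName(std::move(name)) {}

        template <typename Property> Property get() const;
        template <typename Property> bool set(Property property);

    private:
        std::string mName;
        device_mask_t mDevices = 0;
    };

    // The only property this element supports is a device mask.
    template <> device_mask_t SourceElement::get<device_mask_t>() const { return mDevices; }
    template <> bool SourceElement::set<device_mask_t>(device_mask_t devices)
    {
        mDevices = devices;
        return true;
    }

    class MiniEngine {
    public:
        // Find the element for the key, then delegate to its templated setter.
        template <typename Property, typename Key>
        bool setPropertyForKey(const Property &property, const Key &key)
        {
            auto it = mSources.find(key);
            return it != mSources.end() && it->second.set<Property>(property);
        }

        std::map<int, SourceElement> mSources;   // keyed by an input-source id
    };

    int main()
    {
        MiniEngine engine;
        engine.mSources.emplace(1, SourceElement("mic"));
        // Spiritually equivalent to setDeviceForInputSource(source, device).
        return engine.setPropertyForKey<device_mask_t, int>(0x4, 1) ? 0 : 1;
    }
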
diff --git a/services/audiopolicy/engineconfigurable/src/Strategy.cpp b/services/audiopolicy/engineconfigurable/src/Strategy.cpp
deleted file mode 100644
index 310b35e..0000000
--- a/services/audiopolicy/engineconfigurable/src/Strategy.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::AudioPolicyEngine/Strategy"
-
-#include "Strategy.h"
-
-using std::string;
-
-namespace android {
-namespace audio_policy {
-
-status_t Element<routing_strategy>::setIdentifier(routing_strategy identifier)
-{
-    if (identifier >= NUM_STRATEGIES) {
-        return BAD_VALUE;
-    }
-    mIdentifier = identifier;
-    ALOGD("%s: Strategy %s identifier 0x%X", __FUNCTION__, getName().c_str(), identifier);
-    return NO_ERROR;
-}
-
-/**
- * Set the device associated to this strategy.
- * It checks if the output device is valid.
- *
- * @param[in] devices selected for the given strategy.
- *
- * @return NO_ERROR if the device is either valid or none, error code otherwise.
- */
-template <>
-status_t Element<routing_strategy>::set<audio_devices_t>(audio_devices_t devices)
-{
-    if (!audio_is_output_devices(devices) || devices == AUDIO_DEVICE_NONE) {
-        ALOGE("%s: trying to set an invalid device 0x%X for strategy %s",
-              __FUNCTION__, devices, getName().c_str());
-        return BAD_VALUE;
-    }
-    ALOGD("%s: 0x%X for strategy %s", __FUNCTION__, devices, getName().c_str());
-    mApplicableDevices = devices;
-    return NO_ERROR;
-}
-
-template <>
-audio_devices_t Element<routing_strategy>::get<audio_devices_t>() const
-{
-    ALOGV("%s: 0x%X for strategy %s", __FUNCTION__, mApplicableDevices, getName().c_str());
-    return mApplicableDevices;
-}
-
-} // namespace audio_policy
-} // namespace android
-
diff --git a/services/audiopolicy/engineconfigurable/src/Strategy.h b/services/audiopolicy/engineconfigurable/src/Strategy.h
deleted file mode 100644
index f2487fd..0000000
--- a/services/audiopolicy/engineconfigurable/src/Strategy.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "Element.h"
-#include <RoutingStrategy.h>
-
-namespace android {
-namespace audio_policy {
-
-/**
- * @tparam audio_devices_t: Applicable output device(s) for this strategy.
- */
-template <>
-class Element<routing_strategy>
-{
-public:
-    Element(const std::string &name)
-        : mName(name),
-          mApplicableDevices(AUDIO_DEVICE_NONE)
-    {}
-    ~Element() {}
-
-    /**
-     * Returns identifier of this policy element
-     *
-     * @returns string representing the name of this policy element
-     */
-    const std::string &getName() const { return mName; }
-
-    /**
-    * Set the unique identifier for this policy element.
-    *
-    * @tparam Key type of the unique identifier.
-    * @param[in] identifier to be set.
-    *
-    * @return NO_ERROR if the identifier is valid and set correctly, error code otherwise.
-    */
-    status_t setIdentifier(routing_strategy identifier);
-
-    /**
-     * @return the unique identifier of this policy element.
-     */
-    routing_strategy getIdentifier() const { return mIdentifier; }
-
-    /**
-     * A Policy element may implement getter/setter function for a given property.
-     * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
-     * or a string.
-     */
-    template <typename Property>
-    Property get() const;
-
-    template <typename Property>
-    status_t set(Property property);
-
-private:
-    /* Copy facilities are put private to disable copy. */
-    Element(const Element &object);
-    Element &operator=(const Element &object);
-
-    std::string mName; /**< Unique literal Identifier of a policy base element*/
-    routing_strategy mIdentifier; /**< Unique numerical Identifier of a policy base element*/
-
-    audio_devices_t mApplicableDevices; /**< Applicable output device(s) for this strategy. */
-};
-
-typedef Element<routing_strategy> Strategy;
-
-} // namespace audio_policy
-} // namespace android
-
-
diff --git a/services/audiopolicy/engineconfigurable/src/Stream.cpp b/services/audiopolicy/engineconfigurable/src/Stream.cpp
index 73fb94d..297eb02 100644
--- a/services/audiopolicy/engineconfigurable/src/Stream.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Stream.cpp
@@ -34,32 +34,6 @@
     return NO_ERROR;
 }
 
-/**
-* Set the strategy to follow for this stream.
-* It checks if the strategy is valid.
-*
-* @param[in] strategy to be followed.
-*
-* @return NO_ERROR if the strategy is set correctly, error code otherwise.
-*/
-template <>
-status_t Element<audio_stream_type_t>::set<routing_strategy>(routing_strategy strategy)
-{
-    if (strategy >= NUM_STRATEGIES) {
-        return BAD_VALUE;
-    }
-    mApplicableStrategy = strategy;
-    ALOGD("%s: 0x%X for Stream %s", __FUNCTION__, strategy, getName().c_str());
-    return NO_ERROR;
-}
-
-template <>
-routing_strategy Element<audio_stream_type_t>::get<routing_strategy>() const
-{
-    ALOGV("%s: 0x%X for Stream %s", __FUNCTION__, mApplicableStrategy, getName().c_str());
-    return mApplicableStrategy;
-}
-
 template <>
 status_t Element<audio_stream_type_t>::set<audio_stream_type_t>(audio_stream_type_t volumeProfile)
 {
diff --git a/services/audiopolicy/engineconfigurable/src/Stream.h b/services/audiopolicy/engineconfigurable/src/Stream.h
index 2bf70b3..a4fdd39 100644
--- a/services/audiopolicy/engineconfigurable/src/Stream.h
+++ b/services/audiopolicy/engineconfigurable/src/Stream.h
@@ -18,22 +18,20 @@
 
 #include "Element.h"
 #include "EngineDefinition.h"
-#include <RoutingStrategy.h>
 #include <map>
 
 namespace android {
 namespace audio_policy {
 
 /**
- * @tparam routing_strategy: Applicable strategy for this stream.
+ * @tparam product_strategy_t: Applicable strategy for this stream.
  */
 template <>
 class Element<audio_stream_type_t>
 {
 public:
     Element(const std::string &name)
-        : mName(name),
-          mApplicableStrategy(STRATEGY_MEDIA)
+        : mName(name)
     {}
     ~Element() {}
 
@@ -61,7 +59,7 @@
 
     /**
      * A Policy element may implement getter/setter function for a given property.
-     * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
+     * Property may be audio_stream_type_t, audio_usage_t, audio_source_t
      * or a string.
      */
     template <typename Property>
@@ -78,8 +76,6 @@
     std::string mName; /**< Unique literal Identifier of a policy base element*/
     audio_stream_type_t mIdentifier; /**< Unique numerical Identifier of a policy base element*/
 
-    routing_strategy mApplicableStrategy; /**< Applicable strategy for this stream. */
-
     audio_stream_type_t mVolumeProfile; /**< Volume Profile followed by this stream. */
 };
 
diff --git a/services/audiopolicy/engineconfigurable/src/Usage.cpp b/services/audiopolicy/engineconfigurable/src/Usage.cpp
deleted file mode 100644
index 8c0dfba..0000000
--- a/services/audiopolicy/engineconfigurable/src/Usage.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::AudioPolicyEngine/Usage"
-
-#include "Usage.h"
-
-namespace android {
-namespace audio_policy {
-
-status_t Element<audio_usage_t>::setIdentifier(audio_usage_t identifier)
-{
-    if (identifier > AUDIO_USAGE_MAX) {
-        return BAD_VALUE;
-    }
-    mIdentifier = identifier;
-    ALOGD("%s: Usage %s has identifier 0x%X", __FUNCTION__, getName().c_str(), identifier);
-    return NO_ERROR;
-}
-
-template <>
-status_t Element<audio_usage_t>::set<routing_strategy>(routing_strategy strategy)
-{
-    if (strategy >= NUM_STRATEGIES) {
-        return BAD_VALUE;
-    }
-    ALOGD("%s: %d for Usage %s", __FUNCTION__, strategy, getName().c_str());
-    mApplicableStrategy = strategy;
-    return NO_ERROR;
-}
-
-template <>
-routing_strategy Element<audio_usage_t>::get<routing_strategy>() const
-{
-    ALOGD("%s: %d for Usage %s", __FUNCTION__, mApplicableStrategy, getName().c_str());
-    return mApplicableStrategy;
-}
-
-} // namespace audio_policy
-} // namespace android
-
-
diff --git a/services/audiopolicy/engineconfigurable/src/Usage.h b/services/audiopolicy/engineconfigurable/src/Usage.h
deleted file mode 100644
index 72a452f..0000000
--- a/services/audiopolicy/engineconfigurable/src/Usage.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "Element.h"
-#include <RoutingStrategy.h>
-
-namespace android {
-namespace audio_policy {
-
-/**
- * @tparam routing_strategy: Applicable strategy for this usage.
- */
-template <>
-class Element<audio_usage_t>
-{
-public:
-    Element(const std::string &name)
-        : mName(name),
-          mApplicableStrategy(STRATEGY_MEDIA)
-    {}
-    ~Element() {}
-
-    /**
-     * Returns identifier of this policy element
-     *
-     * @returns string representing the name of this policy element
-     */
-    const std::string &getName() const { return mName; }
-
-    /**
-    * Set the unique identifier for this policy element.
-    *
-    * @tparam Key type of the unique identifier.
-    * @param[in] identifier to be set.
-    *
-    * @return NO_ERROR if the identifier is valid and set correctly, error code otherwise.
-    */
-    status_t setIdentifier(audio_usage_t identifier);
-
-    /**
-     * @return the unique identifier of this policy element.
-     */
-    audio_usage_t getIdentifier() const { return mIdentifier; }
-
-    /**
-     * A Policy element may implement getter/setter function for a given property.
-     * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
-     * or a string.
-     */
-    template <typename Property>
-    Property get() const;
-
-    template <typename Property>
-    status_t set(Property property);
-
-private:
-    /* Copy facilities are put private to disable copy. */
-    Element(const Element &object);
-    Element &operator=(const Element &object);
-
-    std::string mName; /**< Unique literal Identifier of a policy base element*/
-    audio_usage_t mIdentifier; /**< Unique numerical Identifier of a policy base element*/
-    routing_strategy mApplicableStrategy; /**< Applicable strategy for this usage. */
-};
-
-typedef Element<audio_usage_t> Usage;
-
-} // namespace audio_policy
-} // namespace android
-
-
diff --git a/services/audiopolicy/engineconfigurable/tools/provision_criterion_types_from_android_headers.mk b/services/audiopolicy/engineconfigurable/tools/provision_criterion_types_from_android_headers.mk
index 4814376..eebdfd6 100644
--- a/services/audiopolicy/engineconfigurable/tools/provision_criterion_types_from_android_headers.mk
+++ b/services/audiopolicy/engineconfigurable/tools/provision_criterion_types_from_android_headers.mk
@@ -8,7 +8,6 @@
 $(LOCAL_BUILT_MODULE): MY_AUDIO_POLICY_CONFIGURATION_FILE := $(AUDIO_POLICY_CONFIGURATION_FILE)
 $(LOCAL_BUILT_MODULE): MY_CRITERION_TOOL := $(HOST_OUT)/bin/buildPolicyCriterionTypes.py
 $(LOCAL_BUILT_MODULE): $(LOCAL_REQUIRED_MODULES) $(LOCAL_ADDITIONAL_DEPENDENCIES) \
-    buildPolicyCriterionTypes.py \
     $(CRITERION_TYPES_FILE) \
     $(ANDROID_AUDIO_BASE_HEADER_FILE)
 
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.mk b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
index d19a364..c7d8d34 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/Android.mk
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
@@ -1,8 +1,5 @@
 LOCAL_PATH:= $(call my-dir)
 
-TOOLS := frameworks/av/services/audiopolicy/engineconfigurable/tools
-PROVISION_CRITERION_TYPES := $(TOOLS)/provision_criterion_types_from_android_headers.mk
-
 ##################################################################
 # WRAPPER LIBRARY
 ##################################################################
@@ -13,20 +10,20 @@
     $(LOCAL_PATH)/include \
     frameworks/av/services/audiopolicy/engineconfigurable/include \
     frameworks/av/services/audiopolicy/engineconfigurable/interface \
-    frameworks/av/services/audiopolicy/common/include \
     external/libxml2/include \
     external/icu/icu4c/source/common
 
 LOCAL_SRC_FILES:= \
-    ParameterManagerWrapper.cpp \
-    ParameterManagerWrapperConfig.cpp
+    ParameterManagerWrapper.cpp
 
 LOCAL_SHARED_LIBRARIES := \
     libparameter \
     libmedia_helper \
-    libicuuc \
     libxml2
 
+LOCAL_HEADER_LIBRARIES := \
+    libaudiopolicycommon
+
 LOCAL_STATIC_LIBRARIES := \
     libaudiopolicycomponents
 
@@ -40,39 +37,3 @@
 
 include $(BUILD_STATIC_LIBRARY)
 
-##################################################################
-# CONFIGURATION FILE
-##################################################################
-
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), 1)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := policy_wrapper_configuration.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := config/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := policy_criteria.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := config/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := policy_criterion_types.xml
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_ADDITIONAL_DEPENDENCIES := \
-    $(TARGET_OUT_VENDOR_ETC)/audio_policy_configuration.xml
-
-AUDIO_POLICY_CONFIGURATION_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_configuration.xml
-ANDROID_AUDIO_BASE_HEADER_FILE := system/media/audio/include/system/audio-base.h
-CRITERION_TYPES_FILE := $(LOCAL_PATH)/config/policy_criterion_types.xml.in
-
-include $(PROVISION_CRITERION_TYPES)
-
-endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), 1)
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
index 09faa4c..4b57444 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
+++ b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
@@ -18,7 +18,6 @@
 //#define LOG_NDEBUG 0
 
 #include "ParameterManagerWrapper.h"
-#include "ParameterManagerWrapperConfig.h"
 #include <ParameterMgrPlatformConnector.h>
 #include <SelectionCriterionTypeInterface.h>
 #include <SelectionCriterionInterface.h>
@@ -38,7 +37,6 @@
 using std::string;
 using std::map;
 using std::vector;
-using CriterionTypes = std::map<std::string, ISelectionCriterionTypeInterface *>;
 
 /// PFW related definitions
 // Logger
@@ -106,63 +104,35 @@
 
     // Logger
     mPfwConnector->setLogger(mPfwConnectorLogger);
-
-    status_t loadResult = loadConfig();
-    if (loadResult < 0) {
-        ALOGE("Policy Wrapper configuration is partially invalid.");
-    }
 }
 
-status_t ParameterManagerWrapper::loadConfig()
+status_t ParameterManagerWrapper::addCriterion(const std::string &name, bool isInclusive,
+                                               ValuePairs pairs, const std::string &defaultValue)
 {
-    auto result = wrapper_config::parse();
-    if (result.parsedConfig == nullptr) {
-        return -ENOENT;
+    ALOG_ASSERT(not isStarted(), "Cannot add a criterion if PFW is already started");
+    auto criterionType = mPfwConnector->createSelectionCriterionType(isInclusive);
+
+    for (auto pair : pairs) {
+        std::string error;
+        ALOGV("%s: Adding pair %d,%s for criterionType %s", __FUNCTION__, pair.first,
+              pair.second.c_str(), name.c_str());
+        criterionType->addValuePair(pair.first, pair.second, error);
     }
-    ALOGE_IF(result.nbSkippedElement != 0, "skipped %zu elements", result.nbSkippedElement);
+    ALOG_ASSERT(mPolicyCriteria.find(name) == mPolicyCriteria.end(),
+                "%s: Criterion %s already added", __FUNCTION__, name.c_str());
 
-    CriterionTypes criterionTypes;
-    for (auto criterionType : result.parsedConfig->criterionTypes) {
-        ALOG_ASSERT(criterionTypes.find(criterionType.name) == criterionTypes.end(),
-                          "CriterionType %s already added", criterionType.name.c_str());
-        ALOGV("%s: Adding new criterionType %s", __FUNCTION__, criterionType.name.c_str());
+    auto criterion = mPfwConnector->createSelectionCriterion(name, criterionType);
+    mPolicyCriteria[name] = criterion;
 
-        auto criterionTypePfw =
-                mPfwConnector->createSelectionCriterionType(criterionType.isInclusive);
-
-        for (auto pair : criterionType.valuePairs) {
-            std::string error;
-            ALOGV("%s: Adding pair %d,%s for criterionType %s", __FUNCTION__, pair.first,
-                  pair.second.c_str(), criterionType.name.c_str());
-            criterionTypePfw->addValuePair(pair.first, pair.second, error);
+    if (not defaultValue.empty()) {
+        int numericalValue = 0;
+        if (not criterionType->getNumericalValue(defaultValue.c_str(), numericalValue)) {
+            ALOGE("%s; trying to apply invalid default literal value (%s)", __FUNCTION__,
+                  defaultValue.c_str());
         }
-        criterionTypes[criterionType.name] = criterionTypePfw;
+        criterion->setCriterionState(numericalValue);
     }
-
-    for (auto criterion : result.parsedConfig->criteria) {
-        ALOG_ASSERT(mPolicyCriteria.find(criterion.name) == mPolicyCriteria.end(),
-                    "%s: Criterion %s already added", __FUNCTION__, criterion.name.c_str());
-
-        auto criterionType =
-                getElement<ISelectionCriterionTypeInterface>(criterion.typeName, criterionTypes);
-        ALOG_ASSERT(criterionType != nullptr, "No %s Criterion type found for criterion %s",
-                    criterion.typeName.c_str(), criterion.name.c_str());
-
-        auto criterionPfw = mPfwConnector->createSelectionCriterion(criterion.name, criterionType);
-        mPolicyCriteria[criterion.name] = criterionPfw;
-
-        if (not criterion.defaultLiteralValue.empty()) {
-            int numericalValue = 0;
-            if (not criterionType->getNumericalValue(criterion.defaultLiteralValue.c_str(),
-                                                     numericalValue)) {
-                ALOGE("%s; trying to apply invalid default literal value (%s)", __FUNCTION__,
-                      criterion.defaultLiteralValue.c_str());
-                continue;
-            }
-            criterionPfw->setCriterionState(numericalValue);
-        }
-    }
-    return result.nbSkippedElement == 0? NO_ERROR : BAD_VALUE;
+    return NO_ERROR;
 }
 
 ParameterManagerWrapper::~ParameterManagerWrapper()
@@ -289,7 +259,7 @@
     std::string criterionName = audio_is_output_device(devDesc->type()) ?
                 gOutputDeviceAddressCriterionName : gInputDeviceAddressCriterionName;
 
-    ALOGV("%s: device with address %s %s", __FUNCTION__, devDesc->mAddress.string(),
+    ALOGV("%s: device with address %s %s", __FUNCTION__, devDesc->address().string(),
           state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE? "disconnected" : "connected");
     ISelectionCriterionInterface *criterion =
             getElement<ISelectionCriterionInterface>(criterionName, mPolicyCriteria);
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapperConfig.cpp b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapperConfig.cpp
deleted file mode 100644
index bc6d046..0000000
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapperConfig.cpp
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::AudioPolicyEngine/PFWWrapperConfig"
-#define LOG_NDEBUG 0
-
-#include "ParameterManagerWrapperConfig.h"
-
-#include <media/convert.h>
-#include <utils/Log.h>
-#include <libxml/parser.h>
-#include <libxml/xinclude.h>
-#include <string>
-#include <vector>
-#include <sstream>
-#include <istream>
-
-
-namespace android {
-
-using utilities::convertTo;
-
-namespace audio_policy {
-namespace wrapper_config {
-namespace detail {
-
-std::string getXmlAttribute(const xmlNode *cur, const char *attribute)
-{
-    xmlChar *xmlValue = xmlGetProp(cur, (const xmlChar *)attribute);
-    if (xmlValue == NULL) {
-        return "";
-    }
-    std::string value((const char *)xmlValue);
-    xmlFree(xmlValue);
-    return value;
-}
-
-template <class Trait>
-static status_t deserializeCollection(_xmlDoc *doc, const _xmlNode *cur,
-                                      typename Trait::Collection &collection,
-                                      size_t &nbSkippedElement)
-{
-    const xmlNode *root = cur->xmlChildrenNode;
-    while (root != NULL) {
-        if (xmlStrcmp(root->name, (const xmlChar *)Trait::collectionTag) &&
-            xmlStrcmp(root->name, (const xmlChar *)Trait::tag)) {
-            root = root->next;
-            continue;
-        }
-        const xmlNode *child = root;
-        if (!xmlStrcmp(child->name, (const xmlChar *)Trait::collectionTag)) {
-            child = child->xmlChildrenNode;
-        }
-        while (child != NULL) {
-            if (!xmlStrcmp(child->name, (const xmlChar *)Trait::tag)) {
-                status_t status = Trait::deserialize(doc, child, collection);
-                if (status == NO_ERROR) {
-                    nbSkippedElement += 1;
-                }
-            }
-            child = child->next;
-        }
-        if (!xmlStrcmp(root->name, (const xmlChar *)Trait::tag)) {
-            return NO_ERROR;
-        }
-        root = root->next;
-    }
-    return NO_ERROR;
-}
-
-const char *const ValueTraits::tag = "value";
-const char *const ValueTraits::collectionTag = "values";
-
-const char ValueTraits::Attributes::literal[] = "literal";
-const char ValueTraits::Attributes::numerical[] = "numerical";
-
-status_t ValueTraits::deserialize(_xmlDoc */*doc*/, const _xmlNode *child, Collection &values)
-{
-    std::string literal = getXmlAttribute(child, Attributes::literal);
-    if (literal.empty()) {
-        ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::literal);
-        return BAD_VALUE;
-    }
-    uint32_t numerical = 0;
-    std::string numericalTag = getXmlAttribute(child, Attributes::numerical);
-    if (numericalTag.empty()) {
-        ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::literal);
-        return BAD_VALUE;
-    }
-    if (!convertTo(numericalTag, numerical)) {
-        ALOGE("%s: : Invalid value(%s)", __FUNCTION__, numericalTag.c_str());
-        return BAD_VALUE;
-    }
-    values.push_back({numerical, literal});
-    return NO_ERROR;
-}
-
-const char *const CriterionTypeTraits::tag = "criterion_type";
-const char *const CriterionTypeTraits::collectionTag = "criterion_types";
-
-const char CriterionTypeTraits::Attributes::name[] = "name";
-const char CriterionTypeTraits::Attributes::type[] = "type";
-
-status_t CriterionTypeTraits::deserialize(_xmlDoc *doc, const _xmlNode *child,
-                                          Collection &criterionTypes)
-{
-    std::string name = getXmlAttribute(child, Attributes::name);
-    if (name.empty()) {
-        ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::name);
-        return BAD_VALUE;
-    }
-    ALOGV("%s: %s %s = %s", __FUNCTION__, tag, Attributes::name, name.c_str());
-
-    std::string type = getXmlAttribute(child, Attributes::type);
-    if (type.empty()) {
-        ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::type);
-        return BAD_VALUE;
-    }
-    ALOGV("%s: %s %s = %s", __FUNCTION__, tag, Attributes::type, type.c_str());
-    bool isInclusive(type == "inclusive");
-
-    ValuePairs pairs;
-    size_t nbSkippedElements = 0;
-    detail::deserializeCollection<detail::ValueTraits>(doc, child, pairs, nbSkippedElements);
-
-    criterionTypes.push_back({name, isInclusive, pairs});
-    return NO_ERROR;
-}
-
-const char *const CriterionTraits::tag = "criterion";
-const char *const CriterionTraits::collectionTag = "criteria";
-
-const char CriterionTraits::Attributes::name[] = "name";
-const char CriterionTraits::Attributes::type[] = "type";
-const char CriterionTraits::Attributes::defaultVal[] = "default";
-
-status_t CriterionTraits::deserialize(_xmlDoc */*doc*/, const _xmlNode *child, Collection &criteria)
-{
-    std::string name = getXmlAttribute(child, Attributes::name);
-    if (name.empty()) {
-        ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::name);
-        return BAD_VALUE;
-    }
-    ALOGV("%s: %s = %s", __FUNCTION__, Attributes::name, name.c_str());
-
-    std::string defaultValue = getXmlAttribute(child, Attributes::defaultVal);
-    if (defaultValue.empty()) {
-        // Not mandatory to provide a default value for a criterion, even it is recommanded...
-        ALOGV("%s: No attribute %s found", __FUNCTION__, Attributes::defaultVal);
-    }
-    ALOGV("%s: %s = %s", __FUNCTION__, Attributes::defaultVal, defaultValue.c_str());
-
-    std::string typeName = getXmlAttribute(child, Attributes::type);
-    if (typeName.empty()) {
-        ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::name);
-        return BAD_VALUE;
-    }
-    ALOGV("%s: %s = %s", __FUNCTION__, Attributes::type, typeName.c_str());
-
-    criteria.push_back({name, typeName, defaultValue});
-    return NO_ERROR;
-}
-} // namespace detail
-
-ParsingResult parse(const char* path) {
-    xmlDocPtr doc;
-    doc = xmlParseFile(path);
-    if (doc == NULL) {
-        ALOGE("%s: Could not parse document %s", __FUNCTION__, path);
-        return {nullptr, 0};
-    }
-    xmlNodePtr cur = xmlDocGetRootElement(doc);
-    if (cur == NULL) {
-        ALOGE("%s: Could not parse: empty document %s", __FUNCTION__, path);
-        xmlFreeDoc(doc);
-        return {nullptr, 0};
-    }
-    if (xmlXIncludeProcess(doc) < 0) {
-        ALOGE("%s: libxml failed to resolve XIncludes on document %s", __FUNCTION__, path);
-        return {nullptr, 0};
-    }
-    size_t nbSkippedElements = 0;
-    auto config = std::make_unique<Config>();
-
-    detail::deserializeCollection<detail::CriterionTraits>(
-                doc, cur, config->criteria, nbSkippedElements);
-    detail::deserializeCollection<detail::CriterionTypeTraits>(
-                doc, cur, config->criterionTypes, nbSkippedElements);
-
-    return {std::move(config), nbSkippedElements};
-}
-
-} // namespace wrapper_config
-} // namespace audio_policy
-} // namespace android
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapperConfig.h b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapperConfig.h
deleted file mode 100644
index 467d0e1..0000000
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapperConfig.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <stdint.h>
-#include <string>
-#include <vector>
-#include <utils/Errors.h>
-
-struct _xmlNode;
-struct _xmlDoc;
-
-namespace android {
-namespace audio_policy {
-namespace wrapper_config {
-
-/** Default path of audio policy usages configuration file. */
-constexpr char DEFAULT_PATH[] = "/vendor/etc/policy_wrapper_configuration.xml";
-
-/** Directories where the effect libraries will be search for. */
-constexpr const char* POLICY_USAGE_LIBRARY_PATH[] = {"/odm/etc/", "/vendor/etc/", "/system/etc/"};
-
-using ValuePair = std::pair<uint32_t, std::string>;
-using ValuePairs = std::vector<ValuePair>;
-
-struct CriterionType
-{
-    std::string name;
-    bool isInclusive;
-    ValuePairs valuePairs;
-};
-
-using CriterionTypes = std::vector<CriterionType>;
-
-struct Criterion
-{
-    std::string name;
-    std::string typeName;
-    std::string defaultLiteralValue;
-};
-
-using Criteria = std::vector<Criterion>;
-
-struct Config {
-    float version;
-    Criteria criteria;
-    CriterionTypes criterionTypes;
-};
-
-namespace detail
-{
-struct ValueTraits
-{
-    static const char *const tag;
-    static const char *const collectionTag;
-
-    struct Attributes
-    {
-        static const char literal[];
-        static const char numerical[];
-    };
-
-    typedef ValuePair Element;
-    typedef ValuePair *PtrElement;
-    typedef ValuePairs Collection;
-
-    static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
-                                         Collection &collection);
-};
-
-struct CriterionTypeTraits
-{
-    static const char *const tag;
-    static const char *const collectionTag;
-
-    struct Attributes
-    {
-        static const char name[];
-        static const char type[];
-    };
-
-    typedef CriterionType Element;
-    typedef CriterionType *PtrElement;
-    typedef CriterionTypes Collection;
-
-    static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
-                                         Collection &collection);
-};
-
-struct CriterionTraits
-{
-    static const char *const tag;
-    static const char *const collectionTag;
-
-    struct Attributes
-    {
-        static const char name[];
-        static const char type[];
-        static const char defaultVal[];
-    };
-
-    typedef Criterion Element;
-    typedef Criterion *PtrElement;
-    typedef Criteria Collection;
-
-    static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
-                                         Collection &collection);
-};
-} // namespace detail
-
-/** Result of `parse(const char*)` */
-struct ParsingResult {
-    /** Parsed config, nullptr if the xml lib could not load the file */
-    std::unique_ptr<Config> parsedConfig;
-    size_t nbSkippedElement; //< Number of skipped invalid product strategies
-};
-
-/** Parses the provided audio policy usage configuration.
- * @return audio policy usage @see Config
- */
-ParsingResult parse(const char* path = DEFAULT_PATH);
-
-} // namespace wrapper_config
-} // namespace audio_policy
-} // android
diff --git a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
index cd39b6f..5bfad29 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
+++ b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
@@ -39,6 +39,9 @@
 namespace android {
 namespace audio_policy {
 
+using ValuePair = std::pair<uint32_t, std::string>;
+using ValuePairs = std::vector<ValuePair>;
+
 class ParameterManagerWrapper
 {
 private:
@@ -118,6 +121,17 @@
     status_t setDeviceConnectionState(const sp<DeviceDescriptor> devDesc,
                                       audio_policy_dev_state_t state);
 
+    /**
+     * @brief addCriterion to the policy pfw
+     * @param name of the criterion
+     * @param isInclusive true for an inclusive criterion type, false for an exclusive one
+     * @param pairs numerical/literal value pairs defining the criterion type
+     * @param defaultValue default value of the criterion, provided as a literal.
+     * @return NO_ERROR if the criterion was added successfully, error code otherwise.
+     */
+    status_t addCriterion(const std::string &name, bool isInclusive, ValuePairs pairs,
+                          const std::string &defaultValue);
+
 private:
     /**
      * Apply the configuration of the platform on the policy parameter manager.
@@ -131,13 +145,6 @@
      */
     void applyPlatformConfiguration();
 
-    /**
-     * Load the criterion configuration file.
-     *
-     * @return NO_ERROR is parsing successful, error code otherwise.
-     */
-    status_t loadConfig();
-
      /**
      * Retrieve an element from a map by its name.
      *
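
A hedged usage sketch of the addCriterion() entry point declared above: only ParameterManagerWrapper::addCriterion() and the ValuePairs alias come from this change; the criterion name, the numerical/literal pairs and the helper function are assumptions made up for the example.

    #include "ParameterManagerWrapper.h"

    using android::audio_policy::ParameterManagerWrapper;
    using android::audio_policy::ValuePairs;

    void registerTelephonyModeCriterion(ParameterManagerWrapper &pfw)
    {
        // Criteria must be registered before the parameter framework is started
        // (addCriterion asserts on isStarted()).
        ValuePairs pairs = {
            {0 /* e.g. AUDIO_MODE_NORMAL */,  "Normal"},
            {2 /* e.g. AUDIO_MODE_IN_CALL */, "InCall"},
        };
        // Exclusive criterion (isInclusive = false), "Normal" as the default literal value.
        pfw.addCriterion("TelephonyMode", false /* isInclusive */, pairs, "Normal");
    }
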
diff --git a/services/audiopolicy/enginedefault/Android.mk b/services/audiopolicy/enginedefault/Android.mk
index 837d5bb..94fa788 100644
--- a/services/audiopolicy/enginedefault/Android.mk
+++ b/services/audiopolicy/enginedefault/Android.mk
@@ -8,10 +8,13 @@
 LOCAL_SRC_FILES := \
     src/Engine.cpp \
     src/EngineInstance.cpp \
+    ../engine/common/src/VolumeCurve.cpp \
+    ../engine/common/src/StreamVolumeCurves.cpp \
+    ../engine/common/src/ProductStrategy.cpp \
+    ../engine/common/src/EngineBase.cpp
 
 audio_policy_engine_includes_common := \
-    $(LOCAL_PATH)/include \
-    frameworks/av/services/audiopolicy/engine/interface
+    $(LOCAL_PATH)/include
 
 LOCAL_CFLAGS += \
     -Wall \
@@ -26,8 +29,7 @@
     $(TARGET_OUT_HEADERS)/hw \
     $(call include-path-for, frameworks-av) \
     $(call include-path-for, audio-utils) \
-    $(call include-path-for, bionic) \
-    frameworks/av/services/audiopolicy/common/include
+    $(call include-path-for, bionic)
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
@@ -37,12 +39,19 @@
 LOCAL_HEADER_LIBRARIES := libbase_headers
 
 LOCAL_STATIC_LIBRARIES := \
-    libaudiopolicycomponents \
+    libaudiopolicycomponents
 
-LOCAL_SHARED_LIBRARIES += \
+LOCAL_SHARED_LIBRARIES := \
     liblog \
     libcutils \
     libutils \
-    libmedia_helper
+    libmedia_helper \
+    libaudiopolicyengineconfig \
+    libaudiopolicy
+
+LOCAL_HEADER_LIBRARIES := \
+    libaudiopolicycommon \
+    libaudiopolicyengine_common_headers \
+    libaudiopolicyengine_interface_headers
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audiopolicy/enginedefault/config/Android.mk b/services/audiopolicy/enginedefault/config/Android.mk
new file mode 100644
index 0000000..dcce8e3
--- /dev/null
+++ b/services/audiopolicy/enginedefault/config/Android.mk
@@ -0,0 +1,9 @@
+
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+#######################################################################
+# Recursive call sub-folder Android.mk
+#
+include $(call all-makefiles-under,$(LOCAL_PATH))
+
diff --git a/services/audiopolicy/enginedefault/config/example/Android.mk b/services/audiopolicy/enginedefault/config/example/Android.mk
new file mode 100644
index 0000000..f06ee4c
--- /dev/null
+++ b/services/audiopolicy/enginedefault/config/example/Android.mk
@@ -0,0 +1,50 @@
+LOCAL_PATH := $(call my-dir)
+
+##################################################################
+# CONFIGURATION TOP FILE
+##################################################################
+
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_default)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_configuration_phone.xml
+LOCAL_MODULE_STEM := audio_policy_engine_configuration.xml
+
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE_STEM)
+
+LOCAL_REQUIRED_MODULES := \
+    audio_policy_engine_product_strategies_phone.xml \
+    audio_policy_engine_stream_volumes.xml \
+    audio_policy_engine_default_stream_volumes.xml
+
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_product_strategies_phone.xml
+LOCAL_MODULE_STEM := audio_policy_engine_product_strategies.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_default_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_default)
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_configuration.xml
similarity index 67%
copy from services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml
copy to services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_configuration.xml
index 5d9193b..4ca33b4 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_configuration.xml
@@ -12,14 +12,13 @@
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
      See the License for the specific language governing permissions and
      limitations under the License.
--->
-<!--
-    These are the minimum required criteria to be used by Audio HAL to ensure a basic
-    user experience on an Android device
--->
-<configuration name="audio_policy_wrapper_configuration" xmlns:xi="http://www.w3.org/2001/XInclude">
+     -->
 
-    <xi:include href="policy_criterion_types.xml"/>
-    <xi:include href="policy_criteria.xml"/>
+<configuration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+    <xi:include href="audio_policy_engine_product_strategies.xml"/>
+    <xi:include href="audio_policy_engine_stream_volumes.xml"/>
+    <xi:include href="audio_policy_engine_default_stream_volumes.xml"/>
 
 </configuration>
+
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_default_stream_volumes.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_default_stream_volumes.xml
new file mode 100644
index 0000000..21e6dd5
--- /dev/null
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_default_stream_volumes.xml
@@ -0,0 +1,136 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<!-- Default Volume Tables included by Audio Policy Configuration file -->
+<!-- Full Default Volume table for all device categories -->
+<volumes>
+    <reference name="FULL_SCALE_VOLUME_CURVE">
+    <!-- Full Scale reference Volume Curve -->
+        <point>0,0</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="SILENT_VOLUME_CURVE">
+        <point>0,-9600</point>
+        <point>100,-9600</point>
+    </reference>
+    <reference name="DEFAULT_SYSTEM_VOLUME_CURVE">
+    <!-- Default System reference Volume Curve -->
+        <point>1,-2400</point>
+        <point>33,-1800</point>
+        <point>66,-1200</point>
+        <point>100,-600</point>
+    </reference>
+    <reference name="DEFAULT_MEDIA_VOLUME_CURVE">
+    <!-- Default Media reference Volume Curve -->
+        <point>1,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE">
+    <!--Default Volume Curve -->
+        <point>1,-4950</point>
+        <point>33,-3350</point>
+        <point>66,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE">
+    <!-- Default is Speaker Media Volume Curve -->
+        <point>1,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE">
+    <!-- Default is Speaker System Volume Curve -->
+        <point>1,-4680</point>
+        <point>42,-2070</point>
+        <point>85,-540</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE">
+    <!--Default Volume Curve -->
+        <point>1,-4950</point>
+        <point>33,-3350</point>
+        <point>66,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE">
+    <!-- Default is Ext Media System Volume Curve -->
+        <point>1,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-2100</point>
+        <point>100,-1000</point>
+    </reference>
+    <reference name="DEFAULT_HEARING_AID_VOLUME_CURVE">
+    <!-- Default Hearing Aid Volume Curve -->
+        <point>1,-12700</point>
+        <point>20,-8000</point>
+        <point>60,-4000</point>
+        <point>100,0</point>
+    </reference>
+    <!-- **************************************************************** -->
+    <!-- Non-mutable default volume curves:                               -->
+    <!--     * first point is always for index 0                          -->
+    <!--     * attenuation is small enough that stream can still be heard -->
+    <reference name="DEFAULT_NON_MUTABLE_VOLUME_CURVE">
+    <!-- Default non-mutable reference Volume Curve -->
+    <!--        based on DEFAULT_MEDIA_VOLUME_CURVE -->
+        <point>0,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE">
+    <!--Default non-mutable Volume Curve for headset -->
+    <!--    based on DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE -->
+        <point>0,-4950</point>
+        <point>33,-3350</point>
+        <point>66,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE">
+    <!-- Default non-mutable Speaker Volume Curve -->
+    <!--    based on DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE -->
+        <point>0,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE">
+    <!--Default non-mutable Volume Curve -->
+    <!--    based on DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE -->
+        <point>0,-4950</point>
+        <point>33,-3350</point>
+        <point>66,-1700</point>
+        <point>100,0</point>
+    </reference>
+    <reference name="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE">
+    <!-- Default non-mutable Ext Media System Volume Curve -->
+    <!--     based on DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE -->
+        <point>0,-5800</point>
+        <point>20,-4000</point>
+        <point>60,-2100</point>
+        <point>100,-1000</point>
+    </reference>
+    <reference name="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE">
+    <!-- Default non-mutable Hearing Aid Volume Curve -->
+    <!--     based on DEFAULT_HEARING_AID_VOLUME_CURVE -->
+        <point>0,-12700</point>
+        <point>20,-8000</point>
+        <point>60,-4000</point>
+        <point>100,0</point>
+    </reference>
+</volumes>
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
new file mode 100644
index 0000000..f72e379
--- /dev/null
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -0,0 +1,110 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+     -->
+
+<ProductStrategies>
+
+    <!-- "hidden strategies" like TTS, enforced audible:
+            Shall we expose them here or keep them hard-coded? -->
+
+    <!-- Used to identify the volume of audio streams for enforced system sounds in certain
+         countries (e.g. camera in Japan).
+         This strategy will only have higher priority than phone if force for system is set to
+         enforced. -->
+
+    <ProductStrategy name="STRATEGY_PHONE">
+        <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL">
+            <Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION"/> </Attributes>
+        </AttributesGroup>
+        <AttributesGroup streamType="AUDIO_STREAM_BLUETOOTH_SCO">
+            <Attributes> <Flags value="AUDIO_FLAG_SCO"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <ProductStrategy name="STRATEGY_SONIFICATION">
+        <AttributesGroup streamType="AUDIO_STREAM_RING">
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE"/> </Attributes>
+        </AttributesGroup>
+        <AttributesGroup streamType="AUDIO_STREAM_ALARM">
+            <Attributes> <Usage value="AUDIO_USAGE_ALARM"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <ProductStrategy name="STRATEGY_ENFORCED_AUDIBLE">
+        <AttributesGroup streamType="AUDIO_STREAM_ENFORCED_AUDIBLE">
+            <Attributes> <Flags value="AUDIO_FLAG_AUDIBILITY_ENFORCED"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <ProductStrategy name="STRATEGY_ACCESSIBILITY">
+        <AttributesGroup streamType="AUDIO_STREAM_ACCESSIBILITY">
+            <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <ProductStrategy name="STRATEGY_SONIFICATION_RESPECTFUL">
+        <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION">
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_EVENT"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <ProductStrategy name="STRATEGY_MEDIA">
+         <AttributesGroup streamType="AUDIO_STREAM_MUSIC">
+            <Attributes> <Usage value="AUDIO_USAGE_MEDIA"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_GAME"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_ASSISTANT"/> </Attributes>
+            <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/> </Attributes>
+            <Attributes></Attributes>
+        </AttributesGroup>
+        <AttributesGroup streamType="AUDIO_STREAM_SYSTEM">
+            <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_SONIFICATION"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <ProductStrategy name="STRATEGY_DTMF">
+        <AttributesGroup streamType="AUDIO_STREAM_DTMF">
+            <Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <!-- Used to identify the volume of audio streams exclusively transmitted through the  speaker
+         (TTS) of the device -->
+    <ProductStrategy name="STRATEGY_TRANSMITTED_THROUGH_SPEAKER">
+        <AttributesGroup streamType="AUDIO_STREAM_TTS">
+            <Attributes> <Flags value="AUDIO_FLAG_BEACON"/> </Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <!-- The rerouting strategy may be removed if it ends up simply following media (to be confirmed). -->
+    <ProductStrategy name="STRATEGY_REROUTING">
+        <AttributesGroup streamType="AUDIO_STREAM_REROUTING">
+            <Attributes></Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+    <!-- Default product strategy has empty attributes -->
+    <ProductStrategy name="STRATEGY_PATCH">
+        <AttributesGroup streamType="AUDIO_STREAM_PATCH">
+            <Attributes></Attributes>
+        </AttributesGroup>
+    </ProductStrategy>
+
+
+</ProductStrategies>
+
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml
new file mode 100644
index 0000000..73bde1f
--- /dev/null
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml
@@ -0,0 +1,231 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<!-- Volume section defines a volume curve for a given use case and device category.
+It contains a list of points of this curve expressing the attenuation in Millibels for a given
+volume index from 0 to 100.
+<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="">
+<point>0,-9600</point>
+<point>100,0</point>
+</volume>
+-->
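(Worked illustration, assuming linear interpolation between adjacent curve points, an assumption not stated in this file: on a curve containing the points 20,-4000 and 60,-1700, a volume index of 40 falls halfway through that segment and resolves to roughly -4000 + 0.5 * (-1700 - (-4000)) = -2850 millibels, i.e. about -28.5 dB of attenuation.)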
+
+<volumeGroups>
+    <volumeGroup>
+        <stream>AUDIO_STREAM_VOICE_CALL</stream>
+        <indexMin>1</indexMin>
+        <indexMax>7</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+            <point>0,-4200</point>
+            <point>33,-2800</point>
+            <point>66,-1400</point>
+            <point>100,0</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+            <point>0,-2400</point>
+            <point>33,-1600</point>
+            <point>66,-800</point>
+            <point>100,0</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+            <point>0,-2700</point>
+            <point>33,-1800</point>
+            <point>66,-900</point>
+            <point>100,0</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_SYSTEM</stream>
+        <indexMin>0</indexMin>
+        <indexMax>7</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+            <point>1,-3000</point>
+            <point>33,-2600</point>
+            <point>66,-2200</point>
+            <point>100,-1800</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+            <point>1,-5100</point>
+            <point>57,-2800</point>
+            <point>71,-2500</point>
+            <point>85,-2300</point>
+            <point>100,-2100</point>
+        </volume>
+        <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_RING</stream>
+        <indexMin>0</indexMin>
+        <indexMax>7</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_MUSIC</stream>
+        <indexMin>0</indexMin>
+        <indexMax>25</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID"  ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_ALARM</stream>
+        <indexMin>1</indexMin>
+        <indexMax>7</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_NOTIFICATION</stream>
+        <indexMin>0</indexMin>
+        <indexMax>7</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_BLUETOOTH_SCO</stream>
+        <indexMin>0</indexMin>
+        <indexMax>15</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+            <point>0,-4200</point>
+            <point>33,-2800</point>
+            <point>66,-1400</point>
+            <point>100,0</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+            <point>0,-2400</point>
+            <point>33,-1600</point>
+            <point>66,-800</point>
+            <point>100,0</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+            <point>0,-4200</point>
+            <point>33,-2800</point>
+            <point>66,-1400</point>
+            <point>100,0</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_ENFORCED_AUDIBLE</stream>
+        <indexMin>0</indexMin>
+        <indexMax>7</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+            <point>1,-3000</point>
+            <point>33,-2600</point>
+            <point>66,-2200</point>
+            <point>100,-1800</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+            <point>1,-3400</point>
+            <point>71,-2400</point>
+            <point>100,-2000</point>
+        </volume>
+        <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_DTMF</stream>
+        <indexMin>0</indexMin>
+        <indexMax>15</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+            <point>1,-3000</point>
+            <point>33,-2600</point>
+            <point>66,-2200</point>
+            <point>100,-1800</point>
+        </volume>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+            <point>1,-4000</point>
+            <point>71,-2400</point>
+            <point>100,-1400</point>
+        </volume>
+        <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_TTS</stream>
+        <indexMin>0</indexMin>
+        <indexMax>15</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="SILENT_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="SILENT_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="SILENT_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="SILENT_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_ACCESSIBILITY</stream>
+        <indexMin>1</indexMin>
+        <indexMax>15</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_REROUTING</stream>
+        <indexMin>0</indexMin>
+        <indexMax>1</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+    </volumeGroup>
+
+    <volumeGroup>
+        <stream>AUDIO_STREAM_PATCH</stream>
+        <indexMin>0</indexMin>
+        <indexMax>1</indexMax>
+        <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+        <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+    </volumeGroup>
+</volumeGroups>
+
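To make the point format documented at the top of this file concrete, here is a minimal, self-contained sketch of how a curve's volume-index/millibel points could be evaluated by linear interpolation. The CurvePoint struct and volIndexToAttenuationMb helper below are hypothetical illustrations only; the actual evaluation is performed by the policy engine's volume curve classes, which are not part of this change.

// Illustrative only: evaluate a <volume> curve at a normalized index (0..100).
// Assumes the points are sorted by index, as in the XML above.
#include <cstddef>
#include <vector>

struct CurvePoint {
    int index;         // normalized volume index, 0..100
    int attenuationMb; // attenuation in millibels (negative values attenuate)
};

static float volIndexToAttenuationMb(const std::vector<CurvePoint> &curve, int normIndex)
{
    if (curve.empty()) return 0.0f;
    if (normIndex <= curve.front().index) return curve.front().attenuationMb;
    if (normIndex >= curve.back().index) return curve.back().attenuationMb;
    for (std::size_t i = 1; i < curve.size(); i++) {
        if (normIndex <= curve[i].index) {
            const CurvePoint &lo = curve[i - 1];
            const CurvePoint &hi = curve[i];
            float frac = float(normIndex - lo.index) / float(hi.index - lo.index);
            return lo.attenuationMb + frac * (hi.attenuationMb - lo.attenuationMb);
        }
    }
    return curve.back().attenuationMb;
}

// Example: the DEVICE_CATEGORY_SPEAKER curve of AUDIO_STREAM_VOICE_CALL above,
// {0,-2400} {33,-1600} {66,-800} {100,0}, yields about -1188 mB (~ -11.9 dB) at index 50.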
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index cc5a025..fd6a013 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -29,6 +29,7 @@
 #include <AudioPolicyManagerObserver.h>
 #include <AudioPort.h>
 #include <IOProfile.h>
+#include <AudioIODescriptorInterface.h>
 #include <policy.h>
 #include <utils/String8.h>
 #include <utils/Log.h>
@@ -38,60 +39,33 @@
 namespace audio_policy
 {
 
+struct legacy_strategy_map { const char *name; legacy_strategy id; };
+static const std::vector<legacy_strategy_map> gLegacyStrategy = {
+    { "STRATEGY_NONE", STRATEGY_NONE },
+    { "STRATEGY_MEDIA", STRATEGY_MEDIA },
+    { "STRATEGY_PHONE", STRATEGY_PHONE },
+    { "STRATEGY_SONIFICATION", STRATEGY_SONIFICATION },
+    { "STRATEGY_SONIFICATION_RESPECTFUL", STRATEGY_SONIFICATION_RESPECTFUL },
+    { "STRATEGY_DTMF", STRATEGY_DTMF },
+    { "STRATEGY_ENFORCED_AUDIBLE", STRATEGY_ENFORCED_AUDIBLE },
+    { "STRATEGY_TRANSMITTED_THROUGH_SPEAKER", STRATEGY_TRANSMITTED_THROUGH_SPEAKER },
+    { "STRATEGY_ACCESSIBILITY", STRATEGY_ACCESSIBILITY },
+    { "STRATEGY_REROUTING", STRATEGY_REROUTING },
+    { "STRATEGY_PATCH", STRATEGY_REROUTING }, // boiler to manage stream patch volume
+};
+
 Engine::Engine()
-    : mManagerInterface(this),
-      mPhoneState(AUDIO_MODE_NORMAL),
-      mApmObserver(NULL)
 {
-    for (int i = 0; i < AUDIO_POLICY_FORCE_USE_CNT; i++) {
-        mForceUse[i] = AUDIO_POLICY_FORCE_NONE;
+    auto result = EngineBase::loadAudioPolicyEngineConfig();
+    ALOGE_IF(result.nbSkippedElement != 0,
+             "Policy Engine configuration is partially invalid, skipped %zu elements",
+             result.nbSkippedElement);
+
+    for (const auto &strategy : gLegacyStrategy) {
+        mLegacyStrategyMap[getProductStrategyByName(strategy.name)] = strategy.id;
     }
 }
 
-Engine::~Engine()
-{
-}
-
-void Engine::setObserver(AudioPolicyManagerObserver *observer)
-{
-    ALOG_ASSERT(observer != NULL, "Invalid Audio Policy Manager observer");
-    mApmObserver = observer;
-}
-
-status_t Engine::initCheck()
-{
-    return (mApmObserver != NULL) ?  NO_ERROR : NO_INIT;
-}
-
-status_t Engine::setPhoneState(audio_mode_t state)
-{
-    ALOGV("setPhoneState() state %d", state);
-
-    if (state < 0 || state >= AUDIO_MODE_CNT) {
-        ALOGW("setPhoneState() invalid state %d", state);
-        return BAD_VALUE;
-    }
-
-    if (state == mPhoneState ) {
-        ALOGW("setPhoneState() setting same state %d", state);
-        return BAD_VALUE;
-    }
-
-    // store previous phone state for management of sonification strategy below
-    int oldState = mPhoneState;
-    mPhoneState = state;
-
-    if (!is_state_in_call(oldState) && is_state_in_call(state)) {
-        ALOGV("  Entering call in setPhoneState()");
-        mApmObserver->getVolumeCurves().switchVolumeCurve(AUDIO_STREAM_VOICE_CALL,
-                                                          AUDIO_STREAM_DTMF);
-    } else if (is_state_in_call(oldState) && !is_state_in_call(state)) {
-        ALOGV("  Exiting call in setPhoneState()");
-        mApmObserver->getVolumeCurves().restoreOriginVolumeCurve(AUDIO_STREAM_DTMF);
-    }
-    return NO_ERROR;
-}
-
 status_t Engine::setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config)
 {
     switch(usage) {
@@ -101,7 +75,6 @@
             ALOGW("setForceUse() invalid config %d for FOR_COMMUNICATION", config);
             return BAD_VALUE;
         }
-        mForceUse[usage] = config;
         break;
     case AUDIO_POLICY_FORCE_FOR_MEDIA:
         if (config != AUDIO_POLICY_FORCE_HEADPHONES && config != AUDIO_POLICY_FORCE_BT_A2DP &&
@@ -112,7 +85,6 @@
             ALOGW("setForceUse() invalid config %d for FOR_MEDIA", config);
             return BAD_VALUE;
         }
-        mForceUse[usage] = config;
         break;
     case AUDIO_POLICY_FORCE_FOR_RECORD:
         if (config != AUDIO_POLICY_FORCE_BT_SCO && config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
@@ -120,7 +92,6 @@
             ALOGW("setForceUse() invalid config %d for FOR_RECORD", config);
             return BAD_VALUE;
         }
-        mForceUse[usage] = config;
         break;
     case AUDIO_POLICY_FORCE_FOR_DOCK:
         if (config != AUDIO_POLICY_FORCE_NONE && config != AUDIO_POLICY_FORCE_BT_CAR_DOCK &&
@@ -130,21 +101,18 @@
             config != AUDIO_POLICY_FORCE_DIGITAL_DOCK) {
             ALOGW("setForceUse() invalid config %d for FOR_DOCK", config);
         }
-        mForceUse[usage] = config;
         break;
     case AUDIO_POLICY_FORCE_FOR_SYSTEM:
         if (config != AUDIO_POLICY_FORCE_NONE &&
             config != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
             ALOGW("setForceUse() invalid config %d for FOR_SYSTEM", config);
         }
-        mForceUse[usage] = config;
         break;
     case AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO:
         if (config != AUDIO_POLICY_FORCE_NONE &&
             config != AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED) {
             ALOGW("setForceUse() invalid config %d for HDMI_SYSTEM_AUDIO", config);
         }
-        mForceUse[usage] = config;
         break;
     case AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND:
         if (config != AUDIO_POLICY_FORCE_NONE &&
@@ -154,109 +122,25 @@
             ALOGW("setForceUse() invalid config %d for ENCODED_SURROUND", config);
             return BAD_VALUE;
         }
-        mForceUse[usage] = config;
         break;
     case AUDIO_POLICY_FORCE_FOR_VIBRATE_RINGING:
         if (config != AUDIO_POLICY_FORCE_BT_SCO && config != AUDIO_POLICY_FORCE_NONE) {
             ALOGW("setForceUse() invalid config %d for FOR_VIBRATE_RINGING", config);
             return BAD_VALUE;
         }
-        mForceUse[usage] = config;
         break;
     default:
         ALOGW("setForceUse() invalid usage %d", usage);
         break; // TODO return BAD_VALUE?
     }
-    return NO_ERROR;
+    return EngineBase::setForceUse(usage, config);
 }
 
-routing_strategy Engine::getStrategyForStream(audio_stream_type_t stream)
-{
-    // stream to strategy mapping
-    switch (stream) {
-    case AUDIO_STREAM_VOICE_CALL:
-    case AUDIO_STREAM_BLUETOOTH_SCO:
-        return STRATEGY_PHONE;
-    case AUDIO_STREAM_RING:
-    case AUDIO_STREAM_ALARM:
-        return STRATEGY_SONIFICATION;
-    case AUDIO_STREAM_NOTIFICATION:
-        return STRATEGY_SONIFICATION_RESPECTFUL;
-    case AUDIO_STREAM_DTMF:
-        return STRATEGY_DTMF;
-    default:
-        ALOGE("unknown stream type %d", stream);
-        FALLTHROUGH_INTENDED;
-    case AUDIO_STREAM_SYSTEM:
-        // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
-        // while key clicks are played produces a poor result
-    case AUDIO_STREAM_MUSIC:
-        return STRATEGY_MEDIA;
-    case AUDIO_STREAM_ENFORCED_AUDIBLE:
-        return STRATEGY_ENFORCED_AUDIBLE;
-    case AUDIO_STREAM_TTS:
-        return STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
-    case AUDIO_STREAM_ACCESSIBILITY:
-        return STRATEGY_ACCESSIBILITY;
-    case AUDIO_STREAM_REROUTING:
-        return STRATEGY_REROUTING;
-    }
-}
-
-routing_strategy Engine::getStrategyForUsage(audio_usage_t usage)
-{
-    // usage to strategy mapping
-    switch (usage) {
-    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
-        return STRATEGY_ACCESSIBILITY;
-
-    case AUDIO_USAGE_MEDIA:
-    case AUDIO_USAGE_GAME:
-    case AUDIO_USAGE_ASSISTANT:
-    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
-    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
-        return STRATEGY_MEDIA;
-
-    case AUDIO_USAGE_VOICE_COMMUNICATION:
-        return STRATEGY_PHONE;
-
-    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
-        return STRATEGY_DTMF;
-
-    case AUDIO_USAGE_ALARM:
-    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
-        return STRATEGY_SONIFICATION;
-
-    case AUDIO_USAGE_NOTIFICATION:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
-    case AUDIO_USAGE_NOTIFICATION_EVENT:
-        return STRATEGY_SONIFICATION_RESPECTFUL;
-
-    case AUDIO_USAGE_UNKNOWN:
-    default:
-        return STRATEGY_MEDIA;
-    }
-}
-
-audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
-{
-    DeviceVector availableOutputDevices = mApmObserver->getAvailableOutputDevices();
-    DeviceVector availableInputDevices = mApmObserver->getAvailableInputDevices();
-
-    const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
-
-    return getDeviceForStrategyInt(strategy, availableOutputDevices,
-                                   availableInputDevices, outputs, (uint32_t)AUDIO_DEVICE_NONE);
-}
-
-
-audio_devices_t Engine::getDeviceForStrategyInt(routing_strategy strategy,
-        DeviceVector availableOutputDevices,
-        DeviceVector availableInputDevices,
-        const SwAudioOutputCollection &outputs,
-        uint32_t outputDeviceTypesToIgnore) const
+audio_devices_t Engine::getDeviceForStrategyInt(legacy_strategy strategy,
+                                                DeviceVector availableOutputDevices,
+                                                DeviceVector availableInputDevices,
+                                                const SwAudioOutputCollection &outputs,
+                                                uint32_t outputDeviceTypesToIgnore) const
 {
     uint32_t device = AUDIO_DEVICE_NONE;
     uint32_t availableOutputDevicesType =
@@ -269,16 +153,17 @@
         break;
 
     case STRATEGY_SONIFICATION_RESPECTFUL:
-        if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
+        if (isInCall() || outputs.isActiveLocally(streamToVolumeSource(AUDIO_STREAM_VOICE_CALL))) {
             device = getDeviceForStrategyInt(
                     STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs,
                     outputDeviceTypesToIgnore);
         } else {
             bool media_active_locally =
-                    outputs.isStreamActiveLocally(
-                            AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)
-                    || outputs.isStreamActiveLocally(
-                            AUDIO_STREAM_ACCESSIBILITY, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY);
+                    outputs.isActiveLocally(streamToVolumeSource(AUDIO_STREAM_MUSIC),
+                                            SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)
+                    || outputs.isActiveLocally(
+                        streamToVolumeSource(AUDIO_STREAM_ACCESSIBILITY),
+                        SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY);
             // routing is same as media without the "remote" device
             device = getDeviceForStrategyInt(STRATEGY_MEDIA,
                     availableOutputDevices,
@@ -334,7 +219,7 @@
         }
         // for phone strategy, we first consider the forced use and then the available devices by
         // order of priority
-        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
+        switch (getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION)) {
         case AUDIO_POLICY_FORCE_BT_SCO:
             if (!isInCall() || strategy != STRATEGY_DTMF) {
                 device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
@@ -352,7 +237,7 @@
             if (device) break;
             // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
             if (!isInCall() &&
-                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
+                    (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                      outputs.isA2dpSupported()) {
                 device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
                 if (device) break;
@@ -386,7 +271,7 @@
             // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to
             // A2DP speaker when forcing to speaker output
             if (!isInCall() &&
-                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
+                    (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                      outputs.isA2dpSupported()) {
                 device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
                 if (device) break;
@@ -411,7 +296,8 @@
     case STRATEGY_SONIFICATION:
 
         // If incall, just select the STRATEGY_PHONE device
-        if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
+        if (isInCall() ||
+                outputs.isActiveLocally(streamToVolumeSource(AUDIO_STREAM_VOICE_CALL))) {
             device = getDeviceForStrategyInt(
                     STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
                     outputDeviceTypesToIgnore);
@@ -426,7 +312,7 @@
         //   - in countries where not enforced in which case it follows STRATEGY_MEDIA
 
         if ((strategy == STRATEGY_SONIFICATION) ||
-                (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
+                (getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
             device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
         }
 
@@ -442,9 +328,9 @@
                 device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
             }
             // Use ONLY Bluetooth SCO output when ringing in vibration mode
-            if (!((mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
+            if (!((getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
                     && (strategy == STRATEGY_ENFORCED_AUDIBLE))) {
-                if (mForceUse[AUDIO_POLICY_FORCE_FOR_VIBRATE_RINGING]
+                if (getForceUse(AUDIO_POLICY_FORCE_FOR_VIBRATE_RINGING)
                         == AUDIO_POLICY_FORCE_BT_SCO) {
                     if (device2 != AUDIO_DEVICE_NONE) {
                         device = device2;
@@ -453,7 +339,7 @@
                 }
             }
             // Use both Bluetooth SCO and phone default output when ringing in normal mode
-            if (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] == AUDIO_POLICY_FORCE_BT_SCO) {
+            if (getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) == AUDIO_POLICY_FORCE_BT_SCO) {
                 if ((strategy == STRATEGY_SONIFICATION) &&
                         (device & AUDIO_DEVICE_OUT_SPEAKER) &&
                         (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
@@ -484,8 +370,8 @@
             }
             availableOutputDevices =
                     availableOutputDevices.getDevicesFromTypeMask(availableOutputDevicesType);
-            if (outputs.isStreamActive(AUDIO_STREAM_RING) ||
-                    outputs.isStreamActive(AUDIO_STREAM_ALARM)) {
+            if (outputs.isActive(streamToVolumeSource(AUDIO_STREAM_RING)) ||
+                    outputs.isActive(streamToVolumeSource(AUDIO_STREAM_ALARM))) {
                 return getDeviceForStrategyInt(
                     STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs,
                     outputDeviceTypesToIgnore);
@@ -520,7 +406,7 @@
             device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HEARING_AID;
         }
         if ((device2 == AUDIO_DEVICE_NONE) &&
-                (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
+                (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                  outputs.isA2dpSupported()) {
             device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
             if (device2 == AUDIO_DEVICE_NONE) {
@@ -531,7 +417,7 @@
             }
         }
         if ((device2 == AUDIO_DEVICE_NONE) &&
-            (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] == AUDIO_POLICY_FORCE_SPEAKER)) {
+            (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) == AUDIO_POLICY_FORCE_SPEAKER)) {
             device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
         }
         if (device2 == AUDIO_DEVICE_NONE) {
@@ -560,7 +446,7 @@
             device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
         }
         if ((device2 == AUDIO_DEVICE_NONE) &&
-                (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
+                (getForceUse(AUDIO_POLICY_FORCE_FOR_DOCK) == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
             device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
         }
         if (device2 == AUDIO_DEVICE_NONE) {
@@ -581,7 +467,7 @@
 
         // If hdmi system audio mode is on, remove speaker out of output list.
         if ((strategy == STRATEGY_MEDIA) &&
-            (mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] ==
+            (getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO) ==
                 AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
             device &= ~AUDIO_DEVICE_OUT_SPEAKER;
         }
@@ -603,7 +489,7 @@
 
     if (device == AUDIO_DEVICE_NONE) {
         ALOGV("getDeviceForStrategy() no device found for strategy %d", strategy);
-        device = mApmObserver->getDefaultOutputDevice()->type();
+        device = getApmObserver()->getDefaultOutputDevice()->type();
         ALOGE_IF(device == AUDIO_DEVICE_NONE,
                  "getDeviceForStrategy() no default device defined");
     }
@@ -614,9 +500,9 @@
 
 audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) const
 {
-    const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices();
-    const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices();
-    const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
+    const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+    const DeviceVector &availableInputDevices = getApmObserver()->getAvailableInputDevices();
+    const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
     audio_devices_t availableDeviceTypes = availableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
 
     uint32_t device = AUDIO_DEVICE_NONE;
@@ -651,7 +537,7 @@
     case AUDIO_SOURCE_MIC:
     if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) {
         device = AUDIO_DEVICE_IN_BLUETOOTH_A2DP;
-    } else if ((mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO) &&
+    } else if ((getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO) &&
         (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET)) {
         device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
     } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
@@ -675,7 +561,7 @@
                     primaryOutput->getModuleHandle()) & ~AUDIO_DEVICE_BIT_IN;
         }
 
-        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
+        switch (getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION)) {
         case AUDIO_POLICY_FORCE_BT_SCO:
             // if SCO device is requested but no SCO device is available, fall back to default case
             if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
@@ -709,7 +595,7 @@
     case AUDIO_SOURCE_VOICE_RECOGNITION:
     case AUDIO_SOURCE_UNPROCESSED:
     case AUDIO_SOURCE_HOTWORD:
-        if (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO &&
+        if (getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO &&
                 availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
             device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
         } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
@@ -780,10 +666,107 @@
     return device;
 }
 
+void Engine::updateDeviceSelectionCache()
+{
+    for (const auto &iter : getProductStrategies()) {
+        const auto &strategy = iter.second;
+        auto devices = getDevicesForProductStrategy(strategy->getId());
+        mDevicesForStrategies[strategy->getId()] = devices;
+        strategy->setDeviceTypes(devices.types());
+        strategy->setDeviceAddress(devices.getFirstValidAddress().c_str());
+    }
+}
+
+DeviceVector Engine::getDevicesForProductStrategy(product_strategy_t strategy) const
+{
+    DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+    DeviceVector availableInputDevices = getApmObserver()->getAvailableInputDevices();
+    const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
+
+    auto legacyStrategy = mLegacyStrategyMap.find(strategy) != end(mLegacyStrategyMap) ?
+                mLegacyStrategyMap.at(strategy) : STRATEGY_NONE;
+    audio_devices_t devices = getDeviceForStrategyInt(legacyStrategy,
+                                                      availableOutputDevices,
+                                                      availableInputDevices, outputs,
+                                                      (uint32_t)AUDIO_DEVICE_NONE);
+    return availableOutputDevices.getDevicesFromTypeMask(devices);
+}
+
+DeviceVector Engine::getOutputDevicesForAttributes(const audio_attributes_t &attributes,
+                                                   const sp<DeviceDescriptor> &preferredDevice,
+                                                   bool fromCache) const
+{
+    // First check for an explicit routing device
+    if (preferredDevice != nullptr) {
+        ALOGV("%s explicit Routing on device %s", __func__, preferredDevice->toString().c_str());
+        return DeviceVector(preferredDevice);
+    }
+    product_strategy_t strategy = getProductStrategyForAttributes(attributes);
+    const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+    const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
+    //
+    // @TODO: what is the priority of explicit routing? Should it be considered first, as it
+    // used to be by APM?
+    //
+    // Honor explicit routing requests only if all active clients have a preferred route, in
+    // which case the route of the last active client is used.
+    sp<DeviceDescriptor> device = findPreferredDevice(outputs, strategy, availableOutputDevices);
+    if (device != nullptr) {
+        return DeviceVector(device);
+    }
+
+    return fromCache ? mDevicesForStrategies.at(strategy) : getDevicesForProductStrategy(strategy);
+}
+
+DeviceVector Engine::getOutputDevicesForStream(audio_stream_type_t stream, bool fromCache) const
+{
+    auto attributes = getAttributesForStreamType(stream);
+    return getOutputDevicesForAttributes(attributes, nullptr, fromCache);
+}
+
+sp<DeviceDescriptor> Engine::getInputDeviceForAttributes(const audio_attributes_t &attr,
+                                                         AudioMix **mix) const
+{
+    const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
+    const auto &availableInputDevices = getApmObserver()->getAvailableInputDevices();
+    const auto &inputs = getApmObserver()->getInputs();
+    std::string address;
+
+    //
+    // Explicit routing: what is the priority of explicit routing? Should it be considered
+    // first, as it used to be by APM?
+    //
+    // Honor explicit routing requests only if all active clients have a preferred route, in
+    // which case the route of the last active client is used.
+    sp<DeviceDescriptor> device =
+            findPreferredDevice(inputs, attr.source, availableInputDevices);
+    if (device != nullptr) {
+        return device;
+    }
+
+    device = policyMixes.getDeviceAndMixForInputSource(attr.source, availableInputDevices, mix);
+    if (device != nullptr) {
+        return device;
+    }
+    audio_devices_t deviceType = getDeviceForInputSource(attr.source);
+
+    if (audio_is_remote_submix_device(deviceType)) {
+        address = "0";
+        std::size_t pos;
+        std::string tags { attr.tags };
+        if ((pos = tags.find("addr=")) != std::string::npos) {
+            address = tags.substr(pos + std::strlen("addr="));
+        }
+    }
+    return availableInputDevices.getDevice(deviceType,
+                                           String8(address.c_str()),
+                                           AUDIO_FORMAT_DEFAULT);
+}
+
 template <>
 AudioPolicyManagerInterface *Engine::queryInterface()
 {
-    return &mManagerInterface;
+    return this;
 }
 
 } // namespace audio_policy
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
index 06186c1..d8a3698 100644
--- a/services/audiopolicy/enginedefault/src/Engine.h
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -16,7 +16,7 @@
 
 #pragma once
 
-
+#include "EngineBase.h"
 #include "AudioPolicyManagerInterface.h"
 #include <AudioGain.h>
 #include <policy.h>
@@ -29,114 +29,67 @@
 namespace audio_policy
 {
 
-class Engine
+enum legacy_strategy {
+    STRATEGY_NONE = -1,
+    STRATEGY_MEDIA,
+    STRATEGY_PHONE,
+    STRATEGY_SONIFICATION,
+    STRATEGY_SONIFICATION_RESPECTFUL,
+    STRATEGY_DTMF,
+    STRATEGY_ENFORCED_AUDIBLE,
+    STRATEGY_TRANSMITTED_THROUGH_SPEAKER,
+    STRATEGY_ACCESSIBILITY,
+    STRATEGY_REROUTING,
+};
+
+class Engine : public EngineBase
 {
 public:
     Engine();
-    virtual ~Engine();
+    virtual ~Engine() = default;
 
     template <class RequestedInterface>
     RequestedInterface *queryInterface();
 
 private:
-    /// Interface members
-    class ManagerInterfaceImpl : public AudioPolicyManagerInterface
-    {
-    public:
-        explicit ManagerInterfaceImpl(Engine *policyEngine)
-            : mPolicyEngine(policyEngine) {}
+    ///
+    /// Overrides from EngineBase, and therefore from AudioPolicyManagerInterface
+    ///
+    status_t setForceUse(audio_policy_force_use_t usage,
+                         audio_policy_forced_cfg_t config) override;
 
-        virtual void setObserver(AudioPolicyManagerObserver *observer)
-        {
-            mPolicyEngine->setObserver(observer);
-        }
-        virtual status_t initCheck()
-        {
-            return mPolicyEngine->initCheck();
-        }
-        virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const
-        {
-            return mPolicyEngine->getDeviceForInputSource(inputSource);
-        }
-        virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy) const
-        {
-            return mPolicyEngine->getDeviceForStrategy(strategy);
-        }
-        virtual routing_strategy getStrategyForStream(audio_stream_type_t stream)
-        {
-            return mPolicyEngine->getStrategyForStream(stream);
-        }
-        virtual routing_strategy getStrategyForUsage(audio_usage_t usage)
-        {
-            return mPolicyEngine->getStrategyForUsage(usage);
-        }
-        virtual status_t setPhoneState(audio_mode_t mode)
-        {
-            return mPolicyEngine->setPhoneState(mode);
-        }
-        virtual audio_mode_t getPhoneState() const
-        {
-            return mPolicyEngine->getPhoneState();
-        }
-        virtual status_t setForceUse(audio_policy_force_use_t usage,
-                                     audio_policy_forced_cfg_t config)
-        {
-            return mPolicyEngine->setForceUse(usage, config);
-        }
-        virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const
-        {
-            return mPolicyEngine->getForceUse(usage);
-        }
-        virtual status_t setDeviceConnectionState(const sp<DeviceDescriptor> /*devDesc*/,
-                                                  audio_policy_dev_state_t /*state*/)
-        {
-            return NO_ERROR;
-        }
-    private:
-        Engine *mPolicyEngine;
-    } mManagerInterface;
+    DeviceVector getOutputDevicesForAttributes(const audio_attributes_t &attr,
+                                               const sp<DeviceDescriptor> &preferredDevice = nullptr,
+                                               bool fromCache = false) const override;
+
+    DeviceVector getOutputDevicesForStream(audio_stream_type_t stream,
+                                           bool fromCache = false) const override;
+
+    sp<DeviceDescriptor> getInputDeviceForAttributes(
+            const audio_attributes_t &attr, AudioMix **mix = nullptr) const override;
+
+    void updateDeviceSelectionCache() override;
 
 private:
     /* Copy facilities are put private to disable copy. */
     Engine(const Engine &object);
     Engine &operator=(const Engine &object);
 
-    void setObserver(AudioPolicyManagerObserver *observer);
-
-    status_t initCheck();
-
-    inline bool isInCall() const
-    {
-        return is_state_in_call(mPhoneState);
-    }
-
-    status_t setPhoneState(audio_mode_t mode);
-    audio_mode_t getPhoneState() const
-    {
-        return mPhoneState;
-    }
-    status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
-    audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const
-    {
-        return mForceUse[usage];
-    }
     status_t setDefaultDevice(audio_devices_t device);
 
-    routing_strategy getStrategyForStream(audio_stream_type_t stream);
-    routing_strategy getStrategyForUsage(audio_usage_t usage);
-    audio_devices_t getDeviceForStrategy(routing_strategy strategy) const;
-    audio_devices_t getDeviceForStrategyInt(routing_strategy strategy,
-            DeviceVector availableOutputDevices,
-            DeviceVector availableInputDevices,
-            const SwAudioOutputCollection &outputs,
-            uint32_t outputDeviceTypesToIgnore) const;
+    audio_devices_t getDeviceForStrategyInt(legacy_strategy strategy,
+                                            DeviceVector availableOutputDevices,
+                                            DeviceVector availableInputDevices,
+                                            const SwAudioOutputCollection &outputs,
+                                            uint32_t outputDeviceTypesToIgnore) const;
+
+    DeviceVector getDevicesForProductStrategy(product_strategy_t strategy) const;
+
     audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const;
-    audio_mode_t mPhoneState;  /**< current phone state. */
 
-    /** current forced use configuration. */
-    audio_policy_forced_cfg_t mForceUse[AUDIO_POLICY_FORCE_USE_CNT];
+    DeviceStrategyMap mDevicesForStrategies;
 
-    AudioPolicyManagerObserver *mApmObserver;
+    std::map<product_strategy_t, legacy_strategy> mLegacyStrategyMap;
 };
 } // namespace audio_policy
 } // namespace android
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index d7c7b4d..b563a04 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -33,9 +33,12 @@
 #define AUDIO_POLICY_XML_CONFIG_FILE_NAME "audio_policy_configuration.xml"
 #define AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME \
         "audio_policy_configuration_a2dp_offload_disabled.xml"
+#define AUDIO_POLICY_BLUETOOTH_HAL_ENABLED_XML_CONFIG_FILE_NAME \
+        "audio_policy_configuration_bluetooth_hal_enabled.xml"
 
 #include <inttypes.h>
 #include <math.h>
+#include <set>
 #include <unordered_set>
 #include <vector>
 
@@ -44,7 +47,6 @@
 #include <cutils/properties.h>
 #include <utils/Log.h>
 #include <media/AudioParameter.h>
-#include <media/AudioPolicyHelper.h>
 #include <private/android_filesystem_config.h>
 #include <soundtrigger/SoundTrigger.h>
 #include <system/audio.h>
@@ -191,6 +193,8 @@
             // remove device from available output devices
             mAvailableOutputDevices.remove(device);
 
+            mOutputs.clearSessionRoutesForDevice(device);
+
             checkOutputsForDevice(device, state, outputs);
 
             // Reset active device codec
@@ -205,7 +209,26 @@
             return BAD_VALUE;
         }
 
-        checkForDeviceAndOutputChanges([&]() {
+        // No need to evaluate playback routing when connecting a remote submix
+        // output device used by a dynamic policy of type recorder as no
+        // playback use case is affected.
+        bool doCheckForDeviceAndOutputChanges = true;
+        if (device->type() == AUDIO_DEVICE_OUT_REMOTE_SUBMIX
+                && strncmp(device_address, "0", AUDIO_DEVICE_MAX_ADDRESS_LEN) != 0) {
+            for (audio_io_handle_t output : outputs) {
+                sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
+                if (desc->mPolicyMix != nullptr
+                        && desc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS
+                        && strncmp(device_address,
+                                   desc->mPolicyMix->mDeviceAddress.string(),
+                                   AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
+                    doCheckForDeviceAndOutputChanges = false;
+                    break;
+                }
+            }
+        }
+
+        auto checkCloseOutputs = [&]() {
             // outputs must be closed after checkOutputForAllStrategies() is executed
             if (!outputs.isEmpty()) {
                 for (audio_io_handle_t output : outputs) {
@@ -214,7 +237,7 @@
                     // been opened by checkOutputsForDevice() to query dynamic parameters
                     if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
                             (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
-                             (desc->mDirectOpenCount == 0))) {
+                                (desc->mDirectOpenCount == 0))) {
                         closeOutput(output);
                     }
                 }
@@ -222,7 +245,13 @@
                 return true;
             }
             return false;
-        });
+        };
+
+        if (doCheckForDeviceAndOutputChanges) {
+            checkForDeviceAndOutputChanges(checkCloseOutputs);
+        } else {
+            checkCloseOutputs();
+        }
 
         if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
             DeviceVector newDevices = getNewOutputDevices(mPrimaryOutput, false /*fromCache*/);
@@ -495,9 +524,10 @@
     ALOG_ASSERT(!rxDevices.isEmpty(), "updateCallRouting() no selected output device");
 
     audio_attributes_t attr = { .source = AUDIO_SOURCE_VOICE_COMMUNICATION };
-    auto txSourceDevice = getDeviceAndMixForAttributes(attr);
+    auto txSourceDevice = mEngine->getInputDeviceForAttributes(attr);
     ALOG_ASSERT(txSourceDevice != 0, "updateCallRouting() input selected device not available");
-    ALOGV("updateCallRouting device rxDevice %s txDevice %s", 
+
+    ALOGV("updateCallRouting device rxDevice %s txDevice %s",
           rxDevices.itemAt(0)->toString().c_str(), txSourceDevice->toString().c_str());
 
     // release existing RX patch if any
@@ -555,6 +585,10 @@
         muteWaitMs = setOutputDevices(mPrimaryOutput, rxDevices, true, delayMs);
     } else { // create RX path audio patch
         mCallRxPatch = createTelephonyPatch(true /*isRx*/, rxDevices.itemAt(0), delayMs);
+
+        // If the TX device is on the primary HW module but the RX device is on another
+        // HW module, the SinkMetaData of the telephony input should handle it,
+        // assuming the device uses audio HAL V5.0 or above
     }
     if (createTxPatch) { // create TX path audio patch
         mCallTxPatch = createTelephonyPatch(false /*isRx*/, txSourceDevice, delayMs);
@@ -677,26 +711,27 @@
     int delayMs = 0;
     if (isStateInCall(state)) {
         nsecs_t sysTime = systemTime();
+        auto musicStrategy = streamToStrategy(AUDIO_STREAM_MUSIC);
+        auto sonificationStrategy = streamToStrategy(AUDIO_STREAM_ALARM);
         for (size_t i = 0; i < mOutputs.size(); i++) {
             sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
             // mute media and sonification strategies and delay device switch by the largest
             // latency of any output where either strategy is active.
             // This avoid sending the ring tone or music tail into the earpiece or headset.
-            if ((isStrategyActive(desc, STRATEGY_MEDIA,
-                                  SONIFICATION_HEADSET_MUSIC_DELAY,
-                                  sysTime) ||
-                 isStrategyActive(desc, STRATEGY_SONIFICATION,
-                                  SONIFICATION_HEADSET_MUSIC_DELAY,
-                                  sysTime)) &&
+            if ((desc->isStrategyActive(musicStrategy, SONIFICATION_HEADSET_MUSIC_DELAY, sysTime) ||
+                 desc->isStrategyActive(sonificationStrategy, SONIFICATION_HEADSET_MUSIC_DELAY,
+                                        sysTime)) &&
                     (delayMs < (int)desc->latency()*2)) {
                 delayMs = desc->latency()*2;
             }
-            setStrategyMute(STRATEGY_MEDIA, true, desc);
-            setStrategyMute(STRATEGY_MEDIA, false, desc, MUTE_TIME_MS,
-                getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/));
-            setStrategyMute(STRATEGY_SONIFICATION, true, desc);
-            setStrategyMute(STRATEGY_SONIFICATION, false, desc, MUTE_TIME_MS,
-                getDeviceForStrategy(STRATEGY_SONIFICATION, true /*fromCache*/));
+            setStrategyMute(musicStrategy, true, desc);
+            setStrategyMute(musicStrategy, false, desc, MUTE_TIME_MS,
+                mEngine->getOutputDevicesForAttributes(attributes_initializer(AUDIO_USAGE_MEDIA),
+                                                       nullptr, true /*fromCache*/).types());
+            setStrategyMute(sonificationStrategy, true, desc);
+            setStrategyMute(sonificationStrategy, false, desc, MUTE_TIME_MS,
+                mEngine->getOutputDevicesForAttributes(attributes_initializer(AUDIO_USAGE_ALARM),
+                                                       nullptr, true /*fromCache*/).types());
         }
     }
 
@@ -743,12 +778,8 @@
     }
 
     // Flag that ringtone volume must be limited to music volume until we exit MODE_RINGTONE
-    if (state == AUDIO_MODE_RINGTONE &&
-        isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY)) {
-        mLimitRingtoneVolume = true;
-    } else {
-        mLimitRingtoneVolume = false;
-    }
+    mLimitRingtoneVolume = (state == AUDIO_MODE_RINGTONE &&
+                            isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY));
 }
 
 audio_mode_t AudioPolicyManager::getPhoneState() {
@@ -871,8 +902,7 @@
 
 audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream)
 {
-    routing_strategy strategy = getStrategy(stream);
-    DeviceVector devices = getDevicesForStrategy(strategy, false /*fromCache*/);
+    DeviceVector devices = mEngine->getOutputDevicesForStream(stream, false /*fromCache*/);
 
     // Note that related method getOutputForAttr() uses getOutputForDevice() not selectOutput().
     // We use selectOutput() here since we don't have the desired AudioTrack sample rate,
@@ -906,72 +936,75 @@
             ALOGE("%s:  invalid stream type", __func__);
             return BAD_VALUE;
         }
-        stream_type_to_audio_attributes(srcStream, dstAttr);
+        *dstAttr = mEngine->getAttributesForStreamType(srcStream);
     }
     return NO_ERROR;
 }
 
-status_t AudioPolicyManager::getOutputForAttrInt(audio_attributes_t *resultAttr,
-                                                 audio_io_handle_t *output,
-                                                 audio_session_t session,
-                                                 const audio_attributes_t *attr,
-                                                 audio_stream_type_t *stream,
-                                                 uid_t uid,
-                                                 const audio_config_t *config,
-                                                 audio_output_flags_t *flags,
-                                                 audio_port_handle_t *selectedDeviceId)
+status_t AudioPolicyManager::getOutputForAttrInt(
+        audio_attributes_t *resultAttr,
+        audio_io_handle_t *output,
+        audio_session_t session,
+        const audio_attributes_t *attr,
+        audio_stream_type_t *stream,
+        uid_t uid,
+        const audio_config_t *config,
+        audio_output_flags_t *flags,
+        audio_port_handle_t *selectedDeviceId,
+        bool *isRequestedDeviceForExclusiveUse,
+        std::vector<sp<SwAudioOutputDescriptor>> *secondaryDescs)
 {
-    DeviceVector devices;
-    routing_strategy strategy;
-    audio_devices_t deviceType = AUDIO_DEVICE_NONE;
+    DeviceVector outputDevices;
     const audio_port_handle_t requestedPortId = *selectedDeviceId;
     DeviceVector msdDevices = getMsdAudioOutDevices();
+    const sp<DeviceDescriptor> requestedDevice =
+        mAvailableOutputDevices.getDeviceFromId(requestedPortId);
 
     status_t status = getAudioAttributes(resultAttr, attr, *stream);
     if (status != NO_ERROR) {
         return status;
     }
+    *stream = mEngine->getStreamTypeForAttributes(*resultAttr);
 
-    ALOGV("%s usage=%d, content=%d, tag=%s flags=%08x"
-          " session %d selectedDeviceId %d",
-          __func__,
-          resultAttr->usage, resultAttr->content_type, resultAttr->tags, resultAttr->flags,
-          session, requestedPortId);
+    ALOGV("%s() attributes=%s stream=%s session %d selectedDeviceId %d", __func__,
+          toString(*resultAttr).c_str(), toString(*stream).c_str(), session, requestedPortId);
 
-    *stream = streamTypefromAttributesInt(resultAttr);
-
-    strategy = getStrategyForAttr(resultAttr);
-
-    // First check for explicit routing (eg. setPreferredDevice)
-    sp<DeviceDescriptor> requestedDevice = mAvailableOutputDevices.getDeviceFromId(requestedPortId);
-    if (requestedDevice != nullptr) {
-        deviceType = requestedDevice->type();
-    } else {
-        // If no explict route, is there a matching dynamic policy that applies?
-        sp<SwAudioOutputDescriptor> desc;
-        if (mPolicyMixes.getOutputForAttr(*resultAttr, uid, desc) == NO_ERROR) {
-            ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
-            if (!audio_has_proportional_frames(config->format)) {
-                return BAD_VALUE;
-            }
-            *stream = streamTypefromAttributesInt(resultAttr);
-            *output = desc->mIoHandle;
-            AudioMix *mix = desc->mPolicyMix;
-            sp<DeviceDescriptor> deviceDesc =
-                mAvailableOutputDevices.getDevice(
-                        mix->mDeviceType, mix->mDeviceAddress, AUDIO_FORMAT_DEFAULT);
-            *selectedDeviceId = deviceDesc != 0 ? deviceDesc->getId() : AUDIO_PORT_HANDLE_NONE;
-            ALOGV("%s returns output %d", __func__, *output);
-            return NO_ERROR;
-        }
-
-        // Virtual sources must always be dynamicaly or explicitly routed
-        if (resultAttr->usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
-            ALOGW("%s no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE", __func__);
-            return BAD_VALUE;
-        }
-        deviceType = getDeviceForStrategy(strategy, false /*fromCache*/);
+    // The primary output is the explicit routing (e.g. setPreferredDevice) if specified,
+    // otherwise fall back to the dynamic policies; if none match, query the engine.
+    // Secondary outputs are always found by dynamic policies, as the engine does not support them.
+    sp<SwAudioOutputDescriptor> policyDesc;
+    if (mPolicyMixes.getOutputForAttr(*resultAttr, uid, policyDesc, secondaryDescs) != NO_ERROR) {
+        policyDesc = nullptr; // reset getOutputForAttr in case of failure
+        secondaryDescs->clear();
     }
+    // Explicit routing has higher priority than any dynamic policy primary output
+    bool usePrimaryOutputFromPolicyMixes = requestedDevice == nullptr && policyDesc != nullptr;
+
+    // FIXME: in case of RENDER policy, the output capabilities should be checked
+    if ((usePrimaryOutputFromPolicyMixes || !secondaryDescs->empty())
+        && !audio_is_linear_pcm(config->format)) {
+        ALOGD("%s: rejecting request as dynamic audio policy only support pcm", __func__);
+        return BAD_VALUE;
+    }
+    if (usePrimaryOutputFromPolicyMixes) {
+        *output = policyDesc->mIoHandle;
+        AudioMix *mix = policyDesc->mPolicyMix;
+        sp<DeviceDescriptor> deviceDesc =
+                mAvailableOutputDevices.getDevice(mix->mDeviceType,
+                                                  mix->mDeviceAddress,
+                                                  AUDIO_FORMAT_DEFAULT);
+        *selectedDeviceId = deviceDesc != 0 ? deviceDesc->getId() : AUDIO_PORT_HANDLE_NONE;
+        ALOGV("getOutputForAttr() returns output %d", *output);
+        return NO_ERROR;
+    }
+    // Virtual sources must always be dynamically or explicitly routed
+    if (resultAttr->usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
+        ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
+        return BAD_VALUE;
+    }
+    // Explicit routing, previously managed by getDeviceForStrategy in APM, is now handled by the
+    // engine, leaving the choice of priority order to future vendor engines.
+    outputDevices = mEngine->getOutputDevicesForAttributes(*resultAttr, requestedDevice, false);
 
     if ((resultAttr->flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
         *flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
@@ -982,45 +1015,40 @@
     // FIXME: provide a more generic approach which is not device specific and move this back
     // to getOutputForDevice.
     // TODO: Remove check of AUDIO_STREAM_MUSIC once migration is completed on the app side.
-    if (deviceType == AUDIO_DEVICE_OUT_TELEPHONY_TX &&
-        (*stream == AUDIO_STREAM_MUSIC || resultAttr->usage == AUDIO_USAGE_VOICE_COMMUNICATION) &&
+    if (outputDevices.types() == AUDIO_DEVICE_OUT_TELEPHONY_TX &&
+        (*stream == AUDIO_STREAM_MUSIC || resultAttr->usage == AUDIO_USAGE_VOICE_COMMUNICATION) &&
         audio_is_linear_pcm(config->format) &&
         isInCall()) {
         if (requestedPortId != AUDIO_PORT_HANDLE_NONE) {
             *flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
-        } else {
-            // Get the devce type directly from the engine to bypass preferred route logic
-            deviceType = mEngine->getDeviceForStrategy(strategy);
+            *isRequestedDeviceForExclusiveUse = true;
         }
     }
 
-    ALOGV("%s device 0x%x, sampling rate %d, format %#x, channel mask %#x, "
-          "flags %#x",
-          __func__,
-          deviceType, config->sample_rate, config->format, config->channel_mask, *flags);
+    ALOGV("%s() device %s, sampling rate %d, format %#x, channel mask %#x, flags %#x stream %s",
+          __func__, outputDevices.toString().c_str(), config->sample_rate, config->format,
+          config->channel_mask, *flags, toString(*stream).c_str());
 
     *output = AUDIO_IO_HANDLE_NONE;
     if (!msdDevices.isEmpty()) {
         *output = getOutputForDevices(msdDevices, session, *stream, config, flags);
-        sp<DeviceDescriptor> deviceDesc =
-                mAvailableOutputDevices.getDevice(deviceType, String8(), AUDIO_FORMAT_DEFAULT);
-        if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatch(deviceDesc) == NO_ERROR) {
-            ALOGV("%s() Using MSD devices %s instead of device %s",
-                    __func__, msdDevices.toString().c_str(), deviceDesc->toString().c_str());
-            deviceType = msdDevices.types();
+        sp<DeviceDescriptor> device = outputDevices.isEmpty() ? nullptr : outputDevices.itemAt(0);
+        if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatch(device) == NO_ERROR) {
+            ALOGV("%s() Using MSD devices %s instead of devices %s",
+                  __func__, msdDevices.toString().c_str(), outputDevices.toString().c_str());
+            outputDevices = msdDevices;
         } else {
             *output = AUDIO_IO_HANDLE_NONE;
         }
     }
-    devices = mAvailableOutputDevices.getDevicesFromTypeMask(deviceType);
     if (*output == AUDIO_IO_HANDLE_NONE) {
-        *output = getOutputForDevices(devices, session, *stream, config, flags);
+        *output = getOutputForDevices(outputDevices, session, *stream, config, flags);
     }
     if (*output == AUDIO_IO_HANDLE_NONE) {
         return INVALID_OPERATION;
     }
 
-    *selectedDeviceId = getFirstDeviceId(devices);
+    *selectedDeviceId = getFirstDeviceId(outputDevices);
 
     ALOGV("%s returns output %d selectedDeviceId %d", __func__, *output, *selectedDeviceId);
 
@@ -1035,7 +1063,8 @@
                                               const audio_config_t *config,
                                               audio_output_flags_t *flags,
                                               audio_port_handle_t *selectedDeviceId,
-                                              audio_port_handle_t *portId)
+                                              audio_port_handle_t *portId,
+                                              std::vector<audio_io_handle_t> *secondaryOutputs)
 {
     // The supplied portId must be AUDIO_PORT_HANDLE_NONE
     if (*portId != AUDIO_PORT_HANDLE_NONE) {
@@ -1043,11 +1072,27 @@
     }
     const audio_port_handle_t requestedPortId = *selectedDeviceId;
     audio_attributes_t resultAttr;
+    bool isRequestedDeviceForExclusiveUse = false;
+    std::vector<sp<SwAudioOutputDescriptor>> secondaryOutputDescs;
+    const sp<DeviceDescriptor> requestedDevice =
+      mAvailableOutputDevices.getDeviceFromId(requestedPortId);
+
+    // Prevent storing an invalid requested device id in clients
+    const audio_port_handle_t sanitizedRequestedPortId =
+      requestedDevice != nullptr ? requestedPortId : AUDIO_PORT_HANDLE_NONE;
+    *selectedDeviceId = sanitizedRequestedPortId;
+
     status_t status = getOutputForAttrInt(&resultAttr, output, session, attr, stream, uid,
-            config, flags, selectedDeviceId);
+            config, flags, selectedDeviceId, &isRequestedDeviceForExclusiveUse,
+            &secondaryOutputDescs);
     if (status != NO_ERROR) {
         return status;
     }
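+    // Expose the io handles of any secondary (policy mix) outputs to the client and keep weak
+    // references to their descriptors for the track client descriptor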
+    std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryOutputDescs;
+    for (auto& secondaryDesc : secondaryOutputDescs) {
+        secondaryOutputs->push_back(secondaryDesc->mIoHandle);
+        weakSecondaryOutputDescs.push_back(secondaryDesc);
+    }
 
     audio_config_base_t clientConfig = {.sample_rate = config->sample_rate,
         .format = config->format,
@@ -1056,14 +1101,16 @@
 
     sp<TrackClientDescriptor> clientDesc =
         new TrackClientDescriptor(*portId, uid, session, resultAttr, clientConfig,
-                                  requestedPortId, *stream,
-                                  getStrategyForAttr(&resultAttr),
-                                  *flags);
+                                  sanitizedRequestedPortId, *stream,
+                                  mEngine->getProductStrategyForAttributes(resultAttr),
+                                  streamToVolumeSource(*stream),
+                                  *flags, isRequestedDeviceForExclusiveUse,
+                                  std::move(weakSecondaryOutputDescs));
     sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(*output);
     outputDesc->addClient(clientDesc);
 
-    ALOGV("%s returns output %d selectedDeviceId %d for port ID %d",
-          __func__, *output, requestedPortId, *portId);
+    ALOGV("%s() returns output %d requestedPortId %d selectedDeviceId %d for port ID %d", __func__,
+          *output, requestedPortId, *selectedDeviceId, *portId);
 
     return NO_ERROR;
 }
@@ -1368,7 +1415,8 @@
         // Use media strategy for unspecified output device. This should only
         // occur on checkForDeviceAndOutputChanges(). Device connection events may
         // therefore invalidate explicit routing requests.
-        DeviceVector devices = getDevicesForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
+        DeviceVector devices = mEngine->getOutputDevicesForAttributes(
+                    attributes_initializer(AUDIO_USAGE_MEDIA), nullptr, false /*fromCache*/);
         LOG_ALWAYS_FATAL_IF(devices.isEmpty(), "no output device to set Msd Patch");
         device = devices.itemAt(0);
     }
@@ -1547,9 +1595,13 @@
 
     *delayMs = 0;
     audio_stream_type_t stream = client->stream();
+    auto clientVolSrc = client->volumeSource();
+    auto clientStrategy = client->strategy();
+    auto clientAttr = client->attributes();
     if (stream == AUDIO_STREAM_TTS) {
         ALOGV("\t found BEACON stream");
-        if (!mTtsOutputAvailable && mOutputs.isAnyOutputActive(AUDIO_STREAM_TTS /*streamToIgnore*/)) {
+        if (!mTtsOutputAvailable && mOutputs.isAnyOutputActive(
+                                    streamToVolumeSource(AUDIO_STREAM_TTS) /*sourceToIgnore*/)) {
             return INVALID_OPERATION;
         } else {
             beaconMuteLatency = handleEventForBeacon(STARTING_BEACON);
@@ -1571,13 +1623,15 @@
         policyMix = outputDesc->mPolicyMix;
         audio_devices_t newDeviceType;
         address = policyMix->mDeviceAddress.string();
-        if ((policyMix->mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
-            newDeviceType = policyMix->mDeviceType;
-        } else {
+        if ((policyMix->mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
             newDeviceType = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+        } else {
+            newDeviceType = policyMix->mDeviceType;
         }
-        devices.add(mAvailableOutputDevices.getDevice(newDeviceType,
-                                                      String8(address), AUDIO_FORMAT_DEFAULT));
+        sp<DeviceDescriptor> device = mAvailableOutputDevices.getDevice(newDeviceType,
+                                                                        String8(address),
+                                                                        AUDIO_FORMAT_DEFAULT);
+        ALOG_ASSERT(device, "%s: no device found t=%u, a=%s", __func__, newDeviceType, address);
+        devices.add(device);
     }
 
     // requiresMuteCheck is false when we can bypass mute strategy.
@@ -1594,24 +1648,23 @@
     if (client->hasPreferredDevice(true)) {
         devices = getNewOutputDevices(outputDesc, false /*fromCache*/);
         if (devices != outputDesc->devices()) {
-            checkStrategyRoute(getStrategy(stream), outputDesc->mIoHandle);
+            checkStrategyRoute(clientStrategy, outputDesc->mIoHandle);
         }
     }
 
-    if (stream == AUDIO_STREAM_MUSIC) {
+    if (followsSameRouting(clientAttr, attributes_initializer(AUDIO_USAGE_MEDIA))) {
         selectOutputForMusicEffects();
     }
 
-    if (outputDesc->streamActiveCount(stream) == 1 || !devices.isEmpty()) {
+    if (outputDesc->getActivityCount(clientVolSrc) == 1 || !devices.isEmpty()) {
         // starting an output being rerouted?
         if (devices.isEmpty()) {
             devices = getNewOutputDevices(outputDesc, false /*fromCache*/);
         }
-
-        routing_strategy strategy = getStrategy(stream);
-        bool shouldWait = (strategy == STRATEGY_SONIFICATION) ||
-                            (strategy == STRATEGY_SONIFICATION_RESPECTFUL) ||
-                            (beaconMuteLatency > 0);
+        bool shouldWait =
+            (followsSameRouting(clientAttr, attributes_initializer(AUDIO_USAGE_ALARM)) ||
+             followsSameRouting(clientAttr, attributes_initializer(AUDIO_USAGE_NOTIFICATION)) ||
+             (beaconMuteLatency > 0));
         uint32_t waitMs = beaconMuteLatency;
         for (size_t i = 0; i < mOutputs.size(); i++) {
             sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
@@ -1657,7 +1710,7 @@
 
         // apply volume rules for current stream and device if necessary
         checkAndSetVolume(stream,
-                          mVolumeCurves->getVolumeIndex(stream, outputDesc->devices().types()),
+                          getVolumeCurves(stream).getVolumeIndex(outputDesc->devices().types()),
                           outputDesc,
                           outputDesc->devices().types());
 
@@ -1666,7 +1719,7 @@
         handleNotificationRoutingForStream(stream);
 
         // force reevaluating accessibility routing when ringtone or alarm starts
-        if (strategy == STRATEGY_SONIFICATION) {
+        if (followsSameRouting(clientAttr, attributes_initializer(AUDIO_USAGE_ALARM))) {
             mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
         }
 
@@ -1685,7 +1738,7 @@
 
     if (stream == AUDIO_STREAM_ENFORCED_AUDIBLE &&
             mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
-        setStrategyMute(STRATEGY_SONIFICATION, true, outputDesc);
+        setStrategyMute(streamToStrategy(AUDIO_STREAM_ALARM), true, outputDesc);
     }
 
     // Automatically enable the remote submix input when output is started on a re routing mix
@@ -1729,11 +1782,12 @@
 {
     // always handle stream stop, check which stream type is stopping
     audio_stream_type_t stream = client->stream();
+    auto clientVolSrc = client->volumeSource();
 
     handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
 
-    if (outputDesc->streamActiveCount(stream) > 0) {
-        if (outputDesc->streamActiveCount(stream) == 1) {
+    if (outputDesc->getActivityCount(clientVolSrc) > 0) {
+        if (outputDesc->getActivityCount(clientVolSrc) == 1) {
             // Automatically disable the remote submix input when output is stopped on a
             // re routing mix of type MIX_TYPE_RECORDERS
             if (audio_is_remote_submix_device(outputDesc->devices().types()) &&
@@ -1747,7 +1801,7 @@
         }
         bool forceDeviceUpdate = false;
         if (client->hasPreferredDevice(true)) {
-            checkStrategyRoute(getStrategy(stream), AUDIO_IO_HANDLE_NONE);
+            checkStrategyRoute(client->strategy(), AUDIO_IO_HANDLE_NONE);
             forceDeviceUpdate = true;
         }
 
@@ -1755,8 +1809,8 @@
         outputDesc->setClientActive(client, false);
 
         // store time at which the stream was stopped - see isStreamActive()
-        if (outputDesc->streamActiveCount(stream) == 0 || forceDeviceUpdate) {
-            outputDesc->mStopTime[stream] = systemTime();
+        if (outputDesc->getActivityCount(clientVolSrc) == 0 || forceDeviceUpdate) {
+            outputDesc->setStopTime(client, systemTime());
             DeviceVector newDevices = getNewOutputDevices(outputDesc, false /*fromCache*/);
             // delay the device switch by twice the latency because stopOutput() is executed when
             // the track stop() command is received and at that time the audio track buffer can
@@ -1791,10 +1845,10 @@
 
         if (stream == AUDIO_STREAM_ENFORCED_AUDIBLE &&
                 mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
-            setStrategyMute(STRATEGY_SONIFICATION, false, outputDesc);
+            setStrategyMute(streamToStrategy(AUDIO_STREAM_RING), false, outputDesc);
         }
 
-        if (stream == AUDIO_STREAM_MUSIC) {
+        if (followsSameRouting(client->attributes(), attributes_initializer(AUDIO_USAGE_MEDIA))) {
             selectOutputForMusicEffects();
         }
         return NO_ERROR;
@@ -1849,9 +1903,9 @@
                                              input_type_t *inputType,
                                              audio_port_handle_t *portId)
 {
-    ALOGV("getInputForAttr() source %d, sampling rate %d, format %#x, channel mask %#x,"
-            "session %d, flags %#x",
-          attr->source, config->sample_rate, config->format, config->channel_mask, session, flags);
+    ALOGV("%s() source %d, sampling rate %d, format %#x, channel mask %#x, session %d, "
+          "flags %#x attributes=%s", __func__, attr->source, config->sample_rate,
+          config->format, config->channel_mask, session, flags, toString(*attr).c_str());
 
     status_t status = NO_ERROR;
     audio_source_t halInputSource;
@@ -1940,7 +1994,9 @@
         if (explicitRoutingDevice != nullptr) {
             device = explicitRoutingDevice;
         } else {
-            device = getDeviceAndMixForAttributes(attributes, &policyMix);
+            // Prevent storing an invalid requested device id in clients
+            requestedDeviceId = AUDIO_PORT_HANDLE_NONE;
+            device = mEngine->getInputDeviceForAttributes(attributes, &policyMix);
         }
         if (device == nullptr) {
             ALOGW("getInputForAttr() could not find device for source %d", attributes.source);
@@ -1954,8 +2010,6 @@
             // know about it and is therefore considered "legacy"
             *inputType = API_INPUT_LEGACY;
         } else if (audio_is_remote_submix_device(device->type())) {
-            device = mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX, String8("0"),
-                                                      AUDIO_FORMAT_DEFAULT);
             *inputType = API_INPUT_MIX_CAPTURE;
         } else if (device->type() == AUDIO_DEVICE_IN_TELEPHONY_RX) {
             *inputType = API_INPUT_TELEPHONY_RX;
@@ -1965,9 +2019,7 @@
 
     }
 
-    *input = getInputForDevice(device, session, attributes.source,
-                               config, flags,
-                               policyMix);
+    *input = getInputForDevice(device, session, attributes, config, flags, policyMix);
     if (*input == AUDIO_IO_HANDLE_NONE) {
         status = INVALID_OPERATION;
         goto error;
@@ -1975,8 +2027,8 @@
 
 exit:
 
-    *selectedDeviceId = mAvailableInputDevices.contains(device) ? 
-            device->getId() : AUDIO_PORT_HANDLE_NONE;
+    *selectedDeviceId = mAvailableInputDevices.contains(device) ?
+                device->getId() : AUDIO_PORT_HANDLE_NONE;
 
     isSoundTrigger = attributes.source == AUDIO_SOURCE_HOTWORD &&
         mSoundTriggerSessions.indexOfKey(session) > 0;
@@ -2000,16 +2052,16 @@
 
 audio_io_handle_t AudioPolicyManager::getInputForDevice(const sp<DeviceDescriptor> &device,
                                                         audio_session_t session,
-                                                        audio_source_t inputSource,
+                                                        const audio_attributes_t &attributes,
                                                         const audio_config_base_t *config,
                                                         audio_input_flags_t flags,
                                                         AudioMix *policyMix)
 {
     audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
-    audio_source_t halInputSource = inputSource;
+    audio_source_t halInputSource = attributes.source;
     bool isSoundTrigger = false;
 
-    if (inputSource == AUDIO_SOURCE_HOTWORD) {
+    if (attributes.source == AUDIO_SOURCE_HOTWORD) {
         ssize_t index = mSoundTriggerSessions.indexOfKey(session);
         if (index >= 0) {
             input = mSoundTriggerSessions.valueFor(session);
@@ -2019,7 +2071,7 @@
         } else {
             halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
         }
-    } else if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION &&
+    } else if (attributes.source == AUDIO_SOURCE_VOICE_COMMUNICATION &&
                audio_is_linear_pcm(config->format)) {
         flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_VOIP_TX);
     }
@@ -2330,14 +2382,15 @@
         ALOGE("%s for stream %d: invalid min %d or max %d", __func__, stream , indexMin, indexMax);
         return;
     }
-    mVolumeCurves->initStreamVolume(stream, indexMin, indexMax);
+    // @todo: the proposal is now to use XML to store the min and max volume indexes
+    getVolumeCurves(stream).initVolume(indexMin, indexMax);
 
     // initialize other private stream volumes which follow this one
     for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
         if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
             continue;
         }
-        mVolumeCurves->initStreamVolume((audio_stream_type_t)curStream, indexMin, indexMax);
+        getVolumeCurves((audio_stream_type_t)curStream).initVolume(indexMin, indexMax);
     }
 }
 
@@ -2345,13 +2398,13 @@
                                                   int index,
                                                   audio_devices_t device)
 {
-
+    auto &curves = getVolumeCurves(stream);
     // VOICE_CALL and BLUETOOTH_SCO stream have minVolumeIndex > 0 but
     // can be muted directly by an app that has MODIFY_PHONE_STATE permission.
-    if (((index < mVolumeCurves->getVolumeIndexMin(stream)) &&
+    if (((index < curves.getVolumeIndexMin()) &&
             !((stream == AUDIO_STREAM_VOICE_CALL || stream == AUDIO_STREAM_BLUETOOTH_SCO) &&
             index == 0)) ||
-            (index > mVolumeCurves->getVolumeIndexMax(stream))) {
+            (index > curves.getVolumeIndexMax())) {
         return BAD_VALUE;
     }
     if (!audio_is_output_device(device)) {
@@ -2359,7 +2412,7 @@
     }
 
     // Force max volume if stream cannot be muted
-    if (!mVolumeCurves->canBeMuted(stream)) index = mVolumeCurves->getVolumeIndexMax(stream);
+    if (!curves.canBeMuted()) index = curves.getVolumeIndexMax();
 
     ALOGV("setStreamVolumeIndex() stream %d, device %08x, index %d",
           stream, device, index);
@@ -2369,15 +2422,16 @@
         if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
             continue;
         }
-        mVolumeCurves->addCurrentVolumeIndex((audio_stream_type_t)curStream, device, index);
+        auto &curCurves = getVolumeCurves(static_cast<audio_stream_type_t>(curStream));
+        curCurves.addCurrentVolumeIndex(device, index);
     }
 
     // update volume on all outputs and streams matching the following:
     // - The requested stream (or a stream matching for volume control) is active on the output
-    // - The device (or devices) selected by the strategy corresponding to this stream includes
+    // - The device (or devices) selected by the engine for this stream includes
     // the requested device
     // - For non default requested device, currently selected device on the output is either the
-    // requested device or one of the devices selected by the strategy
+    // requested device or one of the devices selected by the engine for this stream
     // - For default requested device (AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME), apply volume only if
     // no specific device volume value exists for currently selected device.
     status_t status = NO_ERROR;
@@ -2388,12 +2442,12 @@
             if (!(streamsMatchForvolume(stream, (audio_stream_type_t)curStream))) {
                 continue;
             }
-            if (!(desc->isStreamActive((audio_stream_type_t)curStream) || isInCall())) {
+            if (!(desc->isActive(streamToVolumeSource((audio_stream_type_t)curStream)) || isInCall())) {
                 continue;
             }
-            routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
-            audio_devices_t curStreamDevice = Volume::getDeviceForVolume(getDeviceForStrategy(
-                    curStrategy, false /*fromCache*/));
+            audio_devices_t curStreamDevice = Volume::getDeviceForVolume(
+                        mEngine->getOutputDevicesForStream((audio_stream_type_t)curStream,
+                                                           false /*fromCache*/).types());
             if ((device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) &&
                     ((curStreamDevice & device) == 0)) {
                 continue;
@@ -2403,8 +2457,7 @@
                 curStreamDevice |= device;
                 applyVolume = (Volume::getDeviceForVolume(curDevice) & curStreamDevice) != 0;
             } else {
-                applyVolume = !mVolumeCurves->hasVolumeIndexForDevice(
-                        stream, curStreamDevice);
+                applyVolume = !curves.hasVolumeIndexForDevice(curStreamDevice);
             }
             // rescale index before applying to curStream as ranges may be different for
             // stream and curStream
@@ -2413,9 +2466,10 @@
                 //FIXME: workaround for truncated touch sounds
                 // delayed volume change for system stream to be removed when the problem is
                 // handled by system UI
-                status_t volStatus =
-                        checkAndSetVolume((audio_stream_type_t)curStream, idx, desc, curDevice,
-                            (stream == AUDIO_STREAM_SYSTEM) ? TOUCH_SOUND_FIXED_DELAY_MS : 0);
+                status_t volStatus = checkAndSetVolume(
+                            (audio_stream_type_t)curStream, idx, desc, curDevice,
+                            (stream == AUDIO_STREAM_SYSTEM) ?
+                                TOUCH_SOUND_FIXED_DELAY_MS : 0);
                 if (volStatus != NO_ERROR) {
                     status = volStatus;
                 }
@@ -2435,14 +2489,14 @@
     if (!audio_is_output_device(device)) {
         return BAD_VALUE;
     }
-    // if device is AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, return volume for device corresponding to
-    // the strategy the stream belongs to.
+    // if device is AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, return volume for device selected for this
+    // stream by the engine.
     if (device == AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
-        device = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/);
+        device = mEngine->getOutputDevicesForStream(stream, true /*fromCache*/).types();
     }
     device = Volume::getDeviceForVolume(device);
 
-    *index =  mVolumeCurves->getVolumeIndex(stream, device);
+    *index =  getVolumeCurves(stream).getVolumeIndex(device);
     ALOGV("getStreamVolumeIndex() stream %d device %08x index %d", stream, device, *index);
     return NO_ERROR;
 }
@@ -2458,8 +2512,8 @@
     // 3: The primary output
     // 4: the first output in the list
 
-    routing_strategy strategy = getStrategy(AUDIO_STREAM_MUSIC);
-    DeviceVector devices = getDevicesForStrategy(strategy, false /*fromCache*/);
+    DeviceVector devices = mEngine->getOutputDevicesForAttributes(
+                attributes_initializer(AUDIO_USAGE_MEDIA), nullptr, false /*fromCache*/);
     SortedVector<audio_io_handle_t> outputs = getOutputsForDevices(devices, mOutputs);
 
     if (outputs.size() == 0) {
@@ -2476,7 +2530,7 @@
 
         for (audio_io_handle_t output : outputs) {
             sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
-            if (activeOnly && !desc->isStreamActive(AUDIO_STREAM_MUSIC)) {
+            if (activeOnly && !desc->isActive(streamToVolumeSource(AUDIO_STREAM_MUSIC))) {
                 continue;
             }
             ALOGV("selectOutputForMusicEffects activeOnly %d output %d flags 0x%08x",
@@ -2531,7 +2585,9 @@
             return INVALID_OPERATION;
         }
     }
-    return mEffects.registerEffect(desc, io, strategy, session, id);
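+    // The raw strategy is no longer passed down: only whether the effect is attached to the music
+    // strategy (or to no strategy) is forwarded to the effect collection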
+    return mEffects.registerEffect(desc, io, session, id,
+                                   (strategy == streamToStrategy(AUDIO_STREAM_MUSIC) ||
+                                   strategy == PRODUCT_STRATEGY_NONE));
 }
 
 status_t AudioPolicyManager::unregisterEffect(int id)
@@ -2568,14 +2624,14 @@
         if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
             continue;
         }
-        active = mOutputs.isStreamActive((audio_stream_type_t)curStream, inPastMs);
+        active = mOutputs.isActive(streamToVolumeSource((audio_stream_type_t)curStream), inPastMs);
     }
     return active;
 }
 
 bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
 {
-    return mOutputs.isStreamActiveRemotely(stream, inPastMs);
+    return mOutputs.isActiveRemotely(streamToVolumeSource((audio_stream_type_t)stream), inPastMs);
 }
 
 bool AudioPolicyManager::isSourceActive(audio_source_t source) const
@@ -2621,18 +2677,24 @@
     // examine each mix's route type
     for (size_t i = 0; i < mixes.size(); i++) {
         AudioMix mix = mixes[i];
-        // we only support MIX_ROUTE_FLAG_LOOP_BACK or MIX_ROUTE_FLAG_RENDER, not the combination
-        if ((mix.mRouteFlags & MIX_ROUTE_FLAG_ALL) == MIX_ROUTE_FLAG_ALL) {
+        // Only capture of playback is allowed in LOOP_BACK & RENDER mode
+        if (is_mix_loopback_render(mix.mRouteFlags) && mix.mMixType != MIX_TYPE_PLAYERS) {
+            ALOGE("Unsupported Policy Mix %zu of %zu: "
+                  "Only capture of playback is allowed in LOOP_BACK & RENDER mode",
+                   i, mixes.size());
             res = INVALID_OPERATION;
             break;
         }
+        // LOOP_BACK and LOOP_BACK | RENDER have the same remote submix backend and are handled
+        // in the same way.
         if ((mix.mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
-            ALOGV("registerPolicyMixes() mix %zu of %zu is LOOP_BACK", i, mixes.size());
+            ALOGV("registerPolicyMixes() mix %zu of %zu is LOOP_BACK %d", i, mixes.size(),
+                  mix.mRouteFlags);
             if (rSubmixModule == 0) {
                 rSubmixModule = mHwModules.getModuleFromName(
                         AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX);
                 if (rSubmixModule == 0) {
-                    ALOGE(" Unable to find audio module for submix, aborting mix %zu registration",
+                    ALOGE("Unable to find audio module for submix, aborting mix %zu registration",
                             i);
                     res = INVALID_OPERATION;
                     break;
@@ -2647,7 +2709,7 @@
             }
 
             if (mPolicyMixes.registerMix(address, mix, 0 /*output desc*/) != NO_ERROR) {
-                ALOGE(" Error registering mix %zu for address %s", i, address.string());
+                ALOGE("Error registering mix %zu for address %s", i, address.string());
                 res = INVALID_OPERATION;
                 break;
             }
@@ -2691,6 +2753,8 @@
 
                 if (desc->supportedDevices().contains(device)) {
                     if (mPolicyMixes.registerMix(address, mix, desc) != NO_ERROR) {
+                        ALOGE("Could not register mix RENDER,  dev=0x%X addr=%s", type,
+                              address.string());
                         res = INVALID_OPERATION;
                     } else {
                         foundOutput = true;
@@ -2758,7 +2822,7 @@
             rSubmixModule->removeOutputProfile(address);
             rSubmixModule->removeInputProfile(address);
 
-        } if ((mix.mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
+        } else if ((mix.mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
             if (mPolicyMixes.unregisterMix(mix.mDeviceAddress) != NO_ERROR) {
                 res = INVALID_OPERATION;
                 continue;
@@ -2865,11 +2929,13 @@
     mHwModulesAll.dump(dst);
     mOutputs.dump(dst);
     mInputs.dump(dst);
-    mVolumeCurves->dump(dst);
     mEffects.dump(dst);
     mAudioPatches.dump(dst);
     mPolicyMixes.dump(dst);
     mAudioSources.dump(dst);
+
+    dst->appendFormat("\nPolicy Engine dump:\n");
+    mEngine->dump(dst);
 }
 
 status_t AudioPolicyManager::dump(int fd)
@@ -2956,7 +3022,7 @@
 bool AudioPolicyManager::isDirectOutputSupported(const audio_config_base_t& config,
                                                  const audio_attributes_t& attributes) {
     audio_output_flags_t output_flags = AUDIO_OUTPUT_FLAG_NONE;
-    audio_attributes_flags_to_audio_output_flags(attributes.flags, output_flags);
+    audio_flags_to_audio_output_flags(attributes.flags, &output_flags);
     sp<IOProfile> profile = getProfileForOutput(DeviceVector() /*ignore device */,
                                             config.sample_rate,
                                             config.format,
@@ -3461,27 +3527,27 @@
     }
 }
 
-void AudioPolicyManager::checkStrategyRoute(routing_strategy strategy,
-                                            audio_io_handle_t ouptutToSkip)
+void AudioPolicyManager::checkStrategyRoute(product_strategy_t ps, audio_io_handle_t ouptutToSkip)
 {
-    DeviceVector devices = getDevicesForStrategy(strategy, false /*fromCache*/);
+    // Take the first attributes of the product strategy as they are used to retrieve the routed
+    // device. All attributes within a strategy follow the same "routing strategy"
+    auto attributes = mEngine->getAllAttributesForProductStrategy(ps).front();
+    DeviceVector devices = mEngine->getOutputDevicesForAttributes(attributes, nullptr, false);
     SortedVector<audio_io_handle_t> outputs = getOutputsForDevices(devices, mOutputs);
     for (size_t j = 0; j < mOutputs.size(); j++) {
         if (mOutputs.keyAt(j) == ouptutToSkip) {
             continue;
         }
         sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(j);
-        if (!isStrategyActive(outputDesc, (routing_strategy)strategy)) {
+        if (!outputDesc->isStrategyActive(ps)) {
             continue;
         }
         // If the default device for this strategy is on another output mix,
         // invalidate all tracks in this strategy to force re connection.
         // Otherwise select new device on the output mix.
         if (outputs.indexOf(mOutputs.keyAt(j)) < 0) {
-            for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
-                if (getStrategy((audio_stream_type_t)stream) == strategy) {
-                    mpClientInterface->invalidateStream((audio_stream_type_t)stream);
-                }
+            for (auto stream : mEngine->getStreamTypesForProductStrategy(ps)) {
+                mpClientInterface->invalidateStream(stream);
             }
         } else {
             setOutputDevices(
@@ -3493,13 +3559,18 @@
 void AudioPolicyManager::clearSessionRoutes(uid_t uid)
 {
     // remove output routes associated with this uid
-    SortedVector<routing_strategy> affectedStrategies;
+    std::vector<product_strategy_t> affectedStrategies;
     for (size_t i = 0; i < mOutputs.size(); i++) {
         sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
         for (const auto& client : outputDesc->getClientIterable()) {
             if (client->hasPreferredDevice() && client->uid() == uid) {
                 client->setPreferredDeviceId(AUDIO_PORT_HANDLE_NONE);
-                affectedStrategies.add(getStrategy(client->stream()));
+                auto clientStrategy = client->strategy();
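+                // Do not record the same product strategy twice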
+                if (std::find(begin(affectedStrategies), end(affectedStrategies), clientStrategy) !=
+                        end(affectedStrategies)) {
+                    continue;
+                }
+                affectedStrategies.push_back(client->strategy());
             }
         }
     }
@@ -3549,7 +3620,7 @@
     *session = (audio_session_t)mpClientInterface->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
     *ioHandle = (audio_io_handle_t)mpClientInterface->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_INPUT);
     audio_attributes_t attr = { .source = AUDIO_SOURCE_HOTWORD };
-    *device = getDeviceAndMixForAttributes(attr)->type();
+    *device = mEngine->getInputDeviceForAttributes(attr)->type();
 
     return mSoundTriggerSessions.acquireSession(*session, *ioHandle);
 }
@@ -3589,10 +3660,11 @@
     struct audio_patch dummyPatch = {};
     sp<AudioPatch> patchDesc = new AudioPatch(&dummyPatch, uid);
 
-    sp<SourceClientDescriptor> sourceDesc =
-        new SourceClientDescriptor(*portId, uid, *attributes, patchDesc, srcDevice,
-                                   streamTypefromAttributesInt(attributes),
-                                   getStrategyForAttr(attributes));
+    sp<SourceClientDescriptor> sourceDesc = new SourceClientDescriptor(
+                *portId, uid, *attributes, patchDesc, srcDevice,
+                mEngine->getStreamTypeForAttributes(*attributes),
+                mEngine->getProductStrategyForAttributes(*attributes),
+                streamToVolumeSource(mEngine->getStreamTypeForAttributes(*attributes)));
 
     status_t status = connectAudioSource(sourceDesc);
     if (status == NO_ERROR) {
@@ -3609,12 +3681,12 @@
     disconnectAudioSource(sourceDesc);
 
     audio_attributes_t attributes = sourceDesc->attributes();
-    routing_strategy strategy = getStrategyForAttr(&attributes);
     audio_stream_type_t stream = sourceDesc->stream();
     sp<DeviceDescriptor> srcDevice = sourceDesc->srcDevice();
 
-    DeviceVector sinkDevices = getDevicesForStrategy(strategy, true);
-    ALOG_ASSERT(!sinkDevices.isEmpty(), "connectAudioSource(): no device found for strategy");
+    DeviceVector sinkDevices =
+            mEngine->getOutputDevicesForAttributes(attributes, nullptr, true);
+    ALOG_ASSERT(!sinkDevices.isEmpty(), "connectAudioSource(): no device found for attributes");
     sp<DeviceDescriptor> sinkDevice = sinkDevices.itemAt(0);
     ALOG_ASSERT(mAvailableOutputDevices.contains(sinkDevice), "%s: Device %s not available",
                 __FUNCTION__, sinkDevice->toString().c_str());
@@ -3638,8 +3710,12 @@
         config.format = sourceDesc->config().format;
         audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
         audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+        bool isRequestedDeviceForExclusiveUse = false;
+        std::vector<sp<SwAudioOutputDescriptor>> secondaryOutputs;
         getOutputForAttrInt(&resultAttr, &output, AUDIO_SESSION_NONE,
-                &attributes, &stream, sourceDesc->uid(), &config, &flags, &selectedDeviceId);
+                &attributes, &stream, sourceDesc->uid(), &config, &flags,
+                &selectedDeviceId, &isRequestedDeviceForExclusiveUse,
+                &secondaryOutputs);
         if (output == AUDIO_IO_HANDLE_NONE) {
             ALOGV("%s no output for device %08x", __FUNCTION__, sinkDevices.types());
             return INVALID_OPERATION;
@@ -3968,16 +4044,15 @@
     return NO_ERROR;
 }
 
-sp<SourceClientDescriptor> AudioPolicyManager::getSourceForStrategyOnOutput(
-        audio_io_handle_t output, routing_strategy strategy)
+sp<SourceClientDescriptor> AudioPolicyManager::getSourceForAttributesOnOutput(
+        audio_io_handle_t output, const audio_attributes_t &attr)
 {
     sp<SourceClientDescriptor> source;
     for (size_t i = 0; i < mAudioSources.size(); i++)  {
         sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueAt(i);
-        audio_attributes_t attributes = sourceDesc->attributes();
-        routing_strategy sourceStrategy = getStrategyForAttr(&attributes);
         sp<SwAudioOutputDescriptor> outputDesc = sourceDesc->swOutput().promote();
-        if (sourceStrategy == strategy && outputDesc != 0 && outputDesc->mIoHandle == output) {
+        if (followsSameRouting(attr, sourceDesc->attributes()) &&
+                               outputDesc != 0 && outputDesc->mIoHandle == output) {
             source = sourceDesc;
             break;
         }
@@ -4004,10 +4079,18 @@
     std::vector<const char*> fileNames;
     status_t ret;
 
-    if (property_get_bool("ro.bluetooth.a2dp_offload.supported", false) &&
-        property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
-        // A2DP offload supported but disabled: try to use special XML file
-        fileNames.push_back(AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME);
+    if (property_get_bool("ro.bluetooth.a2dp_offload.supported", false)) {
+        if (property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+            fileNames.push_back(AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME);
+        } else if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.enabled", false)) {
+            // The property persist.bluetooth.bluetooth_audio_hal.enabled is temporary only.
+            // The XML file AUDIO_POLICY_BLUETOOTH_HAL_ENABLED_XML_CONFIG_FILE_NAME, although it
+            // keeps the same name, must differ between the offload and non-offload cases in the
+            // device specific configuration file.
+            fileNames.push_back(AUDIO_POLICY_BLUETOOTH_HAL_ENABLED_XML_CONFIG_FILE_NAME);
+        }
+    } else if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.enabled", false)) {
+        fileNames.push_back(AUDIO_POLICY_BLUETOOTH_HAL_ENABLED_XML_CONFIG_FILE_NAME);
     }
     fileNames.push_back(AUDIO_POLICY_XML_CONFIG_FILE_NAME);
 
@@ -4032,9 +4115,7 @@
     mpClientInterface(clientInterface),
     mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
     mA2dpSuspended(false),
-    mVolumeCurves(new VolumeCurvesCollection()),
-    mConfig(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices,
-            mDefaultOutputDevice, static_cast<VolumeCurvesCollection*>(mVolumeCurves.get())),
+    mConfig(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices, mDefaultOutputDevice),
     mAudioPortGeneration(1),
     mBeaconMuteRefCount(0),
     mBeaconPlayingRefCount(0),
@@ -4068,8 +4149,6 @@
 }
 
 status_t AudioPolicyManager::initialize() {
-    mVolumeCurves->initializeVolumeCurves(getConfig().isSpeakerDrcEnabled());
-
     // Once policy config has been parsed, retrieve an instance of the engine and initialize it.
     audio_policy::EngineInstance *engineInstance = audio_policy::EngineInstance::getInstance();
     if (!engineInstance) {
@@ -4656,37 +4735,31 @@
 {
     ALOGV("closeOutput(%d)", output);
 
-    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
-    if (outputDesc == NULL) {
+    sp<SwAudioOutputDescriptor> closingOutput = mOutputs.valueFor(output);
+    if (closingOutput == NULL) {
         ALOGW("closeOutput() unknown output %d", output);
         return;
     }
-    mPolicyMixes.closeOutput(outputDesc);
+    mPolicyMixes.closeOutput(closingOutput);
 
     // look for duplicated outputs connected to the output being removed.
     for (size_t i = 0; i < mOutputs.size(); i++) {
-        sp<SwAudioOutputDescriptor> dupOutputDesc = mOutputs.valueAt(i);
-        if (dupOutputDesc->isDuplicated() &&
-                (dupOutputDesc->mOutput1 == outputDesc ||
-                dupOutputDesc->mOutput2 == outputDesc)) {
-            sp<SwAudioOutputDescriptor> outputDesc2;
-            if (dupOutputDesc->mOutput1 == outputDesc) {
-                outputDesc2 = dupOutputDesc->mOutput2;
-            } else {
-                outputDesc2 = dupOutputDesc->mOutput1;
-            }
+        sp<SwAudioOutputDescriptor> dupOutput = mOutputs.valueAt(i);
+        if (dupOutput->isDuplicated() &&
+                (dupOutput->mOutput1 == closingOutput || dupOutput->mOutput2 == closingOutput)) {
+            sp<SwAudioOutputDescriptor> remainingOutput =
+                dupOutput->mOutput1 == closingOutput ? dupOutput->mOutput2 : dupOutput->mOutput1;
             // As all active tracks on duplicated output will be deleted,
             // and as they were also referenced on the other output, the reference
             // count for their stream type must be adjusted accordingly on
             // the other output.
-            const bool wasActive = outputDesc2->isActive();
-            for (const auto &clientPair : dupOutputDesc->getActiveClients()) {
-                outputDesc2->changeStreamActiveCount(clientPair.first, -clientPair.second);
-            }
+            const bool wasActive = remainingOutput->isActive();
+            // Note: no-op on the closing output where all clients have already been set inactive
+            dupOutput->setAllClientsInactive();
             // stop() will be a no op if the output is still active but is needed in case all
             // active streams refcounts where cleared above
             if (wasActive) {
-                outputDesc2->stop();
+                remainingOutput->stop();
             }
             audio_io_handle_t duplicatedOutput = mOutputs.keyAt(i);
             ALOGV("closeOutput() closing also duplicated output %d", duplicatedOutput);
@@ -4698,7 +4771,7 @@
 
     nextAudioPortGeneration();
 
-    ssize_t index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
+    ssize_t index = mAudioPatches.indexOfKey(closingOutput->getPatchHandle());
     if (index >= 0) {
         sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
         (void) /*status_t status*/ mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
@@ -4706,7 +4779,7 @@
         mpClientInterface->onAudioPatchListUpdate();
     }
 
-    outputDesc->close();
+    closingOutput->close();
 
     removeOutput(output);
     mPreviousOutputs = mOutputs;
@@ -4785,6 +4858,7 @@
     // output is suspended before any tracks are moved to it
     checkA2dpSuspend();
     checkOutputForAllStrategies();
+    checkSecondaryOutputs();
     if (onOutputsChecked != nullptr && onOutputsChecked()) checkA2dpSuspend();
     updateDevicesAndOutputs();
     if (mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD) != 0) {
@@ -4792,16 +4866,25 @@
     }
 }
 
-void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy)
+bool AudioPolicyManager::followsSameRouting(const audio_attributes_t &lAttr,
+                                            const audio_attributes_t &rAttr) const
 {
-    DeviceVector oldDevices = getDevicesForStrategy(strategy, true /*fromCache*/);
-    DeviceVector newDevices = getDevicesForStrategy(strategy, false /*fromCache*/);
+    return mEngine->getProductStrategyForAttributes(lAttr) ==
+            mEngine->getProductStrategyForAttributes(rAttr);
+}
+
+void AudioPolicyManager::checkOutputForAttributes(const audio_attributes_t &attr)
+{
+    auto psId = mEngine->getProductStrategyForAttributes(attr);
+
+    DeviceVector oldDevices = mEngine->getOutputDevicesForAttributes(attr, 0, true /*fromCache*/);
+    DeviceVector newDevices = mEngine->getOutputDevicesForAttributes(attr, 0, false /*fromCache*/);
     SortedVector<audio_io_handle_t> srcOutputs = getOutputsForDevices(oldDevices, mPreviousOutputs);
     SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevices(newDevices, mOutputs);
 
     // also take into account external policy-related changes: add all outputs which are
     // associated with policies in the "before" and "after" output vectors
-    ALOGVV("checkOutputForStrategy(): policy related outputs");
+    ALOGVV("%s(): policy related outputs", __func__);
     for (size_t i = 0 ; i < mPreviousOutputs.size() ; i++) {
         const sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueAt(i);
         if (desc != 0 && desc->mPolicyMix != NULL) {
@@ -4817,7 +4900,7 @@
         }
     }
 
-    if (!dstOutputs.isEmpty() && srcOutputs != dstOutputs) {
+    if (srcOutputs != dstOutputs) {
         // get maximum latency of all source outputs to determine the minimum mute time guaranteeing
         // audio from invalidated tracks will be rendered when unmuting
         uint32_t maxLatency = 0;
@@ -4828,50 +4911,63 @@
             }
         }
         ALOGV_IF(!(srcOutputs.isEmpty() || dstOutputs.isEmpty()),
-              "%s: strategy %d, moving from output %s to output %s", __func__, strategy,
+              "%s: strategy %d, moving from output %s to output %s", __func__, psId,
               std::to_string(srcOutputs[0]).c_str(),
               std::to_string(dstOutputs[0]).c_str());
         // mute strategy while moving tracks from one output to another
         for (audio_io_handle_t srcOut : srcOutputs) {
             sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
-            if (desc != 0 && isStrategyActive(desc, strategy)) {
-                setStrategyMute(strategy, true, desc);
-                setStrategyMute(strategy, false, desc, maxLatency * LATENCY_MUTE_FACTOR,
+            if (desc != 0 && desc->isStrategyActive(psId)) {
+                setStrategyMute(psId, true, desc);
+                setStrategyMute(psId, false, desc, maxLatency * LATENCY_MUTE_FACTOR,
                                 newDevices.types());
             }
-            sp<SourceClientDescriptor> source =
-                    getSourceForStrategyOnOutput(srcOut, strategy);
+            sp<SourceClientDescriptor> source = getSourceForAttributesOnOutput(srcOut, attr);
             if (source != 0){
                 connectAudioSource(source);
             }
         }
 
-        // Move effects associated to this strategy from previous output to new output
-        if (strategy == STRATEGY_MEDIA) {
+        // Move effects associated to this stream from previous output to new output
+        if (followsSameRouting(attr, attributes_initializer(AUDIO_USAGE_MEDIA))) {
             selectOutputForMusicEffects();
         }
-        // Move tracks associated to this strategy from previous output to new output
-        for (int i = 0; i < AUDIO_STREAM_FOR_POLICY_CNT; i++) {
-            if (getStrategy((audio_stream_type_t)i) == strategy) {
-                mpClientInterface->invalidateStream((audio_stream_type_t)i);
-            }
+        // Move tracks associated to this stream (and linked) from previous output to new output
+        for (auto stream : mEngine->getStreamTypesForProductStrategy(psId)) {
+            mpClientInterface->invalidateStream(stream);
         }
     }
 }
 
 void AudioPolicyManager::checkOutputForAllStrategies()
 {
-    if (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
-        checkOutputForStrategy(STRATEGY_ENFORCED_AUDIBLE);
-    checkOutputForStrategy(STRATEGY_PHONE);
-    if (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
-        checkOutputForStrategy(STRATEGY_ENFORCED_AUDIBLE);
-    checkOutputForStrategy(STRATEGY_SONIFICATION);
-    checkOutputForStrategy(STRATEGY_SONIFICATION_RESPECTFUL);
-    checkOutputForStrategy(STRATEGY_ACCESSIBILITY);
-    checkOutputForStrategy(STRATEGY_MEDIA);
-    checkOutputForStrategy(STRATEGY_DTMF);
-    checkOutputForStrategy(STRATEGY_REROUTING);
+    for (const auto &strategy : mEngine->getOrderedProductStrategies()) {
+        auto attributes = mEngine->getAllAttributesForProductStrategy(strategy).front();
+        checkOutputForAttributes(attributes);
+    }
+}
+
+void AudioPolicyManager::checkSecondaryOutputs() {
+    std::set<audio_stream_type_t> streamsToInvalidate;
+    for (size_t i = 0; i < mOutputs.size(); i++) {
+        const sp<SwAudioOutputDescriptor>& outputDescriptor = mOutputs[i];
+        for (const sp<TrackClientDescriptor>& client : outputDescriptor->getClientIterable()) {
+            // FIXME code duplicated from getOutputForAttrInt
+            sp<SwAudioOutputDescriptor> desc;
+            std::vector<sp<SwAudioOutputDescriptor>> secondaryDescs;
+            mPolicyMixes.getOutputForAttr(client->attributes(), client->uid(), desc,
+                                          &secondaryDescs);
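+            // Invalidate the client's stream when the secondary outputs computed from the policy
+            // mixes no longer match the ones currently attached to the client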
+            if (!std::equal(client->getSecondaryOutputs().begin(),
+                            client->getSecondaryOutputs().end(),
+                            secondaryDescs.begin(), secondaryDescs.end())) {
+                streamsToInvalidate.insert(client->stream());
+            }
+        }
+    }
+    for (audio_stream_type_t stream : streamsToInvalidate) {
+        ALOGD("%s Invalidate stream %d due to secondary output change", __func__, stream);
+        mpClientInterface->invalidateStream(stream);
+    }
 }
 
 void AudioPolicyManager::checkA2dpSuspend()
@@ -4924,38 +5020,6 @@
     }
 }
 
-template <class IoDescriptor, class Filter>
-sp<DeviceDescriptor> AudioPolicyManager::findPreferredDevice(
-        IoDescriptor& desc, Filter filter, bool& active, const DeviceVector& devices)
-{
-    auto activeClients = desc->clientsList(true /*activeOnly*/);
-    auto activeClientsWithRoute =
-        desc->clientsList(true /*activeOnly*/, filter, true /*preferredDevice*/);
-    active = activeClients.size() > 0;
-    if (active && activeClients.size() == activeClientsWithRoute.size()) {
-        return devices.getDeviceFromId(activeClientsWithRoute[0]->preferredDeviceId());
-    }
-    return nullptr;
-}
-
-template <class IoCollection, class Filter>
-sp<DeviceDescriptor> AudioPolicyManager::findPreferredDevice(
-        IoCollection& ioCollection, Filter filter, const DeviceVector& devices)
-{
-    sp<DeviceDescriptor> device;
-    for (size_t i = 0; i < ioCollection.size(); i++) {
-        auto desc = ioCollection.valueAt(i);
-        bool active;
-        sp<DeviceDescriptor> curDevice = findPreferredDevice(desc, filter, active, devices);
-        if (active && curDevice == nullptr) {
-            return nullptr;
-        } else if (curDevice != nullptr) {
-            device = curDevice;
-        }
-    }
-    return device;
-}
-
 DeviceVector AudioPolicyManager::getNewOutputDevices(const sp<SwAudioOutputDescriptor>& outputDesc,
                                                      bool fromCache)
 {
@@ -4975,7 +5039,7 @@
     // input: a specific app can not force routing for other apps by setting a preferred device.
     bool active; // unused
     sp<DeviceDescriptor> device =
-        findPreferredDevice(outputDesc, STRATEGY_NONE, active, mAvailableOutputDevices);
+        findPreferredDevice(outputDesc, PRODUCT_STRATEGY_NONE, active, mAvailableOutputDevices);
     if (device != nullptr) {
         return DeviceVector(device);
     }
@@ -4987,54 +5051,22 @@
         return DeviceVector(device);
     }
 
-    // check the following by order of priority to request a routing change if necessary:
-    // 1: the strategy enforced audible is active and enforced on the output:
-    //      use device for strategy enforced audible
-    // 2: we are in call or the strategy phone is active on the output:
-    //      use device for strategy phone
-    // 3: the strategy sonification is active on the output:
-    //      use device for strategy sonification
-    // 4: the strategy for enforced audible is active but not enforced on the output:
-    //      use the device for strategy enforced audible
-    // 5: the strategy accessibility is active on the output:
-    //      use device for strategy accessibility
-    // 6: the strategy "respectful" sonification is active on the output:
-    //      use device for strategy "respectful" sonification
-    // 7: the strategy media is active on the output:
-    //      use device for strategy media
-    // 8: the strategy DTMF is active on the output:
-    //      use device for strategy DTMF
-    // 9: the strategy for beacon, a.k.a. "transmitted through speaker" is active on the output:
-    //      use device for strategy t-t-s
+    for (const auto &productStrategy : mEngine->getOrderedProductStrategies()) {
+        StreamTypeVector streams = mEngine->getStreamTypesForProductStrategy(productStrategy);
+        auto attr = mEngine->getAllAttributesForProductStrategy(productStrategy).front();
 
-    // FIXME: extend use of isStrategyActiveOnSameModule() to all strategies
-    // with a refined rule considering mutually exclusive devices (using same backend)
-    // as opposed to all streams on the same audio HAL module.
-    if (isStrategyActive(outputDesc, STRATEGY_ENFORCED_AUDIBLE) &&
-        mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
-        devices = getDevicesForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
-    } else if (isInCall() ||
-               isStrategyActiveOnSameModule(outputDesc, STRATEGY_PHONE)) {
-        devices = getDevicesForStrategy(STRATEGY_PHONE, fromCache);
-    } else if (isStrategyActiveOnSameModule(outputDesc, STRATEGY_SONIFICATION)) {
-        devices = getDevicesForStrategy(STRATEGY_SONIFICATION, fromCache);
-    } else if (isStrategyActive(outputDesc, STRATEGY_ENFORCED_AUDIBLE)) {
-        devices = getDevicesForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
-    } else if (isStrategyActive(outputDesc, STRATEGY_ACCESSIBILITY)) {
-        devices = getDevicesForStrategy(STRATEGY_ACCESSIBILITY, fromCache);
-    } else if (isStrategyActive(outputDesc, STRATEGY_SONIFICATION_RESPECTFUL)) {
-        devices = getDevicesForStrategy(STRATEGY_SONIFICATION_RESPECTFUL, fromCache);
-    } else if (isStrategyActive(outputDesc, STRATEGY_MEDIA)) {
-        devices = getDevicesForStrategy(STRATEGY_MEDIA, fromCache);
-    } else if (isStrategyActive(outputDesc, STRATEGY_DTMF)) {
-        devices = getDevicesForStrategy(STRATEGY_DTMF, fromCache);
-    } else if (isStrategyActive(outputDesc, STRATEGY_TRANSMITTED_THROUGH_SPEAKER)) {
-        devices = getDevicesForStrategy(STRATEGY_TRANSMITTED_THROUGH_SPEAKER, fromCache);
-    } else if (isStrategyActive(outputDesc, STRATEGY_REROUTING)) {
-        devices = getDevicesForStrategy(STRATEGY_REROUTING, fromCache);
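+        // Strategies are evaluated in priority order: take the devices of the first strategy
+        // found in use (in call for voice, active on the same HW module for voice/alarm, or
+        // simply active on this output)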
+        if ((hasVoiceStream(streams) &&
+             (isInCall() || mOutputs.isStrategyActiveOnSameModule(productStrategy, outputDesc))) ||
+             (hasStream(streams, AUDIO_STREAM_ALARM) &&
+                mOutputs.isStrategyActiveOnSameModule(productStrategy, outputDesc)) ||
+                outputDesc->isStrategyActive(productStrategy)) {
+            // Device retrieval for voice DL is done on the primary output profile; the route
+            // cannot be checked (it would force modifying the configuration file for this profile)
+            devices = mEngine->getOutputDevicesForAttributes(attr, nullptr, fromCache);
+            break;
+        }
     }
-
-    ALOGV("getNewOutputDevice() selected devices %s", devices.toString().c_str());
+    ALOGV("%s selected devices %s", __func__, devices.toString().c_str());
     return devices;
 }
 
@@ -5068,7 +5100,7 @@
         attributes.source = AUDIO_SOURCE_VOICE_COMMUNICATION;
     }
     if (attributes.source != AUDIO_SOURCE_DEFAULT) {
-        device = getDeviceAndMixForAttributes(attributes);
+        device = mEngine->getInputDeviceForAttributes(attributes);
     }
 
     return device;
@@ -5079,30 +5111,26 @@
     return (stream1 == stream2);
 }
 
-uint32_t AudioPolicyManager::getStrategyForStream(audio_stream_type_t stream) {
-    return (uint32_t)getStrategy(stream);
-}
-
 audio_devices_t AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) {
     // By checking the range of stream before calling getStrategy, we avoid
-    // getStrategy's behavior for invalid streams.  getStrategy would do a ALOGE
-    // and then return STRATEGY_MEDIA, but we want to return the empty set.
-    if (stream < (audio_stream_type_t) 0 || stream >= AUDIO_STREAM_PUBLIC_CNT) {
+    // getOutputDevicesForStream's behavior for invalid streams.
+    // engine's getOutputDevicesForStream would fall back to its default behavior (most probably
+    // the device for the music stream), but we want to return the empty set.
+    if (stream < AUDIO_STREAM_MIN || stream >= AUDIO_STREAM_PUBLIC_CNT) {
         return AUDIO_DEVICE_NONE;
     }
     DeviceVector activeDevices;
     DeviceVector devices;
-    for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
-        if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
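+    // Merge the devices selected by the engine for every stream sharing volume with the requested
+    // stream, and keep track of which of them are currently active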
+    for (audio_stream_type_t curStream = AUDIO_STREAM_MIN; curStream < AUDIO_STREAM_PUBLIC_CNT;
+         curStream = (audio_stream_type_t) (curStream + 1)) {
+        if (!streamsMatchForvolume(stream, curStream)) {
             continue;
         }
-        routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
-        DeviceVector curDevices =
-                getDevicesForStrategy((routing_strategy)curStrategy, false /*fromCache*/);
+        DeviceVector curDevices = mEngine->getOutputDevicesForStream(curStream, false/*fromCache*/);
         devices.merge(curDevices);
         for (audio_io_handle_t output : getOutputsForDevices(curDevices, mOutputs)) {
             sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
-            if (outputDesc->isStreamActive((audio_stream_type_t)curStream)) {
+            if (outputDesc->isActive(streamToVolumeSource((audio_stream_type_t)curStream))) {
                 activeDevices.merge(outputDesc->devices());
             }
         }
@@ -5123,28 +5151,10 @@
     return devices.types();
 }
 
-routing_strategy AudioPolicyManager::getStrategy(audio_stream_type_t stream) const
-{
-    ALOG_ASSERT(stream != AUDIO_STREAM_PATCH,"getStrategy() called for AUDIO_STREAM_PATCH");
-    return mEngine->getStrategyForStream(stream);
-}
-
-routing_strategy AudioPolicyManager::getStrategyForAttr(const audio_attributes_t *attr) {
-    // flags to strategy mapping
-    if ((attr->flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
-        return STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
-    }
-    if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
-        return STRATEGY_ENFORCED_AUDIBLE;
-    }
-    // usage to strategy mapping
-    return mEngine->getStrategyForUsage(attr->usage);
-}
-
 void AudioPolicyManager::handleNotificationRoutingForStream(audio_stream_type_t stream) {
     switch(stream) {
     case AUDIO_STREAM_MUSIC:
-        checkOutputForStrategy(STRATEGY_SONIFICATION_RESPECTFUL);
+        checkOutputForAttributes(attributes_initializer(AUDIO_USAGE_NOTIFICATION));
         updateDevicesAndOutputs();
         break;
     default:
@@ -5211,33 +5221,14 @@
     return 0;
 }
 
-DeviceVector AudioPolicyManager::getDevicesForStrategy(routing_strategy strategy, bool fromCache)
-{
-    // Honor explicit routing requests only if all active clients have a preferred route in which
-    // case the last active client route is used
-    sp<DeviceDescriptor> device = findPreferredDevice(mOutputs, strategy, mAvailableOutputDevices);
-    if (device != nullptr) {
-        return DeviceVector(device);
-    }
-
-    if (fromCache) {
-        ALOGVV("%s from cache strategy %d, device %s", __func__, strategy,
-               mDevicesForStrategy[strategy].toString().c_str());
-        return mDevicesForStrategy[strategy];
-    }
-    return mAvailableOutputDevices.getDevicesFromTypeMask(mEngine->getDeviceForStrategy(strategy));
-}
-
 void AudioPolicyManager::updateDevicesAndOutputs()
 {
-    for (int i = 0; i < NUM_STRATEGIES; i++) {
-        mDevicesForStrategy[i] = getDevicesForStrategy((routing_strategy)i, false /*fromCache*/);
-    }
+    mEngine->updateDeviceSelectionCache();
     mPreviousOutputs = mOutputs;
 }
 
 uint32_t AudioPolicyManager::checkDeviceMuteStrategies(const sp<AudioOutputDescriptor>& outputDesc,
-                                                       audio_devices_t prevDeviceType,
+                                                       const DeviceVector &prevDevices,
                                                        uint32_t delayMs)
 {
     // mute/unmute strategies using an incompatible device combination
@@ -5248,22 +5239,24 @@
     }
 
     uint32_t muteWaitMs = 0;
-    audio_devices_t deviceType = outputDesc->devices().types();
-    bool shouldMute = outputDesc->isActive() && (popcount(deviceType) >= 2);
+    DeviceVector devices = outputDesc->devices();
+    bool shouldMute = outputDesc->isActive() && (devices.size() >= 2);
 
-    for (size_t i = 0; i < NUM_STRATEGIES; i++) {
-        audio_devices_t curDeviceType =
-                getDeviceForStrategy((routing_strategy)i, false /*fromCache*/);
-        curDeviceType = curDeviceType & outputDesc->supportedDevices().types();
-        bool mute = shouldMute && (curDeviceType & deviceType) && (curDeviceType != deviceType);
+    auto productStrategies = mEngine->getOrderedProductStrategies();
+    for (const auto &productStrategy : productStrategies) {
+        auto attributes = mEngine->getAllAttributesForProductStrategy(productStrategy).front();
+        DeviceVector curDevices =
+                mEngine->getOutputDevicesForAttributes(attributes, nullptr, false/*fromCache*/);
+        curDevices = curDevices.filter(outputDesc->supportedDevices());
+        bool mute = shouldMute && curDevices.containsAtLeastOne(devices) && curDevices != devices;
         bool doMute = false;
 
-        if (mute && !outputDesc->mStrategyMutedByDevice[i]) {
+        if (mute && !outputDesc->isStrategyMutedByDevice(productStrategy)) {
             doMute = true;
-            outputDesc->mStrategyMutedByDevice[i] = true;
-        } else if (!mute && outputDesc->mStrategyMutedByDevice[i]){
+            outputDesc->setStrategyMutedByDevice(productStrategy, true);
+        } else if (!mute && outputDesc->isStrategyMutedByDevice(productStrategy)) {
             doMute = true;
-            outputDesc->mStrategyMutedByDevice[i] = false;
+            outputDesc->setStrategyMutedByDevice(productStrategy, false);
         }
         if (doMute) {
             for (size_t j = 0; j < mOutputs.size(); j++) {
@@ -5272,10 +5265,10 @@
                 if (!desc->supportedDevices().containsAtLeastOne(outputDesc->supportedDevices())) {
                     continue;
                 }
-                ALOGVV("checkDeviceMuteStrategies() %s strategy %zu (curDevice %04x)",
-                      mute ? "muting" : "unmuting", i, curDeviceType);
-                setStrategyMute((routing_strategy)i, mute, desc, mute ? 0 : delayMs);
-                if (isStrategyActive(desc, (routing_strategy)i)) {
+                ALOGVV("%s() %s (curDevice %s)", __func__,
+                      mute ? "muting" : "unmuting", curDevices.toString().c_str());
+                setStrategyMute(productStrategy, mute, desc, mute ? 0 : delayMs);
+                if (desc->isStrategyActive(productStrategy)) {
                     if (mute) {
                         // FIXME: should not need to double latency if volume could be applied
                         // immediately by the audioflinger mixer. We must account for the delay
@@ -5293,7 +5286,7 @@
 
     // temporary mute output if device selection changes to avoid volume bursts due to
     // different per device volumes
-    if (outputDesc->isActive() && (deviceType != prevDeviceType)) {
+    if (outputDesc->isActive() && (devices != prevDevices)) {
         uint32_t tempMuteWaitMs = outputDesc->latency() * 2;
         // temporary mute duration is conservatively set to 4 times the reported latency
         uint32_t tempMuteDurationMs = outputDesc->latency() * 4;
@@ -5301,13 +5294,13 @@
             muteWaitMs = tempMuteWaitMs;
         }
 
-        for (size_t i = 0; i < NUM_STRATEGIES; i++) {
-            if (isStrategyActive(outputDesc, (routing_strategy)i)) {
+        for (const auto &productStrategy : productStrategies) {
+            if (outputDesc->isStrategyActive(productStrategy)) {
                 // make sure that we do not start the temporary mute period too early in case of
                 // delayed device change
-                setStrategyMute((routing_strategy)i, true, outputDesc, delayMs);
-                setStrategyMute((routing_strategy)i, false, outputDesc,
-                                delayMs + tempMuteDurationMs, deviceType);
+                setStrategyMute(productStrategy, true, outputDesc, delayMs);
+                setStrategyMute(productStrategy, false, outputDesc, delayMs + tempMuteDurationMs,
+                                devices.types());
             }
         }
     }
@@ -5341,16 +5334,17 @@
 
     // filter devices according to output selected
     DeviceVector filteredDevices = outputDesc->filterSupportedDevices(devices);
+    DeviceVector prevDevices = outputDesc->devices();
 
     // no need to proceed if new device is not AUDIO_DEVICE_NONE and not supported by current
-    // output profile
-    if (!devices.isEmpty() && filteredDevices.isEmpty()) {
+    // output profile, or if the new device is not supported AND the previous device(s) are still
+    // available (otherwise the device must be reset on the output)
+    if (!devices.isEmpty() && filteredDevices.isEmpty() &&
+            !mAvailableOutputDevices.filter(prevDevices).empty()) {
         ALOGV("%s: unsupported device %s for output", __func__, devices.toString().c_str());
         return 0;
     }
 
-    DeviceVector prevDevices = outputDesc->devices();
-
     ALOGV("setOutputDevices() prevDevice %s", prevDevices.toString().c_str());
 
     if (!filteredDevices.isEmpty()) {
@@ -5359,7 +5353,7 @@
 
     // if the outputs are not materially active, there is no need to mute.
     if (requiresMuteCheck) {
-        muteWaitMs = checkDeviceMuteStrategies(outputDesc, prevDevices.types(), delayMs);
+        muteWaitMs = checkDeviceMuteStrategies(outputDesc, prevDevices, delayMs);
     } else {
         ALOGV("%s: suppressing checkDeviceMuteStrategies", __func__);
         muteWaitMs = 0;
@@ -5531,41 +5525,12 @@
     return NULL;
 }
 
-sp<DeviceDescriptor> AudioPolicyManager::getDeviceAndMixForAttributes(
-        const audio_attributes_t &attributes, AudioMix **policyMix)
-{
-    // Honor explicit routing requests only if all active clients have a preferred route in which
-    // case the last active client route is used
-    sp<DeviceDescriptor> device =
-        findPreferredDevice(mInputs, attributes.source, mAvailableInputDevices);
-    if (device != nullptr) {
-        return device;
-    }
-
-    sp<DeviceDescriptor> selectedDeviceFromMix =
-           mPolicyMixes.getDeviceAndMixForInputSource(attributes.source, mAvailableInputDevices,
-                                                      policyMix);
-    return (selectedDeviceFromMix != nullptr) ?
-           selectedDeviceFromMix : getDeviceForAttributes(attributes);
-}
-
-sp<DeviceDescriptor> AudioPolicyManager::getDeviceForAttributes(const audio_attributes_t &attributes)
-{
-    audio_devices_t device = mEngine->getDeviceForInputSource(attributes.source);
-    if (attributes.source == AUDIO_SOURCE_REMOTE_SUBMIX &&
-                strncmp(attributes.tags, "addr=", strlen("addr=")) == 0) {
-        return mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
-                                                String8(attributes.tags + strlen("addr=")),
-                                                AUDIO_FORMAT_DEFAULT);
-    }
-    return mAvailableInputDevices.getDevice(device, String8(), AUDIO_FORMAT_DEFAULT);
-}
-
 float AudioPolicyManager::computeVolume(audio_stream_type_t stream,
                                         int index,
                                         audio_devices_t device)
 {
-    float volumeDB = mVolumeCurves->volIndexToDb(stream, Volume::getDeviceCategory(device), index);
+    auto &curves = getVolumeCurves(stream);
+    float volumeDB = curves.volIndexToDb(Volume::getDeviceCategory(device), index);
 
     // handle the case of accessibility active while a ringtone is playing: if the ringtone is much
     // louder than the accessibility prompt, the prompt cannot be heard, thus masking the touch
@@ -5580,7 +5545,7 @@
 
     // in-call: always cap volume by voice volume + some low headroom
     if ((stream != AUDIO_STREAM_VOICE_CALL) &&
-            (isInCall() || mOutputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL))) {
+            (isInCall() || mOutputs.isActiveLocally(streamToVolumeSource(AUDIO_STREAM_VOICE_CALL)))) {
         switch (stream) {
         case AUDIO_STREAM_SYSTEM:
         case AUDIO_STREAM_RING:
@@ -5590,8 +5555,7 @@
         case AUDIO_STREAM_ENFORCED_AUDIBLE:
         case AUDIO_STREAM_DTMF:
         case AUDIO_STREAM_ACCESSIBILITY: {
-            int voiceVolumeIndex =
-                mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, device);
+            int voiceVolumeIndex = getVolumeCurves(AUDIO_STREAM_VOICE_CALL).getVolumeIndex(device);
             const float maxVoiceVolDb =
                 computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, device)
                 + IN_CALL_EARPIECE_HEADROOM_DB;
@@ -5613,30 +5577,31 @@
     // speaker is part of the select devices
     // - if music is playing, always limit the volume to current music volume,
     // with a minimum threshold at -36dB so that notification is always perceived.
-    const routing_strategy stream_strategy = getStrategy(stream);
     if ((device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP |
             AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
             AUDIO_DEVICE_OUT_WIRED_HEADSET |
             AUDIO_DEVICE_OUT_WIRED_HEADPHONE |
             AUDIO_DEVICE_OUT_USB_HEADSET |
             AUDIO_DEVICE_OUT_HEARING_AID)) &&
-        ((stream_strategy == STRATEGY_SONIFICATION)
-                || (stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL)
+        ((stream == AUDIO_STREAM_ALARM || stream == AUDIO_STREAM_RING)
+                || (stream == AUDIO_STREAM_NOTIFICATION)
                 || (stream == AUDIO_STREAM_SYSTEM)
-                || ((stream_strategy == STRATEGY_ENFORCED_AUDIBLE) &&
-                    (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) &&
-            mVolumeCurves->canBeMuted(stream)) {
+                || ((stream == AUDIO_STREAM_ENFORCED_AUDIBLE) &&
+                    (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) ==
+                     AUDIO_POLICY_FORCE_NONE))) &&
+            getVolumeCurves(stream).canBeMuted()) {
         // when the phone is ringing we must consider that music could have been paused just before
         // by the music application and behave as if music was active if the last music track was
         // just stopped
         if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY) ||
                 mLimitRingtoneVolume) {
             volumeDB += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
-            audio_devices_t musicDevice = getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/);
+            audio_devices_t musicDevice =
+                    mEngine->getOutputDevicesForAttributes(attributes_initializer(AUDIO_USAGE_MEDIA),
+                                                           nullptr, true /*fromCache*/).types();
             float musicVolDB = computeVolume(AUDIO_STREAM_MUSIC,
-                                             mVolumeCurves->getVolumeIndex(AUDIO_STREAM_MUSIC,
-                                                                              musicDevice),
-                                             musicDevice);
+                                   getVolumeCurves(AUDIO_STREAM_MUSIC).getVolumeIndex(musicDevice),
+                                   musicDevice);
             float minVolDB = (musicVolDB > SONIFICATION_HEADSET_VOLUME_MIN_DB) ?
                     musicVolDB : SONIFICATION_HEADSET_VOLUME_MIN_DB;
             if (volumeDB > minVolDB) {
@@ -5656,7 +5621,7 @@
                 }
             }
         } else if ((Volume::getDeviceForVolume(device) != AUDIO_DEVICE_OUT_SPEAKER) ||
-                stream_strategy != STRATEGY_SONIFICATION) {
+                (stream != AUDIO_STREAM_ALARM && stream != AUDIO_STREAM_RING)) {
             volumeDB += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
         }
     }
@@ -5671,10 +5636,12 @@
     if (srcStream == dstStream) {
         return srcIndex;
     }
-    float minSrc = (float)mVolumeCurves->getVolumeIndexMin(srcStream);
-    float maxSrc = (float)mVolumeCurves->getVolumeIndexMax(srcStream);
-    float minDst = (float)mVolumeCurves->getVolumeIndexMin(dstStream);
-    float maxDst = (float)mVolumeCurves->getVolumeIndexMax(dstStream);
+    auto &srcCurves = getVolumeCurves(srcStream);
+    auto &dstCurves = getVolumeCurves(dstStream);
+    float minSrc = (float)srcCurves.getVolumeIndexMin();
+    float maxSrc = (float)srcCurves.getVolumeIndexMax();
+    float minDst = (float)dstCurves.getVolumeIndexMin();
+    float maxDst = (float)dstCurves.getVolumeIndexMax();
 
     // preserve mute request or correct range
     if (srcIndex < minSrc) {
@@ -5689,16 +5656,15 @@
 }
 
 status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream,
-                                                   int index,
-                                                   const sp<AudioOutputDescriptor>& outputDesc,
-                                                   audio_devices_t device,
-                                                   int delayMs,
-                                                   bool force)
+                                               int index,
+                                               const sp<AudioOutputDescriptor>& outputDesc,
+                                               audio_devices_t device,
+                                               int delayMs,
+                                               bool force)
 {
     // do not change actual stream volume if the stream is muted
-    if (outputDesc->mMuteCount[stream] != 0) {
-        ALOGVV("checkAndSetVolume() stream %d muted count %d",
-              stream, outputDesc->mMuteCount[stream]);
+    if (outputDesc->isMuted(streamToVolumeSource(stream))) {
+        ALOGVV("%s() stream %d muted count %d", __func__, stream, outputDesc->getMuteCount(stream));
         return NO_ERROR;
     }
     audio_policy_forced_cfg_t forceUseForComm =
@@ -5730,7 +5696,7 @@
         float voiceVolume;
         // Force voice volume to max for bluetooth SCO as volume is managed by the headset
         if (stream == AUDIO_STREAM_VOICE_CALL) {
-            voiceVolume = (float)index/(float)mVolumeCurves->getVolumeIndexMax(stream);
+            voiceVolume = (float)index/(float)getVolumeCurves(stream).getVolumeIndexMax();
         } else {
             voiceVolume = 1.0;
         }
@@ -5753,7 +5719,7 @@
 
     for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
         checkAndSetVolume((audio_stream_type_t)stream,
-                          mVolumeCurves->getVolumeIndex((audio_stream_type_t)stream, device),
+                          getVolumeCurves((audio_stream_type_t)stream).getVolumeIndex(device),
                           outputDesc,
                           device,
                           delayMs,
@@ -5761,18 +5727,16 @@
     }
 }
 
-void AudioPolicyManager::setStrategyMute(routing_strategy strategy,
-                                             bool on,
-                                             const sp<AudioOutputDescriptor>& outputDesc,
-                                             int delayMs,
-                                             audio_devices_t device)
+void AudioPolicyManager::setStrategyMute(product_strategy_t strategy,
+                                         bool on,
+                                         const sp<AudioOutputDescriptor>& outputDesc,
+                                         int delayMs,
+                                         audio_devices_t device)
 {
-    ALOGVV("setStrategyMute() strategy %d, mute %d, output ID %d",
-           strategy, on, outputDesc->getId());
-    for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
-        if (getStrategy((audio_stream_type_t)stream) == strategy) {
-            setStreamMute((audio_stream_type_t)stream, on, outputDesc, delayMs, device);
-        }
+    for (auto stream: mEngine->getStreamTypesForProductStrategy(strategy)) {
+        ALOGVV("%s() stream %d, mute %d, output ID %d", __FUNCTION__, stream, on,
+               outputDesc->getId());
+        setStreamMute(stream, on, outputDesc, delayMs, device);
     }
 }
 
@@ -5787,26 +5751,26 @@
     }
 
     ALOGVV("setStreamMute() stream %d, mute %d, mMuteCount %d device %04x",
-          stream, on, outputDesc->mMuteCount[stream], device);
-
+          stream, on, outputDesc->getMuteCount(stream), device);
+    auto &curves = getVolumeCurves(stream);
     if (on) {
-        if (outputDesc->mMuteCount[stream] == 0) {
-            if (mVolumeCurves->canBeMuted(stream) &&
+        if (!outputDesc->isMuted(streamToVolumeSource(stream))) {
+            if (curves.canBeMuted() &&
                     ((stream != AUDIO_STREAM_ENFORCED_AUDIBLE) ||
                      (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) {
                 checkAndSetVolume(stream, 0, outputDesc, device, delayMs);
             }
         }
         // increment mMuteCount after calling checkAndSetVolume() so that volume change is not ignored
-        outputDesc->mMuteCount[stream]++;
+        outputDesc->incMuteCount(streamToVolumeSource(stream));
     } else {
-        if (outputDesc->mMuteCount[stream] == 0) {
+        if (!outputDesc->isMuted(streamToVolumeSource(stream))) {
             ALOGV("setStreamMute() unmuting non muted stream!");
             return;
         }
-        if (--outputDesc->mMuteCount[stream] == 0) {
+        if (outputDesc->decMuteCount(streamToVolumeSource(stream)) == 0) {
             checkAndSetVolume(stream,
-                              mVolumeCurves->getVolumeIndex(stream, device),
+                              curves.getVolumeIndex(device),
                               outputDesc,
                               device,
                               delayMs);
@@ -5814,25 +5778,9 @@
     }
 }
 
-audio_stream_type_t AudioPolicyManager::streamTypefromAttributesInt(const audio_attributes_t *attr)
-{
-    // flags to stream type mapping
-    if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
-        return AUDIO_STREAM_ENFORCED_AUDIBLE;
-    }
-    if ((attr->flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
-        return AUDIO_STREAM_BLUETOOTH_SCO;
-    }
-    if ((attr->flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
-        return AUDIO_STREAM_TTS;
-    }
-
-    return audio_usage_to_stream_type(attr->usage);
-}
-
 bool AudioPolicyManager::isValidAttributes(const audio_attributes_t *paa)
 {
-    // has flags that map to a strategy?
+    // has flags that map to a stream type?
     if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO | AUDIO_FLAG_BEACON)) != 0) {
         return true;
     }
@@ -5863,37 +5811,6 @@
     return true;
 }
 
-bool AudioPolicyManager::isStrategyActive(const sp<AudioOutputDescriptor>& outputDesc,
-                                          routing_strategy strategy, uint32_t inPastMs,
-                                          nsecs_t sysTime) const
-{
-    if ((sysTime == 0) && (inPastMs != 0)) {
-        sysTime = systemTime();
-    }
-    for (int i = 0; i < (int)AUDIO_STREAM_FOR_POLICY_CNT; i++) {
-        if (((getStrategy((audio_stream_type_t)i) == strategy) ||
-                (STRATEGY_NONE == strategy)) &&
-                outputDesc->isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) {
-            return true;
-        }
-    }
-    return false;
-}
-
-bool AudioPolicyManager::isStrategyActiveOnSameModule(const sp<SwAudioOutputDescriptor>& outputDesc,
-                                                      routing_strategy strategy, uint32_t inPastMs,
-                                                      nsecs_t sysTime) const
-{
-    for (size_t i = 0; i < mOutputs.size(); i++) {
-        sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
-        if (outputDesc->sharesHwModuleWith(desc)
-            && isStrategyActive(desc, strategy, inPastMs, sysTime)) {
-            return true;
-        }
-    }
-    return false;
-}
-
 audio_policy_forced_cfg_t AudioPolicyManager::getForceUse(audio_policy_force_use_t usage)
 {
     return mEngine->getForceUse(usage);
@@ -5942,6 +5859,8 @@
         }
     }
 
+    mInputs.clearSessionRoutesForDevice(deviceDesc);
+
     mHwModules.cleanUpForDevice(deviceDesc);
 }
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index de6d489..06a1f3e 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -49,7 +49,7 @@
 #include <AudioPolicyMix.h>
 #include <EffectDescriptor.h>
 #include <SoundTriggerSession.h>
-#include <VolumeCurve.h>
+#include "TypeConverter.h"
 
 namespace android {
 
@@ -113,15 +113,16 @@
         virtual void setSystemProperty(const char* property, const char* value);
         virtual status_t initCheck();
         virtual audio_io_handle_t getOutput(audio_stream_type_t stream);
-        virtual status_t getOutputForAttr(const audio_attributes_t *attr,
-                                          audio_io_handle_t *output,
-                                          audio_session_t session,
-                                          audio_stream_type_t *stream,
-                                          uid_t uid,
-                                          const audio_config_t *config,
-                                          audio_output_flags_t *flags,
-                                          audio_port_handle_t *selectedDeviceId,
-                                          audio_port_handle_t *portId);
+        status_t getOutputForAttr(const audio_attributes_t *attr,
+                                  audio_io_handle_t *output,
+                                  audio_session_t session,
+                                  audio_stream_type_t *stream,
+                                  uid_t uid,
+                                  const audio_config_t *config,
+                                  audio_output_flags_t *flags,
+                                  audio_port_handle_t *selectedDeviceId,
+                                  audio_port_handle_t *portId,
+                                  std::vector<audio_io_handle_t> *secondaryOutputs) override;
         virtual status_t startOutput(audio_port_handle_t portId);
         virtual status_t stopOutput(audio_port_handle_t portId);
         virtual void releaseOutput(audio_port_handle_t portId);
@@ -153,9 +154,15 @@
                                               audio_devices_t device);
 
         // return the strategy corresponding to a given stream type
-        virtual uint32_t getStrategyForStream(audio_stream_type_t stream);
-        // return the strategy corresponding to the given audio attributes
-        virtual routing_strategy getStrategyForAttr(const audio_attributes_t *attr);
+        virtual uint32_t getStrategyForStream(audio_stream_type_t stream)
+        {
+            return streamToStrategy(stream);
+        }
+        product_strategy_t streamToStrategy(audio_stream_type_t stream) const
+        {
+            auto attributes = mEngine->getAttributesForStreamType(stream);
+            return mEngine->getProductStrategyForAttributes(attributes);
+        }
 
         // return the enabled output devices for the given stream type
         virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream);
@@ -244,13 +251,20 @@
         virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
                     std::vector<audio_format_t> *formats);
 
-        // return the strategy corresponding to a given stream type
-        routing_strategy getStrategy(audio_stream_type_t stream) const;
-
         virtual void setAppState(uid_t uid, app_state_t state);
 
         virtual bool isHapticPlaybackSupported();
 
+        virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies)
+        {
+            return mEngine->listAudioProductStrategies(strategies);
+        }
+
+        virtual product_strategy_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa)
+        {
+            return mEngine->getProductStrategyForAttributes(aa.getAttributes());
+        }
+
 protected:
         // A constructor that allows more fine-grained control over initialization process,
         // used in automatic tests.
@@ -296,42 +310,28 @@
         {
             return mAvailableInputDevices;
         }
-        virtual IVolumeCurvesCollection &getVolumeCurves() { return *mVolumeCurves; }
         virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const
         {
             return mDefaultOutputDevice;
         }
 
+        IVolumeCurves &getVolumeCurves(const audio_attributes_t &attr)
+        {
+            auto *curves = mEngine->getVolumeCurvesForAttributes(attr);
+            ALOG_ASSERT(curves != nullptr, "No curves for attributes %s", toString(attr).c_str());
+            return *curves;
+        }
+        IVolumeCurves &getVolumeCurves(audio_stream_type_t stream)
+        {
+            auto *curves = mEngine->getVolumeCurvesForStreamType(stream);
+            ALOG_ASSERT(curves != nullptr, "No curves for stream %s", toString(stream).c_str());
+            return *curves;
+        }
+
         void addOutput(audio_io_handle_t output, const sp<SwAudioOutputDescriptor>& outputDesc);
         void removeOutput(audio_io_handle_t output);
         void addInput(audio_io_handle_t input, const sp<AudioInputDescriptor>& inputDesc);
 
-        // return appropriate device for streams handled by the specified strategy according to current
-        // phone state, connected devices...
-        // if fromCache is true, the device is returned from mDeviceForStrategy[],
-        // otherwise it is determine by current state
-        // (device connected,phone state, force use, a2dp output...)
-        // This allows to:
-        //  1 speed up process when the state is stable (when starting or stopping an output)
-        //  2 access to either current device selection (fromCache == true) or
-        // "future" device selection (fromCache == false) when called from a context
-        //  where conditions are changing (setDeviceConnectionState(), setPhoneState()...) AND
-        //  before updateDevicesAndOutputs() is called.
-        virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy,
-                                                     bool fromCache)
-        {
-            return getDevicesForStrategy(strategy, fromCache).types();
-        }
-
-        DeviceVector getDevicesForStrategy(routing_strategy strategy, bool fromCache);
-
-        bool isStrategyActive(const sp<AudioOutputDescriptor>& outputDesc, routing_strategy strategy,
-                              uint32_t inPastMs = 0, nsecs_t sysTime = 0) const;
-
-        bool isStrategyActiveOnSameModule(const sp<SwAudioOutputDescriptor>& outputDesc,
-                                          routing_strategy strategy, uint32_t inPastMs = 0,
-                                          nsecs_t sysTime = 0) const;
-
         // change the route of the specified output. Returns the number of ms we have slept to
         // allow new routing to take effect in certain cases.
         uint32_t setOutputDevices(const sp<SwAudioOutputDescriptor>& outputDesc,
@@ -350,9 +350,6 @@
         status_t resetInputDevice(audio_io_handle_t input,
                                   audio_patch_handle_t *patchHandle = NULL);
 
-        // select input device corresponding to requested audio source
-        sp<DeviceDescriptor> getDeviceForAttributes(const audio_attributes_t &attributes);
-
         // compute the actual volume for a given stream according to the requested index and a particular
         // device
         virtual float computeVolume(audio_stream_type_t stream,
@@ -373,8 +370,16 @@
         void applyStreamVolumes(const sp<AudioOutputDescriptor>& outputDesc,
                                 audio_devices_t device, int delayMs = 0, bool force = false);
 
-        // Mute or unmute all streams handled by the specified strategy on the specified output
-        void setStrategyMute(routing_strategy strategy,
+        /**
+         * @brief setStrategyMute Mute or unmute all active clients on the considered output
+         * following the given strategy.
+         * @param strategy to be considered
+         * @param on true for mute, false for unmute
+         * @param outputDesc to be considered
+         * @param delayMs
+         * @param device
+         */
+        void setStrategyMute(product_strategy_t strategy,
                              bool on,
                              const sp<AudioOutputDescriptor>& outputDesc,
                              int delayMs = 0,
@@ -420,26 +425,32 @@
         // A2DP suspend status is rechecked.
         void checkForDeviceAndOutputChanges(std::function<bool()> onOutputsChecked = nullptr);
 
-        // checks and if necessary changes outputs used for all strategies.
-        // must be called every time a condition that affects the output choice for a given strategy
-        // changes: connected device, phone state, force use...
-        // Must be called before updateDevicesAndOutputs()
-        void checkOutputForStrategy(routing_strategy strategy);
+        /**
+         * @brief checkOutputForAttributes checks and if necessary changes outputs used for the
+         * given audio attributes.
+         * Must be called every time a condition that affects the output choice for the given
+         * attributes changes: connected device, phone state, force use...
+         * Must be called before updateDevicesAndOutputs()
+         * @param attr to be considered
+         */
+        void checkOutputForAttributes(const audio_attributes_t &attr);
 
-        // Same as checkOutputForStrategy() but for a all strategies in order of priority
+        bool followsSameRouting(const audio_attributes_t &lAttr,
+                                const audio_attributes_t &rAttr) const;
+
+        /**
+         * @brief checkOutputForAllStrategies Same as @see checkOutputForAttributes()
+         *      but for all product strategies in order of priority
+         */
         void checkOutputForAllStrategies();
 
+        // Same as checkOutputForAttributes() but for secondary outputs. Makes sure that if a
+        // secondary output condition changes, the track is properly rerouted.
+        void checkSecondaryOutputs();
+
         // manages A2DP output suspend/restore according to phone state and BT SCO usage
         void checkA2dpSuspend();
 
-        template <class IoDescriptor, class Filter>
-        sp<DeviceDescriptor> findPreferredDevice(IoDescriptor& desc, Filter filter,
-                                                bool& active, const DeviceVector& devices);
-
-        template <class IoCollection, class Filter>
-        sp<DeviceDescriptor> findPreferredDevice(IoCollection& ioCollection, Filter filter,
-                                                const DeviceVector& devices);
-
         // selects the most appropriate device on output for current state
         // must be called every time a condition that affects the device choice for a given output is
         // changed: connected device, phone state, force use, output start, output stop..
@@ -447,11 +458,14 @@
         DeviceVector getNewOutputDevices(const sp<SwAudioOutputDescriptor>& outputDesc,
                                          bool fromCache);
 
-        // updates cache of device used by all strategies (mDeviceForStrategy[])
-        // must be called every time a condition that affects the device choice for a given strategy is
-        // changed: connected device, phone state, force use...
-        // cached values are used by getDeviceForStrategy() if parameter fromCache is true.
-         // Must be called after checkOutputForAllStrategies()
+        /**
+         * @brief updateDevicesAndOutputs: updates the engine's cache of selected devices.
+         * Must be called every time a condition that affects the device choice changes:
+         * connected device, phone state, force use...
+         * Cached values are used by getOutputDevicesForStream()/getDevicesForAttributes() if
+         * parameter fromCache is true.
+         * Must be called after checkOutputForAllStrategies()
+         */
         void updateDevicesAndOutputs();
 
         // selects the most appropriate device on input for current state
@@ -470,13 +484,19 @@
         SortedVector<audio_io_handle_t> getOutputsForDevices(
                 const DeviceVector &devices, const SwAudioOutputCollection& openOutputs);
 
-        // mute/unmute strategies using an incompatible device combination
-        // if muting, wait for the audio in pcm buffer to be drained before proceeding
-        // if unmuting, unmute only after the specified delay
-        // Returns the number of ms waited
-        virtual uint32_t  checkDeviceMuteStrategies(const sp<AudioOutputDescriptor>& outputDesc,
-                                                    audio_devices_t prevDeviceType,
-                                                    uint32_t delayMs);
+        /**
+         * @brief checkDeviceMuteStrategies mute/unmute strategies
+         *      using an incompatible device combination.
+         *      if muting, wait for the audio in pcm buffer to be drained before proceeding
+         *      if unmuting, unmute only after the specified delay
+         * @param outputDesc
+         * @param prevDevices
+         * @param delayMs
+         * @return the number of ms waited
+         */
+        virtual uint32_t checkDeviceMuteStrategies(const sp<AudioOutputDescriptor>& outputDesc,
+                                                   const DeviceVector &prevDevices,
+                                                   uint32_t delayMs);
 
         audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs,
                                        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
@@ -570,15 +590,22 @@
 
         void clearAudioPatches(uid_t uid);
         void clearSessionRoutes(uid_t uid);
-        void checkStrategyRoute(routing_strategy strategy, audio_io_handle_t ouptutToSkip);
+
+        /**
+         * @brief checkStrategyRoute: when an output is being rerouted, reconsider each output
+         * that may host a strategy playing on the considered output.
+         * @param ps product strategy that initiated the rerouting
+         * @param ouptutToSkip output that initiated the rerouting
+         */
+        void checkStrategyRoute(product_strategy_t ps, audio_io_handle_t ouptutToSkip);
 
         status_t hasPrimaryOutput() const { return mPrimaryOutput != 0; }
 
         status_t connectAudioSource(const sp<SourceClientDescriptor>& sourceDesc);
         status_t disconnectAudioSource(const sp<SourceClientDescriptor>& sourceDesc);
 
-        sp<SourceClientDescriptor> getSourceForStrategyOnOutput(audio_io_handle_t output,
-                                                               routing_strategy strategy);
+        sp<SourceClientDescriptor> getSourceForAttributesOnOutput(audio_io_handle_t output,
+                                                                  const audio_attributes_t &attr);
 
         void cleanUpForDevice(const sp<DeviceDescriptor>& deviceDesc);
 
@@ -606,24 +633,15 @@
 
         bool    mLimitRingtoneVolume;        // limit ringtone volume to music volume if headset connected
 
-        /**
-         * @brief mDevicesForStrategy vector of devices that are assigned for a given strategy.
-         * Note: in case of removal of device (@see setDeviceConnectionState), the device descriptor
-         * will be removed from the @see mAvailableOutputDevices or @see mAvailableInputDevices
-         * but the devices for strategies will be reevaluated within the
-         * @see setDeviceConnectionState function.
-         */
-        DeviceVector mDevicesForStrategy[NUM_STRATEGIES];
-
         float   mLastVoiceVolume;            // last voice volume value sent to audio HAL
         bool    mA2dpSuspended;  // true if A2DP output is suspended
 
-        std::unique_ptr<IVolumeCurvesCollection> mVolumeCurves; // Volume Curves per use case and device category
         EffectDescriptorCollection mEffects;  // list of registered audio effects
         sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time
         HwModuleCollection mHwModules; // contains only modules that have been loaded successfully
         HwModuleCollection mHwModulesAll; // normally not needed, used during construction and for
                                           // dumps
+
         AudioPolicyConfig mConfig;
 
         std::atomic<uint32_t> mAudioPortGeneration;
@@ -709,7 +727,9 @@
                 uid_t uid,
                 const audio_config_t *config,
                 audio_output_flags_t *flags,
-                audio_port_handle_t *selectedDeviceId);
+                audio_port_handle_t *selectedDeviceId,
+                bool *isRequestedDeviceForExclusiveUse,
+                std::vector<sp<SwAudioOutputDescriptor>> *secondaryDescs);
         // internal method to return the output handle for the given device and format
         audio_io_handle_t getOutputForDevices(
                 const DeviceVector &devices,
@@ -717,16 +737,26 @@
                 audio_stream_type_t stream,
                 const audio_config_t *config,
                 audio_output_flags_t *flags);
-        // internal method to return the input handle for the given device and format
+
+        /**
+         * @brief getInputForDevice selects an input handle for a given input device and
+         * requester context
+         * @param device to be used by requester, selected by policy mix rules or engine
+         * @param session requester session id
+         * @param uid requester uid
+         * @param attributes requester audio attributes (e.g. input source and tags matter)
+         * @param config requester audio configuration (e.g. sample rate, format, channel mask).
+         * @param flags requester input flags
+         * @param policyMix may be null, policy rules to be followed by the requester
+         * @return input io handle aka unique input identifier selected for this device.
+         */
         audio_io_handle_t getInputForDevice(const sp<DeviceDescriptor> &device,
                 audio_session_t session,
-                audio_source_t inputSource,
+                const audio_attributes_t &attributes,
                 const audio_config_base_t *config,
                 audio_input_flags_t flags,
                 AudioMix *policyMix);
 
-        // internal function to derive a stream type value from audio attributes
-        audio_stream_type_t streamTypefromAttributesInt(const audio_attributes_t *attr);
         // event is one of STARTING_OUTPUT, STARTING_BEACON, STOPPING_OUTPUT, STOPPING_BEACON
         // returns 0 if no mute/unmute event happened, the largest latency of the device where
         //   the mute/unmute happened
@@ -734,11 +764,6 @@
         uint32_t setBeaconMute(bool mute);
         bool     isValidAttributes(const audio_attributes_t *paa);
 
-        // select input device corresponding to requested audio source and return associated policy
-        // mix if any. Calls getDeviceForInputSource().
-        sp<DeviceDescriptor> getDeviceAndMixForAttributes(const audio_attributes_t &attributes,
-                                                          AudioMix **policyMix = NULL);
-
         // Called by setDeviceConnectionState().
         status_t setDeviceConnectionStateInt(audio_devices_t deviceType,
                                              audio_policy_dev_state_t state,
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index 919a90d..4947714 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -23,7 +23,6 @@
 #include <memory>
 #include <cutils/misc.h>
 #include <media/AudioEffect.h>
-#include <media/AudioPolicyHelper.h>
 #include <media/EffectsConfig.h>
 #include <mediautils/ServiceUtilities.h>
 #include <system/audio.h>
@@ -398,8 +397,7 @@
         ALOGE("addStreamDefaultEffect(): Null uuid or type uuid pointer");
         return BAD_VALUE;
     }
-
-    audio_stream_type_t stream = audio_usage_to_stream_type(usage);
+    audio_stream_type_t stream = AudioSystem::attributesToStreamType(attributes_initializer(usage));
 
     if (stream < AUDIO_STREAM_MIN || stream >= AUDIO_STREAM_PUBLIC_CNT) {
         ALOGE("addStreamDefaultEffect(): Unsupported stream type %d", stream);
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 45fb174..8ddf824 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -19,7 +19,6 @@
 
 #include "AudioPolicyService.h"
 #include "TypeConverter.h"
-#include <media/AudioPolicyHelper.h>
 #include <media/MediaAnalyticsItem.h>
 #include <mediautils/ServiceUtilities.h>
 #include <utils/Log.h>
@@ -176,7 +175,8 @@
                                               const audio_config_t *config,
                                               audio_output_flags_t flags,
                                               audio_port_handle_t *selectedDeviceId,
-                                              audio_port_handle_t *portId)
+                                              audio_port_handle_t *portId,
+                                              std::vector<audio_io_handle_t> *secondaryOutputs)
 {
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
@@ -194,7 +194,8 @@
     AutoCallerClear acc;
     status_t result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid,
                                                  config,
-                                                 &flags, selectedDeviceId, portId);
+                                                 &flags, selectedDeviceId, portId,
+                                                 secondaryOutputs);
 
     // FIXME: Introduce a way to check for the telephony device before opening the output
     if ((result == NO_ERROR) &&
@@ -206,9 +207,10 @@
         flags = originalFlags;
         *selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
         *portId = AUDIO_PORT_HANDLE_NONE;
-        result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid,
-                                                 config,
-                                                 &flags, selectedDeviceId, portId);
+        secondaryOutputs->clear();
+        result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid, config,
+                                                       &flags, selectedDeviceId, portId,
+                                                       secondaryOutputs);
     }
 
     if (result == NO_ERROR) {
@@ -703,11 +705,12 @@
 uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
 {
     if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
-        return 0;
+        return PRODUCT_STRATEGY_NONE;
     }
     if (mAudioPolicyManager == NULL) {
-        return 0;
+        return PRODUCT_STRATEGY_NONE;
     }
+    // DO NOT LOCK, may be called from AudioFlinger with lock held, risking deadlock
     AutoCallerClear acc;
     return mAudioPolicyManager->getStrategyForStream(stream);
 }
@@ -1186,4 +1189,22 @@
     return mAudioPolicyManager->isHapticPlaybackSupported();
 }
 
+status_t AudioPolicyService::listAudioProductStrategies(AudioProductStrategyVector &strategies)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->listAudioProductStrategies(strategies);
+}
+
+product_strategy_t AudioPolicyService::getProductStrategyFromAudioAttributes(
+        const AudioAttributes &aa)
+{
+    if (mAudioPolicyManager == NULL) {
+        return PRODUCT_STRATEGY_NONE;
+    }
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->getProductStrategyFromAudioAttributes(aa);
+}
 } // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index a39477d..76ac191 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -383,6 +383,8 @@
 //    OR The client is an accessibility service
 //        AND is on TOP OR latest started
 //        AND the source is VOICE_RECOGNITION or HOTWORD
+//    OR the source is one of: AUDIO_SOURCE_VOICE_DOWNLINK, AUDIO_SOURCE_VOICE_UPLINK,
+//       AUDIO_SOURCE_VOICE_CALL
 //    OR Any other client
 //        AND The assistant is not on TOP
 //        AND is on TOP OR latest started
@@ -463,6 +465,10 @@
                 (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
                 forceIdle = false;
             }
+        } else if (source == AUDIO_SOURCE_VOICE_DOWNLINK ||
+                   source == AUDIO_SOURCE_VOICE_CALL ||
+                   (source == AUDIO_SOURCE_VOICE_UPLINK)) {
+            forceIdle = false;
         } else {
             if (!isAssistantOnTop && (isOnTop || isLatest) &&
                 (!isSensitiveActive || isLatestSensitive)) {
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index c073b7c..8cd6e81 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -74,16 +74,17 @@
     virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
     virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
     virtual audio_io_handle_t getOutput(audio_stream_type_t stream);
-    virtual status_t getOutputForAttr(const audio_attributes_t *attr,
-                                      audio_io_handle_t *output,
-                                      audio_session_t session,
-                                      audio_stream_type_t *stream,
-                                      pid_t pid,
-                                      uid_t uid,
-                                      const audio_config_t *config,
-                                      audio_output_flags_t flags,
-                                      audio_port_handle_t *selectedDeviceId,
-                                      audio_port_handle_t *portId);
+    status_t getOutputForAttr(const audio_attributes_t *attr,
+                              audio_io_handle_t *output,
+                              audio_session_t session,
+                              audio_stream_type_t *stream,
+                              pid_t pid,
+                              uid_t uid,
+                              const audio_config_t *config,
+                              audio_output_flags_t flags,
+                              audio_port_handle_t *selectedDeviceId,
+                              audio_port_handle_t *portId,
+                              std::vector<audio_io_handle_t> *secondaryOutputs) override;
     virtual status_t startOutput(audio_port_handle_t portId);
     virtual status_t stopOutput(audio_port_handle_t portId);
     virtual void releaseOutput(audio_port_handle_t portId);
@@ -229,6 +230,9 @@
 
     virtual bool     isHapticPlaybackSupported();
 
+    virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies);
+    virtual product_strategy_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa);
+
             status_t doStopOutput(audio_port_handle_t portId);
             void doReleaseOutput(audio_port_handle_t portId);
 
diff --git a/services/audiopolicy/tests/Android.mk b/services/audiopolicy/tests/Android.mk
index e4fba0f..97be44c 100644
--- a/services/audiopolicy/tests/Android.mk
+++ b/services/audiopolicy/tests/Android.mk
@@ -4,8 +4,6 @@
 
 LOCAL_C_INCLUDES := \
   frameworks/av/services/audiopolicy \
-  frameworks/av/services/audiopolicy/common/include \
-  frameworks/av/services/audiopolicy/engine/interface \
   $(call include-path-for, audio-utils) \
 
 LOCAL_SHARED_LIBRARIES := \
@@ -18,6 +16,10 @@
 LOCAL_STATIC_LIBRARIES := \
   libaudiopolicycomponents \
 
+LOCAL_HEADER_LIBRARIES := \
+    libaudiopolicycommon \
+    libaudiopolicyengine_interface_headers
+
 LOCAL_SRC_FILES := \
   audiopolicymanager_tests.cpp \
 
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index e9f4657..de5670c 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -214,7 +214,7 @@
     *portId = AUDIO_PORT_HANDLE_NONE;
     ASSERT_EQ(OK, mManager->getOutputForAttr(
                     &attr, &output, AUDIO_SESSION_NONE, &stream, 0 /*uid*/, &config, &flags,
-                    selectedDeviceId, portId));
+                    selectedDeviceId, portId, {}));
     ASSERT_NE(AUDIO_PORT_HANDLE_NONE, *portId);
 }
 
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index a090479..7ec0e4c 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -41,6 +41,8 @@
         "api2/CameraDeviceClient.cpp",
         "api2/CompositeStream.cpp",
         "api2/DepthCompositeStream.cpp",
+        "api2/HeicEncoderInfoManager.cpp",
+        "api2/HeicCompositeStream.cpp",
         "device1/CameraHardwareInterface.cpp",
         "device3/Camera3Device.cpp",
         "device3/Camera3Stream.cpp",
@@ -62,12 +64,14 @@
         "hidl/HidlCameraService.cpp",
         "utils/CameraTraces.cpp",
         "utils/AutoConditionLock.cpp",
+        "utils/ExifUtils.cpp",
         "utils/TagMonitor.cpp",
         "utils/LatencyHistogram.cpp",
     ],
 
     shared_libs: [
         "libdl",
+        "libexif",
         "libui",
         "liblog",
         "libutilscallstack",
@@ -85,14 +89,18 @@
         "libhidlbase",
         "libhidltransport",
         "libjpeg",
+        "libmedia_omx",
         "libmemunreachable",
         "libsensorprivacy",
+        "libstagefright",
         "libstagefright_foundation",
+        "libyuv",
         "android.frameworks.cameraservice.common@2.0",
         "android.frameworks.cameraservice.service@2.0",
         "android.frameworks.cameraservice.device@2.0",
         "android.hardware.camera.common@1.0",
         "android.hardware.camera.provider@2.4",
+        "android.hardware.camera.provider@2.5",
         "android.hardware.camera.device@1.0",
         "android.hardware.camera.device@3.2",
         "android.hardware.camera.device@3.3",
@@ -130,6 +138,7 @@
     name: "libdepthphoto",
 
     srcs: [
+        "utils/ExifUtils.cpp",
         "common/DepthPhotoProcessor.cpp",
     ],
 
@@ -143,6 +152,8 @@
         "libcutils",
         "libjpeg",
         "libmemunreachable",
+        "libexif",
+        "libcamera_client",
     ],
 
     include_dirs: [
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index c3113bf..e06897f 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -49,6 +49,7 @@
 #include <hardware/hardware.h>
 #include "hidl/HidlCameraService.h"
 #include <hidl/HidlTransportSupport.h>
+#include <hwbinder/IPCThreadState.h>
 #include <memunreachable/memunreachable.h>
 #include <media/AudioSystem.h>
 #include <media/IMediaHTTPService.h>
@@ -226,7 +227,7 @@
     Mutex::Autolock lock(mStatusListenerLock);
 
     for (auto& i : mListenerList) {
-        i->onTorchStatusChanged(mapToInterface(status), String16{cameraId});
+        i.second->onTorchStatusChanged(mapToInterface(status), String16{cameraId});
     }
 }
 
@@ -1287,6 +1288,18 @@
     return ret;
 }
 
+bool CameraService::shouldRejectHiddenCameraConnection(const String8 & cameraId) {
+    // If the thread serving this call is not a hwbinder thread and the caller
+    // isn't the cameraserver itself, and the camera id being requested is to be
+    // publicly hidden, we should reject the connection.
+    if (!hardware::IPCThreadState::self()->isServingCall() &&
+            CameraThreadState::getCallingPid() != getpid() &&
+            mCameraProviderManager->isPublicallyHiddenSecureCamera(cameraId.c_str())) {
+        return true;
+    }
+    return false;
+}
+
 Status CameraService::connectDevice(
         const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb,
         const String16& cameraId,
@@ -1299,6 +1312,7 @@
     Status ret = Status::ok();
     String8 id = String8(cameraId);
     sp<CameraDeviceClient> client = nullptr;
+
     ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
             /*api1CameraId*/-1,
             CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName,
@@ -1330,6 +1344,14 @@
             (halVersion == -1) ? "default" : std::to_string(halVersion).c_str(),
             static_cast<int>(effectiveApiLevel));
 
+    if (shouldRejectHiddenCameraConnection(cameraId)) {
+        ALOGW("Attempting to connect to system-only camera id %s, connection rejected",
+              cameraId.c_str());
+        return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+                                "No camera device with ID \"%s\" currently available",
+                                cameraId.string());
+
+    }
     sp<CLIENT> client = nullptr;
     {
         // Acquire mServiceLock and prevent other clients from connecting
@@ -1632,9 +1654,60 @@
     return Status::ok();
 }
 
+Status CameraService::notifyDeviceStateChange(int64_t newState) {
+    const int pid = CameraThreadState::getCallingPid();
+    const int selfPid = getpid();
+
+    // Permission checks
+    if (pid != selfPid) {
+        // Ensure we're being called by system_server, or similar process with
+        // permissions to notify the camera service about system events
+        if (!checkCallingPermission(
+                String16("android.permission.CAMERA_SEND_SYSTEM_EVENTS"))) {
+            const int uid = CameraThreadState::getCallingUid();
+            ALOGE("Permission Denial: cannot send updates to camera service about device"
+                    " state changes from pid=%d, uid=%d", pid, uid);
+            return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+                    "No permission to send updates to camera service about device state"
+                    " changes from pid=%d, uid=%d", pid, uid);
+        }
+    }
+
+    ATRACE_CALL();
+
+    using hardware::camera::provider::V2_5::DeviceState;
+    hardware::hidl_bitfield<DeviceState> newDeviceState{};
+    if (newState & ICameraService::DEVICE_STATE_BACK_COVERED) {
+        newDeviceState |= DeviceState::BACK_COVERED;
+    }
+    if (newState & ICameraService::DEVICE_STATE_FRONT_COVERED) {
+        newDeviceState |= DeviceState::FRONT_COVERED;
+    }
+    if (newState & ICameraService::DEVICE_STATE_FOLDED) {
+        newDeviceState |= DeviceState::FOLDED;
+    }
+    // Only map vendor bits directly
+    uint64_t vendorBits = static_cast<uint64_t>(newState) & 0xFFFFFFFF00000000l;
+    newDeviceState |= vendorBits;
+
+    ALOGV("%s: New device state 0x%" PRIx64, __FUNCTION__, newDeviceState);
+    Mutex::Autolock l(mServiceLock);
+    mCameraProviderManager->notifyDeviceStateChange(newDeviceState);
+
+    return Status::ok();
+}
+
 Status CameraService::addListener(const sp<ICameraServiceListener>& listener,
         /*out*/
         std::vector<hardware::CameraStatus> *cameraStatuses) {
+    return addListenerHelper(listener, cameraStatuses);
+}
+
+Status CameraService::addListenerHelper(const sp<ICameraServiceListener>& listener,
+        /*out*/
+        std::vector<hardware::CameraStatus> *cameraStatuses,
+        bool isVendorListener) {
+
     ATRACE_CALL();
 
     ALOGV("%s: Add listener %p", __FUNCTION__, listener.get());
@@ -1649,20 +1722,26 @@
     {
         Mutex::Autolock lock(mStatusListenerLock);
         for (auto& it : mListenerList) {
-            if (IInterface::asBinder(it) == IInterface::asBinder(listener)) {
+            if (IInterface::asBinder(it.second) == IInterface::asBinder(listener)) {
                 ALOGW("%s: Tried to add listener %p which was already subscribed",
                       __FUNCTION__, listener.get());
                 return STATUS_ERROR(ERROR_ALREADY_EXISTS, "Listener already registered");
             }
         }
 
-        mListenerList.push_back(listener);
+        mListenerList.emplace_back(isVendorListener, listener);
     }
 
     /* Collect current devices and status */
     {
         Mutex::Autolock lock(mCameraStatesLock);
         for (auto& i : mCameraStates) {
+            if (!isVendorListener &&
+                mCameraProviderManager->isPublicallyHiddenSecureCamera(i.first.c_str())) {
+                ALOGV("Cannot add public listener for hidden system-only %s for pid %d",
+                      i.first.c_str(), CameraThreadState::getCallingPid());
+                continue;
+            }
             cameraStatuses->emplace_back(i.first, mapToInterface(i.second->getStatus()));
         }
     }
@@ -1697,7 +1776,7 @@
     {
         Mutex::Autolock lock(mStatusListenerLock);
         for (auto it = mListenerList.begin(); it != mListenerList.end(); it++) {
-            if (IInterface::asBinder(*it) == IInterface::asBinder(listener)) {
+            if (IInterface::asBinder(it->second) == IInterface::asBinder(listener)) {
                 mListenerList.erase(it);
                 return Status::ok();
             }
@@ -3033,7 +3112,13 @@
             Mutex::Autolock lock(mStatusListenerLock);
 
             for (auto& listener : mListenerList) {
-                listener->onStatusChanged(mapToInterface(status), String16(cameraId));
+                if (!listener.first &&
+                    mCameraProviderManager->isPublicallyHiddenSecureCamera(cameraId.c_str())) {
+                    ALOGV("Skipping camera discovery callback for system-only camera %s",
+                          cameraId.c_str());
+                    continue;
+                }
+                listener.second->onStatusChanged(mapToInterface(status), String16(cameraId));
             }
         });
 }
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index a296198..cf0cef8 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -154,6 +154,8 @@
     virtual binder::Status    notifySystemEvent(int32_t eventId,
             const std::vector<int32_t>& args);
 
+    virtual binder::Status    notifyDeviceStateChange(int64_t newState);
+
     // OK = supports api of that version, -EOPNOTSUPP = does not support
     virtual binder::Status    supportsCameraApi(
             const String16& cameraId, int32_t apiVersion,
@@ -173,6 +175,10 @@
 
     virtual status_t    shellCommand(int in, int out, int err, const Vector<String16>& args);
 
+    binder::Status      addListenerHelper(const sp<hardware::ICameraServiceListener>& listener,
+            /*out*/
+            std::vector<hardware::CameraStatus>* cameraStatuses, bool isVendor = false);
+
     /////////////////////////////////////////////////////////////////////
     // Client functionality
 
@@ -615,6 +621,10 @@
         sp<BasicClient>* client,
         std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>>* partial);
 
+    // Should a connection attempt for this cameraId be rejected because the camera
+    // HAL advertises it as a publicly hidden secure camera?
+    bool shouldRejectHiddenCameraConnection(const String8& cameraId);
+
     // Single implementation shared between the various connect calls
     template<class CALLBACK, class CLIENT>
     binder::Status connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
@@ -781,7 +791,8 @@
     sp<CameraProviderManager> mCameraProviderManager;
 
     // Guarded by mStatusListenerMutex
-    std::vector<sp<hardware::ICameraServiceListener>> mListenerList;
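+    // The bool records whether the listener was registered by a vendor client;
+    // vendor listeners also receive updates for publicly hidden secure cameras.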
+    std::vector<std::pair<bool, sp<hardware::ICameraServiceListener>>> mListenerList;
+
     Mutex       mStatusListenerLock;
 
     /**
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index c9c216b..162b50f 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1767,6 +1767,7 @@
         case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
             ALOGW("%s: Received recoverable error %d from HAL - ignoring, requestId %" PRId32,
                     __FUNCTION__, errorCode, resultExtras.requestId);
+            mCaptureSequencer->notifyError(errorCode, resultExtras);
             return;
         default:
             err = CAMERA_ERROR_UNKNOWN;
@@ -1927,9 +1928,6 @@
 
 void Camera2Client::notifyShutter(const CaptureResultExtras& resultExtras,
                                   nsecs_t timestamp) {
-    (void)resultExtras;
-    (void)timestamp;
-
     ALOGV("%s: Shutter notification for request id %" PRId32 " at time %" PRId64,
             __FUNCTION__, resultExtras.requestId, timestamp);
     mCaptureSequencer->notifyShutter(resultExtras, timestamp);
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index 5029d4b..88799f9 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -117,6 +117,31 @@
     }
 }
 
+void CaptureSequencer::notifyError(int32_t errorCode, const CaptureResultExtras& resultExtras) {
+    ATRACE_CALL();
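+    // A JPEG capture is treated as lost when a buffer error is reported on the
+    // capture stream, or when a request error matches the capture id the sequencer
+    // is tracking; in either case the sequence is completed with an empty buffer.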
+    bool jpegBufferLost = false;
+    if (errorCode == hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER) {
+        sp<Camera2Client> client = mClient.promote();
+        if (client == nullptr) {
+            return;
+        }
+        int captureStreamId = client->getCaptureStreamId();
+        if (captureStreamId == resultExtras.errorStreamId) {
+            jpegBufferLost = true;
+        }
+    } else if (errorCode ==
+            hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST) {
+        if (resultExtras.requestId == mShutterCaptureId) {
+            jpegBufferLost = true;
+        }
+    }
+
+    if (jpegBufferLost) {
+        sp<MemoryBase> emptyBuffer;
+        onCaptureAvailable(/*timestamp*/0, emptyBuffer, /*captureError*/true);
+    }
+}
+
 void CaptureSequencer::onResultAvailable(const CaptureResult &result) {
     ATRACE_CALL();
     ALOGV("%s: New result available.", __FUNCTION__);
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
index c23b12d..727dd53 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -65,6 +65,9 @@
     void notifyShutter(const CaptureResultExtras& resultExtras,
                        nsecs_t timestamp);
 
+    // Notifications about capture errors
+    void notifyError(int32_t errorCode, const CaptureResultExtras& resultExtras);
+
     // Notification from the frame processor
     virtual void onResultAvailable(const CaptureResult &result);
 
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index e6f75f4..ddfe5e3 100755
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -62,30 +62,6 @@
     }
 }
 
-void JpegProcessor::onBufferRequestForFrameNumber(uint64_t /*frameNumber*/, int /*streamId*/) {
-    // Intentionally left empty
-}
-
-void JpegProcessor::onBufferAcquired(const BufferInfo& /*bufferInfo*/) {
-    // Intentionally left empty
-}
-
-void JpegProcessor::onBufferReleased(const BufferInfo& bufferInfo) {
-    ALOGV("%s", __FUNCTION__);
-    if (bufferInfo.mError) {
-        // Only lock in case of error, since we get one of these for each
-        // onFrameAvailable as well, and scheduling may delay this call late
-        // enough to run into later preview restart operations, for non-error
-        // cases.
-        // b/29524651
-        ALOGV("%s: JPEG buffer lost", __FUNCTION__);
-        Mutex::Autolock l(mInputMutex);
-        mCaptureDone = true;
-        mCaptureSuccess = false;
-        mCaptureDoneSignal.signal();
-    }
-}
-
 status_t JpegProcessor::updateStream(const Parameters &params) {
     ATRACE_CALL();
     ALOGV("%s", __FUNCTION__);
@@ -180,13 +156,6 @@
                     strerror(-res), res);
             return res;
         }
-
-        res = device->addBufferListenerForStream(mCaptureStreamId, this);
-        if (res != OK) {
-              ALOGE("%s: Camera %d: Can't add buffer listeneri: %s (%d)",
-                    __FUNCTION__, mId, strerror(-res), res);
-              return res;
-        }
     }
     return OK;
 }
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.h b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
index 2ee930e..977f11d 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
@@ -42,8 +42,7 @@
  * Still image capture output image processing
  */
 class JpegProcessor:
-            public Thread, public CpuConsumer::FrameAvailableListener,
-            public camera3::Camera3StreamBufferListener {
+            public Thread, public CpuConsumer::FrameAvailableListener {
   public:
     JpegProcessor(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
     ~JpegProcessor();
@@ -51,11 +50,6 @@
     // CpuConsumer listener implementation
     void onFrameAvailable(const BufferItem& item);
 
-    // Camera3StreamBufferListener implementation
-    void onBufferAcquired(const BufferInfo& bufferInfo) override;
-    void onBufferReleased(const BufferInfo& bufferInfo) override;
-    void onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId) override;
-
     status_t updateStream(const Parameters &params);
     status_t deleteStream();
     int getStreamId() const;
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 9e203da..b512f2b 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -34,6 +34,7 @@
 #include <camera_metadata_hidden.h>
 
 #include "DepthCompositeStream.h"
+#include "HeicCompositeStream.h"
 
 // Convenience methods for constructing binder::Status objects for error returns
 
@@ -711,21 +712,35 @@
                 return res;
 
             if (!isStreamInfoValid) {
-                if (camera3::DepthCompositeStream::isDepthCompositeStream(surface)) {
+                bool isDepthCompositeStream =
+                        camera3::DepthCompositeStream::isDepthCompositeStream(surface);
+                bool isHeicCompositeStream =
+                        camera3::HeicCompositeStream::isHeicCompositeStream(surface);
+                if (isDepthCompositeStream || isHeicCompositeStream) {
                     // We need to take in to account that composite streams can have
                     // additional internal camera streams.
                     std::vector<OutputStreamInfo> compositeStreams;
-                    ret = camera3::DepthCompositeStream::getCompositeStreamInfo(streamInfo,
+                    if (isDepthCompositeStream) {
+                        ret = camera3::DepthCompositeStream::getCompositeStreamInfo(streamInfo,
+                                mDevice->info(), &compositeStreams);
+                    } else {
+                        ret = camera3::HeicCompositeStream::getCompositeStreamInfo(streamInfo,
                             mDevice->info(), &compositeStreams);
+                    }
                     if (ret != OK) {
                         String8 msg = String8::format(
-                                "Camera %s: Failed adding depth composite streams: %s (%d)",
+                                "Camera %s: Failed adding composite streams: %s (%d)",
                                 mCameraIdStr.string(), strerror(-ret), ret);
                         ALOGE("%s: %s", __FUNCTION__, msg.string());
                         return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
                     }
 
-                    if (compositeStreams.size() > 1) {
+                    if (compositeStreams.size() == 0) {
+                        // No internal streams means composite stream not
+                        // supported.
+                        *status = false;
+                        return binder::Status::ok();
+                    } else if (compositeStreams.size() > 1) {
                         streamCount += compositeStreams.size() - 1;
                         streamConfiguration.streams.resize(streamCount);
                     }
@@ -937,15 +952,16 @@
 
     int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
     std::vector<int> surfaceIds;
-    if (!camera3::DepthCompositeStream::isDepthCompositeStream(surfaces[0])) {
-        err = mDevice->createStream(surfaces, deferredConsumer, streamInfo.width,
-                streamInfo.height, streamInfo.format, streamInfo.dataSpace,
-                static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
-                &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
-                isShared);
-    } else {
-        sp<CompositeStream> compositeStream = new camera3::DepthCompositeStream(mDevice,
-                getRemoteCallback());
+    bool isDepthCompositeStream = camera3::DepthCompositeStream::isDepthCompositeStream(surfaces[0]);
+    bool isHeicCompositeStream = camera3::HeicCompositeStream::isHeicCompositeStream(surfaces[0]);
+    if (isDepthCompositeStream || isHeicCompositeStream) {
+        sp<CompositeStream> compositeStream;
+        if (isDepthCompositeStream) {
+            compositeStream = new camera3::DepthCompositeStream(mDevice, getRemoteCallback());
+        } else {
+            compositeStream = new camera3::HeicCompositeStream(mDevice, getRemoteCallback());
+        }
+
         err = compositeStream->createStream(surfaces, deferredConsumer, streamInfo.width,
                 streamInfo.height, streamInfo.format,
                 static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
@@ -955,6 +971,12 @@
             mCompositeStreamMap.add(IInterface::asBinder(surfaces[0]->getIGraphicBufferProducer()),
                     compositeStream);
         }
+    } else {
+        err = mDevice->createStream(surfaces, deferredConsumer, streamInfo.width,
+                streamInfo.height, streamInfo.format, streamInfo.dataSpace,
+                static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
+                &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
+                isShared);
     }
 
     if (err != OK) {
@@ -1437,6 +1459,8 @@
     camera_metadata_ro_entry streamConfigs =
             (dataSpace == HAL_DATASPACE_DEPTH) ?
             info.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS) :
+            (dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_HEIF)) ?
+            info.find(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS) :
             info.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
 
     int32_t bestWidth = -1;
@@ -1930,6 +1954,10 @@
         remoteCb->onCaptureStarted(resultExtras, timestamp);
     }
     Camera2ClientBase::notifyShutter(resultExtras, timestamp);
+
+    for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
+        mCompositeStreamMap.valueAt(i)->onShutter(resultExtras, timestamp);
+    }
 }
 
 void CameraDeviceClient::notifyPrepared(int streamId) {
diff --git a/services/camera/libcameraservice/api2/CompositeStream.cpp b/services/camera/libcameraservice/api2/CompositeStream.cpp
index 796bf42..354eaf9 100644
--- a/services/camera/libcameraservice/api2/CompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/CompositeStream.cpp
@@ -82,7 +82,8 @@
     return deleteInternalStreams();
 }
 
-void CompositeStream::onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId) {
+void CompositeStream::onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId,
+        const CameraMetadata& /*settings*/) {
     Mutex::Autolock l(mMutex);
     if (!mErrorState && (streamId == getStreamId())) {
         mPendingCaptureResults.emplace(frameNumber, CameraMetadata());
diff --git a/services/camera/libcameraservice/api2/CompositeStream.h b/services/camera/libcameraservice/api2/CompositeStream.h
index 5837745..a401a82 100644
--- a/services/camera/libcameraservice/api2/CompositeStream.h
+++ b/services/camera/libcameraservice/api2/CompositeStream.h
@@ -23,6 +23,7 @@
 #include <android/hardware/camera2/ICameraDeviceCallbacks.h>
 #include <camera/CameraMetadata.h>
 #include <camera/camera2/OutputConfiguration.h>
+#include <gui/IProducerListener.h>
 #include "common/CameraDeviceBase.h"
 #include "device3/Camera3StreamInterface.h"
 
@@ -66,15 +67,24 @@
     // Return composite stream id.
     virtual int getStreamId() = 0;
 
+    // Notification that a shutter (capture start) event has arrived
+    virtual void onShutter(const CaptureResultExtras& /*resultExtras*/, nsecs_t /*timestamp*/) {}
+
     void onResultAvailable(const CaptureResult& result);
     bool onError(int32_t errorCode, const CaptureResultExtras& resultExtras);
 
     // Camera3StreamBufferListener implementation
     void onBufferAcquired(const BufferInfo& /*bufferInfo*/) override { /*Empty for now */ }
     void onBufferReleased(const BufferInfo& bufferInfo) override;
-    void onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId) override;
+    void onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId,
+            const CameraMetadata& settings) override;
 
 protected:
+    struct ProducerListener : public BnProducerListener {
+        // ProducerListener implementation
+        void onBufferReleased() override { /*No impl. for now*/ };
+    };
+
     status_t registerCompositeStreamListener(int32_t streamId);
     void eraseResult(int64_t frameNumber);
     void flagAnErrorFrameNumber(int64_t frameNumber);
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
index f627b25..9525ad2 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
@@ -339,6 +339,21 @@
     } else {
         depthPhoto.mIsLensDistortionValid = 0;
     }
+    entry = inputFrame.result.find(ANDROID_JPEG_ORIENTATION);
+    if (entry.count > 0) {
+        // The camera JPEG orientation value must be one of {0, 90, 180, 270}.
+        switch (entry.data.i32[0]) {
+            case 0:
+            case 90:
+            case 180:
+            case 270:
+                depthPhoto.mOrientation = static_cast<DepthPhotoOrientation> (entry.data.i32[0]);
+                break;
+            default:
+                ALOGE("%s: Unexpected jpeg orientation value: %d, default to 0 degrees",
+                        __FUNCTION__, entry.data.i32[0]);
+        }
+    }
 
     size_t actualJpegSize = 0;
     res = mDepthPhotoProcess(depthPhoto, finalJpegBufferSize, dstBuffer, &actualJpegSize);
@@ -634,6 +649,11 @@
         mDepthStreamId = -1;
     }
 
+    if (mOutputSurface != nullptr) {
+        mOutputSurface->disconnect(NATIVE_WINDOW_API_CAMERA);
+        mOutputSurface.clear();
+    }
+
     return ret;
 }
 
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.h b/services/camera/libcameraservice/api2/DepthCompositeStream.h
index e8fe517..1bf31f4 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.h
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.h
@@ -21,7 +21,6 @@
 #include <dynamic_depth/imaging_model.h>
 #include <dynamic_depth/depth_map.h>
 
-#include <gui/IProducerListener.h>
 #include <gui/CpuConsumer.h>
 
 #include "CompositeStream.h"
@@ -116,11 +115,6 @@
     static const auto kDepthMapDataSpace = HAL_DATASPACE_DEPTH;
     static const auto kJpegDataSpace = HAL_DATASPACE_V0_JFIF;
 
-    struct ProducerListener : public BnProducerListener {
-        // ProducerListener implementation
-        void onBufferReleased() override { /*No impl. for now*/ };
-    };
-
     int                  mBlobStreamId, mBlobSurfaceId, mDepthStreamId, mDepthSurfaceId;
     size_t               mBlobWidth, mBlobHeight;
     sp<CpuConsumer>      mBlobConsumer, mDepthConsumer;
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
new file mode 100644
index 0000000..9fd0e8b
--- /dev/null
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -0,0 +1,1709 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-HeicCompositeStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <linux/memfd.h>
+#include <pthread.h>
+#include <sys/syscall.h>
+
+#include <android/hardware/camera/device/3.5/types.h>
+#include <libyuv.h>
+#include <gui/Surface.h>
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include <media/ICrypto.h>
+#include <media/MediaCodecBuffer.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+#include <media/stagefright/MediaCodecConstants.h>
+
+#include "common/CameraDeviceBase.h"
+#include "utils/ExifUtils.h"
+#include "HeicEncoderInfoManager.h"
+#include "HeicCompositeStream.h"
+
+using android::hardware::camera::device::V3_5::CameraBlob;
+using android::hardware::camera::device::V3_5::CameraBlobId;
+
+namespace android {
+namespace camera3 {
+
+HeicCompositeStream::HeicCompositeStream(wp<CameraDeviceBase> device,
+        wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
+        CompositeStream(device, cb),
+        mUseHeic(false),
+        mNumOutputTiles(1),
+        mOutputWidth(0),
+        mOutputHeight(0),
+        mMaxHeicBufferSize(0),
+        mGridWidth(HeicEncoderInfoManager::kGridWidth),
+        mGridHeight(HeicEncoderInfoManager::kGridHeight),
+        mGridRows(1),
+        mGridCols(1),
+        mUseGrid(false),
+        mAppSegmentStreamId(-1),
+        mAppSegmentSurfaceId(-1),
+        mAppSegmentBufferAcquired(false),
+        mMainImageStreamId(-1),
+        mMainImageSurfaceId(-1),
+        mYuvBufferAcquired(false),
+        mProducerListener(new ProducerListener()),
+        mOutputBufferCounter(0),
+        mGridTimestampUs(0) {
+}
+
+HeicCompositeStream::~HeicCompositeStream() {
+    // Call deinitCodec in case stream hasn't been deleted yet to avoid any
+    // memory/resource leak.
+    deinitCodec();
+
+    mInputAppSegmentBuffers.clear();
+    mCodecOutputBuffers.clear();
+
+    mAppSegmentStreamId = -1;
+    mAppSegmentSurfaceId = -1;
+    mAppSegmentConsumer.clear();
+    mAppSegmentSurface.clear();
+
+    mMainImageStreamId = -1;
+    mMainImageSurfaceId = -1;
+    mMainImageConsumer.clear();
+    mMainImageSurface.clear();
+}
+
+bool HeicCompositeStream::isHeicCompositeStream(const sp<Surface> &surface) {
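+    // A surface is treated as a HEIC composite output when it is BLOB-formatted
+    // with the HEIF dataspace.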
+    ANativeWindow *anw = surface.get();
+    status_t err;
+    int format;
+    if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+        String8 msg = String8::format("Failed to query Surface format: %s (%d)", strerror(-err),
+                err);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return false;
+    }
+
+    int dataspace;
+    if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE, &dataspace)) != OK) {
+        String8 msg = String8::format("Failed to query Surface dataspace: %s (%d)", strerror(-err),
+                err);
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return false;
+    }
+
+    return ((format == HAL_PIXEL_FORMAT_BLOB) && (dataspace == HAL_DATASPACE_HEIF));
+}
+
+status_t HeicCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
+        bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
+        camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+        std::vector<int> *surfaceIds, int /*streamSetId*/, bool /*isShared*/) {
+
+    sp<CameraDeviceBase> device = mDevice.promote();
+    if (!device.get()) {
+        ALOGE("%s: Invalid camera device!", __FUNCTION__);
+        return NO_INIT;
+    }
+
+    status_t res = initializeCodec(width, height, device);
+    if (res != OK) {
+        ALOGE("%s: Failed to initialize HEIC/HEVC codec: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return NO_INIT;
+    }
+
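+    // First internal stream: a CPU-consumed stream that carries the camera's JPEG
+    // APP segments (Exif and other metadata).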
+    sp<IGraphicBufferProducer> producer;
+    sp<IGraphicBufferConsumer> consumer;
+    BufferQueue::createBufferQueue(&producer, &consumer);
+    mAppSegmentConsumer = new CpuConsumer(consumer, 1);
+    mAppSegmentConsumer->setFrameAvailableListener(this);
+    mAppSegmentConsumer->setName(String8("Camera3-HeicComposite-AppSegmentStream"));
+    mAppSegmentSurface = new Surface(producer);
+
+    mStaticInfo = device->info();
+
+    res = device->createStream(mAppSegmentSurface, mAppSegmentMaxSize, 1, format,
+            kAppSegmentDataSpace, rotation, &mAppSegmentStreamId, physicalCameraId, surfaceIds);
+    if (res == OK) {
+        mAppSegmentSurfaceId = (*surfaceIds)[0];
+    } else {
+        ALOGE("%s: Failed to create JPEG App segment stream: %s (%d)", __FUNCTION__,
+                strerror(-res), res);
+        return res;
+    }
+
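+    // Second internal stream: the main image. Without grid tiling the codec's own
+    // input surface receives the frames directly; with tiling, YUV buffers are read
+    // by a CpuConsumer and copied into codec input buffers tile by tile.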
+    if (!mUseGrid) {
+        res = mCodec->createInputSurface(&producer);
+        if (res != OK) {
+            ALOGE("%s: Failed to create input surface for Heic codec: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+    } else {
+        BufferQueue::createBufferQueue(&producer, &consumer);
+        mMainImageConsumer = new CpuConsumer(consumer, 1);
+        mMainImageConsumer->setFrameAvailableListener(this);
+        mMainImageConsumer->setName(String8("Camera3-HeicComposite-HevcInputYUVStream"));
+    }
+    mMainImageSurface = new Surface(producer);
+
+    res = mCodec->start();
+    if (res != OK) {
+        ALOGE("%s: Failed to start codec: %s (%d)", __FUNCTION__,
+                strerror(-res), res);
+        return res;
+    }
+
+    std::vector<int> sourceSurfaceId;
+    // Use YUV_888 format if framework tiling is needed.
+    int srcStreamFmt = mUseGrid ? HAL_PIXEL_FORMAT_YCbCr_420_888 :
+            HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    res = device->createStream(mMainImageSurface, width, height, srcStreamFmt, kHeifDataSpace,
+            rotation, id, physicalCameraId, &sourceSurfaceId);
+    if (res == OK) {
+        mMainImageSurfaceId = sourceSurfaceId[0];
+        mMainImageStreamId = *id;
+    } else {
+        ALOGE("%s: Failed to create main image stream: %s (%d)", __FUNCTION__,
+                strerror(-res), res);
+        return res;
+    }
+
+    mOutputSurface = consumers[0];
+    res = registerCompositeStreamListener(getStreamId());
+    if (res != OK) {
+        ALOGE("%s: Failed to register HAL main image stream", __FUNCTION__);
+        return res;
+    }
+
+    initCopyRowFunction(width);
+    return res;
+}
+
+status_t HeicCompositeStream::deleteInternalStreams() {
+    requestExit();
+    auto res = join();
+    if (res != OK) {
+        ALOGE("%s: Failed to join with the main processing thread: %s (%d)", __FUNCTION__,
+                strerror(-res), res);
+    }
+
+    deinitCodec();
+
+    if (mAppSegmentStreamId >= 0) {
+        sp<CameraDeviceBase> device = mDevice.promote();
+        if (!device.get()) {
+            ALOGE("%s: Invalid camera device!", __FUNCTION__);
+            return NO_INIT;
+        }
+
+        res = device->deleteStream(mAppSegmentStreamId);
+        mAppSegmentStreamId = -1;
+    }
+
+    if (mOutputSurface != nullptr) {
+        mOutputSurface->disconnect(NATIVE_WINDOW_API_CAMERA);
+        mOutputSurface.clear();
+    }
+    return res;
+}
+
+void HeicCompositeStream::onBufferReleased(const BufferInfo& bufferInfo) {
+    Mutex::Autolock l(mMutex);
+
+    if (bufferInfo.mError) return;
+
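+    // Queue the timestamp of each main-image buffer returned by the HAL; codec
+    // output buffers are later matched to these timestamps in FIFO order.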
+    mCodecOutputBufferTimestamps.push(bufferInfo.mTimestamp);
+}
+
+// We need to get the settings early to handle the case where the codec output
+// arrives earlier than result metadata.
+void HeicCompositeStream::onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId,
+        const CameraMetadata& settings) {
+    ATRACE_ASYNC_BEGIN("HEIC capture", frameNumber);
+
+    Mutex::Autolock l(mMutex);
+    if (mErrorState || (streamId != getStreamId())) {
+        return;
+    }
+
+    mPendingCaptureResults.emplace(frameNumber, CameraMetadata());
+
+    camera_metadata_ro_entry entry;
+
+    int32_t orientation = 0;
+    entry = settings.find(ANDROID_JPEG_ORIENTATION);
+    if (entry.count == 1) {
+        orientation = entry.data.i32[0];
+    }
+
+    int32_t quality = kDefaultJpegQuality;
+    entry = settings.find(ANDROID_JPEG_QUALITY);
+    if (entry.count == 1) {
+        quality = entry.data.i32[0];
+    }
+
+    mSettingsByFrameNumber[frameNumber] = std::make_pair(orientation, quality);
+}
+
+void HeicCompositeStream::onFrameAvailable(const BufferItem& item) {
+    if (item.mDataSpace == static_cast<android_dataspace>(kAppSegmentDataSpace)) {
+        ALOGV("%s: JPEG APP segments buffer with ts: %" PRIu64 " ms. arrived!",
+                __func__, ns2ms(item.mTimestamp));
+
+        Mutex::Autolock l(mMutex);
+        if (!mErrorState) {
+            mInputAppSegmentBuffers.push_back(item.mTimestamp);
+            mInputReadyCondition.signal();
+        }
+    } else if (item.mDataSpace == kHeifDataSpace) {
+        ALOGV("%s: YUV_888 buffer with ts: %" PRIu64 " ms. arrived!",
+                __func__, ns2ms(item.mTimestamp));
+
+        Mutex::Autolock l(mMutex);
+        if (!mUseGrid) {
+            ALOGE("%s: YUV_888 internal stream is only supported for HEVC tiling",
+                    __FUNCTION__);
+            return;
+        }
+        if (!mErrorState) {
+            mInputYuvBuffers.push_back(item.mTimestamp);
+            mInputReadyCondition.signal();
+        }
+    } else {
+        ALOGE("%s: Unexpected data space: 0x%x", __FUNCTION__, item.mDataSpace);
+    }
+}
+
+status_t HeicCompositeStream::getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
+            const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/) {
+    if (compositeOutput == nullptr) {
+        return BAD_VALUE;
+    }
+
+    compositeOutput->clear();
+
+    bool useGrid, useHeic;
+    bool isSizeSupported = isSizeSupportedByHeifEncoder(
+            streamInfo.width, streamInfo.height, &useHeic, &useGrid, nullptr);
+    if (!isSizeSupported) {
+        // Size is not supported by either encoder.
+        return OK;
+    }
+
+    compositeOutput->insert(compositeOutput->end(), 2, streamInfo);
+
+    // JPEG APPS segments Blob stream info
+    (*compositeOutput)[0].width = calcAppSegmentMaxSize(ch);
+    (*compositeOutput)[0].height = 1;
+    (*compositeOutput)[0].format = HAL_PIXEL_FORMAT_BLOB;
+    (*compositeOutput)[0].dataSpace = kAppSegmentDataSpace;
+    (*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
+
+    // YUV/IMPLEMENTATION_DEFINED stream info
+    (*compositeOutput)[1].width = streamInfo.width;
+    (*compositeOutput)[1].height = streamInfo.height;
+    (*compositeOutput)[1].format = useGrid ? HAL_PIXEL_FORMAT_YCbCr_420_888 :
+            HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    (*compositeOutput)[1].dataSpace = kHeifDataSpace;
+    (*compositeOutput)[1].consumerUsage = useHeic ? GRALLOC_USAGE_HW_IMAGE_ENCODER :
+            useGrid ? GRALLOC_USAGE_SW_READ_OFTEN : GRALLOC_USAGE_HW_VIDEO_ENCODER;
+
+    return NO_ERROR;
+}
+
+bool HeicCompositeStream::isSizeSupportedByHeifEncoder(int32_t width, int32_t height,
+        bool* useHeic, bool* useGrid, int64_t* stall) {
+    static HeicEncoderInfoManager& heicManager = HeicEncoderInfoManager::getInstance();
+    return heicManager.isSizeSupported(width, height, useHeic, useGrid, stall);
+}
+
+bool HeicCompositeStream::isInMemoryTempFileSupported() {
+    int memfd = syscall(__NR_memfd_create, "HEIF-try-memfd", MFD_CLOEXEC);
+    if (memfd == -1) {
+        if (errno != ENOSYS) {
+            ALOGE("%s: Failed to create tmpfs file. errno %d", __FUNCTION__, errno);
+        }
+        return false;
+    }
+    close(memfd);
+    return true;
+}
+
+void HeicCompositeStream::onHeicOutputFrameAvailable(
+        const CodecOutputBufferInfo& outputBufferInfo) {
+    Mutex::Autolock l(mMutex);
+
+    ALOGV("%s: index %d, offset %d, size %d, time %" PRId64 ", flags 0x%x",
+            __FUNCTION__, outputBufferInfo.index, outputBufferInfo.offset,
+            outputBufferInfo.size, outputBufferInfo.timeUs, outputBufferInfo.flags);
+
+    if (!mErrorState) {
+        if ((outputBufferInfo.size > 0) &&
+                ((outputBufferInfo.flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) == 0)) {
+            mCodecOutputBuffers.push_back(outputBufferInfo);
+            mInputReadyCondition.signal();
+        } else {
+            mCodec->releaseOutputBuffer(outputBufferInfo.index);
+        }
+    } else {
+        mCodec->releaseOutputBuffer(outputBufferInfo.index);
+    }
+}
+
+void HeicCompositeStream::onHeicInputFrameAvailable(int32_t index) {
+    Mutex::Autolock l(mMutex);
+
+    if (!mUseGrid) {
+        ALOGE("%s: Codec YUV input mode must only be used for Hevc tiling mode", __FUNCTION__);
+        return;
+    }
+
+    mCodecInputBuffers.push_back(index);
+    mInputReadyCondition.signal();
+}
+
+void HeicCompositeStream::onHeicFormatChanged(sp<AMessage>& newFormat) {
+    if (newFormat == nullptr) {
+        ALOGE("%s: newFormat must not be null!", __FUNCTION__);
+        return;
+    }
+
+    Mutex::Autolock l(mMutex);
+
+    AString mime;
+    AString mimeHeic(MIMETYPE_IMAGE_ANDROID_HEIC);
+    newFormat->findString(KEY_MIME, &mime);
+    if (mime != mimeHeic) {
+        // For the HEVC codec, the keys below need to be filled in or overwritten so
+        // that the muxer can handle the output as a HEIC image.
+        newFormat->setString(KEY_MIME, mimeHeic);
+        newFormat->setInt32(KEY_WIDTH, mOutputWidth);
+        newFormat->setInt32(KEY_HEIGHT, mOutputHeight);
+        if (mUseGrid) {
+            newFormat->setInt32(KEY_TILE_WIDTH, mGridWidth);
+            newFormat->setInt32(KEY_TILE_HEIGHT, mGridHeight);
+            newFormat->setInt32(KEY_GRID_ROWS, mGridRows);
+            newFormat->setInt32(KEY_GRID_COLUMNS, mGridCols);
+        }
+    }
+    newFormat->setInt32(KEY_IS_DEFAULT, 1 /*isPrimary*/);
+
+    int32_t gridRows, gridCols;
+    if (newFormat->findInt32(KEY_GRID_ROWS, &gridRows) &&
+            newFormat->findInt32(KEY_GRID_COLUMNS, &gridCols)) {
+        mNumOutputTiles = gridRows * gridCols;
+    } else {
+        mNumOutputTiles = 1;
+    }
+
+    ALOGV("%s: mNumOutputTiles is %zu", __FUNCTION__, mNumOutputTiles);
+    mFormat = newFormat;
+}
+
+void HeicCompositeStream::onHeicCodecError() {
+    Mutex::Autolock l(mMutex);
+    mErrorState = true;
+}
+
+status_t HeicCompositeStream::configureStream() {
+    if (isRunning()) {
+        // Processing thread is already running, nothing more to do.
+        return NO_ERROR;
+    }
+
+    if (mOutputSurface.get() == nullptr) {
+        ALOGE("%s: No valid output surface set!", __FUNCTION__);
+        return NO_INIT;
+    }
+
+    auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
+    if (res != OK) {
+        ALOGE("%s: Unable to connect to native window for stream %d",
+                __FUNCTION__, mMainImageStreamId);
+        return res;
+    }
+
+    if ((res = native_window_set_buffers_format(mOutputSurface.get(), HAL_PIXEL_FORMAT_BLOB))
+            != OK) {
+        ALOGE("%s: Unable to configure stream buffer format for stream %d", __FUNCTION__,
+                mMainImageStreamId);
+        return res;
+    }
+
+    ANativeWindow *anwConsumer = mOutputSurface.get();
+    int maxConsumerBuffers;
+    if ((res = anwConsumer->query(anwConsumer, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
+                    &maxConsumerBuffers)) != OK) {
+        ALOGE("%s: Unable to query consumer undequeued"
+                " buffer count for stream %d", __FUNCTION__, mMainImageStreamId);
+        return res;
+    }
+
+    // Cannot use SourceSurface buffer count since it could be codec's 512*512 tile
+    // buffer count.
+    int maxProducerBuffers = 1;
+    if ((res = native_window_set_buffer_count(
+                    anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
+        ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mMainImageStreamId);
+        return res;
+    }
+
+    if ((res = native_window_set_buffers_dimensions(anwConsumer, mMaxHeicBufferSize, 1)) != OK) {
+        ALOGE("%s: Unable to set buffer dimension %zu x 1 for stream %d: %s (%d)",
+                __FUNCTION__, mMaxHeicBufferSize, mMainImageStreamId, strerror(-res), res);
+        return res;
+    }
+
+    run("HeicCompositeStreamProc");
+
+    return NO_ERROR;
+}
+
+status_t HeicCompositeStream::insertGbp(SurfaceMap* /*out*/outSurfaceMap,
+        Vector<int32_t>* /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) {
+    if (outSurfaceMap->find(mAppSegmentStreamId) == outSurfaceMap->end()) {
+        (*outSurfaceMap)[mAppSegmentStreamId] = std::vector<size_t>();
+        outputStreamIds->push_back(mAppSegmentStreamId);
+    }
+    (*outSurfaceMap)[mAppSegmentStreamId].push_back(mAppSegmentSurfaceId);
+
+    if (outSurfaceMap->find(mMainImageStreamId) == outSurfaceMap->end()) {
+        (*outSurfaceMap)[mMainImageStreamId] = std::vector<size_t>();
+        outputStreamIds->push_back(mMainImageStreamId);
+    }
+    (*outSurfaceMap)[mMainImageStreamId].push_back(mMainImageSurfaceId);
+
+    if (currentStreamId != nullptr) {
+        *currentStreamId = mMainImageStreamId;
+    }
+
+    return NO_ERROR;
+}
+
+void HeicCompositeStream::onShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp) {
+    Mutex::Autolock l(mMutex);
+    if (mErrorState) {
+        return;
+    }
+
+    if (mSettingsByFrameNumber.find(resultExtras.frameNumber) != mSettingsByFrameNumber.end()) {
+        mFrameNumberMap.emplace(resultExtras.frameNumber, timestamp);
+        mSettingsByTimestamp[timestamp] = mSettingsByFrameNumber[resultExtras.frameNumber];
+        mSettingsByFrameNumber.erase(resultExtras.frameNumber);
+        mInputReadyCondition.signal();
+    }
+}
+
+void HeicCompositeStream::compilePendingInputLocked() {
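+    // Gather everything that arrived since the last pass (per-capture settings,
+    // JPEG APP segment buffers, YUV buffers, codec output buffers, frame numbers
+    // and capture results) and file it under the corresponding capture timestamp.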
+    while (!mSettingsByTimestamp.empty()) {
+        auto it = mSettingsByTimestamp.begin();
+        mPendingInputFrames[it->first].orientation = it->second.first;
+        mPendingInputFrames[it->first].quality = it->second.second;
+        mSettingsByTimestamp.erase(it);
+    }
+
+    while (!mInputAppSegmentBuffers.empty() && !mAppSegmentBufferAcquired) {
+        CpuConsumer::LockedBuffer imgBuffer;
+        auto it = mInputAppSegmentBuffers.begin();
+        auto res = mAppSegmentConsumer->lockNextBuffer(&imgBuffer);
+        if (res == NOT_ENOUGH_DATA) {
+            // Cannot lock any more buffers.
+            break;
+        } else if ((res != OK) || (*it != imgBuffer.timestamp)) {
+            if (res != OK) {
+                ALOGE("%s: Error locking JPEG_APP_SEGMENTS image buffer: %s (%d)", __FUNCTION__,
+                        strerror(-res), res);
+            } else {
+                ALOGE("%s: Expecting JPEG_APP_SEGMENTS buffer with time stamp: %" PRId64
+                        " received buffer with time stamp: %" PRId64, __FUNCTION__,
+                        *it, imgBuffer.timestamp);
+            }
+            mPendingInputFrames[*it].error = true;
+            mInputAppSegmentBuffers.erase(it);
+            continue;
+        }
+
+        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
+                (mPendingInputFrames[imgBuffer.timestamp].error)) {
+            mAppSegmentConsumer->unlockBuffer(imgBuffer);
+        } else {
+            mPendingInputFrames[imgBuffer.timestamp].appSegmentBuffer = imgBuffer;
+            mAppSegmentBufferAcquired = true;
+        }
+        mInputAppSegmentBuffers.erase(it);
+    }
+
+    while (!mInputYuvBuffers.empty() && !mYuvBufferAcquired) {
+        CpuConsumer::LockedBuffer imgBuffer;
+        auto it = mInputYuvBuffers.begin();
+        auto res = mMainImageConsumer->lockNextBuffer(&imgBuffer);
+        if (res == NOT_ENOUGH_DATA) {
+            // Cannot lock any more buffers.
+            break;
+        } else if (res != OK) {
+            ALOGE("%s: Error locking YUV_888 image buffer: %s (%d)", __FUNCTION__,
+                    strerror(-res), res);
+            mPendingInputFrames[*it].error = true;
+            mInputYuvBuffers.erase(it);
+            continue;
+        } else if (*it != imgBuffer.timestamp) {
+            ALOGW("%s: Expecting YUV_888 buffer with time stamp: %" PRId64 " received buffer with "
+                    "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
+            mPendingInputFrames[*it].error = true;
+            mInputYuvBuffers.erase(it);
+            continue;
+        }
+
+        if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
+                (mPendingInputFrames[imgBuffer.timestamp].error)) {
+            mMainImageConsumer->unlockBuffer(imgBuffer);
+        } else {
+            mPendingInputFrames[imgBuffer.timestamp].yuvBuffer = imgBuffer;
+            mYuvBufferAcquired = true;
+        }
+        mInputYuvBuffers.erase(it);
+    }
+
+    while (!mCodecOutputBuffers.empty()) {
+        auto it = mCodecOutputBuffers.begin();
+        // Bitstream buffer timestamp doesn't necessarily directly correlate with input
+        // buffer timestamp. Assume encoder input to output is FIFO, use a queue
+        // to look up timestamp.
+        int64_t bufferTime = -1;
+        if (mCodecOutputBufferTimestamps.empty()) {
+            ALOGE("%s: Failed to find buffer timestamp for codec output buffer!", __FUNCTION__);
+        } else {
+            // Direct mapping between camera timestamp (in ns) and codec timestamp (in us).
+            bufferTime = mCodecOutputBufferTimestamps.front();
+            mOutputBufferCounter++;
+            if (mOutputBufferCounter == mNumOutputTiles) {
+                mCodecOutputBufferTimestamps.pop();
+                mOutputBufferCounter = 0;
+            }
+
+            mPendingInputFrames[bufferTime].codecOutputBuffers.push_back(*it);
+        }
+        mCodecOutputBuffers.erase(it);
+    }
+
+    while (!mFrameNumberMap.empty()) {
+        auto it = mFrameNumberMap.begin();
+        mPendingInputFrames[it->second].frameNumber = it->first;
+        mFrameNumberMap.erase(it);
+    }
+
+    while (!mCaptureResults.empty()) {
+        auto it = mCaptureResults.begin();
+        // Negative timestamp indicates that something went wrong during the capture result
+        // collection process.
+        if (it->first >= 0) {
+            if (mPendingInputFrames[it->first].frameNumber == std::get<0>(it->second)) {
+                mPendingInputFrames[it->first].result =
+                        std::make_unique<CameraMetadata>(std::get<1>(it->second));
+            } else {
+                ALOGE("%s: Capture result frameNumber/timestamp mapping changed between "
+                        "shutter and capture result!", __FUNCTION__);
+            }
+        }
+        mCaptureResults.erase(it);
+    }
+
+    // mErrorFrameNumbers stores frame number of dropped buffers.
+    auto it = mErrorFrameNumbers.begin();
+    while (it != mErrorFrameNumbers.end()) {
+        bool frameFound = false;
+        for (auto &inputFrame : mPendingInputFrames) {
+            if (inputFrame.second.frameNumber == *it) {
+                inputFrame.second.error = true;
+                frameFound = true;
+                break;
+            }
+        }
+
+        if (frameFound) {
+            it = mErrorFrameNumbers.erase(it);
+        } else {
+            ALOGW("%s: Not able to find failing input with frame number: %" PRId64, __FUNCTION__,
+                    *it);
+            it++;
+        }
+    }
+
+    // Distribute codec input buffers to be filled out from YUV output
+    for (auto it = mPendingInputFrames.begin();
+            it != mPendingInputFrames.end() && mCodecInputBuffers.size() > 0; it++) {
+        InputFrame& inputFrame(it->second);
+        if (inputFrame.codecInputCounter < mGridRows * mGridCols) {
+            // Available input tiles that are required for the current input
+            // image.
+            size_t newInputTiles = std::min(mCodecInputBuffers.size(),
+                    mGridRows * mGridCols - inputFrame.codecInputCounter);
+            for (size_t i = 0; i < newInputTiles; i++) {
+                CodecInputBufferInfo inputInfo =
+                        { mCodecInputBuffers[0], mGridTimestampUs++, inputFrame.codecInputCounter };
+                inputFrame.codecInputBuffers.push_back(inputInfo);
+
+                mCodecInputBuffers.erase(mCodecInputBuffers.begin());
+                inputFrame.codecInputCounter++;
+            }
+            break;
+        }
+    }
+}
+
+bool HeicCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*out*/) {
+    if (currentTs == nullptr) {
+        return false;
+    }
+
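+    // A pending capture is ready once any of its pieces can make progress: an
+    // unwritten APP segment with its capture result, pending codec output tiles,
+    // or a YUV buffer with available codec input buffers.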
+    bool newInputAvailable = false;
+    for (const auto& it : mPendingInputFrames) {
+        bool appSegmentReady = (it.second.appSegmentBuffer.data != nullptr) &&
+                !it.second.appSegmentWritten && it.second.result != nullptr;
+        bool codecOutputReady = !it.second.codecOutputBuffers.empty();
+        bool codecInputReady = (it.second.yuvBuffer.data != nullptr) &&
+                (!it.second.codecInputBuffers.empty());
+        if ((!it.second.error) &&
+                (it.first < *currentTs) &&
+                (appSegmentReady || codecOutputReady || codecInputReady)) {
+            *currentTs = it.first;
+            newInputAvailable = true;
+            break;
+        }
+    }
+
+    return newInputAvailable;
+}
+
+int64_t HeicCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*out*/) {
+    int64_t res = -1;
+    if (currentTs == nullptr) {
+        return res;
+    }
+
+    for (const auto& it : mPendingInputFrames) {
+        if (it.second.error && !it.second.errorNotified && (it.first < *currentTs)) {
+            *currentTs = it.first;
+            res = it.second.frameNumber;
+            break;
+        }
+    }
+
+    return res;
+}
+
+status_t HeicCompositeStream::processInputFrame(nsecs_t timestamp,
+        InputFrame &inputFrame) {
+    ATRACE_CALL();
+    status_t res = OK;
+
+    bool appSegmentReady = inputFrame.appSegmentBuffer.data != nullptr &&
+            !inputFrame.appSegmentWritten && inputFrame.result != nullptr;
+    bool codecOutputReady = inputFrame.codecOutputBuffers.size() > 0;
+    bool codecInputReady = inputFrame.yuvBuffer.data != nullptr &&
+           !inputFrame.codecInputBuffers.empty();
+
+    if (!appSegmentReady && !codecOutputReady && !codecInputReady) {
+        ALOGW("%s: No valid appSegmentBuffer/codec input/outputBuffer available!", __FUNCTION__);
+        return OK;
+    }
+
+    // Handle inputs for Hevc tiling
+    if (codecInputReady) {
+        res = processCodecInputFrame(inputFrame);
+        if (res != OK) {
+            ALOGE("%s: Failed to process codec input frame: %s (%d)", __FUNCTION__,
+                    strerror(-res), res);
+            return res;
+        }
+    }
+
+    // Initialize and start muxer if not yet done so
+    if (inputFrame.muxer == nullptr) {
+        res = startMuxerForInputFrame(timestamp, inputFrame);
+        if (res != OK) {
+            ALOGE("%s: Failed to create and start muxer: %s (%d)", __FUNCTION__,
+                    strerror(-res), res);
+            return res;
+        }
+    }
+
+    // Write JPEG APP segments data to the muxer.
+    if (appSegmentReady && inputFrame.muxer != nullptr) {
+        res = processAppSegment(timestamp, inputFrame);
+        if (res != OK) {
+            ALOGE("%s: Failed to process JPEG APP segments: %s (%d)", __FUNCTION__,
+                    strerror(-res), res);
+            return res;
+        }
+    }
+
+    // Write media codec bitstream buffers to muxer.
+    while (!inputFrame.codecOutputBuffers.empty()) {
+        res = processOneCodecOutputFrame(timestamp, inputFrame);
+        if (res != OK) {
+            ALOGE("%s: Failed to process codec output frame: %s (%d)", __FUNCTION__,
+                    strerror(-res), res);
+            return res;
+        }
+    }
+
+    if (inputFrame.appSegmentWritten && inputFrame.pendingOutputTiles == 0) {
+        res = processCompletedInputFrame(timestamp, inputFrame);
+        if (res != OK) {
+            ALOGE("%s: Failed to process completed input frame: %s (%d)", __FUNCTION__,
+                    strerror(-res), res);
+            return res;
+        }
+    }
+
+    return res;
+}
+
+status_t HeicCompositeStream::startMuxerForInputFrame(nsecs_t timestamp, InputFrame &inputFrame) {
+    sp<ANativeWindow> outputANW = mOutputSurface;
+    if (inputFrame.codecOutputBuffers.size() == 0) {
+        // No codec output buffer has been generated yet. Continue to wait.
+        return OK;
+    }
+
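+    // Dequeue the client-facing output buffer up front; the HEIF file itself is
+    // assembled in an in-memory (memfd) file created below.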
+    auto res = outputANW->dequeueBuffer(mOutputSurface.get(), &inputFrame.anb, &inputFrame.fenceFd);
+    if (res != OK) {
+        ALOGE("%s: Error retrieving output buffer: %s (%d)", __FUNCTION__, strerror(-res),
+                res);
+        return res;
+    }
+
+    // Combine current thread id, stream id and timestamp to uniquely identify image.
+    std::ostringstream tempOutputFile;
+    tempOutputFile << "HEIF-" << pthread_self() << "-"
+            << getStreamId() << "-" << timestamp;
+    inputFrame.fileFd = syscall(__NR_memfd_create, tempOutputFile.str().c_str(), MFD_CLOEXEC);
+    if (inputFrame.fileFd < 0) {
+        ALOGE("%s: Failed to create file %s. Error no is %d", __FUNCTION__,
+                tempOutputFile.str().c_str(), errno);
+        return NO_INIT;
+    }
+    inputFrame.muxer = new MediaMuxer(inputFrame.fileFd, MediaMuxer::OUTPUT_FORMAT_HEIF);
+    if (inputFrame.muxer == nullptr) {
+        ALOGE("%s: Failed to create MediaMuxer for file fd %d",
+                __FUNCTION__, inputFrame.fileFd);
+        return NO_INIT;
+    }
+
+    res = inputFrame.muxer->setOrientationHint(inputFrame.orientation);
+    if (res != OK) {
+        ALOGE("%s: Failed to setOrientationHint: %s (%d)", __FUNCTION__,
+                strerror(-res), res);
+        return res;
+    }
+    // Set encoder quality
+    {
+        sp<AMessage> qualityParams = new AMessage;
+        qualityParams->setInt32(PARAMETER_KEY_VIDEO_BITRATE, inputFrame.quality);
+        res = mCodec->setParameters(qualityParams);
+        if (res != OK) {
+            ALOGE("%s: Failed to set codec quality: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+    }
+
+    ssize_t trackId = inputFrame.muxer->addTrack(mFormat);
+    if (trackId < 0) {
+        ALOGE("%s: Failed to addTrack to the muxer: %zd", __FUNCTION__, trackId);
+        return NO_INIT;
+    }
+
+    inputFrame.trackIndex = trackId;
+    inputFrame.pendingOutputTiles = mNumOutputTiles;
+
+    res = inputFrame.muxer->start();
+    if (res != OK) {
+        ALOGE("%s: Failed to start MediaMuxer: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+
+    return OK;
+}
+
+status_t HeicCompositeStream::processAppSegment(nsecs_t timestamp, InputFrame &inputFrame) {
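+    // Locate the JPEG APP segments in the BLOB buffer, regenerate the APP1 (Exif)
+    // segment from the capture result, and write the combined segment data to the
+    // muxer as muxer data (BUFFER_FLAG_MUXER_DATA).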
+    size_t app1Size = 0;
+    auto appSegmentSize = findAppSegmentsSize(inputFrame.appSegmentBuffer.data,
+            inputFrame.appSegmentBuffer.width * inputFrame.appSegmentBuffer.height,
+            &app1Size);
+    ALOGV("%s: appSegmentSize is %zu, width %d, height %d, app1Size %zu", __FUNCTION__,
+          appSegmentSize, inputFrame.appSegmentBuffer.width,
+          inputFrame.appSegmentBuffer.height, app1Size);
+    if (appSegmentSize == 0) {
+        ALOGE("%s: Failed to find JPEG APP segment size", __FUNCTION__);
+        return NO_INIT;
+    }
+
+    std::unique_ptr<ExifUtils> exifUtils(ExifUtils::create());
+    auto exifRes = exifUtils->initialize(inputFrame.appSegmentBuffer.data, app1Size);
+    if (!exifRes) {
+        ALOGE("%s: Failed to initialize ExifUtils object!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    exifRes = exifUtils->setFromMetadata(*inputFrame.result, mStaticInfo,
+            mOutputWidth, mOutputHeight);
+    if (!exifRes) {
+        ALOGE("%s: Failed to set Exif tags using metadata and main image sizes", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    exifRes = exifUtils->setOrientation(inputFrame.orientation);
+    if (!exifRes) {
+        ALOGE("%s: ExifUtils failed to set orientation", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    exifRes = exifUtils->generateApp1();
+    if (!exifRes) {
+        ALOGE("%s: ExifUtils failed to generate APP1 segment", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    unsigned int newApp1Length = exifUtils->getApp1Length();
+    const uint8_t *newApp1Segment = exifUtils->getApp1Buffer();
+
+    // Assemble the APP1 marker buffer required by MediaCodec.
+    uint8_t kExifApp1Marker[] = {'E', 'x', 'i', 'f', 0xFF, 0xE1, 0x00, 0x00};
+    kExifApp1Marker[6] = static_cast<uint8_t>(newApp1Length >> 8);
+    kExifApp1Marker[7] = static_cast<uint8_t>(newApp1Length & 0xFF);
+    size_t appSegmentBufferSize = sizeof(kExifApp1Marker) +
+            appSegmentSize - app1Size + newApp1Length;
+    uint8_t* appSegmentBuffer = new uint8_t[appSegmentBufferSize];
+    memcpy(appSegmentBuffer, kExifApp1Marker, sizeof(kExifApp1Marker));
+    memcpy(appSegmentBuffer + sizeof(kExifApp1Marker), newApp1Segment, newApp1Length);
+    if (appSegmentSize - app1Size > 0) {
+        memcpy(appSegmentBuffer + sizeof(kExifApp1Marker) + newApp1Length,
+                inputFrame.appSegmentBuffer.data + app1Size, appSegmentSize - app1Size);
+    }
+
+    sp<ABuffer> aBuffer = new ABuffer(appSegmentBuffer, appSegmentBufferSize);
+    auto res = inputFrame.muxer->writeSampleData(aBuffer, inputFrame.trackIndex,
+            timestamp, MediaCodec::BUFFER_FLAG_MUXER_DATA);
+    delete[] appSegmentBuffer;
+
+    if (res != OK) {
+        ALOGE("%s: Failed to write JPEG APP segments to muxer: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return res;
+    }
+    inputFrame.appSegmentWritten = true;
+
+    return OK;
+}
+
+status_t HeicCompositeStream::processCodecInputFrame(InputFrame &inputFrame) {
+    for (auto& inputBuffer : inputFrame.codecInputBuffers) {
+        sp<MediaCodecBuffer> buffer;
+        auto res = mCodec->getInputBuffer(inputBuffer.index, &buffer);
+        if (res != OK) {
+            ALOGE("%s: Error getting codec input buffer: %s (%d)", __FUNCTION__,
+                    strerror(-res), res);
+            return res;
+        }
+
+        // Copy one tile from source to destination.
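+        // Tiles are laid out row-major; tiles on the right and bottom edges
+        // are clamped to the image bounds. E.g. for a 4032x3024 image with a
+        // 512x512 grid (8x6 tiles), the last column is only
+        // 4032 - 7 * 512 = 448 pixels wide.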
+        size_t tileX = inputBuffer.tileIndex % mGridCols;
+        size_t tileY = inputBuffer.tileIndex / mGridCols;
+        size_t top = mGridHeight * tileY;
+        size_t left = mGridWidth * tileX;
+        size_t width = (tileX == static_cast<size_t>(mGridCols) - 1) ?
+                mOutputWidth - tileX * mGridWidth : mGridWidth;
+        size_t height = (tileY == static_cast<size_t>(mGridRows) - 1) ?
+                mOutputHeight - tileY * mGridHeight : mGridHeight;
+        ALOGV("%s: inputBuffer tileIndex [%zu, %zu], top %zu, left %zu, width %zu, height %zu",
+                __FUNCTION__, tileX, tileY, top, left, width, height);
+
+        res = copyOneYuvTile(buffer, inputFrame.yuvBuffer, top, left, width, height);
+        if (res != OK) {
+            ALOGE("%s: Failed to copy YUV tile %s (%d)", __FUNCTION__,
+                    strerror(-res), res);
+            return res;
+        }
+
+        res = mCodec->queueInputBuffer(inputBuffer.index, 0, buffer->capacity(),
+                inputBuffer.timeUs, 0, nullptr /*errorDetailMsg*/);
+        if (res != OK) {
+            ALOGE("%s: Failed to queueInputBuffer to Codec: %s (%d)",
+                    __FUNCTION__, strerror(-res), res);
+            return res;
+        }
+    }
+
+    inputFrame.codecInputBuffers.clear();
+    return OK;
+}
+
+status_t HeicCompositeStream::processOneCodecOutputFrame(nsecs_t timestamp,
+        InputFrame &inputFrame) {
+    auto it = inputFrame.codecOutputBuffers.begin();
+    sp<MediaCodecBuffer> buffer;
+    status_t res = mCodec->getOutputBuffer(it->index, &buffer);
+    if (res != OK) {
+        ALOGE("%s: Error getting Heic codec output buffer at index %d: %s (%d)",
+                __FUNCTION__, it->index, strerror(-res), res);
+        return res;
+    }
+    if (buffer == nullptr) {
+        ALOGE("%s: Invalid Heic codec output buffer at index %d",
+                __FUNCTION__, it->index);
+        return BAD_VALUE;
+    }
+
+    sp<ABuffer> aBuffer = new ABuffer(buffer->data(), buffer->size());
+    res = inputFrame.muxer->writeSampleData(
+            aBuffer, inputFrame.trackIndex, timestamp, 0 /*flags*/);
+    if (res != OK) {
+        ALOGE("%s: Failed to write buffer index %d to muxer: %s (%d)",
+                __FUNCTION__, it->index, strerror(-res), res);
+        return res;
+    }
+
+    mCodec->releaseOutputBuffer(it->index);
+    if (inputFrame.pendingOutputTiles == 0) {
+        ALOGW("%s: Codec generated more tiles than expected!", __FUNCTION__);
+    } else {
+        inputFrame.pendingOutputTiles--;
+    }
+
+    inputFrame.codecOutputBuffers.erase(inputFrame.codecOutputBuffers.begin());
+    return OK;
+}
+
+status_t HeicCompositeStream::processCompletedInputFrame(nsecs_t timestamp,
+        InputFrame &inputFrame) {
+    sp<ANativeWindow> outputANW = mOutputSurface;
+    inputFrame.muxer->stop();
+
+    // Copy the content of the file to memory.
+    sp<GraphicBuffer> gb = GraphicBuffer::from(inputFrame.anb);
+    void* dstBuffer;
+    auto res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, inputFrame.fenceFd);
+    if (res != OK) {
+        ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
+                strerror(-res), res);
+        return res;
+    }
+
+    off_t fSize = lseek(inputFrame.fileFd, 0, SEEK_END);
+    if (static_cast<size_t>(fSize) > mMaxHeicBufferSize - sizeof(CameraBlob)) {
+        ALOGE("%s: Error: MediaMuxer output size %ld is larger than buffer sizer %zu",
+                __FUNCTION__, fSize, mMaxHeicBufferSize - sizeof(CameraBlob));
+        return BAD_VALUE;
+    }
+
+    lseek(inputFrame.fileFd, 0, SEEK_SET);
+    ssize_t bytesRead = read(inputFrame.fileFd, dstBuffer, fSize);
+    if (bytesRead < fSize) {
+        ALOGE("%s: Only %zd of %ld bytes read", __FUNCTION__, bytesRead, fSize);
+        return BAD_VALUE;
+    }
+
+    close(inputFrame.fileFd);
+    inputFrame.fileFd = -1;
+
+    // Fill in HEIC header
+    uint8_t *header = static_cast<uint8_t*>(dstBuffer) + mMaxHeicBufferSize - sizeof(CameraBlob);
+    struct CameraBlob *blobHeader = (struct CameraBlob *)header;
+    // Must be in sync with CAMERA3_HEIC_BLOB_ID in android_media_Utils.cpp
+    blobHeader->blobId = static_cast<CameraBlobId>(0x00FE);
+    blobHeader->blobSize = fSize;
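+    // The transport header sits at the very end of the output buffer; readers
+    // are expected to locate it there and use blobSize to find the length of
+    // the HEIC payload at the start of the buffer.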
+
+    res = native_window_set_buffers_timestamp(mOutputSurface.get(), timestamp);
+    if (res != OK) {
+        ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
+               __FUNCTION__, getStreamId(), strerror(-res), res);
+        return res;
+    }
+
+    res = outputANW->queueBuffer(mOutputSurface.get(), inputFrame.anb, /*fence*/ -1);
+    if (res != OK) {
+        ALOGE("%s: Failed to queueBuffer to Heic stream: %s (%d)", __FUNCTION__,
+                strerror(-res), res);
+        return res;
+    }
+    inputFrame.anb = nullptr;
+
+    ATRACE_ASYNC_END("HEIC capture", inputFrame.frameNumber);
+    return OK;
+}
+
+
+void HeicCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/) {
+    if (inputFrame == nullptr) {
+        return;
+    }
+
+    if (inputFrame->appSegmentBuffer.data != nullptr) {
+        mAppSegmentConsumer->unlockBuffer(inputFrame->appSegmentBuffer);
+        inputFrame->appSegmentBuffer.data = nullptr;
+        mAppSegmentBufferAcquired = false;
+    }
+
+    while (!inputFrame->codecOutputBuffers.empty()) {
+        auto it = inputFrame->codecOutputBuffers.begin();
+        ALOGV("%s: releaseOutputBuffer index %d", __FUNCTION__, it->index);
+        mCodec->releaseOutputBuffer(it->index);
+        inputFrame->codecOutputBuffers.erase(it);
+    }
+
+    if (inputFrame->yuvBuffer.data != nullptr) {
+        mMainImageConsumer->unlockBuffer(inputFrame->yuvBuffer);
+        inputFrame->yuvBuffer.data = nullptr;
+        mYuvBufferAcquired = false;
+    }
+
+    while (!inputFrame->codecInputBuffers.empty()) {
+        auto it = inputFrame->codecInputBuffers.begin();
+        inputFrame->codecInputBuffers.erase(it);
+    }
+
+    if ((inputFrame->error || mErrorState) && !inputFrame->errorNotified) {
+        notifyError(inputFrame->frameNumber);
+        inputFrame->errorNotified = true;
+    }
+
+    if (inputFrame->fileFd >= 0) {
+        close(inputFrame->fileFd);
+        inputFrame->fileFd = -1;
+    }
+
+    if (inputFrame->anb != nullptr) {
+        sp<ANativeWindow> outputANW = mOutputSurface;
+        outputANW->cancelBuffer(mOutputSurface.get(), inputFrame->anb, /*fence*/ -1);
+        inputFrame->anb = nullptr;
+    }
+}
+
+void HeicCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
+    auto it = mPendingInputFrames.begin();
+    while (it != mPendingInputFrames.end()) {
+        if (it->first <= currentTs) {
+            releaseInputFrameLocked(&it->second);
+            it = mPendingInputFrames.erase(it);
+        } else {
+            it++;
+        }
+    }
+}
+
+status_t HeicCompositeStream::initializeCodec(uint32_t width, uint32_t height,
+        const sp<CameraDeviceBase>& cameraDevice) {
+    ALOGV("%s", __FUNCTION__);
+
+    bool useGrid = false;
+    bool isSizeSupported = isSizeSupportedByHeifEncoder(width, height,
+            &mUseHeic, &useGrid, nullptr);
+    if (!isSizeSupported) {
+        ALOGE("%s: Encoder doesnt' support size %u x %u!",
+                __FUNCTION__, width, height);
+        return BAD_VALUE;
+    }
+
+    // Create Looper for MediaCodec.
+    auto desiredMime = mUseHeic ? MIMETYPE_IMAGE_ANDROID_HEIC : MIMETYPE_VIDEO_HEVC;
+    mCodecLooper = new ALooper;
+    mCodecLooper->setName("Camera3-HeicComposite-MediaCodecLooper");
+    status_t res = mCodecLooper->start(
+            false,   // runOnCallingThread
+            false,    // canCallJava
+            PRIORITY_AUDIO);
+    if (res != OK) {
+        ALOGE("%s: Failed to start codec looper: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return NO_INIT;
+    }
+
+    // Create HEIC/HEVC codec.
+    mCodec = MediaCodec::CreateByType(mCodecLooper, desiredMime, true /*encoder*/);
+    if (mCodec == nullptr) {
+        ALOGE("%s: Failed to create codec for %s", __FUNCTION__, desiredMime);
+        return NO_INIT;
+    }
+
+    // Create Looper and handler for Codec callback.
+    mCodecCallbackHandler = new CodecCallbackHandler(this);
+    if (mCodecCallbackHandler == nullptr) {
+        ALOGE("%s: Failed to create codec callback handler", __FUNCTION__);
+        return NO_MEMORY;
+    }
+    mCallbackLooper = new ALooper;
+    mCallbackLooper->setName("Camera3-HeicComposite-MediaCodecCallbackLooper");
+    res = mCallbackLooper->start(
+            false,   // runOnCallingThread
+            false,    // canCallJava
+            PRIORITY_AUDIO);
+    if (res != OK) {
+        ALOGE("%s: Failed to start media callback looper: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+        return NO_INIT;
+    }
+    mCallbackLooper->registerHandler(mCodecCallbackHandler);
+
+    mAsyncNotify = new AMessage(kWhatCallbackNotify, mCodecCallbackHandler);
+    res = mCodec->setCallback(mAsyncNotify);
+    if (res != OK) {
+        ALOGE("%s: Failed to set MediaCodec callback: %s (%d)", __FUNCTION__,
+                strerror(-res), res);
+        return res;
+    }
+
+    // Create output format and configure the Codec.
+    sp<AMessage> outputFormat = new AMessage();
+    outputFormat->setString(KEY_MIME, desiredMime);
+    outputFormat->setInt32(KEY_BITRATE_MODE, BITRATE_MODE_CQ);
+    outputFormat->setInt32(KEY_QUALITY, kDefaultJpegQuality);
+    // Ask codec to skip timestamp check and encode all frames.
+    outputFormat->setInt64("max-pts-gap-to-encoder", kNoFrameDropMaxPtsGap);
+
+    int32_t gridWidth, gridHeight, gridRows, gridCols;
+    if (useGrid || mUseHeic) {
+        gridWidth = HeicEncoderInfoManager::kGridWidth;
+        gridHeight = HeicEncoderInfoManager::kGridHeight;
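+        // Ceiling division: e.g. a 4032x3024 capture with 512x512 tiles needs
+        // (3024 + 511) / 512 = 6 rows and (4032 + 511) / 512 = 8 columns.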
+        gridRows = (height + gridHeight - 1)/gridHeight;
+        gridCols = (width + gridWidth - 1)/gridWidth;
+
+        if (mUseHeic) {
+            outputFormat->setInt32(KEY_TILE_WIDTH, gridWidth);
+            outputFormat->setInt32(KEY_TILE_HEIGHT, gridHeight);
+            outputFormat->setInt32(KEY_GRID_COLUMNS, gridCols);
+            outputFormat->setInt32(KEY_GRID_ROWS, gridRows);
+        }
+
+    } else {
+        gridWidth = width;
+        gridHeight = height;
+        gridRows = 1;
+        gridCols = 1;
+    }
+
+    outputFormat->setInt32(KEY_WIDTH, !useGrid ? width : gridWidth);
+    outputFormat->setInt32(KEY_HEIGHT, !useGrid ? height : gridHeight);
+    outputFormat->setInt32(KEY_I_FRAME_INTERVAL, 0);
+    outputFormat->setInt32(KEY_COLOR_FORMAT,
+            useGrid ? COLOR_FormatYUV420Flexible : COLOR_FormatSurface);
+    outputFormat->setInt32(KEY_FRAME_RATE, gridRows * gridCols);
+    // This only serves as a hint to the encoder when encoding is not real-time.
+    outputFormat->setInt32(KEY_OPERATING_RATE, useGrid ? kGridOpRate : kNoGridOpRate);
+
+    res = mCodec->configure(outputFormat, nullptr /*nativeWindow*/,
+            nullptr /*crypto*/, CONFIGURE_FLAG_ENCODE);
+    if (res != OK) {
+        ALOGE("%s: Failed to configure codec: %s (%d)", __FUNCTION__,
+                strerror(-res), res);
+        return res;
+    }
+
+    mGridWidth = gridWidth;
+    mGridHeight = gridHeight;
+    mGridRows = gridRows;
+    mGridCols = gridCols;
+    mUseGrid = useGrid;
+    mOutputWidth = width;
+    mOutputHeight = height;
+    mAppSegmentMaxSize = calcAppSegmentMaxSize(cameraDevice->info());
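+    // Worst-case output buffer: uncompressed YUV420 size (1.5 bytes per pixel)
+    // plus the maximum APP segment payload; the compressed HEIC data is not
+    // expected to exceed this.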
+    mMaxHeicBufferSize = mOutputWidth * mOutputHeight * 3 / 2 + mAppSegmentMaxSize;
+
+    return OK;
+}
+
+void HeicCompositeStream::deinitCodec() {
+    ALOGV("%s", __FUNCTION__);
+    if (mCodec != nullptr) {
+        mCodec->stop();
+        mCodec->release();
+        mCodec.clear();
+    }
+
+    if (mCodecLooper != nullptr) {
+        mCodecLooper->stop();
+        mCodecLooper.clear();
+    }
+
+    if (mCallbackLooper != nullptr) {
+        mCallbackLooper->stop();
+        mCallbackLooper.clear();
+    }
+
+    mAsyncNotify.clear();
+    mFormat.clear();
+}
+
+// Return the size of the complete list of APP segments; 0 indicates failure.
+size_t HeicCompositeStream::findAppSegmentsSize(const uint8_t* appSegmentBuffer,
+        size_t maxSize, size_t *app1SegmentSize) {
+    if (appSegmentBuffer == nullptr || app1SegmentSize == nullptr) {
+        ALOGE("%s: Invalid input appSegmentBuffer %p, app1SegmentSize %p",
+                __FUNCTION__, appSegmentBuffer, app1SegmentSize);
+        return 0;
+    }
+
+    size_t expectedSize = 0;
+    // First check for EXIF transport header at the end of the buffer
+    const uint8_t *header = appSegmentBuffer + (maxSize - sizeof(struct CameraBlob));
+    const struct CameraBlob *blob = (const struct CameraBlob*)(header);
+    if (blob->blobId != CameraBlobId::JPEG_APP_SEGMENTS) {
+        ALOGE("%s: Invalid EXIF blobId %hu", __FUNCTION__, blob->blobId);
+        return 0;
+    }
+
+    expectedSize = blob->blobSize;
+    if (expectedSize == 0 || expectedSize > maxSize - sizeof(struct CameraBlob)) {
+        ALOGE("%s: Invalid blobSize %zu.", __FUNCTION__, expectedSize);
+        return 0;
+    }
+
+    uint32_t totalSize = 0;
+
+    // Verify APP1 marker (mandatory)
+    uint8_t app1Marker[] = {0xFF, 0xE1};
+    if (memcmp(appSegmentBuffer, app1Marker, sizeof(app1Marker))) {
+        ALOGE("%s: Invalid APP1 marker: %x, %x", __FUNCTION__,
+                appSegmentBuffer[0], appSegmentBuffer[1]);
+        return 0;
+    }
+    totalSize += sizeof(app1Marker);
+
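+    // Per the JPEG spec the 2-byte segment length includes itself, so adding
+    // app1Size to totalSize advances past the end of the APP1 segment.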
+    uint16_t app1Size = (static_cast<uint16_t>(appSegmentBuffer[totalSize]) << 8) +
+            appSegmentBuffer[totalSize+1];
+    totalSize += app1Size;
+
+    ALOGV("%s: Expected APP segments size %zu, APP1 segment size %u",
+            __FUNCTION__, expectedSize, app1Size);
+    while (totalSize < expectedSize) {
+        if (appSegmentBuffer[totalSize] != 0xFF ||
+                appSegmentBuffer[totalSize+1] <= 0xE1 ||
+                appSegmentBuffer[totalSize+1] > 0xEF) {
+            // Invalid APPn marker
+            ALOGE("%s: Invalid APPn marker: %x, %x", __FUNCTION__,
+                    appSegmentBuffer[totalSize], appSegmentBuffer[totalSize+1]);
+            return 0;
+        }
+        totalSize += 2;
+
+        uint16_t appnSize = (static_cast<uint16_t>(appSegmentBuffer[totalSize]) << 8) +
+                appSegmentBuffer[totalSize+1];
+        totalSize += appnSize;
+    }
+
+    if (totalSize != expectedSize) {
+        ALOGE("%s: Invalid JPEG APP segments: totalSize %u vs expected size %zu",
+                __FUNCTION__, totalSize, expectedSize);
+        return 0;
+    }
+
+    *app1SegmentSize = app1Size + sizeof(app1Marker);
+    return expectedSize;
+}
+
+int64_t HeicCompositeStream::findTimestampInNsLocked(int64_t timeInUs) {
+    for (const auto& fn : mFrameNumberMap) {
+        if (timeInUs == ns2us(fn.second)) {
+            return fn.second;
+        }
+    }
+    for (const auto& inputFrame : mPendingInputFrames) {
+        if (timeInUs == ns2us(inputFrame.first)) {
+            return inputFrame.first;
+        }
+    }
+    return -1;
+}
+
+status_t HeicCompositeStream::copyOneYuvTile(sp<MediaCodecBuffer>& codecBuffer,
+        const CpuConsumer::LockedBuffer& yuvBuffer,
+        size_t top, size_t left, size_t width, size_t height) {
+    ATRACE_CALL();
+
+    // Get stride information for codecBuffer
+    sp<ABuffer> imageData;
+    if (!codecBuffer->meta()->findBuffer("image-data", &imageData)) {
+        ALOGE("%s: Codec input buffer is not for image data!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    if (imageData->size() != sizeof(MediaImage2)) {
+        ALOGE("%s: Invalid codec input image size %zu, expected %zu",
+                __FUNCTION__, imageData->size(), sizeof(MediaImage2));
+        return BAD_VALUE;
+    }
+    MediaImage2* imageInfo = reinterpret_cast<MediaImage2*>(imageData->data());
+    if (imageInfo->mType != MediaImage2::MEDIA_IMAGE_TYPE_YUV ||
+            imageInfo->mBitDepth != 8 ||
+            imageInfo->mBitDepthAllocated != 8 ||
+            imageInfo->mNumPlanes != 3) {
+        ALOGE("%s: Invalid codec input image info: mType %d, mBitDepth %d, "
+                "mBitDepthAllocated %d, mNumPlanes %d!", __FUNCTION__,
+                imageInfo->mType, imageInfo->mBitDepth,
+                imageInfo->mBitDepthAllocated, imageInfo->mNumPlanes);
+        return BAD_VALUE;
+    }
+
+    ALOGV("%s: yuvBuffer chromaStep %d, chromaStride %d",
+            __FUNCTION__, yuvBuffer.chromaStep, yuvBuffer.chromaStride);
+    ALOGV("%s: U offset %u, V offset %u, U rowInc %d, V rowInc %d, U colInc %d, V colInc %d",
+            __FUNCTION__, imageInfo->mPlane[MediaImage2::U].mOffset,
+            imageInfo->mPlane[MediaImage2::V].mOffset,
+            imageInfo->mPlane[MediaImage2::U].mRowInc,
+            imageInfo->mPlane[MediaImage2::V].mRowInc,
+            imageInfo->mPlane[MediaImage2::U].mColInc,
+            imageInfo->mPlane[MediaImage2::V].mColInc);
+
+    // Y
+    for (auto row = top; row < top+height; row++) {
+        uint8_t *dst = codecBuffer->data() + imageInfo->mPlane[MediaImage2::Y].mOffset +
+                imageInfo->mPlane[MediaImage2::Y].mRowInc * (row - top);
+        mFnCopyRow(yuvBuffer.data+row*yuvBuffer.stride+left, dst, width);
+    }
+
+    // U is Cb, V is Cr
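+    // Determine the codec's chroma layout from the plane descriptors: semi-
+    // planar (e.g. NV12/NV21) means U and V are interleaved, with an offset
+    // difference of 1 and column increments of 2; planar (e.g. I420/YV12)
+    // means U and V sit in separate planes (offsets at least one full chroma
+    // plane apart) with column increments of 1.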
+    bool codecUPlaneFirst = imageInfo->mPlane[MediaImage2::V].mOffset >
+            imageInfo->mPlane[MediaImage2::U].mOffset;
+    uint32_t codecUvOffsetDiff = codecUPlaneFirst ?
+            imageInfo->mPlane[MediaImage2::V].mOffset - imageInfo->mPlane[MediaImage2::U].mOffset :
+            imageInfo->mPlane[MediaImage2::U].mOffset - imageInfo->mPlane[MediaImage2::V].mOffset;
+    bool isCodecUvSemiplannar = (codecUvOffsetDiff == 1) &&
+            (imageInfo->mPlane[MediaImage2::U].mRowInc ==
+            imageInfo->mPlane[MediaImage2::V].mRowInc) &&
+            (imageInfo->mPlane[MediaImage2::U].mColInc == 2) &&
+            (imageInfo->mPlane[MediaImage2::V].mColInc == 2);
+    bool isCodecUvPlannar =
+            ((codecUPlaneFirst && codecUvOffsetDiff >=
+                    imageInfo->mPlane[MediaImage2::U].mRowInc * imageInfo->mHeight/2) ||
+            ((!codecUPlaneFirst && codecUvOffsetDiff >=
+                    imageInfo->mPlane[MediaImage2::V].mRowInc * imageInfo->mHeight/2))) &&
+            imageInfo->mPlane[MediaImage2::U].mColInc == 1 &&
+            imageInfo->mPlane[MediaImage2::V].mColInc == 1;
+    bool cameraUPlaneFirst = yuvBuffer.dataCr > yuvBuffer.dataCb;
+
+    if (isCodecUvSemiplannar && yuvBuffer.chromaStep == 2 &&
+            (codecUPlaneFirst == cameraUPlaneFirst)) {
+        // UV semiplannar
+        // The chroma plane could be either Cb-first or Cr-first. Take the
+        // smaller address.
+        uint8_t *src = std::min(yuvBuffer.dataCb, yuvBuffer.dataCr);
+        MediaImage2::PlaneIndex dstPlane = codecUvOffsetDiff > 0 ? MediaImage2::U : MediaImage2::V;
+        for (auto row = top/2; row < (top+height)/2; row++) {
+            uint8_t *dst = codecBuffer->data() + imageInfo->mPlane[dstPlane].mOffset +
+                    imageInfo->mPlane[dstPlane].mRowInc * (row - top/2);
+            mFnCopyRow(src+row*yuvBuffer.chromaStride+left, dst, width);
+        }
+    } else if (isCodecUvPlannar && yuvBuffer.chromaStep == 1) {
+        // U plane
+        for (auto row = top/2; row < (top+height)/2; row++) {
+            uint8_t *dst = codecBuffer->data() + imageInfo->mPlane[MediaImage2::U].mOffset +
+                    imageInfo->mPlane[MediaImage2::U].mRowInc * (row - top/2);
+            mFnCopyRow(yuvBuffer.dataCb+row*yuvBuffer.chromaStride+left/2, dst, width/2);
+        }
+
+        // V plane
+        for (auto row = top/2; row < (top+height)/2; row++) {
+            uint8_t *dst = codecBuffer->data() + imageInfo->mPlane[MediaImage2::V].mOffset +
+                    imageInfo->mPlane[MediaImage2::V].mRowInc * (row - top/2);
+            mFnCopyRow(yuvBuffer.dataCr+row*yuvBuffer.chromaStride+left/2, dst, width/2);
+        }
+    } else {
+        // Convert between semi-planar and planar layouts, or handle the case
+        // where the UV ordering differs between camera and codec.
+        uint8_t *dst = codecBuffer->data();
+        for (auto row = top/2; row < (top+height)/2; row++) {
+            for (auto col = left/2; col < (left+width)/2; col++) {
+                // U/Cb
+                int32_t dstIndex = imageInfo->mPlane[MediaImage2::U].mOffset +
+                        imageInfo->mPlane[MediaImage2::U].mRowInc * (row - top/2) +
+                        imageInfo->mPlane[MediaImage2::U].mColInc * (col - left/2);
+                int32_t srcIndex = row * yuvBuffer.chromaStride + yuvBuffer.chromaStep * col;
+                dst[dstIndex] = yuvBuffer.dataCb[srcIndex];
+
+                // V/Cr
+                dstIndex = imageInfo->mPlane[MediaImage2::V].mOffset +
+                        imageInfo->mPlane[MediaImage2::V].mRowInc * (row - top/2) +
+                        imageInfo->mPlane[MediaImage2::V].mColInc * (col - left/2);
+                srcIndex = row * yuvBuffer.chromaStride + yuvBuffer.chromaStep * col;
+                dst[dstIndex] = yuvBuffer.dataCr[srcIndex];
+            }
+        }
+    }
+    return OK;
+}
+
+void HeicCompositeStream::initCopyRowFunction(int32_t width)
+{
+    using namespace libyuv;
+
+    mFnCopyRow = CopyRow_C;
+#if defined(HAS_COPYROW_SSE2)
+    if (TestCpuFlag(kCpuHasSSE2)) {
+        mFnCopyRow = IS_ALIGNED(width, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
+    }
+#endif
+#if defined(HAS_COPYROW_AVX)
+    if (TestCpuFlag(kCpuHasAVX)) {
+        mFnCopyRow = IS_ALIGNED(width, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
+    }
+#endif
+#if defined(HAS_COPYROW_ERMS)
+    if (TestCpuFlag(kCpuHasERMS)) {
+        mFnCopyRow = CopyRow_ERMS;
+    }
+#endif
+#if defined(HAS_COPYROW_NEON)
+    if (TestCpuFlag(kCpuHasNEON)) {
+        mFnCopyRow = IS_ALIGNED(width, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
+    }
+#endif
+#if defined(HAS_COPYROW_MIPS)
+    if (TestCpuFlag(kCpuHasMIPS)) {
+        mFnCopyRow = CopyRow_MIPS;
+    }
+#endif
+}
+
+size_t HeicCompositeStream::calcAppSegmentMaxSize(const CameraMetadata& info) {
+    camera_metadata_ro_entry_t entry = info.find(ANDROID_HEIC_INFO_MAX_JPEG_APP_SEGMENTS_COUNT);
+    size_t maxAppsSegment = 1;
+    if (entry.count > 0) {
+        maxAppsSegment = entry.data.u8[0] < 1 ? 1 :
+                entry.data.u8[0] > 16 ? 16 : entry.data.u8[0];
+    }
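+    // Each JPEG APPn segment is at most 2 marker bytes plus a 0xFFFF-byte body
+    // (the 2-byte length field counts toward the body), and one trailing
+    // CameraBlob transport header is appended to the buffer.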
+    return maxAppsSegment * (2 + 0xFFFF) + sizeof(struct CameraBlob);
+}
+
+bool HeicCompositeStream::threadLoop() {
+    int64_t currentTs = INT64_MAX;
+    bool newInputAvailable = false;
+
+    {
+        Mutex::Autolock l(mMutex);
+        if (mErrorState) {
+            // In case we landed in error state, return any pending buffers and
+            // halt all further processing.
+            compilePendingInputLocked();
+            releaseInputFramesLocked(currentTs);
+            return false;
+        }
+
+
+        while (!newInputAvailable) {
+            compilePendingInputLocked();
+            newInputAvailable = getNextReadyInputLocked(&currentTs);
+
+            if (!newInputAvailable) {
+                auto failingFrameNumber = getNextFailingInputLocked(&currentTs);
+                if (failingFrameNumber >= 0) {
+                    // We cannot erase 'mPendingInputFrames[currentTs]' at this point because it is
+                    // possible for two internal stream buffers to fail. In such scenario the
+                    // composite stream should notify the client about a stream buffer error only
+                    // once and this information is kept within 'errorNotified'.
+                    // Any present failed input frames will be removed on a subsequent call to
+                    // 'releaseInputFramesLocked()'.
+                    releaseInputFrameLocked(&mPendingInputFrames[currentTs]);
+                    currentTs = INT64_MAX;
+                }
+
+                auto ret = mInputReadyCondition.waitRelative(mMutex, kWaitDuration);
+                if (ret == TIMED_OUT) {
+                    return true;
+                } else if (ret != OK) {
+                    ALOGE("%s: Timed wait on condition failed: %s (%d)", __FUNCTION__,
+                            strerror(-ret), ret);
+                    return false;
+                }
+            }
+        }
+    }
+
+    auto res = processInputFrame(currentTs, mPendingInputFrames[currentTs]);
+    Mutex::Autolock l(mMutex);
+    if (res != OK) {
+        ALOGE("%s: Failed processing frame with timestamp: %" PRIu64 ": %s (%d)",
+                __FUNCTION__, currentTs, strerror(-res), res);
+        mPendingInputFrames[currentTs].error = true;
+    }
+
+    if (mPendingInputFrames[currentTs].error ||
+            (mPendingInputFrames[currentTs].appSegmentWritten &&
+            mPendingInputFrames[currentTs].pendingOutputTiles == 0)) {
+        releaseInputFramesLocked(currentTs);
+    }
+
+    return true;
+}
+
+bool HeicCompositeStream::onStreamBufferError(const CaptureResultExtras& resultExtras) {
+    bool res = false;
+    // Buffer errors concerning internal composite streams should not be directly visible to
+    // camera clients. They must only receive a single buffer error with the public composite
+    // stream id.
+    if ((resultExtras.errorStreamId == mAppSegmentStreamId) ||
+            (resultExtras.errorStreamId == mMainImageStreamId)) {
+        flagAnErrorFrameNumber(resultExtras.frameNumber);
+        res = true;
+    }
+
+    return res;
+}
+
+void HeicCompositeStream::onResultError(const CaptureResultExtras& resultExtras) {
+    // For result error, since the APPS_SEGMENT buffer already contains EXIF,
+    // simply skip using the capture result metadata to override EXIF.
+    Mutex::Autolock l(mMutex);
+
+    int64_t timestamp = -1;
+    for (const auto& fn : mFrameNumberMap) {
+        if (fn.first == resultExtras.frameNumber) {
+            timestamp = fn.second;
+            break;
+        }
+    }
+    if (timestamp == -1) {
+        for (const auto& inputFrame : mPendingInputFrames) {
+            if (inputFrame.second.frameNumber == resultExtras.frameNumber) {
+                timestamp = inputFrame.first;
+                break;
+            }
+        }
+    }
+
+    if (timestamp == -1) {
+        ALOGE("%s: Failed to find shutter timestamp for result error!", __FUNCTION__);
+        return;
+    }
+
+    mCaptureResults.emplace(timestamp, std::make_tuple(resultExtras.frameNumber, CameraMetadata()));
+    mInputReadyCondition.signal();
+}
+
+void HeicCompositeStream::CodecCallbackHandler::onMessageReceived(const sp<AMessage> &msg) {
+    sp<HeicCompositeStream> parent = mParent.promote();
+    if (parent == nullptr) return;
+
+    switch (msg->what()) {
+        case kWhatCallbackNotify: {
+             int32_t cbID;
+             if (!msg->findInt32("callbackID", &cbID)) {
+                 ALOGE("kWhatCallbackNotify: callbackID is expected.");
+                 break;
+             }
+
+             ALOGV("kWhatCallbackNotify: cbID = %d", cbID);
+
+             switch (cbID) {
+                 case MediaCodec::CB_INPUT_AVAILABLE: {
+                     int32_t index;
+                     if (!msg->findInt32("index", &index)) {
+                         ALOGE("CB_INPUT_AVAILABLE: index is expected.");
+                         break;
+                     }
+                     parent->onHeicInputFrameAvailable(index);
+                     break;
+                 }
+
+                 case MediaCodec::CB_OUTPUT_AVAILABLE: {
+                     int32_t index;
+                     size_t offset;
+                     size_t size;
+                     int64_t timeUs;
+                     int32_t flags;
+
+                     if (!msg->findInt32("index", &index)) {
+                         ALOGE("CB_OUTPUT_AVAILABLE: index is expected.");
+                         break;
+                     }
+                     if (!msg->findSize("offset", &offset)) {
+                         ALOGE("CB_OUTPUT_AVAILABLE: offset is expected.");
+                         break;
+                     }
+                     if (!msg->findSize("size", &size)) {
+                         ALOGE("CB_OUTPUT_AVAILABLE: size is expected.");
+                         break;
+                     }
+                     if (!msg->findInt64("timeUs", &timeUs)) {
+                         ALOGE("CB_OUTPUT_AVAILABLE: timeUs is expected.");
+                         break;
+                     }
+                     if (!msg->findInt32("flags", &flags)) {
+                         ALOGE("CB_OUTPUT_AVAILABLE: flags is expected.");
+                         break;
+                     }
+
+                     CodecOutputBufferInfo bufferInfo = {
+                         index,
+                         (int32_t)offset,
+                         (int32_t)size,
+                         timeUs,
+                         (uint32_t)flags};
+
+                     parent->onHeicOutputFrameAvailable(bufferInfo);
+                     break;
+                 }
+
+                 case MediaCodec::CB_OUTPUT_FORMAT_CHANGED: {
+                     sp<AMessage> format;
+                     if (!msg->findMessage("format", &format)) {
+                         ALOGE("CB_OUTPUT_FORMAT_CHANGED: format is expected.");
+                         break;
+                     }
+
+                     parent->onHeicFormatChanged(format);
+                     break;
+                 }
+
+                 case MediaCodec::CB_ERROR: {
+                     status_t err;
+                     int32_t actionCode;
+                     AString detail;
+                     if (!msg->findInt32("err", &err)) {
+                         ALOGE("CB_ERROR: err is expected.");
+                         break;
+                     }
+                     if (!msg->findInt32("action", &actionCode)) {
+                         ALOGE("CB_ERROR: action is expected.");
+                         break;
+                     }
+                     msg->findString("detail", &detail);
+                     ALOGE("Codec reported error(0x%x), actionCode(%d), detail(%s)",
+                             err, actionCode, detail.c_str());
+
+                     parent->onHeicCodecError();
+                     break;
+                 }
+
+                 default: {
+                     ALOGE("kWhatCallbackNotify: callbackID(%d) is unexpected.", cbID);
+                     break;
+                 }
+             }
+             break;
+        }
+
+        default:
+            ALOGE("shouldn't be here");
+            break;
+    }
+}
+
+}; // namespace camera3
+}; // namespace android
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.h b/services/camera/libcameraservice/api2/HeicCompositeStream.h
new file mode 100644
index 0000000..2aa3c38
--- /dev/null
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA3_HEIC_COMPOSITE_STREAM_H
+#define ANDROID_SERVERS_CAMERA_CAMERA3_HEIC_COMPOSITE_STREAM_H
+
+#include <queue>
+
+#include <gui/IProducerListener.h>
+#include <gui/CpuConsumer.h>
+
+#include <media/hardware/VideoAPI.h>
+#include <media/MediaCodecBuffer.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaMuxer.h>
+
+#include "CompositeStream.h"
+
+namespace android {
+namespace camera3 {
+
+class HeicCompositeStream : public CompositeStream, public Thread,
+        public CpuConsumer::FrameAvailableListener {
+public:
+    HeicCompositeStream(wp<CameraDeviceBase> device,
+            wp<hardware::camera2::ICameraDeviceCallbacks> cb);
+    ~HeicCompositeStream() override;
+
+    static bool isHeicCompositeStream(const sp<Surface> &surface);
+
+    status_t createInternalStreams(const std::vector<sp<Surface>>& consumers,
+            bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+            camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+            std::vector<int> *surfaceIds, int streamSetId, bool isShared) override;
+
+    status_t deleteInternalStreams() override;
+
+    status_t configureStream() override;
+
+    status_t insertGbp(SurfaceMap* /*out*/outSurfaceMap, Vector<int32_t>* /*out*/outputStreamIds,
+            int32_t* /*out*/currentStreamId) override;
+
+    void onShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp) override;
+
+    int getStreamId() override { return mMainImageStreamId; }
+
+    // Use onShutter to keep track of frame number <-> timestamp mapping.
+    void onBufferReleased(const BufferInfo& bufferInfo) override;
+    void onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId,
+            const CameraMetadata& settings) override;
+
+    // CpuConsumer listener implementation
+    void onFrameAvailable(const BufferItem& item) override;
+
+    // Return stream information about the internal camera streams
+    static status_t getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
+            const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/);
+
+    static bool isSizeSupportedByHeifEncoder(int32_t width, int32_t height,
+            bool* useHeic, bool* useGrid, int64_t* stall);
+    static bool isInMemoryTempFileSupported();
+protected:
+
+    bool threadLoop() override;
+    bool onStreamBufferError(const CaptureResultExtras& resultExtras) override;
+    void onResultError(const CaptureResultExtras& resultExtras) override;
+
+private:
+    //
+    // HEIC/HEVC Codec related structures, utility functions, and callbacks
+    //
+    struct CodecOutputBufferInfo {
+        int32_t index;
+        int32_t offset;
+        int32_t size;
+        int64_t timeUs;
+        uint32_t flags;
+    };
+
+    struct CodecInputBufferInfo {
+        int32_t index;
+        int64_t timeUs;
+        size_t tileIndex;
+    };
+
+    class CodecCallbackHandler : public AHandler {
+    public:
+        explicit CodecCallbackHandler(wp<HeicCompositeStream> parent) {
+            mParent = parent;
+        }
+        virtual void onMessageReceived(const sp<AMessage> &msg);
+    private:
+        wp<HeicCompositeStream> mParent;
+    };
+
+    enum {
+        kWhatCallbackNotify,
+    };
+
+    bool              mUseHeic;
+    sp<MediaCodec>    mCodec;
+    sp<ALooper>       mCodecLooper, mCallbackLooper;
+    sp<CodecCallbackHandler> mCodecCallbackHandler;
+    sp<AMessage>      mAsyncNotify;
+    sp<AMessage>      mFormat;
+    size_t            mNumOutputTiles;
+
+    int32_t           mOutputWidth, mOutputHeight;
+    size_t            mMaxHeicBufferSize;
+    int32_t           mGridWidth, mGridHeight;
+    size_t            mGridRows, mGridCols;
+    bool              mUseGrid; // Whether to use framework YUV frame tiling.
+
+    static const int64_t kNoFrameDropMaxPtsGap = -1000000;
+    static const int32_t kNoGridOpRate = 30;
+    static const int32_t kGridOpRate = 120;
+
+    void onHeicOutputFrameAvailable(const CodecOutputBufferInfo& bufferInfo);
+    void onHeicInputFrameAvailable(int32_t index);  // Only called for YUV input mode.
+    void onHeicFormatChanged(sp<AMessage>& newFormat);
+    void onHeicCodecError();
+
+    status_t initializeCodec(uint32_t width, uint32_t height,
+            const sp<CameraDeviceBase>& cameraDevice);
+    void deinitCodec();
+
+    //
+    // Composite stream related structures, utility functions and callbacks.
+    //
+    struct InputFrame {
+        int32_t                   orientation;
+        int32_t                   quality;
+
+        CpuConsumer::LockedBuffer          appSegmentBuffer;
+        std::vector<CodecOutputBufferInfo> codecOutputBuffers;
+        std::unique_ptr<CameraMetadata>    result;
+
+        // Fields that are only applicable to HEVC tiling.
+        CpuConsumer::LockedBuffer          yuvBuffer;
+        std::vector<CodecInputBufferInfo>  codecInputBuffers;
+
+        bool                      error;
+        bool                      errorNotified;
+        int64_t                   frameNumber;
+
+        sp<MediaMuxer>            muxer;
+        int                       fenceFd;
+        int                       fileFd;
+        ssize_t                   trackIndex;
+        ANativeWindowBuffer       *anb;
+
+        bool                      appSegmentWritten;
+        size_t                    pendingOutputTiles;
+        size_t                    codecInputCounter;
+
+        InputFrame() : orientation(0), quality(kDefaultJpegQuality), error(false),
+                       errorNotified(false), frameNumber(-1), fenceFd(-1), fileFd(-1),
+                       trackIndex(-1), anb(nullptr), appSegmentWritten(false),
+                       pendingOutputTiles(0), codecInputCounter(0) { }
+    };
+
+    void compilePendingInputLocked();
+    // Find the first complete and valid frame with the smallest timestamp.
+    bool getNextReadyInputLocked(int64_t *currentTs /*out*/);
+    // Find the failing frame with the smallest timestamp and return its frame number.
+    int64_t getNextFailingInputLocked(int64_t *currentTs /*out*/);
+
+    status_t processInputFrame(nsecs_t timestamp, InputFrame &inputFrame);
+    status_t processCodecInputFrame(InputFrame &inputFrame);
+    status_t startMuxerForInputFrame(nsecs_t timestamp, InputFrame &inputFrame);
+    status_t processAppSegment(nsecs_t timestamp, InputFrame &inputFrame);
+    status_t processOneCodecOutputFrame(nsecs_t timestamp, InputFrame &inputFrame);
+    status_t processCompletedInputFrame(nsecs_t timestamp, InputFrame &inputFrame);
+
+    void releaseInputFrameLocked(InputFrame *inputFrame /*out*/);
+    void releaseInputFramesLocked(int64_t currentTs);
+
+    size_t findAppSegmentsSize(const uint8_t* appSegmentBuffer, size_t maxSize,
+            size_t* app1SegmentSize);
+    int64_t findTimestampInNsLocked(int64_t timeInUs);
+    status_t copyOneYuvTile(sp<MediaCodecBuffer>& codecBuffer,
+            const CpuConsumer::LockedBuffer& yuvBuffer,
+            size_t top, size_t left, size_t width, size_t height);
+    void initCopyRowFunction(int32_t width);
+    static size_t calcAppSegmentMaxSize(const CameraMetadata& info);
+
+    static const nsecs_t kWaitDuration = 10000000; // 10 ms
+    static const int32_t kDefaultJpegQuality = 99;
+    static const auto kJpegDataSpace = HAL_DATASPACE_V0_JFIF;
+    static const android_dataspace kAppSegmentDataSpace =
+            static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS);
+    static const android_dataspace kHeifDataSpace =
+            static_cast<android_dataspace>(HAL_DATASPACE_HEIF);
+
+    int               mAppSegmentStreamId, mAppSegmentSurfaceId;
+    sp<CpuConsumer>   mAppSegmentConsumer;
+    sp<Surface>       mAppSegmentSurface;
+    bool              mAppSegmentBufferAcquired;
+    size_t            mAppSegmentMaxSize;
+    CameraMetadata    mStaticInfo;
+
+    int               mMainImageStreamId, mMainImageSurfaceId;
+    sp<Surface>       mMainImageSurface;
+    sp<CpuConsumer>   mMainImageConsumer; // Only applicable for HEVC codec.
+    bool              mYuvBufferAcquired; // Only applicable to HEVC codec
+
+    sp<Surface>       mOutputSurface;
+    sp<ProducerListener> mProducerListener;
+
+
+    // Map from frame number to JPEG setting of orientation+quality
+    std::map<int64_t, std::pair<int32_t, int32_t>> mSettingsByFrameNumber;
+    // Map from timestamp to JPEG setting of orientation+quality
+    std::map<int64_t, std::pair<int32_t, int32_t>> mSettingsByTimestamp;
+
+    // Keep all incoming APP segment blob buffers pending further processing.
+    std::vector<int64_t> mInputAppSegmentBuffers;
+
+    // Keep all incoming HEIC blob buffers pending further processing.
+    std::vector<CodecOutputBufferInfo> mCodecOutputBuffers;
+    std::queue<int64_t> mCodecOutputBufferTimestamps;
+    size_t mOutputBufferCounter;
+
+    // Keep all incoming YUV buffers pending tiling and encoding (for HEVC YUV tiling only).
+    std::vector<int64_t> mInputYuvBuffers;
+    // Keep all codec input buffers ready to be filled out (for HEVC YUV tiling only)
+    std::vector<int32_t> mCodecInputBuffers;
+
+    // Artificial, strictly increasing YUV grid timestamp to keep the encoder happy.
+    int64_t mGridTimestampUs;
+
+    // In most common use case, entries are accessed in order.
+    std::map<int64_t, InputFrame> mPendingInputFrames;
+
+    // Function pointer of libyuv row copy.
+    void (*mFnCopyRow)(const uint8_t* src, uint8_t* dst, int width);
+};
+
+}; // namespace camera3
+}; // namespace android
+
+#endif //ANDROID_SERVERS_CAMERA_CAMERA3_HEIC_COMPOSITE_STREAM_H
diff --git a/services/camera/libcameraservice/api2/HeicEncoderInfoManager.cpp b/services/camera/libcameraservice/api2/HeicEncoderInfoManager.cpp
new file mode 100644
index 0000000..ed9be6e
--- /dev/null
+++ b/services/camera/libcameraservice/api2/HeicEncoderInfoManager.cpp
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "HeicEncoderInfoManager"
+//#define LOG_NDEBUG 0
+
+#include <cstdint>
+#include <regex>
+
+#include <cutils/properties.h>
+#include <log/log_main.h>
+#include <system/graphics.h>
+
+#include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+#include <media/stagefright/foundation/ABuffer.h>
+
+#include "HeicEncoderInfoManager.h"
+
+namespace android {
+namespace camera3 {
+
+HeicEncoderInfoManager::HeicEncoderInfoManager() :
+        mIsInited(false),
+        mMinSizeHeic(0, 0),
+        mMaxSizeHeic(INT32_MAX, INT32_MAX),
+        mHasHEVC(false),
+        mHasHEIC(false),
+        mDisableGrid(false) {
+    if (initialize() == OK) {
+        mIsInited = true;
+    }
+}
+
+HeicEncoderInfoManager::~HeicEncoderInfoManager() {
+}
+
+bool HeicEncoderInfoManager::isSizeSupported(int32_t width, int32_t height, bool* useHeic,
+        bool* useGrid, int64_t* stall) const {
+    if (useHeic == nullptr || useGrid == nullptr) {
+        ALOGE("%s: invalid parameters: useHeic %p, useGrid %p",
+                __FUNCTION__, useHeic, useGrid);
+        return false;
+    }
+    if (!mIsInited) return false;
+
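+    // Prefer a dedicated HEIC encoder when it covers the requested size (no
+    // framework tiling); otherwise fall back to HEVC and tile the input unless
+    // tiling is disabled or the size fits within 1920x1080.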
+    bool chooseHeic = false, enableGrid = true;
+    if (mHasHEIC && width >= mMinSizeHeic.first &&
+            height >= mMinSizeHeic.second && width <= mMaxSizeHeic.first &&
+            height <= mMaxSizeHeic.second) {
+        chooseHeic = true;
+        enableGrid = false;
+    } else if (mHasHEVC) {
+        bool fullSizeSupportedByHevc = (width >= mMinSizeHevc.first &&
+                height >= mMinSizeHevc.second &&
+                width <= mMaxSizeHevc.first &&
+                height <= mMaxSizeHevc.second);
+        if (fullSizeSupportedByHevc && (mDisableGrid ||
+                (width <= 1920 && height <= 1080))) {
+            enableGrid = false;
+        }
+    } else {
+        // No encoder available for the requested size.
+        return false;
+    }
+
+    if (stall != nullptr) {
+        // Find the preferred encoder that advertises the
+        // "measured-frame-rate-WIDTHxHEIGHT-range" key.
+        const FrameRateMaps& maps =
+                (chooseHeic && mHeicFrameRateMaps.size() > 0) ?
+                mHeicFrameRateMaps : mHevcFrameRateMaps;
+        const auto& closestSize = findClosestSize(maps, width, height);
+        if (closestSize == maps.end()) {
+            // The "measured-frame-rate-WIDTHxHEIGHT-range" key is optional.
+            // Fall back to a default of roughly 3.33 ms per 512x512 tile's worth of pixels.
+            *stall = 3333333LL * width * height / (kGridWidth * kGridHeight);
+            return true;
+        }
+
+        // Derive stall durations based on average fps of the closest size.
+        constexpr int64_t NSEC_PER_SEC = 1000000000LL;
+        int32_t avgFps = (closestSize->second.first + closestSize->second.second)/2;
+        float ratio = 1.0f * width * height /
+                (closestSize->first.first * closestSize->first.second);
+        *stall = ratio * NSEC_PER_SEC / avgFps;
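+        // E.g. if the closest measured size is 3840x2160 at 20-30 fps (25 fps
+        // average) and the request is 4000x3000, the ratio is ~1.45 and the
+        // stall comes out to roughly 58 ms.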
+    }
+
+    *useHeic = chooseHeic;
+    *useGrid = enableGrid;
+    return true;
+}
+
+status_t HeicEncoderInfoManager::initialize() {
+    mDisableGrid = property_get_bool("camera.heic.disable_grid", false);
+    sp<IMediaCodecList> codecsList = MediaCodecList::getInstance();
+    if (codecsList == nullptr) {
+        // No media codec available.
+        return OK;
+    }
+
+    sp<AMessage> heicDetails = getCodecDetails(codecsList, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
+    sp<AMessage> hevcDetails = getCodecDetails(codecsList, MEDIA_MIMETYPE_VIDEO_HEVC);
+
+    if (hevcDetails == nullptr) {
+        if (heicDetails != nullptr) {
+            ALOGE("%s: Device must support HEVC codec if HEIC codec is available!",
+                    __FUNCTION__);
+            return BAD_VALUE;
+        }
+        return OK;
+    }
+
+    // Check CQ mode for HEVC codec
+    {
+        AString bitrateModes;
+        auto hasItem = hevcDetails->findString("feature-bitrate-modes", &bitrateModes);
+        if (!hasItem) {
+            ALOGE("%s: Failed to query bitrate modes for HEVC codec", __FUNCTION__);
+            return BAD_VALUE;
+        }
+        ALOGV("%s: HEVC codec's feature-bitrate-modes value is %d, %s",
+                __FUNCTION__, hasItem, bitrateModes.c_str());
+        std::regex pattern("(^|,)CQ($|,)", std::regex_constants::icase);
+        if (!std::regex_search(bitrateModes.c_str(), pattern)) {
+            return OK;
+        }
+    }
+
+    // HEIC size range
+    if (heicDetails != nullptr) {
+        auto res = getCodecSizeRange(MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC,
+                heicDetails, &mMinSizeHeic, &mMaxSizeHeic, &mHeicFrameRateMaps);
+        if (res != OK) {
+            ALOGE("%s: Failed to get HEIC codec size range: %s (%d)", __FUNCTION__,
+                    strerror(-res), res);
+            return BAD_VALUE;
+        }
+        mHasHEIC = true;
+    }
+
+    // HEVC size range
+    {
+        auto res = getCodecSizeRange(MEDIA_MIMETYPE_VIDEO_HEVC,
+                hevcDetails, &mMinSizeHevc, &mMaxSizeHevc, &mHevcFrameRateMaps);
+        if (res != OK) {
+            ALOGE("%s: Failed to get HEVC codec size range: %s (%d)", __FUNCTION__,
+                    strerror(-res), res);
+            return BAD_VALUE;
+        }
+
+        mHasHEVC = true;
+    }
+
+    return OK;
+}
+
+status_t HeicEncoderInfoManager::getFrameRateMaps(sp<AMessage> details, FrameRateMaps* maps) {
+    if (details == nullptr || maps == nullptr) {
+        ALOGE("%s: Invalid input: details: %p, maps: %p", __FUNCTION__, details.get(), maps);
+        return BAD_VALUE;
+    }
+
+    for (size_t i = 0; i < details->countEntries(); i++) {
+        AMessage::Type type;
+        const char* entryName = details->getEntryNameAt(i, &type);
+        if (type != AMessage::kTypeString) continue;
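+        // Keys of interest look like "measured-frame-rate-1920x1080-range"
+        // with values such as "30-60"; each match becomes a
+        // (width, height) -> (minFps, maxFps) entry in the map.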
+        std::regex frameRateNamePattern("measured-frame-rate-([0-9]+)[*x]([0-9]+)-range",
+                std::regex_constants::icase);
+        std::cmatch sizeMatch;
+        if (std::regex_match(entryName, sizeMatch, frameRateNamePattern) &&
+                sizeMatch.size() == 3) {
+            AMessage::ItemData item = details->getEntryAt(i);
+            AString fpsRangeStr;
+            if (item.find(&fpsRangeStr)) {
+                ALOGV("%s: %s", entryName, fpsRangeStr.c_str());
+                std::regex frameRatePattern("([0-9]+)-([0-9]+)");
+                std::cmatch fpsMatch;
+                if (std::regex_match(fpsRangeStr.c_str(), fpsMatch, frameRatePattern) &&
+                        fpsMatch.size() == 3) {
+                    maps->emplace(
+                            std::make_pair(stoi(sizeMatch[1]), stoi(sizeMatch[2])),
+                            std::make_pair(stoi(fpsMatch[1]), stoi(fpsMatch[2])));
+                } else {
+                    return BAD_VALUE;
+                }
+            }
+        }
+    }
+    return OK;
+}
+
+status_t HeicEncoderInfoManager::getCodecSizeRange(
+        const char* codecName,
+        sp<AMessage> details,
+        std::pair<int32_t, int32_t>* minSize,
+        std::pair<int32_t, int32_t>* maxSize,
+        FrameRateMaps* frameRateMaps) {
+    if (codecName == nullptr || minSize == nullptr || maxSize == nullptr ||
+            details == nullptr || frameRateMaps == nullptr) {
+        return BAD_VALUE;
+    }
+
+    AString sizeRange;
+    auto hasItem = details->findString("size-range", &sizeRange);
+    if (!hasItem) {
+        ALOGE("%s: Failed to query size range for codec %s", __FUNCTION__, codecName);
+        return BAD_VALUE;
+    }
+    ALOGV("%s: %s codec's size range is %s", __FUNCTION__, codecName, sizeRange.c_str());
+    std::regex pattern("([0-9]+)[*x]([0-9]+)-([0-9]+)[*x]([0-9]+)");
+    std::cmatch match;
+    if (std::regex_match(sizeRange.c_str(), match, pattern)) {
+        if (match.size() == 5) {
+            minSize->first = stoi(match[1]);
+            minSize->second = stoi(match[2]);
+            maxSize->first = stoi(match[3]);
+            maxSize->second = stoi(match[4]);
+            if (minSize->first > maxSize->first ||
+                    minSize->second > maxSize->second) {
+                ALOGE("%s: Invalid %s code size range: %s",
+                        __FUNCTION__, codecName, sizeRange.c_str());
+                return BAD_VALUE;
+            }
+        } else {
+            return BAD_VALUE;
+        }
+    }
+
+    auto res = getFrameRateMaps(details, frameRateMaps);
+    if (res != OK) {
+        return res;
+    }
+
+    return OK;
+}
+
+HeicEncoderInfoManager::FrameRateMaps::const_iterator HeicEncoderInfoManager::findClosestSize(
+        const FrameRateMaps& maps, int32_t width, int32_t height) const {
+    int32_t minDiff = INT32_MAX;
+    FrameRateMaps::const_iterator closestIter = maps.begin();
+    for (auto iter = maps.begin(); iter != maps.end(); iter++) {
+        // Use the difference in pixel area to approximate how close two sizes
+        // are.
+        int32_t diff = abs(iter->first.first * iter->first.second - width * height);
+        if (diff < minDiff) {
+            closestIter = iter;
+            minDiff = diff;
+        }
+    }
+    return closestIter;
+}
+
+sp<AMessage> HeicEncoderInfoManager::getCodecDetails(
+        sp<IMediaCodecList> codecsList, const char* name) {
+    ssize_t idx = codecsList->findCodecByType(name, true /*encoder*/);
+    if (idx < 0) {
+        return nullptr;
+    }
+
+    const sp<MediaCodecInfo> info = codecsList->getCodecInfo(idx);
+    if (info == nullptr) {
+        ALOGE("%s: Failed to get codec info for %s", __FUNCTION__, name);
+        return nullptr;
+    }
+    const sp<MediaCodecInfo::Capabilities> caps =
+            info->getCapabilitiesFor(name);
+    if (caps == nullptr) {
+        ALOGE("%s: Failed to get capabilities for codec %s", __FUNCTION__, name);
+        return nullptr;
+    }
+    const sp<AMessage> details = caps->getDetails();
+    if (details == nullptr) {
+        ALOGE("%s: Failed to get details for codec %s", __FUNCTION__, name);
+        return nullptr;
+    }
+
+    return details;
+}
+} // namespace camera3
+} // namespace android
diff --git a/services/camera/libcameraservice/api2/HeicEncoderInfoManager.h b/services/camera/libcameraservice/api2/HeicEncoderInfoManager.h
new file mode 100644
index 0000000..fb0b914
--- /dev/null
+++ b/services/camera/libcameraservice/api2/HeicEncoderInfoManager.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_HEICENCODER_INFO_MANAGER_H
+#define ANDROID_SERVERS_CAMERA_HEICENCODER_INFO_MANAGER_H
+
+#include <unordered_map>
+#include <utility>
+#include <utils/Errors.h>
+#include <utils/StrongPointer.h>
+
+#include <media/IMediaCodecList.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+namespace android {
+namespace camera3 {
+
+class HeicEncoderInfoManager {
+public:
+    static HeicEncoderInfoManager& getInstance() {
+        static HeicEncoderInfoManager instance;
+        return instance;
+    }
+
+    bool isSizeSupported(int32_t width, int32_t height,
+            bool* useHeic, bool* useGrid, int64_t* stall) const;
+
+    static const auto kGridWidth = 512;
+    static const auto kGridHeight = 512;
+private:
+    struct SizePairHash {
+        std::size_t operator () (const std::pair<int32_t,int32_t> &p) const {
+            return p.first * 31 + p.second;
+        }
+    };
+
+    typedef std::unordered_map<std::pair<int32_t, int32_t>,
+            std::pair<int32_t, int32_t>, SizePairHash> FrameRateMaps;
+
+    HeicEncoderInfoManager();
+    virtual ~HeicEncoderInfoManager();
+
+    status_t initialize();
+    status_t getFrameRateMaps(sp<AMessage> details, FrameRateMaps* maps);
+    status_t getCodecSizeRange(const char* codecName, sp<AMessage> details,
+            std::pair<int32_t, int32_t>* minSize, std::pair<int32_t, int32_t>* maxSize,
+            FrameRateMaps* frameRateMaps);
+    FrameRateMaps::const_iterator findClosestSize(const FrameRateMaps& maps,
+            int32_t width, int32_t height) const;
+    sp<AMessage> getCodecDetails(sp<IMediaCodecList> codecsList, const char* name);
+
+    bool mIsInited;
+    std::pair<int32_t, int32_t> mMinSizeHeic, mMaxSizeHeic;
+    std::pair<int32_t, int32_t> mMinSizeHevc, mMaxSizeHevc;
+    bool mHasHEVC, mHasHEIC;
+    FrameRateMaps mHeicFrameRateMaps, mHevcFrameRateMaps;
+    bool mDisableGrid;
+
+};
+
+} // namespace camera3
+} // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA_HEICENCODER_INFO_MANAGER_H
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 3059b07..d6789a4 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -38,6 +38,8 @@
 #include <hwbinder/IPCThreadState.h>
 #include <utils/Trace.h>
 
+#include "api2/HeicCompositeStream.h"
+
 namespace android {
 
 using namespace ::android::hardware::camera;
@@ -50,14 +52,6 @@
 const std::string kLegacyProviderName("legacy/0");
 const std::string kExternalProviderName("external/0");
 const bool kEnableLazyHal(property_get_bool("ro.camera.enableLazyHal", false));
-
-// The extra amount of time to hold a reference to an ICameraProvider after it is no longer needed.
-// Hold the reference for this extra time so that if the camera is unreferenced and then referenced
-// again quickly, we do not let the HAL exit and then need to immediately restart it. An example
-// when this could happen is switching from a front-facing to a rear-facing camera. If the HAL were
-// to exit during the camera switch, the camera could appear janky to the user.
-const std::chrono::system_clock::duration kCameraKeepAliveDelay = 3s;
-
 } // anonymous namespace
 
 const float CameraProviderManager::kDepthARTolerance = .1f;
@@ -77,6 +71,8 @@
     }
     mListener = listener;
     mServiceProxy = proxy;
+    mDeviceState = static_cast<hardware::hidl_bitfield<provider::V2_5::DeviceState>>(
+        provider::V2_5::DeviceState::NORMAL);
 
     // Registering will trigger notifications for all already-known providers
     bool success = mServiceProxy->registerForNotifications(
@@ -280,6 +276,26 @@
     return OK;
 }
 
+status_t CameraProviderManager::notifyDeviceStateChange(
+        hardware::hidl_bitfield<provider::V2_5::DeviceState> newState) {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+    mDeviceState = newState;
+    status_t res = OK;
+    for (auto& provider : mProviders) {
+        ALOGV("%s: Notifying %s for new state 0x%" PRIx64,
+                __FUNCTION__, provider->mProviderName.c_str(), newState);
+        status_t singleRes = provider->notifyDeviceStateChange(mDeviceState);
+        if (singleRes != OK) {
+            ALOGE("%s: Unable to notify provider %s about device state change",
+                    __FUNCTION__,
+                    provider->mProviderName.c_str());
+            res = singleRes;
+            // continue to do the rest of the providers instead of returning now
+        }
+    }
+    return res;
+}
+
 status_t CameraProviderManager::openSession(const std::string &id,
         const sp<device::V3_2::ICameraDeviceCallback>& callback,
         /*out*/
@@ -365,7 +381,7 @@
     if (!kEnableLazyHal) {
         return;
     }
-    ALOGI("Saving camera provider %s for camera device %s", provider->descriptor, cameraId.c_str());
+    ALOGV("Saving camera provider %s for camera device %s", provider->descriptor, cameraId.c_str());
     std::lock_guard<std::mutex> lock(mProviderInterfaceMapLock);
     std::unordered_map<std::string, sp<provider::V2_4::ICameraProvider>> *primaryMap, *alternateMap;
     if (usageType == DeviceMode::TORCH) {
@@ -389,7 +405,7 @@
     if (!kEnableLazyHal) {
         return;
     }
-    ALOGI("Removing camera device %s", cameraId.c_str());
+    ALOGV("Removing camera device %s", cameraId.c_str());
     std::unordered_map<std::string, sp<provider::V2_4::ICameraProvider>> *providerMap;
     if (usageType == DeviceMode::TORCH) {
         providerMap = &mTorchProviderByCameraId;
@@ -399,12 +415,15 @@
     std::lock_guard<std::mutex> lock(mProviderInterfaceMapLock);
     auto search = providerMap->find(cameraId.c_str());
     if (search != providerMap->end()) {
-        auto ptr = search->second;
-        auto future = std::async(std::launch::async, [ptr] {
-            std::this_thread::sleep_for(kCameraKeepAliveDelay);
-            IPCThreadState::self()->flushCommands();
-        });
+        // Drop the reference to this ICameraProvider. This is safe to do immediately (without an
+        // added delay) because hwservicemanager guarantees to hold the reference for at least five
+        // more seconds.  We depend on this behavior so that if the provider is unreferenced and
+        // then referenced again quickly, we do not let the HAL exit and then need to immediately
+        // restart it. An example when this could happen is switching from a front-facing to a
+        // rear-facing camera. If the HAL were to exit during the camera switch, the camera could
+        // appear janky to the user.
         providerMap->erase(cameraId.c_str());
+        IPCThreadState::self()->flushCommands();
     } else {
         ALOGE("%s: Asked to remove reference for camera %s, but no reference to it was found. This "
                 "could mean removeRef was called twice for the same camera ID.", __FUNCTION__,
@@ -504,6 +523,17 @@
     }
 }
 
+bool CameraProviderManager::ProviderInfo::DeviceInfo3::isPublicallyHiddenSecureCamera() {
+    camera_metadata_entry_t entryCap;
+    entryCap = mCameraCharacteristics.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+    if (entryCap.count != 1) {
+        // Do NOT hide this camera device if the capabilities specify anything more
+        // than ANDROID_REQUEST_AVAILABLE_CAPABILITIES_SECURE_IMAGE_DATA.
+        return false;
+    }
+    return entryCap.data.u8[0] == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_SECURE_IMAGE_DATA;
+}
+
 void CameraProviderManager::ProviderInfo::DeviceInfo3::getSupportedSizes(
         const CameraMetadata& ch, uint32_t tag, android_pixel_format_t format,
         std::vector<std::tuple<size_t, size_t>> *sizes/*out*/) {
@@ -648,7 +678,7 @@
     bool isDepthExclusivePresent = std::find(chTags.data.i32, chTags.data.i32 + chTags.count,
             depthExclTag) != (chTags.data.i32 + chTags.count);
     bool isDepthSizePresent = std::find(chTags.data.i32, chTags.data.i32 + chTags.count,
-            depthExclTag) != (chTags.data.i32 + chTags.count);
+            depthSizesTag) != (chTags.data.i32 + chTags.count);
     if (!(isDepthExclusivePresent && isDepthSizePresent)) {
         // No depth support, nothing more to do.
         return OK;
@@ -676,7 +706,6 @@
     getSupportedDynamicDepthSizes(supportedBlobSizes, supportedDepthSizes,
             &supportedDynamicDepthSizes, &internalDepthSizes);
     if (supportedDynamicDepthSizes.empty()) {
-        ALOGE("%s: No dynamic depth size matched!", __func__);
         // Nothing more to do.
         return OK;
     }
@@ -869,6 +898,130 @@
     return res;
 }
 
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::fillHeicStreamCombinations(
+        std::vector<int32_t>* outputs,
+        std::vector<int64_t>* durations,
+        std::vector<int64_t>* stallDurations,
+        const camera_metadata_entry& halStreamConfigs,
+        const camera_metadata_entry& halStreamDurations) {
+    if (outputs == nullptr || durations == nullptr || stallDurations == nullptr) {
+        return BAD_VALUE;
+    }
+
+    static bool supportInMemoryTempFile =
+            camera3::HeicCompositeStream::isInMemoryTempFileSupported();
+    if (!supportInMemoryTempFile) {
+        ALOGI("%s: No HEIC support due to absence of in memory temp file support",
+                __FUNCTION__);
+        return OK;
+    }
+
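+    // ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS entries are packed as
+    // 4-tuples of (format, width, height, isInput), and the matching
+    // ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS entries as
+    // (format, width, height, duration), hence the stride of 4 below.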
+    for (size_t i = 0; i < halStreamConfigs.count; i += 4) {
+        int32_t format = halStreamConfigs.data.i32[i];
+        // Only IMPLEMENTATION_DEFINED and YUV_888 can be used to generate a HEIC
+        // image.
+        if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
+                format != HAL_PIXEL_FORMAT_YCBCR_420_888) {
+            continue;
+        }
+
+        bool sizeAvail = false;
+        for (size_t j = 0; j < outputs->size(); j+= 4) {
+            if ((*outputs)[j+1] == halStreamConfigs.data.i32[i+1] &&
+                    (*outputs)[j+2] == halStreamConfigs.data.i32[i+2]) {
+                sizeAvail = true;
+                break;
+            }
+        }
+        if (sizeAvail) continue;
+
+        int64_t stall = 0;
+        bool useHeic, useGrid;
+        if (camera3::HeicCompositeStream::isSizeSupportedByHeifEncoder(
+                halStreamConfigs.data.i32[i+1], halStreamConfigs.data.i32[i+2],
+                &useHeic, &useGrid, &stall)) {
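+            // Only keep combinations where the grid requirement matches the
+            // stream format: grid (tiled) encoding is driven from YUV_420_888
+            // buffers, full-frame encoding from IMPLEMENTATION_DEFINED buffers.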
+            if (useGrid != (format == HAL_PIXEL_FORMAT_YCBCR_420_888)) {
+                continue;
+            }
+
+            // HEIC configuration
+            int32_t config[] = {HAL_PIXEL_FORMAT_BLOB, halStreamConfigs.data.i32[i+1],
+                    halStreamConfigs.data.i32[i+2], 0 /*isInput*/};
+            outputs->insert(outputs->end(), config, config + 4);
+
+            // HEIC minFrameDuration
+            for (size_t j = 0; j < halStreamDurations.count; j += 4) {
+                if (halStreamDurations.data.i64[j] == format &&
+                        halStreamDurations.data.i64[j+1] == halStreamConfigs.data.i32[i+1] &&
+                        halStreamDurations.data.i64[j+2] == halStreamConfigs.data.i32[i+2]) {
+                    int64_t duration[] = {HAL_PIXEL_FORMAT_BLOB, halStreamConfigs.data.i32[i+1],
+                            halStreamConfigs.data.i32[i+2], halStreamDurations.data.i64[j+3]};
+                    durations->insert(durations->end(), duration, duration+4);
+                    break;
+                }
+            }
+
+            // HEIC stallDuration
+            int64_t stallDuration[] = {HAL_PIXEL_FORMAT_BLOB, halStreamConfigs.data.i32[i+1],
+                    halStreamConfigs.data.i32[i+2], stall};
+            stallDurations->insert(stallDurations->end(), stallDuration, stallDuration+4);
+        }
+    }
+    return OK;
+}
+
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::deriveHeicTags() {
+    auto& c = mCameraCharacteristics;
+
+    camera_metadata_entry halHeicSupport = c.find(ANDROID_HEIC_INFO_SUPPORTED);
+    if (halHeicSupport.count > 1) {
+        ALOGE("%s: Invalid entry count %zu for ANDROID_HEIC_INFO_SUPPORTED",
+                __FUNCTION__, halHeicSupport.count);
+        return BAD_VALUE;
+    } else if (halHeicSupport.count == 0 ||
+            halHeicSupport.data.u8[0] == ANDROID_HEIC_INFO_SUPPORTED_FALSE) {
+        // Camera HAL doesn't support mandatory stream combinations for HEIC.
+        return OK;
+    }
+
+    camera_metadata_entry maxJpegAppsSegments =
+            c.find(ANDROID_HEIC_INFO_MAX_JPEG_APP_SEGMENTS_COUNT);
+    if (maxJpegAppsSegments.count != 1 || maxJpegAppsSegments.data.u8[0] == 0 ||
+            maxJpegAppsSegments.data.u8[0] > 16) {
+        ALOGE("%s: ANDROID_HEIC_INFO_MAX_JPEG_APP_SEGMENTS_COUNT must be within [1, 16]",
+                __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    // Populate HEIC output configurations and their related min frame durations
+    // and stall durations.
+    std::vector<int32_t> heicOutputs;
+    std::vector<int64_t> heicDurations;
+    std::vector<int64_t> heicStallDurations;
+
+    camera_metadata_entry halStreamConfigs =
+            c.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+    camera_metadata_entry minFrameDurations =
+            c.find(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS);
+
+    status_t res = fillHeicStreamCombinations(&heicOutputs, &heicDurations, &heicStallDurations,
+            halStreamConfigs, minFrameDurations);
+    if (res != OK) {
+        ALOGE("%s: Failed to fill HEIC stream combinations: %s (%d)", __FUNCTION__,
+                strerror(-res), res);
+        return res;
+    }
+
+    c.update(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS,
+           heicOutputs.data(), heicOutputs.size());
+    c.update(ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS,
+            heicDurations.data(), heicDurations.size());
+    c.update(ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS,
+            heicStallDurations.data(), heicStallDurations.size());
+
+    return OK;
+}
+
 bool CameraProviderManager::isLogicalCamera(const std::string& id,
         std::vector<std::string>* physicalCameraIds) {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
@@ -882,6 +1035,16 @@
     return deviceInfo->mIsLogicalCamera;
 }
 
+bool CameraProviderManager::isPublicallyHiddenSecureCamera(const std::string& id) {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) {
+        return false;
+    }
+    return deviceInfo->mIsPublicallyHiddenSecureCamera;
+}
+
 bool CameraProviderManager::isHiddenPhysicalCamera(const std::string& cameraId) {
     for (auto& provider : mProviders) {
         for (auto& deviceInfo : provider->mDevices) {
@@ -947,7 +1110,7 @@
     }
 
     sp<ProviderInfo> providerInfo = new ProviderInfo(newProvider, this);
-    status_t res = providerInfo->initialize(interface);
+    status_t res = providerInfo->initialize(interface, mDeviceState);
     if (res != OK) {
         return res;
     }
@@ -1008,7 +1171,8 @@
 }
 
 status_t CameraProviderManager::ProviderInfo::initialize(
-        sp<provider::V2_4::ICameraProvider>& interface) {
+        sp<provider::V2_4::ICameraProvider>& interface,
+        hardware::hidl_bitfield<provider::V2_5::DeviceState> currentDeviceState) {
     status_t res = parseProviderName(mProviderName, &mType, &mId);
     if (res != OK) {
         ALOGE("%s: Invalid provider name, ignoring", __FUNCTION__);
@@ -1016,6 +1180,15 @@
     }
     ALOGI("Connecting to new camera provider: %s, isRemote? %d",
             mProviderName.c_str(), interface->isRemote());
+
+    // Determine minor version
+    auto castResult = provider::V2_5::ICameraProvider::castFrom(interface);
+    if (castResult.isOk()) {
+        mMinorVersion = 5;
+    } else {
+        mMinorVersion = 4;
+    }
+
     // cameraDeviceStatusChange callbacks may be called (and causing new devices added)
     // before setCallback returns
     hardware::Return<Status> status = interface->setCallback(this);
@@ -1040,6 +1213,24 @@
                 __FUNCTION__, mProviderName.c_str());
     }
 
+    if (!kEnableLazyHal) {
+        // Save HAL reference indefinitely
+        mSavedInterface = interface;
+    } else {
+        mActiveInterface = interface;
+    }
+
+    ALOGV("%s: Setting device state for %s: 0x%" PRIx64,
+            __FUNCTION__, mProviderName.c_str(), currentDeviceState);
+    notifyDeviceStateChange(currentDeviceState);
+
+    res = setUpVendorTags();
+    if (res != OK) {
+        ALOGE("%s: Unable to set up vendor tags from provider '%s'",
+                __FUNCTION__, mProviderName.c_str());
+        return res;
+    }
+
     // Get initial list of camera devices, if any
     std::vector<std::string> devices;
     hardware::Return<void> ret = interface->getCameraIdList([&status, this, &devices](
@@ -1096,34 +1287,28 @@
         }
     }
 
-    res = setUpVendorTags();
-    if (res != OK) {
-        ALOGE("%s: Unable to set up vendor tags from provider '%s'",
-                __FUNCTION__, mProviderName.c_str());
-        return res;
-    }
-
     ALOGI("Camera provider %s ready with %zu camera devices",
             mProviderName.c_str(), mDevices.size());
 
     mInitialized = true;
-    if (!kEnableLazyHal) {
-        // Save HAL reference indefinitely
-        mSavedInterface = interface;
-    }
     return OK;
 }
 
 const sp<provider::V2_4::ICameraProvider>
 CameraProviderManager::ProviderInfo::startProviderInterface() {
     ATRACE_CALL();
-    ALOGI("Request to start camera provider: %s", mProviderName.c_str());
+    ALOGV("Request to start camera provider: %s", mProviderName.c_str());
     if (mSavedInterface != nullptr) {
         return mSavedInterface;
     }
+    if (!kEnableLazyHal) {
+        ALOGE("Bad provider state! Should not be here on a non-lazy HAL!");
+        return nullptr;
+    }
+
     auto interface = mActiveInterface.promote();
     if (interface == nullptr) {
-        ALOGI("Could not promote, calling getService(%s)", mProviderName.c_str());
+        ALOGI("Camera HAL provider needs restart, calling getService(%s)", mProviderName.c_str());
         interface = mManager->mServiceProxy->getService(mProviderName);
         interface->setCallback(this);
         hardware::Return<bool> linked = interface->linkToDeath(this, /*cookie*/ mId);
@@ -1136,9 +1321,22 @@
             ALOGW("%s: Unable to link to provider '%s' death notifications",
                     __FUNCTION__, mProviderName.c_str());
         }
+        // Send current device state
+        if (mMinorVersion >= 5) {
+            auto castResult = provider::V2_5::ICameraProvider::castFrom(interface);
+            if (castResult.isOk()) {
+                sp<provider::V2_5::ICameraProvider> interface_2_5 = castResult;
+                if (interface_2_5 != nullptr) {
+                    ALOGV("%s: Initial device state for %s: 0x %" PRIx64,
+                            __FUNCTION__, mProviderName.c_str(), mDeviceState);
+                    interface_2_5->notifyDeviceStateChange(mDeviceState);
+                }
+            }
+        }
+
         mActiveInterface = interface;
     } else {
-        ALOGI("Camera provider (%s) already in use. Re-using instance.", mProviderName.c_str());
+        ALOGV("Camera provider (%s) already in use. Re-using instance.", mProviderName.c_str());
     }
     return interface;
 }
@@ -1223,8 +1421,10 @@
 }
 
 status_t CameraProviderManager::ProviderInfo::dump(int fd, const Vector<String16>&) const {
-    dprintf(fd, "== Camera Provider HAL %s (v2.4, %s) static info: %zu devices: ==\n",
-            mProviderName.c_str(), mIsRemote ? "remote" : "passthrough",
+    dprintf(fd, "== Camera Provider HAL %s (v2.%d, %s) static info: %zu devices: ==\n",
+            mProviderName.c_str(),
+            mMinorVersion,
+            mIsRemote ? "remote" : "passthrough",
             mDevices.size());
 
     for (auto& device : mDevices) {
@@ -1423,6 +1623,26 @@
     return OK;
 }
 
+status_t CameraProviderManager::ProviderInfo::notifyDeviceStateChange(
+        hardware::hidl_bitfield<provider::V2_5::DeviceState> newDeviceState) {
+    mDeviceState = newDeviceState;
+    if (mMinorVersion >= 5) {
+        // Check if the provider is currently active - not going to start it up for this notification
+        auto interface = mSavedInterface != nullptr ? mSavedInterface : mActiveInterface.promote();
+        if (interface != nullptr) {
+            // Send current device state
+            auto castResult = provider::V2_5::ICameraProvider::castFrom(interface);
+            if (castResult.isOk()) {
+                sp<provider::V2_5::ICameraProvider> interface_2_5 = castResult;
+                if (interface_2_5 != nullptr) {
+                    interface_2_5->notifyDeviceStateChange(mDeviceState);
+                }
+            }
+        }
+    }
+    return OK;
+}
+
 template<class DeviceInfoT>
 std::unique_ptr<CameraProviderManager::ProviderInfo::DeviceInfo>
     CameraProviderManager::ProviderInfo::initializeDeviceInfo(
@@ -1709,18 +1929,26 @@
                 __FUNCTION__, id.c_str(), CameraProviderManager::statusToString(status), status);
         return;
     }
+
+    mIsPublicallyHiddenSecureCamera = isPublicallyHiddenSecureCamera();
+
     status_t res = fixupMonochromeTags();
     if (OK != res) {
         ALOGE("%s: Unable to fix up monochrome tags based for older HAL version: %s (%d)",
                 __FUNCTION__, strerror(-res), res);
         return;
     }
-    res = addDynamicDepthTags();
-    if (OK != res) {
-        ALOGE("%s: Failed appending dynamic depth tags: %s (%d)", __FUNCTION__, strerror(-res),
-                res);
-        return;
+    auto stat = addDynamicDepthTags();
+    if (OK != stat) {
+        ALOGE("%s: Failed appending dynamic depth tags: %s (%d)", __FUNCTION__, strerror(-stat),
+                stat);
     }
+    res = deriveHeicTags();
+    if (OK != res) {
+        ALOGE("%s: Unable to derive HEIC tags based on camera and media capabilities: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+    }
+
     camera_metadata_entry flashAvailable =
             mCameraCharacteristics.find(ANDROID_FLASH_INFO_AVAILABLE);
     if (flashAvailable.count == 1 &&
@@ -1731,6 +1959,7 @@
     }
 
     queryPhysicalCameraIds();
+
     // Get physical camera characteristics if applicable
     auto castResult = device::V3_5::ICameraDevice::castFrom(interface);
     if (!castResult.isOk()) {
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index fbd7d2e..a42fb4d 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -28,9 +28,8 @@
 #include <camera/CameraBase.h>
 #include <utils/Errors.h>
 #include <android/hardware/camera/common/1.0/types.h>
-#include <android/hardware/camera/provider/2.4/ICameraProvider.h>
+#include <android/hardware/camera/provider/2.5/ICameraProvider.h>
 #include <android/hardware/camera/device/3.4/ICameraDeviceSession.h>
-//#include <android/hardware/camera/provider/2.4/ICameraProviderCallbacks.h>
 #include <android/hidl/manager/1.0/IServiceNotification.h>
 #include <camera/VendorTagDescriptor.h>
 
@@ -206,6 +205,12 @@
     status_t setUpVendorTags();
 
     /**
+     * Inform registered providers about a device state change, such as folding or unfolding
+     */
+    status_t notifyDeviceStateChange(
+        android::hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState> newState);
+
+    /**
      * Open an active session to a camera device.
      *
      * This fully powers on the camera device hardware, and returns a handle to a
@@ -264,6 +269,7 @@
      */
     bool isLogicalCamera(const std::string& id, std::vector<std::string>* physicalCameraIds);
 
+    bool isPublicallyHiddenSecureCamera(const std::string& id);
     bool isHiddenPhysicalCamera(const std::string& cameraId);
 
     static const float kDepthARTolerance;
@@ -276,6 +282,9 @@
     wp<StatusListener> mListener;
     ServiceInteractionProxy* mServiceProxy;
 
+    // Current overall Android device physical status
+    android::hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState> mDeviceState;
+
     // mProviderLifecycleLock is locked during onRegistration and removeProvider
     mutable std::mutex mProviderLifecycleLock;
 
@@ -302,10 +311,14 @@
     {
         const std::string mProviderName;
         const metadata_vendor_id_t mProviderTagid;
+        int mMinorVersion;
         sp<VendorTagDescriptor> mVendorTagDescriptor;
         bool mSetTorchModeSupported;
         bool mIsRemote;
 
+        // Current overall Android device physical status
+        hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState> mDeviceState;
+
         // This pointer is used to keep a reference to the ICameraProvider that was last accessed.
         wp<hardware::camera::provider::V2_4::ICameraProvider> mActiveInterface;
 
@@ -315,7 +328,9 @@
                 CameraProviderManager *manager);
         ~ProviderInfo();
 
-        status_t initialize(sp<hardware::camera::provider::V2_4::ICameraProvider>& interface);
+        status_t initialize(sp<hardware::camera::provider::V2_4::ICameraProvider>& interface,
+                hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
+                    currentDeviceState);
 
         const sp<hardware::camera::provider::V2_4::ICameraProvider> startProviderInterface();
 
@@ -344,6 +359,13 @@
          */
         status_t setUpVendorTags();
 
+        /**
+         * Notify provider about top-level device physical state changes
+         */
+        status_t notifyDeviceStateChange(
+                hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
+                    newDeviceState);
+
         // Basic device information, common to all camera devices
         struct DeviceInfo {
             const std::string mName;  // Full instance name
@@ -354,6 +376,7 @@
             std::vector<std::string> mPhysicalIds;
             hardware::CameraInfo mInfo;
             sp<IBase> mSavedInterface;
+            bool mIsPublicallyHiddenSecureCamera = false;
 
             const hardware::camera::common::V1_0::CameraResourceCost mResourceCost;
 
@@ -471,6 +494,7 @@
             CameraMetadata mCameraCharacteristics;
             std::unordered_map<std::string, CameraMetadata> mPhysicalCameraCharacteristics;
             void queryPhysicalCameraIds();
+            bool isPublicallyHiddenSecureCamera();
             status_t fixupMonochromeTags();
             status_t addDynamicDepthTags();
             static void getSupportedSizes(const CameraMetadata& ch, uint32_t tag,
@@ -491,6 +515,12 @@
                     std::vector<std::tuple<size_t, size_t>> *internalDepthSizes /*out*/);
             status_t removeAvailableKeys(CameraMetadata& c, const std::vector<uint32_t>& keys,
                     uint32_t keyTag);
+            status_t fillHeicStreamCombinations(std::vector<int32_t>* outputs,
+                    std::vector<int64_t>* durations,
+                    std::vector<int64_t>* stallDurations,
+                    const camera_metadata_entry& halStreamConfigs,
+                    const camera_metadata_entry& halStreamDurations);
+            status_t deriveHeicTags();
         };
 
     private:
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
index a945aca..6d96163 100644
--- a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
@@ -32,9 +32,12 @@
 #include <dynamic_depth/profile.h>
 #include <dynamic_depth/profiles.h>
 #include <jpeglib.h>
+#include <libexif/exif-data.h>
+#include <libexif/exif-system.h>
 #include <math.h>
 #include <sstream>
 #include <utils/Errors.h>
+#include <utils/ExifUtils.h>
 #include <utils/Log.h>
 #include <xmpmeta/xmp_data.h>
 #include <xmpmeta/xmp_writer.h>
@@ -61,8 +64,44 @@
 namespace android {
 namespace camera3 {
 
+ExifOrientation getExifOrientation(const unsigned char *jpegBuffer, size_t jpegBufferSize) {
+    if ((jpegBuffer == nullptr) || (jpegBufferSize == 0)) {
+        return ExifOrientation::ORIENTATION_UNDEFINED;
+    }
+
+    auto exifData = exif_data_new();
+    exif_data_load_data(exifData, jpegBuffer, jpegBufferSize);
+    ExifEntry *orientation = exif_content_get_entry(exifData->ifd[EXIF_IFD_0],
+            EXIF_TAG_ORIENTATION);
+    if ((orientation == nullptr) || (orientation->size != sizeof(ExifShort))) {
+        ALOGV("%s: Orientation EXIF entry invalid!", __FUNCTION__);
+        exif_data_unref(exifData);
+        return ExifOrientation::ORIENTATION_0_DEGREES;
+    }
+
+    auto orientationValue = exif_get_short(orientation->data, exif_data_get_byte_order(exifData));
+    ExifOrientation ret;
+    switch (orientationValue) {
+        case ExifOrientation::ORIENTATION_0_DEGREES:
+        case ExifOrientation::ORIENTATION_90_DEGREES:
+        case ExifOrientation::ORIENTATION_180_DEGREES:
+        case ExifOrientation::ORIENTATION_270_DEGREES:
+            ret = static_cast<ExifOrientation> (orientationValue);
+            break;
+        default:
+            ALOGE("%s: Unexpected EXIF orientation value: %d, defaulting to 0 degrees",
+                    __FUNCTION__, orientationValue);
+            ret = ExifOrientation::ORIENTATION_0_DEGREES;
+    }
+
+    exif_data_unref(exifData);
+
+    return ret;
+}
+
 status_t encodeGrayscaleJpeg(size_t width, size_t height, uint8_t *in, void *out,
-        const size_t maxOutSize, uint8_t jpegQuality, size_t &actualSize) {
+        const size_t maxOutSize, uint8_t jpegQuality, ExifOrientation exifOrientation,
+        size_t &actualSize) {
     status_t ret;
     // libjpeg is a C library so we use C-style "inheritance" by
     // putting libjpeg's jpeg_destination_mgr first in our custom
@@ -151,6 +190,23 @@
 
     jpeg_start_compress(&cinfo, TRUE);
 
+    if (exifOrientation != ExifOrientation::ORIENTATION_UNDEFINED) {
+        std::unique_ptr<ExifUtils> utils(ExifUtils::create());
+        utils->initializeEmpty();
+        utils->setImageWidth(width);
+        utils->setImageHeight(height);
+        utils->setOrientationValue(exifOrientation);
+
+        if (utils->generateApp1()) {
+            const uint8_t* exifBuffer = utils->getApp1Buffer();
+            size_t exifBufferSize = utils->getApp1Length();
+            jpeg_write_marker(&cinfo, JPEG_APP0 + 1, static_cast<const JOCTET*>(exifBuffer),
+                    exifBufferSize);
+        } else {
+            ALOGE("%s: Unable to generate App1 buffer", __FUNCTION__);
+        }
+    }
+
     for (size_t i = 0; i < cinfo.image_height; i++) {
         auto currentRow  = static_cast<JSAMPROW>(in + i*width);
         jpeg_write_scanlines(&cinfo, &currentRow, /*num_lines*/1);
@@ -168,8 +224,106 @@
     return ret;
 }
 
+inline void unpackDepth16(uint16_t value, std::vector<float> *points /*out*/,
+        std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+    // Android densely packed depth map. The units for the range are in
+    // millimeters and need to be scaled to meters.
+    // The confidence value is encoded in the 3 most significant bits.
+    // The confidence data needs to be additionally normalized with
+    // values 1.0f, 0.0f representing maximum and minimum confidence
+    // respectively.
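+    // For example (hypothetical sample): a raw value of 0x6400 unpacks to
+    // range bits 0x0400 = 1024 mm -> 1.024 m, and confidence bits 3 ->
+    // (3 - 1) / 7 ~= 0.29 normalized confidence.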
+    auto point = static_cast<float>(value & 0x1FFF) / 1000.f;
+    points->push_back(point);
+
+    auto conf = (value >> 13) & 0x7;
+    float normConfidence = (conf == 0) ? 1.f : (static_cast<float>(conf) - 1) / 7.f;
+    confidence->push_back(normConfidence);
+
+    if (*near > point) {
+        *near = point;
+    }
+    if (*far < point) {
+        *far = point;
+    }
+}
+
+// Trivial case: read forward from the top-left corner.
+void rotate0AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+        std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+    for (size_t i = 0; i < inputFrame.mDepthMapHeight; i++) {
+        for (size_t j = 0; j < inputFrame.mDepthMapWidth; j++) {
+            unpackDepth16(inputFrame.mDepthMapBuffer[i*inputFrame.mDepthMapStride + j], points,
+                    confidence, near, far);
+        }
+    }
+}
+
+// 90 degrees CW rotation can be applied by starting to read from the bottom-left
+// corner, transposing rows and columns.
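+// Source pixel (row r, column c) lands at rotated position (row c, column
+// height - 1 - r), so the unpacked map has its width and height swapped.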
+void rotate90AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+        std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+    for (size_t i = 0; i < inputFrame.mDepthMapWidth; i++) {
+        for (ssize_t j = inputFrame.mDepthMapHeight-1; j >= 0; j--) {
+            unpackDepth16(inputFrame.mDepthMapBuffer[j*inputFrame.mDepthMapStride + i], points,
+                    confidence, near, far);
+        }
+    }
+}
+
+// 180 degrees CW rotation can be applied by reading backwards from the bottom-right corner.
+void rotate180AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+        std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+    for (ssize_t i = inputFrame.mDepthMapHeight-1; i >= 0; i--) {
+        for (ssize_t j = inputFrame.mDepthMapWidth-1; j >= 0; j--) {
+            unpackDepth16(inputFrame.mDepthMapBuffer[i*inputFrame.mDepthMapStride + j], points,
+                    confidence, near, far);
+        }
+    }
+}
+
+// 270 degrees CW rotation can be applied by starting to read from the top-right
+// corner, transposing rows and columns.
+void rotate270AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+        std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+    for (ssize_t i = inputFrame.mDepthMapWidth-1; i >= 0; i--) {
+        for (size_t j = 0; j < inputFrame.mDepthMapHeight; j++) {
+            unpackDepth16(inputFrame.mDepthMapBuffer[j*inputFrame.mDepthMapStride + i], points,
+                    confidence, near, far);
+        }
+    }
+}
+
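+// Applies the rotation implied by the depth photo orientation while unpacking and
+// returns true when the rotation swaps the map's width and height (90/270 degrees).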
+bool rotateAndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+        std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+    switch (inputFrame.mOrientation) {
+        case DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES:
+            rotate0AndUnpack(inputFrame, points, confidence, near, far);
+            return false;
+        case DepthPhotoOrientation::DEPTH_ORIENTATION_90_DEGREES:
+            rotate90AndUnpack(inputFrame, points, confidence, near, far);
+            return true;
+        case DepthPhotoOrientation::DEPTH_ORIENTATION_180_DEGREES:
+            rotate180AndUnpack(inputFrame, points, confidence, near, far);
+            return false;
+        case DepthPhotoOrientation::DEPTH_ORIENTATION_270_DEGREES:
+            rotate270AndUnpack(inputFrame, points, confidence, near, far);
+            return true;
+        default:
+            ALOGE("%s: Unsupported depth photo rotation: %d, default to 0", __FUNCTION__,
+                    inputFrame.mOrientation);
+            rotate0AndUnpack(inputFrame, points, confidence, near, far);
+    }
+
+    return false;
+}
+
 std::unique_ptr<dynamic_depth::DepthMap> processDepthMapFrame(DepthPhotoInputFrame inputFrame,
-                std::vector<std::unique_ptr<Item>> *items /*out*/) {
+        ExifOrientation exifOrientation, std::vector<std::unique_ptr<Item>> *items /*out*/,
+        bool *switchDimensions /*out*/) {
+    if ((items == nullptr) || (switchDimensions == nullptr)) {
+        return nullptr;
+    }
+
     std::vector<float> points, confidence;
 
     size_t pointCount = inputFrame.mDepthMapWidth * inputFrame.mDepthMapHeight;
@@ -177,29 +331,21 @@
     confidence.reserve(pointCount);
     float near = UINT16_MAX;
     float far = .0f;
-    for (size_t i = 0; i < inputFrame.mDepthMapHeight; i++) {
-        for (size_t j = 0; j < inputFrame.mDepthMapWidth; j++) {
-            // Android densely packed depth map. The units for the range are in
-            // millimeters and need to be scaled to meters.
-            // The confidence value is encoded in the 3 most significant bits.
-            // The confidence data needs to be additionally normalized with
-            // values 1.0f, 0.0f representing maximum and minimum confidence
-            // respectively.
-            auto value = inputFrame.mDepthMapBuffer[i*inputFrame.mDepthMapStride + j];
-            auto point = static_cast<float>(value & 0x1FFF) / 1000.f;
-            points.push_back(point);
+    *switchDimensions = false;
+    // Physical rotation of the depth and confidence maps may be needed when the
+    // EXIF orientation is 0 degrees but the depth photo orientation (taken from
+    // the source color image) has a different value.
+    if (exifOrientation == ExifOrientation::ORIENTATION_0_DEGREES) {
+        *switchDimensions = rotateAndUnpack(inputFrame, &points, &confidence, &near, &far);
+    } else {
+        rotate0AndUnpack(inputFrame, &points, &confidence, &near, &far);
+    }
 
-            auto conf = (value >> 13) & 0x7;
-            float normConfidence = (conf == 0) ? 1.f : (static_cast<float>(conf) - 1) / 7.f;
-            confidence.push_back(normConfidence);
-
-            if (near > point) {
-                near = point;
-            }
-            if (far < point) {
-                far = point;
-            }
-        }
+    size_t width = inputFrame.mDepthMapWidth;
+    size_t height = inputFrame.mDepthMapHeight;
+    if (*switchDimensions) {
+        width = inputFrame.mDepthMapHeight;
+        height = inputFrame.mDepthMapWidth;
     }
 
     if (near == far) {
@@ -225,18 +371,18 @@
     depthParams.depth_image_data.resize(inputFrame.mMaxJpegSize);
     depthParams.confidence_data.resize(inputFrame.mMaxJpegSize);
     size_t actualJpegSize;
-    auto ret = encodeGrayscaleJpeg(inputFrame.mDepthMapWidth, inputFrame.mDepthMapHeight,
-            pointsQuantized.data(), depthParams.depth_image_data.data(), inputFrame.mMaxJpegSize,
-            inputFrame.mJpegQuality, actualJpegSize);
+    auto ret = encodeGrayscaleJpeg(width, height, pointsQuantized.data(),
+            depthParams.depth_image_data.data(), inputFrame.mMaxJpegSize,
+            inputFrame.mJpegQuality, exifOrientation, actualJpegSize);
     if (ret != NO_ERROR) {
         ALOGE("%s: Depth map compression failed!", __FUNCTION__);
         return nullptr;
     }
     depthParams.depth_image_data.resize(actualJpegSize);
 
-    ret = encodeGrayscaleJpeg(inputFrame.mDepthMapWidth, inputFrame.mDepthMapHeight,
-            confidenceQuantized.data(), depthParams.confidence_data.data(), inputFrame.mMaxJpegSize,
-            inputFrame.mJpegQuality, actualJpegSize);
+    ret = encodeGrayscaleJpeg(width, height, confidenceQuantized.data(),
+            depthParams.confidence_data.data(), inputFrame.mMaxJpegSize,
+            inputFrame.mJpegQuality, exifOrientation, actualJpegSize);
     if (ret != NO_ERROR) {
         ALOGE("%s: Confidence map compression failed!", __FUNCTION__);
         return nullptr;
@@ -262,7 +408,12 @@
         return BAD_VALUE;
     }
 
-    cameraParams->depth_map = processDepthMapFrame(inputFrame, &items);
+    ExifOrientation exifOrientation = getExifOrientation(
+            reinterpret_cast<const unsigned char*> (inputFrame.mMainJpegBuffer),
+            inputFrame.mMainJpegSize);
+    bool switchDimensions;
+    cameraParams->depth_map = processDepthMapFrame(inputFrame, exifOrientation, &items,
+            &switchDimensions);
     if (cameraParams->depth_map == nullptr) {
         ALOGE("%s: Depth map processing failed!", __FUNCTION__);
         return BAD_VALUE;
@@ -274,7 +425,13 @@
         // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
         const dynamic_depth::Point<double> focalLength(inputFrame.mInstrinsicCalibration[0],
                 inputFrame.mInstrinsicCalibration[1]);
-        const Dimension imageSize(inputFrame.mMainJpegWidth, inputFrame.mMainJpegHeight);
+        size_t width = inputFrame.mMainJpegWidth;
+        size_t height = inputFrame.mMainJpegHeight;
+        if (switchDimensions) {
+            width = inputFrame.mMainJpegHeight;
+            height = inputFrame.mMainJpegWidth;
+        }
+        const Dimension imageSize(width, height);
         ImagingModelParams imagingParams(focalLength, imageSize);
         imagingParams.principal_point.x = inputFrame.mInstrinsicCalibration[2];
         imagingParams.principal_point.y = inputFrame.mInstrinsicCalibration[3];
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.h b/services/camera/libcameraservice/common/DepthPhotoProcessor.h
index 19889a1..6a2fbff 100644
--- a/services/camera/libcameraservice/common/DepthPhotoProcessor.h
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.h
@@ -23,19 +23,27 @@
 namespace android {
 namespace camera3 {
 
+enum DepthPhotoOrientation {
+    DEPTH_ORIENTATION_0_DEGREES   = 0,
+    DEPTH_ORIENTATION_90_DEGREES  = 90,
+    DEPTH_ORIENTATION_180_DEGREES = 180,
+    DEPTH_ORIENTATION_270_DEGREES = 270,
+};
+
 struct DepthPhotoInputFrame {
-    const char* mMainJpegBuffer;
-    size_t      mMainJpegSize;
-    size_t      mMainJpegWidth, mMainJpegHeight;
-    uint16_t*   mDepthMapBuffer;
-    size_t      mDepthMapWidth, mDepthMapHeight, mDepthMapStride;
-    size_t      mMaxJpegSize;
-    uint8_t     mJpegQuality;
-    uint8_t     mIsLogical;
-    float       mInstrinsicCalibration[5];
-    uint8_t     mIsInstrinsicCalibrationValid;
-    float       mLensDistortion[5];
-    uint8_t     mIsLensDistortionValid;
+    const char*           mMainJpegBuffer;
+    size_t                mMainJpegSize;
+    size_t                mMainJpegWidth, mMainJpegHeight;
+    uint16_t*             mDepthMapBuffer;
+    size_t                mDepthMapWidth, mDepthMapHeight, mDepthMapStride;
+    size_t                mMaxJpegSize;
+    uint8_t               mJpegQuality;
+    uint8_t               mIsLogical;
+    float                 mInstrinsicCalibration[5];
+    uint8_t               mIsInstrinsicCalibrationValid;
+    float                 mLensDistortion[5];
+    uint8_t               mIsLensDistortionValid;
+    DepthPhotoOrientation mOrientation;
 
     DepthPhotoInputFrame() :
             mMainJpegBuffer(nullptr),
@@ -52,7 +60,8 @@
             mInstrinsicCalibration{0.f},
             mIsInstrinsicCalibrationValid(0),
             mLensDistortion{0.f},
-            mIsLensDistortionValid(0) {}
+            mIsLensDistortionValid(0),
+            mOrientation(DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES) {}
 };
 
 static const char *kDepthPhotoLibrary = "libdepthphoto.so";
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 99b8043..923d17a 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -885,14 +885,14 @@
     return OK;
 }
 
-status_t Camera3Device::capture(CameraMetadata &request, int64_t* /*lastFrameNumber*/) {
+status_t Camera3Device::capture(CameraMetadata &request, int64_t* lastFrameNumber) {
     ATRACE_CALL();
 
     List<const PhysicalCameraSettingsList> requestsList;
     std::list<const SurfaceMap> surfaceMaps;
     convertToRequestList(requestsList, surfaceMaps, request);
 
-    return captureList(requestsList, surfaceMaps, /*lastFrameNumber*/NULL);
+    return captureList(requestsList, surfaceMaps, lastFrameNumber);
 }
 
 void Camera3Device::convertToRequestList(List<const PhysicalCameraSettingsList>& requestsList,
@@ -1027,11 +1027,22 @@
             return hardware::Void();
         }
 
+        if (outputStream->isAbandoned()) {
+            bufRet.val.error(StreamBufferRequestError::STREAM_DISCONNECTED);
+            allReqsSucceeds = false;
+            continue;
+        }
+
         bufRet.streamId = streamId;
+        size_t handOutBufferCount = outputStream->getOutstandingBuffersCount();
         uint32_t numBuffersRequested = bufReq.numBuffersRequested;
-        size_t totalHandout = outputStream->getOutstandingBuffersCount() + numBuffersRequested;
-        if (totalHandout > outputStream->asHalStream()->max_buffers) {
+        size_t totalHandout = handOutBufferCount + numBuffersRequested;
+        uint32_t maxBuffers = outputStream->asHalStream()->max_buffers;
+        if (totalHandout > maxBuffers) {
             // Not able to allocate enough buffer. Exit early for this stream
+            ALOGE("%s: request too much buffers for stream %d: at HAL: %zu + requesting: %d"
+                    " > max: %d", __FUNCTION__, streamId, handOutBufferCount,
+                    numBuffersRequested, maxBuffers);
             bufRet.val.error(StreamBufferRequestError::MAX_BUFFER_EXCEEDED);
             allReqsSucceeds = false;
             continue;
@@ -1757,18 +1768,20 @@
 
     if (format == HAL_PIXEL_FORMAT_BLOB) {
         ssize_t blobBufferSize;
-        if (dataSpace != HAL_DATASPACE_DEPTH) {
-            blobBufferSize = getJpegBufferSize(width, height);
-            if (blobBufferSize <= 0) {
-                SET_ERR_L("Invalid jpeg buffer size %zd", blobBufferSize);
-                return BAD_VALUE;
-            }
-        } else {
+        if (dataSpace == HAL_DATASPACE_DEPTH) {
             blobBufferSize = getPointCloudBufferSize();
             if (blobBufferSize <= 0) {
                 SET_ERR_L("Invalid point cloud buffer size %zd", blobBufferSize);
                 return BAD_VALUE;
             }
+        } else if (dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS)) {
+            blobBufferSize = width * height;
+        } else {
+            blobBufferSize = getJpegBufferSize(width, height);
+            if (blobBufferSize <= 0) {
+                SET_ERR_L("Invalid jpeg buffer size %zd", blobBufferSize);
+                return BAD_VALUE;
+            }
         }
         newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                 width, height, blobBufferSize, format, dataSpace, rotation,
@@ -2138,7 +2151,11 @@
 
 // Pause to reconfigure
 status_t Camera3Device::internalPauseAndWaitLocked(nsecs_t maxExpectedDuration) {
-    mRequestThread->setPaused(true);
+    if (mRequestThread.get() != nullptr) {
+        mRequestThread->setPaused(true);
+    } else {
+        return NO_INIT;
+    }
 
     ALOGV("%s: Camera %s: Internal wait until idle (% " PRIi64 " ns)", __FUNCTION__, mId.string(),
           maxExpectedDuration);
@@ -2184,12 +2201,11 @@
 
     mStatusWaiters++;
 
-    // Notify HAL to start draining. We need to notify the HalInterface layer
-    // even when the device is already IDLE, so HalInterface can reject incoming
-    // requestStreamBuffers call.
     if (!active && mUseHalBufManager) {
         auto streamIds = mOutputStreams.getStreamIds();
-        mRequestThread->signalPipelineDrain(streamIds);
+        if (mStatus == STATUS_ACTIVE) {
+            mRequestThread->signalPipelineDrain(streamIds);
+        }
         mRequestBufferSM.onWaitUntilIdle();
     }
 
@@ -3878,7 +3894,8 @@
             bool useHalBufManager) :
         mHidlSession(session),
         mRequestMetadataQueue(queue),
-        mUseHalBufManager(useHalBufManager) {
+        mUseHalBufManager(useHalBufManager),
+        mIsReconfigurationQuerySupported(true) {
     // Check with hardware service manager if we can downcast these interfaces
     // Somewhat expensive, so cache the results at startup
     auto castResult_3_5 = device::V3_5::ICameraDeviceSession::castFrom(mHidlSession);
@@ -3984,6 +4001,52 @@
     return res;
 }
 
+bool Camera3Device::HalInterface::isReconfigurationRequired(CameraMetadata& oldSessionParams,
+        CameraMetadata& newSessionParams) {
+    // We do reconfiguration by default;
+    bool ret = true;
+    if ((mHidlSession_3_5 != nullptr) && mIsReconfigurationQuerySupported) {
+        android::hardware::hidl_vec<uint8_t> oldParams, newParams;
+        camera_metadata_t* oldSessionMeta = const_cast<camera_metadata_t*>(
+                oldSessionParams.getAndLock());
+        camera_metadata_t* newSessionMeta = const_cast<camera_metadata_t*>(
+                newSessionParams.getAndLock());
+        oldParams.setToExternal(reinterpret_cast<uint8_t*>(oldSessionMeta),
+                get_camera_metadata_size(oldSessionMeta));
+        newParams.setToExternal(reinterpret_cast<uint8_t*>(newSessionMeta),
+                get_camera_metadata_size(newSessionMeta));
+        hardware::camera::common::V1_0::Status callStatus;
+        bool required;
+        auto hidlCb = [&callStatus, &required] (hardware::camera::common::V1_0::Status s,
+                bool requiredFlag) {
+            callStatus = s;
+            required = requiredFlag;
+        };
+        auto err = mHidlSession_3_5->isReconfigurationRequired(oldParams, newParams, hidlCb);
+        oldSessionParams.unlock(oldSessionMeta);
+        newSessionParams.unlock(newSessionMeta);
+        if (err.isOk()) {
+            switch (callStatus) {
+                case hardware::camera::common::V1_0::Status::OK:
+                    ret = required;
+                    break;
+                case hardware::camera::common::V1_0::Status::METHOD_NOT_SUPPORTED:
+                    mIsReconfigurationQuerySupported = false;
+                    ret = true;
+                    break;
+                default:
+                    ALOGV("%s: Reconfiguration query failed: %d", __FUNCTION__, callStatus);
+                    ret = true;
+            }
+        } else {
+            ALOGE("%s: Unexpected binder error: %s", __FUNCTION__, err.description().c_str());
+            ret = true;
+        }
+    }
+
+    return ret;
+}
+
 status_t Camera3Device::HalInterface::configureStreams(const camera_metadata_t *sessionParams,
         camera3_stream_configuration *config, const std::vector<uint32_t>& bufferSizes) {
     ATRACE_NAME("CameraHal::configureStreams");
@@ -4499,7 +4562,7 @@
         return;
     }
 
-    auto err = mHidlSession_3_5->signalStreamFlush(streamIds, mNextStreamConfigCounter);
+    auto err = mHidlSession_3_5->signalStreamFlush(streamIds, mNextStreamConfigCounter - 1);
     if (!err.isOk()) {
         ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
         return;
@@ -5095,9 +5158,10 @@
     ATRACE_CALL();
     bool updatesDetected = false;
 
+    CameraMetadata updatedParams(mLatestSessionParams);
     for (auto tag : mSessionParamKeys) {
         camera_metadata_ro_entry entry = settings.find(tag);
-        camera_metadata_entry lastEntry = mLatestSessionParams.find(tag);
+        camera_metadata_entry lastEntry = updatedParams.find(tag);
 
         if (entry.count > 0) {
             bool isDifferent = false;
@@ -5126,17 +5190,26 @@
                 if (!skipHFRTargetFPSUpdate(tag, entry, lastEntry)) {
                     updatesDetected = true;
                 }
-                mLatestSessionParams.update(entry);
+                updatedParams.update(entry);
             }
         } else if (lastEntry.count > 0) {
             // Value has been removed
             ALOGV("%s: Session parameter tag id %d removed", __FUNCTION__, tag);
-            mLatestSessionParams.erase(tag);
+            updatedParams.erase(tag);
             updatesDetected = true;
         }
     }
 
-    return updatesDetected;
+    bool reconfigureRequired;
+    if (updatesDetected) {
+        reconfigureRequired = mInterface->isReconfigurationRequired(mLatestSessionParams,
+                updatedParams);
+        mLatestSessionParams = updatedParams;
+    } else {
+        reconfigureRequired = false;
+    }
+
+    return reconfigureRequired;
 }
 
 bool Camera3Device::RequestThread::threadLoop() {
@@ -5249,6 +5322,11 @@
     ALOGVV("%s: %d: submitting %zu requests in a batch.", __FUNCTION__, __LINE__,
             mNextRequests.size());
 
+    sp<Camera3Device> parent = mParent.promote();
+    if (parent != nullptr) {
+        parent->mRequestBufferSM.onSubmittingRequest();
+    }
+
     bool submitRequestSuccess = false;
     nsecs_t tRequestStart = systemTime(SYSTEM_TIME_MONOTONIC);
     if (mInterface->supportBatchRequest()) {
@@ -5259,13 +5337,6 @@
     nsecs_t tRequestEnd = systemTime(SYSTEM_TIME_MONOTONIC);
     mRequestLatency.add(tRequestStart, tRequestEnd);
 
-    if (submitRequestSuccess) {
-        sp<Camera3Device> parent = mParent.promote();
-        if (parent != nullptr) {
-            parent->mRequestBufferSM.onRequestSubmitted();
-        }
-    }
-
     if (useFlushLock) {
         mFlushLock.unlock();
     }
@@ -5473,8 +5544,22 @@
                     return TIMED_OUT;
                 }
             }
-            outputStream->fireBufferRequestForFrameNumber(
-                    captureRequest->mResultExtras.frameNumber);
+
+            {
+                sp<Camera3Device> parent = mParent.promote();
+                if (parent != nullptr) {
+                    const String8& streamCameraId = outputStream->getPhysicalCameraId();
+                    for (const auto& settings : captureRequest->mSettingsList) {
+                        if ((streamCameraId.isEmpty() &&
+                                parent->getId() == settings.cameraId.c_str()) ||
+                                streamCameraId == settings.cameraId.c_str()) {
+                            outputStream->fireBufferRequestForFrameNumber(
+                                    captureRequest->mResultExtras.frameNumber,
+                                    settings.metadata);
+                        }
+                    }
+                }
+            }
 
             String8 physicalCameraId = outputStream->getPhysicalCameraId();
 
@@ -5689,18 +5774,21 @@
             captureRequest->mInputStream->returnInputBuffer(captureRequest->mInputBuffer);
         }
 
-        for (size_t i = 0; i < halRequest->num_output_buffers; i++) {
-            //Buffers that failed processing could still have
-            //valid acquire fence.
-            int acquireFence = (*outputBuffers)[i].acquire_fence;
-            if (0 <= acquireFence) {
-                close(acquireFence);
-                outputBuffers->editItemAt(i).acquire_fence = -1;
+        // No output buffer can be returned when using HAL buffer manager
+        if (!mUseHalBufManager) {
+            for (size_t i = 0; i < halRequest->num_output_buffers; i++) {
+                //Buffers that failed processing could still have
+                //valid acquire fence.
+                int acquireFence = (*outputBuffers)[i].acquire_fence;
+                if (0 <= acquireFence) {
+                    close(acquireFence);
+                    outputBuffers->editItemAt(i).acquire_fence = -1;
+                }
+                outputBuffers->editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
+                captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i], 0,
+                        /*timestampIncreasing*/true, std::vector<size_t> (),
+                        captureRequest->mResultExtras.frameNumber);
             }
-            outputBuffers->editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
-            captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i], 0,
-                    /*timestampIncreasing*/true, std::vector<size_t> (),
-                    captureRequest->mResultExtras.frameNumber);
         }
 
         if (sendRequestError) {
@@ -5806,16 +5894,16 @@
             if (mPaused == false) {
                 ALOGV("%s: RequestThread: Going idle", __FUNCTION__);
                 mPaused = true;
-                // Let the tracker know
-                sp<StatusTracker> statusTracker = mStatusTracker.promote();
-                if (statusTracker != 0) {
-                    statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
-                }
                 if (mNotifyPipelineDrain) {
                     mInterface->signalPipelineDrain(mStreamIdsToBeDrained);
                     mNotifyPipelineDrain = false;
                     mStreamIdsToBeDrained.clear();
                 }
+                // Let the tracker know
+                sp<StatusTracker> statusTracker = mStatusTracker.promote();
+                if (statusTracker != 0) {
+                    statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
+                }
                 sp<Camera3Device> parent = mParent.promote();
                 if (parent != nullptr) {
                     parent->mRequestBufferSM.onRequestThreadPaused();
@@ -5899,16 +5987,16 @@
         if (mPaused == false) {
             mPaused = true;
             ALOGV("%s: RequestThread: Paused", __FUNCTION__);
-            // Let the tracker know
-            sp<StatusTracker> statusTracker = mStatusTracker.promote();
-            if (statusTracker != 0) {
-                statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
-            }
             if (mNotifyPipelineDrain) {
                 mInterface->signalPipelineDrain(mStreamIdsToBeDrained);
                 mNotifyPipelineDrain = false;
                 mStreamIdsToBeDrained.clear();
             }
+            // Let the tracker know
+            sp<StatusTracker> statusTracker = mStatusTracker.promote();
+            if (statusTracker != 0) {
+                statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
+            }
             sp<Camera3Device> parent = mParent.promote();
             if (parent != nullptr) {
                 parent->mRequestBufferSM.onRequestThreadPaused();
@@ -6410,9 +6498,11 @@
     return;
 }
 
-void Camera3Device::RequestBufferStateMachine::onRequestSubmitted() {
+void Camera3Device::RequestBufferStateMachine::onSubmittingRequest() {
     std::lock_guard<std::mutex> lock(mLock);
     mRequestThreadPaused = false;
+    // The inflight map registration actually happens later, in prepareHalRequest,
+    // but this is a close enough approximation.
     mInflightMapEmpty = false;
     if (mStatus == RB_STATUS_STOPPED) {
         mStatus = RB_STATUS_READY;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index e5a38bb..b25d89d 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -309,6 +309,8 @@
         status_t close();
 
         void signalPipelineDrain(const std::vector<int>& streamIds);
+        bool isReconfigurationRequired(CameraMetadata& oldSessionParams,
+                CameraMetadata& newSessionParams);
 
         // method to extract buffer's unique ID
         // return pair of (newlySeenBuffer?, bufferId)
@@ -401,6 +403,7 @@
         uint32_t mNextStreamConfigCounter = 1;
 
         const bool mUseHalBufManager;
+        bool mIsReconfigurationQuerySupported;
     };
 
     sp<HalInterface> mInterface;
@@ -1317,7 +1320,7 @@
         void onInflightMapEmpty();
 
         // Events triggered by RequestThread
-        void onRequestSubmitted();
+        void onSubmittingRequest();
         void onRequestThreadPaused();
 
       private:
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index b296513..0571741 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -588,7 +588,11 @@
     if (mState != STATE_CONFIGURED) {
         ALOGE("%s: Stream %d: Can't get buffers if stream is not in CONFIGURED state %d",
                 __FUNCTION__, mId, mState);
-        return INVALID_OPERATION;
+        if (mState == STATE_ABANDONED) {
+            return DEAD_OBJECT;
+        } else {
+            return INVALID_OPERATION;
+        }
     }
 
     // Wait for new buffer returned back if we are running into the limit.
@@ -763,14 +767,15 @@
     return getInputBufferProducerLocked(producer);
 }
 
-void Camera3Stream::fireBufferRequestForFrameNumber(uint64_t frameNumber) {
+void Camera3Stream::fireBufferRequestForFrameNumber(uint64_t frameNumber,
+        const CameraMetadata& settings) {
     ATRACE_CALL();
     Mutex::Autolock l(mLock);
 
     for (auto &it : mBufferListenerList) {
         sp<Camera3StreamBufferListener> listener = it.promote();
         if (listener.get() != nullptr) {
-            listener->onBufferRequestForFrameNumber(frameNumber, getId());
+            listener->onBufferRequestForFrameNumber(frameNumber, getId(), settings);
         }
     }
 }
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 06deba9..5eb6a23 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -434,7 +434,8 @@
     /**
      * Notify buffer stream listeners about incoming request with particular frame number.
      */
-    void fireBufferRequestForFrameNumber(uint64_t frameNumber) override;
+    void fireBufferRequestForFrameNumber(uint64_t frameNumber,
+            const CameraMetadata& settings) override;
 
   protected:
     const int mId;
diff --git a/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h b/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
index 0e6104e..d0aee27 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
@@ -17,6 +17,7 @@
 #ifndef ANDROID_SERVERS_CAMERA3_STREAMBUFFERLISTENER_H
 #define ANDROID_SERVERS_CAMERA3_STREAMBUFFERLISTENER_H
 
+#include <camera/CameraMetadata.h>
 #include <gui/Surface.h>
 #include <utils/RefBase.h>
 
@@ -42,7 +43,8 @@
     // Buffer was released by the HAL
     virtual void onBufferReleased(const BufferInfo& bufferInfo) = 0;
     // Notify about incoming buffer request frame number
-    virtual void onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId) = 0;
+    virtual void onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId,
+            const CameraMetadata& settings) = 0;
 };
 
 }; //namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 7b80cbd..5cd11b7 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -18,6 +18,8 @@
 #define ANDROID_SERVERS_CAMERA3_STREAM_INTERFACE_H
 
 #include <utils/RefBase.h>
+
+#include <camera/CameraMetadata.h>
 #include "Camera3StreamBufferListener.h"
 #include "Camera3StreamBufferFreedListener.h"
 
@@ -346,7 +348,8 @@
     /**
      * Notify buffer stream listeners about incoming request with particular frame number.
      */
-    virtual void fireBufferRequestForFrameNumber(uint64_t frameNumber) = 0;
+    virtual void fireBufferRequestForFrameNumber(uint64_t frameNumber,
+            const CameraMetadata& settings) = 0;
 };
 
 } // namespace camera3
diff --git a/services/camera/libcameraservice/hidl/HidlCameraService.cpp b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
index 48f1d37..74cfe42 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraService.cpp
+++ b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
@@ -182,7 +182,8 @@
         }
     }
     std::vector<hardware::CameraStatus> cameraStatusAndIds{};
-    binder::Status serviceRet = mAidlICameraService->addListener(csListener, &cameraStatusAndIds);
+    binder::Status serviceRet =
+        mAidlICameraService->addListenerHelper(csListener, &cameraStatusAndIds, true);
     HStatus status = HStatus::NO_ERROR;
     if (!serviceRet.isOk()) {
       ALOGE("%s: Unable to add camera device status listener", __FUNCTION__);
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
index ad9963a..b4e7c32 100644
--- a/services/camera/libcameraservice/tests/Android.mk
+++ b/services/camera/libcameraservice/tests/Android.mk
@@ -27,14 +27,19 @@
     libcamera_client \
     libcamera_metadata \
     libutils \
+    libjpeg \
+    libexif \
     android.hardware.camera.common@1.0 \
     android.hardware.camera.provider@2.4 \
+    android.hardware.camera.provider@2.5 \
     android.hardware.camera.device@1.0 \
     android.hardware.camera.device@3.2 \
     android.hardware.camera.device@3.4
 
 LOCAL_C_INCLUDES += \
     system/media/private/camera/include \
+    external/dynamic_depth/includes \
+    external/dynamic_depth/internal \
 
 LOCAL_CFLAGS += -Wall -Wextra -Werror
 
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index 0086c6c..f47e5a5 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -33,6 +33,7 @@
 using android::hardware::camera::common::V1_0::CameraMetadataType;
 using android::hardware::camera::device::V3_2::ICameraDeviceCallback;
 using android::hardware::camera::device::V3_2::ICameraDeviceSession;
+using android::hardware::camera::provider::V2_5::DeviceState;
 
 /**
  * Basic test implementation of a camera ver. 3.2 device interface
@@ -87,7 +88,7 @@
 /**
  * Basic test implementation of a camera provider
  */
-struct TestICameraProvider : virtual public provider::V2_4::ICameraProvider {
+struct TestICameraProvider : virtual public provider::V2_5::ICameraProvider {
     sp<provider::V2_4::ICameraProviderCallback> mCallbacks;
     std::vector<hardware::hidl_string> mDeviceNames;
     sp<device::V3_2::ICameraDevice> mDeviceInterface;
@@ -101,6 +102,7 @@
 
     virtual hardware::Return<Status> setCallback(
             const sp<provider::V2_4::ICameraProviderCallback>& callbacks) override {
+        mCalledCounter[SET_CALLBACK]++;
         mCallbacks = callbacks;
         return hardware::Return<Status>(Status::OK);
     }
@@ -108,6 +110,7 @@
     using getVendorTags_cb = std::function<void(Status status,
             const hardware::hidl_vec<common::V1_0::VendorTagSection>& sections)>;
     hardware::Return<void> getVendorTags(getVendorTags_cb _hidl_cb) override {
+        mCalledCounter[GET_VENDOR_TAGS]++;
         _hidl_cb(Status::OK, mVendorTagSections);
         return hardware::Void();
     }
@@ -117,6 +120,7 @@
              bool support)>;
     virtual ::hardware::Return<void> isSetTorchModeSupported(
             isSetTorchModeSupported_cb _hidl_cb) override {
+        mCalledCounter[IS_SET_TORCH_MODE_SUPPORTED]++;
         _hidl_cb(Status::OK, false);
         return hardware::Void();
     }
@@ -124,6 +128,7 @@
     using getCameraIdList_cb = std::function<void(Status status,
             const hardware::hidl_vec<hardware::hidl_string>& cameraDeviceNames)>;
     virtual hardware::Return<void> getCameraIdList(getCameraIdList_cb _hidl_cb) override {
+        mCalledCounter[GET_CAMERA_ID_LIST]++;
         _hidl_cb(Status::OK, mDeviceNames);
         return hardware::Void();
     }
@@ -148,6 +153,25 @@
         return hardware::Void();
     }
 
+    virtual hardware::Return<void> notifyDeviceStateChange(
+            hardware::hidl_bitfield<DeviceState> newState) override {
+        mCalledCounter[NOTIFY_DEVICE_STATE]++;
+        mCurrentState = newState;
+        return hardware::Void();
+    }
+
+    enum MethodNames {
+        SET_CALLBACK,
+        GET_VENDOR_TAGS,
+        IS_SET_TORCH_MODE_SUPPORTED,
+        NOTIFY_DEVICE_STATE,
+        GET_CAMERA_ID_LIST,
+
+        METHOD_NAME_COUNT
+    };
+    int mCalledCounter[METHOD_NAME_COUNT] {0};
+
+    hardware::hidl_bitfield<DeviceState> mCurrentState = 0xFFFFFFFF; // Unlikely to be a real state
 };
 
 /**
@@ -209,11 +233,26 @@
 
     res = providerManager->initialize(statusListener, &serviceProxy);
     ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
+    // Check that both the "legacy" and "external" providers (really the same object) are each
+    // called once per init method
+    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::SET_CALLBACK], 2) <<
+            "Only one call to setCallback per provider expected during init";
+    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::GET_VENDOR_TAGS], 2) <<
+            "Only one call to getVendorTags per provider expected during init";
+    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::IS_SET_TORCH_MODE_SUPPORTED], 2) <<
+            "Only one call to isSetTorchModeSupported per provider expected during init";
+    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::GET_CAMERA_ID_LIST], 2) <<
+            "Only one call to getCameraIdList per provider expected during init";
+    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::NOTIFY_DEVICE_STATE], 2) <<
+            "Only one call to notifyDeviceState per provider expected during init";
 
     std::string legacyInstanceName = "legacy/0";
     std::string externalInstanceName = "external/0";
     bool gotLegacy = false;
     bool gotExternal = false;
+    EXPECT_EQ(2u, serviceProxy.mLastRequestedServiceNames.size()) <<
+            "Only two service queries expected to be seen by hardware service manager";
+
     for (auto& serviceName : serviceProxy.mLastRequestedServiceNames) {
         if (serviceName == legacyInstanceName) gotLegacy = true;
         if (serviceName == externalInstanceName) gotExternal = true;
@@ -375,3 +414,35 @@
     metadataCopy.dump(1, 2);
     secondMetadata.dump(1, 2);
 }
+
+TEST(CameraProviderManagerTest, NotifyStateChangeTest) {
+    std::vector<hardware::hidl_string> deviceNames {
+        "device@3.2/test/0",
+        "device@1.0/test/0",
+        "device@3.2/test/1"};
+
+    hardware::hidl_vec<common::V1_0::VendorTagSection> vendorSection;
+    status_t res;
+    sp<CameraProviderManager> providerManager = new CameraProviderManager();
+    sp<TestStatusListener> statusListener = new TestStatusListener();
+    TestInteractionProxy serviceProxy;
+    sp<TestICameraProvider> provider =  new TestICameraProvider(deviceNames,
+            vendorSection);
+    serviceProxy.setProvider(provider);
+
+    res = providerManager->initialize(statusListener, &serviceProxy);
+    ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
+
+    ASSERT_EQ(provider->mCurrentState,
+            static_cast<hardware::hidl_bitfield<DeviceState>>(DeviceState::NORMAL))
+            << "Initial device state not set";
+
+    res = providerManager->notifyDeviceStateChange(
+        static_cast<hardware::hidl_bitfield<DeviceState>>(DeviceState::FOLDED));
+
+    ASSERT_EQ(res, OK) << "Unable to call notifyDeviceStateChange";
+    ASSERT_EQ(provider->mCurrentState,
+            static_cast<hardware::hidl_bitfield<DeviceState>>(DeviceState::FOLDED))
+            << "Unable to change device state";
+
+}
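
Note on the bitfield handling exercised by NotifyStateChangeTest: notifyDeviceStateChange() takes a hardware::hidl_bitfield<DeviceState>, essentially the enum's underlying integer type, hence the repeated static_casts in the test. A minimal sketch of composing such a bitfield (the makeDeviceState() helper is purely illustrative and assumes the generated provider@2.5 types and HIDL support headers are included, as they are in the test above):

#include <initializer_list>

using android::hardware::hidl_bitfield;
using android::hardware::camera::provider::V2_5::DeviceState;

// Fold one or more DeviceState flags into the integer bitfield that
// notifyDeviceStateChange() expects.
static hidl_bitfield<DeviceState> makeDeviceState(std::initializer_list<DeviceState> states) {
    hidl_bitfield<DeviceState> bits = 0;
    for (auto s : states) {
        bits |= static_cast<hidl_bitfield<DeviceState>>(s);
    }
    return bits;
}

// Usage: providerManager->notifyDeviceStateChange(makeDeviceState({DeviceState::FOLDED}));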
diff --git a/services/camera/libcameraservice/tests/DepthProcessorTest.cpp b/services/camera/libcameraservice/tests/DepthProcessorTest.cpp
new file mode 100644
index 0000000..2162514
--- /dev/null
+++ b/services/camera/libcameraservice/tests/DepthProcessorTest.cpp
@@ -0,0 +1,382 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "DepthProcessorTest"
+
+#include <array>
+#include <random>
+
+#include <dlfcn.h>
+#include <gtest/gtest.h>
+
+#include "../common/DepthPhotoProcessor.h"
+#include "../utils/ExifUtils.h"
+#include "NV12Compressor.h"
+
+using namespace android;
+using namespace android::camera3;
+
+static const size_t kTestBufferWidth = 640;
+static const size_t kTestBufferHeight = 480;
+static const size_t kTestBufferNV12Size ((((kTestBufferWidth) * (kTestBufferHeight)) * 3) / 2);
+static const size_t kTestBufferDepthSize (kTestBufferWidth * kTestBufferHeight);
+static const size_t kSeed = 1234;
+
+void linkToDepthPhotoLibrary(void **libHandle /*out*/,
+        process_depth_photo_frame *processFrameFunc /*out*/) {
+    ASSERT_NE(libHandle, nullptr);
+    ASSERT_NE(processFrameFunc, nullptr);
+
+    *libHandle = dlopen(kDepthPhotoLibrary, RTLD_NOW | RTLD_LOCAL);
+    if (*libHandle != nullptr) {
+        *processFrameFunc = reinterpret_cast<camera3::process_depth_photo_frame> (
+                dlsym(*libHandle, kDepthPhotoProcessFunction));
+        ASSERT_NE(*processFrameFunc, nullptr);
+    }
+}
+
+void generateColorJpegBuffer(int jpegQuality, ExifOrientation orientationValue, bool includeExif,
+        bool switchDimensions, std::vector<uint8_t> *colorJpegBuffer /*out*/) {
+    ASSERT_NE(colorJpegBuffer, nullptr);
+
+    std::array<uint8_t, kTestBufferNV12Size> colorSourceBuffer;
+    std::default_random_engine gen(kSeed);
+    std::uniform_int_distribution<int> uniDist(0, UINT8_MAX - 1);
+    for (size_t i = 0; i < colorSourceBuffer.size(); i++) {
+        colorSourceBuffer[i] = uniDist(gen);
+    }
+
+    size_t width = kTestBufferWidth;
+    size_t height = kTestBufferHeight;
+    if (switchDimensions) {
+        width = kTestBufferHeight;
+        height = kTestBufferWidth;
+    }
+
+    NV12Compressor jpegCompressor;
+    if (includeExif) {
+        ASSERT_TRUE(jpegCompressor.compressWithExifOrientation(
+                reinterpret_cast<const unsigned char*> (colorSourceBuffer.data()), width, height,
+                jpegQuality, orientationValue));
+    } else {
+        ASSERT_TRUE(jpegCompressor.compress(
+                reinterpret_cast<const unsigned char*> (colorSourceBuffer.data()), width, height,
+                jpegQuality));
+    }
+
+    *colorJpegBuffer = std::move(jpegCompressor.getCompressedData());
+    ASSERT_FALSE(colorJpegBuffer->empty());
+}
+
+void generateDepth16Buffer(std::array<uint16_t, kTestBufferDepthSize> *depth16Buffer /*out*/) {
+    ASSERT_NE(depth16Buffer, nullptr);
+    std::default_random_engine gen(kSeed+1);
+    std::uniform_int_distribution<int> uniDist(0, UINT16_MAX - 1);
+    for (size_t i = 0; i < depth16Buffer->size(); i++) {
+        (*depth16Buffer)[i] = uniDist(gen);
+    }
+}
+
+TEST(DepthProcessorTest, LinkToLibrary) {
+    void *libHandle;
+    process_depth_photo_frame processFunc;
+    linkToDepthPhotoLibrary(&libHandle, &processFunc);
+    if (libHandle != nullptr) {
+        dlclose(libHandle);
+    }
+}
+
+TEST(DepthProcessorTest, BadInput) {
+    void *libHandle;
+    int jpegQuality = 95;
+
+    process_depth_photo_frame processFunc;
+    linkToDepthPhotoLibrary(&libHandle, &processFunc);
+    if (libHandle == nullptr) {
+        // Depth library not present, nothing more to test.
+        return;
+    }
+
+    DepthPhotoInputFrame inputFrame;
+    // Worst case both depth and confidence maps have the same size as the main color image.
+    inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
+
+    std::vector<uint8_t> colorJpegBuffer;
+    generateColorJpegBuffer(jpegQuality, ExifOrientation::ORIENTATION_UNDEFINED,
+            /*includeExif*/ false, /*switchDimensions*/ false, &colorJpegBuffer);
+
+    std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
+    generateDepth16Buffer(&depth16Buffer);
+
+    std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
+    size_t actualDepthPhotoSize = 0;
+
+    inputFrame.mMainJpegWidth = kTestBufferWidth;
+    inputFrame.mMainJpegHeight = kTestBufferHeight;
+    inputFrame.mJpegQuality = jpegQuality;
+    ASSERT_NE(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+                &actualDepthPhotoSize), 0);
+
+    inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (colorJpegBuffer.data());
+    inputFrame.mMainJpegSize = colorJpegBuffer.size();
+    ASSERT_NE(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+                &actualDepthPhotoSize), 0);
+
+    inputFrame.mDepthMapBuffer = depth16Buffer.data();
+    inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = kTestBufferWidth;
+    inputFrame.mDepthMapHeight = kTestBufferHeight;
+    ASSERT_NE(processFunc(inputFrame, depthPhotoBuffer.size(), nullptr,
+                &actualDepthPhotoSize), 0);
+
+    ASSERT_NE(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(), nullptr),
+            0);
+
+    dlclose(libHandle);
+}
+
+TEST(DepthProcessorTest, BasicDepthPhotoValidation) {
+    void *libHandle;
+    int jpegQuality = 95;
+
+    process_depth_photo_frame processFunc;
+    linkToDepthPhotoLibrary(&libHandle, &processFunc);
+    if (libHandle == nullptr) {
+        // Depth library not present, nothing more to test.
+        return;
+    }
+
+    std::vector<uint8_t> colorJpegBuffer;
+    generateColorJpegBuffer(jpegQuality, ExifOrientation::ORIENTATION_UNDEFINED,
+            /*includeExif*/ false, /*switchDimensions*/ false, &colorJpegBuffer);
+
+    std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
+    generateDepth16Buffer(&depth16Buffer);
+
+    DepthPhotoInputFrame inputFrame;
+    inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (colorJpegBuffer.data());
+    inputFrame.mMainJpegSize = colorJpegBuffer.size();
+    // Worst case both depth and confidence maps have the same size as the main color image.
+    inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
+    inputFrame.mMainJpegWidth = kTestBufferWidth;
+    inputFrame.mMainJpegHeight = kTestBufferHeight;
+    inputFrame.mJpegQuality = jpegQuality;
+    inputFrame.mDepthMapBuffer = depth16Buffer.data();
+    inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = kTestBufferWidth;
+    inputFrame.mDepthMapHeight = kTestBufferHeight;
+
+    std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
+    size_t actualDepthPhotoSize = 0;
+    ASSERT_EQ(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+                &actualDepthPhotoSize), 0);
+    ASSERT_TRUE((actualDepthPhotoSize > 0) && (depthPhotoBuffer.size() >= actualDepthPhotoSize));
+
+    // The final depth photo must consist of three jpeg images:
+    //  - the main color image
+    //  - the depth map image
+    //  - the confidence map image
+    size_t mainJpegSize = 0;
+    ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data(), actualDepthPhotoSize,
+                &mainJpegSize), OK);
+    ASSERT_TRUE((mainJpegSize > 0) && (mainJpegSize < actualDepthPhotoSize));
+    size_t depthMapSize = 0;
+    ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data() + mainJpegSize,
+                actualDepthPhotoSize - mainJpegSize, &depthMapSize), OK);
+    ASSERT_TRUE((depthMapSize > 0) && (depthMapSize < (actualDepthPhotoSize - mainJpegSize)));
+
+    dlclose(libHandle);
+}
+
+TEST(DepthProcessorTest, TestDepthPhotoExifOrientation) {
+    void *libHandle;
+    int jpegQuality = 95;
+
+    process_depth_photo_frame processFunc;
+    linkToDepthPhotoLibrary(&libHandle, &processFunc);
+    if (libHandle == nullptr) {
+        // Depth library not present, nothing more to test.
+        return;
+    }
+
+    ExifOrientation exifOrientations[] = { ExifOrientation::ORIENTATION_UNDEFINED,
+            ExifOrientation::ORIENTATION_0_DEGREES, ExifOrientation::ORIENTATION_90_DEGREES,
+            ExifOrientation::ORIENTATION_180_DEGREES, ExifOrientation::ORIENTATION_270_DEGREES };
+    for (auto exifOrientation : exifOrientations) {
+        std::vector<uint8_t> colorJpegBuffer;
+        generateColorJpegBuffer(jpegQuality, exifOrientation, /*includeExif*/ true,
+                /*switchDimensions*/ false, &colorJpegBuffer);
+        if (exifOrientation != ExifOrientation::ORIENTATION_UNDEFINED) {
+            auto jpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+            ASSERT_EQ(NV12Compressor::getExifOrientation(colorJpegBuffer.data(),
+                    colorJpegBuffer.size(), &jpegExifOrientation), OK);
+            ASSERT_EQ(exifOrientation, jpegExifOrientation);
+        }
+
+        std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
+        generateDepth16Buffer(&depth16Buffer);
+
+        DepthPhotoInputFrame inputFrame;
+        inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (colorJpegBuffer.data());
+        inputFrame.mMainJpegSize = colorJpegBuffer.size();
+        // Worst case both depth and confidence maps have the same size as the main color image.
+        inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
+        inputFrame.mMainJpegWidth = kTestBufferWidth;
+        inputFrame.mMainJpegHeight = kTestBufferHeight;
+        inputFrame.mJpegQuality = jpegQuality;
+        inputFrame.mDepthMapBuffer = depth16Buffer.data();
+        inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = kTestBufferWidth;
+        inputFrame.mDepthMapHeight = kTestBufferHeight;
+
+        std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
+        size_t actualDepthPhotoSize = 0;
+        ASSERT_EQ(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+                &actualDepthPhotoSize), 0);
+        ASSERT_TRUE((actualDepthPhotoSize > 0) &&
+                (depthPhotoBuffer.size() >= actualDepthPhotoSize));
+
+        size_t mainJpegSize = 0;
+        ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data(), actualDepthPhotoSize,
+                &mainJpegSize), OK);
+        ASSERT_TRUE((mainJpegSize > 0) && (mainJpegSize < actualDepthPhotoSize));
+        size_t depthMapSize = 0;
+        ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data() + mainJpegSize,
+                actualDepthPhotoSize - mainJpegSize, &depthMapSize), OK);
+        ASSERT_TRUE((depthMapSize > 0) && (depthMapSize < (actualDepthPhotoSize - mainJpegSize)));
+        size_t confidenceMapSize = actualDepthPhotoSize - (mainJpegSize + depthMapSize);
+
+        // Depth and confidence images must have the same EXIF orientation as the source
+        auto depthJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+        ASSERT_EQ(NV12Compressor::getExifOrientation(depthPhotoBuffer.data() + mainJpegSize,
+                depthMapSize, &depthJpegExifOrientation), OK);
+        if (exifOrientation == ORIENTATION_UNDEFINED) {
+            // In case of undefined or missing EXIF orientation, always expect 0 degrees in the
+            // depth map.
+            ASSERT_EQ(depthJpegExifOrientation, ExifOrientation::ORIENTATION_0_DEGREES);
+        } else {
+            ASSERT_EQ(depthJpegExifOrientation, exifOrientation);
+        }
+
+        auto confidenceJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+        ASSERT_EQ(NV12Compressor::getExifOrientation(
+                depthPhotoBuffer.data() + mainJpegSize + depthMapSize,
+                confidenceMapSize, &confidenceJpegExifOrientation), OK);
+        if (exifOrientation == ORIENTATION_UNDEFINED) {
+            // In case of undefined or missing EXIF orientation, always expect 0 degrees in the
+            // confidence map.
+            ASSERT_EQ(confidenceJpegExifOrientation, ExifOrientation::ORIENTATION_0_DEGREES);
+        } else {
+            ASSERT_EQ(confidenceJpegExifOrientation, exifOrientation);
+        }
+    }
+
+    dlclose(libHandle);
+}
+
+TEST(DepthProcessorTest, TestDepthPhotoPhysicalRotation) {
+    void *libHandle;
+    int jpegQuality = 95;
+
+    process_depth_photo_frame processFunc;
+    linkToDepthPhotoLibrary(&libHandle, &processFunc);
+    if (libHandle == nullptr) {
+        // Depth library not present, nothing more to test.
+        return;
+    }
+
+    // In case of physical rotation, the EXIF orientation must always be 0.
+    auto exifOrientation = ExifOrientation::ORIENTATION_0_DEGREES;
+    DepthPhotoOrientation depthOrientations[] = {
+            DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES,
+            DepthPhotoOrientation::DEPTH_ORIENTATION_90_DEGREES,
+            DepthPhotoOrientation::DEPTH_ORIENTATION_180_DEGREES,
+            DepthPhotoOrientation::DEPTH_ORIENTATION_270_DEGREES };
+    for (auto depthOrientation : depthOrientations) {
+        std::vector<uint8_t> colorJpegBuffer;
+        bool switchDimensions = false;
+        size_t expectedWidth = kTestBufferWidth;
+        size_t expectedHeight = kTestBufferHeight;
+        if ((depthOrientation == DepthPhotoOrientation::DEPTH_ORIENTATION_90_DEGREES) ||
+                (depthOrientation == DepthPhotoOrientation::DEPTH_ORIENTATION_270_DEGREES)) {
+            switchDimensions = true;
+            expectedWidth = kTestBufferHeight;
+            expectedHeight = kTestBufferWidth;
+        }
+        generateColorJpegBuffer(jpegQuality, exifOrientation, /*includeExif*/ true,
+                switchDimensions, &colorJpegBuffer);
+        auto jpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+        ASSERT_EQ(NV12Compressor::getExifOrientation(colorJpegBuffer.data(), colorJpegBuffer.size(),
+                &jpegExifOrientation), OK);
+        ASSERT_EQ(exifOrientation, jpegExifOrientation);
+
+        std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
+        generateDepth16Buffer(&depth16Buffer);
+
+        DepthPhotoInputFrame inputFrame;
+        inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (colorJpegBuffer.data());
+        inputFrame.mMainJpegSize = colorJpegBuffer.size();
+        // Worst case both depth and confidence maps have the same size as the main color image.
+        inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
+        inputFrame.mMainJpegWidth = kTestBufferWidth;
+        inputFrame.mMainJpegHeight = kTestBufferHeight;
+        inputFrame.mJpegQuality = jpegQuality;
+        inputFrame.mDepthMapBuffer = depth16Buffer.data();
+        inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = kTestBufferWidth;
+        inputFrame.mDepthMapHeight = kTestBufferHeight;
+        inputFrame.mOrientation = depthOrientation;
+
+        std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
+        size_t actualDepthPhotoSize = 0;
+        ASSERT_EQ(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+                &actualDepthPhotoSize), 0);
+        ASSERT_TRUE((actualDepthPhotoSize > 0) &&
+                (depthPhotoBuffer.size() >= actualDepthPhotoSize));
+
+        size_t mainJpegSize = 0;
+        ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data(), actualDepthPhotoSize,
+                &mainJpegSize), OK);
+        ASSERT_TRUE((mainJpegSize > 0) && (mainJpegSize < actualDepthPhotoSize));
+        size_t depthMapSize = 0;
+        ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data() + mainJpegSize,
+                actualDepthPhotoSize - mainJpegSize, &depthMapSize), OK);
+        ASSERT_TRUE((depthMapSize > 0) && (depthMapSize < (actualDepthPhotoSize - mainJpegSize)));
+        size_t confidenceMapSize = actualDepthPhotoSize - (mainJpegSize + depthMapSize);
+
+        // Depth and confidence images must have the same EXIF orientation as the source
+        auto depthJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+        ASSERT_EQ(NV12Compressor::getExifOrientation(depthPhotoBuffer.data() + mainJpegSize,
+                depthMapSize, &depthJpegExifOrientation), OK);
+        ASSERT_EQ(depthJpegExifOrientation, exifOrientation);
+        size_t depthMapWidth, depthMapHeight;
+        ASSERT_EQ(NV12Compressor::getJpegImageDimensions(depthPhotoBuffer.data() + mainJpegSize,
+                depthMapSize, &depthMapWidth, &depthMapHeight), OK);
+        ASSERT_EQ(depthMapWidth, expectedWidth);
+        ASSERT_EQ(depthMapHeight, expectedHeight);
+
+        auto confidenceJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+        ASSERT_EQ(NV12Compressor::getExifOrientation(
+                depthPhotoBuffer.data() + mainJpegSize + depthMapSize, confidenceMapSize,
+                &confidenceJpegExifOrientation), OK);
+        ASSERT_EQ(confidenceJpegExifOrientation, exifOrientation);
+        size_t confidenceMapWidth, confidenceMapHeight;
+        ASSERT_EQ(NV12Compressor::getJpegImageDimensions(
+                depthPhotoBuffer.data() + mainJpegSize + depthMapSize, confidenceMapSize,
+                &confidenceMapWidth, &confidenceMapHeight), OK);
+        ASSERT_EQ(confidenceMapWidth, expectedWidth);
+        ASSERT_EQ(confidenceMapHeight, expectedHeight);
+    }
+
+    dlclose(libHandle);
+}
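
The assertions above all rely on the same container layout: a depth photo is the main color JPEG, the depth map JPEG, and the confidence map JPEG concatenated back to back. A minimal sketch (not part of the patch) of how that layout could be split with the NV12Compressor::findJpegSize() helper added below; DepthPhotoParts and splitDepthPhoto() are illustrative names only:

#include <cstdint>
#include <utils/Errors.h>
#include "NV12Compressor.h"

// Illustrative container layout: [ main JPEG | depth map JPEG | confidence map JPEG ]
struct DepthPhotoParts {
    size_t mainJpegSize = 0;
    size_t depthMapSize = 0;
    size_t confidenceMapSize = 0;
};

static android::status_t splitDepthPhoto(uint8_t* buffer, size_t totalSize,
        DepthPhotoParts* parts /*out*/) {
    // The main color image starts at offset 0.
    android::status_t res = NV12Compressor::findJpegSize(buffer, totalSize, &parts->mainJpegSize);
    if (res != android::OK) return res;
    // The depth map immediately follows the main image.
    res = NV12Compressor::findJpegSize(buffer + parts->mainJpegSize,
            totalSize - parts->mainJpegSize, &parts->depthMapSize);
    if (res != android::OK) return res;
    // Whatever remains is the confidence map.
    parts->confidenceMapSize = totalSize - (parts->mainJpegSize + parts->depthMapSize);
    return android::OK;
}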
diff --git a/services/camera/libcameraservice/tests/NV12Compressor.cpp b/services/camera/libcameraservice/tests/NV12Compressor.cpp
new file mode 100644
index 0000000..0a41a1f
--- /dev/null
+++ b/services/camera/libcameraservice/tests/NV12Compressor.cpp
@@ -0,0 +1,379 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "Test_NV12Compressor"
+
+#include "NV12Compressor.h"
+
+#include <libexif/exif-data.h>
+#include <netinet/in.h>
+
+using namespace android;
+using namespace android::camera3;
+
+namespace std {
+template <>
+struct default_delete<ExifEntry> {
+    inline void operator()(ExifEntry* entry) const { exif_entry_unref(entry); }
+};
+
+template <>
+struct default_delete<ExifData> {
+    inline void operator()(ExifData* data) const { exif_data_unref(data); }
+};
+
+}  // namespace std
+
+bool NV12Compressor::compress(const unsigned char* data, int width, int height, int quality) {
+    if (!configureCompressor(width, height, quality)) {
+        // the method will have logged a more detailed error message than we can
+        // provide here so just return.
+        return false;
+    }
+
+    return compressData(data, /*exifData*/ nullptr);
+}
+
+bool NV12Compressor::compressWithExifOrientation(const unsigned char* data, int width, int height,
+        int quality, android::camera3::ExifOrientation exifValue) {
+    std::unique_ptr<ExifData> exifData(exif_data_new());
+    if (exifData.get() == nullptr) {
+        return false;
+    }
+
+    exif_data_set_option(exifData.get(), EXIF_DATA_OPTION_FOLLOW_SPECIFICATION);
+    exif_data_set_data_type(exifData.get(), EXIF_DATA_TYPE_COMPRESSED);
+    exif_data_set_byte_order(exifData.get(), EXIF_BYTE_ORDER_INTEL);
+    std::unique_ptr<ExifEntry> exifEntry(exif_entry_new());
+    if (exifEntry.get() == nullptr) {
+        return false;
+    }
+
+    exifEntry->tag = EXIF_TAG_ORIENTATION;
+    exif_content_add_entry(exifData->ifd[EXIF_IFD_0], exifEntry.get());
+    exif_entry_initialize(exifEntry.get(), exifEntry->tag);
+    exif_set_short(exifEntry->data, EXIF_BYTE_ORDER_INTEL, exifValue);
+
+    if (!configureCompressor(width, height, quality)) {
+        return false;
+    }
+
+    return compressData(data, exifData.get());
+}
+
+const std::vector<uint8_t>& NV12Compressor::getCompressedData() const {
+    return mDestManager.mBuffer;
+}
+
+bool NV12Compressor::configureCompressor(int width, int height, int quality) {
+    mCompressInfo.err = jpeg_std_error(&mErrorManager);
+    // NOTE! DANGER! Do not construct any non-trivial objects below setjmp!
+    // The compiler will not generate code to destroy them during the return
+    // below so they will leak. Additionally, do not place any calls to libjpeg
+    // that can fail above this line or any error will cause undefined behavior.
+    if (setjmp(mErrorManager.mJumpBuffer)) {
+        // This is where the error handler will jump in case setup fails
+        // The error manager will ALOG an appropriate error message
+        return false;
+    }
+
+    jpeg_create_compress(&mCompressInfo);
+
+    mCompressInfo.image_width = width;
+    mCompressInfo.image_height = height;
+    mCompressInfo.input_components = 3;
+    mCompressInfo.in_color_space = JCS_YCbCr;
+    jpeg_set_defaults(&mCompressInfo);
+
+    jpeg_set_quality(&mCompressInfo, quality, TRUE);
+    // It may seem weird to set color space here again but this will also set
+    // other fields. These fields might be overwritten by jpeg_set_defaults
+    jpeg_set_colorspace(&mCompressInfo, JCS_YCbCr);
+    mCompressInfo.raw_data_in = TRUE;
+    mCompressInfo.dct_method = JDCT_IFAST;
+    // Set sampling factors
+    mCompressInfo.comp_info[0].h_samp_factor = 2;
+    mCompressInfo.comp_info[0].v_samp_factor = 2;
+    mCompressInfo.comp_info[1].h_samp_factor = 1;
+    mCompressInfo.comp_info[1].v_samp_factor = 1;
+    mCompressInfo.comp_info[2].h_samp_factor = 1;
+    mCompressInfo.comp_info[2].v_samp_factor = 1;
+
+    mCompressInfo.dest = &mDestManager;
+
+    return true;
+}
+
+static void deinterleave(const uint8_t* vuPlanar, std::vector<uint8_t>& uRows,
+        std::vector<uint8_t>& vRows, int rowIndex, int width, int height, int stride) {
+    int numRows = (height - rowIndex) / 2;
+    if (numRows > 8) numRows = 8;
+    for (int row = 0; row < numRows; ++row) {
+        int offset = ((rowIndex >> 1) + row) * stride;
+        const uint8_t* vu = vuPlanar + offset;
+        for (int i = 0; i < (width >> 1); ++i) {
+            int index = row * (width >> 1) + i;
+            uRows[index] = vu[1];
+            vRows[index] = vu[0];
+            vu += 2;
+        }
+    }
+}
+
+bool NV12Compressor::compressData(const unsigned char* data, ExifData* exifData) {
+    const uint8_t* y[16];
+    const uint8_t* cb[8];
+    const uint8_t* cr[8];
+    const uint8_t** planes[3] = { y, cb, cr };
+
+    int i, offset;
+    int width = mCompressInfo.image_width;
+    int height = mCompressInfo.image_height;
+    const uint8_t* yPlanar = data;
+    const uint8_t* vuPlanar = data + (width * height);
+    std::vector<uint8_t> uRows(8 * (width >> 1));
+    std::vector<uint8_t> vRows(8 * (width >> 1));
+
+    // NOTE! DANGER! Do not construct any non-trivial objects below setjmp!
+    // The compiler will not generate code to destroy them during the return
+    // below so they will leak. Additionally, do not place any calls to libjpeg
+    // that can fail above this line or any error will cause undefined behavior.
+    if (setjmp(mErrorManager.mJumpBuffer)) {
+        // This is where the error handler will jump in case compression fails
+        // The error manager will ALOG an appropriate error message
+        return false;
+    }
+
+    jpeg_start_compress(&mCompressInfo, TRUE);
+
+    attachExifData(exifData);
+
+    // process 16 lines of Y and 8 lines of U/V each time.
+    while (mCompressInfo.next_scanline < mCompressInfo.image_height) {
+        //deinterleave u and v
+        deinterleave(vuPlanar, uRows, vRows, mCompressInfo.next_scanline,
+                     width, height, width);
+
+        // Jpeg library ignores the rows whose indices are greater than height.
+        for (i = 0; i < 16; i++) {
+            // y row
+            y[i] = yPlanar + (mCompressInfo.next_scanline + i) * width;
+
+            // construct u row and v row
+            if ((i & 1) == 0) {
+                // height and width are both halved because of downsampling
+                offset = (i >> 1) * (width >> 1);
+                cb[i/2] = &uRows[offset];
+                cr[i/2] = &vRows[offset];
+            }
+        }
+        jpeg_write_raw_data(&mCompressInfo, const_cast<JSAMPIMAGE>(planes), 16);
+    }
+
+    jpeg_finish_compress(&mCompressInfo);
+    jpeg_destroy_compress(&mCompressInfo);
+
+    return true;
+}
+
+bool NV12Compressor::attachExifData(ExifData* exifData) {
+    if (exifData == nullptr) {
+        // This is not an error, we don't require EXIF data
+        return true;
+    }
+
+    // Save the EXIF data to memory
+    unsigned char* rawData = nullptr;
+    unsigned int size = 0;
+    exif_data_save_data(exifData, &rawData, &size);
+    if (rawData == nullptr) {
+        ALOGE("Failed to create EXIF data block");
+        return false;
+    }
+
+    jpeg_write_marker(&mCompressInfo, JPEG_APP0 + 1, rawData, size);
+    free(rawData);
+    return true;
+}
+
+NV12Compressor::ErrorManager::ErrorManager() {
+    error_exit = &onJpegError;
+}
+
+void NV12Compressor::ErrorManager::onJpegError(j_common_ptr cinfo) {
+    // NOTE! Do not construct any non-trivial objects in this method at the top
+    // scope. Their destructors will not be called. If you do need such an
+    // object, create a local scope that does not include the longjmp call;
+    // that ensures the object is destroyed before longjmp is called.
+    ErrorManager* errorManager = reinterpret_cast<ErrorManager*>(cinfo->err);
+
+    // Format and log error message
+    char errorMessage[JMSG_LENGTH_MAX];
+    (*errorManager->format_message)(cinfo, errorMessage);
+    errorMessage[sizeof(errorMessage) - 1] = '\0';
+    ALOGE("JPEG compression error: %s", errorMessage);
+    jpeg_destroy(cinfo);
+
+    // And through the looking glass we go
+    longjmp(errorManager->mJumpBuffer, 1);
+}
+
+NV12Compressor::DestinationManager::DestinationManager() {
+    init_destination = &initDestination;
+    empty_output_buffer = &emptyOutputBuffer;
+    term_destination = &termDestination;
+}
+
+void NV12Compressor::DestinationManager::initDestination(j_compress_ptr cinfo) {
+    auto manager = reinterpret_cast<DestinationManager*>(cinfo->dest);
+
+    // Start out with some arbitrary but not too large buffer size
+    manager->mBuffer.resize(16 * 1024);
+    manager->next_output_byte = &manager->mBuffer[0];
+    manager->free_in_buffer = manager->mBuffer.size();
+}
+
+boolean NV12Compressor::DestinationManager::emptyOutputBuffer(
+        j_compress_ptr cinfo) {
+    auto manager = reinterpret_cast<DestinationManager*>(cinfo->dest);
+
+    // Keep doubling the size of the buffer so the amortized cost of the
+    // allocations stays very low
+    size_t oldSize = manager->mBuffer.size();
+    manager->mBuffer.resize(oldSize * 2);
+    manager->next_output_byte = &manager->mBuffer[oldSize];
+    manager->free_in_buffer = manager->mBuffer.size() - oldSize;
+    return manager->free_in_buffer != 0;
+}
+
+void NV12Compressor::DestinationManager::termDestination(j_compress_ptr cinfo) {
+    auto manager = reinterpret_cast<DestinationManager*>(cinfo->dest);
+
+    // Resize down to the exact size of the output; that is, remove as many
+    // bytes as remain unused in the buffer
+    manager->mBuffer.resize(manager->mBuffer.size() - manager->free_in_buffer);
+}
+
+status_t NV12Compressor::findJpegSize(uint8_t *jpegBuffer, size_t maxSize, size_t *size /*out*/) {
+    if ((size == nullptr) || (jpegBuffer == nullptr)) {
+        return BAD_VALUE;
+    }
+
+    if (checkJpegStart(jpegBuffer) == 0) {
+        return BAD_VALUE;
+    }
+
+    // Read JFIF segment markers, skip over segment data
+    *size = kMarkerLength; // skip over the Start Of Image marker
+    while (*size <= maxSize - kMarkerLength) {
+        segment_t *segment = (segment_t*)(jpegBuffer + *size);
+        uint8_t type = checkJpegMarker(segment->marker);
+        if (type == 0) { // invalid marker, no more segments, begin JPEG data
+            break;
+        }
+        if (type == kEndOfImage || *size > maxSize - sizeof(segment_t)) {
+            return BAD_VALUE;
+        }
+
+        size_t length = ntohs(segment->length);
+        *size += length + kMarkerLength;
+    }
+
+    // Find End of Image
+    // Scan JPEG buffer until End of Image
+    bool foundEnd = false;
+    for ( ; *size <= maxSize - kMarkerLength; (*size)++) {
+        if (checkJpegEnd(jpegBuffer + *size)) {
+            foundEnd = true;
+            *size += kMarkerLength;
+            break;
+        }
+    }
+
+    if (!foundEnd) {
+        return BAD_VALUE;
+    }
+
+    if (*size > maxSize) {
+        *size = maxSize;
+    }
+
+    return OK;
+}
+
+status_t NV12Compressor::getJpegImageDimensions(uint8_t *jpegBuffer,
+        size_t jpegBufferSize, size_t *width /*out*/, size_t *height /*out*/) {
+    if ((jpegBuffer == nullptr) || (width == nullptr) || (height == nullptr) ||
+            (jpegBufferSize == 0u)) {
+        return BAD_VALUE;
+    }
+
+    // Scan JPEG buffer until Start of Frame
+    bool foundSOF = false;
+    size_t currentPos;
+    for (currentPos = 0; currentPos <= jpegBufferSize - kMarkerLength; currentPos++) {
+        if (checkStartOfFrame(jpegBuffer + currentPos)) {
+            foundSOF = true;
+            currentPos += kMarkerLength;
+            break;
+        }
+    }
+
+    if (!foundSOF) {
+        ALOGE("%s: Start of Frame not found", __func__);
+        return BAD_VALUE;
+    }
+
+    sof_t *startOfFrame = reinterpret_cast<sof_t *> (jpegBuffer + currentPos);
+    *width = ntohs(startOfFrame->width);
+    *height = ntohs(startOfFrame->height);
+
+    return OK;
+}
+
+status_t NV12Compressor::getExifOrientation(uint8_t *jpegBuffer, size_t jpegBufferSize,
+        ExifOrientation *exifValue /*out*/) {
+    if ((jpegBuffer == nullptr) || (exifValue == nullptr) || (jpegBufferSize == 0u)) {
+        return BAD_VALUE;
+    }
+
+    std::unique_ptr<ExifData> exifData(exif_data_new());
+    exif_data_load_data(exifData.get(), jpegBuffer, jpegBufferSize);
+    ExifEntry *orientation = exif_content_get_entry(exifData->ifd[EXIF_IFD_0],
+            EXIF_TAG_ORIENTATION);
+    if ((orientation == nullptr) || (orientation->size != sizeof(ExifShort))) {
+        return BAD_VALUE;
+    }
+
+    auto orientationValue = exif_get_short(orientation->data,
+            exif_data_get_byte_order(exifData.get()));
+    status_t ret;
+    switch (orientationValue) {
+        case ExifOrientation::ORIENTATION_0_DEGREES:
+        case ExifOrientation::ORIENTATION_90_DEGREES:
+        case ExifOrientation::ORIENTATION_180_DEGREES:
+        case ExifOrientation::ORIENTATION_270_DEGREES:
+            *exifValue = static_cast<ExifOrientation> (orientationValue);
+            ret = OK;
+            break;
+        default:
+            ALOGE("%s: Unexpected EXIF orientation value: %u", __FUNCTION__, orientationValue);
+            ret = BAD_VALUE;
+    }
+
+    return ret;
+}
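
The error handling in this compressor follows libjpeg's setjmp/longjmp convention, which is why the comments above warn against constructing non-trivial objects below the setjmp point. A minimal sketch (not part of the patch; names are illustrative) of the scoping pattern the onJpegError() comment recommends:

#include <csetjmp>
#include <string>

static jmp_buf gJumpBuffer;  // plays the role of ErrorManager::mJumpBuffer

static void handleFatalJpegError(const char* rawMessage) {
    {
        // Non-trivial locals live in their own scope, so their destructors run
        // before longjmp() abandons the current stack frames.
        std::string message(rawMessage);
        // ... log `message` here ...
    }
    // No C++ objects with pending destructors remain past this point.
    longjmp(gJumpBuffer, 1);
}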
diff --git a/services/camera/libcameraservice/tests/NV12Compressor.h b/services/camera/libcameraservice/tests/NV12Compressor.h
new file mode 100644
index 0000000..ee22d5e
--- /dev/null
+++ b/services/camera/libcameraservice/tests/NV12Compressor.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TEST_CAMERA_JPEG_STUB_NV12_COMPRESSOR_H
+#define TEST_CAMERA_JPEG_STUB_NV12_COMPRESSOR_H
+
+#include <setjmp.h>
+#include <stdlib.h>
+extern "C" {
+#include <jpeglib.h>
+#include <jerror.h>
+}
+
+#include <utils/Errors.h>
+#include <vector>
+
+#include "../utils/ExifUtils.h"
+
+struct _ExifData;
+typedef _ExifData ExifData;
+
+class NV12Compressor {
+public:
+    NV12Compressor() {}
+
+    /* Compress |data| which represents raw NV21 encoded data of dimensions
+     * |width| * |height|.
+     */
+    bool compress(const unsigned char* data, int width, int height, int quality);
+    bool compressWithExifOrientation(const unsigned char* data, int width, int height, int quality,
+            android::camera3::ExifOrientation exifValue);
+
+    /* Get a reference to the compressed data; this will return an empty vector
+     * if compress has not been called yet
+     */
+    const std::vector<unsigned char>& getCompressedData() const;
+
+    // Utility methods
+    static android::status_t findJpegSize(uint8_t *jpegBuffer, size_t maxSize,
+            size_t *size /*out*/);
+
+    static android::status_t getExifOrientation(uint8_t *jpegBuffer,
+            size_t jpegBufferSize, android::camera3::ExifOrientation *exifValue /*out*/);
+
+    /* Get JPEG image dimensions from the first Start Of Frame. Please note that, due to the
+     * way the JPEG buffer is scanned, if the image contains a thumbnail the dimensions returned
+     * will be those of the thumbnail and not of the main image.
+     */
+    static android::status_t getJpegImageDimensions(uint8_t *jpegBuffer, size_t jpegBufferSize,
+            size_t *width /*out*/, size_t *height /*out*/);
+
+private:
+
+    struct DestinationManager : jpeg_destination_mgr {
+        DestinationManager();
+
+        static void initDestination(j_compress_ptr cinfo);
+        static boolean emptyOutputBuffer(j_compress_ptr cinfo);
+        static void termDestination(j_compress_ptr cinfo);
+
+        std::vector<unsigned char> mBuffer;
+    };
+
+    struct ErrorManager : jpeg_error_mgr {
+        ErrorManager();
+
+        static void onJpegError(j_common_ptr cinfo);
+
+        jmp_buf mJumpBuffer;
+    };
+
+    static const size_t kMarkerLength = 2; // length of a marker
+    static const uint8_t kMarker = 0xFF; // First byte of marker
+    static const uint8_t kStartOfImage = 0xD8; // Start of Image
+    static const uint8_t kEndOfImage = 0xD9; // End of Image
+    static const uint8_t kStartOfFrame = 0xC0; // Start of Frame
+
+    struct __attribute__((packed)) segment_t {
+        uint8_t marker[kMarkerLength];
+        uint16_t length;
+    };
+
+    struct __attribute__((packed)) sof_t {
+        uint16_t length;
+        uint8_t precision;
+        uint16_t height;
+        uint16_t width;
+    };
+
+    // check for start of image marker
+    static bool checkStartOfFrame(uint8_t* buf) {
+        return buf[0] == kMarker && buf[1] == kStartOfFrame;
+    }
+
+    // check for start of image marker
+    static bool checkJpegStart(uint8_t* buf) {
+        return buf[0] == kMarker && buf[1] == kStartOfImage;
+    }
+
+    // check for End of Image marker
+    static bool checkJpegEnd(uint8_t *buf) {
+        return buf[0] == kMarker && buf[1] == kEndOfImage;
+    }
+
+    // check for arbitrary marker, returns marker type (second byte)
+    // returns 0 if no marker found. Note: 0x00 is not a valid marker type
+    static uint8_t checkJpegMarker(uint8_t *buf) {
+        return (buf[0] == kMarker) ? buf[1] : 0;
+    }
+
+    jpeg_compress_struct mCompressInfo;
+    DestinationManager mDestManager;
+    ErrorManager mErrorManager;
+
+    bool configureCompressor(int width, int height, int quality);
+    bool compressData(const unsigned char* data, ExifData* exifData);
+    bool attachExifData(ExifData* exifData);
+};
+
+#endif  // TEST_CAMERA_JPEG_STUB_NV12_COMPRESSOR_H
+
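
The segment_t and sof_t structs above capture the JFIF layout that findJpegSize() and getJpegImageDimensions() walk: each segment begins with a 0xFF marker byte and a type byte, followed (for most segment types) by a big-endian 16-bit length that includes the two length bytes but not the marker itself. A small sketch (not part of the patch; the helper name is illustrative) of how the next marker offset falls out of that layout:

#include <cstddef>
#include <cstdint>

// Given the offset of a segment marker, return the offset of the next marker.
// The big-endian length counts the length field itself, so the next marker
// sits right after marker (2 bytes) + length bytes.
static size_t nextSegmentOffset(const uint8_t* jpeg, size_t markerOffset) {
    const size_t length = (static_cast<size_t>(jpeg[markerOffset + 2]) << 8) |
            jpeg[markerOffset + 3];
    return markerOffset + 2 /*marker*/ + length;
}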
diff --git a/services/camera/libcameraservice/utils/ExifUtils.cpp b/services/camera/libcameraservice/utils/ExifUtils.cpp
new file mode 100644
index 0000000..c0afdc1
--- /dev/null
+++ b/services/camera/libcameraservice/utils/ExifUtils.cpp
@@ -0,0 +1,1104 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraServerExifUtils"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <cutils/log.h>
+
+#include <inttypes.h>
+#include <math.h>
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+#include "ExifUtils.h"
+
+extern "C" {
+#include <libexif/exif-data.h>
+}
+
+namespace std {
+
+template <>
+struct default_delete<ExifEntry> {
+    inline void operator()(ExifEntry* entry) const { exif_entry_unref(entry); }
+};
+
+}  // namespace std
+
+
+namespace android {
+namespace camera3 {
+
+
+class ExifUtilsImpl : public ExifUtils {
+public:
+    ExifUtilsImpl();
+
+    virtual ~ExifUtilsImpl();
+
+    // initialize() can be called multiple times. Calling it again clears any
+    // previously set Exif tags.
+    virtual bool initialize(const unsigned char *app1Segment, size_t app1SegmentSize);
+    virtual bool initializeEmpty();
+
+    // set all known fields from a metadata structure
+    virtual bool setFromMetadata(const CameraMetadata& metadata,
+            const CameraMetadata& staticInfo,
+            const size_t imageWidth,
+            const size_t imageHeight);
+
+    // sets the lens aperture.
+    // Returns false if memory allocation fails.
+    virtual bool setAperture(float aperture);
+
+    // sets the color space.
+    // Returns false if memory allocation fails.
+    virtual bool setColorSpace(uint16_t color_space);
+
+    // sets the date and time of image last modified. It takes local time. The
+    // name of the tag is DateTime in IFD0.
+    // Returns false if memory allocation fails.
+    virtual bool setDateTime(const struct tm& t);
+
+    // sets the digital zoom ratio. If the numerator is 0, it means digital zoom
+    // was not used.
+    // Returns false if memory allocation fails.
+    virtual bool setDigitalZoomRatio(
+            uint32_t crop_width, uint32_t crop_height,
+            uint32_t sensor_width, uint32_t sensor_height);
+
+    // Sets the exposure bias.
+    // Returns false if memory allocation fails.
+    virtual bool setExposureBias(int32_t ev,
+            uint32_t ev_step_numerator, uint32_t ev_step_denominator);
+
+    // sets the exposure mode set when the image was shot.
+    // Returns false if memory allocation fails.
+    virtual bool setExposureMode(uint8_t exposure_mode);
+
+    // sets the exposure time, given in seconds.
+    // Returns false if memory allocation fails.
+    virtual bool setExposureTime(float exposure_time);
+
+    // sets the status of flash.
+    // Returns false if memory allocation fails.
+    virtual bool setFlash(uint8_t flash_available, uint8_t flash_state, uint8_t ae_mode);
+
+    // sets the F number.
+    // Returns false if memory allocation fails.
+    virtual bool setFNumber(float f_number);
+
+    // sets the focal length of lens used to take the image in millimeters.
+    // Returns false if memory allocation fails.
+    virtual bool setFocalLength(float focal_length);
+
+    // sets the focal length of lens for 35mm film used to take the image in millimeters.
+    // Returns false if memory allocation fails.
+    virtual bool setFocalLengthIn35mmFilm(float focal_length,
+            float sensor_size_x, float sensor_size_y);
+
+    // sets the altitude in meters.
+    // Returns false if memory allocation fails.
+    virtual bool setGpsAltitude(double altitude);
+
+    // sets the latitude with degrees minutes seconds format.
+    // Returns false if memory allocation fails.
+    virtual bool setGpsLatitude(double latitude);
+
+    // sets the longitude with degrees minutes seconds format.
+    // Returns false if memory allocation fails.
+    virtual bool setGpsLongitude(double longitude);
+
+    // sets GPS processing method.
+    // Returns false if memory allocation fails.
+    virtual bool setGpsProcessingMethod(const std::string& method);
+
+    // sets GPS date stamp and time stamp (atomic clock). It takes UTC time.
+    // Returns false if memory allocation fails.
+    virtual bool setGpsTimestamp(const struct tm& t);
+
+    // sets the length (number of rows) of main image.
+    // Returns false if memory allocation fails.
+    virtual bool setImageHeight(uint32_t length);
+
+    // sets the width (number of columns) of main image.
+    // Returns false if memory allocation fails.
+    virtual bool setImageWidth(uint32_t width);
+
+    // sets the ISO speed.
+    // Returns false if memory allocation fails.
+    virtual bool setIsoSpeedRating(uint16_t iso_speed_ratings);
+
+    // sets the smallest F number of the lens.
+    // Returns false if memory allocation fails.
+    virtual bool setMaxAperture(float aperture);
+
+    // sets image orientation.
+    // Returns false if memory allocation fails.
+    virtual bool setOrientation(uint16_t degrees);
+
+    // sets image orientation.
+    // Returns false if memory allocation fails.
+    virtual bool setOrientationValue(ExifOrientation orientationValue);
+
+    // sets the shutter speed.
+    // Returns false if memory allocation fails.
+    virtual bool setShutterSpeed(float exposure_time);
+
+    // sets the distance to the subject, given in meters.
+    // Returns false if memory allocation fails.
+    virtual bool setSubjectDistance(float diopters);
+
+    // sets the fractions of seconds for the <DateTime> tag.
+    // Returns false if memory allocation fails.
+    virtual bool setSubsecTime(const std::string& subsec_time);
+
+    // sets the white balance mode set when the image was shot.
+    // Returns false if memory allocation fails.
+    virtual bool setWhiteBalance(uint8_t white_balance);
+
+    // Generates APP1 segment.
+    // Returns false if generating APP1 segment fails.
+    virtual bool generateApp1();
+
+    // Gets buffer of APP1 segment. This method must be called only after calling
+    // generateApp1().
+    virtual const uint8_t* getApp1Buffer();
+
+    // Gets length of APP1 segment. This method must be called only after calling
+    // generateApp1().
+    virtual unsigned int getApp1Length();
+
+  protected:
+    // sets the version of this standard supported.
+    // Returns false if memory allocation fails.
+    virtual bool setExifVersion(const std::string& exif_version);
+
+    // Resets the pointers and memories.
+    virtual void reset();
+
+    // Adds a variable length tag to |exif_data_|. It will remove the original one
+    // if the tag exists.
+    // Returns the entry of the tag. The reference count of returned ExifEntry is
+    // two.
+    virtual std::unique_ptr<ExifEntry> addVariableLengthEntry(ExifIfd ifd,
+            ExifTag tag, ExifFormat format, uint64_t components, unsigned int size);
+
+    // Adds an entry of |tag| in |exif_data_|. It won't remove the original one if
+    // the tag exists.
+    // Returns the entry of the tag. It adds one reference count to returned
+    // ExifEntry.
+    virtual std::unique_ptr<ExifEntry> addEntry(ExifIfd ifd, ExifTag tag);
+
+    // Helper functions to add exif data with different types.
+    virtual bool setShort(ExifIfd ifd, ExifTag tag, uint16_t value, const std::string& msg);
+
+    virtual bool setLong(ExifIfd ifd, ExifTag tag, uint32_t value, const std::string& msg);
+
+    virtual bool setRational(ExifIfd ifd, ExifTag tag, uint32_t numerator,
+            uint32_t denominator, const std::string& msg);
+
+    virtual bool setSRational(ExifIfd ifd, ExifTag tag, int32_t numerator,
+            int32_t denominator, const std::string& msg);
+
+    virtual bool setString(ExifIfd ifd, ExifTag tag, ExifFormat format,
+            const std::string& buffer, const std::string& msg);
+
+    float convertToApex(float val) {
+        return 2.0f * log2f(val);
+    }
+
+    // Destroys the buffer of APP1 segment if it exists.
+    virtual void destroyApp1();
+
+    // The Exif data (APP1). Owned by this class.
+    ExifData* exif_data_;
+    // The raw data of APP1 segment. It's allocated by ExifMem in |exif_data_| but
+    // owned by this class.
+    uint8_t* app1_buffer_;
+    // The length of |app1_buffer_|.
+    unsigned int app1_length_;
+
+    // Precision used when converting floats to rationals for EXIF tags.
+    const static int kRationalPrecision = 10000;
+};
+
+#define SET_SHORT(ifd, tag, value)                      \
+    do {                                                \
+        if (setShort(ifd, tag, value, #tag) == false)   \
+            return false;                               \
+    } while (0);
+
+#define SET_LONG(ifd, tag, value)                       \
+    do {                                                \
+        if (setLong(ifd, tag, value, #tag) == false)    \
+            return false;                               \
+    } while (0);
+
+#define SET_RATIONAL(ifd, tag, numerator, denominator)                      \
+    do {                                                                    \
+        if (setRational(ifd, tag, numerator, denominator, #tag) == false)   \
+            return false;                                                   \
+    } while (0);
+
+#define SET_SRATIONAL(ifd, tag, numerator, denominator)                       \
+    do {                                                                      \
+        if (setSRational(ifd, tag, numerator, denominator, #tag) == false)    \
+            return false;                                                     \
+    } while (0);
+
+#define SET_STRING(ifd, tag, format, buffer)                                  \
+    do {                                                                      \
+        if (setString(ifd, tag, format, buffer, #tag) == false)               \
+            return false;                                                     \
+    } while (0);
+
+// This comes from the Exif Version 2.2 standard table 6.
+const char gExifAsciiPrefix[] = {0x41, 0x53, 0x43, 0x49, 0x49, 0x0, 0x0, 0x0};
+
+static void setLatitudeOrLongitudeData(unsigned char* data, double num) {
+    // Take the integer part of |num|.
+    ExifLong degrees = static_cast<ExifLong>(num);
+    ExifLong minutes = static_cast<ExifLong>(60 * (num - degrees));
+    ExifLong microseconds =
+            static_cast<ExifLong>(3600000000u * (num - degrees - minutes / 60.0));
+    exif_set_rational(data, EXIF_BYTE_ORDER_INTEL, {degrees, 1});
+    exif_set_rational(data + sizeof(ExifRational), EXIF_BYTE_ORDER_INTEL, {minutes, 1});
+    exif_set_rational(data + 2 * sizeof(ExifRational), EXIF_BYTE_ORDER_INTEL,
+            {microseconds, 1000000});
+}
+
+ExifUtils *ExifUtils::create() {
+    return new ExifUtilsImpl();
+}
+
+ExifUtils::~ExifUtils() {
+}
+
+ExifUtilsImpl::ExifUtilsImpl()
+        : exif_data_(nullptr), app1_buffer_(nullptr), app1_length_(0) {}
+
+ExifUtilsImpl::~ExifUtilsImpl() {
+    reset();
+}
+
+bool ExifUtilsImpl::initialize(const unsigned char *app1Segment, size_t app1SegmentSize) {
+    reset();
+    exif_data_ = exif_data_new_from_data(app1Segment, app1SegmentSize);
+    if (exif_data_ == nullptr) {
+        ALOGE("%s: allocate memory for exif_data_ failed", __FUNCTION__);
+        return false;
+    }
+    // set the image options.
+    exif_data_set_option(exif_data_, EXIF_DATA_OPTION_FOLLOW_SPECIFICATION);
+    exif_data_set_data_type(exif_data_, EXIF_DATA_TYPE_COMPRESSED);
+    exif_data_set_byte_order(exif_data_, EXIF_BYTE_ORDER_INTEL);
+
+    // set exif version to 2.2.
+    if (!setExifVersion("0220")) {
+        return false;
+    }
+
+    return true;
+}
+
+bool ExifUtilsImpl::initializeEmpty() {
+    reset();
+    exif_data_ = exif_data_new();
+    if (exif_data_ == nullptr) {
+        ALOGE("%s: allocate memory for exif_data_ failed", __FUNCTION__);
+        return false;
+    }
+    // set the image options.
+    exif_data_set_option(exif_data_, EXIF_DATA_OPTION_FOLLOW_SPECIFICATION);
+    exif_data_set_data_type(exif_data_, EXIF_DATA_TYPE_COMPRESSED);
+    exif_data_set_byte_order(exif_data_, EXIF_BYTE_ORDER_INTEL);
+
+    // set exif version to 2.2.
+    if (!setExifVersion("0220")) {
+        return false;
+    }
+
+    return true;
+}
+
+bool ExifUtilsImpl::setAperture(float aperture) {
+    float apexValue = convertToApex(aperture);
+    SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_APERTURE_VALUE,
+            static_cast<uint32_t>(std::round(apexValue * kRationalPrecision)),
+            kRationalPrecision);
+    return true;
+}
+
+bool ExifUtilsImpl::setColorSpace(uint16_t color_space) {
+    SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_COLOR_SPACE, color_space);
+    return true;
+}
+
+bool ExifUtilsImpl::setDateTime(const struct tm& t) {
+    // The length is 20 bytes including the NUL terminator, as defined in the Exif standard.
+    char str[20];
+    int result = snprintf(str, sizeof(str), "%04i:%02i:%02i %02i:%02i:%02i",
+            t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec);
+    if (result != sizeof(str) - 1) {
+        ALOGW("%s: Input time is invalid", __FUNCTION__);
+        return false;
+    }
+    std::string buffer(str);
+    SET_STRING(EXIF_IFD_0, EXIF_TAG_DATE_TIME, EXIF_FORMAT_ASCII, buffer);
+    SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_DATE_TIME_ORIGINAL, EXIF_FORMAT_ASCII, buffer);
+    SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_DATE_TIME_DIGITIZED, EXIF_FORMAT_ASCII, buffer);
+    return true;
+}
+
+bool ExifUtilsImpl::setDigitalZoomRatio(
+        uint32_t crop_width, uint32_t crop_height,
+        uint32_t sensor_width, uint32_t sensor_height) {
+    float zoomRatioX = (crop_width == 0) ? 1.0 : 1.0 * sensor_width / crop_width;
+    float zoomRatioY = (crop_height == 0) ? 1.0 : 1.0 * sensor_height / crop_height;
+    float zoomRatio = std::max(zoomRatioX, zoomRatioY);
+    const static float noZoomThreshold = 1.02f;
+
+    if (zoomRatio <= noZoomThreshold) {
+        SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_DIGITAL_ZOOM_RATIO, 0, 1);
+    } else {
+        SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_DIGITAL_ZOOM_RATIO,
+                static_cast<uint32_t>(std::round(zoomRatio * kRationalPrecision)),
+                kRationalPrecision);
+    }
+    return true;
+}
+
+bool ExifUtilsImpl::setExposureMode(uint8_t exposure_mode) {
+    uint16_t exposureMode = (exposure_mode == ANDROID_CONTROL_AE_MODE_OFF) ? 1 : 0;
+    SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_EXPOSURE_MODE, exposureMode);
+    return true;
+}
+
+bool ExifUtilsImpl::setExposureTime(float exposure_time) {
+    SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_EXPOSURE_TIME,
+            static_cast<uint32_t>(std::round(exposure_time * kRationalPrecision)),
+            kRationalPrecision);
+    return true;
+}
+
+bool ExifUtilsImpl::setFlash(uint8_t flash_available, uint8_t flash_state, uint8_t ae_mode) {
+    // EXIF_TAG_FLASH bits layout per EXIF standard:
+    // Bit 0:    0 - did not fire
+    //           1 - fired
+    // Bit 1-2:  status of return light
+    // Bit 3-4:  0 - unknown
+    //           1 - compulsory flash firing
+    //           2 - compulsory flash suppression
+    //           3 - auto mode
+    // Bit 5:    0 - flash function present
+    //           1 - no flash function
+    // Bit 6:    0 - no red-eye reduction mode or unknown
+    //           1 - red-eye reduction supported
+    uint16_t flash = 0x20;
+
+    if (flash_available == ANDROID_FLASH_INFO_AVAILABLE_TRUE) {
+        flash = 0x00;
+
+        if (flash_state == ANDROID_FLASH_STATE_FIRED) {
+            flash |= 0x1;
+        }
+        if (ae_mode == ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE) {
+            flash |= 0x40;
+        }
+
+        uint16_t flashMode = 0;
+        switch (ae_mode) {
+            case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH:
+            case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE:
+               flashMode = 3; // AUTO
+               break;
+            case ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH:
+            case ANDROID_CONTROL_AE_MODE_ON_EXTERNAL_FLASH:
+               flashMode = 1; // ON
+               break;
+            case ANDROID_CONTROL_AE_MODE_OFF:
+            case ANDROID_CONTROL_AE_MODE_ON:
+               flashMode = 2; // OFF
+               break;
+            default:
+               flashMode = 0; // UNKNOWN
+               break;
+        }
+        flash |= (flashMode << 3);
+    }
+    SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_FLASH, flash);
+    return true;
+}
+
+bool ExifUtilsImpl::setFNumber(float f_number) {
+    SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_FNUMBER,
+            static_cast<uint32_t>(std::round(f_number * kRationalPrecision)),
+            kRationalPrecision);
+    return true;
+}
+
+bool ExifUtilsImpl::setFocalLength(float focal_length) {
+    uint32_t numerator = static_cast<uint32_t>(std::round(focal_length * kRationalPrecision));
+    SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_FOCAL_LENGTH, numerator, kRationalPrecision);
+    return true;
+}
+
+bool ExifUtilsImpl::setFocalLengthIn35mmFilm(
+        float focal_length, float sensor_size_x, float sensor_size_y) {
+    static const float filmDiagonal = 43.27; // diagonal of 35mm film
+    static const float minSensorDiagonal = 0.01;
+    float sensorDiagonal = std::sqrt(
+            sensor_size_x * sensor_size_x + sensor_size_y * sensor_size_y);
+    sensorDiagonal = std::max(sensorDiagonal, minSensorDiagonal);
+    float focalLength35mmFilm = std::round(focal_length * filmDiagonal / sensorDiagonal);
+    focalLength35mmFilm = std::min(1.0f * 65535, focalLength35mmFilm);
+
+    SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_FOCAL_LENGTH_IN_35MM_FILM,
+            static_cast<uint16_t>(focalLength35mmFilm));
+    return true;
+}
+
+bool ExifUtilsImpl::setGpsAltitude(double altitude) {
+    ExifTag refTag = static_cast<ExifTag>(EXIF_TAG_GPS_ALTITUDE_REF);
+    std::unique_ptr<ExifEntry> refEntry =
+            addVariableLengthEntry(EXIF_IFD_GPS, refTag, EXIF_FORMAT_BYTE, 1, 1);
+    if (!refEntry) {
+        ALOGE("%s: Adding GPSAltitudeRef exif entry failed", __FUNCTION__);
+        return false;
+    }
+    if (altitude >= 0) {
+        *refEntry->data = 0;
+    } else {
+        *refEntry->data = 1;
+        altitude *= -1;
+    }
+
+    ExifTag tag = static_cast<ExifTag>(EXIF_TAG_GPS_ALTITUDE);
+    std::unique_ptr<ExifEntry> entry = addVariableLengthEntry(
+            EXIF_IFD_GPS, tag, EXIF_FORMAT_RATIONAL, 1, sizeof(ExifRational));
+    if (!entry) {
+        exif_content_remove_entry(exif_data_->ifd[EXIF_IFD_GPS], refEntry.get());
+        ALOGE("%s: Adding GPSAltitude exif entry failed", __FUNCTION__);
+        return false;
+    }
+    exif_set_rational(entry->data, EXIF_BYTE_ORDER_INTEL,
+            {static_cast<ExifLong>(altitude * 1000), 1000});
+
+    return true;
+}
+
+bool ExifUtilsImpl::setGpsLatitude(double latitude) {
+    const ExifTag refTag = static_cast<ExifTag>(EXIF_TAG_GPS_LATITUDE_REF);
+    std::unique_ptr<ExifEntry> refEntry =
+            addVariableLengthEntry(EXIF_IFD_GPS, refTag, EXIF_FORMAT_ASCII, 2, 2);
+    if (!refEntry) {
+        ALOGE("%s: Adding GPSLatitudeRef exif entry failed", __FUNCTION__);
+        return false;
+    }
+    if (latitude >= 0) {
+        memcpy(refEntry->data, "N", sizeof("N"));
+    } else {
+        memcpy(refEntry->data, "S", sizeof("S"));
+        latitude *= -1;
+    }
+
+    const ExifTag tag = static_cast<ExifTag>(EXIF_TAG_GPS_LATITUDE);
+    std::unique_ptr<ExifEntry> entry = addVariableLengthEntry(
+            EXIF_IFD_GPS, tag, EXIF_FORMAT_RATIONAL, 3, 3 * sizeof(ExifRational));
+    if (!entry) {
+        exif_content_remove_entry(exif_data_->ifd[EXIF_IFD_GPS], refEntry.get());
+        ALOGE("%s: Adding GPSLatitude exif entry failed", __FUNCTION__);
+        return false;
+    }
+    setLatitudeOrLongitudeData(entry->data, latitude);
+
+    return true;
+}
+
+bool ExifUtilsImpl::setGpsLongitude(double longitude) {
+    ExifTag refTag = static_cast<ExifTag>(EXIF_TAG_GPS_LONGITUDE_REF);
+    std::unique_ptr<ExifEntry> refEntry =
+            addVariableLengthEntry(EXIF_IFD_GPS, refTag, EXIF_FORMAT_ASCII, 2, 2);
+    if (!refEntry) {
+        ALOGE("%s: Adding GPSLongitudeRef exif entry failed", __FUNCTION__);
+        return false;
+    }
+    if (longitude >= 0) {
+        memcpy(refEntry->data, "E", sizeof("E"));
+    } else {
+        memcpy(refEntry->data, "W", sizeof("W"));
+        longitude *= -1;
+    }
+
+    ExifTag tag = static_cast<ExifTag>(EXIF_TAG_GPS_LONGITUDE);
+    std::unique_ptr<ExifEntry> entry = addVariableLengthEntry(
+            EXIF_IFD_GPS, tag, EXIF_FORMAT_RATIONAL, 3, 3 * sizeof(ExifRational));
+    if (!entry) {
+        exif_content_remove_entry(exif_data_->ifd[EXIF_IFD_GPS], refEntry.get());
+        ALOGE("%s: Adding GPSLongitude exif entry failed", __FUNCTION__);
+        return false;
+    }
+    setLatitudeOrLongitudeData(entry->data, longitude);
+
+    return true;
+}
+
+bool ExifUtilsImpl::setGpsProcessingMethod(const std::string& method) {
+    std::string buffer =
+            std::string(gExifAsciiPrefix, sizeof(gExifAsciiPrefix)) + method;
+    SET_STRING(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_PROCESSING_METHOD),
+            EXIF_FORMAT_UNDEFINED, buffer);
+    return true;
+}
+
+bool ExifUtilsImpl::setGpsTimestamp(const struct tm& t) {
+    const ExifTag dateTag = static_cast<ExifTag>(EXIF_TAG_GPS_DATE_STAMP);
+    const size_t kGpsDateStampSize = 11;
+    std::unique_ptr<ExifEntry> entry = addVariableLengthEntry(EXIF_IFD_GPS,
+            dateTag, EXIF_FORMAT_ASCII, kGpsDateStampSize, kGpsDateStampSize);
+    if (!entry) {
+        ALOGE("%s: Adding GPSDateStamp exif entry failed", __FUNCTION__);
+        return false;
+    }
+    int result = snprintf(reinterpret_cast<char*>(entry->data), kGpsDateStampSize,
+            "%04i:%02i:%02i", t.tm_year + 1900, t.tm_mon + 1, t.tm_mday);
+    if (result != kGpsDateStampSize - 1) {
+        ALOGW("%s: Input time is invalid", __FUNCTION__);
+        return false;
+    }
+
+    const ExifTag timeTag = static_cast<ExifTag>(EXIF_TAG_GPS_TIME_STAMP);
+    entry = addVariableLengthEntry(EXIF_IFD_GPS, timeTag, EXIF_FORMAT_RATIONAL, 3,
+            3 * sizeof(ExifRational));
+    if (!entry) {
+        ALOGE("%s: Adding GPSTimeStamp exif entry failed", __FUNCTION__);
+        return false;
+    }
+    exif_set_rational(entry->data, EXIF_BYTE_ORDER_INTEL,
+            {static_cast<ExifLong>(t.tm_hour), 1});
+    exif_set_rational(entry->data + sizeof(ExifRational), EXIF_BYTE_ORDER_INTEL,
+            {static_cast<ExifLong>(t.tm_min), 1});
+    exif_set_rational(entry->data + 2 * sizeof(ExifRational), EXIF_BYTE_ORDER_INTEL,
+            {static_cast<ExifLong>(t.tm_sec), 1});
+
+    return true;
+}
+
+bool ExifUtilsImpl::setImageHeight(uint32_t length) {
+    SET_LONG(EXIF_IFD_0, EXIF_TAG_IMAGE_LENGTH, length);
+    SET_LONG(EXIF_IFD_EXIF, EXIF_TAG_PIXEL_Y_DIMENSION, length);
+    return true;
+}
+
+bool ExifUtilsImpl::setImageWidth(uint32_t width) {
+    SET_LONG(EXIF_IFD_0, EXIF_TAG_IMAGE_WIDTH, width);
+    SET_LONG(EXIF_IFD_EXIF, EXIF_TAG_PIXEL_X_DIMENSION, width);
+    return true;
+}
+
+bool ExifUtilsImpl::setIsoSpeedRating(uint16_t iso_speed_ratings) {
+    SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_ISO_SPEED_RATINGS, iso_speed_ratings);
+    return true;
+}
+
+bool ExifUtilsImpl::setMaxAperture(float aperture) {
+    float maxAperture = convertToApex(aperture);
+    SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_MAX_APERTURE_VALUE,
+            static_cast<uint32_t>(std::round(maxAperture * kRationalPrecision)),
+            kRationalPrecision);
+    return true;
+}
+
+bool ExifUtilsImpl::setExposureBias(int32_t ev,
+        uint32_t ev_step_numerator, uint32_t ev_step_denominator) {
+    SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_EXPOSURE_BIAS_VALUE,
+            ev * ev_step_numerator, ev_step_denominator);
+    return true;
+}
+
+bool ExifUtilsImpl::setOrientation(uint16_t degrees) {
+    ExifOrientation value = ExifOrientation::ORIENTATION_0_DEGREES;
+    switch (degrees) {
+        case 90:
+            value = ExifOrientation::ORIENTATION_90_DEGREES;
+            break;
+        case 180:
+            value = ExifOrientation::ORIENTATION_180_DEGREES;
+            break;
+        case 270:
+            value = ExifOrientation::ORIENTATION_270_DEGREES;
+            break;
+        default:
+            break;
+    }
+    return setOrientationValue(value);
+}
+
+bool ExifUtilsImpl::setOrientationValue(ExifOrientation orientationValue) {
+    SET_SHORT(EXIF_IFD_0, EXIF_TAG_ORIENTATION, orientationValue);
+    return true;
+}
+
+bool ExifUtilsImpl::setShutterSpeed(float exposure_time) {
+    float shutterSpeed = -log2f(exposure_time);
+    SET_SRATIONAL(EXIF_IFD_EXIF, EXIF_TAG_SHUTTER_SPEED_VALUE,
+            static_cast<int32_t>(shutterSpeed * kRationalPrecision), kRationalPrecision);
+    return true;
+}
+
+bool ExifUtilsImpl::setSubjectDistance(float diopters) {
+    const static float kInfinityDiopters = 1.0e-6;
+    uint32_t numerator, denominator;
+    uint16_t distanceRange;
+    if (diopters > kInfinityDiopters) {
+        float focusDistance = 1.0f / diopters;
+        numerator = static_cast<uint32_t>(std::round(focusDistance * kRationalPrecision));
+        denominator = kRationalPrecision;
+
+        if (focusDistance < 1.0f) {
+            distanceRange = 1; // Macro
+        } else if (focusDistance < 3.0f) {
+            distanceRange = 2; // Close
+        } else {
+            distanceRange = 3; // Distant
+        }
+    } else {
+        numerator = 0xFFFFFFFF;
+        denominator = 1;
+        distanceRange = 3; // Distant
+    }
+    SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_SUBJECT_DISTANCE, numerator, denominator);
+    SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_SUBJECT_DISTANCE_RANGE, distanceRange);
+    return true;
+}
+
+bool ExifUtilsImpl::setSubsecTime(const std::string& subsec_time) {
+    SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME, EXIF_FORMAT_ASCII, subsec_time);
+    SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME_ORIGINAL, EXIF_FORMAT_ASCII, subsec_time);
+    SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME_DIGITIZED, EXIF_FORMAT_ASCII, subsec_time);
+    return true;
+}
+
+bool ExifUtilsImpl::setWhiteBalance(uint8_t white_balance) {
+    uint16_t whiteBalance = (white_balance == ANDROID_CONTROL_AWB_MODE_AUTO) ? 0 : 1;
+    SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_WHITE_BALANCE, whiteBalance);
+    return true;
+}
+
+bool ExifUtilsImpl::generateApp1() {
+    destroyApp1();
+    // Save the result into |app1_buffer_|.
+    exif_data_save_data(exif_data_, &app1_buffer_, &app1_length_);
+    if (!app1_length_) {
+        ALOGE("%s: Allocate memory for app1_buffer_ failed", __FUNCTION__);
+        return false;
+    }
+    /*
+     * The JPEG segment length field is 16 bits and counts its own two bytes, so
+     * the APP1 payload must not exceed 65533 bytes.
+     */
+    if (app1_length_ > 65533) {
+        destroyApp1();
+        ALOGE("%s: The size of APP1 segment is too large", __FUNCTION__);
+        return false;
+    }
+    return true;
+}
+
+const uint8_t* ExifUtilsImpl::getApp1Buffer() {
+    return app1_buffer_;
+}
+
+unsigned int ExifUtilsImpl::getApp1Length() {
+    return app1_length_;
+}
+
+bool ExifUtilsImpl::setExifVersion(const std::string& exif_version) {
+    SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_EXIF_VERSION, EXIF_FORMAT_UNDEFINED, exif_version);
+    return true;
+}
+
+void ExifUtilsImpl::reset() {
+    destroyApp1();
+    if (exif_data_) {
+        /*
+         * Since we decided to ignore the original APP1, we are sure that there is
+         * no thumbnail allocated by libexif. |exif_data_->data| is actually
+         * allocated by JpegCompressor. Set |exif_data_->data| to nullptr to
+         * prevent exif_data_unref() from freeing it incorrectly.
+         */
+        exif_data_->data = nullptr;
+        exif_data_->size = 0;
+        exif_data_unref(exif_data_);
+        exif_data_ = nullptr;
+    }
+}
+
+std::unique_ptr<ExifEntry> ExifUtilsImpl::addVariableLengthEntry(ExifIfd ifd,
+        ExifTag tag, ExifFormat format, uint64_t components, unsigned int size) {
+    // Remove old entry if exists.
+    exif_content_remove_entry(exif_data_->ifd[ifd],
+            exif_content_get_entry(exif_data_->ifd[ifd], tag));
+    ExifMem* mem = exif_mem_new_default();
+    if (!mem) {
+        ALOGE("%s: Allocate memory for exif entry failed", __FUNCTION__);
+        return nullptr;
+    }
+    std::unique_ptr<ExifEntry> entry(exif_entry_new_mem(mem));
+    if (!entry) {
+        ALOGE("%s: Allocate memory for exif entry failed", __FUNCTION__);
+        exif_mem_unref(mem);
+        return nullptr;
+    }
+    void* tmpBuffer = exif_mem_alloc(mem, size);
+    if (!tmpBuffer) {
+        ALOGE("%s: Allocate memory for exif entry failed", __FUNCTION__);
+        exif_mem_unref(mem);
+        return nullptr;
+    }
+
+    entry->data = static_cast<unsigned char*>(tmpBuffer);
+    entry->tag = tag;
+    entry->format = format;
+    entry->components = components;
+    entry->size = size;
+
+    exif_content_add_entry(exif_data_->ifd[ifd], entry.get());
+    exif_mem_unref(mem);
+
+    return entry;
+}
+
+std::unique_ptr<ExifEntry> ExifUtilsImpl::addEntry(ExifIfd ifd, ExifTag tag) {
+    std::unique_ptr<ExifEntry> entry(exif_content_get_entry(exif_data_->ifd[ifd], tag));
+    if (entry) {
+        // exif_content_get_entry() won't ref the entry, so we ref here.
+        exif_entry_ref(entry.get());
+        return entry;
+    }
+    entry.reset(exif_entry_new());
+    if (!entry) {
+        ALOGE("%s: Allocate memory for exif entry failed", __FUNCTION__);
+        return nullptr;
+    }
+    entry->tag = tag;
+    exif_content_add_entry(exif_data_->ifd[ifd], entry.get());
+    exif_entry_initialize(entry.get(), tag);
+    return entry;
+}
+
+bool ExifUtilsImpl::setShort(ExifIfd ifd, ExifTag tag, uint16_t value, const std::string& msg) {
+    std::unique_ptr<ExifEntry> entry = addEntry(ifd, tag);
+    if (!entry) {
+        ALOGE("%s: Adding '%s' entry failed", __FUNCTION__, msg.c_str());
+        return false;
+    }
+    exif_set_short(entry->data, EXIF_BYTE_ORDER_INTEL, value);
+    return true;
+}
+
+bool ExifUtilsImpl::setLong(ExifIfd ifd, ExifTag tag, uint32_t value, const std::string& msg) {
+    std::unique_ptr<ExifEntry> entry = addEntry(ifd, tag);
+    if (!entry) {
+        ALOGE("%s: Adding '%s' entry failed", __FUNCTION__, msg.c_str());
+        return false;
+    }
+    exif_set_long(entry->data, EXIF_BYTE_ORDER_INTEL, value);
+    return true;
+}
+
+bool ExifUtilsImpl::setRational(ExifIfd ifd, ExifTag tag, uint32_t numerator,
+        uint32_t denominator, const std::string& msg) {
+    std::unique_ptr<ExifEntry> entry = addEntry(ifd, tag);
+    if (!entry) {
+        ALOGE("%s: Adding '%s' entry failed", __FUNCTION__, msg.c_str());
+        return false;
+    }
+    exif_set_rational(entry->data, EXIF_BYTE_ORDER_INTEL, {numerator, denominator});
+    return true;
+}
+
+bool ExifUtilsImpl::setSRational(ExifIfd ifd, ExifTag tag, int32_t numerator,
+        int32_t denominator, const std::string& msg) {
+    std::unique_ptr<ExifEntry> entry = addEntry(ifd, tag);
+    if (!entry) {
+        ALOGE("%s: Adding '%s' entry failed", __FUNCTION__, msg.c_str());
+        return false;
+    }
+    exif_set_srational(entry->data, EXIF_BYTE_ORDER_INTEL, {numerator, denominator});
+    return true;
+}
+
+bool ExifUtilsImpl::setString(ExifIfd ifd, ExifTag tag, ExifFormat format,
+        const std::string& buffer, const std::string& msg) {
+    size_t entry_size = buffer.length();
+    // ASCII entries need a NUL terminator; EXIF_FORMAT_UNDEFINED entries do not.
+    if (format == EXIF_FORMAT_ASCII) {
+        entry_size++;
+    }
+    std::unique_ptr<ExifEntry> entry =
+            addVariableLengthEntry(ifd, tag, format, entry_size, entry_size);
+    if (!entry) {
+        ALOGE("%s: Adding '%s' entry failed", __FUNCTION__, msg.c_str());
+        return false;
+    }
+    memcpy(entry->data, buffer.c_str(), entry_size);
+    return true;
+}
+
+void ExifUtilsImpl::destroyApp1() {
+    /*
+     * Since there is no API to access ExifMem in ExifData->priv, we use free
+     * here, which is the default free function in libexif. See
+     * exif_data_save_data() for details.
+     */
+    free(app1_buffer_);
+    app1_buffer_ = nullptr;
+    app1_length_ = 0;
+}
+
+bool ExifUtilsImpl::setFromMetadata(const CameraMetadata& metadata,
+        const CameraMetadata& staticInfo,
+        const size_t imageWidth, const size_t imageHeight) {
+    if (!setImageWidth(imageWidth) ||
+            !setImageHeight(imageHeight)) {
+        ALOGE("%s: setting image resolution failed.", __FUNCTION__);
+        return false;
+    }
+
+    struct timespec tp;
+    struct tm time_info;
+    bool time_available = clock_gettime(CLOCK_REALTIME, &tp) != -1;
+    localtime_r(&tp.tv_sec, &time_info);
+    if (!setDateTime(time_info)) {
+        ALOGE("%s: setting data time failed.", __FUNCTION__);
+        return false;
+    }
+
+    float focal_length;
+    camera_metadata_ro_entry entry = metadata.find(ANDROID_LENS_FOCAL_LENGTH);
+    if (entry.count) {
+        focal_length = entry.data.f[0];
+
+        if (!setFocalLength(focal_length)) {
+            ALOGE("%s: setting focal length failed.", __FUNCTION__);
+            return false;
+        }
+
+        camera_metadata_ro_entry sensorSizeEntry =
+                staticInfo.find(ANDROID_SENSOR_INFO_PHYSICAL_SIZE);
+        if (sensorSizeEntry.count == 2) {
+            if (!setFocalLengthIn35mmFilm(
+                    focal_length, sensorSizeEntry.data.f[0], sensorSizeEntry.data.f[1])) {
+                ALOGE("%s: setting focal length in 35mm failed.", __FUNCTION__);
+                return false;
+            }
+        }
+    } else {
+        ALOGV("%s: Cannot find focal length in metadata.", __FUNCTION__);
+    }
+
+    if (metadata.exists(ANDROID_SCALER_CROP_REGION) &&
+            staticInfo.exists(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE)) {
+        entry = metadata.find(ANDROID_SCALER_CROP_REGION);
+        camera_metadata_ro_entry activeArrayEntry =
+                staticInfo.find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
+
+        if (!setDigitalZoomRatio(entry.data.i32[2], entry.data.i32[3],
+                activeArrayEntry.data.i32[2], activeArrayEntry.data.i32[3])) {
+            ALOGE("%s: setting digital zoom ratio failed.", __FUNCTION__);
+            return false;
+        }
+    }
+
+    if (metadata.exists(ANDROID_JPEG_GPS_COORDINATES)) {
+        entry = metadata.find(ANDROID_JPEG_GPS_COORDINATES);
+        if (entry.count < 3) {
+            ALOGE("%s: Gps coordinates in metadata is not complete.", __FUNCTION__);
+            return false;
+        }
+        if (!setGpsLatitude(entry.data.d[0])) {
+            ALOGE("%s: setting gps latitude failed.", __FUNCTION__);
+            return false;
+        }
+        if (!setGpsLongitude(entry.data.d[1])) {
+            ALOGE("%s: setting gps longitude failed.", __FUNCTION__);
+            return false;
+        }
+        if (!setGpsAltitude(entry.data.d[2])) {
+            ALOGE("%s: setting gps altitude failed.", __FUNCTION__);
+            return false;
+        }
+    }
+
+    if (metadata.exists(ANDROID_JPEG_GPS_PROCESSING_METHOD)) {
+        entry = metadata.find(ANDROID_JPEG_GPS_PROCESSING_METHOD);
+        std::string method_str(reinterpret_cast<const char*>(entry.data.u8));
+        if (!setGpsProcessingMethod(method_str)) {
+            ALOGE("%s: setting gps processing method failed.", __FUNCTION__);
+            return false;
+        }
+    }
+
+    if (time_available && metadata.exists(ANDROID_JPEG_GPS_TIMESTAMP)) {
+        entry = metadata.find(ANDROID_JPEG_GPS_TIMESTAMP);
+        time_t timestamp = static_cast<time_t>(entry.data.i64[0]);
+        if (gmtime_r(&timestamp, &time_info)) {
+            if (!setGpsTimestamp(time_info)) {
+                ALOGE("%s: setting gps timestamp failed.", __FUNCTION__);
+                return false;
+            }
+        } else {
+            ALOGE("%s: Time tranformation failed.", __FUNCTION__);
+            return false;
+        }
+    }
+
+    if (staticInfo.exists(ANDROID_CONTROL_AE_COMPENSATION_STEP) &&
+            metadata.exists(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION)) {
+        entry = metadata.find(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION);
+        camera_metadata_ro_entry stepEntry =
+                staticInfo.find(ANDROID_CONTROL_AE_COMPENSATION_STEP);
+        if (!setExposureBias(entry.data.i32[0], stepEntry.data.r[0].numerator,
+                stepEntry.data.r[0].denominator)) {
+            ALOGE("%s: setting exposure bias failed.", __FUNCTION__);
+            return false;
+        }
+    }
+
+    if (metadata.exists(ANDROID_JPEG_ORIENTATION)) {
+        entry = metadata.find(ANDROID_JPEG_ORIENTATION);
+        if (!setOrientation(entry.data.i32[0])) {
+            ALOGE("%s: setting orientation failed.", __FUNCTION__);
+            return false;
+        }
+    }
+
+    if (metadata.exists(ANDROID_SENSOR_EXPOSURE_TIME)) {
+        entry = metadata.find(ANDROID_SENSOR_EXPOSURE_TIME);
+        float exposure_time = 1.0f * entry.data.i64[0] / 1e9;
+        if (!setExposureTime(exposure_time)) {
+            ALOGE("%s: setting exposure time failed.", __FUNCTION__);
+            return false;
+        }
+
+        if (!setShutterSpeed(exposure_time)) {
+            ALOGE("%s: setting shutter speed failed.", __FUNCTION__);
+            return false;
+        }
+    }
+
+    if (metadata.exists(ANDROID_LENS_FOCUS_DISTANCE)) {
+        entry = metadata.find(ANDROID_LENS_FOCUS_DISTANCE);
+        if (!setSubjectDistance(entry.data.f[0])) {
+            ALOGE("%s: setting subject distance failed.", __FUNCTION__);
+            return false;
+        }
+    }
+
+    if (metadata.exists(ANDROID_SENSOR_SENSITIVITY)) {
+        entry = metadata.find(ANDROID_SENSOR_SENSITIVITY);
+        int32_t iso = entry.data.i32[0];
+        camera_metadata_ro_entry postRawSensEntry =
+                metadata.find(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST);
+        if (postRawSensEntry.count > 0) {
+            iso = iso * postRawSensEntry.data.i32[0] / 100;
+        }
+
+        if (!setIsoSpeedRating(static_cast<uint16_t>(iso))) {
+            ALOGE("%s: setting iso rating failed.", __FUNCTION__);
+            return false;
+        }
+    }
+
+    if (metadata.exists(ANDROID_LENS_APERTURE)) {
+        entry = metadata.find(ANDROID_LENS_APERTURE);
+        if (!setFNumber(entry.data.f[0])) {
+            ALOGE("%s: setting F number failed.", __FUNCTION__);
+            return false;
+        }
+        if (!setAperture(entry.data.f[0])) {
+            ALOGE("%s: setting aperture failed.", __FUNCTION__);
+            return false;
+        }
+    }
+
+    static const uint16_t kSRGBColorSpace = 1;
+    if (!setColorSpace(kSRGBColorSpace)) {
+        ALOGE("%s: setting color space failed.", __FUNCTION__);
+        return false;
+    }
+
+    if (staticInfo.exists(ANDROID_LENS_INFO_AVAILABLE_APERTURES)) {
+        entry = staticInfo.find(ANDROID_LENS_INFO_AVAILABLE_APERTURES);
+        if (!setMaxAperture(entry.data.f[0])) {
+            ALOGE("%s: setting max aperture failed.", __FUNCTION__);
+            return false;
+        }
+    }
+
+    if (staticInfo.exists(ANDROID_FLASH_INFO_AVAILABLE)) {
+        entry = staticInfo.find(ANDROID_FLASH_INFO_AVAILABLE);
+        camera_metadata_ro_entry flashStateEntry = metadata.find(ANDROID_FLASH_STATE);
+        camera_metadata_ro_entry aeModeEntry = metadata.find(ANDROID_CONTROL_AE_MODE);
+        uint8_t flashState = flashStateEntry.count > 0 ?
+                flashStateEntry.data.u8[0] : ANDROID_FLASH_STATE_UNAVAILABLE;
+        uint8_t aeMode = aeModeEntry.count > 0 ?
+                aeModeEntry.data.u8[0] : ANDROID_CONTROL_AE_MODE_OFF;
+
+        if (!setFlash(entry.data.u8[0], flashState, aeMode)) {
+            ALOGE("%s: setting flash failed.", __FUNCTION__);
+            return false;
+        }
+    }
+
+    if (metadata.exists(ANDROID_CONTROL_AWB_MODE)) {
+        entry = metadata.find(ANDROID_CONTROL_AWB_MODE);
+        if (!setWhiteBalance(entry.data.u8[0])) {
+            ALOGE("%s: setting white balance failed.", __FUNCTION__);
+            return false;
+        }
+    }
+
+    if (metadata.exists(ANDROID_CONTROL_AE_MODE)) {
+        entry = metadata.find(ANDROID_CONTROL_AE_MODE);
+        if (!setExposureMode(entry.data.u8[0])) {
+            ALOGE("%s: setting exposure mode failed.", __FUNCTION__);
+            return false;
+        }
+    }
+    if (time_available) {
+        char str[4];
+        if (snprintf(str, sizeof(str), "%03ld", tp.tv_nsec / 1000000) < 0) {
+            ALOGE("%s: Subsec is invalid: %ld", __FUNCTION__, tp.tv_nsec);
+            return false;
+        }
+        if (!setSubsecTime(std::string(str))) {
+            ALOGE("%s: setting subsec time failed.", __FUNCTION__);
+            return false;
+        }
+    }
+
+    return true;
+}
+
+} // namespace camera3
+} // namespace android
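
The degrees/minutes/seconds packing done by setLatitudeOrLongitudeData() above is easier to verify against a concrete value than against the rational math alone. Below is a minimal standalone sketch, not part of the patch, that reproduces the same arithmetic with plain doubles; the sample coordinate and the main() harness are made up for illustration.

    // Standalone illustration of the DMS split used by setLatitudeOrLongitudeData().
    // Not part of the patch; prints the components instead of writing Exif rationals.
    #include <cstdint>
    #include <cstdio>

    int main() {
        double num = 37.4219983;  // hypothetical coordinate in decimal degrees
        uint32_t degrees = static_cast<uint32_t>(num);
        uint32_t minutes = static_cast<uint32_t>(60 * (num - degrees));
        // The real code stores the remainder as the rational {value, 1000000},
        // i.e. arc seconds scaled by 1e6.
        uint32_t secondsScaled =
                static_cast<uint32_t>(3600000000u * (num - degrees - minutes / 60.0));
        printf("%u deg %u min %.6f sec\n", degrees, minutes, secondsScaled / 1000000.0);
        return 0;
    }

For 37.4219983 this prints approximately 37 deg 25 min 19.19 sec, matching the three rationals the patch writes into the GPSLatitude and GPSLongitude entries.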
diff --git a/services/camera/libcameraservice/utils/ExifUtils.h b/services/camera/libcameraservice/utils/ExifUtils.h
new file mode 100644
index 0000000..f1d0205
--- /dev/null
+++ b/services/camera/libcameraservice/utils/ExifUtils.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_EXIF_UTILS_H
+#define ANDROID_SERVERS_CAMERA_EXIF_UTILS_H
+
+#include "CameraMetadata.h"
+
+namespace android {
+namespace camera3 {
+
+/*
+ * Orientation value:
+ *  1      2      3      4      5          6          7          8
+ *
+ *  888888 888888     88 88     8888888888 88                 88 8888888888
+ *  88         88     88 88     88  88     88  88         88  88     88  88
+ *  8888     8888   8888 8888   88         8888888888 8888888888         88
+ *  88         88     88 88
+ *  88         88 888888 888888
+ */
+enum ExifOrientation : uint16_t {
+    ORIENTATION_UNDEFINED   = 0x0,
+    ORIENTATION_0_DEGREES   = 0x1,
+    ORIENTATION_90_DEGREES  = 0x6,
+    ORIENTATION_180_DEGREES = 0x3,
+    ORIENTATION_270_DEGREES = 0x8,
+};
+
+// This is based on the camera HIDL shim implementation, which was in turn
+// based on the original ChromeOS ARC implementation of a V4L2 HAL.
+
+// ExifUtils can override the APP1 segment with tags that the caller sets. ExifUtils
+// can also add a thumbnail in the APP1 segment if a thumbnail size is specified.
+// ExifUtils can be reused with different images by calling initialize().
+//
+// Example of using this class:
+//  std::unique_ptr<ExifUtils> utils(ExifUtils::create());
+//  utils->initialize(app1Segment, app1SegmentSize);
+//  ...
+//  // Call ExifUtils functions to set Exif tags.
+//  ...
+//  utils->generateApp1();
+//  unsigned int app1Length = utils->getApp1Length();
+//  uint8_t* app1Buffer = new uint8_t[app1Length];
+//  memcpy(app1Buffer, utils->getApp1Buffer(), app1Length);
+class ExifUtils {
+
+public:
+    virtual ~ExifUtils();
+
+    static ExifUtils* create();
+
+    // initialize() can be called multiple times; any previously set Exif tags are
+    // cleared.
+    virtual bool initialize(const unsigned char *app1Segment, size_t app1SegmentSize) = 0;
+    virtual bool initializeEmpty() = 0;
+
+    // Set all known fields from a metadata structure
+    virtual bool setFromMetadata(const CameraMetadata& metadata,
+            const CameraMetadata& staticInfo,
+            const size_t imageWidth, const size_t imageHeight) = 0;
+
+    // Sets the lens aperture.
+    // Returns false if memory allocation fails.
+    virtual bool setAperture(float aperture) = 0;
+
+    // Sets the color space.
+    // Returns false if memory allocation fails.
+    virtual bool setColorSpace(uint16_t color_space) = 0;
+
+    // Sets the date and time of image last modified. It takes local time. The
+    // name of the tag is DateTime in IFD0.
+    // Returns false if memory allocation fails.
+    virtual bool setDateTime(const struct tm& t) = 0;
+
+    // Sets the digital zoom ratio. If the numerator is 0, it means digital zoom
+    // was not used.
+    // Returns false if memory allocation fails.
+    virtual bool setDigitalZoomRatio(uint32_t crop_width, uint32_t crop_height,
+            uint32_t sensor_width, uint32_t sensor_height) = 0;
+
+    // Sets the exposure bias.
+    // Returns false if memory allocation fails.
+    virtual bool setExposureBias(int32_t ev,
+            uint32_t ev_step_numerator, uint32_t ev_step_denominator) = 0;
+
+    // Sets the exposure mode set when the image was shot.
+    // Returns false if memory allocation fails.
+    virtual bool setExposureMode(uint8_t exposure_mode) = 0;
+
+    // Sets the exposure time, given in seconds.
+    // Returns false if memory allocation fails.
+    virtual bool setExposureTime(float exposure_time) = 0;
+
+    // Sets the status of flash.
+    // Returns false if memory allocation fails.
+    virtual bool setFlash(uint8_t flash_available, uint8_t flash_state, uint8_t ae_mode) = 0;
+
+    // Sets the F number.
+    // Returns false if memory allocation fails.
+    virtual bool setFNumber(float f_number) = 0;
+
+    // Sets the focal length, in millimeters, of the lens used to take the image.
+    // Returns false if memory allocation fails.
+    virtual bool setFocalLength(float focal_length) = 0;
+
+    // Sets the 35mm film equivalent focal length, in millimeters.
+    // Returns false if memory allocation fails.
+    virtual bool setFocalLengthIn35mmFilm(float focal_length,
+            float sensor_size_x, float sensor_size_y) = 0;
+
+    // Sets the altitude in meters.
+    // Returns false if memory allocation fails.
+    virtual bool setGpsAltitude(double altitude) = 0;
+
+    // Sets the latitude with degrees minutes seconds format.
+    // Returns false if memory allocation fails.
+    virtual bool setGpsLatitude(double latitude) = 0;
+
+    // Sets the longitude with degrees minutes seconds format.
+    // Returns false if memory allocation fails.
+    virtual bool setGpsLongitude(double longitude) = 0;
+
+    // Sets GPS processing method.
+    // Returns false if memory allocation fails.
+    virtual bool setGpsProcessingMethod(const std::string& method) = 0;
+
+    // Sets GPS date stamp and time stamp (atomic clock). It takes UTC time.
+    // Returns false if memory allocation fails.
+    virtual bool setGpsTimestamp(const struct tm& t) = 0;
+
+    // Sets the height (number of rows) of main image.
+    // Returns false if memory allocation fails.
+    virtual bool setImageHeight(uint32_t length) = 0;
+
+    // Sets the width (number of columns) of main image.
+    // Returns false if memory allocation fails.
+    virtual bool setImageWidth(uint32_t width) = 0;
+
+    // Sets the ISO speed.
+    // Returns false if memory allocation fails.
+    virtual bool setIsoSpeedRating(uint16_t iso_speed_ratings) = 0;
+
+    // Sets the smallest F number of the lens.
+    // Returns false if memory allocation fails.
+    virtual bool setMaxAperture(float aperture) = 0;
+
+    // Sets image orientation from the rotation in degrees.
+    // Returns false if memory allocation fails.
+    virtual bool setOrientation(uint16_t degrees) = 0;
+
+    // Sets image orientation from an ExifOrientation value.
+    // Returns false if memory allocation fails.
+    virtual bool setOrientationValue(ExifOrientation orientationValue) = 0;
+
+    // Sets the shutter speed.
+    // Returns false if memory allocation fails.
+    virtual bool setShutterSpeed(float exposure_time) = 0;
+
+    // Sets the distance to the subject, converting the given focus distance in
+    // diopters to meters.
+    // Returns false if memory allocation fails.
+    virtual bool setSubjectDistance(float diopters) = 0;
+
+    // Sets the fractions of seconds for the <DateTime> tag.
+    // Returns false if memory allocation fails.
+    virtual bool setSubsecTime(const std::string& subsec_time) = 0;
+
+    // Sets the white balance mode set when the image was shot.
+    // Returns false if memory allocation fails.
+    virtual bool setWhiteBalance(uint8_t white_balance) = 0;
+
+    // Generates APP1 segment.
+    // Returns false if generating APP1 segment fails.
+    virtual bool generateApp1() = 0;
+
+    // Gets buffer of APP1 segment. This method must be called only after calling
+    // GenerateAPP1().
+    virtual const uint8_t* getApp1Buffer() = 0;
+
+    // Gets length of APP1 segment. This method must be called only after calling
+    // GenerateAPP1().
+    virtual unsigned int getApp1Length() = 0;
+};
+
+} // namespace camera3
+} // namespace android
+
+#endif  // ANDROID_SERVERS_CAMERA_EXIF_UTILS_H
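
Read together with the implementation earlier in this change, the interface above is used roughly as in the sketch below. This is an illustrative sketch only: buildApp1() is a hypothetical helper that this patch does not add, and error handling is reduced to early returns.

    // Hypothetical caller sketch; only ExifUtils methods declared above are used.
    #include <memory>
    #include <vector>
    #include "ExifUtils.h"

    namespace android {
    namespace camera3 {

    // Builds an APP1 (Exif) segment for a captured image from its metadata.
    bool buildApp1(const CameraMetadata& metadata, const CameraMetadata& staticInfo,
                   size_t width, size_t height, std::vector<uint8_t>* out) {
        std::unique_ptr<ExifUtils> utils(ExifUtils::create());
        if (utils == nullptr || !utils->initializeEmpty()) return false;
        if (!utils->setFromMetadata(metadata, staticInfo, width, height)) return false;
        if (!utils->generateApp1()) return false;
        out->assign(utils->getApp1Buffer(),
                    utils->getApp1Buffer() + utils->getApp1Length());
        return true;
    }

    }  // namespace camera3
    }  // namespace android

The resulting bytes are then spliced into the JPEG stream as an APP1 marker segment; that splicing is not part of these two files.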
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
index ae832ba..4f3ac1b 100644
--- a/services/mediaanalytics/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -51,7 +51,6 @@
 #include <utils/Timers.h>
 #include <utils/Vector.h>
 
-#include <media/AudioPolicyHelper.h>
 #include <media/IMediaHTTPService.h>
 #include <media/IRemoteDisplay.h>
 #include <media/IRemoteDisplayClient.h>
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index 6a71d7d..f78c671 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -108,6 +108,9 @@
     libutils \
     libziparchive \
 
+LOCAL_HEADER_LIBRARIES := \
+    libnativeloader-dummy-headers \
+
 LOCAL_MODULE := mediaswcodec
 LOCAL_INIT_RC := mediaswcodec.rc
 LOCAL_SANITIZE := scudo
diff --git a/services/mediacodec/MediaCodecUpdateService.cpp b/services/mediacodec/MediaCodecUpdateService.cpp
index 0e6892d..50ccbce 100644
--- a/services/mediacodec/MediaCodecUpdateService.cpp
+++ b/services/mediacodec/MediaCodecUpdateService.cpp
@@ -20,28 +20,12 @@
 #include <android/dlext.h>
 #include <dlfcn.h>
 #include <media/CodecServiceRegistrant.h>
+#include <nativeloader/dlext_namespaces.h>
 #include <utils/Log.h>
 #include <utils/String8.h>
 
 #include "MediaCodecUpdateService.h"
 
-// Copied from GraphicsEnv.cpp
-// TODO(b/37049319) Get this from a header once one exists
-extern "C" {
-  android_namespace_t* android_create_namespace(const char* name,
-                                                const char* ld_library_path,
-                                                const char* default_library_path,
-                                                uint64_t type,
-                                                const char* permitted_when_isolated_path,
-                                                android_namespace_t* parent);
-  bool android_link_namespaces(android_namespace_t* from,
-                               android_namespace_t* to,
-                               const char* shared_libs_sonames);
-  enum {
-     ANDROID_NAMESPACE_TYPE_ISOLATED = 1,
-  };
-}
-
 namespace android {
 
 void loadFromApex(const char *libDirPath) {
diff --git a/services/mediacodec/registrant/Android.bp b/services/mediacodec/registrant/Android.bp
index 80d3630..1470de2 100644
--- a/services/mediacodec/registrant/Android.bp
+++ b/services/mediacodec/registrant/Android.bp
@@ -28,6 +28,7 @@
         "libcodec2_soft_amrwbdec",
         "libcodec2_soft_amrwbenc",
         "libcodec2_soft_hevcdec",
+        "libcodec2_soft_hevcenc",
         "libcodec2_soft_g711alawdec",
         "libcodec2_soft_g711mlawdec",
         "libcodec2_soft_mpeg2dec",
diff --git a/services/mediacodec/seccomp_policy/mediacodec-arm.policy b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
index 9bdd4c8..3870a11 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-arm.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
@@ -58,8 +58,4 @@
 getdents64: 1
 getrandom: 1
 
-# Used by UBSan diagnostic messages
-readlink: 1
-open: 1
-
 @include /system/etc/seccomp_policy/crash_dump.arm.policy
diff --git a/services/mediacodec/seccomp_policy/mediacodec-x86.policy b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
index a1ef16f..845f84b 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-x86.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
@@ -67,8 +67,4 @@
 getpid: 1
 gettid: 1
 
-# Used by UBSan diagnostic messages
-readlink: 1
-open: 1
-
 @include /system/etc/seccomp_policy/crash_dump.x86.policy
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index dd64881..7654982 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -4,8 +4,7 @@
 include $(CLEAR_VARS)
 LOCAL_CFLAGS := -Wall -Werror
 LOCAL_SRC_FILES := \
-    MediaExtractorService.cpp \
-    MediaExtractorUpdateService.cpp \
+    MediaExtractorService.cpp
 
 LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
 LOCAL_MODULE:= libmediaextractorservice
diff --git a/services/mediaextractor/MediaExtractorUpdateService.cpp b/services/mediaextractor/MediaExtractorUpdateService.cpp
deleted file mode 100644
index 473a698..0000000
--- a/services/mediaextractor/MediaExtractorUpdateService.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MediaExtractorUpdateService"
-#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <media/stagefright/MediaExtractorFactory.h>
-
-#include "MediaExtractorUpdateService.h"
-
-namespace android {
-namespace media {
-
-binder::Status MediaExtractorUpdateService::loadPlugins(const ::std::string& apkPath) {
-    ALOGV("loadPlugins %s", apkPath.c_str());
-    MediaExtractorFactory::LoadPlugins(apkPath);
-    return binder::Status::ok();
-}
-
-}   // namespace media
-}   // namespace android
diff --git a/services/mediaextractor/MediaExtractorUpdateService.h b/services/mediaextractor/MediaExtractorUpdateService.h
deleted file mode 100644
index ea34c9d..0000000
--- a/services/mediaextractor/MediaExtractorUpdateService.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_MEDIA_EXTRACTOR_UPDATE_SERVICE_H
-#define ANDROID_MEDIA_EXTRACTOR_UPDATE_SERVICE_H
-
-#include <binder/BinderService.h>
-#include <android/media/BnMediaUpdateService.h>
-
-namespace android {
-namespace media {
-
-class MediaExtractorUpdateService
-    : public BinderService<MediaExtractorUpdateService>, public BnMediaUpdateService
-{
-    friend class BinderService<MediaExtractorUpdateService>;
-public:
-    MediaExtractorUpdateService() : BnMediaUpdateService() { }
-    virtual ~MediaExtractorUpdateService() { }
-    static const char* getServiceName() { return "media.extractor.update"; }
-    binder::Status loadPlugins(const ::std::string& apkPath);
-};
-
-}   // namespace media
-}   // namespace android
-
-#endif  // ANDROID_MEDIA_EXTRACTOR_UPDATE_SERVICE_H
diff --git a/services/mediaextractor/main_extractorservice.cpp b/services/mediaextractor/main_extractorservice.cpp
index 5f42711..3c15bfd 100644
--- a/services/mediaextractor/main_extractorservice.cpp
+++ b/services/mediaextractor/main_extractorservice.cpp
@@ -31,7 +31,6 @@
 // from LOCAL_C_INCLUDES
 #include "IcuUtils.h"
 #include "MediaExtractorService.h"
-#include "MediaExtractorUpdateService.h"
 #include "MediaUtils.h"
 #include "minijail.h"
 
@@ -72,11 +71,6 @@
     sp<IServiceManager> sm = defaultServiceManager();
     MediaExtractorService::instantiate();
 
-    std::string value = base::GetProperty("ro.build.type", "unknown");
-    if (value == "userdebug" || value == "eng") {
-        media::MediaExtractorUpdateService::instantiate();
-    }
-
     ProcessState::self()->startThreadPool();
     IPCThreadState::self()->joinThreadPool();
 }
diff --git a/services/mediaresourcemanager/test/Android.bp b/services/mediaresourcemanager/test/Android.bp
new file mode 100644
index 0000000..70e8833
--- /dev/null
+++ b/services/mediaresourcemanager/test/Android.bp
@@ -0,0 +1,41 @@
+// Build the unit tests.
+cc_test {
+    name: "ResourceManagerService_test",
+    srcs: ["ResourceManagerService_test.cpp"],
+    shared_libs: [
+        "libbinder",
+        "liblog",
+        "libmedia",
+        "libresourcemanagerservice",
+        "libutils",
+    ],
+    include_dirs: [
+        "frameworks/av/include",
+        "frameworks/av/services/mediaresourcemanager",
+    ],
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+    compile_multilib: "32",
+}
+
+cc_test {
+    name: "ServiceLog_test",
+    srcs: ["ServiceLog_test.cpp"],
+    shared_libs: [
+        "liblog",
+        "libmedia",
+        "libresourcemanagerservice",
+        "libutils",
+    ],
+    include_dirs: [
+        "frameworks/av/include",
+        "frameworks/av/services/mediaresourcemanager",
+    ],
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+    compile_multilib: "32",
+}
diff --git a/services/mediaresourcemanager/test/Android.mk b/services/mediaresourcemanager/test/Android.mk
deleted file mode 100644
index 6abcf92..0000000
--- a/services/mediaresourcemanager/test/Android.mk
+++ /dev/null
@@ -1,52 +0,0 @@
-# Build the unit tests.
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := ResourceManagerService_test
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_SRC_FILES := \
-  ResourceManagerService_test.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
-  libbinder \
-  liblog \
-  libmedia \
-  libresourcemanagerservice \
-  libutils \
-
-LOCAL_C_INCLUDES := \
-  frameworks/av/include \
-  frameworks/av/services/mediaresourcemanager \
-
-LOCAL_CFLAGS += -Werror -Wall
-
-LOCAL_32_BIT_ONLY := true
-
-include $(BUILD_NATIVE_TEST)
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := ServiceLog_test
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_SRC_FILES := \
-  ServiceLog_test.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
-  liblog \
-  libmedia \
-  libresourcemanagerservice \
-  libutils \
-
-LOCAL_C_INCLUDES := \
-  frameworks/av/include \
-  frameworks/av/services/mediaresourcemanager \
-
-LOCAL_CFLAGS += -Werror -Wall
-
-LOCAL_32_BIT_ONLY := true
-
-include $(BUILD_NATIVE_TEST)