Merge "Delay start of mediaswcodec until after APEX is mounted"
diff --git a/apex/Android.bp b/apex/Android.bp
index 51e4c23..39997d2 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -15,6 +15,7 @@
 apex {
     name: "com.android.media",
     manifest: "manifest.json",
+    java_libs: ["updatable-media"],
     native_shared_libs: [
         // Extractor plugins
         "libaacextractor",
@@ -28,14 +29,13 @@
         "liboggextractor",
         "libwavextractor",
         // MediaPlayer2
-        "libmediaplayer2_jni",
+        "libmedia2_jni",
     ],
     key: "com.android.media.key",
 }
 
 apex {
     name: "com.android.media.swcodec",
-    compile_multilib: "32",
     manifest: "manifest_codec.json",
     native_shared_libs: [
         "libmedia_codecserviceregistrant",
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 4bb74cb..641816f 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -5561,12 +5561,12 @@
      *   <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
      * </ul></p>
      *
-     * <p>For a logical camera, this is concatenation of all underlying physical camera ids.
-     * The null terminator for physical camera id must be preserved so that the whole string
-     * can be tokenized using '\0' to generate list of physical camera ids.</p>
-     * <p>For example, if the physical camera ids of the logical camera are "2" and "3", the
+     * <p>For a logical camera, this is concatenation of all underlying physical camera IDs.
+     * The null terminator for each physical camera ID must be preserved so that the whole string
+     * can be tokenized using '\0' to generate a list of physical camera IDs.</p>
+     * <p>For example, if the physical camera IDs of the logical camera are "2" and "3", the
      * value of this tag will be ['2', '\0', '3', '\0'].</p>
-     * <p>The number of physical camera ids must be no less than 2.</p>
+     * <p>The number of physical camera IDs must be no less than 2.</p>
      */
     ACAMERA_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS =                 // byte[n]
             ACAMERA_LOGICAL_MULTI_CAMERA_START,
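
For illustration, a minimal C++ sketch of how a client could split the '\0'-separated payload of
ACAMERA_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS into individual ID strings (a standalone helper, not part
of this change; parsePhysicalCameraIds is a hypothetical name):

    #include <cstddef>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Splits the raw byte payload of ACAMERA_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS into
    // individual physical camera ID strings. Each ID is '\0'-terminated, so the
    // example payload ['2', '\0', '3', '\0'] yields {"2", "3"}.
    static std::vector<std::string> parsePhysicalCameraIds(const uint8_t* data, size_t count) {
        std::vector<std::string> ids;
        size_t start = 0;
        for (size_t i = 0; i < count; ++i) {
            if (data[i] == '\0') {
                ids.emplace_back(reinterpret_cast<const char*>(data) + start, i - start);
                start = i + 1;
            }
        }
        return ids;
    }
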
@@ -5591,6 +5591,28 @@
      */
     ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE =             // byte (acamera_metadata_enum_android_logical_multi_camera_sensor_sync_type_t)
             ACAMERA_LOGICAL_MULTI_CAMERA_START + 1,
+    /**
+     * <p>String containing the ID of the underlying active physical camera.</p>
+     *
+     * <p>Type: byte</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul></p>
+     *
+     * <p>The ID of the active physical camera that is backing the logical camera. All camera
+     * streams and metadata that are not specific to a physical camera originate from this
+     * physical camera. This must be one of the valid physical IDs advertised in the physicalIds
+     * static tag.</p>
+     * <p>For a logical camera made up of physical cameras whose lenses have
+     * different characteristics, the camera device may choose to switch between the physical
+     * cameras when the application changes FOCAL_LENGTH or SCALER_CROP_REGION.
+     * At the time of the lens switch, this result metadata reflects the new active physical
+     * camera ID.</p>
+     */
+    ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID =           // byte
+            ACAMERA_LOGICAL_MULTI_CAMERA_START + 2,
     ACAMERA_LOGICAL_MULTI_CAMERA_END,
 
     /**
@@ -7162,6 +7184,10 @@
      * <p>If this is supported, android.scaler.streamConfigurationMap will
      * additionally return a min frame duration that is greater than
      * zero for each supported size-format combination.</p>
+     * <p>For camera devices with LOGICAL_MULTI_CAMERA capability, when the underlying active
+     * physical camera switches, exposureTime, sensitivity, and lens properties may change
+     * even if AE/AF is locked. However, the overall auto exposure and auto focus experience
+     * for users will be consistent. Refer to LOGICAL_MULTI_CAMERA capability for details.</p>
      *
      * @see ACAMERA_BLACK_LEVEL_LOCK
      * @see ACAMERA_CONTROL_AE_LOCK
@@ -7217,6 +7243,10 @@
      * will accurately report the values applied by AWB in the result.</p>
      * <p>A given camera device may also support additional post-processing
      * controls, but this capability only covers the above list of controls.</p>
+     * <p>For camera devices with LOGICAL_MULTI_CAMERA capability, when the underlying active
+     * physical camera switches, the tonemap, white balance, and shading map may change even if
+     * AWB is locked. However, the overall post-processing experience for users will be
+     * consistent. Refer to LOGICAL_MULTI_CAMERA capability for details.</p>
      *
      * @see ACAMERA_COLOR_CORRECTION_ABERRATION_MODE
      * @see ACAMERA_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES
@@ -7396,7 +7426,7 @@
      * </li>
      * <li>The SENSOR_INFO_TIMESTAMP_SOURCE of the logical device and physical devices must be
      *   the same.</li>
-     * <li>The logical camera device must be LIMITED or higher device.</li>
+     * <li>The logical camera must be a LIMITED or higher device.</li>
      * </ul>
      * <p>Both the logical camera device and its underlying physical devices support the
      * mandatory stream combinations required for their device levels.</p>
@@ -7416,13 +7446,84 @@
      * <p>Using physical streams in place of a logical stream of the same size and format will
      * not slow down the frame rate of the capture, as long as the minimum frame duration
      * of the physical and logical streams are the same.</p>
+     * <p>A logical camera device's dynamic metadata may contain
+     * ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID to notify the application of the current
+     * active physical camera ID. The active physical camera is the physical camera from which
+     * the logical camera's main image data outputs (YUV or RAW) and metadata originate.
+     * In addition, this serves as an indication of which physical camera is used to output to
+     * a RAW stream, or, in case only physical cameras support RAW, which physical RAW stream
+     * the application should request.</p>
+     * <p>The logical camera's static metadata tags below describe the default active physical
+     * camera. An active physical camera is the default if it is used when the application
+     * directly submits requests built from a template. All templates default to the same
+     * active physical camera.</p>
+     * <ul>
+     * <li>ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE</li>
+     * <li>ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT</li>
+     * <li>ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE</li>
+     * <li>ACAMERA_SENSOR_INFO_MAX_FRAME_DURATION</li>
+     * <li>ACAMERA_SENSOR_INFO_PHYSICAL_SIZE</li>
+     * <li>ACAMERA_SENSOR_INFO_WHITE_LEVEL</li>
+     * <li>ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED</li>
+     * <li>ACAMERA_SENSOR_REFERENCE_ILLUMINANT1</li>
+     * <li>ACAMERA_SENSOR_REFERENCE_ILLUMINANT2</li>
+     * <li>ACAMERA_SENSOR_CALIBRATION_TRANSFORM1</li>
+     * <li>ACAMERA_SENSOR_CALIBRATION_TRANSFORM2</li>
+     * <li>ACAMERA_SENSOR_COLOR_TRANSFORM1</li>
+     * <li>ACAMERA_SENSOR_COLOR_TRANSFORM2</li>
+     * <li>ACAMERA_SENSOR_FORWARD_MATRIX1</li>
+     * <li>ACAMERA_SENSOR_FORWARD_MATRIX2</li>
+     * <li>ACAMERA_SENSOR_BLACK_LEVEL_PATTERN</li>
+     * <li>ACAMERA_SENSOR_MAX_ANALOG_SENSITIVITY</li>
+     * <li>ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS</li>
+     * <li>ACAMERA_SENSOR_AVAILABLE_TEST_PATTERN_MODES</li>
+     * <li>ACAMERA_LENS_INFO_HYPERFOCAL_DISTANCE</li>
+     * <li>ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE</li>
+     * <li>ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION</li>
+     * <li>ACAMERA_LENS_POSE_ROTATION</li>
+     * <li>ACAMERA_LENS_POSE_TRANSLATION</li>
+     * <li>ACAMERA_LENS_INTRINSIC_CALIBRATION</li>
+     * <li>ACAMERA_LENS_POSE_REFERENCE</li>
+     * <li>ACAMERA_LENS_DISTORTION</li>
+     * </ul>
+     * <p>To maintain backward compatibility, the capture request and result metadata tags
+     * required for basic camera functionality will be based solely on the
+     * logical camera capability. Other request and result metadata tags, on the other
+     * hand, will be based on the current active physical camera. For example, the physical
+     * cameras' sensor sensitivity and lens capability could differ from each other.
+     * So when the application manually controls sensor exposure time/gain or performs manual
+     * focus control, it must check the current active physical camera's exposure, gain,
+     * and focus distance ranges.</p>
      *
      * @see ACAMERA_LENS_DISTORTION
+     * @see ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION
+     * @see ACAMERA_LENS_INFO_HYPERFOCAL_DISTANCE
+     * @see ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE
      * @see ACAMERA_LENS_INTRINSIC_CALIBRATION
      * @see ACAMERA_LENS_POSE_REFERENCE
      * @see ACAMERA_LENS_POSE_ROTATION
      * @see ACAMERA_LENS_POSE_TRANSLATION
+     * @see ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID
      * @see ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE
+     * @see ACAMERA_SENSOR_AVAILABLE_TEST_PATTERN_MODES
+     * @see ACAMERA_SENSOR_BLACK_LEVEL_PATTERN
+     * @see ACAMERA_SENSOR_CALIBRATION_TRANSFORM1
+     * @see ACAMERA_SENSOR_CALIBRATION_TRANSFORM2
+     * @see ACAMERA_SENSOR_COLOR_TRANSFORM1
+     * @see ACAMERA_SENSOR_COLOR_TRANSFORM2
+     * @see ACAMERA_SENSOR_FORWARD_MATRIX1
+     * @see ACAMERA_SENSOR_FORWARD_MATRIX2
+     * @see ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT
+     * @see ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE
+     * @see ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED
+     * @see ACAMERA_SENSOR_INFO_MAX_FRAME_DURATION
+     * @see ACAMERA_SENSOR_INFO_PHYSICAL_SIZE
+     * @see ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE
+     * @see ACAMERA_SENSOR_INFO_WHITE_LEVEL
+     * @see ACAMERA_SENSOR_MAX_ANALOG_SENSITIVITY
+     * @see ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS
+     * @see ACAMERA_SENSOR_REFERENCE_ILLUMINANT1
+     * @see ACAMERA_SENSOR_REFERENCE_ILLUMINANT2
      */
     ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA      = 11,
 
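As a hedged usage sketch (not part of this change), an NDK client could read the new
ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID tag from a completed capture result with the
existing ACameraMetadata_getConstEntry() API; getActivePhysicalId is a hypothetical helper, and
the trailing-'\0' handling is an assumption about the byte payload:

    #include <camera/NdkCameraMetadata.h>
    #include <camera/NdkCameraMetadataTags.h>
    #include <string>

    // Returns the active physical camera ID from a capture result, or an empty string
    // if the tag is absent (e.g. the device is not a logical multi-camera).
    static std::string getActivePhysicalId(const ACameraMetadata* result) {
        ACameraMetadata_const_entry entry;
        camera_status_t status = ACameraMetadata_getConstEntry(
                result, ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID, &entry);
        if (status != ACAMERA_OK || entry.count == 0) {
            return "";
        }
        std::string id(reinterpret_cast<const char*>(entry.data.u8), entry.count);
        // Strip a trailing '\0' if the payload includes one.
        if (!id.empty() && id.back() == '\0') {
            id.pop_back();
        }
        return id;
    }
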
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index f4c0341..480c7cd 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -63,6 +63,7 @@
 
 typedef drm::V1_1::KeyRequestType KeyRequestType_V1_1;
 typedef drm::V1_2::Status Status_V1_2;
+typedef drm::V1_2::HdcpLevel HdcpLevel_V1_2;
 
 namespace {
 
@@ -144,6 +145,23 @@
     }
 }
 
+static SecurityLevel toHidlSecurityLevel(DrmPlugin::SecurityLevel level) {
+    switch(level) {
+    case DrmPlugin::kSecurityLevelSwSecureCrypto:
+        return SecurityLevel::SW_SECURE_CRYPTO;
+    case DrmPlugin::kSecurityLevelSwSecureDecode:
+        return SecurityLevel::SW_SECURE_DECODE;
+    case DrmPlugin::kSecurityLevelHwSecureCrypto:
+        return SecurityLevel::HW_SECURE_CRYPTO;
+    case DrmPlugin::kSecurityLevelHwSecureDecode:
+        return SecurityLevel::HW_SECURE_DECODE;
+    case DrmPlugin::kSecurityLevelHwSecureAll:
+        return SecurityLevel::HW_SECURE_ALL;
+    default:
+        return SecurityLevel::UNKNOWN;
+    }
+}
+
 static DrmPlugin::OfflineLicenseState toOfflineLicenseState(
         OfflineLicenseState licenseState) {
     switch(licenseState) {
@@ -156,26 +174,26 @@
     }
 }
 
-static DrmPlugin::HdcpLevel toHdcpLevel(HdcpLevel level) {
+static DrmPlugin::HdcpLevel toHdcpLevel(HdcpLevel_V1_2 level) {
     switch(level) {
-    case HdcpLevel::HDCP_NONE:
+    case HdcpLevel_V1_2::HDCP_NONE:
         return DrmPlugin::kHdcpNone;
-    case HdcpLevel::HDCP_V1:
+    case HdcpLevel_V1_2::HDCP_V1:
         return DrmPlugin::kHdcpV1;
-    case HdcpLevel::HDCP_V2:
+    case HdcpLevel_V1_2::HDCP_V2:
         return DrmPlugin::kHdcpV2;
-    case HdcpLevel::HDCP_V2_1:
+    case HdcpLevel_V1_2::HDCP_V2_1:
         return DrmPlugin::kHdcpV2_1;
-    case HdcpLevel::HDCP_V2_2:
+    case HdcpLevel_V1_2::HDCP_V2_2:
         return DrmPlugin::kHdcpV2_2;
-    case HdcpLevel::HDCP_NO_OUTPUT:
+    case HdcpLevel_V1_2::HDCP_V2_3:
+        return DrmPlugin::kHdcpV2_3;
+    case HdcpLevel_V1_2::HDCP_NO_OUTPUT:
         return DrmPlugin::kHdcpNoOutput;
     default:
         return DrmPlugin::kHdcpLevelUnknown;
     }
 }
-
-
 static ::KeyedVector toHidlKeyedVector(const KeyedVector<String8, String8>&
         keyedVector) {
     std::vector<KeyValue> stdKeyedVector;
@@ -568,16 +586,39 @@
     return Void();
 }
 
-bool DrmHal::isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) {
+bool DrmHal::matchMimeTypeAndSecurityLevel(sp<IDrmFactory> &factory,
+                                           const uint8_t uuid[16],
+                                           const String8 &mimeType,
+                                           DrmPlugin::SecurityLevel level) {
+    if (mimeType == "") {
+        return true;
+    } else if (!factory->isContentTypeSupported(mimeType.string())) {
+        return false;
+    }
+
+    if (level == DrmPlugin::kSecurityLevelUnknown) {
+        return true;
+    } else {
+        sp<drm::V1_2::IDrmFactory> factoryV1_2 = drm::V1_2::IDrmFactory::castFrom(factory);
+        if (factoryV1_2 == NULL) {
+            return true;
+        } else if (factoryV1_2->isCryptoSchemeSupported_1_2(uuid,
+                        mimeType.string(), toHidlSecurityLevel(level))) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool DrmHal::isCryptoSchemeSupported(const uint8_t uuid[16],
+                                     const String8 &mimeType,
+                                     DrmPlugin::SecurityLevel level) {
     Mutex::Autolock autoLock(mLock);
 
     for (size_t i = 0; i < mFactories.size(); i++) {
-        if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
-            if (mimeType != "") {
-                if (mFactories[i]->isContentTypeSupported(mimeType.string())) {
-                    return true;
-                }
-            } else {
+        sp<IDrmFactory> factory = mFactories[i];
+        if (factory->isCryptoSchemeSupported(uuid)) {
+            if (matchMimeTypeAndSecurityLevel(factory, uuid, mimeType, level)) {
                 return true;
             }
         }
@@ -633,30 +674,15 @@
     Mutex::Autolock autoLock(mLock);
     INIT_CHECK();
 
-    SecurityLevel hSecurityLevel;
+    SecurityLevel hSecurityLevel = toHidlSecurityLevel(level);
     bool setSecurityLevel = true;
 
-    switch(level) {
-    case DrmPlugin::kSecurityLevelSwSecureCrypto:
-        hSecurityLevel = SecurityLevel::SW_SECURE_CRYPTO;
-        break;
-    case DrmPlugin::kSecurityLevelSwSecureDecode:
-        hSecurityLevel = SecurityLevel::SW_SECURE_DECODE;
-        break;
-    case DrmPlugin::kSecurityLevelHwSecureCrypto:
-        hSecurityLevel = SecurityLevel::HW_SECURE_CRYPTO;
-        break;
-    case DrmPlugin::kSecurityLevelHwSecureDecode:
-        hSecurityLevel = SecurityLevel::HW_SECURE_DECODE;
-        break;
-    case DrmPlugin::kSecurityLevelHwSecureAll:
-        hSecurityLevel = SecurityLevel::HW_SECURE_ALL;
-        break;
-    case DrmPlugin::kSecurityLevelMax:
+    if (level == DrmPlugin::kSecurityLevelMax) {
         setSecurityLevel = false;
-        break;
-    default:
-        return ERROR_DRM_CANNOT_HANDLE;
+    } else {
+        if (hSecurityLevel == SecurityLevel::UNKNOWN) {
+            return ERROR_DRM_CANNOT_HANDLE;
+        }
     }
 
     status_t  err = UNKNOWN_ERROR;
@@ -1093,22 +1119,31 @@
     }
     status_t err = UNKNOWN_ERROR;
 
-    if (mPluginV1_1 == NULL) {
-        return ERROR_DRM_CANNOT_HANDLE;
-    }
-
     *connected = DrmPlugin::kHdcpLevelUnknown;
     *max = DrmPlugin::kHdcpLevelUnknown;
 
-    Return<void> hResult = mPluginV1_1->getHdcpLevels(
-            [&](Status status, const HdcpLevel& hConnected, const HdcpLevel& hMax) {
-                if (status == Status::OK) {
-                    *connected = toHdcpLevel(hConnected);
-                    *max = toHdcpLevel(hMax);
-                }
-                err = toStatusT(status);
-            }
-    );
+    Return<void> hResult;
+    if (mPluginV1_2 != NULL) {
+        hResult = mPluginV1_2->getHdcpLevels_1_2(
+                [&](Status_V1_2 status, const HdcpLevel_V1_2& hConnected, const HdcpLevel_V1_2& hMax) {
+                    if (status == Status_V1_2::OK) {
+                        *connected = toHdcpLevel(hConnected);
+                        *max = toHdcpLevel(hMax);
+                    }
+                    err = toStatusT_1_2(status);
+                });
+    } else if (mPluginV1_1 != NULL) {
+        hResult = mPluginV1_1->getHdcpLevels(
+                [&](Status status, const HdcpLevel& hConnected, const HdcpLevel& hMax) {
+                    if (status == Status::OK) {
+                        *connected = toHdcpLevel(static_cast<HdcpLevel_V1_2>(hConnected));
+                        *max = toHdcpLevel(static_cast<HdcpLevel_V1_2>(hMax));
+                    }
+                    err = toStatusT(status);
+                });
+    } else {
+        return ERROR_DRM_CANNOT_HANDLE;
+    }
 
     return hResult.isOk() ? err : DEAD_OBJECT;
 }
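
A small caller-side sketch of the extended query (illustrative only; it assumes a DrmHal instance
may be constructed directly here and uses a placeholder UUID). Passing kSecurityLevelUnknown keeps
the old mime-type-only behavior, while a concrete level routes through the new
matchMimeTypeAndSecurityLevel() path:

    // Probe whether any installed plugin supports the scheme for "video/mp4"
    // at HW_SECURE_ALL, and separately at any (unknown) security level.
    sp<DrmHal> drm = new DrmHal();  // sketch: assumes direct construction is appropriate here
    uint8_t uuid[16] = { /* 16-byte crypto scheme UUID */ };
    bool hwSecureAll = drm->isCryptoSchemeSupported(
            uuid, String8("video/mp4"), DrmPlugin::kSecurityLevelHwSecureAll);
    bool anyLevel = drm->isCryptoSchemeSupported(
            uuid, String8("video/mp4"), DrmPlugin::kSecurityLevelUnknown);
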
diff --git a/drm/libmediadrm/IDrm.cpp b/drm/libmediadrm/IDrm.cpp
index 8c26317..0f34315 100644
--- a/drm/libmediadrm/IDrm.cpp
+++ b/drm/libmediadrm/IDrm.cpp
@@ -83,11 +83,14 @@
         return reply.readInt32();
     }
 
-    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) {
+    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType,
+            DrmPlugin::SecurityLevel level) {
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
         data.write(uuid, 16);
         data.writeString8(mimeType);
+        data.writeInt32(level);
+
         status_t status = remote()->transact(IS_CRYPTO_SUPPORTED, data, &reply);
         if (status != OK) {
             ALOGE("isCryptoSchemeSupported: binder call failed: %d", status);
@@ -123,11 +126,11 @@
         return reply.readInt32();
     }
 
-    virtual status_t openSession(DrmPlugin::SecurityLevel securityLevel,
+    virtual status_t openSession(DrmPlugin::SecurityLevel level,
             Vector<uint8_t> &sessionId) {
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
-        data.writeInt32(securityLevel);
+        data.writeInt32(level);
 
         status_t status = remote()->transact(OPEN_SESSION, data, &reply);
         if (status != OK) {
@@ -768,7 +771,9 @@
             uint8_t uuid[16];
             data.read(uuid, sizeof(uuid));
             String8 mimeType = data.readString8();
-            reply->writeInt32(isCryptoSchemeSupported(uuid, mimeType));
+            DrmPlugin::SecurityLevel level =
+                    static_cast<DrmPlugin::SecurityLevel>(data.readInt32());
+            reply->writeInt32(isCryptoSchemeSupported(uuid, mimeType, level));
             return OK;
         }
 
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmFactory.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmFactory.cpp
index 9d040a8..9fb5bbe 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmFactory.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmFactory.cpp
@@ -34,6 +34,7 @@
 namespace clearkey {
 
 using ::android::hardware::drm::V1_0::Status;
+using ::android::hardware::drm::V1_1::SecurityLevel;
 using ::android::hardware::Void;
 
 Return<bool> DrmFactory::isCryptoSchemeSupported(
@@ -41,6 +42,13 @@
     return clearkeydrm::isClearKeyUUID(uuid.data());
 }
 
+Return<bool> DrmFactory::isCryptoSchemeSupported_1_2(const hidl_array<uint8_t, 16>& uuid,
+                                                     const hidl_string &mimeType,
+                                                     SecurityLevel level) {
+    return isCryptoSchemeSupported(uuid) && isContentTypeSupported(mimeType) &&
+            level == SecurityLevel::SW_SECURE_CRYPTO;
+}
+
 Return<bool> DrmFactory::isContentTypeSupported(const hidl_string &mimeType) {
     // This should match the mimeTypes handed by InitDataParser.
     return mimeType == kIsoBmffVideoMimeType ||
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyTypes.h b/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyTypes.h
index 2dafa36..03c434e 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyTypes.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyTypes.h
@@ -28,6 +28,7 @@
 namespace clearkey {
 
 using ::android::hardware::drm::V1_0::KeyValue;
+using ::android::hardware::drm::V1_1::SecurityLevel;
 using ::android::hardware::hidl_vec;
 
 const uint8_t kBlockSize = 16; //AES_BLOCK_SIZE;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmFactory.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmFactory.h
index ff715ea..4ca856d 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/DrmFactory.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmFactory.h
@@ -39,6 +39,10 @@
     Return<bool> isCryptoSchemeSupported(const hidl_array<uint8_t, 16>& uuid)
             override;
 
+    Return<bool> isCryptoSchemeSupported_1_2(const hidl_array<uint8_t, 16>& uuid,
+                                             const hidl_string& mimeType,
+                                             SecurityLevel level) override;
+
     Return<bool> isContentTypeSupported(const hidl_string &mimeType)
             override;
 
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
index a9b897b..ba5fa65 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
@@ -63,6 +63,7 @@
 typedef drm::V1_1::KeyRequestType KeyRequestType_V1_1;
 typedef drm::V1_2::IDrmPluginListener IDrmPluginListener_V1_2;
 typedef drm::V1_2::Status Status_V1_2;
+typedef drm::V1_2::HdcpLevel HdcpLevel_V1_2;
 
 struct DrmPlugin : public IDrmPlugin {
     explicit DrmPlugin(SessionLibrary* sessionLibrary);
@@ -162,6 +163,13 @@
         return Void();
     }
 
+    Return<void> getHdcpLevels_1_2(getHdcpLevels_1_2_cb _hidl_cb) {
+        HdcpLevel_V1_2 connectedLevel = HdcpLevel_V1_2::HDCP_NONE;
+        HdcpLevel_V1_2 maxLevel = HdcpLevel_V1_2::HDCP_NO_OUTPUT;
+        _hidl_cb(Status_V1_2::OK, connectedLevel, maxLevel);
+        return Void();
+    }
+
     Return<void> getNumberOfSessions(getNumberOfSessions_cb _hidl_cb) override;
 
     Return<void> getSecurityLevel(const hidl_vec<uint8_t>& sessionId,
diff --git a/include/media/MediaExtractorPluginHelper.h b/include/media/MediaExtractorPluginHelper.h
index f4d4da6..b86f177 100644
--- a/include/media/MediaExtractorPluginHelper.h
+++ b/include/media/MediaExtractorPluginHelper.h
@@ -171,6 +171,9 @@
 };
 
 inline CMediaTrack *wrap(MediaTrackHelper *track) {
+    if (track == nullptr) {
+        return nullptr;
+    }
     CMediaTrack *wrapper = (CMediaTrack*) malloc(sizeof(CMediaTrack));
     wrapper->data = track;
     wrapper->free = [](void *data) -> void {
diff --git a/include/media/MediaTrack.h b/include/media/MediaTrack.h
index e828a7f..493eba3 100644
--- a/include/media/MediaTrack.h
+++ b/include/media/MediaTrack.h
@@ -142,7 +142,7 @@
 
 class MediaTrackCUnwrapper : public MediaTrack {
 public:
-    explicit MediaTrackCUnwrapper(CMediaTrack *wrapper);
+    static MediaTrackCUnwrapper *create(CMediaTrack *wrapper);
 
     virtual status_t start();
     virtual status_t stop();
@@ -155,6 +155,7 @@
     virtual ~MediaTrackCUnwrapper();
 
 private:
+    explicit MediaTrackCUnwrapper(CMediaTrack *wrapper);
     CMediaTrack *wrapper;
     MediaBufferGroup *bufferGroup;
 };
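
The matching .cpp change is not shown in this excerpt; a plausible sketch of the new factory (an
assumption, not necessarily the actual implementation) is that it rejects a null wrapper before
constructing, mirroring the nullptr guard added to wrap() above:

    // Hypothetical sketch: validate the C wrapper before taking ownership, so callers
    // get nullptr instead of an object wrapping nothing.
    MediaTrackCUnwrapper *MediaTrackCUnwrapper::create(CMediaTrack *wrapper) {
        if (wrapper == nullptr) {
            return nullptr;
        }
        return new MediaTrackCUnwrapper(wrapper);
    }
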
diff --git a/media/codec2/sfplugin/C2OMXNode.cpp b/media/codec2/sfplugin/C2OMXNode.cpp
index 749fd7a..9500aed 100644
--- a/media/codec2/sfplugin/C2OMXNode.cpp
+++ b/media/codec2/sfplugin/C2OMXNode.cpp
@@ -225,14 +225,18 @@
     if (omxBuf.mBufferType == OMXBuffer::kBufferTypeANWBuffer
             && omxBuf.mGraphicBuffer != nullptr) {
         std::shared_ptr<C2GraphicAllocation> alloc;
+        native_handle_t *clonedHandle = native_handle_clone(omxBuf.mGraphicBuffer->handle);
         handle = WrapNativeCodec2GrallocHandle(
-                native_handle_clone(omxBuf.mGraphicBuffer->handle),
+                clonedHandle,
                 omxBuf.mGraphicBuffer->width,
                 omxBuf.mGraphicBuffer->height,
                 omxBuf.mGraphicBuffer->format,
                 omxBuf.mGraphicBuffer->usage,
                 omxBuf.mGraphicBuffer->stride);
         c2_status_t err = mAllocator->priorGraphicAllocation(handle, &alloc);
+        if (clonedHandle) {
+            native_handle_delete(clonedHandle);
+        }
         if (err != OK) {
             return UNKNOWN_ERROR;
         }
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 55a97d8..b529cbc 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -271,12 +271,8 @@
 
 namespace {
 
-// TODO: get this info from component
-const static size_t kMinInputBufferArraySize = 4;
-const static size_t kMaxPipelineCapacity = 18;
-const static size_t kChannelOutputDelay = 0;
-const static size_t kMinOutputBufferArraySize = kMaxPipelineCapacity +
-                                                kChannelOutputDelay;
+const static size_t kSmoothnessFactor = 4;
+const static size_t kRenderingDepth = 3;
 const static size_t kLinearBufferSize = 1048576;
 // This can fit 4K RGBA frame, and most likely client won't need more than this.
 const static size_t kMaxLinearBufferSize = 3840 * 2160 * 4;
@@ -829,6 +825,7 @@
             const sp<ICrypto> &crypto,
             int32_t heapSeqNum,
             size_t capacity,
+            size_t numInputSlots,
             const char *componentName, const char *name = "EncryptedInput")
         : LinearInputBuffers(componentName, name),
           mUsage({0, 0}),
@@ -840,7 +837,7 @@
         } else {
             mUsage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
         }
-        for (size_t i = 0; i < kMinInputBufferArraySize; ++i) {
+        for (size_t i = 0; i < numInputSlots; ++i) {
             sp<IMemory> memory = mDealer->allocate(capacity);
             if (memory == nullptr) {
                 ALOGD("[%s] Failed to allocate memory from dealer: only %zu slots allocated", mName, i);
@@ -951,11 +948,12 @@
 
 class GraphicInputBuffers : public CCodecBufferChannel::InputBuffers {
 public:
-    GraphicInputBuffers(const char *componentName, const char *name = "2D-BB-Input")
+    GraphicInputBuffers(
+            size_t numInputSlots, const char *componentName, const char *name = "2D-BB-Input")
         : InputBuffers(componentName, name),
           mImpl(mName),
           mLocalBufferPool(LocalBufferPool::Create(
-                  kMaxLinearBufferSize * kMinInputBufferArraySize)) { }
+                  kMaxLinearBufferSize * numInputSlots)) { }
     ~GraphicInputBuffers() override = default;
 
     bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) override {
@@ -1291,10 +1289,11 @@
 
 class RawGraphicOutputBuffers : public FlexOutputBuffers {
 public:
-    RawGraphicOutputBuffers(const char *componentName, const char *name = "2D-BB-Output")
+    RawGraphicOutputBuffers(
+            size_t numOutputSlots, const char *componentName, const char *name = "2D-BB-Output")
         : FlexOutputBuffers(componentName, name),
           mLocalBufferPool(LocalBufferPool::Create(
-                  kMaxLinearBufferSize * kMinOutputBufferArraySize)) { }
+                  kMaxLinearBufferSize * numOutputSlots)) { }
     ~RawGraphicOutputBuffers() override = default;
 
     sp<Codec2Buffer> wrap(const std::shared_ptr<C2Buffer> &buffer) override {
@@ -1545,6 +1544,8 @@
         const std::shared_ptr<CCodecCallback> &callback)
     : mHeapSeqNum(-1),
       mCCodecCallback(callback),
+      mNumInputSlots(kSmoothnessFactor),
+      mNumOutputSlots(kSmoothnessFactor),
       mFrameIndex(0u),
       mFirstValidFrameIndex(0u),
       mMetaMode(MODE_NONE),
@@ -2006,7 +2007,7 @@
     Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
 
     if (!(*buffers)->isArrayMode()) {
-        *buffers = (*buffers)->toArrayMode(kMinInputBufferArraySize);
+        *buffers = (*buffers)->toArrayMode(mNumInputSlots);
     }
 
     (*buffers)->getArray(array);
@@ -2017,7 +2018,7 @@
     Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
 
     if (!(*buffers)->isArrayMode()) {
-        *buffers = (*buffers)->toArrayMode(kMinOutputBufferArraySize);
+        *buffers = (*buffers)->toArrayMode(mNumOutputSlots);
     }
 
     (*buffers)->getArray(array);
@@ -2029,12 +2030,19 @@
     C2StreamBufferTypeSetting::output oStreamFormat(0u);
     C2PortReorderBufferDepthTuning::output reorderDepth;
     C2PortReorderKeySetting::output reorderKey;
+    C2PortActualDelayTuning::input inputDelay(0);
+    C2PortActualDelayTuning::output outputDelay(0);
+    C2ActualPipelineDelayTuning pipelineDelay(0);
+
     c2_status_t err = mComponent->query(
             {
                 &iStreamFormat,
                 &oStreamFormat,
                 &reorderDepth,
                 &reorderKey,
+                &inputDelay,
+                &pipelineDelay,
+                &outputDelay,
             },
             {},
             C2_DONT_BLOCK,
@@ -2057,6 +2065,13 @@
             reorder->setKey(reorderKey.value);
         }
     }
+
+    mNumInputSlots =
+        (inputDelay ? inputDelay.value : 0) +
+        (pipelineDelay ? pipelineDelay.value : 0) +
+        kSmoothnessFactor;
+    mNumOutputSlots = (outputDelay ? outputDelay.value : 0) + kSmoothnessFactor;
+
     // TODO: get this from input format
     bool secure = mComponent->getName().find(".secure") != std::string::npos;
 
@@ -2127,6 +2142,7 @@
             pools->inputPool = pool;
         }
 
+        bool forceArrayMode = false;
         Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
         if (graphic) {
             if (mInputSurface) {
@@ -2134,7 +2150,7 @@
             } else if (mMetaMode == MODE_ANW) {
                 buffers->reset(new GraphicMetadataInputBuffers(mName));
             } else {
-                buffers->reset(new GraphicInputBuffers(mName));
+                buffers->reset(new GraphicInputBuffers(mNumInputSlots, mName));
             }
         } else {
             if (hasCryptoOrDescrambler()) {
@@ -2147,7 +2163,7 @@
                 if (mDealer == nullptr) {
                     mDealer = new MemoryDealer(
                             align(capacity, MemoryDealer::getAllocationAlignment())
-                                * (kMinInputBufferArraySize + 1),
+                                * (mNumInputSlots + 1),
                             "EncryptedLinearInputBuffers");
                     mDecryptDestination = mDealer->allocate((size_t)capacity);
                 }
@@ -2157,7 +2173,9 @@
                     mHeapSeqNum = -1;
                 }
                 buffers->reset(new EncryptedLinearInputBuffers(
-                        secure, mDealer, mCrypto, mHeapSeqNum, (size_t)capacity, mName));
+                        secure, mDealer, mCrypto, mHeapSeqNum, (size_t)capacity,
+                        mNumInputSlots, mName));
+                forceArrayMode = true;
             } else {
                 buffers->reset(new LinearInputBuffers(mName));
             }
@@ -2169,6 +2187,10 @@
         } else {
             // TODO: error
         }
+
+        if (forceArrayMode) {
+            *buffers = (*buffers)->toArrayMode(mNumInputSlots);
+        }
     }
 
     if (outputFormat != nullptr) {
@@ -2286,7 +2308,7 @@
             if (outputSurface) {
                 buffers->reset(new GraphicOutputBuffers(mName));
             } else {
-                buffers->reset(new RawGraphicOutputBuffers(mName));
+                buffers->reset(new RawGraphicOutputBuffers(mNumOutputSlots, mName));
             }
         } else {
             buffers->reset(new LinearOutputBuffers(mName));
@@ -2307,7 +2329,7 @@
             // WORKAROUND: if we're using early CSD workaround we convert to
             //             array mode, to appease apps assuming the output
             //             buffers to be of the same size.
-            (*buffers) = (*buffers)->toArrayMode(kMinOutputBufferArraySize);
+            (*buffers) = (*buffers)->toArrayMode(mNumOutputSlots);
 
             int32_t channelCount;
             int32_t sampleRate;
@@ -2335,27 +2357,10 @@
     // about buffers from the previous generation do not interfere with the
     // newly initialized pipeline capacity.
 
-    // Query delays
-    C2PortRequestedDelayTuning::input inputDelay;
-    C2PortRequestedDelayTuning::output outputDelay;
-    C2RequestedPipelineDelayTuning pipelineDelay;
-#if 0
-    err = mComponent->query(
-            { &inputDelay, &pipelineDelay, &outputDelay },
-            {},
-            C2_DONT_BLOCK,
-            nullptr);
     mAvailablePipelineCapacity.initialize(
-            inputDelay,
-            inputDelay + pipelineDelay,
-            inputDelay + pipelineDelay + outputDelay,
+            mNumInputSlots,
+            mNumInputSlots + mNumOutputSlots,
             mName);
-#else
-    mAvailablePipelineCapacity.initialize(
-            kMinInputBufferArraySize,
-            kMaxPipelineCapacity,
-            mName);
-#endif
 
     mInputMetEos = false;
     mSync.start();
@@ -2374,7 +2379,7 @@
     }
     std::vector<sp<MediaCodecBuffer>> toBeQueued;
     // TODO: use proper buffer depth instead of this random value
-    for (size_t i = 0; i < kMinInputBufferArraySize; ++i) {
+    for (size_t i = 0; i < mNumInputSlots; ++i) {
         size_t index;
         sp<MediaCodecBuffer> buffer;
         {
@@ -2731,7 +2736,7 @@
     sp<IGraphicBufferProducer> producer;
     if (newSurface) {
         newSurface->setScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
-        newSurface->setMaxDequeuedBufferCount(kMinOutputBufferArraySize);
+        newSurface->setMaxDequeuedBufferCount(mNumOutputSlots + kRenderingDepth);
         producer = newSurface->getIGraphicBufferProducer();
         producer->setGenerationNumber(generation);
     } else {
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index 431baaa..fd806b7 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -37,6 +37,8 @@
 
 namespace android {
 
+class MemoryDealer;
+
 class CCodecCallback {
 public:
     virtual ~CCodecCallback() = default;
@@ -233,6 +235,9 @@
     QueueSync mQueueSync;
     std::vector<std::unique_ptr<C2Param>> mParamsToBeSet;
 
+    size_t mNumInputSlots;
+    size_t mNumOutputSlots;
+
     Mutexed<std::unique_ptr<InputBuffers>> mInputBuffers;
     Mutexed<std::list<sp<ABuffer>>> mFlushedConfigs;
     Mutexed<std::unique_ptr<OutputBuffers>> mOutputBuffers;
diff --git a/media/codec2/vndk/C2AllocatorGralloc.cpp b/media/codec2/vndk/C2AllocatorGralloc.cpp
index 4878974..18f2430 100644
--- a/media/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/codec2/vndk/C2AllocatorGralloc.cpp
@@ -304,17 +304,23 @@
 }
 
 C2AllocationGralloc::~C2AllocationGralloc() {
-    if (!mBuffer) {
-        return;
-    }
-    if (mLocked) {
+    if (mBuffer && mLocked) {
         // implementation ignores addresss and rect
         uint8_t* addr[C2PlanarLayout::MAX_NUM_PLANES] = {};
         unmap(addr, C2Rect(), nullptr);
     }
-    mMapper->freeBuffer(const_cast<native_handle_t *>(mBuffer));
-    native_handle_delete(const_cast<native_handle_t*>(
-            reinterpret_cast<const native_handle_t*>(mHandle)));
+    if (mBuffer) {
+        mMapper->freeBuffer(const_cast<native_handle_t *>(mBuffer));
+    }
+    if (mHandle) {
+        native_handle_delete(
+                const_cast<native_handle_t *>(reinterpret_cast<const native_handle_t *>(mHandle)));
+    }
+    if (mLockedHandle) {
+        native_handle_delete(
+                const_cast<native_handle_t *>(
+                        reinterpret_cast<const native_handle_t *>(mLockedHandle)));
+    }
 }
 
 c2_status_t C2AllocationGralloc::map(
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index cc1534a..c7f4242 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -594,7 +594,7 @@
                 }
             } else {
                 uint32_t sampleIndex;
-                uint32_t sampleTime;
+                uint64_t sampleTime;
                 if (track->timescale != 0 &&
                         track->sampleTable->findThumbnailSample(&sampleIndex) == OK
                         && track->sampleTable->getMetaDataForSample(
@@ -4503,7 +4503,7 @@
     }
 
     if (!strncasecmp("video/", mime, 6)) {
-        uint32_t firstSampleCTS = 0;
+        uint64_t firstSampleCTS = 0;
         err = mSampleTable->getMetaDataForSample(0, NULL, NULL, &firstSampleCTS);
         // Start offset should be less or equal to composition time of first sample.
         // Composition time stamp of first sample cannot be negative.
@@ -4868,7 +4868,9 @@
         off64_t offset, bool isSubsampleEncryption, uint32_t flags) {
 
     int32_t ivlength;
-    CHECK(AMediaFormat_getInt32(mFormat, AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE, &ivlength));
+    if (!AMediaFormat_getInt32(mFormat, AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE, &ivlength)) {
+        return ERROR_MALFORMED;
+    }
 
     // only 0, 8 and 16 byte initialization vectors are supported
     if (ivlength != 0 && ivlength != 8 && ivlength != 16) {
@@ -5349,7 +5351,7 @@
                         sampleIndex, &syncSampleIndex, findFlags);
             }
 
-            uint32_t sampleTime;
+            uint64_t sampleTime;
             if (err == OK) {
                 err = mSampleTable->getMetaDataForSample(
                         sampleIndex, NULL, NULL, &sampleTime);
@@ -5399,7 +5401,7 @@
 
     off64_t offset = 0;
     size_t size = 0;
-    uint32_t cts, stts;
+    uint64_t cts, stts;
     bool isSyncSample;
     bool newBuffer = false;
     if (mBuffer == NULL) {
diff --git a/media/extractors/mp4/SampleIterator.cpp b/media/extractors/mp4/SampleIterator.cpp
index 1a6d306..d6287ec 100644
--- a/media/extractors/mp4/SampleIterator.cpp
+++ b/media/extractors/mp4/SampleIterator.cpp
@@ -301,7 +301,7 @@
 }
 
 status_t SampleIterator::findSampleTimeAndDuration(
-        uint32_t sampleIndex, uint32_t *time, uint32_t *duration) {
+        uint32_t sampleIndex, uint64_t *time, uint64_t *duration) {
     if (sampleIndex >= mTable->mNumSampleSizes) {
         return ERROR_OUT_OF_RANGE;
     }
@@ -314,8 +314,8 @@
             break;
         }
         if (mTimeToSampleIndex == mTable->mTimeToSampleCount ||
-            (mTTSDuration != 0 && mTTSCount > UINT32_MAX / mTTSDuration) ||
-            mTTSSampleTime > UINT32_MAX - (mTTSCount * mTTSDuration)) {
+            (mTTSDuration != 0 && mTTSCount > UINT64_MAX / mTTSDuration) ||
+            mTTSSampleTime > UINT64_MAX - (mTTSCount * mTTSDuration)) {
             return ERROR_OUT_OF_RANGE;
         }
 
@@ -341,8 +341,8 @@
     int32_t offset = mTable->getCompositionTimeOffset(sampleIndex);
     if ((offset < 0 && *time < (offset == INT32_MIN ?
             INT32_MAX : uint32_t(-offset))) ||
-            (offset > 0 && *time > UINT32_MAX - offset)) {
-        ALOGE("%u + %d would overflow", *time, offset);
+            (offset > 0 && *time > UINT64_MAX - offset)) {
+        ALOGE("%llu + %d would overflow", (unsigned long long) *time, offset);
         return ERROR_OUT_OF_RANGE;
     }
     if (offset > 0) {
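
The widened checks above follow the standard pre-addition overflow guard: before computing a + b
on unsigned 64-bit values, verify that a <= UINT64_MAX - b. As a standalone illustration (not part
of the change):

    #include <cstdint>

    // Adds a and b into *sum only when the result fits in uint64_t; otherwise
    // reports failure instead of silently wrapping around.
    static bool checkedAddU64(uint64_t a, uint64_t b, uint64_t *sum) {
        if (a > UINT64_MAX - b) {
            return false;  // a + b would overflow
        }
        *sum = a + b;
        return true;
    }
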
diff --git a/media/extractors/mp4/SampleIterator.h b/media/extractors/mp4/SampleIterator.h
index 6e4f60e..5a0ea76 100644
--- a/media/extractors/mp4/SampleIterator.h
+++ b/media/extractors/mp4/SampleIterator.h
@@ -33,8 +33,8 @@
     uint32_t getDescIndex() const { return mChunkDesc; }
     off64_t getSampleOffset() const { return mCurrentSampleOffset; }
     size_t getSampleSize() const { return mCurrentSampleSize; }
-    uint32_t getSampleTime() const { return mCurrentSampleTime; }
-    uint32_t getSampleDuration() const { return mCurrentSampleDuration; }
+    uint64_t getSampleTime() const { return mCurrentSampleTime; }
+    uint64_t getSampleDuration() const { return mCurrentSampleDuration; }
 
     uint32_t getLastSampleIndexInChunk() const {
         return mCurrentSampleIndex + mSamplesPerChunk -
@@ -63,20 +63,20 @@
 
     uint32_t mTimeToSampleIndex;
     uint32_t mTTSSampleIndex;
-    uint32_t mTTSSampleTime;
+    uint64_t mTTSSampleTime;
     uint32_t mTTSCount;
-    uint32_t mTTSDuration;
+    uint64_t mTTSDuration;
 
     uint32_t mCurrentSampleIndex;
     off64_t mCurrentSampleOffset;
     size_t mCurrentSampleSize;
-    uint32_t mCurrentSampleTime;
-    uint32_t mCurrentSampleDuration;
+    uint64_t mCurrentSampleTime;
+    uint64_t mCurrentSampleDuration;
 
     void reset();
     status_t findChunkRange(uint32_t sampleIndex);
     status_t getChunkOffset(uint32_t chunk, off64_t *offset);
-    status_t findSampleTimeAndDuration(uint32_t sampleIndex, uint32_t *time, uint32_t *duration);
+    status_t findSampleTimeAndDuration(uint32_t sampleIndex, uint64_t *time, uint64_t *duration);
 
     SampleIterator(const SampleIterator &);
     SampleIterator &operator=(const SampleIterator &);
diff --git a/media/extractors/mp4/SampleTable.cpp b/media/extractors/mp4/SampleTable.cpp
index d242798..571b441 100644
--- a/media/extractors/mp4/SampleTable.cpp
+++ b/media/extractors/mp4/SampleTable.cpp
@@ -614,7 +614,7 @@
     return OK;
 }
 
-uint32_t abs_difference(uint32_t time1, uint32_t time2) {
+uint64_t abs_difference(uint64_t time1, uint64_t time2) {
     return time1 > time2 ? time1 - time2 : time2 - time1;
 }
 
@@ -662,7 +662,7 @@
     }
 
     uint32_t sampleIndex = 0;
-    uint32_t sampleTime = 0;
+    uint64_t sampleTime = 0;
 
     for (uint32_t i = 0; i < mTimeToSampleCount; ++i) {
         uint32_t n = mTimeToSample[2 * i];
@@ -684,13 +684,13 @@
                         (compTimeDelta == INT32_MIN ?
                                 INT32_MAX : uint32_t(-compTimeDelta)))
                         || (compTimeDelta > 0 &&
-                                sampleTime > UINT32_MAX - compTimeDelta)) {
-                    ALOGE("%u + %d would overflow, clamping",
-                            sampleTime, compTimeDelta);
+                                sampleTime > UINT64_MAX - compTimeDelta)) {
+                    ALOGE("%llu + %d would overflow, clamping",
+                            (unsigned long long) sampleTime, compTimeDelta);
                     if (compTimeDelta < 0) {
                         sampleTime = 0;
                     } else {
-                        sampleTime = UINT32_MAX;
+                        sampleTime = UINT64_MAX;
                     }
                     compTimeDelta = 0;
                 }
@@ -701,10 +701,10 @@
             }
 
             ++sampleIndex;
-            if (sampleTime > UINT32_MAX - delta) {
-                ALOGE("%u + %u would overflow, clamping",
-                    sampleTime, delta);
-                sampleTime = UINT32_MAX;
+            if (sampleTime > UINT64_MAX - delta) {
+                ALOGE("%llu + %u would overflow, clamping",
+                    (unsigned long long) sampleTime, delta);
+                sampleTime = UINT64_MAX;
             } else {
                 sampleTime += delta;
             }
@@ -870,19 +870,19 @@
             if (err != OK) {
                 return err;
             }
-            uint32_t sample_time = mSampleIterator->getSampleTime();
+            uint64_t sample_time = mSampleIterator->getSampleTime();
 
             err = mSampleIterator->seekTo(mSyncSamples[left]);
             if (err != OK) {
                 return err;
             }
-            uint32_t upper_time = mSampleIterator->getSampleTime();
+            uint64_t upper_time = mSampleIterator->getSampleTime();
 
             err = mSampleIterator->seekTo(mSyncSamples[left - 1]);
             if (err != OK) {
                 return err;
             }
-            uint32_t lower_time = mSampleIterator->getSampleTime();
+            uint64_t lower_time = mSampleIterator->getSampleTime();
 
             // use abs_difference for safety
             if (abs_difference(upper_time, sample_time) >
@@ -955,9 +955,9 @@
         uint32_t sampleIndex,
         off64_t *offset,
         size_t *size,
-        uint32_t *compositionTime,
+        uint64_t *compositionTime,
         bool *isSyncSample,
-        uint32_t *sampleDuration) {
+        uint64_t *sampleDuration) {
     Mutex::Autolock autoLock(mLock);
 
     status_t err;
diff --git a/media/extractors/mp4/SampleTable.h b/media/extractors/mp4/SampleTable.h
index d4b5dc8..57f6e62 100644
--- a/media/extractors/mp4/SampleTable.h
+++ b/media/extractors/mp4/SampleTable.h
@@ -66,9 +66,9 @@
             uint32_t sampleIndex,
             off64_t *offset,
             size_t *size,
-            uint32_t *compositionTime,
+            uint64_t *compositionTime,
             bool *isSyncSample = NULL,
-            uint32_t *sampleDuration = NULL);
+            uint64_t *sampleDuration = NULL);
 
     // call only after getMetaDataForSample has been called successfully.
     uint32_t getLastSampleIndexInChunk();
@@ -124,7 +124,7 @@
 
     struct SampleTimeEntry {
         uint32_t mSampleIndex;
-        uint32_t mCompositionTime;
+        uint64_t mCompositionTime;
     };
     SampleTimeEntry *mSampleTimeEntries;
 
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.cpp b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
index e1509ee..49dd0b4 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
@@ -302,16 +302,21 @@
     return AMEDIA_ERROR_UNKNOWN;
 }
 
-void MPEG2TSExtractor::addSource(const sp<AnotherPacketSource> &impl) {
-    bool found = false;
+status_t MPEG2TSExtractor::findIndexOfSource(const sp<AnotherPacketSource> &impl, size_t *index) {
     for (size_t i = 0; i < mSourceImpls.size(); i++) {
         if (mSourceImpls[i] == impl) {
-            found = true;
-            break;
+            *index = i;
+            return OK;
         }
     }
-    if (!found) {
+    return NAME_NOT_FOUND;
+}
+
+void MPEG2TSExtractor::addSource(const sp<AnotherPacketSource> &impl) {
+    size_t index;
+    if (findIndexOfSource(impl, &index) != OK) {
         mSourceImpls.push(impl);
+        mSyncPoints.push();
     }
 }
 
@@ -319,6 +324,7 @@
     bool haveAudio = false;
     bool haveVideo = false;
     int64_t startTime = ALooper::GetNowUs();
+    size_t index;
 
     status_t err;
     while ((err = feedMore(true /* isInit */)) == OK
@@ -337,8 +343,9 @@
                     haveVideo = true;
                     addSource(impl);
                     if (!isScrambledFormat(*(format.get()))) {
-                        mSyncPoints.push();
-                        mSeekSyncPoints = &mSyncPoints.editTop();
+                        if (findIndexOfSource(impl, &index) == OK) {
+                            mSeekSyncPoints = &mSyncPoints.editItemAt(index);
+                        }
                     }
                 }
             }
@@ -352,10 +359,9 @@
                 if (format != NULL) {
                     haveAudio = true;
                     addSource(impl);
-                    if (!isScrambledFormat(*(format.get()))) {
-                        mSyncPoints.push();
-                        if (!haveVideo) {
-                            mSeekSyncPoints = &mSyncPoints.editTop();
+                    if (!isScrambledFormat(*(format.get())) && !haveVideo) {
+                        if (findIndexOfSource(impl, &index) == OK) {
+                            mSeekSyncPoints = &mSyncPoints.editItemAt(index);
                         }
                     }
                 }
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.h b/media/extractors/mpeg2/MPEG2TSExtractor.h
index e425d23..2537d3b 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.h
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.h
@@ -95,6 +95,7 @@
     status_t seekBeyond(int64_t seekTimeUs);
 
     status_t feedUntilBufferAvailable(const sp<AnotherPacketSource> &impl);
+    status_t findIndexOfSource(const sp<AnotherPacketSource> &impl, size_t *index);
 
     // Add a SynPoint derived from |event|.
     void addSyncPoint_l(const ATSParser::SyncEvent &event);
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 2a02b20..3de1514 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -105,9 +105,14 @@
         assert(false);
     }
     if (framesRead < 0) {
-        myData->inputError = framesRead;
-        printf("ERROR in read = %d = %s\n", framesRead,
-               AAudio_convertResultToText(framesRead));
+        // Expect INVALID_STATE if STATE_STARTING
+        if (myData->framesReadTotal > 0) {
+            myData->inputError = framesRead;
+            printf("ERROR in read = %d = %s\n", framesRead,
+                   AAudio_convertResultToText(framesRead));
+        } else {
+            framesRead = 0;
+        }
     } else {
         myData->framesReadTotal += framesRead;
     }
@@ -149,8 +154,10 @@
         int32_t totalFramesRead = 0;
         do {
             actualFramesRead = readFormattedData(myData, numFrames);
-            if (actualFramesRead) {
+            if (actualFramesRead > 0) {
                 totalFramesRead += actualFramesRead;
+            } else if (actualFramesRead < 0) {
+                result = AAUDIO_CALLBACK_RESULT_STOP;
             }
             // Ignore errors because input stream may not be started yet.
         } while (actualFramesRead > 0);
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index fffcda0..3b03601 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -62,7 +62,7 @@
         , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
         , mInService(inService)
         , mServiceInterface(serviceInterface)
-        , mAtomicTimestamp()
+        , mAtomicInternalTimestamp()
         , mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
         , mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
         {
@@ -349,8 +349,7 @@
     }
 }
 
-aaudio_result_t AudioStreamInternal::requestStop()
-{
+aaudio_result_t AudioStreamInternal::requestStop() {
     aaudio_result_t result = stopCallback();
     if (result != AAUDIO_OK) {
         return result;
@@ -364,7 +363,7 @@
 
     mClockModel.stop(AudioClock::getNanoseconds());
     setState(AAUDIO_STREAM_STATE_STOPPING);
-    mAtomicTimestamp.clear();
+    mAtomicInternalTimestamp.clear();
 
     return mServiceInterface.stopStream(mServiceStreamHandle);
 }
@@ -413,8 +412,8 @@
                            int64_t *framePosition,
                            int64_t *timeNanoseconds) {
     // Generated in server and passed to client. Return latest.
-    if (mAtomicTimestamp.isValid()) {
-        Timestamp timestamp = mAtomicTimestamp.read();
+    if (mAtomicInternalTimestamp.isValid()) {
+        Timestamp timestamp = mAtomicInternalTimestamp.read();
         int64_t position = timestamp.getPosition() + mFramesOffsetFromService;
         if (position >= 0) {
             *framePosition = position;
@@ -461,7 +460,7 @@
 
 aaudio_result_t AudioStreamInternal::onTimestampHardware(AAudioServiceMessage *message) {
     Timestamp timestamp(message->timestamp.position, message->timestamp.timestamp);
-    mAtomicTimestamp.write(timestamp);
+    mAtomicInternalTimestamp.write(timestamp);
     return AAUDIO_OK;
 }
 
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 3bb9e1e..1c88f52 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -163,7 +163,7 @@
 
     AAudioServiceInterface  &mServiceInterface;   // abstract interface to the service
 
-    SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;
+    SimpleDoubleBuffer<Timestamp>  mAtomicInternalTimestamp;
 
     AtomicRequestor          mNeedCatchUp;   // Ask read() or write() to sync on first timestamp.
 
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 58ef7b1..7dcb620 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -259,6 +259,7 @@
 
         if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
             ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
+            result = systemStopFromCallback();
             break;
         }
     }
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 9af47b2..6af8e7d 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -71,7 +71,7 @@
 
     mClockModel.stop(AudioClock::getNanoseconds());
     setState(AAUDIO_STREAM_STATE_PAUSING);
-    mAtomicTimestamp.clear();
+    mAtomicInternalTimestamp.clear();
     return mServiceInterface.pauseStream(mServiceStreamHandle);
 }
 
@@ -294,6 +294,7 @@
             }
         } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
             ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
+            result = systemStopFromCallback();
             break;
         }
     }
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 2fb3986..0d71efc 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -316,7 +316,7 @@
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
     ALOGD("%s(%p) called", __func__, stream);
-    return audioStream->systemStop();
+    return audioStream->systemStopFromApp();
 }
 
 AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream* stream,
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 391af29..e39a075 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -119,21 +119,29 @@
     return AAUDIO_OK;
 }
 
-aaudio_result_t AudioStream::safeStart() {
+aaudio_result_t AudioStream::systemStart() {
     std::lock_guard<std::mutex> lock(mStreamLock);
+
     if (collidesWithCallback()) {
         ALOGE("%s cannot be called from a callback!", __func__);
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    return requestStart();
+
+    aaudio_result_t result = requestStart();
+    if (result == AAUDIO_OK) {
+        // We only call this for logging in "dumpsys audio". So ignore return code.
+        (void) mPlayerBase->start();
+    }
+    return result;
 }
 
-aaudio_result_t AudioStream::safePause() {
+aaudio_result_t AudioStream::systemPause() {
+    std::lock_guard<std::mutex> lock(mStreamLock);
+
     if (!isPauseSupported()) {
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
-    std::lock_guard<std::mutex> lock(mStreamLock);
     if (collidesWithCallback()) {
         ALOGE("%s cannot be called from a callback!", __func__);
         return AAUDIO_ERROR_INVALID_STATE;
@@ -169,7 +177,12 @@
             return AAUDIO_ERROR_INVALID_STATE;
     }
 
-    return requestPause();
+    aaudio_result_t result = requestPause();
+    if (result == AAUDIO_OK) {
+        // We only call this for logging in "dumpsys audio". So ignore return code.
+        (void) mPlayerBase->pause();
+    }
+    return result;
 }
 
 aaudio_result_t AudioStream::safeFlush() {
@@ -192,12 +205,31 @@
     return requestFlush();
 }
 
-aaudio_result_t AudioStream::safeStop() {
+aaudio_result_t AudioStream::systemStopFromCallback() {
+    std::lock_guard<std::mutex> lock(mStreamLock);
+    aaudio_result_t result = safeStop();
+    if (result == AAUDIO_OK) {
+        // We only call this for logging in "dumpsys audio". So ignore return code.
+        (void) mPlayerBase->stop();
+    }
+    return result;
+}
+
+aaudio_result_t AudioStream::systemStopFromApp() {
     std::lock_guard<std::mutex> lock(mStreamLock);
     if (collidesWithCallback()) {
-        ALOGE("stream cannot be stopped from a callback!");
+        ALOGE("stream cannot be stopped by calling from a callback!");
         return AAUDIO_ERROR_INVALID_STATE;
     }
+    aaudio_result_t result = safeStop();
+    if (result == AAUDIO_OK) {
+        // We only call this for logging in "dumpsys audio". So ignore return code.
+        (void) mPlayerBase->stop();
+    }
+    return result;
+}
+
+aaudio_result_t AudioStream::safeStop() {
 
     switch (getState()) {
         // Proceed with stopping.
@@ -224,7 +256,7 @@
         case AAUDIO_STREAM_STATE_CLOSING:
         case AAUDIO_STREAM_STATE_CLOSED:
         default:
-            ALOGW("requestStop() stream not running, state = %s",
+            ALOGW("%s() stream not running, state = %s", __func__,
                   AAudio_convertStreamStateToText(getState()));
             return AAUDIO_ERROR_INVALID_STATE;
     }
@@ -349,21 +381,33 @@
     }
 }
 
-aaudio_result_t AudioStream::joinThread(void** returnArg, int64_t timeoutNanoseconds)
+aaudio_result_t AudioStream::joinThread(void** returnArg, int64_t timeoutNanoseconds __unused)
 {
     if (!mHasThread) {
         ALOGE("joinThread() - but has no thread");
         return AAUDIO_ERROR_INVALID_STATE;
     }
+    aaudio_result_t result = AAUDIO_OK;
+    // If the callback is stopping the stream because the app passed back STOP
+    // then we don't need to join(). The thread is already about to exit.
+    if (pthread_self() != mThread) {
+        // Called from an app thread. Not the callback.
 #if 0
-    // TODO implement equivalent of pthread_timedjoin_np()
-    struct timespec abstime;
-    int err = pthread_timedjoin_np(mThread, returnArg, &abstime);
+        // TODO implement equivalent of pthread_timedjoin_np()
+        struct timespec abstime;
+        int err = pthread_timedjoin_np(mThread, returnArg, &abstime);
 #else
-    int err = pthread_join(mThread, returnArg);
+        int err = pthread_join(mThread, returnArg);
 #endif
+        if (err) {
+            ALOGE("%s() pthread_join() returns err = %d", __func__, err);
+            result = AAudioConvert_androidToAAudioResult(-err);
+        }
+    }
+    // This must be set false so that the callback thread can be created
+    // when the stream is restarted.
     mHasThread = false;
-    return err ? AAudioConvert_androidToAAudioResult(-errno) : mThreadRegistrationResult;
+    return (result != AAUDIO_OK) ? result : mThreadRegistrationResult;
 }
 
 aaudio_data_callback_result_t AudioStream::maybeCallDataCallback(void *audioData,
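Editor's note (not part of the patch): for context on the stop-from-callback path now routed through systemStopFromCallback(), here is a minimal app-side sketch. It assumes a mono PCM16 output stream and a hypothetical "done" flag passed as user data; the app signals stop by returning AAUDIO_CALLBACK_RESULT_STOP instead of calling AAudioStream_requestStop() inside the callback.

    #include <aaudio/AAudio.h>
    #include <cstdint>
    #include <cstring>

    // Hypothetical user data: the app flips this flag when playback should end.
    static aaudio_data_callback_result_t dataCallback(AAudioStream * /*stream*/,
                                                      void *userData,
                                                      void *audioData,
                                                      int32_t numFrames) {
        const bool *done = static_cast<const bool *>(userData);
        // Render silence for brevity; assumes a mono PCM16 stream.
        memset(audioData, 0, numFrames * sizeof(int16_t));
        // Returning STOP lets AAudio stop the stream internally (now via
        // systemStopFromCallback()); calling AAudioStream_requestStop() from
        // inside the callback is rejected with AAUDIO_ERROR_INVALID_STATE.
        return *done ? AAUDIO_CALLBACK_RESULT_STOP
                     : AAUDIO_CALLBACK_RESULT_CONTINUE;
    }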
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 60200b2..46951f5 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -51,21 +51,6 @@
 
     virtual ~AudioStream();
 
-    /**
-     * Lock a mutex and make sure we are not calling from a callback function.
-     * @return result of requestStart();
-     */
-    aaudio_result_t safeStart();
-
-    aaudio_result_t safePause();
-
-    aaudio_result_t safeFlush();
-
-    aaudio_result_t safeStop();
-
-    aaudio_result_t safeClose();
-
-    // =========== Begin ABSTRACT methods ===========================
 protected:
 
     /* Asynchronous requests.
@@ -74,7 +59,7 @@
     virtual aaudio_result_t requestStart() = 0;
 
     /**
-     * Check the state to see if Pause if currently legal.
+     * Check the state to see if Pause is currently legal.
      *
      * @param result pointer to return code
      * @return true if OK to continue, if false then return result
@@ -356,33 +341,28 @@
         mPlayerBase->unregisterWithAudioManager();
     }
 
-    // Pass start request through PlayerBase for tracking.
-    aaudio_result_t systemStart() {
-        mPlayerBase->start();
-        // Pass aaudio_result_t around the PlayerBase interface, which uses status__t.
-        return mPlayerBase->getResult();
-    }
+    aaudio_result_t systemStart();
 
-    // Pass pause request through PlayerBase for tracking.
-    aaudio_result_t systemPause() {
-        mPlayerBase->pause();
-        return mPlayerBase->getResult();
-    }
+    aaudio_result_t systemPause();
 
-    // Pass stop request through PlayerBase for tracking.
-    aaudio_result_t systemStop() {
-        mPlayerBase->stop();
-        return mPlayerBase->getResult();
-    }
+    aaudio_result_t safeFlush();
+
+    /**
+     * This is called when an app calls AAudioStream_requestStop().
+     * It rejects calls made from the data callback.
+     */
+    aaudio_result_t systemStopFromApp();
+
+    /**
+     * This is called internally when an app callback returns AAUDIO_CALLBACK_RESULT_STOP.
+     */
+    aaudio_result_t systemStopFromCallback();
+
+    aaudio_result_t safeClose();
 
 protected:
 
-    // PlayerBase allows the system to control the stream.
-    // Calling through PlayerBase->start() notifies the AudioManager of the player state.
-    // The AudioManager also can start/stop a stream by calling mPlayerBase->playerStart().
-    // systemStart() ==> mPlayerBase->start()   mPlayerBase->playerStart() ==> requestStart()
-    //                        \                           /
-    //                         ------ AudioManager -------
+    // PlayerBase allows the system to control the stream volume.
     class MyPlayerBase : public android::PlayerBase {
     public:
         explicit MyPlayerBase(AudioStream *parent);
@@ -406,20 +386,19 @@
 
         void clearParentReference() { mParent = nullptr; }
 
+        // Just a stub. The ability to start audio through PlayerBase is being deprecated.
         android::status_t playerStart() override {
-            // mParent should NOT be null. So go ahead and crash if it is.
-            mResult = mParent->safeStart();
-            return AAudioConvert_aaudioToAndroidStatus(mResult);
+            return android::NO_ERROR;
         }
 
+        // Just a stub. The ability to pause audio through PlayerBase is being deprecated.
         android::status_t playerPause() override {
-            mResult = mParent->safePause();
-            return AAudioConvert_aaudioToAndroidStatus(mResult);
+            return android::NO_ERROR;
         }
 
+        // Just a stub. The ability to stop audio through PlayerBase is being deprecated.
         android::status_t playerStop() override {
-            mResult = mParent->safeStop();
-            return AAudioConvert_aaudioToAndroidStatus(mResult);
+            return android::NO_ERROR;
         }
 
         android::status_t playerSetVolume() override {
@@ -548,6 +527,8 @@
 
 private:
 
+    aaudio_result_t safeStop();
+
     std::mutex                 mStreamLock;
 
     const android::sp<MyPlayerBase>   mPlayerBase;
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index a6b9f5d..2edab58 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -78,8 +78,9 @@
 
 void AudioStreamLegacy::processCallbackCommon(aaudio_callback_operation_t opcode, void *info) {
     aaudio_data_callback_result_t callbackResult;
-    // This illegal size can be used to tell AudioFlinger to stop calling us.
-    // This takes advantage of AudioFlinger killing the stream.
+    // This illegal size can be used to tell AudioRecord or AudioTrack to stop calling us.
+    // This takes advantage of them killing the stream when they see a size out of range.
+    // That is an undocumented behavior.
     // TODO add to API in AudioRecord and AudioTrack
     const size_t SIZE_STOP_CALLBACKS = SIZE_MAX;
 
@@ -95,7 +96,7 @@
                 ALOGW("processCallbackCommon() data, stream disconnected");
                 audioBuffer->size = SIZE_STOP_CALLBACKS;
             } else if (!mCallbackEnabled.load()) {
-                ALOGW("processCallbackCommon() stopping because callback disabled");
+                ALOGW("processCallbackCommon() no data because callback disabled");
                 audioBuffer->size = SIZE_STOP_CALLBACKS;
             } else {
                 if (audioBuffer->frameCount == 0) {
@@ -115,10 +116,16 @@
                 }
                 if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
                     audioBuffer->size = audioBuffer->frameCount * getBytesPerDeviceFrame();
-                } else { // STOP or invalid result
-                    ALOGW("%s() callback requested stop, fake an error", __func__);
-                    audioBuffer->size = SIZE_STOP_CALLBACKS;
-                    // Disable the callback just in case AudioFlinger keeps trying to call us.
+                } else {
+                    if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+                        ALOGD("%s() callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
+                    } else {
+                        ALOGW("%s() callback returned invalid result = %d",
+                              __func__, callbackResult);
+                    }
+                    audioBuffer->size = 0;
+                    systemStopFromCallback();
+                    // Disable the callback just in case the system keeps trying to call us.
                     mCallbackEnabled.store(false);
                 }
 
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 1ac2558..c995e99 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -288,7 +288,7 @@
 
 aaudio_result_t AudioStreamTrack::requestPause() {
     if (mAudioTrack.get() == nullptr) {
-        ALOGE("requestPause() no AudioTrack");
+        ALOGE("%s() no AudioTrack", __func__);
         return AAUDIO_ERROR_INVALID_STATE;
     }
 
@@ -304,7 +304,7 @@
 
 aaudio_result_t AudioStreamTrack::requestFlush() {
     if (mAudioTrack.get() == nullptr) {
-        ALOGE("requestFlush() no AudioTrack");
+        ALOGE("%s() no AudioTrack", __func__);
         return AAUDIO_ERROR_INVALID_STATE;
     }
 
@@ -318,7 +318,7 @@
 
 aaudio_result_t AudioStreamTrack::requestStop() {
     if (mAudioTrack.get() == nullptr) {
-        ALOGE("requestStop() no AudioTrack");
+        ALOGE("%s() no AudioTrack", __func__);
         return AAUDIO_ERROR_INVALID_STATE;
     }
 
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 827df6a..1417aaf 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -50,6 +50,7 @@
         "libmediametrics",
         "libmediautils",
         "libnblog",
+        "libprocessgroup",
         "libutils",
     ],
     export_shared_lib_headers: ["libbinder"],
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 65b8a38..72a23e3 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -26,6 +26,7 @@
 #include <media/AudioRecord.h>
 #include <utils/Log.h>
 #include <private/media/AudioTrackShared.h>
+#include <processgroup/sched_policy.h>
 #include <media/IAudioFlinger.h>
 #include <media/MediaAnalyticsItem.h>
 #include <media/TypeConverter.h>
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index baeae8b..4c762ed 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -784,7 +784,8 @@
 status_t AudioSystem::setDeviceConnectionState(audio_devices_t device,
                                                audio_policy_dev_state_t state,
                                                const char *device_address,
-                                               const char *device_name)
+                                               const char *device_name,
+                                               audio_format_t encodedFormat)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     const char *address = "";
@@ -798,7 +799,7 @@
     if (device_name != NULL) {
         name = device_name;
     }
-    return aps->setDeviceConnectionState(device, state, address, name);
+    return aps->setDeviceConnectionState(device, state, address, name, encodedFormat);
 }
 
 audio_policy_dev_state_t AudioSystem::getDeviceConnectionState(audio_devices_t device,
@@ -812,7 +813,8 @@
 
 status_t AudioSystem::handleDeviceConfigChange(audio_devices_t device,
                                                const char *device_address,
-                                               const char *device_name)
+                                               const char *device_name,
+                                               audio_format_t encodedFormat)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     const char *address = "";
@@ -826,7 +828,7 @@
     if (device_name != NULL) {
         name = device_name;
     }
-    return aps->handleDeviceConfigChange(device, address, name);
+    return aps->handleDeviceConfigChange(device, address, name, encodedFormat);
 }
 
 status_t AudioSystem::setPhoneState(audio_mode_t state)
@@ -1335,6 +1337,13 @@
     return aps->isHapticPlaybackSupported();
 }
 
+status_t AudioSystem::getHwOffloadEncodingFormatsSupportedForA2DP(
+                                std::vector<audio_format_t> *formats)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+    return aps->getHwOffloadEncodingFormatsSupportedForA2DP(formats);
+}
 
 // ---------------------------------------------------------------------------
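Editor's note (not part of the patch): a hedged sketch of how a system-side caller might use the new AudioSystem entry point added above. The LOG_TAG value and logA2dpOffloadFormats() name are illustrative only.

    #define LOG_TAG "A2dpOffloadFormatsDemo"

    #include <media/AudioSystem.h>
    #include <utils/Log.h>
    #include <vector>

    void logA2dpOffloadFormats() {
        std::vector<audio_format_t> formats;
        // Returns PERMISSION_DENIED if the audio policy service is unreachable,
        // and the Binder stub below restricts the transaction to service UIDs.
        if (android::AudioSystem::getHwOffloadEncodingFormatsSupportedForA2DP(&formats)
                == android::NO_ERROR) {
            for (audio_format_t format : formats) {
                ALOGI("A2DP HW offload encoding format: %#x", (unsigned)format);
            }
        }
    }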
 
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index b444d2d..e9a0e22 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -29,6 +29,7 @@
 #include <media/AudioTrack.h>
 #include <utils/Log.h>
 #include <private/media/AudioTrackShared.h>
+#include <processgroup/sched_policy.h>
 #include <media/IAudioFlinger.h>
 #include <media/IAudioPolicyService.h>
 #include <media/AudioParameter.h>
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 272415c..8c7fac5 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -92,6 +92,7 @@
     IS_HAPTIC_PLAYBACK_SUPPORTED,
     SET_UID_DEVICE_AFFINITY,
     REMOVE_UID_DEVICE_AFFINITY,
+    GET_OFFLOAD_FORMATS_A2DP
 };
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -108,7 +109,8 @@
                                     audio_devices_t device,
                                     audio_policy_dev_state_t state,
                                     const char *device_address,
-                                    const char *device_name)
+                                    const char *device_name,
+                                    audio_format_t encodedFormat)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -116,6 +118,7 @@
         data.writeInt32(static_cast <uint32_t>(state));
         data.writeCString(device_address);
         data.writeCString(device_name);
+        data.writeInt32(static_cast <uint32_t>(encodedFormat));
         remote()->transact(SET_DEVICE_CONNECTION_STATE, data, &reply);
         return static_cast <status_t> (reply.readInt32());
     }
@@ -134,13 +137,15 @@
 
     virtual status_t handleDeviceConfigChange(audio_devices_t device,
                                               const char *device_address,
-                                              const char *device_name)
+                                              const char *device_name,
+                                              audio_format_t encodedFormat)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(static_cast <uint32_t>(device));
         data.writeCString(device_address);
         data.writeCString(device_name);
+        data.writeInt32(static_cast <uint32_t>(encodedFormat));
         remote()->transact(HANDLE_DEVICE_CONFIG_CHANGE, data, &reply);
         return static_cast <status_t> (reply.readInt32());
     }
@@ -884,7 +889,30 @@
         return reply.readInt32();
     }
 
-    virtual status_t addStreamDefaultEffect(const effect_uuid_t *type,
+    virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+                std::vector<audio_format_t> *formats)
+    {
+        if (formats == NULL) {
+            return BAD_VALUE;
+        }
+
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_OFFLOAD_FORMATS_A2DP, data, &reply);
+        if (status != NO_ERROR || (status = (status_t)reply.readInt32()) != NO_ERROR) {
+            return status;
+        }
+
+        size_t list_size = reply.readUint32();
+
+        for (size_t i = 0; i < list_size; i++) {
+            formats->push_back(static_cast<audio_format_t>(reply.readInt32()));
+        }
+        return NO_ERROR;
+    }
+
+
+    virtual status_t addStreamDefaultEffect(const effect_uuid_t *type,
                                             const String16& opPackageName,
                                             const effect_uuid_t *uuid,
                                             int32_t priority,
@@ -1096,7 +1124,8 @@
         case SET_ASSISTANT_UID:
         case SET_A11Y_SERVICES_UIDS:
         case SET_UID_DEVICE_AFFINITY:
-        case REMOVE_UID_DEVICE_AFFINITY: {
+        case REMOVE_UID_DEVICE_AFFINITY:
+        case GET_OFFLOAD_FORMATS_A2DP: {
             if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
                 ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
                       __func__, code, IPCThreadState::self()->getCallingPid(),
@@ -1121,6 +1150,7 @@
                     static_cast <audio_policy_dev_state_t>(data.readInt32());
             const char *device_address = data.readCString();
             const char *device_name = data.readCString();
+            audio_format_t codecFormat = static_cast <audio_format_t>(data.readInt32());
             if (device_address == nullptr || device_name == nullptr) {
                 ALOGE("Bad Binder transaction: SET_DEVICE_CONNECTION_STATE for device %u", device);
                 reply->writeInt32(static_cast<int32_t> (BAD_VALUE));
@@ -1128,7 +1158,8 @@
                 reply->writeInt32(static_cast<uint32_t> (setDeviceConnectionState(device,
                                                                                   state,
                                                                                   device_address,
-                                                                                  device_name)));
+                                                                                  device_name,
+                                                                                  codecFormat)));
             }
             return NO_ERROR;
         } break;
@@ -1154,13 +1185,16 @@
                     static_cast <audio_devices_t>(data.readInt32());
             const char *device_address = data.readCString();
             const char *device_name = data.readCString();
+            audio_format_t codecFormat =
+                    static_cast <audio_format_t>(data.readInt32());
             if (device_address == nullptr || device_name == nullptr) {
                 ALOGE("Bad Binder transaction: HANDLE_DEVICE_CONFIG_CHANGE for device %u", device);
                 reply->writeInt32(static_cast<int32_t> (BAD_VALUE));
             } else {
                 reply->writeInt32(static_cast<uint32_t> (handleDeviceConfigChange(device,
                                                                                   device_address,
-                                                                                  device_name)));
+                                                                                  device_name,
+                                                                                  codecFormat)));
             }
             return NO_ERROR;
         } break;
@@ -1745,6 +1779,21 @@
             return NO_ERROR;
         }
 
+        case GET_OFFLOAD_FORMATS_A2DP: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            std::vector<audio_format_t> encodingFormats;
+            status_t status = getHwOffloadEncodingFormatsSupportedForA2DP(&encodingFormats);
+            reply->writeInt32(status);
+            if (status != NO_ERROR) {
+                return NO_ERROR;
+            }
+            reply->writeUint32(static_cast<uint32_t>(encodingFormats.size()));
+            for (size_t i = 0; i < encodingFormats.size(); i++)
+                reply->writeInt32(static_cast<int32_t>(encodingFormats[i]));
+            return NO_ERROR;
+        }
+
+
         case ADD_STREAM_DEFAULT_EFFECT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             effect_uuid_t type;
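Editor's note (not part of the patch): the GET_OFFLOAD_FORMATS_A2DP reply written by the stub above is a status (int32), then a count (uint32), then that many int32 format values. A hedged reader-side sketch of that layout; readOffloadFormatsReply() is an illustrative helper, not code from the tree.

    #include <binder/Parcel.h>
    #include <system/audio.h>
    #include <utils/Errors.h>
    #include <vector>

    static android::status_t readOffloadFormatsReply(const android::Parcel &reply,
                                                     std::vector<audio_format_t> *formats) {
        // Mirrors the proxy above: first the status written by the stub...
        android::status_t status = static_cast<android::status_t>(reply.readInt32());
        if (status != android::NO_ERROR) {
            return status;
        }
        // ...then the element count, then one int32 per format.
        uint32_t count = reply.readUint32();
        for (uint32_t i = 0; i < count; i++) {
            formats->push_back(static_cast<audio_format_t>(reply.readInt32()));
        }
        return android::NO_ERROR;
    }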
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 781e9df..a208602 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -209,12 +209,14 @@
     // IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
     //
     static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state,
-                                             const char *device_address, const char *device_name);
+                                             const char *device_address, const char *device_name,
+                                             audio_format_t encodedFormat);
     static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
                                                                 const char *device_address);
     static status_t handleDeviceConfigChange(audio_devices_t device,
                                              const char *device_address,
-                                             const char *device_name);
+                                             const char *device_name,
+                                             audio_format_t encodedFormat);
     static status_t setPhoneState(audio_mode_t state);
     static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
     static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
@@ -342,6 +344,9 @@
 
     static status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
 
+    static status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+                                    std::vector<audio_format_t> *formats);
+
     // numSurroundFormats holds the maximum number of formats and bool value allowed in the array.
     // When numSurroundFormats is 0, surroundFormats and surroundFormatsEnabled will not be
     // populated. The actual number of surround formats should be returned at numSurroundFormats.
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index fb4fe93..177adc2 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -44,12 +44,14 @@
     virtual status_t setDeviceConnectionState(audio_devices_t device,
                                               audio_policy_dev_state_t state,
                                               const char *device_address,
-                                              const char *device_name) = 0;
+                                              const char *device_name,
+                                              audio_format_t encodedFormat) = 0;
     virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
                                                                   const char *device_address) = 0;
     virtual status_t handleDeviceConfigChange(audio_devices_t device,
                                               const char *device_address,
-                                              const char *device_name) = 0;
+                                              const char *device_name,
+                                              audio_format_t encodedFormat) = 0;
     virtual status_t setPhoneState(audio_mode_t state) = 0;
     virtual status_t setForceUse(audio_policy_force_use_t usage,
                                     audio_policy_forced_cfg_t config) = 0;
@@ -186,6 +188,8 @@
                                         audio_format_t *surroundFormats,
                                         bool *surroundFormatsEnabled,
                                         bool reported) = 0;
+    virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+                                        std::vector<audio_format_t> *formats) = 0;
     virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled) = 0;
 
     virtual status_t setAssistantUid(uid_t uid) = 0;
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_SetTimeConstant.c b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_SetTimeConstant.c
index 48f5d54..9d3ee88 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_SetTimeConstant.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_SetTimeConstant.c
@@ -51,7 +51,7 @@
                                LVM_INT16           NumChannels)
 {
 #ifdef HIGHER_FS
-    LVM_FLOAT   DeltaTable[11] = {0.500000f,/*8000*/
+    LVM_FLOAT   DeltaTable[13] = {0.500000f,/*8000*/
                                   0.362812f,/*11025*/
                                   0.333333f,/*12000*/
                                   0.250000f,/*16000*/
@@ -60,7 +60,9 @@
                                   0.125000f,/*32000*/
                                   0.090703f,/*44100*/
                                   0.083333f,/*48000*/
+                                  0.045352f,/*88200*/
                                   0.041667f,/*96000*/
+                                  0.022676f,/*176400*/
                                   0.020833f};/*192000*/
 #else
     LVM_FLOAT   DeltaTable[9] = {0.500000f,/*8000*/
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_VarSlope_SetTimeConstant.c b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_VarSlope_SetTimeConstant.c
index 9dc7d21..0e0acf1 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_VarSlope_SetTimeConstant.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_VarSlope_SetTimeConstant.c
@@ -52,7 +52,7 @@
                                          LVM_INT16           NumChannels)
 {
 #ifdef HIGHER_FS
-     LVM_FLOAT   DeltaTable[11] = {0.500000f,/*8000*/
+     LVM_FLOAT   DeltaTable[13] = {0.500000f,/*8000*/
                                    0.362812f,/*11025*/
                                    0.333333f,/*12000*/
                                    0.250000f,/*16000*/
@@ -61,7 +61,9 @@
                                    0.125000f,/*32000*/
                                    0.090703f,/*44100*/
                                    0.083333f,/*48000*/
+                                   0.045352f,/*88200*/
                                    0.041666f,/*96000*/
+                                   0.022676f,/*176400*/
                                    0.020833f};/*192000*/
 #else
     LVM_FLOAT   DeltaTable[9] = {0.500000f,/*8000*/
diff --git a/media/libeffects/lvm/lib/Common/src/LVM_GetOmega.c b/media/libeffects/lvm/lib/Common/src/LVM_GetOmega.c
index 7846ca0..6307e68 100644
--- a/media/libeffects/lvm/lib/Common/src/LVM_GetOmega.c
+++ b/media/libeffects/lvm/lib/Common/src/LVM_GetOmega.c
@@ -53,7 +53,9 @@
 #define LVVDL_2PiBy_48000_f       0.000130900f
 
 #ifdef HIGHER_FS
+#define LVVDL_2PiBy_88200_f       0.000071238f
 #define LVVDL_2PiBy_96000_f       0.000065450f
+#define LVVDL_2PiBy_176400_f      0.000035619f
 #define LVVDL_2PiBy_192000_f      0.000032725f
 #endif
 const LVM_FLOAT     LVVDL_2PiOnFsTable[] =  {LVVDL_2PiBy_8000_f,
@@ -66,7 +68,9 @@
                                              LVVDL_2PiBy_44100_f,
                                              LVVDL_2PiBy_48000_f
 #ifdef HIGHER_FS
+                                            ,LVVDL_2PiBy_88200_f
                                             ,LVVDL_2PiBy_96000_f
+                                            ,LVVDL_2PiBy_176400_f
                                             ,LVVDL_2PiBy_192000_f
 #endif
                                            };
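Editor's note: the constants added for 88.2 kHz and 176.4 kHz in the three tables above are consistent with their 1/Fs scaling; each new entry is exactly half of the corresponding 44.1 kHz / 88.2 kHz value. For the mixer delta tables, 0.090703 / 2 ≈ 0.045352 and 0.045352 / 2 = 0.022676; for the 2*pi/Fs table, 2*pi/88200 ≈ 0.000071238 and 2*pi/176400 ≈ 0.000035619.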
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
index e45d81f..ba05577 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
@@ -239,13 +239,12 @@
 #define LVCS_STEREODELAY_CS_24KHZ                   279         /* Sample rate 24kS/s */
 #define LVCS_STEREODELAY_CS_32KHZ                   372         /* Sample rate 32kS/s */
 #define LVCS_STEREODELAY_CS_44KHZ                   512         /* Sample rate 44kS/s */
-// TODO: this should linearly scale by frequency but is limited to 512 frames until
-// we ensure enough buffer size has been allocated.
-#define LVCS_STEREODELAY_CS_48KHZ                   512         /* Sample rate 48kS/s */
-#define LVCS_STEREODELAY_CS_88KHZ                   512         /* Sample rate 88.2kS/s */
-#define LVCS_STEREODELAY_CS_96KHZ                   512         /* Sample rate 96kS/s */
-#define LVCS_STEREODELAY_CS_176KHZ                  512         /* Sample rate 176.4kS/s */
-#define LVCS_STEREODELAY_CS_192KHZ                  512         /* Sample rate 196kS/s */
+#define LVCS_STEREODELAY_CS_48KHZ                   557         /* Sample rate 48kS/s */
+#define LVCS_STEREODELAY_CS_88KHZ                   1024        /* Sample rate 88.2kS/s */
+#define LVCS_STEREODELAY_CS_96KHZ                   1115        /* Sample rate 96kS/s */
+#define LVCS_STEREODELAY_CS_176KHZ                  2048        /* Sample rate 176.4kS/s */
+#define LVCS_STEREODELAY_CS_192KHZ                  2229        /* Sample rate 192kS/s */
+#define LVCS_STEREODELAY_CS_MAX_VAL                 LVCS_STEREODELAY_CS_192KHZ
 
 /* Reverb coefficients for 8000 Hz sample rate, scaled with 1.038030 */
 #define CS_REVERB_8000_A0                          0.667271
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.h b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.h
index 69892b6..f94d4e4 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.h
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.h
@@ -65,7 +65,7 @@
     /* Filter */
     void                        (*pBiquadCallBack) (Biquad_Instance_t*, LVM_INT16*, LVM_INT16*, LVM_INT16);
 #else
-    LVM_FLOAT                   StereoSamples[2 * LVCS_STEREODELAY_CS_48KHZ];
+    LVM_FLOAT                   StereoSamples[2 * LVCS_STEREODELAY_CS_MAX_VAL];
     /* Reverb Level */
     LVM_FLOAT                   ReverbLevel;
     /* Filter */
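Editor's note: the new stereo delay values scale linearly with sample rate from the 44.1 kHz reference of 512 samples: 512 * 48000/44100 ≈ 557, 512 * 88200/44100 = 1024, 512 * 96000/44100 ≈ 1115, 512 * 176400/44100 = 2048, and 512 * 192000/44100 ≈ 2229. Sizing StereoSamples with LVCS_STEREODELAY_CS_MAX_VAL therefore reserves 2 * 2229 floats, enough for the 192 kHz worst case that the previous 512-sample cap could not hold.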
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 3efb5de..68dae56 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -213,6 +213,7 @@
         "android.hidl.token@1.0-utils",
         "liblog",
         "libcutils",
+        "libprocessgroup",
         "libutils",
         "libbinder",
         "libsonivox",
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index 590ba1a..f9fa86e 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -23,6 +23,7 @@
 #include <media/IDataSource.h>
 #include <media/IMediaHTTPService.h>
 #include <media/IMediaMetadataRetriever.h>
+#include <processgroup/sched_policy.h>
 #include <utils/String8.h>
 #include <utils/KeyedVector.h>
 
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index c24e046..aa77cd3 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -209,6 +209,14 @@
     MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MAT_1_0),
     MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MAT_2_0),
     MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MAT_2_1),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LATM),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LATM_LC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LATM_HE_V1),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LATM_HE_V2),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_CELT),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_APTX_ADAPTIVE),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_LHDC),
+    MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_LHDC_LL),
     TERMINATOR
 };
 
diff --git a/media/libmedia/include/media/DrmHal.h b/media/libmedia/include/media/DrmHal.h
index de0f3c7..7be5cf2 100644
--- a/media/libmedia/include/media/DrmHal.h
+++ b/media/libmedia/include/media/DrmHal.h
@@ -38,6 +38,7 @@
 using drm::V1_0::IDrmPlugin;
 using drm::V1_0::IDrmPluginListener;
 using drm::V1_0::KeyStatus;
+using drm::V1_1::SecurityLevel;
 using drm::V1_2::OfflineLicenseState;
 using ::android::hardware::hidl_vec;
 using ::android::hardware::Return;
@@ -62,7 +63,9 @@
 
     virtual status_t initCheck() const;
 
-    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType);
+    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16],
+                                         const String8& mimeType,
+                                         DrmPlugin::SecurityLevel level);
 
     virtual status_t createPlugin(const uint8_t uuid[16],
                                   const String8 &appPackageName);
@@ -223,6 +226,10 @@
     status_t getPropertyStringInternal(String8 const &name, String8 &value) const;
     status_t getPropertyByteArrayInternal(String8 const &name,
                                           Vector<uint8_t> &value) const;
+    bool matchMimeTypeAndSecurityLevel(sp<IDrmFactory> &factory,
+                                       const uint8_t uuid[16],
+                                       const String8 &mimeType,
+                                       DrmPlugin::SecurityLevel level);
 
     DISALLOW_EVIL_CONSTRUCTORS(DrmHal);
 };
diff --git a/media/libmedia/include/media/IDrm.h b/media/libmedia/include/media/IDrm.h
index 49166c6..a32756f 100644
--- a/media/libmedia/include/media/IDrm.h
+++ b/media/libmedia/include/media/IDrm.h
@@ -34,7 +34,9 @@
 
     virtual status_t initCheck() const = 0;
 
-    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) = 0;
+    virtual bool isCryptoSchemeSupported(const uint8_t uuid[16],
+                                         const String8 &mimeType,
+                                         DrmPlugin::SecurityLevel securityLevel) = 0;
 
     virtual status_t createPlugin(const uint8_t uuid[16],
                                   const String8 &appPackageName) = 0;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
index 8d876da..67a0f1e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
@@ -159,7 +159,8 @@
     if (drm != NULL) {
         for (size_t i = 0; i < psshDRMs.size(); i++) {
             DrmUUID uuid = psshDRMs[i];
-            if (drm->isCryptoSchemeSupported(uuid.ptr(), String8()))
+            if (drm->isCryptoSchemeSupported(uuid.ptr(), String8(),
+                            DrmPlugin::kSecurityLevelUnknown))
                 supportedDRMs.add(uuid);
         }
 
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 9511931..4ed3382 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -57,7 +57,7 @@
 }
 
 MediaTrack *MediaExtractorCUnwrapper::getTrack(size_t index) {
-    return new MediaTrackCUnwrapper(plugin->getTrack(plugin->data, index));
+    return MediaTrackCUnwrapper::create(plugin->getTrack(plugin->data, index));
 }
 
 status_t MediaExtractorCUnwrapper::getTrackMetaData(
diff --git a/media/libstagefright/MediaTrack.cpp b/media/libstagefright/MediaTrack.cpp
index 036e79d..89c9b25 100644
--- a/media/libstagefright/MediaTrack.cpp
+++ b/media/libstagefright/MediaTrack.cpp
@@ -65,6 +65,13 @@
     bufferGroup = nullptr;
 }
 
+MediaTrackCUnwrapper *MediaTrackCUnwrapper::create(CMediaTrack *cmediatrack) {
+    if (cmediatrack == nullptr) {
+        return nullptr;
+    }
+    return new MediaTrackCUnwrapper(cmediatrack);
+}
+
 MediaTrackCUnwrapper::~MediaTrackCUnwrapper() {
     wrapper->free(wrapper->data);
     free(wrapper);
diff --git a/media/libstagefright/data/media_codecs_google_c2_telephony.xml b/media/libstagefright/data/media_codecs_google_c2_telephony.xml
new file mode 100644
index 0000000..d1055b3
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_c2_telephony.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<Included>
+    <Decoders>
+        <MediaCodec name="c2.android.gsm.decoder" type="audio/gsm">
+            <Limit name="channel-count" max="1" />
+            <Limit name="sample-rate" ranges="8000" />
+            <Limit name="bitrate" range="13000" />
+        </MediaCodec>
+    </Decoders>
+</Included>
diff --git a/media/libstagefright/data/media_codecs_google_c2_tv.xml b/media/libstagefright/data/media_codecs_google_c2_tv.xml
new file mode 100644
index 0000000..fa082c7
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_c2_tv.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<Included>
+    <Decoders>
+        <MediaCodec name="c2.android.mpeg2.decoder" type="video/mpeg2">
+            <!-- profiles and levels:  ProfileMain : LevelHL -->
+            <Limit name="size" min="16x16" max="1920x1088" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="16x16" />
+            <Limit name="blocks-per-second" range="1-244800" />
+            <Limit name="bitrate" range="1-20000000" />
+            <Feature name="adaptive-playback" />
+        </MediaCodec>
+    </Decoders>
+</Included>
diff --git a/media/mediaserver/mediaserver.rc b/media/mediaserver/mediaserver.rc
index f6c325c..8cfcd79 100644
--- a/media/mediaserver/mediaserver.rc
+++ b/media/mediaserver/mediaserver.rc
@@ -2,5 +2,7 @@
     class main
     user media
     group audio camera inet net_bt net_bt_admin net_bw_acct drmrpc mediadrm
+    # TODO(b/123275379): Remove updatable when http://aosp/878198 has landed
+    updatable
     ioprio rt 4
     writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 55afb33..9082f62 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -274,7 +274,7 @@
     }
 
     String8 mimeStr = mimeType ? String8(mimeType) : String8("");
-    return drm->isCryptoSchemeSupported(uuid, mimeStr);
+    return drm->isCryptoSchemeSupported(uuid, mimeStr, DrmPlugin::kSecurityLevelUnknown);
 }
 
 EXPORT
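Editor's note (not part of the patch): the public NDK call has no security-level parameter, so AMediaDrm_isCryptoSchemeSupported() now maps to the internal query above with DrmPlugin::kSecurityLevelUnknown. A hedged app-side sketch; the Widevine UUID is shown purely as an example scheme.

    #include <media/NdkMediaDrm.h>

    bool isWidevineMp4Supported() {
        // 16-byte UUID of the Widevine DRM scheme, edef8ba9-79d6-4ace-a3c8-27dcd51d21ed.
        static const uint8_t kWidevineUuid[16] = {
            0xed, 0xef, 0x8b, 0xa9, 0x79, 0xd6, 0x4a, 0xce,
            0xa3, 0xc8, 0x27, 0xdc, 0xd5, 0x1d, 0x21, 0xed};
        // No security-level argument at this layer; internally the query is made
        // with DrmPlugin::kSecurityLevelUnknown.
        return AMediaDrm_isCryptoSchemeSupported(kWidevineUuid, "video/mp4");
    }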
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index 8296598..28e4f12 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -46,6 +46,18 @@
     sp<ABuffer> mPsshBuf;
 };
 
+sp<ABuffer> U32ArrayToSizeBuf(size_t numSubSamples, uint32_t *data) {
+    if (numSubSamples >  SIZE_MAX / sizeof(size_t)) {
+        return NULL;
+    }
+    sp<ABuffer> sizebuf = new ABuffer(numSubSamples * sizeof(size_t));
+    size_t *sizes = (size_t *)sizebuf->data();
+    for (size_t i = 0; sizes != NULL && i < numSubSamples; i++) {
+        sizes[i] = data[i];
+    }
+    return sizebuf;
+}
+
 extern "C" {
 
 EXPORT
@@ -339,7 +351,7 @@
     if (!meta->findData(kKeyEncryptedSizes, &type, &crypteddata, &cryptedsize)) {
         return NULL;
     }
-    size_t numSubSamples = cryptedsize / sizeof(size_t);
+    size_t numSubSamples = cryptedsize / sizeof(uint32_t);
 
     const void *cleardata;
     size_t clearsize;
@@ -373,6 +385,16 @@
         mode = CryptoPlugin::kMode_AES_CTR;
     }
 
+    if (sizeof(uint32_t) != sizeof(size_t)) {
+        sp<ABuffer> clearbuf   = U32ArrayToSizeBuf(numSubSamples, (uint32_t *)cleardata);
+        sp<ABuffer> cryptedbuf = U32ArrayToSizeBuf(numSubSamples, (uint32_t *)crypteddata);
+        cleardata   = clearbuf    == NULL ? NULL : clearbuf->data();
+        crypteddata = cryptedbuf  == NULL ? NULL : cryptedbuf->data();
+        if (crypteddata == NULL || cleardata == NULL) {
+            return NULL;
+        }
+    }
+
     return AMediaCodecCryptoInfo_new(
             numSubSamples,
             (uint8_t*) key,
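Editor's note (not part of the patch): the numSubSamples fix matters on LP64 builds, where sizeof(size_t) is 8 but kKeyEncryptedSizes stores each subsample size as a uint32_t. A standalone sketch of the arithmetic; the sample values are made up.

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Four subsample sizes as an extractor would serialize them (illustrative values).
        const uint32_t stored[] = {16, 4096, 16, 4096};
        const size_t bytes = sizeof(stored);                          // 16 bytes on any platform
        printf("old count on LP64: %zu\n", bytes / sizeof(size_t));   // 2 -- half the real count
        printf("fixed count      : %zu\n", bytes / sizeof(uint32_t)); // 4 -- matches the data
        return 0;
    }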
diff --git a/packages/MediaComponents/apex/java/android/media/session/MediaSession.java b/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
index 73e16a6..3cbeff9 100644
--- a/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
+++ b/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
@@ -1077,8 +1077,7 @@
 
         private static RemoteUserInfo createRemoteUserInfo(String packageName, int pid, int uid,
                 ISessionControllerCallback caller) {
-            return new RemoteUserInfo(packageName, pid, uid,
-                    caller != null ? caller.asBinder() : null);
+            return new RemoteUserInfo(packageName, pid, uid);
         }
 
         @Override
diff --git a/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java b/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
index a66ec35..76c99b9 100644
--- a/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
+++ b/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
@@ -544,8 +544,7 @@
             throw new IllegalStateException("This should be called inside of onGetRoot or"
                     + " onLoadChildren or onLoadItem methods");
         }
-        return new RemoteUserInfo(mCurConnection.pkg, mCurConnection.pid, mCurConnection.uid,
-                mCurConnection.callbacks.asBinder());
+        return new RemoteUserInfo(mCurConnection.pkg, mCurConnection.pid, mCurConnection.uid);
     }
 
     /**
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 1c2b9d7..cf2ce99 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -75,14 +75,16 @@
     virtual status_t setDeviceConnectionState(audio_devices_t device,
                                               audio_policy_dev_state_t state,
                                               const char *device_address,
-                                              const char *device_name) = 0;
+                                              const char *device_name,
+                                              audio_format_t encodedFormat) = 0;
     // retrieve a device connection status
     virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
                                                                           const char *device_address) = 0;
     // indicate a change in device configuration
     virtual status_t handleDeviceConfigChange(audio_devices_t device,
                                               const char *device_address,
-                                              const char *device_name) = 0;
+                                              const char *device_name,
+                                              audio_format_t encodedFormat) = 0;
     // indicate a change in phone state. Valid phones states are defined by audio_mode_t
     virtual void setPhoneState(audio_mode_t state) = 0;
     // force using a specific device category for the specified usage
@@ -234,6 +236,9 @@
 
     virtual bool     isHapticPlaybackSupported() = 0;
 
+    virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+                std::vector<audio_format_t> *formats) = 0;
+
     virtual void     setAppState(uid_t uid, app_state_t state);
 };
 
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 46a2a40..837ca47 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -76,6 +76,21 @@
 }
 
 /**
+ * Check whether audio device has encoding capability.
+ *
+ * @param[in] device to consider
+ *
+ * @return true if the device has encoding capability, false otherwise.
+ */
+static inline bool device_has_encoding_capability(audio_devices_t device)
+{
+    if (device & AUDIO_DEVICE_OUT_ALL_A2DP) {
+        return true;
+    }
+    return false;
+}
+
+/**
  * Returns the priority of a given audio source for capture. The priority is used when more than one
  * capture session is active on a given input stream to determine which session drives routing and
  * effect configuration.
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 14b995b..e1ecc61 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -154,6 +154,7 @@
     void setDevices(const DeviceVector &devices) { mDevices = devices; }
     bool sharesHwModuleWith(const sp<SwAudioOutputDescriptor>& outputDesc);
     virtual DeviceVector supportedDevices() const;
+    virtual bool deviceSupportsEncodedFormats(audio_devices_t device);
     virtual uint32_t latency();
     virtual bool isDuplicated() const { return (mOutput1 != NULL && mOutput2 != NULL); }
     virtual bool isFixedVolume(audio_devices_t device);
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index e6a62d9..2932296 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -23,9 +23,10 @@
 #include <system/audio.h>
 #include <utils/String8.h>
 
-namespace android {
+#include <DeviceDescriptor.h>
+#include <AudioOutputDescriptor.h>
 
-class SwAudioOutputDescriptor;
+namespace android {
 
 /**
  * custom mix entry in mPolicyMixes
@@ -79,6 +80,18 @@
                                                        const DeviceVector &availableDeviceTypes,
                                                        AudioMix **policyMix);
 
+    /**
+     * @brief tries to find a matching mix for a given output descriptor and returns the associated
+     * output device.
+     * @param output to be considered
+     * @param availableOutputDevices list of output devices currently reachable
+     * @param policyMix to be returned if any mix matches the output descriptor
+     * @return device selected from the mix attached to the output, null pointer otherwise
+     */
+    sp<DeviceDescriptor> getDeviceAndMixForOutput(const sp<SwAudioOutputDescriptor> &output,
+                                                  const DeviceVector &availableOutputDevices,
+                                                  AudioMix **policyMix = nullptr);
+
     status_t getInputMixForAttr(audio_attributes_t attr, AudioMix **policyMix);
 
     status_t setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices);
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index b581665..cc43fe6 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -44,8 +44,18 @@
 
     const FormatVector& encodedFormats() const { return mEncodedFormats; }
 
+    audio_format_t getEncodedFormat() { return mCurrentEncodedFormat; }
+
+    void setEncodedFormat(audio_format_t format) {
+        mCurrentEncodedFormat = format;
+    }
+
     bool equals(const sp<DeviceDescriptor>& other) const;
 
+    bool hasCurrentEncodedFormat() const;
+
+    bool supportsFormat(audio_format_t format);
+
     // AudioPortConfig
     virtual sp<AudioPort> getAudioPort() const { return (AudioPort*) this; }
     virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
@@ -69,6 +79,7 @@
     audio_devices_t     mDeviceType;
     FormatVector        mEncodedFormats;
     audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
+    audio_format_t      mCurrentEncodedFormat;
 };
 
 class DeviceVector : public SortedVector<sp<DeviceDescriptor> >
@@ -88,9 +99,10 @@
 
     audio_devices_t types() const { return mDeviceTypes; }
 
-    // If 'address' is empty, a device with a non-empty address may be returned
-    // if there is no device with the specified 'type' and empty address.
-    sp<DeviceDescriptor> getDevice(audio_devices_t type, const String8 &address = {}) const;
+    // If 'address' is empty and 'codec' is AUDIO_FORMAT_DEFAULT, a device with a non-empty
+    // address may be returned if there is no device with the specified 'type' and empty address.
+    sp<DeviceDescriptor> getDevice(audio_devices_t type, const String8 &address,
+                                   audio_format_t codec) const;
     DeviceVector getDevicesFromTypeMask(audio_devices_t types) const;
 
     /**
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index d7dc4b0..eb34da4 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -130,9 +130,11 @@
 public:
     sp<HwModule> getModuleFromName(const char *name) const;
 
-    sp<HwModule> getModuleForDeviceTypes(audio_devices_t device) const;
+    sp<HwModule> getModuleForDeviceTypes(audio_devices_t device,
+                                         audio_format_t encodedFormat) const;
 
-    sp<HwModule> getModuleForDevice(const sp<DeviceDescriptor> &device) const;
+    sp<HwModule> getModuleForDevice(const sp<DeviceDescriptor> &device,
+                                    audio_format_t encodedFormat) const;
 
     DeviceVector getAvailableDevicesFromModuleName(const char *name,
                                                    const DeviceVector &availableDevices) const;
@@ -149,6 +151,7 @@
      * @param type of the device requested
      * @param address of the device requested
      * @param name of the device that requested
+     * @param encodedFormat if not AUDIO_FORMAT_DEFAULT, must match one supported format
      * @param matchAddress true if a strong match is required
      * @param allowToCreate true if allowed to create dynamic device (e.g. hdmi, usb...)
      * @return device descriptor associated to the type (and address if matchAddress is true)
@@ -156,6 +159,7 @@
     sp<DeviceDescriptor> getDeviceDescriptor(const audio_devices_t type,
                                              const char *address,
                                              const char *name,
+                                             audio_format_t encodedFormat,
                                              bool allowToCreate = false,
                                              bool matchAddress = true) const;
 
@@ -171,7 +175,8 @@
      */
     sp<DeviceDescriptor> createDevice(const audio_devices_t type,
                                       const char *address,
-                                      const char *name) const;
+                                      const char *name,
+                                      const audio_format_t encodedFormat) const;
 
     /**
      * @brief cleanUpForDevice: loop on all profiles of all modules to remove device from
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index d0c05a5..dc409a7 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -94,7 +94,10 @@
     bool supportsDeviceTypes(audio_devices_t device) const
     {
         if (audio_is_output_devices(device)) {
-            return mSupportedDevices.types() & device;
+            if (deviceSupportsEncodedFormats(device)) {
+                return mSupportedDevices.types() & device;
+            }
+            return false;
         }
         return mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN);
     }
@@ -116,6 +119,16 @@
         return mSupportedDevices.contains(device);
     }
 
+    bool deviceSupportsEncodedFormats(audio_devices_t device) const
+    {
+        DeviceVector deviceList =
+            mSupportedDevices.getDevicesFromTypeMask(device);
+        if (!deviceList.empty()) {
+            return deviceList.itemAt(0)->hasCurrentEncodedFormat();
+        }
+        return false;
+    }
+
     void clearSupportedDevices() { mSupportedDevices.clear(); }
     void addSupportedDevice(const sp<DeviceDescriptor> &device)
     {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 643cbd1..57328f0 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -364,6 +364,16 @@
     return filteredDevices.filter(devices);
 }
 
+bool SwAudioOutputDescriptor::deviceSupportsEncodedFormats(audio_devices_t device)
+{
+    if (isDuplicated()) {
+        return (mOutput1->deviceSupportsEncodedFormats(device)
+                    || mOutput2->deviceSupportsEncodedFormats(device));
+    } else {
+        return mProfile->deviceSupportsEncodedFormats(device);
+    }
+}
+
 uint32_t SwAudioOutputDescriptor::latency()
 {
     if (isDuplicated()) {
@@ -687,7 +697,9 @@
     for (size_t i = 0; i < size(); i++) {
         sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
         if (!outputDesc->isDuplicated() &&
-                outputDesc->devices().types() & AUDIO_DEVICE_OUT_ALL_A2DP) {
+             outputDesc->devices().types() & AUDIO_DEVICE_OUT_ALL_A2DP &&
+             outputDesc->deviceSupportsEncodedFormats(
+                     AUDIO_DEVICE_OUT_BLUETOOTH_A2DP)) {
             return this->keyAt(i);
         }
     }
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index d18091c..3b9411a 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -280,6 +280,27 @@
     return BAD_VALUE;
 }
 
+sp<DeviceDescriptor> AudioPolicyMixCollection::getDeviceAndMixForOutput(
+        const sp<SwAudioOutputDescriptor> &output,
+        const DeviceVector &availableOutputDevices,
+        AudioMix **policyMix)
+{
+    for (size_t i = 0; i < size(); i++) {
+        if (valueAt(i)->getOutput() == output) {
+            AudioMix *mix = valueAt(i)->getMix();
+            if (policyMix != nullptr)
+                *policyMix = mix;
+            // This descriptor is involved in a policy mix, which takes the highest priority
+            audio_devices_t deviceType = mix->mDeviceType;
+            String8 address = mix->mDeviceAddress;
+            ALOGV("%s: device (0x%x, addr=%s) forced by mix",
+                  __FUNCTION__, deviceType, address.c_str());
+            return availableOutputDevices.getDevice(deviceType, address, AUDIO_FORMAT_DEFAULT);
+        }
+    }
+    return nullptr;
+}
+
 sp<DeviceDescriptor> AudioPolicyMixCollection::getDeviceAndMixForInputSource(
         audio_source_t inputSource, const DeviceVector &availDevices, AudioMix **policyMix)
 {
@@ -296,7 +317,8 @@
                 // assuming PolicyMix only for remote submix for input
                 // so mix->mDeviceType can only be AUDIO_DEVICE_OUT_REMOTE_SUBMIX
                 audio_devices_t device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
-                auto mixDevice = availDevices.getDevice(device, mix->mDeviceAddress);
+                auto mixDevice =
+                        availDevices.getDevice(device, mix->mDeviceAddress, AUDIO_FORMAT_DEFAULT);
                 if (mixDevice != nullptr) {
                     if (policyMix != NULL) {
                         *policyMix = mix;
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 01111c5..1bc4ec8 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -18,6 +18,7 @@
 //#define LOG_NDEBUG 0
 
 #include <audio_utils/string.h>
+#include <set>
 #include "DeviceDescriptor.h"
 #include "TypeConverter.h"
 #include "AudioGain.h"
@@ -37,14 +38,25 @@
                                              AUDIO_PORT_ROLE_SOURCE),
     mTagName(tagName), mDeviceType(type), mEncodedFormats(encodedFormats)
 {
+    mCurrentEncodedFormat = AUDIO_FORMAT_DEFAULT;
     if (type == AUDIO_DEVICE_IN_REMOTE_SUBMIX || type == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ) {
         mAddress = String8("0");
     }
-    /* FIXME: read from APM config file */
-    if (type == AUDIO_DEVICE_OUT_HDMI) {
+    /* If framework runs against a pre 5.0 Audio HAL, encoded formats are absent from the config.
+     * FIXME: APM should know the version of the HAL and not add the formats for V5.0.
+     * For now, the workaround to remove AC3 and IEC61937 support on HDMI is to declare
+     * something like 'encodedFormats="AUDIO_FORMAT_PCM_16_BIT"' on the HDMI devicePort.
+     */
+    if (type == AUDIO_DEVICE_OUT_HDMI && mEncodedFormats.isEmpty()) {
         mEncodedFormats.add(AUDIO_FORMAT_AC3);
         mEncodedFormats.add(AUDIO_FORMAT_IEC61937);
     }
+    // For backward compatibility always indicate support for SBC and AAC if no
+    // supported format is listed in the configuration file
+    if ((type & AUDIO_DEVICE_OUT_ALL_A2DP) != 0 && mEncodedFormats.isEmpty()) {
+        mEncodedFormats.add(AUDIO_FORMAT_SBC);
+        mEncodedFormats.add(AUDIO_FORMAT_AAC);
+    }
 }
 
 audio_port_handle_t DeviceDescriptor::getId() const
@@ -58,21 +70,49 @@
     mId = getNextUniqueId();
 }
 
-void DeviceDescriptor::detach()
-{
+void DeviceDescriptor::detach() {
     mId = AUDIO_PORT_HANDLE_NONE;
     AudioPort::detach();
 }
 
+template<typename T>
+bool checkEqual(const T& f1, const T& f2)
+{
+    std::set<typename T::value_type> s1(f1.begin(), f1.end());
+    std::set<typename T::value_type> s2(f2.begin(), f2.end());
+    return s1 == s2;
+}
+
 bool DeviceDescriptor::equals(const sp<DeviceDescriptor>& other) const
 {
     // Devices are considered equal if they:
     // - are of the same type (a device type cannot be AUDIO_DEVICE_NONE)
     // - have the same address
+    // - have the same encodingFormats (if device supports encoding)
     if (other == 0) {
         return false;
     }
-    return (mDeviceType == other->mDeviceType) && (mAddress == other->mAddress);
+
+    return (mDeviceType == other->mDeviceType) && (mAddress == other->mAddress) &&
+           checkEqual(mEncodedFormats, other->mEncodedFormats);
+}
+
+bool DeviceDescriptor::hasCurrentEncodedFormat() const
+{
+    if (!device_has_encoding_capability(type())) {
+        return true;
+    }
+    return (mCurrentEncodedFormat != AUDIO_FORMAT_DEFAULT);
+}
+
+bool DeviceDescriptor::supportsFormat(audio_format_t format)
+{
+    for (const auto& devFormat : mEncodedFormats) {
+        if (devFormat == format) {
+            return true;
+        }
+    }
+    return false;
 }
 
 void DeviceVector::refreshTypes()
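
Note: the checkEqual() template added above compares the two encodedFormats lists as sets, so DeviceDescriptor::equals() ignores ordering and duplicates. A standalone sketch of that comparison (the integer values below merely stand in for audio_format_t entries):

#include <iostream>
#include <set>
#include <vector>

// Order-insensitive comparison of two format lists, as used by DeviceDescriptor::equals().
template <typename T>
bool checkEqual(const T& f1, const T& f2) {
    std::set<typename T::value_type> s1(f1.begin(), f1.end());
    std::set<typename T::value_type> s2(f2.begin(), f2.end());
    return s1 == s2;
}

int main() {
    std::vector<int> a = {1, 3, 2};  // e.g. {SBC, LDAC, AAC} in one order
    std::vector<int> b = {2, 1, 3};  // the same formats in another order
    std::cout << checkEqual(a, b) << "\n";  // 1: considered equal
}
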
@@ -167,12 +207,17 @@
     return deviceTypes;
 }
 
-sp<DeviceDescriptor> DeviceVector::getDevice(audio_devices_t type, const String8& address) const
+sp<DeviceDescriptor> DeviceVector::getDevice(audio_devices_t type, const String8& address,
+                                             audio_format_t format) const
 {
     sp<DeviceDescriptor> device;
     for (size_t i = 0; i < size(); i++) {
         if (itemAt(i)->type() == type) {
-            if (address == "" || itemAt(i)->address() == address) {
+            // Assign device if address is empty or matches and
+            // format is default or matches
+            if (((address == "" || itemAt(i)->address() == address) &&
+                 format == AUDIO_FORMAT_DEFAULT) ||
+                itemAt(i)->supportsFormat(format)) {
                 device = itemAt(i);
                 if (itemAt(i)->address() == address) {
                     break;
@@ -180,8 +225,8 @@
             }
         }
     }
-    ALOGV("DeviceVector::%s() for type %08x address \"%s\" found %p",
-            __func__, type, address.string(), device.get());
+    ALOGV("DeviceVector::%s() for type %08x address \"%s\" found %p format %08x",
+            __func__, type, address.string(), device.get(), format);
     return device;
 }
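
Note: DeviceVector::getDevice() now also takes a format argument; a device matches when its type matches and either the address check passes with format == AUDIO_FORMAT_DEFAULT, or the device supports the requested format. A minimal standalone sketch of that selection predicate (simplified stand-in types and hypothetical type/format values, not the real DeviceVector):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

using audio_format_t = uint32_t;
constexpr audio_format_t AUDIO_FORMAT_DEFAULT = 0;
constexpr audio_format_t AUDIO_FORMAT_AC3 = 2;

struct Device {
    uint32_t type;
    std::string address;
    std::vector<audio_format_t> encodedFormats;
    bool supportsFormat(audio_format_t f) const {
        return std::find(encodedFormats.begin(), encodedFormats.end(), f) != encodedFormats.end();
    }
};

// Mirrors the matching rule in DeviceVector::getDevice(type, address, format).
const Device* getDevice(const std::vector<Device>& devices, uint32_t type,
                        const std::string& address, audio_format_t format) {
    const Device* found = nullptr;
    for (const auto& d : devices) {
        if (d.type != type) continue;
        if (((address.empty() || d.address == address) && format == AUDIO_FORMAT_DEFAULT) ||
            d.supportsFormat(format)) {
            found = &d;
            if (d.address == address) break;  // exact address match wins
        }
    }
    return found;
}

int main() {
    std::vector<Device> devs = {{0x400 /* HDMI-like type bit */, "hdmi0", {AUDIO_FORMAT_AC3}}};
    std::cout << (getDevice(devs, 0x400, "", AUDIO_FORMAT_DEFAULT) != nullptr) << "\n";  // 1
    std::cout << (getDevice(devs, 0x400, "", AUDIO_FORMAT_AC3) != nullptr) << "\n";      // 1
    std::cout << (getDevice(devs, 0x400, "", 99) != nullptr) << "\n";                    // 0
}
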
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 7d2d094..85d9bce 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -273,32 +273,34 @@
     return nullptr;
 }
 
-sp <HwModule> HwModuleCollection::getModuleForDeviceTypes(audio_devices_t device) const
+sp <HwModule> HwModuleCollection::getModuleForDeviceTypes(audio_devices_t type,
+                                                          audio_format_t encodedFormat) const
 {
     for (const auto& module : *this) {
-        const auto& profiles = audio_is_output_device(device) ?
+        const auto& profiles = audio_is_output_device(type) ?
                 module->getOutputProfiles() : module->getInputProfiles();
         for (const auto& profile : profiles) {
-            if (profile->supportsDeviceTypes(device)) {
-                return module;
+            if (profile->supportsDeviceTypes(type)) {
+                if (encodedFormat != AUDIO_FORMAT_DEFAULT) {
+                    DeviceVector declaredDevices = module->getDeclaredDevices();
+                    sp <DeviceDescriptor> deviceDesc =
+                            declaredDevices.getDevice(type, String8(), encodedFormat);
+                    if (deviceDesc) {
+                        return module;
+                    }
+                } else {
+                    return module;
+                }
             }
         }
     }
     return nullptr;
 }
 
-sp <HwModule> HwModuleCollection::getModuleForDevice(const sp<DeviceDescriptor> &device) const
+sp<HwModule> HwModuleCollection::getModuleForDevice(const sp<DeviceDescriptor> &device,
+                                                     audio_format_t encodedFormat) const
 {
-    for (const auto& module : *this) {
-        const auto& profiles = audio_is_output_device(device->type()) ?
-                module->getOutputProfiles() : module->getInputProfiles();
-        for (const auto& profile : profiles) {
-            if (profile->supportsDevice(device)) {
-                return module;
-            }
-        }
-    }
-    return nullptr;
+    return getModuleForDeviceTypes(device->type(), encodedFormat);
 }
 
 DeviceVector HwModuleCollection::getAvailableDevicesFromModuleName(
@@ -314,6 +316,7 @@
 sp<DeviceDescriptor> HwModuleCollection::getDeviceDescriptor(const audio_devices_t deviceType,
                                                              const char *address,
                                                              const char *name,
+                                                             const audio_format_t encodedFormat,
                                                              bool allowToCreate,
                                                              bool matchAddress) const
 {
@@ -325,8 +328,14 @@
 
     for (const auto& hwModule : *this) {
         DeviceVector moduleDevices = hwModule->getAllDevices();
-        auto moduleDevice = moduleDevices.getDevice(deviceType, devAddress);
+        auto moduleDevice = moduleDevices.getDevice(deviceType, devAddress, encodedFormat);
         if (moduleDevice) {
+            if (encodedFormat != AUDIO_FORMAT_DEFAULT) {
+                moduleDevice->setEncodedFormat(encodedFormat);
+                if (moduleDevice->address() != devAddress) {
+                    moduleDevice->setAddress(devAddress);
+                }
+            }
             if (allowToCreate) {
                 moduleDevice->attach(hwModule);
             }
@@ -338,14 +347,15 @@
               name, deviceType, address);
         return nullptr;
     }
-    return createDevice(deviceType, address, name);
+    return createDevice(deviceType, address, name, encodedFormat);
 }
 
 sp<DeviceDescriptor> HwModuleCollection::createDevice(const audio_devices_t type,
                                                       const char *address,
-                                                      const char *name) const
+                                                      const char *name,
+                                                      const audio_format_t encodedFormat) const
 {
-    sp<HwModule> hwModule = getModuleForDeviceTypes(type);
+    sp<HwModule> hwModule = getModuleForDeviceTypes(type, encodedFormat);
     if (hwModule == 0) {
         ALOGE("%s: could not find HW module for device %04x address %s", __FUNCTION__, type,
               address);
@@ -354,8 +364,9 @@
     sp<DeviceDescriptor> device = new DeviceDescriptor(type, String8(name));
     device->setName(String8(name));
     device->setAddress(String8(address));
+    device->setEncodedFormat(encodedFormat);
 
-    // Add the device to the list of dynamic devices
+    // Add the device to the list of dynamic devices
     hwModule->addDynamicDevice(device);
     // Reciprocally attach the device to the module
     device->attach(hwModule);
@@ -370,7 +381,8 @@
         if (profile->supportsDevice(device, false /*matchAdress*/)) {
 
             // @todo quid of audio profile? import the profile from device of the same type?
-            const auto &isoTypeDeviceForProfile = profile->getSupportedDevices().getDevice(type);
+            const auto &isoTypeDeviceForProfile =
+                profile->getSupportedDevices().getDevice(type, String8(), AUDIO_FORMAT_DEFAULT);
             device->importAudioPort(isoTypeDeviceForProfile, true /* force */);
 
             ALOGV("%s: adding device %s to profile %s", __FUNCTION__,
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 1154654..98d375c 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -140,6 +140,8 @@
         static constexpr const char *roleSource = "source"; /**< <attribute role source value>. */
         /** optional: device address, char string less than 64. */
         static constexpr const char *address = "address";
+        /** optional: the list of encoded audio formats that are known to be supported. */
+        static constexpr const char *encodedFormats = "encodedFormats";
     };
 
     static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
@@ -511,7 +513,13 @@
         ALOGW("%s: bad type %08x", __func__, type);
         return Status::fromStatusT(BAD_VALUE);
     }
-    Element deviceDesc = new DeviceDescriptor(type, String8(name.c_str()));
+    std::string encodedFormatsLiteral = getXmlAttribute(cur, Attributes::encodedFormats);
+    ALOGV("%s: %s %s=%s", __func__, tag, Attributes::encodedFormats, encodedFormatsLiteral.c_str());
+    FormatVector encodedFormats;
+    if (!encodedFormatsLiteral.empty()) {
+        encodedFormats = formatsFromString(encodedFormatsLiteral, " ");
+    }
+    Element deviceDesc = new DeviceDescriptor(type, encodedFormats, String8(name.c_str()));
 
     std::string address = getXmlAttribute(cur, Attributes::address);
     if (!address.empty()) {
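
Note: encodedFormats is a space-separated list of format names (see the TV devicePort added later in this change, which declares AUDIO_FORMAT_AC3 and AUDIO_FORMAT_IEC61937). A minimal sketch of such a split; the real formatsFromString() additionally converts each name to its audio_format_t value via the TypeConverter tables:

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Splits an encodedFormats attribute value into format names.
std::vector<std::string> splitEncodedFormats(const std::string& literal) {
    std::vector<std::string> names;
    std::istringstream in(literal);
    std::string token;
    while (in >> token) {
        names.push_back(token);
    }
    return names;
}

int main() {
    for (const auto& name : splitEncodedFormats("AUDIO_FORMAT_AC3 AUDIO_FORMAT_IEC61937")) {
        std::cout << name << "\n";
    }
}
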
diff --git a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
index 620f361..2625733 100644
--- a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
@@ -26,16 +26,22 @@
 {
     ALOG_ASSERT(!mCurvePoints.isEmpty(), "Invalid volume curve");
 
-    size_t nbCurvePoints = mCurvePoints.size();
-    // the volume index in the UI is relative to the min and max volume indices for this stream
-    int nbSteps = 1 + mCurvePoints[nbCurvePoints - 1].mIndex - mCurvePoints[0].mIndex;
     if (indexInUi < volIndexMin) {
+        // an index of 0 means mute request when volIndexMin > 0
+        if (indexInUi == 0) {
+            ALOGV("VOLUME forcing mute for index 0 with min index %d", volIndexMin);
+            return VOLUME_MIN_DB;
+        }
         ALOGV("VOLUME remapping index from %d to min index %d", indexInUi, volIndexMin);
         indexInUi = volIndexMin;
     } else if (indexInUi > volIndexMax) {
         ALOGV("VOLUME remapping index from %d to max index %d", indexInUi, volIndexMax);
         indexInUi = volIndexMax;
     }
+
+    size_t nbCurvePoints = mCurvePoints.size();
+    // the volume index in the UI is relative to the min and max volume indices for this stream
+    int nbSteps = 1 + mCurvePoints[nbCurvePoints - 1].mIndex - mCurvePoints[0].mIndex;
     int volIdx = (nbSteps * (indexInUi - volIndexMin)) / (volIndexMax - volIndexMin);
 
     // Where would this volume index been inserted in the curve point
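
Note: with the remapping moved ahead of the nbSteps/volIdx computation, an index of 0 below volIndexMin now means an explicit mute (VOLUME_MIN_DB) and any other out-of-range index is clamped before rescaling. A standalone sketch of that handling with illustrative values (not the real curve points):

#include <iostream>

// Illustrative sketch of the index handling in VolumeCurve::volIndexToDb() after this change:
// index 0 below volIndexMin is an explicit mute; other out-of-range indices are clamped
// before the curve position is computed.
int indexToVolIdx(int indexInUi, int volIndexMin, int volIndexMax, int nbSteps, bool* mute) {
    *mute = false;
    if (indexInUi < volIndexMin) {
        if (indexInUi == 0) {
            *mute = true;            // caller returns VOLUME_MIN_DB in this case
            return 0;
        }
        indexInUi = volIndexMin;     // clamp up to the minimum
    } else if (indexInUi > volIndexMax) {
        indexInUi = volIndexMax;     // clamp down to the maximum
    }
    return (nbSteps * (indexInUi - volIndexMin)) / (volIndexMax - volIndexMin);
}

int main() {
    bool mute;
    // e.g. UI indices 1..7 on a curve spanning 100 steps: index 4 maps to step 50.
    std::cout << indexToVolIdx(4, 1, 7, 100, &mute) << " mute=" << mute << "\n";  // 50 mute=0
    indexToVolIdx(0, 1, 7, 100, &mute);
    std::cout << "mute=" << mute << "\n";                                          // mute=1
}
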
diff --git a/services/audiopolicy/config/audio_policy_configuration_generic.xml b/services/audiopolicy/config/audio_policy_configuration_generic.xml
index 58768c3..40dcc22 100644
--- a/services/audiopolicy/config/audio_policy_configuration_generic.xml
+++ b/services/audiopolicy/config/audio_policy_configuration_generic.xml
@@ -37,4 +37,10 @@
 
     <!-- End of Volume section -->
 
+    <!-- Surround Sound configuration -->
+
+    <xi:include href="surround_sound_configuration_5_0.xml"/>
+
+    <!-- End of Surround Sound configuration -->
+
 </audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/audio_policy_configuration_generic_tv.xml b/services/audiopolicy/config/audio_policy_configuration_generic_tv.xml
new file mode 100644
index 0000000..5f1ca31
--- /dev/null
+++ b/services/audiopolicy/config/audio_policy_configuration_generic_tv.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+    <!-- version section contains a “version” tag in the form “major.minor” e.g version=”1.0” -->
+
+    <!-- Global configuration Declaration -->
+    <globalConfiguration speaker_drc_enabled="false"/>
+
+    <modules>
+        <!-- Primary Audio HAL -->
+        <xi:include href="primary_audio_policy_configuration_tv.xml"/>
+
+        <!-- Usb Audio HAL -->
+        <xi:include href="usb_audio_policy_configuration.xml"/>
+
+        <!-- Remote Submix Audio HAL -->
+        <xi:include href="r_submix_audio_policy_configuration.xml"/>
+
+    </modules>
+    <!-- End of Modules section -->
+
+    <!-- Volume section -->
+
+    <xi:include href="audio_policy_volumes.xml"/>
+    <xi:include href="default_volume_tables.xml"/>
+
+    <!-- End of Volume section -->
+
+    <!-- Surround Sound configuration -->
+
+    <xi:include href="surround_sound_configuration_5_0.xml"/>
+
+    <!-- End of Surround Sound configuration -->
+
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/audio_policy_configuration_stub.xml b/services/audiopolicy/config/audio_policy_configuration_stub.xml
index 26c381f..8350eb8 100644
--- a/services/audiopolicy/config/audio_policy_configuration_stub.xml
+++ b/services/audiopolicy/config/audio_policy_configuration_stub.xml
@@ -15,6 +15,9 @@
 -->
 
 <audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+    <!-- Global configuration Declaration -->
+    <globalConfiguration speaker_drc_enabled="false"/>
+
     <modules>
         <!-- Stub Audio HAL -->
         <xi:include href="stub_audio_policy_configuration.xml"/>
@@ -26,5 +29,6 @@
 
     <xi:include href="audio_policy_volumes.xml"/>
     <xi:include href="default_volume_tables.xml"/>
+    <xi:include href="surround_sound_configuration_5_0.xml"/>
 
 </audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/primary_audio_policy_configuration.xml b/services/audiopolicy/config/primary_audio_policy_configuration.xml
index 5b7ae7f..eedc96b 100644
--- a/services/audiopolicy/config/primary_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/primary_audio_policy_configuration.xml
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<!-- Default Primary Audio HAL Module Audio Policy Configuration include flie -->
+<!-- Default Primary Audio HAL Module Audio Policy Configuration include file -->
 <module name="primary" halVersion="2.0">
     <attachedDevices>
         <item>Speaker</item>
diff --git a/services/audiopolicy/config/primary_audio_policy_configuration_tv.xml b/services/audiopolicy/config/primary_audio_policy_configuration_tv.xml
new file mode 100644
index 0000000..826015a
--- /dev/null
+++ b/services/audiopolicy/config/primary_audio_policy_configuration_tv.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Default Primary Audio HAL Module Audio Policy Configuration include file for TV -->
+<module name="primary" halVersion="2.0">
+    <attachedDevices>
+        <item>Speaker</item>
+    </attachedDevices>
+    <defaultOutputDevice>Speaker</defaultOutputDevice>
+    <mixPorts>
+        <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </mixPort>
+        <mixPort name="direct" role="source" flags="AUDIO_OUTPUT_FLAG_DIRECT" />
+        <mixPort name="tunnel" role="source"
+                 flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_HW_AV_SYNC" />
+    </mixPorts>
+    <devicePorts>
+        <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink" />
+        <devicePort tagName="Out Aux Digital" type="AUDIO_DEVICE_OUT_AUX_DIGITAL" role="sink"
+                    encodedFormats="AUDIO_FORMAT_AC3 AUDIO_FORMAT_IEC61937" />
+    </devicePorts>
+    <routes>
+        <route type="mix" sink="Speaker" sources="primary output"/>
+        <route type="mix" sink="Out Aux Digital" sources="primary output,direct,tunnel"/>
+    </routes>
+</module>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
index eb11980..7c87c80 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
@@ -28,6 +28,7 @@
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes RemoteSubmix
+				AvailableOutputDevicesAddresses Includes 0
 
 				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
 					remote_submix = 1
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw
index 883c741..c830c42 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw
@@ -20,6 +20,7 @@
 				TelephonyMode IsNot InCall
 				TelephonyMode IsNot InCommunication
 				AvailableOutputDevices Includes RemoteSubmix
+				AvailableOutputDevicesAddresses Includes 0
 
 				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
 					remote_submix = 1
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw
index f504631..c641138 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw
@@ -61,6 +61,7 @@
 		domain: Device2
 			conf: RemoteSubmix
 				AvailableOutputDevices Includes RemoteSubmix
+				AvailableOutputDevicesAddresses Includes 0
 
 				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
 					remote_submix = 1
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw
index bdb6ae0..f8bab3d 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw
@@ -19,6 +19,7 @@
 		domain: Device2
 			conf: RemoteSubmix
 				AvailableOutputDevices Includes RemoteSubmix
+				AvailableOutputDevicesAddresses Includes 0
 
 				component: /Policy/policy/strategies/media/selected_output_devices/mask
 					speaker = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw
index 04e62f7..28a3629 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw
@@ -24,6 +24,7 @@
 		domain: Device2
 			conf: RemoteSubmix
 				AvailableOutputDevices Includes RemoteSubmix
+				AvailableOutputDevicesAddresses Includes 0
 
 				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
 					remote_submix = 1
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/policy_criterion_types.xml.in b/services/audiopolicy/engineconfigurable/wrapper/config/policy_criterion_types.xml.in
index 6cb799f..fe17369 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/config/policy_criterion_types.xml.in
+++ b/services/audiopolicy/engineconfigurable/wrapper/config/policy_criterion_types.xml.in
@@ -16,7 +16,12 @@
 <criterion_types>
     <criterion_type name="OutputDevicesMaskType" type="inclusive"/>
     <criterion_type name="InputDevicesMaskType" type="inclusive"/>
-    <criterion_type name="OutputDevicesAddressesType" type="inclusive"/>
+    <criterion_type name="OutputDevicesAddressesType" type="inclusive">
+        <values>
+            <!-- legacy remote submix -->
+            <value literal="0" numerical="1"/>
+        </values>
+    </criterion_type>
     <criterion_type name="InputDevicesAddressesType" type="inclusive"/>
     <criterion_type name="AndroidModeType" type="exclusive"/>
     <criterion_type name="BooleanType" type="exclusive">
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 3d68cd8..cc5a025 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -506,7 +506,7 @@
         if (strategy != STRATEGY_SONIFICATION) {
             // no sonification on remote submix (e.g. WFD)
             if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                                                 String8("0")) != 0) {
+                                                 String8("0"), AUDIO_FORMAT_DEFAULT) != 0) {
                 device2 = availableOutputDevices.types() & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
             }
         }
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 5c8a799..798b45f 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -81,9 +81,11 @@
 status_t AudioPolicyManager::setDeviceConnectionState(audio_devices_t device,
                                                       audio_policy_dev_state_t state,
                                                       const char *device_address,
-                                                      const char *device_name)
+                                                      const char *device_name,
+                                                      audio_format_t encodedFormat)
 {
-    status_t status = setDeviceConnectionStateInt(device, state, device_address, device_name);
+    status_t status = setDeviceConnectionStateInt(device, state, device_address,
+                                                  device_name, encodedFormat);
     nextAudioPortGeneration();
     return status;
 }
@@ -101,16 +103,17 @@
 status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t deviceType,
                                                          audio_policy_dev_state_t state,
                                                          const char *device_address,
-                                                         const char *device_name)
+                                                         const char *device_name,
+                                                         audio_format_t encodedFormat)
 {
-    ALOGV("setDeviceConnectionStateInt() device: 0x%X, state %d, address %s name %s",
-            deviceType, state, device_address, device_name);
+    ALOGV("setDeviceConnectionStateInt() device: 0x%X, state %d, address %s name %s format 0x%X",
+            deviceType, state, device_address, device_name, encodedFormat);
 
     // connect/disconnect only 1 device at a time
     if (!audio_is_output_device(deviceType) && !audio_is_input_device(deviceType)) return BAD_VALUE;
 
     sp<DeviceDescriptor> device =
-            mHwModules.getDeviceDescriptor(deviceType, device_address, device_name,
+            mHwModules.getDeviceDescriptor(deviceType, device_address, device_name, encodedFormat,
                                            state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
     if (device == 0) {
         return INVALID_OPERATION;
@@ -133,10 +136,22 @@
                 ALOGW("%s() device already connected: %s", __func__, device->toString().c_str());
                 return INVALID_OPERATION;
             }
-            ALOGV("%s() connecting device %s", __func__, device->toString().c_str());
+            ALOGV("%s() connecting device %s format %x",
+                    __func__, device->toString().c_str(), encodedFormat);
 
             // register new device as available
-            if (mAvailableOutputDevices.add(device) < 0) {
+            index = mAvailableOutputDevices.add(device);
+            if (index >= 0) {
+                sp<HwModule> module = mHwModules.getModuleForDevice(device, encodedFormat);
+                if (module == 0) {
+                    ALOGD("setDeviceConnectionState() could not find HW module for device %s",
+                          device->toString().c_str());
+                    mAvailableOutputDevices.remove(device);
+                    return INVALID_OPERATION;
+                }
+                ALOGV("setDeviceConnectionState() module name=%s", module->getName());
+                mAvailableOutputDevices[index]->attach(module);
+            } else {
                 return NO_MEMORY;
             }
 
@@ -178,6 +193,9 @@
 
             checkOutputsForDevice(device, state, outputs);
 
+            // Reset active device codec
+            device->setEncodedFormat(AUDIO_FORMAT_DEFAULT);
+
             // Propagate device availability to Engine
             mEngine->setDeviceConnectionState(device, state);
             } break;
@@ -248,6 +266,13 @@
                 ALOGW("%s() device already connected: %s", __func__, device->toString().c_str());
                 return INVALID_OPERATION;
             }
+            sp<HwModule> module = mHwModules.getModuleForDevice(device, AUDIO_FORMAT_DEFAULT);
+            if (module == NULL) {
+                ALOGW("setDeviceConnectionState(): could not find HW module for device %s",
+                      device->toString().c_str());
+                return INVALID_OPERATION;
+            }
+
             // Before checking intputs, broadcast connect event to allow HAL to retrieve dynamic
             // parameters on newly connected devices (instead of opening the inputs...)
             broadcastDeviceConnectionState(device, state);
@@ -318,7 +343,8 @@
                                                                       const char *device_address)
 {
     sp<DeviceDescriptor> devDesc =
-            mHwModules.getDeviceDescriptor(device, device_address, "", false /* allowToCreate */,
+            mHwModules.getDeviceDescriptor(device, device_address, "", AUDIO_FORMAT_DEFAULT,
+                                           false /* allowToCreate */,
                                            (strlen(device_address) != 0)/*matchAddress*/);
 
     if (devDesc == 0) {
@@ -338,50 +364,61 @@
         return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
     }
 
-    return (deviceVector->getDevice(device, String8(device_address)) != 0) ?
+    return (deviceVector->getDevice(
+                device, String8(device_address), AUDIO_FORMAT_DEFAULT) != 0) ?
             AUDIO_POLICY_DEVICE_STATE_AVAILABLE : AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
 }
 
 status_t AudioPolicyManager::handleDeviceConfigChange(audio_devices_t device,
                                                       const char *device_address,
-                                                      const char *device_name)
+                                                      const char *device_name,
+                                                      audio_format_t encodedFormat)
 {
     status_t status;
     String8 reply;
     AudioParameter param;
     int isReconfigA2dpSupported = 0;
 
-    ALOGV("handleDeviceConfigChange(() device: 0x%X, address %s name %s",
-          device, device_address, device_name);
+    ALOGV("handleDeviceConfigChange(() device: 0x%X, address %s name %s encodedFormat: 0x%X",
+          device, device_address, device_name, encodedFormat);
 
     // connect/disconnect only 1 device at a time
     if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
 
     // Check if the device is currently connected
-    sp<DeviceDescriptor> devDesc =
-            mHwModules.getDeviceDescriptor(device, device_address, device_name);
-    if (devDesc == 0 || mAvailableOutputDevices.indexOf(devDesc) < 0) {
+    DeviceVector availableDevices = getAvailableOutputDevices();
+    DeviceVector deviceList = availableDevices.getDevicesFromTypeMask(device);
+    if (deviceList.empty()) {
         // Nothing to do: device is not connected
         return NO_ERROR;
     }
+    sp<DeviceDescriptor> devDesc = deviceList.itemAt(0);
 
     // For offloaded A2DP, Hw modules may have the capability to
-    // configure codecs. Check if any of the loaded hw modules
-    // supports this.
-    // If supported, send a set parameter to configure A2DP codecs
-    // and return. No need to toggle device state.
+    // configure codecs.
+    // Handle two specific cases by sending a set parameter to
+    // configure A2DP codecs. No need to toggle device state.
+    // Case 1: A2DP active device switches from primary to primary
+    // module
+    // Case 2: A2DP device config changes on primary module.
     if (device & AUDIO_DEVICE_OUT_ALL_A2DP) {
-        reply = mpClientInterface->getParameters(
-                    AUDIO_IO_HANDLE_NONE,
-                    String8(AudioParameter::keyReconfigA2dpSupported));
-        AudioParameter repliedParameters(reply);
-        repliedParameters.getInt(
-                String8(AudioParameter::keyReconfigA2dpSupported), isReconfigA2dpSupported);
-        if (isReconfigA2dpSupported) {
-            const String8 key(AudioParameter::keyReconfigA2dp);
-            param.add(key, String8("true"));
-            mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
-            return NO_ERROR;
+        sp<HwModule> module = mHwModules.getModuleForDeviceTypes(device, encodedFormat);
+        audio_module_handle_t primaryHandle = mPrimaryOutput->getModuleHandle();
+        if (availablePrimaryOutputDevices().contains(devDesc) &&
+           (module != 0 && module->getHandle() == primaryHandle)) {
+            reply = mpClientInterface->getParameters(
+                        AUDIO_IO_HANDLE_NONE,
+                        String8(AudioParameter::keyReconfigA2dpSupported));
+            AudioParameter repliedParameters(reply);
+            repliedParameters.getInt(
+                    String8(AudioParameter::keyReconfigA2dpSupported), isReconfigA2dpSupported);
+            if (isReconfigA2dpSupported) {
+                const String8 key(AudioParameter::keyReconfigA2dp);
+                param.add(key, String8("true"));
+                mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
+                devDesc->setEncodedFormat(encodedFormat);
+                return NO_ERROR;
+            }
         }
     }
 
@@ -389,7 +426,8 @@
     // This will force reading again the device configuration
     status = setDeviceConnectionState(device,
                                       AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                                      device_address, device_name);
+                                      device_address, device_name,
+                                      devDesc->getEncodedFormat());
     if (status != NO_ERROR) {
         ALOGW("handleDeviceConfigChange() error disabling connection state: %d",
               status);
@@ -398,7 +436,7 @@
 
     status = setDeviceConnectionState(device,
                                       AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                                      device_address, device_name);
+                                      device_address, device_name, encodedFormat);
     if (status != NO_ERROR) {
         ALOGW("handleDeviceConfigChange() error enabling connection state: %d",
               status);
@@ -408,9 +446,47 @@
     return NO_ERROR;
 }
 
+status_t AudioPolicyManager::getHwOffloadEncodingFormatsSupportedForA2DP(
+                                    std::vector<audio_format_t> *formats)
+{
+    ALOGV("getHwOffloadEncodingFormatsSupportedForA2DP()");
+    char *tok = NULL, *saveptr;
+    status_t status = NO_ERROR;
+    char encoding_formats_list[PROPERTY_VALUE_MAX];
+    audio_format_t format = AUDIO_FORMAT_DEFAULT;
+    // FIXME This list should not come from a property but the supported encoded
+    // formats of declared A2DP devices in primary module
+    property_get("persist.bluetooth.a2dp_offload.cap", encoding_formats_list, "");
+    tok = strtok_r(encoding_formats_list, "-", &saveptr);
+    for (;tok != NULL; tok = strtok_r(NULL, "-", &saveptr)) {
+        if (strcmp(tok, "sbc") == 0) {
+            ALOGV("%s: SBC offload supported\n",__func__);
+            format = AUDIO_FORMAT_SBC;
+        } else if (strcmp(tok, "aptx") == 0) {
+            ALOGV("%s: APTX offload supported\n",__func__);
+            format = AUDIO_FORMAT_APTX;
+        } else if (strcmp(tok, "aptxhd") == 0) {
+            ALOGV("%s: APTX HD offload supported\n",__func__);
+            format = AUDIO_FORMAT_APTX_HD;
+        } else if (strcmp(tok, "ldac") == 0) {
+            ALOGV("%s: LDAC offload supported\n",__func__);
+            format = AUDIO_FORMAT_LDAC;
+        } else if (strcmp(tok, "aac") == 0) {
+            ALOGV("%s: AAC offload supported\n",__func__);
+            format = AUDIO_FORMAT_AAC;
+        } else {
+            ALOGE("%s: undefined token - %s\n",__func__, tok);
+            continue;
+        }
+        formats->push_back(format);
+    }
+    return status;
+}
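
Note: getHwOffloadEncodingFormatsSupportedForA2DP() above tokenizes the persist.bluetooth.a2dp_offload.cap property on '-' and maps each codec name to an audio_format_t, skipping unknown tokens. A standalone sketch of that tokenization with a hard-coded value in place of property_get() (codec-name handling only; the format mapping is omitted):

#include <cstdio>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

// Stand-in parser for a value like "sbc-aac-ldac" as read from
// persist.bluetooth.a2dp_offload.cap; unknown tokens are skipped, as in the real code.
std::vector<std::string> parseA2dpOffloadCaps(const char* value) {
    char buf[256];
    std::snprintf(buf, sizeof(buf), "%s", value);
    std::vector<std::string> codecs;
    char* saveptr = nullptr;
    for (char* tok = strtok_r(buf, "-", &saveptr); tok != nullptr;
         tok = strtok_r(nullptr, "-", &saveptr)) {
        std::string t(tok);
        if (t == "sbc" || t == "aac" || t == "aptx" || t == "aptxhd" || t == "ldac") {
            codecs.push_back(t);
        } else {
            std::cerr << "unknown codec token: " << t << "\n";
        }
    }
    return codecs;
}

int main() {
    for (const auto& c : parseA2dpOffloadCaps("sbc-aac-ldac")) {
        std::cout << c << "\n";
    }
}
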
+
 uint32_t AudioPolicyManager::updateCallRouting(const DeviceVector &rxDevices, uint32_t delayMs)
 {
     bool createTxPatch = false;
+    bool createRxPatch = false;
     uint32_t muteWaitMs = 0;
 
     if(!hasPrimaryOutput() || mPrimaryOutput->devices().types() == AUDIO_DEVICE_OUT_STUB) {
@@ -419,9 +495,10 @@
     ALOG_ASSERT(!rxDevices.isEmpty(), "updateCallRouting() no selected output device");
 
     audio_attributes_t attr = { .source = AUDIO_SOURCE_VOICE_COMMUNICATION };
-    auto txDevice = getDeviceAndMixForAttributes(attr);
+    auto txSourceDevice = getDeviceAndMixForAttributes(attr);
+    ALOG_ASSERT(txSourceDevice != 0, "updateCallRouting() input selected device not available");
     ALOGV("updateCallRouting device rxDevice %s txDevice %s", 
-          rxDevices.toString().c_str(), txDevice->toString().c_str());
+          rxDevices.itemAt(0)->toString().c_str(), txSourceDevice->toString().c_str());
 
     // release existing RX patch if any
     if (mCallRxPatch != 0) {
@@ -434,22 +511,54 @@
         mCallTxPatch.clear();
     }
 
-    // If the RX device is on the primary HW module, then use legacy routing method for voice calls
-    // via setOutputDevice() on primary output.
-    // Otherwise, create two audio patches for TX and RX path.
-    if (availablePrimaryOutputDevices().contains(rxDevices.itemAt(0))) {
-        muteWaitMs = setOutputDevices(mPrimaryOutput, rxDevices, true, delayMs);
+    auto telephonyRxModule =
+        mHwModules.getModuleForDeviceTypes(AUDIO_DEVICE_IN_TELEPHONY_RX, AUDIO_FORMAT_DEFAULT);
+    auto telephonyTxModule =
+        mHwModules.getModuleForDeviceTypes(AUDIO_DEVICE_OUT_TELEPHONY_TX, AUDIO_FORMAT_DEFAULT);
+    // retrieve Rx Source and Tx Sink device descriptors
+    sp<DeviceDescriptor> rxSourceDevice =
+        mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_TELEPHONY_RX,
+                                         String8(),
+                                         AUDIO_FORMAT_DEFAULT);
+    sp<DeviceDescriptor> txSinkDevice =
+        mAvailableOutputDevices.getDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX,
+                                          String8(),
+                                          AUDIO_FORMAT_DEFAULT);
+
+    // RX and TX telephony devices are declared by the Primary Audio HAL
+    if (isPrimaryModule(telephonyRxModule) && isPrimaryModule(telephonyTxModule) &&
+            (telephonyRxModule->getHalVersionMajor() >= 3)) {
+        if (rxSourceDevice == 0 || txSinkDevice == 0) {
+            // The RX and/or TX telephony devices are not currently available
+            ALOGE("updateCallRouting() no telephony Tx and/or RX device");
+            return muteWaitMs;
+        }
+        // do not create a patch (aka SW bridging) if the primary HW module declares support for a
+        // route from telephony RX to the sink device and from the source device to telephony TX
+        const auto &primaryModule = telephonyRxModule;
+        createRxPatch = !primaryModule->supportsPatch(rxSourceDevice, rxDevices.itemAt(0));
+        createTxPatch = !primaryModule->supportsPatch(txSourceDevice, txSinkDevice);
+    } else {
+        // If the RX device is on the primary HW module, then use legacy routing method for
+        // voice calls via setOutputDevice() on primary output.
+        // Otherwise, create two audio patches for TX and RX path.
+        createRxPatch = !(availablePrimaryOutputDevices().contains(rxDevices.itemAt(0))) &&
+                (rxSourceDevice != 0);
         // If the TX device is also on the primary HW module, setOutputDevice() will take care
         // of it due to legacy implementation. If not, create a patch.
-        if (!availablePrimaryModuleInputDevices().contains(txDevice)) {
-            createTxPatch = true;
-        }
+        createTxPatch = !(availablePrimaryModuleInputDevices().contains(txSourceDevice)) &&
+                (txSinkDevice != 0);
+    }
+    // Use legacy routing method for voice calls via setOutputDevice() on primary output.
+    // Otherwise, create two audio patches for TX and RX path.
+    if (!createRxPatch) {
+        muteWaitMs = setOutputDevices(mPrimaryOutput, rxDevices, true, delayMs);
     } else { // create RX path audio patch
         mCallRxPatch = createTelephonyPatch(true /*isRx*/, rxDevices.itemAt(0), delayMs);
-        createTxPatch = true;
+        ALOG_ASSERT(createTxPatch, "No Tx Patch will be created, nor legacy routing done");
     }
     if (createTxPatch) { // create TX path audio patch
-        mCallTxPatch = createTelephonyPatch(false /*isRx*/, txDevice, delayMs);
+        mCallTxPatch = createTelephonyPatch(false /*isRx*/, txSourceDevice, delayMs);
     }
 
     return muteWaitMs;
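
Note: the reworked updateCallRouting() above first decides whether software RX/TX bridging patches are needed: when the primary HAL (major version >= 3) declares both telephony endpoints, a patch is created only for a route the HAL does not itself support; otherwise the legacy rule applies (devices on the primary module are handled by setOutputDevice()). A much-simplified decision sketch under those assumptions, with booleans standing in for the module and device lookups (device-availability checks omitted):

#include <iostream>

struct CallRoutingDecision {
    bool createRxPatch;
    bool createTxPatch;
};

// Condensed model of the createRxPatch/createTxPatch selection in updateCallRouting().
CallRoutingDecision decideCallRouting(bool telephonyDevicesOnPrimaryHalV3,
                                      bool halRoutesRx, bool halRoutesTx,
                                      bool rxDeviceOnPrimaryModule,
                                      bool txDeviceOnPrimaryModule) {
    if (telephonyDevicesOnPrimaryHalV3) {
        // HAL-declared routes make software bridging unnecessary.
        return {!halRoutesRx, !halRoutesTx};
    }
    // Legacy behaviour: primary-module devices are handled by setOutputDevice().
    return {!rxDeviceOnPrimaryModule, !txDeviceOnPrimaryModule};
}

int main() {
    CallRoutingDecision d = decideCallRouting(true, /*halRoutesRx=*/true, /*halRoutesTx=*/false,
                                              false, false);
    std::cout << d.createRxPatch << " " << d.createTxPatch << "\n";  // 0 1
}
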
@@ -464,15 +573,18 @@
     }
     if (isRx) {
         patchBuilder.addSink(device).
-                addSource(mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_TELEPHONY_RX));
+                addSource(mAvailableInputDevices.getDevice(
+                    AUDIO_DEVICE_IN_TELEPHONY_RX, String8(), AUDIO_FORMAT_DEFAULT));
     } else {
         patchBuilder.addSource(device).
-                addSink(mAvailableOutputDevices.getDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX));
+                addSink(mAvailableOutputDevices.getDevice(
+                    AUDIO_DEVICE_OUT_TELEPHONY_TX, String8(), AUDIO_FORMAT_DEFAULT));
     }
 
     // @TODO: still ignoring the address, or not dealing platform with mutliple telephonydevices
     const sp<DeviceDescriptor> outputDevice = isRx ?
-                device : mAvailableOutputDevices.getDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX);
+                device : mAvailableOutputDevices.getDevice(
+                    AUDIO_DEVICE_OUT_TELEPHONY_TX, String8(), AUDIO_FORMAT_DEFAULT);
     SortedVector<audio_io_handle_t> outputs =
             getOutputsForDevices(DeviceVector(outputDevice), mOutputs);
     audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID);
@@ -678,6 +790,9 @@
         sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
         DeviceVector newDevices = getNewOutputDevices(outputDesc, true /*fromCache*/);
         if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (outputDesc != mPrimaryOutput)) {
+            // As done in setDeviceConnectionState, we could also fix the default device issue by
+            // preventing the forced re-routing when the default device distinguishes on address.
+            // However, leave the full device choice decision to the engine.
             waitMs = setOutputDevices(outputDesc, newDevices, !newDevices.isEmpty(), delayMs);
         }
         if (forceVolumeReeval && !newDevices.isEmpty()) {
@@ -736,6 +851,10 @@
             if (!mAvailableOutputDevices.containsAtLeastOne(curProfile->getSupportedDevices())) {
                 continue;
             }
+            // reject profiles if connected device does not support codec
+            if (!curProfile->deviceSupportsEncodedFormats(devices.types())) {
+                continue;
+            }
             if (!directOnly) return curProfile;
             // when searching for direct outputs, if several profiles are compatible, give priority
             // to one with offload capability
@@ -840,7 +959,8 @@
             *output = desc->mIoHandle;
             AudioMix *mix = desc->mPolicyMix;
             sp<DeviceDescriptor> deviceDesc =
-                mAvailableOutputDevices.getDevice(mix->mDeviceType, mix->mDeviceAddress);
+                mAvailableOutputDevices.getDevice(
+                        mix->mDeviceType, mix->mDeviceAddress, AUDIO_FORMAT_DEFAULT);
             *selectedDeviceId = deviceDesc != 0 ? deviceDesc->getId() : AUDIO_PORT_HANDLE_NONE;
             ALOGV("%s returns output %d", __func__, *output);
             return NO_ERROR;
@@ -883,7 +1003,8 @@
     *output = AUDIO_IO_HANDLE_NONE;
     if (!msdDevices.isEmpty()) {
         *output = getOutputForDevices(msdDevices, session, *stream, config, flags);
-        sp<DeviceDescriptor> deviceDesc = mAvailableOutputDevices.getDevice(deviceType);
+        sp<DeviceDescriptor> deviceDesc =
+                mAvailableOutputDevices.getDevice(deviceType, String8(), AUDIO_FORMAT_DEFAULT);
         if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatch(deviceDesc) == NO_ERROR) {
             ALOGV("%s() Using MSD devices %s instead of device %s",
                     __func__, msdDevices.toString().c_str(), deviceDesc->toString().c_str());
@@ -1158,7 +1279,7 @@
         ALOGE("%s() unable to get MSD module", __func__);
         return NO_INIT;
     }
-    sp<HwModule> deviceModule = mHwModules.getModuleForDevice(outputDevice);
+    sp<HwModule> deviceModule = mHwModules.getModuleForDevice(outputDevice, AUDIO_FORMAT_DEFAULT);
     if (deviceModule == nullptr) {
         ALOGE("%s() unable to get module for %s", __func__, outputDevice->toString().c_str());
         return NO_INIT;
@@ -1456,7 +1577,8 @@
         } else {
             newDeviceType = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
         }
-        devices.add(mAvailableOutputDevices.getDevice(newDeviceType, String8(address)));
+        devices.add(mAvailableOutputDevices.getDevice(newDeviceType,
+                                                      String8(address), AUDIO_FORMAT_DEFAULT));
     }
 
     // requiresMuteCheck is false when we can bypass mute strategy.
@@ -1574,7 +1696,8 @@
         setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
                                     AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
                                     address,
-                                    "remote-submix");
+                                    "remote-submix",
+                                    AUDIO_FORMAT_DEFAULT);
     }
 
     return NO_ERROR;
@@ -1620,7 +1743,7 @@
                 setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
                                             AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
                                             outputDesc->mPolicyMix->mDeviceAddress,
-                                            "remote-submix");
+                                            "remote-submix", AUDIO_FORMAT_DEFAULT);
             }
         }
         bool forceDeviceUpdate = false;
@@ -1812,7 +1935,8 @@
         }
         *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
         device = mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
-                                                  String8(attr->tags + strlen("addr=")));
+                                                  String8(attr->tags + strlen("addr=")),
+                                                  AUDIO_FORMAT_DEFAULT);
     } else {
         if (explicitRoutingDevice != nullptr) {
             device = explicitRoutingDevice;
@@ -1831,7 +1955,8 @@
             // know about it and is therefore considered "legacy"
             *inputType = API_INPUT_LEGACY;
         } else if (audio_is_remote_submix_device(device->type())) {
-            device = mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX, String8("0"));
+            device = mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX, String8("0"),
+                                                      AUDIO_FORMAT_DEFAULT);
             *inputType = API_INPUT_MIX_CAPTURE;
         } else if (device->type() == AUDIO_DEVICE_IN_TELEPHONY_RX) {
             *inputType = API_INPUT_TELEPHONY_RX;
@@ -2065,7 +2190,7 @@
             if (address != "") {
                 setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
                         AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                        address, "remote-submix");
+                        address, "remote-submix", AUDIO_FORMAT_DEFAULT);
             }
         }
     }
@@ -2116,7 +2241,7 @@
             if (address != "") {
                 setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
                                          AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                                         address, "remote-submix");
+                                         address, "remote-submix", AUDIO_FORMAT_DEFAULT);
             }
         }
         resetInputDevice(input);
@@ -2202,6 +2327,10 @@
                                             int indexMax)
 {
     ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
+    if (indexMin < 0 || indexMax < 0) {
+        ALOGE("%s for stream %d: invalid min %d or max %d", __func__, stream , indexMin, indexMax);
+        return;
+    }
     mVolumeCurves->initStreamVolume(stream, indexMin, indexMax);
 
     // initialize other private stream volumes which follow this one
@@ -2218,10 +2347,11 @@
                                                   audio_devices_t device)
 {
 
-    // VOICE_CALL stream has minVolumeIndex > 0  but can be muted directly by an
-    // app that has MODIFY_PHONE_STATE permission.
+    // VOICE_CALL and BLUETOOTH_SCO stream have minVolumeIndex > 0 but
+    // can be muted directly by an app that has MODIFY_PHONE_STATE permission.
     if (((index < mVolumeCurves->getVolumeIndexMin(stream)) &&
-            !(stream == AUDIO_STREAM_VOICE_CALL && index == 0)) ||
+            !((stream == AUDIO_STREAM_VOICE_CALL || stream == AUDIO_STREAM_BLUETOOTH_SCO) &&
+            index == 0)) ||
             (index > mVolumeCurves->getVolumeIndexMax(stream))) {
         return BAD_VALUE;
     }
@@ -2536,11 +2666,11 @@
             if (mix.mMixType == MIX_TYPE_PLAYERS) {
                 setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
                         AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                        address.string(), "remote-submix");
+                        address.string(), "remote-submix", AUDIO_FORMAT_DEFAULT);
             } else {
                 setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
                         AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                        address.string(), "remote-submix");
+                        address.string(), "remote-submix", AUDIO_FORMAT_DEFAULT);
             }
         } else if ((mix.mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
             String8 address = mix.mDeviceAddress;
@@ -2614,13 +2744,13 @@
                     AUDIO_POLICY_DEVICE_STATE_AVAILABLE)  {
                 setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
                         AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                        address.string(), "remote-submix");
+                        address.string(), "remote-submix", AUDIO_FORMAT_DEFAULT);
             }
             if (getDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address.string()) ==
                     AUDIO_POLICY_DEVICE_STATE_AVAILABLE)  {
                 setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
                         AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                        address.string(), "remote-submix");
+                        address.string(), "remote-submix", AUDIO_FORMAT_DEFAULT);
             }
             rSubmixModule->removeOutputProfile(address);
             rSubmixModule->removeInputProfile(address);
@@ -2664,7 +2794,8 @@
         // reevaluate outputs for all given devices
         for (size_t i = 0; i < devices.size(); i++) {
             sp<DeviceDescriptor> devDesc = mHwModules.getDeviceDescriptor(
-                            devices[i].mType, devices[i].mAddress, String8());
+                            devices[i].mType, devices[i].mAddress, String8(),
+                            AUDIO_FORMAT_DEFAULT);
             SortedVector<audio_io_handle_t> outputs;
             if (checkOutputsForDevice(devDesc, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
                     outputs) != NO_ERROR) {
@@ -2685,7 +2816,8 @@
         // reevaluate outputs for all found devices
         for (size_t i = 0; i < devices.size(); i++) {
             sp<DeviceDescriptor> devDesc = mHwModules.getDeviceDescriptor(
-                    devices[i].mType, devices[i].mAddress, String8());
+                    devices[i].mType, devices[i].mAddress, String8(),
+                    AUDIO_FORMAT_DEFAULT);
             SortedVector<audio_io_handle_t> outputs;
             if (checkOutputsForDevice(devDesc, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
                     outputs) != NO_ERROR) {
@@ -3442,7 +3574,8 @@
 
     sp<DeviceDescriptor> srcDevice =
             mAvailableInputDevices.getDevice(source->ext.device.type,
-                                             String8(source->ext.device.address));
+                                             String8(source->ext.device.address),
+                                             AUDIO_FORMAT_DEFAULT);
     if (srcDevice == 0) {
         ALOGW("%s source->ext.device.type %08x not found", __FUNCTION__, source->ext.device.type);
         return BAD_VALUE;
@@ -3726,14 +3859,16 @@
         status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_HDMI,
                                                       AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
                                                       address.c_str(),
-                                                      name.c_str());
+                                                      name.c_str(),
+                                                      AUDIO_FORMAT_DEFAULT);
         if (status != NO_ERROR) {
             continue;
         }
         status = setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_HDMI,
                                              AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
                                              address.c_str(),
-                                             name.c_str());
+                                             name.c_str(),
+                                             AUDIO_FORMAT_DEFAULT);
         profileUpdated |= (status == NO_ERROR);
     }
     // FIXME: Why doing this for input HDMI devices if we don't augment their reported formats?
@@ -3746,14 +3881,16 @@
         status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_IN_HDMI,
                                                       AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
                                                       address.c_str(),
-                                                      name.c_str());
+                                                      name.c_str(),
+                                                      AUDIO_FORMAT_DEFAULT);
         if (status != NO_ERROR) {
             continue;
         }
         status = setDeviceConnectionStateInt(AUDIO_DEVICE_IN_HDMI,
                                              AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
                                              address.c_str(),
-                                             name.c_str());
+                                             name.c_str(),
+                                             AUDIO_FORMAT_DEFAULT);
         profileUpdated |= (status == NO_ERROR);
     }
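
The two loops above refresh HDMI output and input device profiles by briefly reporting the device as unavailable and then available again, now forwarding AUDIO_FORMAT_DEFAULT since no specific encoded format is negotiated at this point. A hedged sketch of that cycle, written as a hypothetical AudioPolicyManager helper (not part of this patch):

    // Hypothetical helper illustrating the disconnect/reconnect cycle above.
    // AUDIO_FORMAT_DEFAULT means no particular encoded format is being reported.
    status_t AudioPolicyManager::refreshHdmiProfile(audio_devices_t device,
                                                    const char *address,
                                                    const char *name) {
        status_t status = setDeviceConnectionStateInt(device,
                AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE, address, name,
                AUDIO_FORMAT_DEFAULT);
        if (status != NO_ERROR) {
            return status;  // the device was not connected to begin with
        }
        // Reconnect so the device's (possibly updated) formats are re-read.
        return setDeviceConnectionStateInt(device,
                AUDIO_POLICY_DEVICE_STATE_AVAILABLE, address, name,
                AUDIO_FORMAT_DEFAULT);
    }
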
 
@@ -4178,7 +4315,8 @@
         // first list already open outputs that can be routed to this device
         for (size_t i = 0; i < mOutputs.size(); i++) {
             desc = mOutputs.valueAt(i);
-            if (!desc->isDuplicated() && desc->supportsDevice(device)) {
+            if (!desc->isDuplicated() && desc->supportsDevice(device)
+                    && desc->deviceSupportsEncodedFormats(deviceType)) {
                 ALOGV("checkOutputsForDevice(): adding opened output %d on device %s",
                       mOutputs.keyAt(i), device->toString().c_str());
                 outputs.add(mOutputs.keyAt(i));
@@ -4341,7 +4479,8 @@
             desc = mOutputs.valueAt(i);
             if (!desc->isDuplicated()) {
                 // exact match on device
-                if (device_distinguishes_on_address(deviceType) && desc->supportsDevice(device)) {
+                if (device_distinguishes_on_address(deviceType) && desc->supportsDevice(device)
+                        && desc->deviceSupportsEncodedFormats(deviceType)) {
                     outputs.add(mOutputs.keyAt(i));
                 } else if (!mAvailableOutputDevices.containsAtLeastOne(desc->supportedDevices())) {
                     ALOGV("checkOutputsForDevice(): disconnecting adding output %d",
@@ -4628,7 +4767,8 @@
         ALOGVV("output %zu isDuplicated=%d device=%s",
                 i, openOutputs.valueAt(i)->isDuplicated(),
                 openOutputs.valueAt(i)->supportedDevices().toString().c_str());
-        if (openOutputs.valueAt(i)->supportsAllDevices(devices)) {
+        if (openOutputs.valueAt(i)->supportsAllDevices(devices)
+                && openOutputs.valueAt(i)->deviceSupportsEncodedFormats(devices.types())) {
             ALOGVV("%s() found output %d", __func__, openOutputs.keyAt(i));
             outputs.add(openOutputs.keyAt(i));
         }
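
The hunks above add deviceSupportsEncodedFormats() next to supportsDevice(), so an already-open output is only reused for a device if it can also carry the encoded formats that device declared when it was connected (for example an A2DP sink advertising an offloaded codec). A minimal sketch of the combined reuse test, assuming the descriptor API shown in this patch:

    // Hedged sketch of the per-output reuse check applied in the loops above.
    bool canReuseOutput = !desc->isDuplicated()
            && desc->supportsDevice(device)                      // type/address match
            && desc->deviceSupportsEncodedFormats(deviceType);   // encoded-format match
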
@@ -4684,9 +4824,10 @@
                 maxLatency = desc->latency();
             }
         }
-        ALOGV("%s: strategy %d, moving from output %s to output %s", __func__, strategy,
-              (srcOutputs.isEmpty()? "none" : std::to_string(srcOutputs[0]).c_str()),
-              (dstOutputs.isEmpty()? "none" : std::to_string(dstOutputs[0]).c_str()));
+        ALOGV_IF(!(srcOutputs.isEmpty() || dstOutputs.isEmpty()),
+              "%s: strategy %d, moving from output %s to output %s", __func__, strategy,
+              std::to_string(srcOutputs[0]).c_str(),
+              std::to_string(dstOutputs[0]).c_str());
         // mute strategy while moving tracks from one output to another
         for (audio_io_handle_t srcOut : srcOutputs) {
             sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
@@ -4836,6 +4977,13 @@
         return DeviceVector(device);
     }
 
+    // Legacy Engine cannot take care of bus devices and mixes, so we need to handle
+    // the conflict between setForceUse and the default bus device here.
+    device = mPolicyMixes.getDeviceAndMixForOutput(outputDesc, mAvailableOutputDevices);
+    if (device != nullptr) {
+        return DeviceVector(device);
+    }
+
     // check the following by order of priority to request a routing change if necessary:
     // 1: the strategy enforced audible is active and enforced on the output:
     //      use device for strategy enforced audible
@@ -5404,9 +5552,10 @@
     if (attributes.source == AUDIO_SOURCE_REMOTE_SUBMIX &&
                 strncmp(attributes.tags, "addr=", strlen("addr=")) == 0) {
         return mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
-                                                String8(attributes.tags + strlen("addr=")));
+                                                String8(attributes.tags + strlen("addr=")),
+                                                AUDIO_FORMAT_DEFAULT);
     }
-    return mAvailableInputDevices.getDevice(device);
+    return mAvailableInputDevices.getDevice(device, String8(), AUDIO_FORMAT_DEFAULT);
 }
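
Remote-submix capture is resolved by the address embedded in the attributes tags ("addr=<address>") rather than by device type alone, and device lookups now also take an encoded format, with AUDIO_FORMAT_DEFAULT meaning "unspecified". A small stand-alone sketch of the tag parsing, with a made-up tags value:

    #include <string.h>

    // Hypothetical: extract the submix address from an attributes tags string.
    static const char *submixAddressFromTags(const char *tags /* e.g. "addr=0" */) {
        if (strncmp(tags, "addr=", strlen("addr=")) == 0) {
            return tags + strlen("addr=");   // -> "0"
        }
        return nullptr;  // not an address-tagged remote submix request
    }
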
 
 float AudioPolicyManager::computeVolume(audio_stream_type_t stream,
@@ -5524,6 +5673,15 @@
     float minDst = (float)mVolumeCurves->getVolumeIndexMin(dstStream);
     float maxDst = (float)mVolumeCurves->getVolumeIndexMax(dstStream);
 
+    // preserve mute request or correct range
+    if (srcIndex < minSrc) {
+        if (srcIndex == 0) {
+            return 0;
+        }
+        srcIndex = minSrc;
+    } else if (srcIndex > maxSrc) {
+        srcIndex = maxSrc;
+    }
     return (int)(minDst + ((srcIndex - minSrc) * (maxDst - minDst)) / (maxSrc - minSrc));
 }
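
The added guard keeps an explicit mute (index 0) as a mute on the destination stream and clamps any other out-of-range index into the source curve before the linear rescale. A self-contained sketch of the same arithmetic with made-up curve ranges (the real min/max values come from the volume curves):

    #include <cstdio>

    // Hypothetical stand-in for the rescale above: map srcIndex from
    // [minSrc, maxSrc] onto [minDst, maxDst], preserving 0 as "muted".
    static int rescaleIndex(int srcIndex, float minSrc, float maxSrc,
                            float minDst, float maxDst) {
        if (srcIndex < minSrc) {
            if (srcIndex == 0) return 0;     // explicit mute request
            srcIndex = minSrc;               // clamp into the source range
        } else if (srcIndex > maxSrc) {
            srcIndex = maxSrc;
        }
        return (int)(minDst + ((srcIndex - minSrc) * (maxDst - minDst)) / (maxSrc - minSrc));
    }

    int main() {
        printf("%d\n", rescaleIndex(8, 1, 15, 1, 7));   // index 8 of 1..15 -> 4 of 1..7
        printf("%d\n", rescaleIndex(0, 1, 15, 1, 7));   // 0 stays 0 (mute preserved)
        return 0;
    }
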
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index e99de16..de6d489 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -97,12 +97,14 @@
         virtual status_t setDeviceConnectionState(audio_devices_t device,
                                                           audio_policy_dev_state_t state,
                                                           const char *device_address,
-                                                          const char *device_name);
+                                                          const char *device_name,
+                                                          audio_format_t encodedFormat);
         virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
                                                                               const char *device_address);
         virtual status_t handleDeviceConfigChange(audio_devices_t device,
                                                   const char *device_address,
-                                                  const char *device_name);
+                                                  const char *device_name,
+                                                  audio_format_t encodedFormat);
         virtual void setPhoneState(audio_mode_t state);
         virtual void setForceUse(audio_policy_force_use_t usage,
                                  audio_policy_forced_cfg_t config);
@@ -239,6 +241,9 @@
                                             bool reported);
         virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
 
+        virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+                    std::vector<audio_format_t> *formats);
+
         // return the strategy corresponding to a given stream type
         routing_strategy getStrategy(audio_stream_type_t stream) const;
 
@@ -512,6 +517,13 @@
             return mAudioPatches.removeAudioPatch(handle);
         }
 
+        bool isPrimaryModule(const sp<HwModule> &module) const
+        {
+            if (module == 0 || !hasPrimaryOutput()) {
+                return false;
+            }
+            return module->getHandle() == mPrimaryOutput->getModuleHandle();
+        }
         DeviceVector availablePrimaryOutputDevices() const
         {
             if (!hasPrimaryOutput()) {
@@ -731,7 +743,8 @@
         status_t setDeviceConnectionStateInt(audio_devices_t deviceType,
                                              audio_policy_dev_state_t state,
                                              const char *device_address,
-                                             const char *device_name);
+                                             const char *device_name,
+                                             audio_format_t encodedFormat);
         void updateMono(audio_io_handle_t output) {
             AudioParameter param;
             param.addInt(String8(AudioParameter::keyMonoOutput), (int)mMasterMono);
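
Besides threading the new encodedFormat parameter through the connection-state entry points, the header adds an isPrimaryModule() helper so call sites can test whether a HwModule backs the primary output without repeating the null and hasPrimaryOutput() checks. A hedged usage sketch (hwModule and the surrounding logic are illustrative, not from this patch):

    // Illustrative only: restrict some handling to the primary audio HAL module.
    if (!isPrimaryModule(hwModule)) {
        return;  // e.g. offload-format bookkeeping that only concerns the primary module
    }
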
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 2c904d9..49c541c 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -32,7 +32,8 @@
 status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
                                                   audio_policy_dev_state_t state,
                                                   const char *device_address,
-                                                  const char *device_name)
+                                                  const char *device_name,
+                                                  audio_format_t encodedFormat)
 {
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
@@ -49,7 +50,7 @@
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
     return mAudioPolicyManager->setDeviceConnectionState(device, state,
-                                                         device_address, device_name);
+                                                         device_address, device_name, encodedFormat);
 }
 
 audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState(
@@ -66,7 +67,8 @@
 
 status_t AudioPolicyService::handleDeviceConfigChange(audio_devices_t device,
                                                   const char *device_address,
-                                                  const char *device_name)
+                                                  const char *device_name,
+                                                  audio_format_t encodedFormat)
 {
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
@@ -79,7 +81,7 @@
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
     return mAudioPolicyManager->handleDeviceConfigChange(device, device_address,
-                                                         device_name);
+                                                         device_name, encodedFormat);
 }
 
 status_t AudioPolicyService::setPhoneState(audio_mode_t state)
@@ -1138,6 +1140,17 @@
                                                    surroundFormatsEnabled, reported);
 }
 
+status_t AudioPolicyService::getHwOffloadEncodingFormatsSupportedForA2DP(
+                                        std::vector<audio_format_t> *formats)
+{
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+    AutoCallerClear acc;
+    return mAudioPolicyManager->getHwOffloadEncodingFormatsSupportedForA2DP(formats);
+}
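
The new query follows the same pattern as the other AudioPolicyService entry points: return NO_INIT if the manager is gone, take mLock, clear the caller identity, and forward. A hedged sketch of how a caller might consume the result (the policyService handle is a placeholder):

    // Hypothetical caller: list the A2DP encodings the HAL can offload.
    std::vector<audio_format_t> formats;
    if (policyService->getHwOffloadEncodingFormatsSupportedForA2DP(&formats) == NO_ERROR) {
        for (audio_format_t f : formats) {
            ALOGI("A2DP hardware offload supports format 0x%x", f);
        }
    }
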
+
 status_t AudioPolicyService::setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled)
 {
     if (mAudioPolicyManager == NULL) {
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 959e757..c073b7c 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -61,13 +61,15 @@
     virtual status_t setDeviceConnectionState(audio_devices_t device,
                                               audio_policy_dev_state_t state,
                                               const char *device_address,
-                                              const char *device_name);
+                                              const char *device_name,
+                                              audio_format_t encodedFormat);
     virtual audio_policy_dev_state_t getDeviceConnectionState(
                                                                 audio_devices_t device,
                                                                 const char *device_address);
     virtual status_t handleDeviceConfigChange(audio_devices_t device,
                                               const char *device_address,
-                                              const char *device_name);
+                                              const char *device_name,
+                                              audio_format_t encodedFormat);
     virtual status_t setPhoneState(audio_mode_t state);
     virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
     virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
@@ -218,6 +220,8 @@
                                         audio_format_t *surroundFormats,
                                         bool *surroundFormatsEnabled,
                                         bool reported);
+    virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+                                        std::vector<audio_format_t> *formats);
     virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
 
     virtual status_t setAssistantUid(uid_t uid);
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index 3b6dc80..6a71d7d 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -69,9 +69,12 @@
 include $(CLEAR_VARS)
 # seccomp is not required for coverage build.
 ifneq ($(NATIVE_COVERAGE),true)
-LOCAL_REQUIRED_MODULES_arm := crash_dump.policy mediacodec.policy
-LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediacodec.policy
+LOCAL_REQUIRED_MODULES_arm := crash_dump.policy mediaswcodec.policy
+LOCAL_REQUIRED_MODULES_arm64 := crash_dump.policy mediaswcodec.policy
+LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediaswcodec.policy
+LOCAL_REQUIRED_MODULES_x86_64 := crash_dump.policy mediaswcodec.policy
 endif
+
 LOCAL_SRC_FILES := \
     main_swcodecservice.cpp \
     MediaCodecUpdateService.cpp \
@@ -107,8 +110,12 @@
 
 LOCAL_MODULE := mediaswcodec
 LOCAL_INIT_RC := mediaswcodec.rc
-LOCAL_32_BIT_ONLY := true
 LOCAL_SANITIZE := scudo
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), x86_64 arm64))
+  LOCAL_MULTILIB := both
+  LOCAL_MODULE_STEM_32 := $(LOCAL_MODULE)32
+  LOCAL_MODULE_STEM_64 := $(LOCAL_MODULE)
+endif
 
 sanitizer_runtime_libraries :=
 llndk_libraries :=
@@ -137,4 +144,16 @@
 include $(BUILD_PREBUILT)
 endif
 
+####################################################################
+
+# sw service seccomp policy
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), x86 x86_64 arm arm64))
+include $(CLEAR_VARS)
+LOCAL_MODULE := mediaswcodec.policy
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/seccomp_policy
+LOCAL_SRC_FILES := seccomp_policy/mediaswcodec-$(TARGET_ARCH).policy
+include $(BUILD_PREBUILT)
+endif
+
 include $(call all-makefiles-under, $(LOCAL_PATH))
diff --git a/services/mediacodec/main_swcodecservice.cpp b/services/mediacodec/main_swcodecservice.cpp
index 1168825..05b5695 100644
--- a/services/mediacodec/main_swcodecservice.cpp
+++ b/services/mediacodec/main_swcodecservice.cpp
@@ -26,12 +26,10 @@
 
 using namespace android;
 
-// TODO: replace policy with software codec-only policies
-// Must match location in Android.mk.
 static const char kSystemSeccompPolicyPath[] =
-        "/system/etc/seccomp_policy/mediacodec.policy";
+        "/system/etc/seccomp_policy/mediaswcodec.policy";
 static const char kVendorSeccompPolicyPath[] =
-        "/vendor/etc/seccomp_policy/mediacodec.policy";
+        "/vendor/etc/seccomp_policy/mediaswcodec.policy";
 
 // Disable Scudo's mismatch allocation check, as it is being triggered
 // by some third party code.
@@ -47,8 +45,11 @@
 
     ::android::hardware::configureRpcThreadpool(64, false);
 
-    // codec libs are currently 32-bit only
+#ifdef __LP64__
+    loadFromApex("/apex/com.android.media.swcodec/lib64");
+#else
     loadFromApex("/apex/com.android.media.swcodec/lib");
+#endif
 
     ::android::hardware::joinRpcThreadpool();
 }
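
With the 32-bit restriction lifted, the service now loads the software codec libraries from the APEX directory matching its own bitness. A minimal stand-alone sketch of the same selection (swcodecApexLibDir is a hypothetical name, not part of this patch):

    // Hedged sketch: pick the APEX library directory for the current bitness,
    // mirroring the #ifdef __LP64__ branch added above.
    static const char *swcodecApexLibDir() {
    #ifdef __LP64__
        return "/apex/com.android.media.swcodec/lib64";
    #else
        return "/apex/com.android.media.swcodec/lib";
    #endif
    }
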
diff --git a/services/mediacodec/registrant/Android.bp b/services/mediacodec/registrant/Android.bp
index 8c40ad1..8ae3376 100644
--- a/services/mediacodec/registrant/Android.bp
+++ b/services/mediacodec/registrant/Android.bp
@@ -49,7 +49,5 @@
         "libcodec2_soft_gsmdec",
         "libcodec2_soft_xaacdec",
     ],
-
-    compile_multilib: "32",
 }
 
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy b/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
new file mode 100644
index 0000000..588141a
--- /dev/null
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
@@ -0,0 +1,60 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+futex: 1
+# ioctl calls are filtered via the selinux policy.
+ioctl: 1
+sched_yield: 1
+close: 1
+dup: 1
+ppoll: 1
+mprotect: arg2 in ~PROT_EXEC || arg2 in ~PROT_WRITE
+mmap2: arg2 in ~PROT_EXEC || arg2 in ~PROT_WRITE
+
+# mremap: Ensure |flags| are (MREMAP_MAYMOVE | MREMAP_FIXED) TODO: Once minijail
+# parser support for '<' is in, this needs to be modified to also prevent
+# |old_address| and |new_address| from touching the exception vector page, which
+# on ARM is statically loaded at 0xffff 0000. See
+# http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0211h/Babfeega.html
+# for more details.
+mremap: arg3 == 3
+munmap: 1
+prctl: 1
+getuid32: 1
+writev: 1
+sigaltstack: 1
+clone: 1
+exit: 1
+lseek: 1
+rt_sigprocmask: 1
+openat: 1
+fstat64: 1
+write: 1
+nanosleep: 1
+setpriority: 1
+set_tid_address: 1
+getdents64: 1
+readlinkat: 1
+read: 1
+pread64: 1
+fstatfs64: 1
+gettimeofday: 1
+faccessat: 1
+_llseek: 1
+fstatat64: 1
+ugetrlimit: 1
+exit_group: 1
+restart_syscall: 1
+rt_sigreturn: 1
+getrandom: 1
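
The `mremap: arg3 == 3` rule above pins the flags argument to the only allowed combination; on Linux, MREMAP_MAYMOVE is 1 and MREMAP_FIXED is 2, so their OR is 3. A small compile-time check of that arithmetic (hypothetical, not part of the policy; _GNU_SOURCE is needed for the MREMAP_* constants with glibc):

    #define _GNU_SOURCE          // MREMAP_* constants need this with glibc
    #include <sys/mman.h>

    // The seccomp rule admits mremap() only when flags == MREMAP_MAYMOVE | MREMAP_FIXED.
    static_assert((MREMAP_MAYMOVE | MREMAP_FIXED) == 3,
                  "policy literal 3 must equal MREMAP_MAYMOVE | MREMAP_FIXED");
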
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
new file mode 100644
index 0000000..1bee1b5
--- /dev/null
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
@@ -0,0 +1,61 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+futex: 1
+# ioctl calls are filtered via the selinux policy.
+ioctl: 1
+sched_yield: 1
+close: 1
+dup: 1
+ppoll: 1
+mprotect: arg2 in ~PROT_EXEC || arg2 in ~PROT_WRITE
+mmap: arg2 in ~PROT_EXEC || arg2 in ~PROT_WRITE
+getuid: 1
+getrlimit: 1
+fstat: 1
+newfstatat: 1
+fstatfs: 1
+
+# mremap: Ensure |flags| are (MREMAP_MAYMOVE | MREMAP_FIXED) TODO: Once minijail
+# parser support for '<' is in, this needs to be modified to also prevent
+# |old_address| and |new_address| from touching the exception vector page, which
+# on ARM is statically loaded at 0xffff 0000. See
+# http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0211h/Babfeega.html
+# for more details.
+mremap: arg3 == 3
+munmap: 1
+prctl: 1
+writev: 1
+sigaltstack: 1
+clone: 1
+exit: 1
+lseek: 1
+rt_sigprocmask: 1
+openat: 1
+write: 1
+nanosleep: 1
+setpriority: 1
+set_tid_address: 1
+getdents64: 1
+readlinkat: 1
+read: 1
+pread64: 1
+gettimeofday: 1
+faccessat: 1
+exit_group: 1
+restart_syscall: 1
+rt_sigreturn: 1
+getrandom: 1
+madvise: 1
+
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-x86.policy b/services/mediacodec/seccomp_policy/mediaswcodec-x86.policy
new file mode 120000
index 0000000..ab2592a
--- /dev/null
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-x86.policy
@@ -0,0 +1 @@
+mediacodec-x86.policy
\ No newline at end of file
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-x86_64.policy b/services/mediacodec/seccomp_policy/mediaswcodec-x86_64.policy
new file mode 120000
index 0000000..ab2592a
--- /dev/null
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-x86_64.policy
@@ -0,0 +1 @@
+mediacodec-x86.policy
\ No newline at end of file
diff --git a/services/mediaextractor/mediaextractor.rc b/services/mediaextractor/mediaextractor.rc
index 5fc2941..6b2d0a5 100644
--- a/services/mediaextractor/mediaextractor.rc
+++ b/services/mediaextractor/mediaextractor.rc
@@ -2,5 +2,7 @@
     class main
     user mediaex
     group drmrpc mediadrm
+    # TODO(b/123275379): Remove updatable when http://aosp/878198 has landed
+    updatable
     ioprio rt 4
     writepid /dev/cpuset/foreground/tasks
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index 43b0a37..3616fa2 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -121,7 +121,7 @@
     mutable std::mutex       mLockStreams;
     std::vector<android::sp<AAudioServiceStreamBase>> mRegisteredStreams;
 
-    SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;
+    SimpleDoubleBuffer<Timestamp>  mAtomicEndpointTimestamp;
 
     android::AudioClient     mMmapClient;   // set in open, used in open and startStream
 
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index 2f1ec7e..0a415fd 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -181,8 +181,8 @@
 // Get timestamp that was written by the real-time service thread, eg. mixer.
 aaudio_result_t AAudioServiceEndpointShared::getFreeRunningPosition(int64_t *positionFrames,
                                                                   int64_t *timeNanos) {
-    if (mAtomicTimestamp.isValid()) {
-        Timestamp timestamp = mAtomicTimestamp.read();
+    if (mAtomicEndpointTimestamp.isValid()) {
+        Timestamp timestamp = mAtomicEndpointTimestamp.read();
         *positionFrames = timestamp.getPosition();
         *timeNanos = timestamp.getNanoseconds();
         return AAUDIO_OK;
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index defbb7b..b16b5dc 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -43,7 +43,7 @@
 AAudioServiceStreamBase::AAudioServiceStreamBase(AAudioService &audioService)
         : mUpMessageQueue(nullptr)
         , mTimestampThread("AATime")
-        , mAtomicTimestamp()
+        , mAtomicStreamTimestamp()
         , mAudioService(audioService) {
     mMmapClient.clientUid = -1;
     mMmapClient.clientPid = -1;
@@ -182,7 +182,7 @@
     setSuspended(false);
 
     // Start with fresh presentation timestamps.
-    mAtomicTimestamp.clear();
+    mAtomicStreamTimestamp.clear();
 
     mClientHandle = AUDIO_PORT_HANDLE_NONE;
     result = startDevice();
@@ -291,16 +291,20 @@
 }
 
 // implement Runnable, periodically send timestamps to client
+__attribute__((no_sanitize("integer")))
 void AAudioServiceStreamBase::run() {
     ALOGD("%s() %s entering >>>>>>>>>>>>>> TIMESTAMPS", __func__, getTypeText());
     TimestampScheduler timestampScheduler;
     timestampScheduler.setBurstPeriod(mFramesPerBurst, getSampleRate());
     timestampScheduler.start(AudioClock::getNanoseconds());
     int64_t nextTime = timestampScheduler.nextAbsoluteTime();
+    int32_t loopCount = 0;
     while(mThreadEnabled.load()) {
+        loopCount++;
         if (AudioClock::getNanoseconds() >= nextTime) {
             aaudio_result_t result = sendCurrentTimestamp();
             if (result != AAUDIO_OK) {
+                ALOGE("%s() timestamp thread got result = %d", __func__, result);
                 break;
             }
             nextTime = timestampScheduler.nextAbsoluteTime();
@@ -310,7 +314,8 @@
             AudioClock::sleepUntilNanoTime(nextTime);
         }
     }
-    ALOGD("%s() %s exiting <<<<<<<<<<<<<< TIMESTAMPS", __func__, getTypeText());
+    ALOGD("%s() %s exiting after %d loops <<<<<<<<<<<<<< TIMESTAMPS",
+          __func__, getTypeText(), loopCount);
 }
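
run() is a burst-aligned sender loop: wake when the scheduler says the next timestamp is due, push it to the client, and otherwise sleep until that deadline; the new loopCount only feeds the exit log, and no_sanitize("integer") tolerates its (and the scheduler's) wrap-around arithmetic. A simplified sketch of the same loop shape, with placeholder names for the scheduler, clock and send call:

    // Hedged sketch of a periodic timestamp-sender loop like run() above.
    int32_t loopCount = 0;
    while (threadEnabled.load()) {           // cleared by stop/disconnect
        loopCount++;
        if (clockNowNanos() >= nextTime) {
            if (sendCurrentTimestamp() != OK) {
                break;                       // give up on the first send failure
            }
            nextTime = scheduler.nextAbsoluteTime();
        } else {
            sleepUntilNanoTime(nextTime);    // burst-aligned wakeup
        }
    }
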
 
 void AAudioServiceStreamBase::disconnect() {
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 7904b25..ffc768b 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -301,7 +301,7 @@
     // TODO rename mClientHandle to mPortHandle to be more consistent with AudioFlinger.
     audio_port_handle_t     mClientHandle = AUDIO_PORT_HANDLE_NONE;
 
-    SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;
+    SimpleDoubleBuffer<Timestamp>  mAtomicStreamTimestamp;
 
     android::AAudioService &mAudioService;
 
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 9377945..837b080 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -162,7 +162,7 @@
     aaudio_result_t result = serviceEndpointMMAP->getFreeRunningPosition(positionFrames, timeNanos);
     if (result == AAUDIO_OK) {
         Timestamp timestamp(*positionFrames, *timeNanos);
-        mAtomicTimestamp.write(timestamp);
+        mAtomicStreamTimestamp.write(timestamp);
         *positionFrames = timestamp.getPosition();
         *timeNanos = timestamp.getNanoseconds();
     } else if (result != AAUDIO_ERROR_UNAVAILABLE) {
@@ -184,8 +184,8 @@
             static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
 
     // TODO Get presentation timestamp from the HAL
-    if (mAtomicTimestamp.isValid()) {
-        Timestamp timestamp = mAtomicTimestamp.read();
+    if (mAtomicStreamTimestamp.isValid()) {
+        Timestamp timestamp = mAtomicStreamTimestamp.read();
         *positionFrames = timestamp.getPosition();
         *timeNanos = timestamp.getNanoseconds() + serviceEndpointMMAP->getHardwareTimeOffsetNanos();
         return AAUDIO_OK;
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index d5450fe..14742dd 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -238,15 +238,15 @@
 }
 
 void AAudioServiceStreamShared::markTransferTime(Timestamp &timestamp) {
-    mAtomicTimestamp.write(timestamp);
+    mAtomicStreamTimestamp.write(timestamp);
 }
 
 // Get timestamp that was written by mixer or distributor.
 aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition(int64_t *positionFrames,
                                                                   int64_t *timeNanos) {
     // TODO Get presentation timestamp from the HAL
-    if (mAtomicTimestamp.isValid()) {
-        Timestamp timestamp = mAtomicTimestamp.read();
+    if (mAtomicStreamTimestamp.isValid()) {
+        Timestamp timestamp = mAtomicStreamTimestamp.read();
         *positionFrames = timestamp.getPosition();
         *timeNanos = timestamp.getNanoseconds();
         return AAUDIO_OK;