Merge "Use the timestamp reported by the HAL for aaudio."
diff --git a/Android.bp b/Android.bp
index 60f0ff1..ee609e1 100644
--- a/Android.bp
+++ b/Android.bp
@@ -57,7 +57,7 @@
             min_sdk_version: "29",
             apex_available: [
                 "//apex_available:platform",
-                "com.android.bluetooth.updatable",
+                "com.android.bluetooth",
                 "com.android.media",
                 "com.android.media.swcodec",
             ],
@@ -86,7 +86,7 @@
     min_sdk_version: "29",
     apex_available: [
         "//apex_available:platform",
-        "com.android.bluetooth.updatable",
+        "com.android.bluetooth",
         "com.android.media",
         "com.android.media.swcodec",
     ],
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index 78a77d4..3687b15 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -173,6 +173,13 @@
 
     void setTorchMode(String cameraId, boolean enabled, IBinder clientBinder);
 
+    // Change the brightness level of the flash unit associated with cameraId to strengthLevel.
+    // If the torch is in the OFF state and strengthLevel > 0, the torch will also be turned ON.
+    void turnOnTorchWithStrengthLevel(String cameraId, int strengthLevel, IBinder clientBinder);
+
+    // Get the brightness level of the flash unit associated with cameraId.
+    int getTorchStrengthLevel(String cameraId);
+
     /**
      * Notify the camera service of a system event.  Should only be called from system_server.
      *
diff --git a/camera/aidl/android/hardware/ICameraServiceListener.aidl b/camera/aidl/android/hardware/ICameraServiceListener.aidl
index c54813c..5f17f5b 100644
--- a/camera/aidl/android/hardware/ICameraServiceListener.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceListener.aidl
@@ -83,6 +83,8 @@
 
     oneway void onTorchStatusChanged(int status, String cameraId);
 
+    oneway void onTorchStrengthLevelChanged(String cameraId, int newTorchStrength);
+
     /**
      * Notify registered clients about camera access priority changes.
      * Clients which were previously unable to open a certain camera device
diff --git a/camera/aidl/android/hardware/ICameraServiceProxy.aidl b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
index 3d78aef..f5d0120 100644
--- a/camera/aidl/android/hardware/ICameraServiceProxy.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
@@ -43,5 +43,5 @@
      * {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_180},
      * {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_270}).
      */
-    int getRotateAndCropOverride(String packageName, int lensFacing);
+    int getRotateAndCropOverride(String packageName, int lensFacing, int userId);
 }
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
index da887a2..d53d809 100644
--- a/camera/ndk/impl/ACameraManager.h
+++ b/camera/ndk/impl/ACameraManager.h
@@ -95,6 +95,9 @@
         virtual binder::Status onTorchStatusChanged(int32_t, const String16&) {
             return binder::Status::ok();
         }
+        virtual binder::Status onTorchStrengthLevelChanged(const String16&, int32_t) {
+            return binder::Status::ok();
+        }
 
         virtual binder::Status onCameraAccessPrioritiesChanged();
         virtual binder::Status onCameraOpened(const String16&, const String16&) {
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 0e9740a..4015417 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -3660,7 +3660,8 @@
      * YUV_420_888    | all output sizes available for JPEG, up to the maximum video size | LIMITED        |
      * IMPLEMENTATION_DEFINED | same as YUV_420_888                  | Any            |</p>
      * <p>For applications targeting SDK version 31 or newer, if the mobile device declares to be
-     * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CDOES/MEDIA_PERFORMANCE_CLASS.html">media performance class</a> S,
+     * media performance class 12 or higher by setting
+     * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CDOES/MEDIA_PERFORMANCE_CLASS.html">MEDIA_PERFORMANCE_CLASS</a> to be 31 or larger,
      * the primary camera devices (first rear/front camera in the camera ID list) will not
      * support JPEG sizes smaller than 1080p. If the application configures a JPEG stream
      * smaller than 1080p, the camera device will round up the JPEG image size to at least
@@ -3678,9 +3679,11 @@
      * YUV_420_888    | all output sizes available for FULL hardware level, up to the maximum video size | LIMITED        |
      * IMPLEMENTATION_DEFINED | same as YUV_420_888                  | Any            |</p>
      * <p>For applications targeting SDK version 31 or newer, if the mobile device doesn't declare
-     * to be media performance class S, or if the camera device isn't a primary rear/front
-     * camera, the minimum required output stream configurations are the same as for applications
-     * targeting SDK version older than 31.</p>
+     * to be media performance class 12 or higher by setting
+     * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CDOES/MEDIA_PERFORMANCE_CLASS.html">MEDIA_PERFORMANCE_CLASS</a> to 31 or larger,
+     * or if the camera device isn't a primary rear/front camera, the minimum required output
+     * stream configurations are the same as for applications targeting SDK version older than
+     * 31.</p>
      * <p>Refer to ACAMERA_REQUEST_AVAILABLE_CAPABILITIES for additional
      * mandatory stream configurations on a per-capability basis.</p>
      * <p>Exception on 176x144 (QCIF) resolution: camera devices usually have a fixed capability for
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index 9f2f430..17ea512 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -96,6 +96,12 @@
         return binder::Status::ok();
     };
 
+    virtual binder::Status onTorchStrengthLevelChanged(const String16& /*cameraId*/,
+            int32_t /*torchStrength*/) {
+        // No op
+        return binder::Status::ok();
+    }
+
     virtual binder::Status onCameraAccessPrioritiesChanged() {
         // No op
         return binder::Status::ok();
diff --git a/drm/mediadrm/plugins/clearkey/hidl/Android.bp b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
index 6c68532..02ac943 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
@@ -93,6 +93,11 @@
     srcs: ["protos/DeviceFiles.proto"],
 }
 
+cc_library {
+    name: "libclearkeyhidl",
+    defaults: ["clearkey_service_defaults"],
+}
+
 cc_binary {
     name: "android.hardware.drm@1.2-service.clearkey",
     defaults: ["clearkey_service_defaults"],
@@ -126,3 +131,37 @@
     init_rc: ["android.hardware.drm@1.4-service-lazy.clearkey.rc"],
     vintf_fragments: ["manifest_android.hardware.drm@1.4-service.clearkey.xml"],
 }
+
+cc_fuzz {
+    name: "clearkeyV1.4_fuzzer",
+    vendor: true,
+    srcs: [
+        "fuzzer/clearkeyV1.4_fuzzer.cpp",
+    ],
+    static_libs: [
+        "libclearkeyhidl",
+        "libclearkeycommon",
+        "libclearkeydevicefiles-protos",
+        "libjsmn",
+        "libprotobuf-cpp-lite",
+        "libutils",
+    ],
+    shared_libs: [
+        "android.hidl.allocator@1.0",
+        "android.hardware.drm@1.0",
+        "android.hardware.drm@1.1",
+        "android.hardware.drm@1.2",
+        "android.hardware.drm@1.3",
+        "android.hardware.drm@1.4",
+        "libcrypto",
+        "libhidlbase",
+        "libhidlmemory",
+        "liblog",
+    ],
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
index 0cd9375..32d7723 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -187,7 +187,7 @@
         return Status_V1_2::ERROR_DRM_CANNOT_HANDLE;
     }
 
-    *defaultUrl = "";
+    *defaultUrl = "https://default.url";
     *keyRequestType = KeyRequestType_V1_1::UNKNOWN;
     *request = std::vector<uint8_t>();
 
diff --git a/drm/mediadrm/plugins/clearkey/hidl/fuzzer/README.md b/drm/mediadrm/plugins/clearkey/hidl/fuzzer/README.md
new file mode 100644
index 0000000..cb45460
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/fuzzer/README.md
@@ -0,0 +1,52 @@
+# Fuzzer for android.hardware.drm@1.4-service.clearkey
+
+## Plugin Design Considerations
+The fuzzer plugin for android.hardware.drm@1.4-service.clearkey is designed based on an understanding of the
+source code and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+android.hardware.drm@1.4-service.clearkey supports the following parameters:
+1. Security Level (parameter name: `securityLevel`)
+2. Mime Type (parameter name: `mimeType`)
+3. Key Type (parameter name: `keyType`)
+4. Crypto Mode (parameter name: `cryptoMode`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `securityLevel` | 0.`SecurityLevel::UNKNOWN` 1.`SecurityLevel::SW_SECURE_CRYPTO` 2.`SecurityLevel::SW_SECURE_DECODE` 3.`SecurityLevel::HW_SECURE_CRYPTO`  4.`SecurityLevel::HW_SECURE_DECODE` 5.`SecurityLevel::HW_SECURE_ALL`| Value obtained from FuzzedDataProvider in the range 0 to 5|
+| `mimeType` | 0.`video/mp4` 1.`video/mpeg` 2.`video/x-flv` 3.`video/mj2` 4.`video/3gp2` 5.`video/3gpp` 6.`video/3gpp2` 7.`audio/mp4` 8.`audio/mpeg` 9.`audio/aac` 10.`audio/3gp2` 11.`audio/3gpp` 12.`audio/3gpp2` 13.`audio/webm` 14.`video/webm` 15.`webm` 16.`cenc` 17.`video/unknown` 18.`audio/unknown`| Value obtained from FuzzedDataProvider in the range 0 to 18|
+| `keyType` | 0.`KeyType::OFFLINE` 1.`KeyType::STREAMING` 2.`KeyType::RELEASE` | Value obtained from FuzzedDataProvider in the range 0 to 2|
+| `cryptoMode` | 0.`Mode::UNENCRYPTED` 1.`Mode::AES_CTR` 2.`Mode::AES_CBC_CTS` 3.`Mode::AES_CBC` | Value obtained from FuzzedDataProvider in the range 0 to 3|
+
+This also ensures that the plugin is always deterministic for any given input.
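+
+As a rough illustration (a sketch only, not code from the service itself), a parameter such as
+`mimeType` can be picked from its valid-value table with `FuzzedDataProvider`; this mirrors the
+`getValueFromArray` helper used in `clearkeyV1.4_fuzzer.cpp`:
+```
+#include <fuzzer/FuzzedDataProvider.h>
+
+// Table of valid values (subset of the fuzzer's kMimeType table).
+const char *kMimeType[] = {"video/mp4", "video/mpeg", "audio/aac"};
+
+// Pick one entry of the table based on the incoming fuzz data.
+template <typename T, size_t size>
+T getValueFromArray(FuzzedDataProvider *fdp, const T (&arr)[size]) {
+  return arr[fdp->ConsumeIntegralInRange<int32_t>(0, size - 1)];
+}
+
+// Example use inside LLVMFuzzerTestOneInput:
+//   FuzzedDataProvider fdp(data, size);
+//   const char *mimeType = getValueFromArray(&fdp, kMimeType);
+```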
+
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the module.
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc.) and doesn't `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes the steps to build the clearkeyV1.4_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer:
+```
+  $ mm -j$(nproc) clearkeyV1.4_fuzzer
+```
+#### Steps to run
+To run on the device:
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/${TARGET_ARCH}/clearkeyV1.4_fuzzer/vendor/hw/clearkeyV1.4_fuzzer
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/drm/mediadrm/plugins/clearkey/hidl/fuzzer/clearkeyV1.4_fuzzer.cpp b/drm/mediadrm/plugins/clearkey/hidl/fuzzer/clearkeyV1.4_fuzzer.cpp
new file mode 100644
index 0000000..afe0e6c
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/fuzzer/clearkeyV1.4_fuzzer.cpp
@@ -0,0 +1,719 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include <include/CreatePluginFactories.h>
+
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <hidlmemory/mapping.h>
+#include <include/ClearKeyDrmProperties.h>
+#include <include/CryptoFactory.h>
+#include <include/CryptoPlugin.h>
+#include <include/DrmPlugin.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+namespace drm = ::android::hardware::drm;
+using namespace std;
+using namespace android;
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_memory;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hidl::allocator::V1_0::IAllocator;
+using ::android::hidl::memory::V1_0::IMemory;
+using drm::V1_0::BufferType;
+using drm::V1_0::DestinationBuffer;
+using drm::V1_0::EventType;
+using drm::V1_0::ICryptoPlugin;
+using drm::V1_0::IDrmPlugin;
+using drm::V1_0::IDrmPluginListener;
+using drm::V1_0::KeyedVector;
+using drm::V1_0::KeyStatus;
+using drm::V1_0::KeyStatusType;
+using drm::V1_0::KeyType;
+using drm::V1_0::Mode;
+using drm::V1_0::Pattern;
+using drm::V1_0::SecureStop;
+using drm::V1_0::SharedBuffer;
+using drm::V1_0::Status;
+using drm::V1_0::SubSample;
+using drm::V1_1::DrmMetricGroup;
+using drm::V1_1::HdcpLevel;
+using drm::V1_1::SecureStopRelease;
+using drm::V1_1::SecurityLevel;
+using drm::V1_2::KeySetId;
+using drm::V1_2::OfflineLicenseState;
+using drm::V1_4::clearkey::ICryptoFactory;
+using drm::V1_4::clearkey::IDrmFactory;
+using drm::V1_4::clearkey::kAlgorithmsKey;
+using drm::V1_4::clearkey::kClientIdKey;
+using drm::V1_4::clearkey::kDeviceIdKey;
+using drm::V1_4::clearkey::kDrmErrorTestKey;
+using drm::V1_4::clearkey::kListenerTestSupportKey;
+using drm::V1_4::clearkey::kMetricsKey;
+using drm::V1_4::clearkey::kPluginDescriptionKey;
+using drm::V1_4::clearkey::kVendorKey;
+using drm::V1_4::clearkey::kVersionKey;
+
+typedef ::android::hardware::hidl_vec<uint8_t> SessionId;
+typedef ::android::hardware::hidl_vec<uint8_t> SecureStopId;
+
+static const uint8_t kInvalidUUID[] = {0x10, 0x20, 0x30, 0x40, 0x50, 0x60,
+                                       0x70, 0x80, 0x10, 0x20, 0x30, 0x40,
+                                       0x50, 0x60, 0x70, 0x80};
+
+static const uint8_t kClearKeyUUID[] = {0xE2, 0x71, 0x9D, 0x58, 0xA9, 0x85,
+                                        0xB3, 0xC9, 0x78, 0x1A, 0xB0, 0x30,
+                                        0xAF, 0x78, 0xD3, 0x0E};
+
+const SecurityLevel kSecurityLevel[] = {
+    SecurityLevel::UNKNOWN,          SecurityLevel::SW_SECURE_CRYPTO,
+    SecurityLevel::SW_SECURE_DECODE, SecurityLevel::HW_SECURE_CRYPTO,
+    SecurityLevel::HW_SECURE_DECODE, SecurityLevel::HW_SECURE_ALL};
+
+const char *kMimeType[] = {
+    "video/mp4",  "video/mpeg",  "video/x-flv",   "video/mj2",    "video/3gp2",
+    "video/3gpp", "video/3gpp2", "audio/mp4",     "audio/mpeg",   "audio/aac",
+    "audio/3gp2", "audio/3gpp",  "audio/3gpp2",   "audio/webm",   "video/webm",
+    "webm",       "cenc",        "video/unknown", "audio/unknown"};
+
+const char *kCipherAlgorithm[] = {"AES/CBC/NoPadding", ""};
+
+const char *kMacAlgorithm[] = {"HmacSHA256", ""};
+
+const char *kRSAAlgorithm[] = {"RSASSA-PSS-SHA1", ""};
+
+const std::string kProperty[] = {kVendorKey,
+                                 kVersionKey,
+                                 kPluginDescriptionKey,
+                                 kAlgorithmsKey,
+                                 kListenerTestSupportKey,
+                                 kDrmErrorTestKey,
+                                 kDeviceIdKey,
+                                 kClientIdKey,
+                                 kMetricsKey,
+                                 "placeholder"};
+
+const KeyType kKeyType[] = {KeyType::OFFLINE, KeyType::STREAMING,
+                            KeyType::RELEASE};
+
+const Mode kCryptoMode[] = {Mode::UNENCRYPTED, Mode::AES_CTR, Mode::AES_CBC_CTS,
+                            Mode::AES_CBC};
+
+const hidl_vec<uint8_t> validInitData = {
+    // BMFF box header (4 bytes size + 'pssh')
+    0x00, 0x00, 0x00, 0x34, 0x70, 0x73, 0x73, 0x68,
+    // full box header (version = 1 flags = 0)
+    0x01, 0x00, 0x00, 0x00,
+    // system id
+    0x10, 0x77, 0xef, 0xec, 0xc0, 0xb2, 0x4d, 0x02, 0xac, 0xe3, 0x3c, 0x1e,
+    0x52, 0xe2, 0xfb, 0x4b,
+    // number of key ids
+    0x00, 0x00, 0x00, 0x01,
+    // key id
+    0x60, 0x06, 0x1e, 0x01, 0x7e, 0x47, 0x7e, 0x87, 0x7e, 0x57, 0xd0, 0x0d,
+    0x1e, 0xd0, 0x0d, 0x1e,
+    // size of data, must be zero
+    0x00, 0x00, 0x00, 0x00};
+
+const hidl_vec<uint8_t> validKeyResponse = {
+    0x7b, 0x22, 0x6b, 0x65, 0x79, 0x73, 0x22, 0x3a, 0x5b, 0x7b, 0x22,
+    0x6b, 0x74, 0x79, 0x22, 0x3a, 0x22, 0x6f, 0x63, 0x74, 0x22, 0x2c,
+    0x22, 0x6b, 0x69, 0x64, 0x22, 0x3a, 0x22, 0x59, 0x41, 0x59, 0x65,
+    0x41, 0x58, 0x35, 0x48, 0x66, 0x6f, 0x64, 0x2d, 0x56, 0x39, 0x41,
+    0x4e, 0x48, 0x74, 0x41, 0x4e, 0x48, 0x67, 0x22, 0x2c, 0x22, 0x6b,
+    0x22, 0x3a, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x54, 0x65,
+    0x73, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x61, 0x73, 0x65, 0x36, 0x34,
+    0x67, 0x67, 0x67, 0x22, 0x7d, 0x5d, 0x7d, 0x0a};
+
+const size_t kAESBlockSize = 16;
+const size_t kMaxStringLength = 100;
+const size_t kMaxSubSamples = 10;
+const size_t kMaxNumBytes = 1000;
+const size_t kSegmentIndex = 0;
+
+template <typename T, size_t size>
+T getValueFromArray(FuzzedDataProvider *fdp, const T (&arr)[size]) {
+  return arr[fdp->ConsumeIntegralInRange<int32_t>(0, size - 1)];
+}
+
+class TestDrmPluginListener : public IDrmPluginListener {
+public:
+  TestDrmPluginListener() {}
+  virtual ~TestDrmPluginListener() {}
+
+  virtual Return<void> sendEvent(EventType /*eventType*/,
+                                 const hidl_vec<uint8_t> & /*sessionId*/,
+                                 const hidl_vec<uint8_t> & /*data*/) override {
+    return Return<void>();
+  }
+
+  virtual Return<void>
+  sendExpirationUpdate(const hidl_vec<uint8_t> & /*sessionId*/,
+                       int64_t /*expiryTimeInMS*/) override {
+    return Return<void>();
+  }
+
+  virtual Return<void>
+  sendKeysChange(const hidl_vec<uint8_t> & /*sessionId*/,
+                 const hidl_vec<KeyStatus> & /*keyStatusList*/,
+                 bool /*hasNewUsableKey*/) override {
+    return Return<void>();
+  }
+};
+
+class ClearKeyFuzzer {
+public:
+  ~ClearKeyFuzzer() { deInit(); }
+  bool init();
+  void process(const uint8_t *data, size_t size);
+
+private:
+  void deInit();
+  void invokeDrmPlugin(const uint8_t *data, size_t size);
+  void invokeCryptoPlugin(const uint8_t *data);
+  void invokeDrm(const uint8_t *data, size_t size);
+  void invokeCrypto(const uint8_t *data);
+  void invokeDrmDecryptEncryptAPI(const uint8_t *data, size_t size);
+  bool invokeDrmFactory();
+  bool invokeCryptoFactory();
+  void invokeDrmV1_4API();
+  void invokeDrmSetAlgorithmAPI();
+  void invokeDrmPropertyAPI();
+  void invokeDrmSecureStopAPI();
+  void invokeDrmOfflineLicenseAPI(const uint8_t *data, size_t size);
+  SessionId getSessionId();
+  SecureStopRelease makeSecureRelease(const SecureStop &stop);
+  sp<IDrmFactory> mDrmFactory = nullptr;
+  sp<ICryptoFactory> mCryptoFactory = nullptr;
+  sp<IDrmPlugin> mDrmPlugin = nullptr;
+  sp<drm::V1_1::IDrmPlugin> mDrmPluginV1_1 = nullptr;
+  sp<drm::V1_2::IDrmPlugin> mDrmPluginV1_2 = nullptr;
+  sp<drm::V1_4::IDrmPlugin> mDrmPluginV1_4 = nullptr;
+  sp<drm::V1_4::ICryptoPlugin> mCryptoPluginV1_4 = nullptr;
+  sp<ICryptoPlugin> mCryptoPlugin = nullptr;
+  FuzzedDataProvider *mFDP = nullptr;
+  SessionId mSessionId = {};
+  SessionId mSessionIdV1 = {};
+};
+
+void ClearKeyFuzzer::deInit() {
+  if (mDrmPluginV1_1) {
+    mDrmPluginV1_1->closeSession(mSessionIdV1);
+  }
+  if (mDrmPluginV1_2) {
+    mDrmPluginV1_2->closeSession(mSessionId);
+  }
+  mDrmFactory.clear();
+  mCryptoFactory.clear();
+  mDrmPlugin.clear();
+  mDrmPluginV1_1.clear();
+  mDrmPluginV1_2.clear();
+  mDrmPluginV1_4.clear();
+  mCryptoPlugin.clear();
+  mCryptoPluginV1_4.clear();
+  mSessionId = {};
+  mSessionIdV1 = {};
+}
+
+void ClearKeyFuzzer::invokeDrmV1_4API() {
+  mDrmPluginV1_4->requiresSecureDecoderDefault(
+      getValueFromArray(mFDP, kMimeType));
+  mDrmPluginV1_4->requiresSecureDecoder(
+      getValueFromArray(mFDP, kMimeType),
+      getValueFromArray(mFDP, kSecurityLevel));
+  mDrmPluginV1_4->setPlaybackId(
+      mSessionId, mFDP->ConsumeRandomLengthString(kMaxStringLength).c_str());
+  drm::V1_4::IDrmPlugin::getLogMessages_cb cb =
+      [&]([[maybe_unused]] drm::V1_4::Status status,
+          [[maybe_unused]] hidl_vec<drm::V1_4::LogMessage> logs) {};
+  mDrmPluginV1_4->getLogMessages(cb);
+}
+
+void ClearKeyFuzzer::invokeDrmSetAlgorithmAPI() {
+  const hidl_string cipherAlgo =
+      mFDP->ConsumeBool()
+          ? mFDP->ConsumeRandomLengthString(kMaxStringLength).c_str()
+          : hidl_string(kCipherAlgorithm[mFDP->ConsumeBool()]);
+  mDrmPluginV1_2->setCipherAlgorithm(mSessionId, cipherAlgo);
+
+  const hidl_string macAlgo =
+      mFDP->ConsumeBool()
+          ? mFDP->ConsumeRandomLengthString(kMaxStringLength).c_str()
+          : hidl_string(kMacAlgorithm[mFDP->ConsumeBool()]);
+  mDrmPluginV1_2->setMacAlgorithm(mSessionId, macAlgo);
+}
+
+void ClearKeyFuzzer::invokeDrmPropertyAPI() {
+  mDrmPluginV1_2->setPropertyString(
+      hidl_string(getValueFromArray(mFDP, kProperty)), hidl_string("value"));
+
+  hidl_string stringValue;
+  mDrmPluginV1_2->getPropertyString(
+      getValueFromArray(mFDP, kProperty),
+      [&](Status status, const hidl_string &hValue) {
+        if (status == Status::OK) {
+          stringValue = hValue;
+        }
+      });
+
+  hidl_vec<uint8_t> value = {};
+  mDrmPluginV1_2->setPropertyByteArray(
+      hidl_string(getValueFromArray(mFDP, kProperty)), value);
+
+  hidl_vec<uint8_t> byteValue;
+  mDrmPluginV1_2->getPropertyByteArray(
+      getValueFromArray(mFDP, kProperty),
+      [&](Status status, const hidl_vec<uint8_t> &hValue) {
+        if (status == Status::OK) {
+          byteValue = hValue;
+        }
+      });
+}
+
+SessionId ClearKeyFuzzer::getSessionId() {
+  SessionId emptySessionId = {};
+  return mFDP->ConsumeBool() ? mSessionId : emptySessionId;
+}
+
+void ClearKeyFuzzer::invokeDrmDecryptEncryptAPI(const uint8_t *data,
+                                                size_t size) {
+  uint32_t currSessions, maximumSessions;
+  mDrmPluginV1_2->getNumberOfSessions(
+      [&](Status status, uint32_t hCurrentSessions, uint32_t hMaxSessions) {
+        if (status == Status::OK) {
+          currSessions = hCurrentSessions;
+          maximumSessions = hMaxSessions;
+        }
+      });
+
+  HdcpLevel connected, maximum;
+  mDrmPluginV1_2->getHdcpLevels([&](Status status,
+                                    const HdcpLevel &hConnectedLevel,
+                                    const HdcpLevel &hMaxLevel) {
+    if (status == Status::OK) {
+      connected = hConnectedLevel;
+      maximum = hMaxLevel;
+    }
+  });
+
+  drm::V1_2::HdcpLevel connectedV1_2, maximumV1_2;
+  mDrmPluginV1_2->getHdcpLevels_1_2(
+      [&](drm::V1_2::Status status, const drm::V1_2::HdcpLevel &connectedLevel,
+          const drm::V1_2::HdcpLevel &maxLevel) {
+        if (status == drm::V1_2::Status::OK) {
+          connectedV1_2 = connectedLevel;
+          maximumV1_2 = maxLevel;
+        }
+      });
+
+  SecurityLevel securityLevel;
+  mDrmPluginV1_2->getSecurityLevel(mSessionId,
+                                   [&](Status status, SecurityLevel hLevel) {
+                                     if (status == Status::OK) {
+                                       securityLevel = hLevel;
+                                     }
+                                   });
+
+  hidl_vec<DrmMetricGroup> metrics;
+  mDrmPluginV1_2->getMetrics(
+      [&](Status status, hidl_vec<DrmMetricGroup> hMetricGroups) {
+        if (status == Status::OK) {
+          metrics = hMetricGroups;
+        }
+      });
+
+  hidl_string certificateType;
+  hidl_string certificateAuthority;
+  mDrmPluginV1_2->getProvisionRequest(certificateType, certificateAuthority,
+                                      [&]([[maybe_unused]] Status status,
+                                          const hidl_vec<uint8_t> &,
+                                          const hidl_string &) {});
+
+  mDrmPluginV1_2->getProvisionRequest_1_2(
+      certificateType, certificateAuthority,
+      [&]([[maybe_unused]] drm::V1_2::Status status, const hidl_vec<uint8_t> &,
+          const hidl_string &) {});
+
+  hidl_vec<uint8_t> response;
+  mDrmPluginV1_2->provideProvisionResponse(
+      response, [&]([[maybe_unused]] Status status, const hidl_vec<uint8_t> &,
+                    const hidl_vec<uint8_t> &) {});
+
+  hidl_vec<uint8_t> initData = {};
+  if (mFDP->ConsumeBool()) {
+    initData = validInitData;
+  } else {
+    initData.setToExternal(const_cast<uint8_t *>(data), kAESBlockSize);
+  }
+  hidl_string mimeType = getValueFromArray(mFDP, kMimeType);
+  KeyType keyType = mFDP->ConsumeBool()
+                        ? static_cast<KeyType>(mFDP->ConsumeIntegral<size_t>())
+                        : getValueFromArray(mFDP, kKeyType);
+  KeyedVector optionalParameters;
+  mDrmPluginV1_2->getKeyRequest_1_2(
+      mSessionId, initData, mimeType, keyType, optionalParameters,
+      [&]([[maybe_unused]] drm::V1_2::Status status, const hidl_vec<uint8_t> &,
+          drm::V1_1::KeyRequestType, const hidl_string &) {});
+  mDrmPluginV1_1->getKeyRequest_1_1(
+      mSessionIdV1, initData, mimeType, keyType, optionalParameters,
+      [&]([[maybe_unused]] drm::V1_0::Status status, const hidl_vec<uint8_t> &,
+          drm::V1_1::KeyRequestType, const hidl_string &) {});
+  hidl_vec<uint8_t> emptyInitData = {};
+  mDrmPlugin->getKeyRequest(
+      mSessionId, mFDP->ConsumeBool() ? initData : emptyInitData, mimeType,
+      keyType, optionalParameters,
+      [&]([[maybe_unused]] drm::V1_0::Status status, const hidl_vec<uint8_t> &,
+          drm::V1_0::KeyRequestType, const hidl_string &) {});
+
+  hidl_vec<uint8_t> keyResponse = {};
+  if (mFDP->ConsumeBool()) {
+    keyResponse = validKeyResponse;
+  } else {
+    keyResponse.setToExternal(const_cast<uint8_t *>(data), size);
+  }
+  hidl_vec<uint8_t> keySetId;
+  hidl_vec<uint8_t> emptyKeyResponse = {};
+  mDrmPluginV1_2->provideKeyResponse(
+      getSessionId(), mFDP->ConsumeBool() ? keyResponse : emptyKeyResponse,
+      [&](Status status, const hidl_vec<uint8_t> &hKeySetId) {
+        if (status == Status::OK) {
+          keySetId = hKeySetId;
+        }
+      });
+
+  mDrmPluginV1_2->restoreKeys(getSessionId(), keySetId);
+
+  mDrmPluginV1_2->queryKeyStatus(
+      getSessionId(),
+      [&]([[maybe_unused]] Status status, KeyedVector /* info */) {});
+
+  hidl_vec<uint8_t> keyId, input, iv;
+  keyId.setToExternal(const_cast<uint8_t *>(data), size);
+  input.setToExternal(const_cast<uint8_t *>(data), size);
+  iv.setToExternal(const_cast<uint8_t *>(data), size);
+  mDrmPluginV1_2->encrypt(
+      getSessionId(), keyId, input, iv,
+      [&]([[maybe_unused]] Status status, const hidl_vec<uint8_t> &) {});
+
+  mDrmPluginV1_2->decrypt(
+      getSessionId(), keyId, input, iv,
+      [&]([[maybe_unused]] Status status, const hidl_vec<uint8_t> &) {});
+
+  hidl_vec<uint8_t> message;
+  message.setToExternal(const_cast<uint8_t *>(data), size);
+  mDrmPluginV1_2->sign(
+      getSessionId(), keyId, message,
+      [&]([[maybe_unused]] Status status, const hidl_vec<uint8_t> &) {});
+
+  hidl_vec<uint8_t> signature;
+  signature.setToExternal(const_cast<uint8_t *>(data), size);
+  mDrmPluginV1_2->verify(getSessionId(), keyId, message, signature,
+                         [&]([[maybe_unused]] Status status, bool) {});
+
+  hidl_vec<uint8_t> wrappedKey;
+  signature.setToExternal(const_cast<uint8_t *>(data), size);
+  mDrmPluginV1_2->signRSA(
+      getSessionId(), kRSAAlgorithm[mFDP->ConsumeBool()], message, wrappedKey,
+      [&]([[maybe_unused]] Status status, const hidl_vec<uint8_t> &) {});
+
+  mDrmPluginV1_2->removeKeys(getSessionId());
+}
+
+/**
+ * Helper function to create a secure release message for
+ * a secure stop. The clearkey secure stop release format
+ * is just a count followed by the secure stop opaque data.
+ */
+SecureStopRelease ClearKeyFuzzer::makeSecureRelease(const SecureStop &stop) {
+  std::vector<uint8_t> stopData = stop.opaqueData;
+  std::vector<uint8_t> buffer;
+  std::string count = "0001";
+
+  auto it = buffer.insert(buffer.begin(), count.begin(), count.end());
+  buffer.insert(it + count.size(), stopData.begin(), stopData.end());
+  SecureStopRelease release = {.opaqueData = hidl_vec<uint8_t>(buffer)};
+  return release;
+}
+
+void ClearKeyFuzzer::invokeDrmSecureStopAPI() {
+  SecureStopId ssid;
+  mDrmPluginV1_2->getSecureStop(
+      ssid, [&]([[maybe_unused]] Status status, const SecureStop &) {});
+
+  mDrmPluginV1_2->getSecureStopIds(
+      [&]([[maybe_unused]] Status status,
+          [[maybe_unused]] const hidl_vec<SecureStopId> &secureStopIds) {});
+
+  SecureStopRelease release;
+  mDrmPluginV1_2->getSecureStops(
+      [&]([[maybe_unused]] Status status, const hidl_vec<SecureStop> &stops) {
+        if (stops.size() > 0) {
+          release = makeSecureRelease(
+              stops[mFDP->ConsumeIntegralInRange<size_t>(0, stops.size() - 1)]);
+        }
+      });
+
+  mDrmPluginV1_2->releaseSecureStops(release);
+
+  mDrmPluginV1_2->removeSecureStop(ssid);
+
+  mDrmPluginV1_2->removeAllSecureStops();
+
+  mDrmPluginV1_2->releaseSecureStop(ssid);
+
+  mDrmPluginV1_2->releaseAllSecureStops();
+}
+
+void ClearKeyFuzzer::invokeDrmOfflineLicenseAPI(const uint8_t *data,
+                                                size_t size) {
+  hidl_vec<KeySetId> keySetIds = {};
+  mDrmPluginV1_2->getOfflineLicenseKeySetIds(
+      [&](Status status, const hidl_vec<KeySetId> &hKeySetIds) {
+        if (status == Status::OK) {
+          keySetIds = hKeySetIds;
+        }
+      });
+
+  OfflineLicenseState licenseState;
+  KeySetId keySetId = {};
+  if (keySetIds.size() > 0) {
+    keySetId = keySetIds[mFDP->ConsumeIntegralInRange<size_t>(
+        0, keySetIds.size() - 1)];
+  } else {
+    keySetId.setToExternal(const_cast<uint8_t *>(data), size);
+  }
+  mDrmPluginV1_2->getOfflineLicenseState(
+      keySetId, [&](Status status, OfflineLicenseState hLicenseState) {
+        if (status == Status::OK) {
+          licenseState = hLicenseState;
+        }
+      });
+
+  mDrmPluginV1_2->removeOfflineLicense(keySetId);
+}
+
+void ClearKeyFuzzer::invokeDrmPlugin(const uint8_t *data, size_t size) {
+  SecurityLevel secLevel =
+      mFDP->ConsumeBool()
+          ? getValueFromArray(mFDP, kSecurityLevel)
+          : static_cast<SecurityLevel>(mFDP->ConsumeIntegral<uint32_t>());
+  mDrmPluginV1_1->openSession_1_1(
+      secLevel, [&]([[maybe_unused]] Status status, const SessionId &id) {
+        mSessionIdV1 = id;
+      });
+  mDrmPluginV1_2->openSession([&]([[maybe_unused]] Status status,
+                                  const SessionId &id) { mSessionId = id; });
+
+  sp<TestDrmPluginListener> listener = new TestDrmPluginListener();
+  mDrmPluginV1_2->setListener(listener);
+  const hidl_vec<KeyStatus> keyStatusList = {
+      {{1}, KeyStatusType::USABLE},
+      {{2}, KeyStatusType::EXPIRED},
+      {{3}, KeyStatusType::OUTPUTNOTALLOWED},
+      {{4}, KeyStatusType::STATUSPENDING},
+      {{5}, KeyStatusType::INTERNALERROR},
+  };
+  mDrmPluginV1_2->sendKeysChange(mSessionId, keyStatusList, true);
+
+  invokeDrmV1_4API();
+  invokeDrmSetAlgorithmAPI();
+  invokeDrmPropertyAPI();
+  invokeDrmDecryptEncryptAPI(data, size);
+  invokeDrmSecureStopAPI();
+  invokeDrmOfflineLicenseAPI(data, size);
+}
+
+void ClearKeyFuzzer::invokeCryptoPlugin(const uint8_t *data) {
+  mCryptoPlugin->requiresSecureDecoderComponent(
+      getValueFromArray(mFDP, kMimeType));
+
+  const uint32_t width = mFDP->ConsumeIntegral<uint32_t>();
+  const uint32_t height = mFDP->ConsumeIntegral<uint32_t>();
+  mCryptoPlugin->notifyResolution(width, height);
+
+  mCryptoPlugin->setMediaDrmSession(mSessionId);
+
+  size_t totalSize = 0;
+  const size_t numSubSamples =
+      mFDP->ConsumeIntegralInRange<size_t>(1, kMaxSubSamples);
+
+  const Pattern pattern = {0, 0};
+  hidl_vec<SubSample> subSamples;
+  subSamples.resize(numSubSamples);
+
+  for (size_t i = 0; i < numSubSamples; ++i) {
+    const uint32_t clearBytes =
+        mFDP->ConsumeIntegralInRange<uint32_t>(0, kMaxNumBytes);
+    const uint32_t encryptedBytes =
+        mFDP->ConsumeIntegralInRange<uint32_t>(0, kMaxNumBytes);
+    subSamples[i].numBytesOfClearData = clearBytes;
+    subSamples[i].numBytesOfEncryptedData = encryptedBytes;
+    totalSize += subSamples[i].numBytesOfClearData;
+    totalSize += subSamples[i].numBytesOfEncryptedData;
+  }
+
+  // The first totalSize bytes of the shared memory are the encrypted
+  // input; the second totalSize bytes are the decrypted output.
+  size_t memoryBytes = totalSize * 2;
+
+  sp<IAllocator> ashmemAllocator = IAllocator::getService("ashmem");
+  if (!ashmemAllocator.get()) {
+    return;
+  }
+
+  hidl_memory hidlMemory;
+  ashmemAllocator->allocate(memoryBytes, [&]([[maybe_unused]] bool success,
+                                             const hidl_memory &memory) {
+    mCryptoPlugin->setSharedBufferBase(memory, kSegmentIndex);
+    hidlMemory = memory;
+  });
+
+  sp<IMemory> mappedMemory = mapMemory(hidlMemory);
+  if (!mappedMemory.get()) {
+    return;
+  }
+  mCryptoPlugin->setSharedBufferBase(hidlMemory, kSegmentIndex);
+
+  uint32_t srcBufferId =
+      mFDP->ConsumeBool() ? kSegmentIndex : mFDP->ConsumeIntegral<uint32_t>();
+  const SharedBuffer sourceBuffer = {
+      .bufferId = srcBufferId, .offset = 0, .size = totalSize};
+
+  BufferType type = mFDP->ConsumeBool() ? BufferType::SHARED_MEMORY
+                                        : BufferType::NATIVE_HANDLE;
+  uint32_t destBufferId =
+      mFDP->ConsumeBool() ? kSegmentIndex : mFDP->ConsumeIntegral<uint32_t>();
+  const DestinationBuffer destBuffer = {
+      .type = type,
+      {.bufferId = destBufferId, .offset = totalSize, .size = totalSize},
+      .secureMemory = nullptr};
+
+  const uint64_t offset = 0;
+  uint32_t bytesWritten = 0;
+  hidl_array<uint8_t, kAESBlockSize> keyId =
+      hidl_array<uint8_t, kAESBlockSize>(data);
+  hidl_array<uint8_t, kAESBlockSize> iv =
+      hidl_array<uint8_t, kAESBlockSize>(data);
+  Mode mode = getValueFromArray(mFDP, kCryptoMode);
+  mCryptoPlugin->decrypt(
+      mFDP->ConsumeBool(), keyId, iv, mode, pattern, subSamples, sourceBuffer,
+      offset, destBuffer,
+      [&]([[maybe_unused]] Status status, uint32_t count,
+          [[maybe_unused]] string detailedError) { bytesWritten = count; });
+  drm::V1_4::IDrmPlugin::getLogMessages_cb cb =
+      [&]([[maybe_unused]] drm::V1_4::Status status,
+          [[maybe_unused]] hidl_vec<drm::V1_4::LogMessage> logs) {};
+  mCryptoPluginV1_4->getLogMessages(cb);
+}
+
+bool ClearKeyFuzzer::invokeDrmFactory() {
+  hidl_string packageName(
+      mFDP->ConsumeRandomLengthString(kMaxStringLength).c_str());
+  hidl_string mimeType(getValueFromArray(mFDP, kMimeType));
+  SecurityLevel securityLevel =
+      mFDP->ConsumeBool()
+          ? getValueFromArray(mFDP, kSecurityLevel)
+          : static_cast<SecurityLevel>(mFDP->ConsumeIntegral<uint32_t>());
+  const hidl_array<uint8_t, 16> uuid =
+      mFDP->ConsumeBool() ? kClearKeyUUID : kInvalidUUID;
+  mDrmFactory->isCryptoSchemeSupported_1_2(uuid, mimeType, securityLevel);
+  mDrmFactory->createPlugin(
+      uuid, packageName, [&](Status status, const sp<IDrmPlugin> &plugin) {
+        if (status == Status::OK) {
+          mDrmPlugin = plugin.get();
+          mDrmPluginV1_1 = drm::V1_1::IDrmPlugin::castFrom(mDrmPlugin);
+          mDrmPluginV1_2 = drm::V1_2::IDrmPlugin::castFrom(mDrmPlugin);
+          mDrmPluginV1_4 = drm::V1_4::IDrmPlugin::castFrom(mDrmPlugin);
+        }
+      });
+
+  std::vector<hidl_array<uint8_t, 16>> supportedSchemes;
+  mDrmFactory->getSupportedCryptoSchemes(
+      [&](const hidl_vec<hidl_array<uint8_t, 16>> &schemes) {
+        for (const auto &scheme : schemes) {
+          supportedSchemes.push_back(scheme);
+        }
+      });
+
+  if (!(mDrmPlugin && mDrmPluginV1_1 && mDrmPluginV1_2 && mDrmPluginV1_4)) {
+    return false;
+  }
+  return true;
+}
+
+bool ClearKeyFuzzer::invokeCryptoFactory() {
+  const hidl_array<uint8_t, 16> uuid =
+      mFDP->ConsumeBool() ? kClearKeyUUID : kInvalidUUID;
+  mCryptoFactory->createPlugin(
+      uuid, mSessionId, [this](Status status, const sp<ICryptoPlugin> &plugin) {
+        if (status == Status::OK) {
+          mCryptoPlugin = plugin;
+          mCryptoPluginV1_4 = drm::V1_4::ICryptoPlugin::castFrom(mCryptoPlugin);
+        }
+      });
+
+  if (!mCryptoPlugin && !mCryptoPluginV1_4) {
+    return false;
+  }
+  return true;
+}
+
+void ClearKeyFuzzer::invokeDrm(const uint8_t *data, size_t size) {
+  if (!invokeDrmFactory()) {
+    return;
+  }
+  invokeDrmPlugin(data, size);
+}
+
+void ClearKeyFuzzer::invokeCrypto(const uint8_t *data) {
+  if (!invokeCryptoFactory()) {
+    return;
+  }
+  invokeCryptoPlugin(data);
+}
+
+void ClearKeyFuzzer::process(const uint8_t *data, size_t size) {
+  mFDP = new FuzzedDataProvider(data, size);
+  invokeDrm(data, size);
+  invokeCrypto(data);
+  delete mFDP;
+}
+
+bool ClearKeyFuzzer::init() {
+  mCryptoFactory =
+      android::hardware::drm::V1_4::clearkey::createCryptoFactory();
+  mDrmFactory = android::hardware::drm::V1_4::clearkey::createDrmFactory();
+  if (!mDrmFactory && !mCryptoFactory) {
+    return false;
+  }
+  return true;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  if (size < kAESBlockSize) {
+    return 0;
+  }
+  ClearKeyFuzzer clearKeyFuzzer;
+  if (clearKeyFuzzer.init()) {
+    clearKeyFuzzer.process(data, size);
+  }
+  return 0;
+}
diff --git a/media/codec2/TEST_MAPPING b/media/codec2/TEST_MAPPING
index 16cb323..c6728c8 100644
--- a/media/codec2/TEST_MAPPING
+++ b/media/codec2/TEST_MAPPING
@@ -35,6 +35,17 @@
           "exclude-filter": "android.media.audio.cts.AudioRecordTest"
         }
       ]
+    },
+    {
+      "name": "CtsMediaPlayerTestCases",
+      "options": [
+        {
+          "include-annotation": "android.platform.test.annotations.Presubmit"
+        },
+        {
+          "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+        }
+      ]
     }
   ]
 }
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index 7bd3358..475d863 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -20,6 +20,7 @@
 
 #include <C2Debug.h>
 #include <C2PlatformSupport.h>
+#include <Codec2BufferUtils.h>
 #include <Codec2Mapper.h>
 #include <SimpleC2Interface.h>
 #include <log/log.h>
@@ -338,6 +339,7 @@
           std::make_shared<SimpleInterface<IntfImpl>>(name, id, intfImpl)),
       mIntf(intfImpl),
       mCodecCtx(nullptr) {
+  mIsFormatR10G10B10A2Supported = IsFormatR10G10B10A2SupportedForLegacyRendering();
   gettimeofday(&mTimeStart, nullptr);
   gettimeofday(&mTimeEnd, nullptr);
 }
@@ -790,7 +792,14 @@
         work->workletsProcessed = 1u;
         return false;
       }
-      format = HAL_PIXEL_FORMAT_RGBA_1010102;
+      // TODO (b/201787956) For devices that do not support HAL_PIXEL_FORMAT_RGBA_1010102,
+      // HAL_PIXEL_FORMAT_YV12 is used as a temporary workaround.
+      if (!mIsFormatR10G10B10A2Supported)  {
+        ALOGE("HAL_PIXEL_FORMAT_RGBA_1010102 isn't supported");
+        format = HAL_PIXEL_FORMAT_YV12;
+      } else {
+        format = HAL_PIXEL_FORMAT_RGBA_1010102;
+      }
     }
   }
   C2MemoryUsage usage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.h b/media/codec2/components/gav1/C2SoftGav1Dec.h
index 134fa0d..f82992d 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.h
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.h
@@ -82,6 +82,7 @@
 
   struct timeval mTimeStart;  // Time at the start of decode()
   struct timeval mTimeEnd;    // Time at the end of decode()
+  bool mIsFormatR10G10B10A2Supported;
 
   bool initDecoder();
   void getVuiParams(const libgav1::DecoderBuffer *buffer);
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 45e2ca8..2da9d5b 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -25,6 +25,7 @@
 
 #include <C2Debug.h>
 #include <C2PlatformSupport.h>
+#include <Codec2BufferUtils.h>
 #include <SimpleC2Interface.h>
 
 #include "C2SoftVpxDec.h"
@@ -351,6 +352,7 @@
       mCodecCtx(nullptr),
       mCoreCount(1),
       mQueue(new Mutexed<ConversionQueue>) {
+      mIsFormatR10G10B10A2Supported = IsFormatR10G10B10A2SupportedForLegacyRendering();
 }
 
 C2SoftVpxDec::~C2SoftVpxDec() {
@@ -804,7 +806,14 @@
         if (defaultColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
             defaultColorAspects->matrix == C2Color::MATRIX_BT2020 &&
             defaultColorAspects->transfer == C2Color::TRANSFER_ST2084) {
-            format = HAL_PIXEL_FORMAT_RGBA_1010102;
+            // TODO (b/201787956) For devices that do not support HAL_PIXEL_FORMAT_RGBA_1010102,
+            // HAL_PIXEL_FORMAT_YV12 is used as a temporary workaround.
+            if (!mIsFormatR10G10B10A2Supported)  {
+                ALOGE("HAL_PIXEL_FORMAT_RGBA_1010102 isn't supported");
+                format = HAL_PIXEL_FORMAT_YV12;
+            } else {
+                format = HAL_PIXEL_FORMAT_RGBA_1010102;
+            }
         }
     }
     C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.h b/media/codec2/components/vpx/C2SoftVpxDec.h
index 2065165..ade162d 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.h
+++ b/media/codec2/components/vpx/C2SoftVpxDec.h
@@ -80,7 +80,7 @@
     };
     std::shared_ptr<Mutexed<ConversionQueue>> mQueue;
     std::vector<sp<ConverterThread>> mConverterThreads;
-
+    bool mIsFormatR10G10B10A2Supported;
     status_t initDecoder();
     status_t destroyDecoder();
     void finishWork(uint64_t index, const std::unique_ptr<C2Work> &work,
diff --git a/media/codec2/hidl/1.0/vts/OWNERS b/media/codec2/hidl/1.0/vts/OWNERS
index dbe89cf..32b11b8 100644
--- a/media/codec2/hidl/1.0/vts/OWNERS
+++ b/media/codec2/hidl/1.0/vts/OWNERS
@@ -1,8 +1,5 @@
+# Bug component: 25690
 # Media team
 lajos@google.com
-pawin@google.com
 taklee@google.com
 wonsik@google.com
-
-# VTS team
-dshi@google.com
diff --git a/media/codec2/hidl/plugin/FilterWrapper.cpp b/media/codec2/hidl/plugin/FilterWrapper.cpp
index 70c63f2..b6024ff 100644
--- a/media/codec2/hidl/plugin/FilterWrapper.cpp
+++ b/media/codec2/hidl/plugin/FilterWrapper.cpp
@@ -430,6 +430,10 @@
             LOG(DEBUG) << "WrappedDecoderInterface: FilterWrapper not found";
             return C2_OK;
         }
+        if (!filterWrapper->isFilteringEnabled(next)) {
+            LOG(VERBOSE) << "WrappedDecoderInterface: filtering not enabled";
+            return C2_OK;
+        }
         std::vector<std::unique_ptr<C2Param>> params;
         c2_status_t err = filterWrapper->queryParamsForPreviousComponent(next, &params);
         if (err != C2_OK) {
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 23a326f..0de0b77 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -1467,6 +1467,16 @@
     std::list<std::unique_ptr<C2Work>> flushedConfigs;
     mFlushedConfigs.lock()->swap(flushedConfigs);
     if (!flushedConfigs.empty()) {
+        {
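+            // Register the flushed config works with the pipeline watcher again
+            // before they are re-queued to the component below.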
+            Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
+            PipelineWatcher::Clock::time_point now = PipelineWatcher::Clock::now();
+            for (const std::unique_ptr<C2Work> &work : flushedConfigs) {
+                watcher->onWorkQueued(
+                        work->input.ordinal.frameIndex.peeku(),
+                        std::vector(work->input.buffers),
+                        now);
+            }
+        }
         err = mComponent->queue(&flushedConfigs);
         if (err != C2_OK) {
             ALOGW("[%s] Error while queueing a flushed config", mName);
@@ -1533,41 +1543,45 @@
     setDescrambler(nullptr);
 }
 
-
 void CCodecBufferChannel::flush(const std::list<std::unique_ptr<C2Work>> &flushedWork) {
     ALOGV("[%s] flush", mName);
-    std::vector<uint64_t> indices;
     std::list<std::unique_ptr<C2Work>> configs;
     mInput.lock()->lastFlushIndex = mFrameIndex.load(std::memory_order_relaxed);
-    for (const std::unique_ptr<C2Work> &work : flushedWork) {
-        indices.push_back(work->input.ordinal.frameIndex.peeku());
-        if (!(work->input.flags & C2FrameData::FLAG_CODEC_CONFIG)) {
-            continue;
+    {
+        Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
+        for (const std::unique_ptr<C2Work> &work : flushedWork) {
+            uint64_t frameIndex = work->input.ordinal.frameIndex.peeku();
+            if (!(work->input.flags & C2FrameData::FLAG_CODEC_CONFIG)) {
+                watcher->onWorkDone(frameIndex);
+                continue;
+            }
+            if (work->input.buffers.empty()
+                    || work->input.buffers.front() == nullptr
+                    || work->input.buffers.front()->data().linearBlocks().empty()) {
+                ALOGD("[%s] no linear codec config data found", mName);
+                watcher->onWorkDone(frameIndex);
+                continue;
+            }
+            std::unique_ptr<C2Work> copy(new C2Work);
+            copy->input.flags = C2FrameData::flags_t(
+                    work->input.flags | C2FrameData::FLAG_DROP_FRAME);
+            copy->input.ordinal = work->input.ordinal;
+            copy->input.ordinal.frameIndex = mFrameIndex++;
+            for (size_t i = 0; i < work->input.buffers.size(); ++i) {
+                copy->input.buffers.push_back(watcher->onInputBufferReleased(frameIndex, i));
+            }
+            for (const std::unique_ptr<C2Param> &param : work->input.configUpdate) {
+                copy->input.configUpdate.push_back(C2Param::Copy(*param));
+            }
+            copy->input.infoBuffers.insert(
+                    copy->input.infoBuffers.begin(),
+                    work->input.infoBuffers.begin(),
+                    work->input.infoBuffers.end());
+            copy->worklets.emplace_back(new C2Worklet);
+            configs.push_back(std::move(copy));
+            watcher->onWorkDone(frameIndex);
+            ALOGV("[%s] stashed flushed codec config data", mName);
         }
-        if (work->input.buffers.empty()
-                || work->input.buffers.front() == nullptr
-                || work->input.buffers.front()->data().linearBlocks().empty()) {
-            ALOGD("[%s] no linear codec config data found", mName);
-            continue;
-        }
-        std::unique_ptr<C2Work> copy(new C2Work);
-        copy->input.flags = C2FrameData::flags_t(work->input.flags | C2FrameData::FLAG_DROP_FRAME);
-        copy->input.ordinal = work->input.ordinal;
-        copy->input.ordinal.frameIndex = mFrameIndex++;
-        copy->input.buffers.insert(
-                copy->input.buffers.begin(),
-                work->input.buffers.begin(),
-                work->input.buffers.end());
-        for (const std::unique_ptr<C2Param> &param : work->input.configUpdate) {
-            copy->input.configUpdate.push_back(C2Param::Copy(*param));
-        }
-        copy->input.infoBuffers.insert(
-                copy->input.infoBuffers.begin(),
-                work->input.infoBuffers.begin(),
-                work->input.infoBuffers.end());
-        copy->worklets.emplace_back(new C2Worklet);
-        configs.push_back(std::move(copy));
-        ALOGV("[%s] stashed flushed codec config data", mName);
     }
     mFlushedConfigs.lock()->swap(configs);
     {
@@ -1582,12 +1596,6 @@
             output->buffers->flushStash();
         }
     }
-    {
-        Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
-        for (uint64_t index : indices) {
-            watcher->onWorkDone(index);
-        }
-    }
 }
 
 void CCodecBufferChannel::onWorkDone(
diff --git a/media/codec2/sfplugin/tests/CCodecConfig_test.cpp b/media/codec2/sfplugin/tests/CCodecConfig_test.cpp
index 7c660dc..3615289 100644
--- a/media/codec2/sfplugin/tests/CCodecConfig_test.cpp
+++ b/media/codec2/sfplugin/tests/CCodecConfig_test.cpp
@@ -224,6 +224,17 @@
                                 Copy<C2StreamBitrateInfo::output, C2StreamBitrateInfo::input>,
                                 mInputBitrate)
                             .build());
+
+                    addParameter(
+                            DefineParam(mOutputProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+                            .withDefault(new C2StreamProfileLevelInfo::output(
+                                    0u, PROFILE_UNUSED, LEVEL_UNUSED))
+                            .withFields({
+                                C2F(mOutputProfileLevel, profile).any(),
+                                C2F(mOutputProfileLevel, level).any(),
+                            })
+                            .withSetter(Setter<C2StreamProfileLevelInfo::output>)
+                            .build());
                 }
 
                 // TODO: more SDK params
@@ -241,6 +252,8 @@
             std::shared_ptr<C2StreamPixelAspectRatioInfo::output> mPixelAspectRatio;
             std::shared_ptr<C2StreamBitrateInfo::input> mInputBitrate;
             std::shared_ptr<C2StreamBitrateInfo::output> mOutputBitrate;
+            std::shared_ptr<C2StreamProfileLevelInfo::input> mInputProfileLevel;
+            std::shared_ptr<C2StreamProfileLevelInfo::output> mOutputProfileLevel;
 
             template<typename T>
             static C2R Setter(bool, C2P<T> &) {
@@ -576,4 +589,51 @@
             << "mOutputFormat = " << mConfig.mOutputFormat->debugString().c_str();
 }
 
+typedef std::tuple<std::string, C2Config::profile_t, int32_t> HdrProfilesParams;
+
+class HdrProfilesTest
+    : public CCodecConfigTest,
+      public ::testing::WithParamInterface<HdrProfilesParams> {
+};
+
+TEST_P(HdrProfilesTest, SetFromSdk) {
+    HdrProfilesParams params = GetParam();
+    std::string mediaType = std::get<0>(params);
+    C2Config::profile_t c2Profile = std::get<1>(params);
+    int32_t sdkProfile = std::get<2>(params);
+
+    init(C2Component::DOMAIN_VIDEO, C2Component::KIND_ENCODER, mediaType.c_str());
+
+    ASSERT_EQ(OK, mConfig.initialize(mReflector, mConfigurable));
+
+    sp<AMessage> format{new AMessage};
+    format->setInt32(KEY_PROFILE, sdkProfile);
+
+    std::vector<std::unique_ptr<C2Param>> configUpdate;
+    ASSERT_EQ(OK, mConfig.getConfigUpdateFromSdkParams(
+            mConfigurable, format, D::ALL, C2_MAY_BLOCK, &configUpdate));
+
+    ASSERT_EQ(1u, configUpdate.size());
+    C2StreamProfileLevelInfo::input *pl =
+        FindParam<std::remove_pointer<decltype(pl)>::type>(configUpdate);
+    ASSERT_NE(nullptr, pl);
+    ASSERT_EQ(c2Profile, pl->profile);
+}
+
+HdrProfilesParams kHdrProfilesParams[] = {
+    std::make_tuple(MIMETYPE_VIDEO_HEVC, PROFILE_HEVC_MAIN_10, HEVCProfileMain10HDR10),
+    std::make_tuple(MIMETYPE_VIDEO_HEVC, PROFILE_HEVC_MAIN_10, HEVCProfileMain10HDR10Plus),
+    std::make_tuple(MIMETYPE_VIDEO_VP9,  PROFILE_VP9_2,        VP9Profile2HDR),
+    std::make_tuple(MIMETYPE_VIDEO_VP9,  PROFILE_VP9_2,        VP9Profile2HDR10Plus),
+    std::make_tuple(MIMETYPE_VIDEO_VP9,  PROFILE_VP9_3,        VP9Profile3HDR),
+    std::make_tuple(MIMETYPE_VIDEO_VP9,  PROFILE_VP9_3,        VP9Profile3HDR10Plus),
+    std::make_tuple(MIMETYPE_VIDEO_AV1,  PROFILE_AV1_0,        AV1ProfileMain10HDR10),
+    std::make_tuple(MIMETYPE_VIDEO_AV1,  PROFILE_AV1_0,        AV1ProfileMain10HDR10Plus),
+};
+
+INSTANTIATE_TEST_SUITE_P(
+        CCodecConfig,
+        HdrProfilesTest,
+        ::testing::ValuesIn(kHdrProfilesParams));
+
 } // namespace android
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
index 5f87c66..2213001 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
@@ -118,6 +118,22 @@
 
 }  // namespace
 
+bool IsFormatR10G10B10A2SupportedForLegacyRendering() {
+    const AHardwareBuffer_Desc desc = {
+        .width = 320,
+        .height = 240,
+        .format = AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM,
+        .layers = 1,
+        .usage = AHARDWAREBUFFER_USAGE_CPU_READ_RARELY | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN |
+                 AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE,
+        .stride = 0,
+        .rfu0 = 0,
+        .rfu1 = 0,
+    };
+
+    return AHardwareBuffer_isSupported(&desc);
+}
+
 status_t ImageCopy(uint8_t *imgBase, const MediaImage2 *img, const C2GraphicView &view) {
     if (view.crop().width != img->mWidth || view.crop().height != img->mHeight) {
         return BAD_VALUE;
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.h b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
index 9fa642d..c4651a4 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.h
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
@@ -27,6 +27,11 @@
 namespace android {
 
 /**
+ * Check if R10G10B10A2 is supported in the legacy rendering path that involves the GPU.
+ */
+bool IsFormatR10G10B10A2SupportedForLegacyRendering();
+
+/**
  * Converts an RGB view to planar YUV 420 media image.
  *
  * \param dstY       pointer to media image buffer
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index ca6a328..f557830 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -255,6 +255,8 @@
     { C2Config::PROFILE_HEVC_MAIN_STILL, HEVCProfileMainStill },
     { C2Config::PROFILE_HEVC_MAIN_INTRA, HEVCProfileMain },
     { C2Config::PROFILE_HEVC_MAIN_10_INTRA, HEVCProfileMain10 },
+    { C2Config::PROFILE_HEVC_MAIN_10, HEVCProfileMain10HDR10 },
+    { C2Config::PROFILE_HEVC_MAIN_10, HEVCProfileMain10HDR10Plus },
 };
 
 ALookup<C2Config::profile_t, int32_t> sHevcHdrProfiles = {
diff --git a/media/libaaudio/scripts/measure_device_power.py b/media/libaaudio/scripts/measure_device_power.py
new file mode 100755
index 0000000..9603f88
--- /dev/null
+++ b/media/libaaudio/scripts/measure_device_power.py
@@ -0,0 +1,272 @@
+#!/usr/bin/python3
+"""
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+"""
+
+'''
+Measure CPU-related power on Pixel 6 or later devices using ODPM,
+the On Device Power Measurement tool.
+Generate a CSV report suitable for importing into a spreadsheet.
+'''
+
+import argparse
+import os
+import re
+import subprocess
+import sys
+import time
+
+# defaults
+PRE_DELAY_SECONDS = 0.5 # time to sleep before a command to avoid adb unroot errors
+DEFAULT_NUM_ITERATIONS = 5
+DEFAULT_FILE_NAME = 'energy_commands.txt'
+
+'''
+Default rail assignments
+philburk-macbookpro3:expt philburk$ adb shell cat /sys/bus/iio/devices/iio\:device0/energy_value
+t=349894
+CH0(T=349894)[S10M_VDD_TPU], 5578756
+CH1(T=349894)[VSYS_PWR_MODEM], 29110940
+CH2(T=349894)[VSYS_PWR_RFFE], 3166046
+CH3(T=349894)[S2M_VDD_CPUCL2], 30203502
+CH4(T=349894)[S3M_VDD_CPUCL1], 23377533
+CH5(T=349894)[S4M_VDD_CPUCL0], 46356942
+CH6(T=349894)[S5M_VDD_INT], 10771876
+CH7(T=349894)[S1M_VDD_MIF], 21091363
+philburk-macbookpro3:expt philburk$ adb shell cat /sys/bus/iio/devices/iio\:device1/energy_value
+t=359458
+CH0(T=359458)[VSYS_PWR_WLAN_BT], 45993209
+CH1(T=359458)[L2S_VDD_AOC_RET], 2822928
+CH2(T=359458)[S9S_VDD_AOC], 6923706
+CH3(T=359458)[S5S_VDDQ_MEM], 4658202
+CH4(T=359458)[S10S_VDD2L], 5506273
+CH5(T=359458)[S4S_VDD2H_MEM], 14254574
+CH6(T=359458)[S2S_VDD_G3D], 5315420
+CH7(T=359458)[VSYS_PWR_DISPLAY], 81221665
+'''
+
+'''
+LDO2M(L2M_ALIVE):DDR  -> DRAM Array Core Power
+BUCK4S(S4S_VDD2H_MEM):DDR -> Normal operation data and control path logic circuits
+BUCK5S(S5S_VDDQ_MEM):DDR -> LPDDR I/O interface
+BUCK10S(S10S_VDD2L):DDR  -> DVFSC (1600Mbps or lower) operation data and control path logic circuits
+BUCK1M (S1M_VDD_MIF):  SoC side Memory InterFace and Controller
+'''
+
+# Map between rail name and human readable name.
+ENERGY_DICTIONARY = { \
+        'S4M_VDD_CPUCL0': 'CPU0', \
+        'S3M_VDD_CPUCL1': 'CPU1', \
+        'S2M_VDD_CPUCL2': 'CPU2', \
+        'S1M_VDD_MIF': 'MIF', \
+        'L2M_ALIVE': 'DDRAC', \
+        'S4S_VDD2H_MEM': 'DDRNO', \
+        'S10S_VDD2L': 'DDR16', \
+        'S5S_VDDQ_MEM': 'DDRIO', \
+        'VSYS_PWR_DISPLAY': 'SCREEN'}
+
+SORTED_ENERGY_LIST = sorted(ENERGY_DICTIONARY, key=ENERGY_DICTIONARY.get)
+
+# Sometimes "adb unroot" returns 1!
+# So try several times.
+# @return 0 on success
+def adbUnroot():
+    returnCode = 1
+    count = 0
+    limit = 5
+    while count < limit and returnCode != 0:
+        print('Try to adb unroot {} of {}'.format(count, limit))
+        subprocess.call(["adb", "wait-for-device"])
+        time.sleep(PRE_DELAY_SECONDS)
+        returnCode = subprocess.call(["adb", "unroot"])
+        print('returnCode = {}'.format(returnCode))
+        count += 1
+    return returnCode
+
+# @param commandString String containing a shell command
+# @return The stdout of the command, or the return code if "adb unroot" was requested
+def runCommand(commandString):
+    print(commandString)
+    if commandString == "adb unroot":
+        result = adbUnroot()
+    else:
+        commandArray = commandString.split(' ')
+        result = subprocess.run(commandArray, check=True, capture_output=True).stdout
+    return result
+
+# @param commandString String containing the adb sub-command (without the leading "adb")
+# @return The stdout of the command, or the return code if "unroot" was requested
+def adbCommand(commandString):
+    if commandString == "unroot":
+        result = adbUnroot()
+    else:
+        print(("adb " + commandString))
+        commandArray = ["adb"] + commandString.split(' ')
+        subprocess.call(["adb", "wait-for-device"])
+        result = subprocess.run(commandArray, check=True, capture_output=True).stdout
+    return result
+
+# Parse a line that looks like "CH3(T=10697635)[S2M_VDD_CPUCL2], 116655335"
+# and return a (tag, value) tuple; the caller stores the value in the
+# report dictionary under the tag.
+def parseEnergyValue(string):
+    return tuple(re.split(r'\[|\], +', string)[1:])
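+
+# For example (illustration only):
+#   parseEnergyValue('CH3(T=10697635)[S2M_VDD_CPUCL2], 116655335')
+#   returns ('S2M_VDD_CPUCL2', '116655335')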
+
+# Read accumulated energy into a dictionary.
+def measureEnergyForDevice(deviceIndex, report):
+    # print("measureEnergyForDevice " + str(deviceIndex))
+    tableBytes = adbCommand( \
+            r'shell cat /sys/bus/iio/devices/iio\:device{}/energy_value'\
+            .format(deviceIndex))
+    table = tableBytes.decode("utf-8")
+    # print(table)
+    for count, line in enumerate(table.splitlines()):
+        if count > 0:
+            tagEnergy = parseEnergyValue(line)
+            report[tagEnergy[0]] = int(tagEnergy[1].strip())
+    # print(report)
+
+def measureEnergyOnce():
+    adbCommand("root")
+    report = {}
+    measureEnergyForDevice(0, report)
+    measureEnergyForDevice(1, report)
+    adbUnroot()
+    return report
+
+# Subtract numeric values for matching keys.
+def subtractReports(A, B):
+    return {x: A[x] - B[x] for x in A if x in B}
+
+# Add numeric values for matching keys.
+def addReports(A, B):
+    return {x: A[x] + B[x] for x in A if x in B}
+
+# Divide numeric values by divisor.
+# @return Modified copy of report.
+def divideReport(report, divisor):
+    return {key: val / divisor for key, val in list(report.items())}
+
+# Generate a dictionary that is the difference between two measurements over time.
+def measureEnergyOverTime(duration):
+    report1 = measureEnergyOnce()
+    print(("Measure energy for " + str(duration) + " seconds."))
+    time.sleep(duration)
+    report2 = measureEnergyOnce()
+    return subtractReports(report2, report1)
+
+# Generate a CSV string containing the human readable headers.
+def formatEnergyHeader():
+    header = ""
+    for tag in SORTED_ENERGY_LIST:
+        header += ENERGY_DICTIONARY[tag] + ", "
+    return header
+
+# Generate a CSV string containing the numeric values.
+def formatEnergyData(report):
+    data = ""
+    for tag in SORTED_ENERGY_LIST:
+        if tag in list(report.keys()):
+            data += str(report[tag]) + ", "
+        else:
+            data += "-1,"
+    return data
+
+def printEnergyReport(report):
+    s = "\n"
+    s += "Values are in microWattSeconds\n"
+    s += "Report below is CSV format for pasting into a spreadsheet:\n"
+    s += formatEnergyHeader() + "\n"
+    s += formatEnergyData(report) + "\n"
+    print(s)
+
+# Generate a dictionary that is the difference between two measurements
+# before and after executing the command.
+def measureEnergyForCommand(command):
+    report1 = measureEnergyOnce()
+    print(("Measure energy for:  " + command))
+    result = runCommand(command)
+    report2 = measureEnergyOnce()
+    # print(result)
+    return subtractReports(report2, report1)
+
+# Average the results of several measurements for one command.
+def averageEnergyForCommand(command, count):
+    print("=================== #0\n")
+    sumReport = measureEnergyForCommand(command)
+    for i in range(1, count):
+        print(("=================== #" + str(i) + "\n"))
+        report = measureEnergyForCommand(command)
+        sumReport = addReports(sumReport, report)
+    print(sumReport)
+    return divideReport(sumReport, count)
+
+# Run the commands listed in a file and measure the energy used by each one.
+# Lines ending in "\" are executed but not measured (e.g. setup commands).
+# Lines beginning with "#" are comments that label the following measurement.
+# An example command file is sketched in the comment after this function.
+def measureEnergyForCommands(fileName):
+    finalReport = "------------------------------------\n"
+    finalReport += "comment, command, " + formatEnergyHeader() + "\n"
+    comment = ""
+    with open(fileName) as fp:
+        for line in fp:
+            command = line.strip()
+            if command.endswith('\\'):
+                command = command[:-1].strip() # remove the trailing backslash
+                runCommand(command)
+            elif command.startswith("#"):
+                # Remember the comment so it can label the next measured command.
+                print(command + "\n")
+                comment = command
+            elif command:
+                report = averageEnergyForCommand(command, DEFAULT_NUM_ITERATIONS)
+                finalReport += comment + ", " + command + ", " + formatEnergyData(report) + "\n"
+                print(finalReport)
+    return finalReport
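+
+# Example command file contents (illustrative only): the first line is run once as
+# setup without being measured, the '#' line labels the report row, and 'sleep 5'
+# is the command whose energy is measured and averaged.
+#     adb shell settings put system screen_brightness 100 \
+#     # idle baseline
+#     sleep 5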
+
+def main():
+    # parse command line args
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-s', '--seconds',
+            help="Measure power for N seconds. Ignore scriptFile.",
+            type=float)
+    parser.add_argument("fileName",
+            nargs = '?',
+            help="Path to file containing commands to be measured."
+                    + " Default path = " + DEFAULT_FILE_NAME + "."
+                    + " Lines ending in '\' are continuation lines."
+                    + " Lines beginning with '#' are comments.",
+                    default=DEFAULT_FILE_NAME)
+    args = parser.parse_args()
+
+    print("seconds  = " + str(args.seconds))
+    print("fileName = " + str(args.fileName))
+    # Process command line
+    if args.seconds:
+        report = measureEnergyOverTime(args.seconds)
+        printEnergyReport(report)
+    else:
+        report = measureEnergyForCommands(args.fileName)
+        print(report)
+    print("Finished.\n")
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/media/libaaudio/scripts/setup_odpm_cpu_rails.sh b/media/libaaudio/scripts/setup_odpm_cpu_rails.sh
new file mode 100644
index 0000000..e9241b9
--- /dev/null
+++ b/media/libaaudio/scripts/setup_odpm_cpu_rails.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# Configure ODPM rails to measure CPU specific power.
+# See go/odpm-p21-userguide
+
+adb root
+
+# LDO2M(L2M_ALIVE) - DRAM Array Core Power
+adb shell 'echo "CH0=LDO2M" > /sys/bus/iio/devices/iio\:device0/enabled_rails'
+
+# These are the defaults.
+# BUCK2M(S2M_VDD_CPUCL2):CPU(BIG)
+# adb shell 'echo "CH3=BUCK2M" > /sys/bus/iio/devices/iio\:device0/enabled_rails'
+# BUCK3M(S3M_VDD_CPUCL1):CPU(MID)
+# adb shell 'echo "CH4=BUCK3M" > /sys/bus/iio/devices/iio\:device0/enabled_rails'
+# BUCK4M(S4M_VDD_CPUCL0):CPU(LITTLE)
+# adb shell 'echo "CH5=BUCK4M" > /sys/bus/iio/devices/iio\:device0/enabled_rails'
+# BUCK1M(S1M_VDD_MIF):MIF
+# adb shell 'echo "CH7=BUCK1M" > /sys/bus/iio/devices/iio\:device0/enabled_rails'
+
+# These are default on device1.
+# BUCK5S(S5S_VDDQ_MEM):DDR
+# adb shell 'echo "CH3=BUCK5S" > /sys/bus/iio/devices/iio\:device1/enabled_rails'
+# BUCK10S(S10S_VDD2L):DDR
+# adb shell 'echo "CH4=BUCK10S" > /sys/bus/iio/devices/iio\:device1/enabled_rails'
+# BUCK4S(S4S_VDD2H_MEM):DDR
+# adb shell 'echo "CH5=BUCK4S" > /sys/bus/iio/devices/iio\:device1/enabled_rails'
+
+adb shell 'cat /sys/bus/iio/devices/iio\:device0/enabled_rails'
+adb shell 'cat /sys/bus/iio/devices/iio\:device1/enabled_rails'
+
+adb unroot
+
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index 01aaa2a..f50b53a 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -207,10 +207,16 @@
         "binding/RingBufferParcelable.cpp",
         "binding/SharedMemoryParcelable.cpp",
         "binding/SharedRegionParcelable.cpp",
-        "flowgraph/AudioProcessorBase.cpp",
+        "flowgraph/ChannelCountConverter.cpp",
         "flowgraph/ClipToRange.cpp",
+        "flowgraph/FlowGraphNode.cpp",
+        "flowgraph/ManyToMultiConverter.cpp",
+        "flowgraph/MonoBlend.cpp",
         "flowgraph/MonoToMultiConverter.cpp",
+        "flowgraph/MultiToMonoConverter.cpp",
+        "flowgraph/MultiToManyConverter.cpp",
         "flowgraph/RampLinear.cpp",
+        "flowgraph/SampleRateConverter.cpp",
         "flowgraph/SinkFloat.cpp",
         "flowgraph/SinkI16.cpp",
         "flowgraph/SinkI24.cpp",
@@ -219,6 +225,14 @@
         "flowgraph/SourceI16.cpp",
         "flowgraph/SourceI24.cpp",
         "flowgraph/SourceI32.cpp",
+        "flowgraph/resampler/IntegerRatio.cpp",
+        "flowgraph/resampler/LinearResampler.cpp",
+        "flowgraph/resampler/MultiChannelResampler.cpp",
+        "flowgraph/resampler/PolyphaseResampler.cpp",
+        "flowgraph/resampler/PolyphaseResamplerMono.cpp",
+        "flowgraph/resampler/PolyphaseResamplerStereo.cpp",
+        "flowgraph/resampler/SincResampler.cpp",
+        "flowgraph/resampler/SincResamplerStereo.cpp",
     ],
     sanitize: {
         integer_overflow: true,
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.cpp b/media/libaaudio/src/client/AAudioFlowGraph.cpp
index 61b50f3..d0c3238 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.cpp
+++ b/media/libaaudio/src/client/AAudioFlowGraph.cpp
@@ -21,7 +21,10 @@
 #include "AAudioFlowGraph.h"
 
 #include <flowgraph/ClipToRange.h>
+#include <flowgraph/ManyToMultiConverter.h>
+#include <flowgraph/MonoBlend.h>
 #include <flowgraph/MonoToMultiConverter.h>
+#include <flowgraph/MultiToManyConverter.h>
 #include <flowgraph/RampLinear.h>
 #include <flowgraph/SinkFloat.h>
 #include <flowgraph/SinkI16.h>
@@ -37,12 +40,17 @@
 aaudio_result_t AAudioFlowGraph::configure(audio_format_t sourceFormat,
                           int32_t sourceChannelCount,
                           audio_format_t sinkFormat,
-                          int32_t sinkChannelCount) {
-    AudioFloatOutputPort *lastOutput = nullptr;
+                          int32_t sinkChannelCount,
+                          bool useMonoBlend,
+                          float audioBalance,
+                          bool isExclusive) {
+    FlowGraphPortFloatOutput *lastOutput = nullptr;
 
     // TODO change back to ALOGD
-    ALOGI("%s() source format = 0x%08x, channels = %d, sink format = 0x%08x, channels = %d",
-          __func__, sourceFormat, sourceChannelCount, sinkFormat, sinkChannelCount);
+    ALOGI("%s() source format = 0x%08x, channels = %d, sink format = 0x%08x, channels = %d, "
+          "useMonoBlend = %d, audioBalance = %f, isExclusive %d",
+          __func__, sourceFormat, sourceChannelCount, sinkFormat, sinkChannelCount,
+          useMonoBlend, audioBalance, isExclusive);
 
     switch (sourceFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
@@ -63,10 +71,11 @@
     }
     lastOutput = &mSource->output;
 
-    // Apply volume as a ramp to avoid pops.
-    mVolumeRamp = std::make_unique<RampLinear>(sourceChannelCount);
-    lastOutput->connect(&mVolumeRamp->input);
-    lastOutput = &mVolumeRamp->output;
+    if (useMonoBlend) {
+        mMonoBlend = std::make_unique<MonoBlend>(sourceChannelCount);
+        lastOutput->connect(&mMonoBlend->input);
+        lastOutput = &mMonoBlend->output;
+    }
 
     // For a pure float graph, there is chance that the data range may be very large.
     // So we should clip to a reasonable value that allows a little headroom.
@@ -86,6 +95,26 @@
         return AAUDIO_ERROR_UNIMPLEMENTED;
     }
 
+    // Apply volume ramps only for exclusive streams.
+    if (isExclusive) {
+        // Apply volume ramps to set the left/right audio balance and target volumes.
+        // The channels are split apart, a volume ramp is applied to each channel, and then
+        // the channels are recombined.
+        mMultiToManyConverter = std::make_unique<MultiToManyConverter>(sinkChannelCount);
+        mManyToMultiConverter = std::make_unique<ManyToMultiConverter>(sinkChannelCount);
+        lastOutput->connect(&mMultiToManyConverter->input);
+        for (int i = 0; i < sinkChannelCount; i++) {
+            mVolumeRamps.emplace_back(std::make_unique<RampLinear>(1));
+            mPanningVolumes.emplace_back(1.0f);
+            lastOutput = mMultiToManyConverter->outputs[i].get();
+            lastOutput->connect(&(mVolumeRamps[i].get()->input));
+            lastOutput = &(mVolumeRamps[i].get()->output);
+            lastOutput->connect(mManyToMultiConverter->inputs[i].get());
+        }
+        lastOutput = &mManyToMultiConverter->output;
+        setAudioBalance(audioBalance);
+    }
+
     switch (sinkFormat) {
         case AUDIO_FORMAT_PCM_FLOAT:
             mSink = std::make_unique<SinkFloat>(sinkChannelCount);
@@ -117,9 +146,32 @@
  * @param volume between 0.0 and 1.0
  */
 void AAudioFlowGraph::setTargetVolume(float volume) {
-    mVolumeRamp->setTarget(volume);
+    for (int i = 0; i < mVolumeRamps.size(); i++) {
+        mVolumeRamps[i]->setTarget(volume * mPanningVolumes[i]);
+    }
+    mTargetVolume = volume;
 }
 
+/**
+ * @param audioBalance between -1.0 and 1.0
+ */
+void AAudioFlowGraph::setAudioBalance(float audioBalance) {
+    if (mPanningVolumes.size() >= 2) {
+        float leftMultiplier = 0;
+        float rightMultiplier = 0;
+        mBalance.computeStereoBalance(audioBalance, &leftMultiplier, &rightMultiplier);
+        mPanningVolumes[0] = leftMultiplier;
+        mPanningVolumes[1] = rightMultiplier;
+        mVolumeRamps[0]->setTarget(mTargetVolume * leftMultiplier);
+        mVolumeRamps[1]->setTarget(mTargetVolume * rightMultiplier);
+    }
+}
+
+/**
+ * @param numFrames number of frames over which volume changes are ramped
+ */
 void AAudioFlowGraph::setRampLengthInFrames(int32_t numFrames) {
-    mVolumeRamp->setLengthInFrames(numFrames);
+    for (auto& ramp : mVolumeRamps) {
+        ramp->setLengthInFrames(numFrames);
+    }
 }
diff --git a/media/libaaudio/src/client/AAudioFlowGraph.h b/media/libaaudio/src/client/AAudioFlowGraph.h
index a49f64e..00b6575 100644
--- a/media/libaaudio/src/client/AAudioFlowGraph.h
+++ b/media/libaaudio/src/client/AAudioFlowGraph.h
@@ -23,8 +23,12 @@
 #include <system/audio.h>
 
 #include <aaudio/AAudio.h>
+#include <audio_utils/Balance.h>
 #include <flowgraph/ClipToRange.h>
+#include <flowgraph/ManyToMultiConverter.h>
+#include <flowgraph/MonoBlend.h>
 #include <flowgraph/MonoToMultiConverter.h>
+#include <flowgraph/MultiToManyConverter.h>
 #include <flowgraph/RampLinear.h>
 
 class AAudioFlowGraph {
@@ -36,12 +40,19 @@
      * @param sourceChannelCount
      * @param sinkFormat
      * @param sinkChannelCount
+     * @param useMonoBlend
+     * @param audioBalance
+     * @param isExclusive
      * @return
      */
     aaudio_result_t configure(audio_format_t sourceFormat,
                               int32_t sourceChannelCount,
                               audio_format_t sinkFormat,
-                              int32_t sinkChannelCount);
+                              int32_t sinkChannelCount,
+                              bool useMonoBlend,
+                              float audioBalance,
+                              bool isExclusive);
 
     void process(const void *source, void *destination, int32_t numFrames);
 
@@ -50,14 +61,28 @@
      */
     void setTargetVolume(float volume);
 
+    /**
+     * @param audioBalance between -1.0 and 1.0
+     */
+    void setAudioBalance(float audioBalance);
+
+    /**
+     * @param numFrames number of frames over which volume changes are ramped
+     */
     void setRampLengthInFrames(int32_t numFrames);
 
 private:
-    std::unique_ptr<flowgraph::AudioSource>          mSource;
-    std::unique_ptr<flowgraph::RampLinear>           mVolumeRamp;
-    std::unique_ptr<flowgraph::ClipToRange>          mClipper;
-    std::unique_ptr<flowgraph::MonoToMultiConverter> mChannelConverter;
-    std::unique_ptr<flowgraph::AudioSink>            mSink;
+    std::unique_ptr<flowgraph::FlowGraphSourceBuffered>     mSource;
+    std::unique_ptr<flowgraph::MonoBlend>                   mMonoBlend;
+    std::unique_ptr<flowgraph::ClipToRange>                 mClipper;
+    std::unique_ptr<flowgraph::MonoToMultiConverter>        mChannelConverter;
+    std::unique_ptr<flowgraph::ManyToMultiConverter>        mManyToMultiConverter;
+    std::unique_ptr<flowgraph::MultiToManyConverter>        mMultiToManyConverter;
+    std::vector<std::unique_ptr<flowgraph::RampLinear>>     mVolumeRamps;
+    std::vector<float>                                      mPanningVolumes;
+    float                                                   mTargetVolume = 1.0f;
+    android::audio_utils::Balance                           mBalance;
+    std::unique_ptr<flowgraph::FlowGraphSink>               mSink;
 };
 
 
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 89d42bf..afdc2ac 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -27,6 +27,7 @@
 #include <aaudio/AAudio.h>
 #include <cutils/properties.h>
 
+#include <media/AudioParameter.h>
 #include <media/AudioSystem.h>
 #include <media/MediaMetricsItem.h>
 #include <utils/Trace.h>
@@ -270,6 +271,18 @@
         mCallbackBuffer = std::make_unique<uint8_t[]>(callbackBufferSize);
     }
 
+    // Exclusive output streams should combine channels when mono audio adjustment
+    // is enabled. They should also adjust for audio balance.
+    if ((getDirection() == AAUDIO_DIRECTION_OUTPUT) &&
+        (getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE)) {
+        bool isMasterMono = false;
+        android::AudioSystem::getMasterMono(&isMasterMono);
+        setRequireMonoBlend(isMasterMono);
+        float audioBalance = 0;
+        android::AudioSystem::getMasterBalance(&audioBalance);
+        setAudioBalance(audioBalance);
+    }
+
     // For debugging and analyzing the distribution of MMAP timestamps.
     // For OUTPUT, use a NEGATIVE offset to move the CPU writes further BEFORE the HW reads.
     // For INPUT, use a POSITIVE offset to move the CPU reads further AFTER the HW writes.
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 5921799..450d390 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -52,7 +52,10 @@
         result = mFlowGraph.configure(getFormat(),
                              getSamplesPerFrame(),
                              getDeviceFormat(),
-                             getDeviceChannelCount());
+                             getDeviceChannelCount(),
+                             getRequireMonoBlend(),
+                             getAudioBalance(),
+                             (getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE));
 
         if (result != AAUDIO_OK) {
             safeReleaseClose();
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index afb8551..5fb4528 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -277,6 +277,14 @@
         return mIsPrivacySensitive;
     }
 
+    bool getRequireMonoBlend() const {
+        return mRequireMonoBlend;
+    }
+
+    float getAudioBalance() const {
+        return mAudioBalance;
+    }
+
     /**
      * This is only valid after setChannelMask() and setFormat()
      * have been called.
@@ -631,6 +639,20 @@
         mIsPrivacySensitive = privacySensitive;
     }
 
+    /**
+     * This should not be called after the open() call.
+     */
+    void setRequireMonoBlend(bool requireMonoBlend) {
+        mRequireMonoBlend = requireMonoBlend;
+    }
+
+    /**
+     * This should not be called after the open() call.
+     */
+    void setAudioBalance(float audioBalance) {
+        mAudioBalance = audioBalance;
+    }
+
     std::string mMetricsId; // set once during open()
 
     std::mutex                 mStreamLock;
@@ -672,6 +694,8 @@
     aaudio_input_preset_t       mInputPreset     = AAUDIO_UNSPECIFIED;
     aaudio_allowed_capture_policy_t mAllowedCapturePolicy = AAUDIO_ALLOW_CAPTURE_BY_ALL;
     bool                        mIsPrivacySensitive = false;
+    bool                        mRequireMonoBlend = false;
+    float                       mAudioBalance = 0;
 
     int32_t                     mSessionId = AAUDIO_UNSPECIFIED;
 
diff --git a/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp b/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp
deleted file mode 100644
index d8ffd00..0000000
--- a/media/libaaudio/src/flowgraph/AudioProcessorBase.cpp
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <algorithm>
-#include <sys/types.h>
-#include "AudioProcessorBase.h"
-
-using namespace flowgraph;
-
-/***************************************************************************/
-int32_t AudioProcessorBase::pullData(int64_t framePosition, int32_t numFrames) {
-    if (framePosition > mLastFramePosition) {
-        mLastFramePosition = framePosition;
-        mFramesValid = onProcess(framePosition, numFrames);
-    }
-    return mFramesValid;
-}
-
-/***************************************************************************/
-AudioFloatBlockPort::AudioFloatBlockPort(AudioProcessorBase &parent,
-                               int32_t samplesPerFrame,
-                               int32_t framesPerBlock)
-        : AudioPort(parent, samplesPerFrame)
-        , mFramesPerBlock(framesPerBlock) {
-    int32_t numFloats = framesPerBlock * getSamplesPerFrame();
-    mSampleBlock = new float[numFloats]{0.0f};
-}
-
-AudioFloatBlockPort::~AudioFloatBlockPort() {
-    delete[] mSampleBlock;
-}
-
-/***************************************************************************/
-int32_t AudioFloatOutputPort::pullData(int64_t framePosition, int32_t numFrames) {
-    numFrames = std::min(getFramesPerBlock(), numFrames);
-    return mParent.pullData(framePosition, numFrames);
-}
-
-// These need to be in the .cpp file because of forward cross references.
-void AudioFloatOutputPort::connect(AudioFloatInputPort *port) {
-    port->connect(this);
-}
-
-void AudioFloatOutputPort::disconnect(AudioFloatInputPort *port) {
-    port->disconnect(this);
-}
-
-/***************************************************************************/
-int32_t AudioFloatInputPort::pullData(int64_t framePosition, int32_t numFrames) {
-    return (mConnected == nullptr)
-            ? std::min(getFramesPerBlock(), numFrames)
-            : mConnected->pullData(framePosition, numFrames);
-}
-
-float *AudioFloatInputPort::getBlock() {
-    if (mConnected == nullptr) {
-        return AudioFloatBlockPort::getBlock(); // loaded using setValue()
-    } else {
-        return mConnected->getBlock();
-    }
-}
-
-/***************************************************************************/
-int32_t AudioSink::pull(int32_t numFrames) {
-    int32_t actualFrames = input.pullData(mFramePosition, numFrames);
-    mFramePosition += actualFrames;
-    return actualFrames;
-}
\ No newline at end of file
diff --git a/media/libaaudio/src/flowgraph/AudioProcessorBase.h b/media/libaaudio/src/flowgraph/AudioProcessorBase.h
deleted file mode 100644
index 972932f..0000000
--- a/media/libaaudio/src/flowgraph/AudioProcessorBase.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * AudioProcessorBase.h
- *
- * Audio processing node and ports that can be used in a simple data flow graph.
- */
-
-#ifndef FLOWGRAPH_AUDIO_PROCESSOR_BASE_H
-#define FLOWGRAPH_AUDIO_PROCESSOR_BASE_H
-
-#include <cassert>
-#include <cstring>
-#include <math.h>
-#include <sys/types.h>
-#include <time.h>
-#include <unistd.h>
-
-// TODO consider publishing all header files under "include/libaaudio/FlowGraph.h"
-
-namespace flowgraph {
-
-// Default block size that can be overridden when the AudioFloatBlockPort is created.
-// If it is too small then we will have too much overhead from switching between nodes.
-// If it is too high then we will thrash the caches.
-constexpr int kDefaultBlockSize = 8; // arbitrary
-
-class AudioFloatInputPort;
-
-/***************************************************************************/
-class AudioProcessorBase {
-public:
-    virtual ~AudioProcessorBase() = default;
-
-    /**
-     * Perform custom function.
-     *
-     * @param framePosition index of first frame to be processed
-     * @param numFrames maximum number of frames requested for processing
-     * @return number of frames actually processed
-     */
-    virtual int32_t onProcess(int64_t framePosition, int32_t numFrames) = 0;
-
-    /**
-     * If the framePosition is at or after the last frame position then call onProcess().
-     * This prevents infinite recursion in case of cyclic graphs.
-     * It also prevents nodes upstream from a branch from being executed twice.
-     *
-     * @param framePosition
-     * @param numFrames
-     * @return
-     */
-    int32_t pullData(int64_t framePosition, int32_t numFrames);
-
-protected:
-    int64_t  mLastFramePosition = -1; // Start at -1 so that the first pull works.
-
-private:
-    int32_t  mFramesValid = 0; // num valid frames in the block
-};
-
-/***************************************************************************/
-/**
-  * This is a connector that allows data to flow between modules.
-  */
-class AudioPort {
-public:
-    AudioPort(AudioProcessorBase &parent, int32_t samplesPerFrame)
-            : mParent(parent)
-            , mSamplesPerFrame(samplesPerFrame) {
-    }
-
-    // Ports are often declared public. So let's make them non-copyable.
-    AudioPort(const AudioPort&) = delete;
-    AudioPort& operator=(const AudioPort&) = delete;
-
-    int32_t getSamplesPerFrame() const {
-        return mSamplesPerFrame;
-    }
-
-protected:
-    AudioProcessorBase &mParent;
-
-private:
-    const int32_t    mSamplesPerFrame = 1;
-};
-
-/***************************************************************************/
-/**
- * This port contains a float type buffer.
- * The size is framesPerBlock * samplesPerFrame).
- */
-class AudioFloatBlockPort  : public AudioPort {
-public:
-    AudioFloatBlockPort(AudioProcessorBase &mParent,
-                   int32_t samplesPerFrame,
-                   int32_t framesPerBlock = kDefaultBlockSize
-                );
-
-    virtual ~AudioFloatBlockPort();
-
-    int32_t getFramesPerBlock() const {
-        return mFramesPerBlock;
-    }
-
-protected:
-
-    /**
-     * @return buffer internal to the port or from a connected port
-     */
-    virtual float *getBlock() {
-        return mSampleBlock;
-    }
-
-
-private:
-    const int32_t    mFramesPerBlock = 1;
-    float           *mSampleBlock = nullptr; // allocated in constructor
-};
-
-/***************************************************************************/
-/**
-  * The results of a module are stored in the buffer of the output ports.
-  */
-class AudioFloatOutputPort : public AudioFloatBlockPort {
-public:
-    AudioFloatOutputPort(AudioProcessorBase &parent, int32_t samplesPerFrame)
-            : AudioFloatBlockPort(parent, samplesPerFrame) {
-    }
-
-    virtual ~AudioFloatOutputPort() = default;
-
-    using AudioFloatBlockPort::getBlock;
-
-    /**
-     * Call the parent module's onProcess() method.
-     * That may pull data from its inputs and recursively
-     * process the entire graph.
-     * @return number of frames actually pulled
-     */
-    int32_t pullData(int64_t framePosition, int32_t numFrames);
-
-    /**
-     * Connect to the input of another module.
-     * An input port can only have one connection.
-     * An output port can have multiple connections.
-     * If you connect a second output port to an input port
-     * then it overwrites the previous connection.
-     *
-     * This not thread safe. Do not modify the graph topology form another thread while running.
-     */
-    void connect(AudioFloatInputPort *port);
-
-    /**
-     * Disconnect from the input of another module.
-     * This not thread safe.
-     */
-    void disconnect(AudioFloatInputPort *port);
-};
-
-/***************************************************************************/
-class AudioFloatInputPort : public AudioFloatBlockPort {
-public:
-    AudioFloatInputPort(AudioProcessorBase &parent, int32_t samplesPerFrame)
-            : AudioFloatBlockPort(parent, samplesPerFrame) {
-    }
-
-    virtual ~AudioFloatInputPort() = default;
-
-    /**
-     * If connected to an output port then this will return
-     * that output ports buffers.
-     * If not connected then it returns the input ports own buffer
-     * which can be loaded using setValue().
-     */
-    float *getBlock() override;
-
-    /**
-     * Pull data from any output port that is connected.
-     */
-    int32_t pullData(int64_t framePosition, int32_t numFrames);
-
-    /**
-     * Write every value of the float buffer.
-     * This value will be ignored if an output port is connected
-     * to this port.
-     */
-    void setValue(float value) {
-        int numFloats = kDefaultBlockSize * getSamplesPerFrame();
-        float *buffer = getBlock();
-        for (int i = 0; i < numFloats; i++) {
-            *buffer++ = value;
-        }
-    }
-
-    /**
-     * Connect to the output of another module.
-     * An input port can only have one connection.
-     * An output port can have multiple connections.
-     * This not thread safe.
-     */
-    void connect(AudioFloatOutputPort *port) {
-        assert(getSamplesPerFrame() == port->getSamplesPerFrame());
-        mConnected = port;
-    }
-
-    void disconnect(AudioFloatOutputPort *port) {
-        assert(mConnected == port);
-        (void) port;
-        mConnected = nullptr;
-    }
-
-    void disconnect() {
-        mConnected = nullptr;
-    }
-
-private:
-    AudioFloatOutputPort *mConnected = nullptr;
-};
-
-/***************************************************************************/
-class AudioSource : public AudioProcessorBase {
-public:
-    explicit AudioSource(int32_t channelCount)
-            : output(*this, channelCount) {
-    }
-
-    virtual ~AudioSource() = default;
-
-    AudioFloatOutputPort output;
-
-    void setData(const void *data, int32_t numFrames) {
-        mData = data;
-        mSizeInFrames = numFrames;
-        mFrameIndex = 0;
-    }
-
-protected:
-    const void *mData = nullptr;
-    int32_t     mSizeInFrames = 0; // number of frames in mData
-    int32_t     mFrameIndex = 0; // index of next frame to be processed
-};
-
-/***************************************************************************/
-class AudioSink : public AudioProcessorBase {
-public:
-    explicit AudioSink(int32_t channelCount)
-            : input(*this, channelCount) {
-    }
-
-    virtual ~AudioSink() = default;
-
-    AudioFloatInputPort input;
-
-    /**
-     * Do nothing. The work happens in the read() method.
-     *
-     * @param framePosition index of first frame to be processed
-     * @param numFrames
-     * @return number of frames actually processed
-     */
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override {
-        (void) framePosition;
-        (void) numFrames;
-        return 0;
-    };
-
-    virtual int32_t read(void *data, int32_t numFrames) = 0;
-
-protected:
-    int32_t pull(int32_t numFrames);
-
-private:
-    int64_t mFramePosition = 0;
-};
-
-} /* namespace flowgraph */
-
-#endif /* FLOWGRAPH_AUDIO_PROCESSOR_BASE_H */
diff --git a/media/libaaudio/src/flowgraph/ChannelCountConverter.cpp b/media/libaaudio/src/flowgraph/ChannelCountConverter.cpp
new file mode 100644
index 0000000..351def2
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ChannelCountConverter.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include "FlowGraphNode.h"
+#include "ChannelCountConverter.h"
+
+using namespace flowgraph;
+
+ChannelCountConverter::ChannelCountConverter(
+        int32_t inputChannelCount,
+        int32_t outputChannelCount)
+        : input(*this, inputChannelCount)
+        , output(*this, outputChannelCount) {
+}
+
+ChannelCountConverter::~ChannelCountConverter() = default;
+
+int32_t ChannelCountConverter::onProcess(int32_t numFrames) {
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
+    int32_t inputChannelCount = input.getSamplesPerFrame();
+    int32_t outputChannelCount = output.getSamplesPerFrame();
+    for (int i = 0; i < numFrames; i++) {
+        int inputChannel = 0;
+        for (int outputChannel = 0; outputChannel < outputChannelCount; outputChannel++) {
+            // Copy input channels to output channels.
+            // Wrap if we run out of inputs.
+            // Discard if we run out of outputs.
+            outputBuffer[outputChannel] = inputBuffer[inputChannel];
+            inputChannel = (inputChannel == inputChannelCount - 1)
+                    ? 0 : inputChannel + 1;
+        }
+        inputBuffer += inputChannelCount;
+        outputBuffer += outputChannelCount;
+    }
+    return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/ChannelCountConverter.h b/media/libaaudio/src/flowgraph/ChannelCountConverter.h
new file mode 100644
index 0000000..e4b6f4e
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ChannelCountConverter.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
+#define FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Change the number of channels without mixing.
+ * When increasing the channel count, duplicate input channels.
+ * When decreasing the channel count, drop input channels.
+ */
+class ChannelCountConverter : public FlowGraphNode {
+public:
+    explicit ChannelCountConverter(
+            int32_t inputChannelCount,
+            int32_t outputChannelCount);
+
+    virtual ~ChannelCountConverter();
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "ChannelCountConverter";
+    }
+
+    FlowGraphPortFloatInput input;
+    FlowGraphPortFloatOutput output;
+};
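+
+// Usage sketch (illustrative; "someSource" and "someSink" stand for any nodes with
+// matching 2- and 4-channel ports):
+//   ChannelCountConverter stereoToQuad(2, 4);
+//   someSource.output.connect(&stereoToQuad.input);
+//   stereoToQuad.output.connect(&someSink.input);
+// Each output frame then repeats the input channels in order: {in0, in1, in0, in1}.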
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_CHANNEL_COUNT_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/ClipToRange.cpp b/media/libaaudio/src/flowgraph/ClipToRange.cpp
index bd9c22a..d2f8a02 100644
--- a/media/libaaudio/src/flowgraph/ClipToRange.cpp
+++ b/media/libaaudio/src/flowgraph/ClipToRange.cpp
@@ -16,25 +16,23 @@
 
 #include <algorithm>
 #include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "ClipToRange.h"
 
 using namespace flowgraph;
 
 ClipToRange::ClipToRange(int32_t channelCount)
-        : input(*this, channelCount)
-        , output(*this, channelCount) {
+        : FlowGraphFilter(channelCount) {
 }
 
-int32_t ClipToRange::onProcess(int64_t framePosition, int32_t numFrames) {
-    int32_t framesToProcess = input.pullData(framePosition, numFrames);
-    const float *inputBuffer = input.getBlock();
-    float *outputBuffer = output.getBlock();
+int32_t ClipToRange::onProcess(int32_t numFrames) {
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
 
-    int32_t numSamples = framesToProcess * output.getSamplesPerFrame();
+    int32_t numSamples = numFrames * output.getSamplesPerFrame();
     for (int32_t i = 0; i < numSamples; i++) {
         *outputBuffer++ = std::min(mMaximum, std::max(mMinimum, *inputBuffer++));
     }
 
-    return framesToProcess;
+    return numFrames;
 }
diff --git a/media/libaaudio/src/flowgraph/ClipToRange.h b/media/libaaudio/src/flowgraph/ClipToRange.h
index 9eef254..22b7804 100644
--- a/media/libaaudio/src/flowgraph/ClipToRange.h
+++ b/media/libaaudio/src/flowgraph/ClipToRange.h
@@ -21,7 +21,7 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
@@ -30,13 +30,13 @@
 constexpr float kDefaultMaxHeadroom = 1.41253754f;
 constexpr float kDefaultMinHeadroom = -kDefaultMaxHeadroom;
 
-class ClipToRange : public AudioProcessorBase {
+class ClipToRange : public FlowGraphFilter {
 public:
     explicit ClipToRange(int32_t channelCount);
 
     virtual ~ClipToRange() = default;
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
 
     void setMinimum(float min) {
         mMinimum = min;
@@ -54,8 +54,9 @@
         return mMaximum;
     }
 
-    AudioFloatInputPort input;
-    AudioFloatOutputPort output;
+    const char *getName() override {
+        return "ClipToRange";
+    }
 
 private:
     float mMinimum = kDefaultMinHeadroom;
diff --git a/media/libaaudio/src/flowgraph/FlowGraphNode.cpp b/media/libaaudio/src/flowgraph/FlowGraphNode.cpp
new file mode 100644
index 0000000..4c76e77
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/FlowGraphNode.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "stdio.h"
+#include <algorithm>
+#include <sys/types.h>
+#include "FlowGraphNode.h"
+
+using namespace flowgraph;
+
+/***************************************************************************/
+int32_t FlowGraphNode::pullData(int32_t numFrames, int64_t callCount) {
+    int32_t frameCount = numFrames;
+    // Prevent recursion and multiple execution of nodes.
+    if (callCount > mLastCallCount) {
+        mLastCallCount = callCount;
+        if (mDataPulledAutomatically) {
+            // Pull from all the upstream nodes.
+            for (auto &port : mInputPorts) {
+                // TODO fix bug of leaving unused data in some ports if using multiple AudioSource
+                frameCount = port.get().pullData(callCount, frameCount);
+            }
+        }
+        if (frameCount > 0) {
+            frameCount = onProcess(frameCount);
+        }
+        mLastFrameCount = frameCount;
+    } else {
+        frameCount = mLastFrameCount;
+    }
+    return frameCount;
+}
+
+void FlowGraphNode::pullReset() {
+    if (!mBlockRecursion) {
+        mBlockRecursion = true; // for cyclic graphs
+        // Pull reset from all the upstream nodes.
+        for (auto &port : mInputPorts) {
+            port.get().pullReset();
+        }
+        mBlockRecursion = false;
+        reset();
+    }
+}
+
+void FlowGraphNode::reset() {
+    mLastFrameCount = 0;
+    mLastCallCount = kInitialCallCount;
+}
+
+/***************************************************************************/
+FlowGraphPortFloat::FlowGraphPortFloat(FlowGraphNode &parent,
+                               int32_t samplesPerFrame,
+                               int32_t framesPerBuffer)
+        : FlowGraphPort(parent, samplesPerFrame)
+        , mFramesPerBuffer(framesPerBuffer)
+        , mBuffer(nullptr) {
+    size_t numFloats = framesPerBuffer * getSamplesPerFrame();
+    mBuffer = std::make_unique<float[]>(numFloats);
+}
+
+/***************************************************************************/
+int32_t FlowGraphPortFloatOutput::pullData(int64_t callCount, int32_t numFrames) {
+    numFrames = std::min(getFramesPerBuffer(), numFrames);
+    return mContainingNode.pullData(numFrames, callCount);
+}
+
+void FlowGraphPortFloatOutput::pullReset() {
+    mContainingNode.pullReset();
+}
+
+// These need to be in the .cpp file because of forward cross references.
+void FlowGraphPortFloatOutput::connect(FlowGraphPortFloatInput *port) {
+    port->connect(this);
+}
+
+void FlowGraphPortFloatOutput::disconnect(FlowGraphPortFloatInput *port) {
+    port->disconnect(this);
+}
+
+/***************************************************************************/
+int32_t FlowGraphPortFloatInput::pullData(int64_t callCount, int32_t numFrames) {
+    return (mConnected == nullptr)
+            ? std::min(getFramesPerBuffer(), numFrames)
+            : mConnected->pullData(callCount, numFrames);
+}
+void FlowGraphPortFloatInput::pullReset() {
+    if (mConnected != nullptr) mConnected->pullReset();
+}
+
+float *FlowGraphPortFloatInput::getBuffer() {
+    if (mConnected == nullptr) {
+        return FlowGraphPortFloat::getBuffer(); // loaded using setValue()
+    } else {
+        return mConnected->getBuffer();
+    }
+}
+
+int32_t FlowGraphSink::pullData(int32_t numFrames) {
+    return FlowGraphNode::pullData(numFrames, getLastCallCount() + 1);
+}
diff --git a/media/libaaudio/src/flowgraph/FlowGraphNode.h b/media/libaaudio/src/flowgraph/FlowGraphNode.h
new file mode 100644
index 0000000..69c83dd
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/FlowGraphNode.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * FlowGraphNode.h
+ *
+ * Processing node and ports that can be used in a simple data flow graph.
+ * This was designed to work with audio but could be used for other
+ * types of data.
+ */
+
+#ifndef FLOWGRAPH_FLOW_GRAPH_NODE_H
+#define FLOWGRAPH_FLOW_GRAPH_NODE_H
+
+#include <cassert>
+#include <cstring>
+#include <math.h>
+#include <memory>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+#include <vector>
+
+// TODO Move these classes into separate files.
+// TODO Review use of raw pointers for connect(). Maybe use smart pointers but need to avoid
+//      run-time deallocation in audio thread.
+
+// Set this to 1 if using it inside the Android framework.
+// This code is kept here so that it can be moved easily between Oboe and AAudio.
+#ifndef FLOWGRAPH_ANDROID_INTERNAL
+#define FLOWGRAPH_ANDROID_INTERNAL 0
+#endif
+
+namespace flowgraph {
+
+// Default buffer size that can be overridden when the FlowGraphPortFloat is created.
+// If it is too small then we will have too much overhead from switching between nodes.
+// If it is too high then we will thrash the caches.
+constexpr int kDefaultBufferSize = 8; // arbitrary
+
+class FlowGraphPort;
+class FlowGraphPortFloatInput;
+
+/***************************************************************************/
+/**
+ * Base class for all nodes in the flowgraph.
+ */
+class FlowGraphNode {
+public:
+    FlowGraphNode() = default;
+    virtual ~FlowGraphNode() = default;
+
+    /**
+     * Read from the input ports,
+     * generate multiple frames of data then write the results to the output ports.
+     *
+     * @param numFrames maximum number of frames requested for processing
+     * @return number of frames actually processed
+     */
+    virtual int32_t onProcess(int32_t numFrames) = 0;
+
+    /**
+     * If the callCount is greater than the previous callCount then call
+     * pullData on all of the upstream nodes.
+     * Then call onProcess().
+     * This prevents infinite recursion in case of cyclic graphs.
+     * It also prevents nodes upstream from a branch from being executed twice.
+     *
+     * @param callCount
+     * @param numFrames
+     * @return number of frames valid
+     */
+    int32_t pullData(int32_t numFrames, int64_t callCount);
+
+    /**
+     * Recursively reset all the nodes in the graph, starting from a Sink.
+     *
+     * This must not be called at the same time as pullData!
+     */
+    void pullReset();
+
+    /**
+     * Reset framePosition counters.
+     */
+    virtual void reset();
+
+    void addInputPort(FlowGraphPort &port) {
+        mInputPorts.emplace_back(port);
+    }
+
+    bool isDataPulledAutomatically() const {
+        return mDataPulledAutomatically;
+    }
+
+    /**
+     * Set true if you want the data pulled through the graph automatically.
+     * This is the default.
+     *
+     * Set false if you want to pull the data from the input ports in the onProcess() method.
+     * You might do this, for example, in a sample rate converting node.
+     *
+     * @param automatic
+     */
+    void setDataPulledAutomatically(bool automatic) {
+        mDataPulledAutomatically = automatic;
+    }
+
+    virtual const char *getName() {
+        return "FlowGraph";
+    }
+
+    int64_t getLastCallCount() {
+        return mLastCallCount;
+    }
+
+protected:
+
+    static constexpr int64_t  kInitialCallCount = -1;
+    int64_t  mLastCallCount = kInitialCallCount;
+
+    std::vector<std::reference_wrapper<FlowGraphPort>> mInputPorts;
+
+private:
+    bool     mDataPulledAutomatically = true;
+    bool     mBlockRecursion = false;
+    int32_t  mLastFrameCount = 0;
+
+};
+
+/***************************************************************************/
+/**
+  * This is a connector that allows data to flow between modules.
+  *
+  * The ports are the primary means of interacting with a module.
+  * So they are generally declared as public.
+  *
+  */
+class FlowGraphPort {
+public:
+    FlowGraphPort(FlowGraphNode &parent, int32_t samplesPerFrame)
+            : mContainingNode(parent)
+            , mSamplesPerFrame(samplesPerFrame) {
+    }
+
+    virtual ~FlowGraphPort() = default;
+
+    // Ports are often declared public. So let's make them non-copyable.
+    FlowGraphPort(const FlowGraphPort&) = delete;
+    FlowGraphPort& operator=(const FlowGraphPort&) = delete;
+
+    int32_t getSamplesPerFrame() const {
+        return mSamplesPerFrame;
+    }
+
+    virtual int32_t pullData(int64_t framePosition, int32_t numFrames) = 0;
+
+    virtual void pullReset() {}
+
+protected:
+    FlowGraphNode &mContainingNode;
+
+private:
+    const int32_t    mSamplesPerFrame = 1;
+};
+
+/***************************************************************************/
+/**
+ * This port contains a 32-bit float buffer that can contain several frames of data.
+ * Processing the data in a block improves performance.
+ *
+ * The size is framesPerBuffer * samplesPerFrame.
+ */
+class FlowGraphPortFloat  : public FlowGraphPort {
+public:
+    FlowGraphPortFloat(FlowGraphNode &parent,
+                   int32_t samplesPerFrame,
+                   int32_t framesPerBuffer = kDefaultBufferSize
+                );
+
+    virtual ~FlowGraphPortFloat() = default;
+
+    int32_t getFramesPerBuffer() const {
+        return mFramesPerBuffer;
+    }
+
+protected:
+
+    /**
+     * @return buffer internal to the port or from a connected port
+     */
+    virtual float *getBuffer() {
+        return mBuffer.get();
+    }
+
+private:
+    const int32_t    mFramesPerBuffer = 1;
+    std::unique_ptr<float[]> mBuffer; // allocated in constructor
+};
+
+/***************************************************************************/
+/**
+  * The results of a node's processing are stored in the buffers of the output ports.
+  */
+class FlowGraphPortFloatOutput : public FlowGraphPortFloat {
+public:
+    FlowGraphPortFloatOutput(FlowGraphNode &parent, int32_t samplesPerFrame)
+            : FlowGraphPortFloat(parent, samplesPerFrame) {
+    }
+
+    virtual ~FlowGraphPortFloatOutput() = default;
+
+    using FlowGraphPortFloat::getBuffer;
+
+    /**
+     * Connect to the input of another module.
+     * An input port can only have one connection.
+     * An output port can have multiple connections.
+     * If you connect a second output port to an input port
+     * then it overwrites the previous connection.
+     *
+     * This is not thread safe. Do not modify the graph topology from another thread while running.
+     * Also do not delete a module while it is connected to another port if the graph is running.
+     */
+    void connect(FlowGraphPortFloatInput *port);
+
+    /**
+     * Disconnect from the input of another module.
+     * This is not thread safe.
+     */
+    void disconnect(FlowGraphPortFloatInput *port);
+
+    /**
+     * Call the parent module's onProcess() method.
+     * That may pull data from its inputs and recursively
+     * process the entire graph.
+     * @return number of frames actually pulled
+     */
+    int32_t pullData(int64_t framePosition, int32_t numFrames) override;
+
+
+    void pullReset() override;
+
+};
+
+/***************************************************************************/
+
+/**
+ * An input port for streaming audio data.
+ * You can set a value that will be used for processing.
+ * If you connect an output port to this port then its value will be used instead.
+ */
+class FlowGraphPortFloatInput : public FlowGraphPortFloat {
+public:
+    FlowGraphPortFloatInput(FlowGraphNode &parent, int32_t samplesPerFrame)
+            : FlowGraphPortFloat(parent, samplesPerFrame) {
+        // Add to parent so it can pull data from each input.
+        parent.addInputPort(*this);
+    }
+
+    virtual ~FlowGraphPortFloatInput() = default;
+
+    /**
+     * If connected to an output port then this will return
+     * that output port's buffer.
+     * If not connected then it returns the input port's own buffer,
+     * which can be loaded using setValue().
+     */
+    float *getBuffer() override;
+
+    /**
+     * Write every value of the float buffer.
+     * This value will be ignored if an output port is connected
+     * to this port.
+     */
+    void setValue(float value) {
+        int numFloats = kDefaultBufferSize * getSamplesPerFrame();
+        float *buffer = getBuffer();
+        for (int i = 0; i < numFloats; i++) {
+            *buffer++ = value;
+        }
+    }
+
+    /**
+     * Connect to the output of another module.
+     * An input port can only have one connection.
+     * An output port can have multiple connections.
+     * This is not thread safe.
+     */
+    void connect(FlowGraphPortFloatOutput *port) {
+        assert(getSamplesPerFrame() == port->getSamplesPerFrame());
+        mConnected = port;
+    }
+
+    void disconnect(FlowGraphPortFloatOutput *port) {
+        assert(mConnected == port);
+        (void) port;
+        mConnected = nullptr;
+    }
+
+    void disconnect() {
+        mConnected = nullptr;
+    }
+
+    /**
+     * Pull data from any output port that is connected.
+     */
+    int32_t pullData(int64_t framePosition, int32_t numFrames) override;
+
+    void pullReset() override;
+
+private:
+    FlowGraphPortFloatOutput *mConnected = nullptr;
+};
+
+/***************************************************************************/
+
+/**
+ * Base class for an edge node in a graph that has no upstream nodes.
+ * It outputs data but does not consume data.
+ * By default, it will read its data from an external buffer.
+ */
+class FlowGraphSource : public FlowGraphNode {
+public:
+    explicit FlowGraphSource(int32_t channelCount)
+            : output(*this, channelCount) {
+    }
+
+    virtual ~FlowGraphSource() = default;
+
+    FlowGraphPortFloatOutput output;
+};
+
+/***************************************************************************/
+
+/**
+ * Base class for a source that reads its data from an external buffer
+ * supplied via setData(). It outputs data but does not consume data.
+ */
+class FlowGraphSourceBuffered : public FlowGraphSource {
+public:
+    explicit FlowGraphSourceBuffered(int32_t channelCount)
+            : FlowGraphSource(channelCount) {}
+
+    virtual ~FlowGraphSourceBuffered() = default;
+
+    /**
+     * Specify buffer that the node will read from.
+     *
+     * @param data TODO Consider using std::shared_ptr.
+     * @param numFrames
+     */
+    void setData(const void *data, int32_t numFrames) {
+        mData = data;
+        mSizeInFrames = numFrames;
+        mFrameIndex = 0;
+    }
+
+protected:
+    const void *mData = nullptr;
+    int32_t     mSizeInFrames = 0; // number of frames in mData
+    int32_t     mFrameIndex = 0; // index of next frame to be processed
+};
+
+/***************************************************************************/
+/**
+ * Base class for an edge node in a graph that has no downstream nodes.
+ * It consumes data but does not output data.
+ * The graph is executed when data is read() from this node,
+ * which pulls data from the upstream nodes.
+ */
+class FlowGraphSink : public FlowGraphNode {
+public:
+    explicit FlowGraphSink(int32_t channelCount)
+            : input(*this, channelCount) {
+    }
+
+    virtual ~FlowGraphSink() = default;
+
+    FlowGraphPortFloatInput input;
+
+    /**
+     * Do nothing. The work happens in the read() method.
+     *
+     * @param numFrames
+     * @return number of frames actually processed
+     */
+    int32_t onProcess(int32_t numFrames) override {
+        return numFrames;
+    }
+
+    virtual int32_t read(void *data, int32_t numFrames) = 0;
+
+protected:
+    /**
+     * Pull data through the graph using this node's last callCount.
+     * @param numFrames
+     * @return number of frames actually pulled
+     */
+    int32_t pullData(int32_t numFrames);
+};
+
+/***************************************************************************/
+/**
+ * Base class for a node that has an input and an output with the same number of channels.
+ * This may include traditional filters, e.g. FIR, but also
+ * any processing node that converts input to output.
+ */
+class FlowGraphFilter : public FlowGraphNode {
+public:
+    explicit FlowGraphFilter(int32_t channelCount)
+            : input(*this, channelCount)
+            , output(*this, channelCount) {
+    }
+
+    virtual ~FlowGraphFilter() = default;
+
+    FlowGraphPortFloatInput input;
+    FlowGraphPortFloatOutput output;
+};
+
+} /* namespace flowgraph */
+
+#endif /* FLOWGRAPH_FLOW_GRAPH_NODE_H */
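A minimal usage sketch of how these ports and nodes are meant to compose, assuming the SourceFloat, MonoToMultiConverter, and SinkFloat classes defined elsewhere in this patch; the buffers and frame counts are placeholders:

    // Pull-model wiring: mono source -> fan out to stereo -> float sink.
    SourceFloat source(1);                 // 1 channel of float input data
    MonoToMultiConverter monoToStereo(2);  // duplicate the mono signal onto 2 channels
    SinkFloat sink(2);                     // interleaved stereo output
    monoToStereo.input.connect(&source.output);
    sink.input.connect(&monoToStereo.output);
    source.setData(monoBuffer, numInputFrames);                      // caller-owned float data
    int32_t framesRead = sink.read(stereoBuffer, numOutputFrames);   // runs the graph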
diff --git a/media/libaaudio/src/flowgraph/ManyToMultiConverter.cpp b/media/libaaudio/src/flowgraph/ManyToMultiConverter.cpp
new file mode 100644
index 0000000..879685e
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ManyToMultiConverter.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+
+#include "ManyToMultiConverter.h"
+
+using namespace flowgraph;
+
+ManyToMultiConverter::ManyToMultiConverter(int32_t channelCount)
+        : inputs(channelCount)
+        , output(*this, channelCount) {
+    for (int i = 0; i < channelCount; i++) {
+        inputs[i] = std::make_unique<FlowGraphPortFloatInput>(*this, 1);
+    }
+}
+
+int32_t ManyToMultiConverter::onProcess(int32_t numFrames) {
+    int32_t channelCount = output.getSamplesPerFrame();
+
+    for (int ch = 0; ch < channelCount; ch++) {
+        const float *inputBuffer = inputs[ch]->getBuffer();
+        float *outputBuffer = output.getBuffer() + ch;
+
+        for (int i = 0; i < numFrames; i++) {
+            // read one, write into the proper interleaved output channel
+            float sample = *inputBuffer++;
+            *outputBuffer = sample;
+            outputBuffer += channelCount; // advance to next multichannel frame
+        }
+    }
+    return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/ManyToMultiConverter.h b/media/libaaudio/src/flowgraph/ManyToMultiConverter.h
new file mode 100644
index 0000000..c7460ff
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/ManyToMultiConverter.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
+#define FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <vector>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Combine multiple mono inputs into one interleaved multi-channel output.
+ */
+class ManyToMultiConverter : public flowgraph::FlowGraphNode {
+public:
+    explicit ManyToMultiConverter(int32_t channelCount);
+
+    virtual ~ManyToMultiConverter() = default;
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    void setEnabled(bool /*enabled*/) {}
+
+    std::vector<std::unique_ptr<flowgraph::FlowGraphPortFloatInput>> inputs;
+    flowgraph::FlowGraphPortFloatOutput output;
+
+    const char *getName() override {
+        return "ManyToMultiConverter";
+    }
+
+};
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MANY_TO_MULTI_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/MonoBlend.cpp b/media/libaaudio/src/flowgraph/MonoBlend.cpp
new file mode 100644
index 0000000..62e2809
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MonoBlend.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+
+#include "MonoBlend.h"
+
+using namespace flowgraph;
+
+MonoBlend::MonoBlend(int32_t channelCount)
+        : FlowGraphFilter(channelCount)
+        , mInvChannelCount(1. / channelCount)
+{
+}
+
+int32_t MonoBlend::onProcess(int32_t numFrames) {
+    int32_t channelCount = output.getSamplesPerFrame();
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
+
+    for (int32_t i = 0; i < numFrames; ++i) {
+        float accum = 0;
+        for (int32_t j = 0; j < channelCount; ++j) {
+            accum += *inputBuffer++;
+        }
+        accum *= mInvChannelCount;
+        for (int32_t j = 0; j < channelCount; ++j) {
+            *outputBuffer++ = accum;
+        }
+    }
+
+    return numFrames;
+}
\ No newline at end of file
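A worked illustration of the blend above, assuming channelCount == 2: for an input frame { 0.5f, -0.25f } the accumulator is 0.25f, which scaled by mInvChannelCount (1/2) gives 0.125f, so the output frame is { 0.125f, 0.125f }.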
diff --git a/media/libaaudio/src/flowgraph/MonoBlend.h b/media/libaaudio/src/flowgraph/MonoBlend.h
new file mode 100644
index 0000000..7e3c35b
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MonoBlend.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MONO_BLEND_H
+#define FLOWGRAPH_MONO_BLEND_H
+
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Combine data between multiple channels so each channel is an average
+ * of all channels.
+ */
+class MonoBlend : public FlowGraphFilter {
+public:
+    explicit MonoBlend(int32_t channelCount);
+
+    virtual ~MonoBlend() = default;
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "MonoBlend";
+    }
+private:
+    const float mInvChannelCount;
+};
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MONO_BLEND_H
diff --git a/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp b/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
index c6fcac6..c8d60b9 100644
--- a/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
+++ b/media/libaaudio/src/flowgraph/MonoToMultiConverter.cpp
@@ -14,32 +14,28 @@
  * limitations under the License.
  */
 
-
 #include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "MonoToMultiConverter.h"
 
 using namespace flowgraph;
 
-MonoToMultiConverter::MonoToMultiConverter(int32_t channelCount)
+MonoToMultiConverter::MonoToMultiConverter(int32_t outputChannelCount)
         : input(*this, 1)
-        , output(*this, channelCount) {
+        , output(*this, outputChannelCount) {
 }
 
-int32_t MonoToMultiConverter::onProcess(int64_t framePosition, int32_t numFrames) {
-    int32_t framesToProcess = input.pullData(framePosition, numFrames);
-
-    const float *inputBuffer = input.getBlock();
-    float *outputBuffer = output.getBlock();
+int32_t MonoToMultiConverter::onProcess(int32_t numFrames) {
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
     int32_t channelCount = output.getSamplesPerFrame();
-    // TODO maybe move to audio_util as audio_mono_to_multi()
-    for (int i = 0; i < framesToProcess; i++) {
+    for (int i = 0; i < numFrames; i++) {
         // read one, write many
         float sample = *inputBuffer++;
         for (int channel = 0; channel < channelCount; channel++) {
             *outputBuffer++ = sample;
         }
     }
-    return framesToProcess;
+    return numFrames;
 }
 
diff --git a/media/libaaudio/src/flowgraph/MonoToMultiConverter.h b/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
index 5058ae0..6e87ccb 100644
--- a/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
+++ b/media/libaaudio/src/flowgraph/MonoToMultiConverter.h
@@ -14,27 +14,34 @@
  * limitations under the License.
  */
 
-
 #ifndef FLOWGRAPH_MONO_TO_MULTI_CONVERTER_H
 #define FLOWGRAPH_MONO_TO_MULTI_CONVERTER_H
 
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class MonoToMultiConverter : public AudioProcessorBase {
+/**
+ * Convert a monophonic stream to a multi-channel interleaved stream
+ * with the same signal on each channel.
+ */
+class MonoToMultiConverter : public FlowGraphNode {
 public:
-    explicit MonoToMultiConverter(int32_t channelCount);
+    explicit MonoToMultiConverter(int32_t outputChannelCount);
 
     virtual ~MonoToMultiConverter() = default;
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
 
-    AudioFloatInputPort input;
-    AudioFloatOutputPort output;
+    const char *getName() override {
+        return "MonoToMultiConverter";
+    }
+
+    FlowGraphPortFloatInput input;
+    FlowGraphPortFloatOutput output;
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/MultiToManyConverter.cpp b/media/libaaudio/src/flowgraph/MultiToManyConverter.cpp
new file mode 100644
index 0000000..f074364
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToManyConverter.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include "FlowGraphNode.h"
+#include "MultiToManyConverter.h"
+
+using namespace flowgraph;
+
+MultiToManyConverter::MultiToManyConverter(int32_t channelCount)
+        : outputs(channelCount)
+        , input(*this, channelCount) {
+    for (int i = 0; i < channelCount; i++) {
+        outputs[i] = std::make_unique<FlowGraphPortFloatOutput>(*this, 1);
+    }
+}
+
+MultiToManyConverter::~MultiToManyConverter() = default;
+
+int32_t MultiToManyConverter::onProcess(int32_t numFrames) {
+    int32_t channelCount = input.getSamplesPerFrame();
+
+    for (int ch = 0; ch < channelCount; ch++) {
+        const float *inputBuffer = input.getBuffer() + ch;
+        float *outputBuffer = outputs[ch]->getBuffer();
+
+        for (int i = 0; i < numFrames; i++) {
+            *outputBuffer++ = *inputBuffer;
+            inputBuffer += channelCount;
+        }
+    }
+
+    return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/MultiToManyConverter.h b/media/libaaudio/src/flowgraph/MultiToManyConverter.h
new file mode 100644
index 0000000..de31475
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToManyConverter.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MULTI_TO_MANY_CONVERTER_H
+#define FLOWGRAPH_MULTI_TO_MANY_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Convert a multi-channel interleaved stream to multiple mono-channel
+ * outputs.
+ */
+class MultiToManyConverter : public FlowGraphNode {
+public:
+    explicit MultiToManyConverter(int32_t channelCount);
+
+    virtual ~MultiToManyConverter();
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "MultiToManyConverter";
+    }
+
+    std::vector<std::unique_ptr<flowgraph::FlowGraphPortFloatOutput>> outputs;
+    flowgraph::FlowGraphPortFloatInput input;
+};
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MULTI_TO_MANY_CONVERTER_H
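A sketch of the split/process/merge pattern these converters enable, assuming the ManyToMultiConverter above and the RampLinear filter defined later in this patch; only the wiring is shown:

    MultiToManyConverter splitter(2);   // interleaved stereo -> two mono output ports
    ManyToMultiConverter merger(2);     // two mono input ports -> interleaved stereo
    RampLinear leftGain(1);             // process only the left channel
    leftGain.input.connect(splitter.outputs[0].get());
    merger.inputs[0]->connect(&leftGain.output);
    merger.inputs[1]->connect(splitter.outputs[1].get());   // right channel passes through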
diff --git a/media/libaaudio/src/flowgraph/MultiToMonoConverter.cpp b/media/libaaudio/src/flowgraph/MultiToMonoConverter.cpp
new file mode 100644
index 0000000..c745108
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToMonoConverter.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include "FlowGraphNode.h"
+#include "MultiToMonoConverter.h"
+
+using namespace flowgraph;
+
+MultiToMonoConverter::MultiToMonoConverter(int32_t inputChannelCount)
+        : input(*this, inputChannelCount)
+        , output(*this, 1) {
+}
+
+MultiToMonoConverter::~MultiToMonoConverter() = default;
+
+int32_t MultiToMonoConverter::onProcess(int32_t numFrames) {
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
+    int32_t channelCount = input.getSamplesPerFrame();
+    for (int i = 0; i < numFrames; i++) {
+        // read the first channel of the multi-channel frame, write one mono sample
+        *outputBuffer++ = *inputBuffer;
+        inputBuffer += channelCount;
+    }
+    return numFrames;
+}
+
diff --git a/media/libaaudio/src/flowgraph/MultiToMonoConverter.h b/media/libaaudio/src/flowgraph/MultiToMonoConverter.h
new file mode 100644
index 0000000..37c53bd
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/MultiToMonoConverter.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
+#define FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+
+namespace flowgraph {
+
+/**
+ * Convert a multi-channel interleaved stream to a monophonic stream
+ * by extracting channel[0].
+ */
+class MultiToMonoConverter : public FlowGraphNode {
+public:
+    explicit MultiToMonoConverter(int32_t inputChannelCount);
+
+    virtual ~MultiToMonoConverter();
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "MultiToMonoConverter";
+    }
+
+    FlowGraphPortFloatInput input;
+    FlowGraphPortFloatOutput output;
+};
+
+} /* namespace flowgraph */
+
+#endif //FLOWGRAPH_MULTI_TO_MONO_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/RampLinear.cpp b/media/libaaudio/src/flowgraph/RampLinear.cpp
index 0cc32e5..905ae07 100644
--- a/media/libaaudio/src/flowgraph/RampLinear.cpp
+++ b/media/libaaudio/src/flowgraph/RampLinear.cpp
@@ -14,20 +14,15 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "RampLinear"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
 #include <algorithm>
 #include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "RampLinear.h"
 
 using namespace flowgraph;
 
 RampLinear::RampLinear(int32_t channelCount)
-        : input(*this, channelCount)
-        , output(*this, channelCount) {
+        : FlowGraphFilter(channelCount) {
     mTarget.store(1.0f);
 }
 
@@ -38,7 +33,7 @@
 void RampLinear::setTarget(float target) {
     mTarget.store(target);
     // If the ramp has not been used then start immediately at this level.
-    if (mLastFramePosition < 0) {
+    if (mLastCallCount == kInitialCallCount) {
         forceCurrent(target);
     }
 }
@@ -47,10 +42,9 @@
     return mLevelTo - (mRemaining * mScaler);
 }
 
-int32_t RampLinear::onProcess(int64_t framePosition, int32_t numFrames) {
-    int32_t framesToProcess = input.pullData(framePosition, numFrames);
-    const float *inputBuffer = input.getBlock();
-    float *outputBuffer = output.getBlock();
+int32_t RampLinear::onProcess(int32_t numFrames) {
+    const float *inputBuffer = input.getBuffer();
+    float *outputBuffer = output.getBuffer();
     int32_t channelCount = output.getSamplesPerFrame();
 
     float target = getTarget();
@@ -59,12 +53,10 @@
         mLevelFrom = interpolateCurrent();
         mLevelTo = target;
         mRemaining = mLengthInFrames;
-        ALOGV("%s() mLevelFrom = %f, mLevelTo = %f, mRemaining = %d, mScaler = %f",
-              __func__, mLevelFrom, mLevelTo, mRemaining, mScaler);
         mScaler = (mLevelTo - mLevelFrom) / mLengthInFrames; // for interpolation
     }
 
-    int32_t framesLeft = framesToProcess;
+    int32_t framesLeft = numFrames;
 
     if (mRemaining > 0) { // Ramping? This doesn't happen very often.
         int32_t framesToRamp = std::min(framesLeft, mRemaining);
@@ -85,5 +77,5 @@
         *outputBuffer++ = *inputBuffer++ * mLevelTo;
     }
 
-    return framesToProcess;
+    return numFrames;
 }
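A brief sketch of how the ramp behaves, using only the setTarget() shown above; the gain values are illustrative:

    RampLinear ramp(2);        // stereo gain ramp
    ramp.setTarget(1.0f);      // before first use: forceCurrent() jumps straight to 1.0
    // ... stream runs at unity gain ...
    ramp.setTarget(0.0f);      // later: onProcess() interpolates from 1.0 down to 0.0
                               // over mLengthInFrames frames, then applies 0.0 directly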
diff --git a/media/libaaudio/src/flowgraph/RampLinear.h b/media/libaaudio/src/flowgraph/RampLinear.h
index bdc8f41..f285704 100644
--- a/media/libaaudio/src/flowgraph/RampLinear.h
+++ b/media/libaaudio/src/flowgraph/RampLinear.h
@@ -21,17 +21,25 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class RampLinear : public AudioProcessorBase {
+/**
+ * When the target is modified, the output will ramp smoothly
+ * between the original and the new target value.
+ * This can be used to smooth out control values and reduce pops.
+ *
+ * The target may be updated while a ramp is in progress, which will trigger
+ * a new ramp from the current value.
+ */
+class RampLinear : public FlowGraphFilter {
 public:
     explicit RampLinear(int32_t channelCount);
 
     virtual ~RampLinear() = default;
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
 
     /**
      * This is used for the next ramp.
@@ -66,8 +74,9 @@
         mLevelTo = level;
     }
 
-    AudioFloatInputPort input;
-    AudioFloatOutputPort output;
+    const char *getName() override {
+        return "RampLinear";
+    }
 
 private:
 
diff --git a/media/libaaudio/src/flowgraph/SampleRateConverter.cpp b/media/libaaudio/src/flowgraph/SampleRateConverter.cpp
new file mode 100644
index 0000000..5c3ed1f
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/SampleRateConverter.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SampleRateConverter.h"
+
+using namespace flowgraph;
+using namespace resampler;
+
+SampleRateConverter::SampleRateConverter(int32_t channelCount, MultiChannelResampler &resampler)
+        : FlowGraphFilter(channelCount)
+        , mResampler(resampler) {
+    setDataPulledAutomatically(false);
+}
+
+void SampleRateConverter::reset() {
+    FlowGraphNode::reset();
+    mInputCursor = kInitialCallCount;
+}
+
+// Return true if there is a sample available.
+bool SampleRateConverter::isInputAvailable() {
+    // If we have consumed all of the input data then go out and get some more.
+    if (mInputCursor >= mNumValidInputFrames) {
+        mInputCallCount++;
+        mNumValidInputFrames = input.pullData(mInputCallCount, input.getFramesPerBuffer());
+        mInputCursor = 0;
+    }
+    return (mInputCursor < mNumValidInputFrames);
+}
+
+const float *SampleRateConverter::getNextInputFrame() {
+    const float *inputBuffer = input.getBuffer();
+    return &inputBuffer[mInputCursor++ * input.getSamplesPerFrame()];
+}
+
+int32_t SampleRateConverter::onProcess(int32_t numFrames) {
+    float *outputBuffer = output.getBuffer();
+    int32_t channelCount = output.getSamplesPerFrame();
+    int framesLeft = numFrames;
+    while (framesLeft > 0) {
+        // Gather input samples as needed.
+        if (mResampler.isWriteNeeded()) {
+            if (isInputAvailable()) {
+                const float *frame = getNextInputFrame();
+                mResampler.writeNextFrame(frame);
+            } else {
+                break;
+            }
+        } else {
+            // Output frame is interpolated from input samples.
+            mResampler.readNextFrame(outputBuffer);
+            outputBuffer += channelCount;
+            framesLeft--;
+        }
+    }
+    return numFrames - framesLeft;
+}
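A sketch of how this node is expected to be constructed, assuming the MultiChannelResampler::make() factory and its Quality enum defined later in this patch:

    std::unique_ptr<resampler::MultiChannelResampler> resampler(
            resampler::MultiChannelResampler::make(
                    2 /* channelCount */, 44100, 48000,
                    resampler::MultiChannelResampler::Quality::Medium));
    SampleRateConverter rateConverter(2, *resampler);
    // Upstream frames are pulled through rateConverter.input one buffer at a time;
    // onProcess() alternates writeNextFrame() and readNextFrame() on the resampler.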
diff --git a/media/libaaudio/src/flowgraph/SampleRateConverter.h b/media/libaaudio/src/flowgraph/SampleRateConverter.h
new file mode 100644
index 0000000..57d76a4
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/SampleRateConverter.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SAMPLE_RATE_CONVERTER_H
+#define OBOE_SAMPLE_RATE_CONVERTER_H
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "FlowGraphNode.h"
+#include "resampler/MultiChannelResampler.h"
+
+namespace flowgraph {
+
+class SampleRateConverter : public FlowGraphFilter {
+public:
+    explicit SampleRateConverter(int32_t channelCount,
+                                 resampler::MultiChannelResampler &resampler);
+
+    virtual ~SampleRateConverter() = default;
+
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SampleRateConverter";
+    }
+
+    void reset() override;
+
+private:
+
+    // Return true if there is a sample available.
+    bool isInputAvailable();
+
+    // This assumes data is available. Only call after calling isInputAvailable().
+    const float *getNextInputFrame();
+
+    resampler::MultiChannelResampler &mResampler;
+
+    int32_t mInputCursor = 0;         // offset into the input port buffer
+    int32_t mNumValidInputFrames = 0; // number of valid frames currently in the input port buffer
+    // We need our own callCount for upstream calls because calls occur at a different rate.
+    // This means we cannot have cyclic graphs or merges that contain an SRC.
+    int64_t mInputCallCount = 0;
+
+};
+
+} /* namespace flowgraph */
+
+#endif //OBOE_SAMPLE_RATE_CONVERTER_H
diff --git a/media/libaaudio/src/flowgraph/SinkFloat.cpp b/media/libaaudio/src/flowgraph/SinkFloat.cpp
index fb3dcbc..0588848 100644
--- a/media/libaaudio/src/flowgraph/SinkFloat.cpp
+++ b/media/libaaudio/src/flowgraph/SinkFloat.cpp
@@ -16,31 +16,31 @@
 
 #include <algorithm>
 #include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "SinkFloat.h"
 
 using namespace flowgraph;
 
 SinkFloat::SinkFloat(int32_t channelCount)
-        : AudioSink(channelCount) {
+        : FlowGraphSink(channelCount) {
 }
 
 int32_t SinkFloat::read(void *data, int32_t numFrames) {
     float *floatData = (float *) data;
-    int32_t channelCount = input.getSamplesPerFrame();
+    const int32_t channelCount = input.getSamplesPerFrame();
 
     int32_t framesLeft = numFrames;
     while (framesLeft > 0) {
         // Run the graph and pull data through the input port.
-        int32_t framesRead = pull(framesLeft);
-        if (framesRead <= 0) {
+        int32_t framesPulled = pullData(framesLeft);
+        if (framesPulled <= 0) {
             break;
         }
-        const float *signal = input.getBlock();
-        int32_t numSamples = framesRead * channelCount;
+        const float *signal = input.getBuffer();
+        int32_t numSamples = framesPulled * channelCount;
         memcpy(floatData, signal, numSamples * sizeof(float));
         floatData += numSamples;
-        framesLeft -= framesRead;
+        framesLeft -= framesPulled;
     }
     return numFrames - framesLeft;
 }
diff --git a/media/libaaudio/src/flowgraph/SinkFloat.h b/media/libaaudio/src/flowgraph/SinkFloat.h
index 7775c08..c812373 100644
--- a/media/libaaudio/src/flowgraph/SinkFloat.h
+++ b/media/libaaudio/src/flowgraph/SinkFloat.h
@@ -21,16 +21,23 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SinkFloat : public AudioSink {
+/**
+ * A FlowGraphSink that lets you read data as 32-bit floats.
+ */
+class SinkFloat : public FlowGraphSink {
 public:
     explicit SinkFloat(int32_t channelCount);
+    ~SinkFloat() override = default;
 
     int32_t read(void *data, int32_t numFrames) override;
 
+    const char *getName() override {
+        return "SinkFloat";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SinkI16.cpp b/media/libaaudio/src/flowgraph/SinkI16.cpp
index ffec8f5..da7fd6b 100644
--- a/media/libaaudio/src/flowgraph/SinkI16.cpp
+++ b/media/libaaudio/src/flowgraph/SinkI16.cpp
@@ -17,17 +17,16 @@
 #include <algorithm>
 #include <unistd.h>
 
-#ifdef __ANDROID__
+#include "SinkI16.h"
+
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
-#include "SinkI16.h"
-
 using namespace flowgraph;
 
 SinkI16::SinkI16(int32_t channelCount)
-        : AudioSink(channelCount) {}
+        : FlowGraphSink(channelCount) {}
 
 int32_t SinkI16::read(void *data, int32_t numFrames) {
     int16_t *shortData = (int16_t *) data;
@@ -36,13 +35,13 @@
     int32_t framesLeft = numFrames;
     while (framesLeft > 0) {
         // Run the graph and pull data through the input port.
-        int32_t framesRead = pull(framesLeft);
+        int32_t framesRead = pullData(framesLeft);
         if (framesRead <= 0) {
             break;
         }
-        const float *signal = input.getBlock();
+        const float *signal = input.getBuffer();
         int32_t numSamples = framesRead * channelCount;
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
         memcpy_to_i16_from_float(shortData, signal, numSamples);
         shortData += numSamples;
         signal += numSamples;
diff --git a/media/libaaudio/src/flowgraph/SinkI16.h b/media/libaaudio/src/flowgraph/SinkI16.h
index 6d86266..1e1ce3a 100644
--- a/media/libaaudio/src/flowgraph/SinkI16.h
+++ b/media/libaaudio/src/flowgraph/SinkI16.h
@@ -20,15 +20,22 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SinkI16 : public AudioSink {
+/**
+ * A FlowGraphSink that lets you read data as 16-bit signed integers.
+ */
+class SinkI16 : public FlowGraphSink {
 public:
     explicit SinkI16(int32_t channelCount);
 
     int32_t read(void *data, int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SinkI16";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SinkI24.cpp b/media/libaaudio/src/flowgraph/SinkI24.cpp
index 0cb077d..a9fb5d2 100644
--- a/media/libaaudio/src/flowgraph/SinkI24.cpp
+++ b/media/libaaudio/src/flowgraph/SinkI24.cpp
@@ -15,19 +15,20 @@
  */
 
 #include <algorithm>
-#include <stdint.h>
+#include <unistd.h>
 
-#ifdef __ANDROID__
+
+#include "FlowGraphNode.h"
+#include "SinkI24.h"
+
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
-#include "SinkI24.h"
-
 using namespace flowgraph;
 
 SinkI24::SinkI24(int32_t channelCount)
-        : AudioSink(channelCount) {}
+        : FlowGraphSink(channelCount) {}
 
 int32_t SinkI24::read(void *data, int32_t numFrames) {
     uint8_t *byteData = (uint8_t *) data;
@@ -36,13 +37,13 @@
     int32_t framesLeft = numFrames;
     while (framesLeft > 0) {
         // Run the graph and pull data through the input port.
-        int32_t framesRead = pull(framesLeft);
+        int32_t framesRead = pullData(framesLeft);
         if (framesRead <= 0) {
             break;
         }
-        const float *floatData = input.getBlock();
+        const float *floatData = input.getBuffer();
         int32_t numSamples = framesRead * channelCount;
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
         memcpy_to_p24_from_float(byteData, floatData, numSamples);
         static const int kBytesPerI24Packed = 3;
         byteData += numSamples * kBytesPerI24Packed;
diff --git a/media/libaaudio/src/flowgraph/SinkI24.h b/media/libaaudio/src/flowgraph/SinkI24.h
index 5b9b505..44078a9 100644
--- a/media/libaaudio/src/flowgraph/SinkI24.h
+++ b/media/libaaudio/src/flowgraph/SinkI24.h
@@ -20,15 +20,23 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SinkI24 : public AudioSink {
+/**
+ * A FlowGraphSink that lets you read data as packed 24-bit signed integers.
+ * The sample size is 3 bytes.
+ */
+class SinkI24 : public FlowGraphSink {
 public:
     explicit SinkI24(int32_t channelCount);
 
     int32_t read(void *data, int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SinkI24";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SinkI32.cpp b/media/libaaudio/src/flowgraph/SinkI32.cpp
index eab863d..9fd4e96 100644
--- a/media/libaaudio/src/flowgraph/SinkI32.cpp
+++ b/media/libaaudio/src/flowgraph/SinkI32.cpp
@@ -14,18 +14,18 @@
  * limitations under the License.
  */
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "FlowgraphUtilities.h"
 #include "SinkI32.h"
 
 using namespace flowgraph;
 
 SinkI32::SinkI32(int32_t channelCount)
-        : AudioSink(channelCount) {}
+        : FlowGraphSink(channelCount) {}
 
 int32_t SinkI32::read(void *data, int32_t numFrames) {
     int32_t *intData = (int32_t *) data;
@@ -34,13 +34,13 @@
     int32_t framesLeft = numFrames;
     while (framesLeft > 0) {
         // Run the graph and pull data through the input port.
-        int32_t framesRead = pull(framesLeft);
+        int32_t framesRead = pullData(framesLeft);
         if (framesRead <= 0) {
             break;
         }
-        const float *signal = input.getBlock();
+        const float *signal = input.getBuffer();
         int32_t numSamples = framesRead * channelCount;
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
         memcpy_to_i32_from_float(intData, signal, numSamples);
         intData += numSamples;
         signal += numSamples;
diff --git a/media/libaaudio/src/flowgraph/SinkI32.h b/media/libaaudio/src/flowgraph/SinkI32.h
index 09d23b7..7456d5f 100644
--- a/media/libaaudio/src/flowgraph/SinkI32.h
+++ b/media/libaaudio/src/flowgraph/SinkI32.h
@@ -19,16 +19,20 @@
 
 #include <stdint.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SinkI32 : public AudioSink {
+class SinkI32 : public FlowGraphSink {
 public:
     explicit SinkI32(int32_t channelCount);
     ~SinkI32() override = default;
 
     int32_t read(void *data, int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SinkI32";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceFloat.cpp b/media/libaaudio/src/flowgraph/SourceFloat.cpp
index 5b3a51e..1b3daf1 100644
--- a/media/libaaudio/src/flowgraph/SourceFloat.cpp
+++ b/media/libaaudio/src/flowgraph/SourceFloat.cpp
@@ -16,23 +16,22 @@
 
 #include <algorithm>
 #include <unistd.h>
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "SourceFloat.h"
 
 using namespace flowgraph;
 
 SourceFloat::SourceFloat(int32_t channelCount)
-        : AudioSource(channelCount) {
+        : FlowGraphSourceBuffered(channelCount) {
 }
 
-int32_t SourceFloat::onProcess(int64_t /*framePosition*/, int32_t numFrames) {
+int32_t SourceFloat::onProcess(int32_t numFrames) {
+    float *outputBuffer = output.getBuffer();
+    const int32_t channelCount = output.getSamplesPerFrame();
 
-    float *outputBuffer = output.getBlock();
-    int32_t channelCount = output.getSamplesPerFrame();
-
-    int32_t framesLeft = mSizeInFrames - mFrameIndex;
-    int32_t framesToProcess = std::min(numFrames, framesLeft);
-    int32_t numSamples = framesToProcess * channelCount;
+    const int32_t framesLeft = mSizeInFrames - mFrameIndex;
+    const int32_t framesToProcess = std::min(numFrames, framesLeft);
+    const int32_t numSamples = framesToProcess * channelCount;
 
     const float *floatBase = (float *) mData;
     const float *floatData = &floatBase[mFrameIndex * channelCount];
diff --git a/media/libaaudio/src/flowgraph/SourceFloat.h b/media/libaaudio/src/flowgraph/SourceFloat.h
index e6eed9f..4719669 100644
--- a/media/libaaudio/src/flowgraph/SourceFloat.h
+++ b/media/libaaudio/src/flowgraph/SourceFloat.h
@@ -20,15 +20,23 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SourceFloat : public AudioSource {
+/**
+ * A FlowGraphSourceBuffered that reads a block of pre-defined float data.
+ */
+class SourceFloat : public FlowGraphSourceBuffered {
 public:
     explicit SourceFloat(int32_t channelCount);
+    ~SourceFloat() override = default;
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SourceFloat";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceI16.cpp b/media/libaaudio/src/flowgraph/SourceI16.cpp
index a645cc2..8813023 100644
--- a/media/libaaudio/src/flowgraph/SourceI16.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI16.cpp
@@ -17,21 +17,21 @@
 #include <algorithm>
 #include <unistd.h>
 
-#ifdef __ANDROID__
+#include "FlowGraphNode.h"
+#include "SourceI16.h"
+
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
-#include "SourceI16.h"
-
 using namespace flowgraph;
 
 SourceI16::SourceI16(int32_t channelCount)
-        : AudioSource(channelCount) {
+        : FlowGraphSourceBuffered(channelCount) {
 }
 
-int32_t SourceI16::onProcess(int64_t /*framePosition*/, int32_t numFrames) {
-    float *floatData = output.getBlock();
+int32_t SourceI16::onProcess(int32_t numFrames) {
+    float *floatData = output.getBuffer();
     int32_t channelCount = output.getSamplesPerFrame();
 
     int32_t framesLeft = mSizeInFrames - mFrameIndex;
@@ -41,7 +41,7 @@
     const int16_t *shortBase = static_cast<const int16_t *>(mData);
     const int16_t *shortData = &shortBase[mFrameIndex * channelCount];
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
     memcpy_to_float_from_i16(floatData, shortData, numSamples);
 #else
     for (int i = 0; i < numSamples; i++) {
diff --git a/media/libaaudio/src/flowgraph/SourceI16.h b/media/libaaudio/src/flowgraph/SourceI16.h
index 2b116cf..fe440b2 100644
--- a/media/libaaudio/src/flowgraph/SourceI16.h
+++ b/media/libaaudio/src/flowgraph/SourceI16.h
@@ -20,15 +20,21 @@
 #include <unistd.h>
 #include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
-
-class SourceI16 : public AudioSource {
+/**
+ * A FlowGraphSourceBuffered that reads a block of pre-defined 16-bit integer data.
+ */
+class SourceI16 : public FlowGraphSourceBuffered {
 public:
     explicit SourceI16(int32_t channelCount);
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SourceI16";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceI24.cpp b/media/libaaudio/src/flowgraph/SourceI24.cpp
index 50fb98e..1975878 100644
--- a/media/libaaudio/src/flowgraph/SourceI24.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI24.cpp
@@ -15,13 +15,13 @@
  */
 
 #include <algorithm>
-#include <stdint.h>
+#include <unistd.h>
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "SourceI24.h"
 
 using namespace flowgraph;
@@ -29,11 +29,11 @@
 constexpr int kBytesPerI24Packed = 3;
 
 SourceI24::SourceI24(int32_t channelCount)
-        : AudioSource(channelCount) {
+        : FlowGraphSourceBuffered(channelCount) {
 }
 
-int32_t SourceI24::onProcess(int64_t /*framePosition*/, int32_t numFrames) {
-    float *floatData = output.getBlock();
+int32_t SourceI24::onProcess(int32_t numFrames) {
+    float *floatData = output.getBuffer();
     int32_t channelCount = output.getSamplesPerFrame();
 
     int32_t framesLeft = mSizeInFrames - mFrameIndex;
@@ -43,7 +43,7 @@
     const uint8_t *byteBase = (uint8_t *) mData;
     const uint8_t *byteData = &byteBase[mFrameIndex * channelCount * kBytesPerI24Packed];
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
     memcpy_to_float_from_p24(floatData, byteData, numSamples);
 #else
     static const float scale = 1. / (float)(1UL << 31);
diff --git a/media/libaaudio/src/flowgraph/SourceI24.h b/media/libaaudio/src/flowgraph/SourceI24.h
index 2ed6f18..3779534 100644
--- a/media/libaaudio/src/flowgraph/SourceI24.h
+++ b/media/libaaudio/src/flowgraph/SourceI24.h
@@ -17,17 +17,25 @@
 #ifndef FLOWGRAPH_SOURCE_I24_H
 #define FLOWGRAPH_SOURCE_I24_H
 
-#include <stdint.h>
+#include <unistd.h>
+#include <sys/types.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SourceI24 : public AudioSource {
+/**
+ * A FlowGraphSourceBuffered that reads a block of pre-defined 24-bit packed integer data.
+ */
+class SourceI24 : public FlowGraphSourceBuffered {
 public:
     explicit SourceI24(int32_t channelCount);
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
+
+    const char *getName() override {
+        return "SourceI24";
+    }
 };
 
 } /* namespace flowgraph */
diff --git a/media/libaaudio/src/flowgraph/SourceI32.cpp b/media/libaaudio/src/flowgraph/SourceI32.cpp
index 95bfd8f..4b2e8c4 100644
--- a/media/libaaudio/src/flowgraph/SourceI32.cpp
+++ b/media/libaaudio/src/flowgraph/SourceI32.cpp
@@ -17,31 +17,31 @@
 #include <algorithm>
 #include <unistd.h>
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
 #include <audio_utils/primitives.h>
 #endif
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 #include "SourceI32.h"
 
 using namespace flowgraph;
 
 SourceI32::SourceI32(int32_t channelCount)
-        : AudioSource(channelCount) {
+        : FlowGraphSourceBuffered(channelCount) {
 }
 
-int32_t SourceI32::onProcess(int64_t /*framePosition*/, int32_t numFrames) {
-    float *floatData = output.getBlock();
-    int32_t channelCount = output.getSamplesPerFrame();
+int32_t SourceI32::onProcess(int32_t numFrames) {
+    float *floatData = output.getBuffer();
+    const int32_t channelCount = output.getSamplesPerFrame();
 
-    int32_t framesLeft = mSizeInFrames - mFrameIndex;
-    int32_t framesToProcess = std::min(numFrames, framesLeft);
-    int32_t numSamples = framesToProcess * channelCount;
+    const int32_t framesLeft = mSizeInFrames - mFrameIndex;
+    const int32_t framesToProcess = std::min(numFrames, framesLeft);
+    const int32_t numSamples = framesToProcess * channelCount;
 
     const int32_t *intBase = static_cast<const int32_t *>(mData);
     const int32_t *intData = &intBase[mFrameIndex * channelCount];
 
-#ifdef __ANDROID__
+#if FLOWGRAPH_ANDROID_INTERNAL
     memcpy_to_float_from_i32(floatData, intData, numSamples);
 #else
     for (int i = 0; i < numSamples; i++) {
diff --git a/media/libaaudio/src/flowgraph/SourceI32.h b/media/libaaudio/src/flowgraph/SourceI32.h
index e50f9be..b4e0d7b 100644
--- a/media/libaaudio/src/flowgraph/SourceI32.h
+++ b/media/libaaudio/src/flowgraph/SourceI32.h
@@ -19,17 +19,20 @@
 
 #include <stdint.h>
 
-#include "AudioProcessorBase.h"
+#include "FlowGraphNode.h"
 
 namespace flowgraph {
 
-class SourceI32 : public AudioSource {
+class SourceI32 : public FlowGraphSourceBuffered {
 public:
     explicit SourceI32(int32_t channelCount);
     ~SourceI32() override = default;
 
-    int32_t onProcess(int64_t framePosition, int32_t numFrames) override;
+    int32_t onProcess(int32_t numFrames) override;
 
+    const char *getName() override {
+        return "SourceI32";
+    }
 private:
     static constexpr float kScale = 1.0 / (1UL << 31);
 };
diff --git a/media/libaaudio/src/flowgraph/resampler/HyperbolicCosineWindow.h b/media/libaaudio/src/flowgraph/resampler/HyperbolicCosineWindow.h
new file mode 100644
index 0000000..f6479ae
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/HyperbolicCosineWindow.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
+#define RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
+
+#include <math.h>
+
+namespace resampler {
+
+/**
+ * Calculate a hyperbolic cosine window centered at 0.
+ * This can be used in place of a Kaiser window.
+ *
+ * The code is based on an anonymous contribution by "a concerned citizen":
+ * https://dsp.stackexchange.com/questions/37714/kaiser-window-approximation
+ */
+class HyperbolicCosineWindow {
+public:
+    HyperbolicCosineWindow() {
+        setStopBandAttenuation(60);
+    }
+
+    /**
+     * @param attenuation typical values range from 30 to 90 dB
+     * @return beta
+     */
+    double setStopBandAttenuation(double attenuation) {
+        double alpha = ((-325.1e-6 * attenuation + 0.1677) * attenuation) - 3.149;
+        setAlpha(alpha);
+        return alpha;
+    }
+
+    void setAlpha(double alpha) {
+        mAlpha = alpha;
+        mInverseCoshAlpha = 1.0 / cosh(alpha);
+    }
+
+    /**
+     * @param x ranges from -1.0 to +1.0
+     */
+    double operator()(double x) {
+        double x2 = x * x;
+        if (x2 >= 1.0) return 0.0;
+        double w = mAlpha * sqrt(1.0 - x2);
+        return cosh(w) * mInverseCoshAlpha;
+    }
+
+private:
+    double mAlpha = 0.0;
+    double mInverseCoshAlpha = 1.0;
+};
+
+} // namespace resampler
+#endif //RESAMPLER_HYPERBOLIC_COSINE_WINDOW_H
diff --git a/media/libaaudio/src/flowgraph/resampler/IntegerRatio.cpp b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.cpp
new file mode 100644
index 0000000..4bd75b3
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IntegerRatio.h"
+
+using namespace resampler;
+
+// Enough primes to cover the common sample rates.
+static const int kPrimes[] = {
+        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
+        43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
+        101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
+        151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199};
+
+void IntegerRatio::reduce() {
+    for (int prime : kPrimes) {
+        if (mNumerator < prime || mDenominator < prime) {
+            break;
+        }
+
+        // Divide out this common prime factor as many times as possible.
+        while (true) {
+            int top = mNumerator / prime;
+            int bottom = mDenominator / prime;
+            if ((top >= 1)
+                && (bottom >= 1)
+                && (top * prime == mNumerator) // divided evenly?
+                && (bottom * prime == mDenominator)) {
+                mNumerator = top;
+                mDenominator = bottom;
+            } else {
+                break;
+            }
+        }
+
+    }
+}
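A worked example of the reduction, assuming the class as declared in IntegerRatio.h:

    resampler::IntegerRatio ratio(44100, 48000);
    ratio.reduce();
    // Common prime factors 2, 2, 3, 5, 5 (i.e. 300) are divided out,
    // so getNumerator() == 147 and getDenominator() == 160.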
diff --git a/media/libaaudio/src/flowgraph/resampler/IntegerRatio.h b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.h
new file mode 100644
index 0000000..8c044d8
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/IntegerRatio.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_INTEGER_RATIO_H
+#define OBOE_INTEGER_RATIO_H
+
+#include <sys/types.h>
+
+namespace resampler {
+
+/**
+ * Represent the ratio of two integers.
+ */
+class IntegerRatio {
+public:
+    IntegerRatio(int32_t numerator, int32_t denominator)
+            : mNumerator(numerator), mDenominator(denominator) {}
+
+    /**
+     * Reduce by removing common prime factors.
+     */
+    void reduce();
+
+    int32_t getNumerator() {
+        return mNumerator;
+    }
+
+    int32_t getDenominator() {
+        return mDenominator;
+    }
+
+private:
+    int32_t mNumerator;
+    int32_t mDenominator;
+};
+
+} // namespace resampler
+
+#endif //OBOE_INTEGER_RATIO_H
diff --git a/media/libaaudio/src/flowgraph/resampler/KaiserWindow.h b/media/libaaudio/src/flowgraph/resampler/KaiserWindow.h
new file mode 100644
index 0000000..73dbc41
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/KaiserWindow.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RESAMPLER_KAISER_WINDOW_H
+#define RESAMPLER_KAISER_WINDOW_H
+
+#include <math.h>
+
+namespace resampler {
+
+/**
+ * Calculate a Kaiser window centered at 0.
+ */
+class KaiserWindow {
+public:
+    KaiserWindow() {
+        setStopBandAttenuation(60);
+    }
+
+    /**
+     * @param attenuation typical values range from 30 to 90 dB
+     * @return beta
+     */
+    double setStopBandAttenuation(double attenuation) {
+        double beta = 0.0;
+        if (attenuation > 50) {
+            beta = 0.1102 * (attenuation - 8.7);
+        } else if (attenuation >= 21) {
+            double a21 = attenuation - 21;
+            beta = 0.5842 * pow(a21, 0.4) + (0.07886 * a21);
+        }
+        setBeta(beta);
+        return beta;
+    }
+
+    void setBeta(double beta) {
+        mBeta = beta;
+        mInverseBesselBeta = 1.0 / bessel(beta);
+    }
+
+    /**
+     * @param x ranges from -1.0 to +1.0
+     */
+    double operator()(double x) {
+        double x2 = x * x;
+        if (x2 >= 1.0) return 0.0;
+        double w = mBeta * sqrt(1.0 - x2);
+        return bessel(w) * mInverseBesselBeta;
+    }
+
+    // Approximation of a modified zero-order Bessel function of the first kind.
+    // Based on a discussion at:
+    // https://dsp.stackexchange.com/questions/37714/kaiser-window-approximation
+    static double bessel(double x) {
+        double y = cosh(0.970941817426052 * x);
+        y += cosh(0.8854560256532099 * x);
+        y += cosh(0.7485107481711011 * x);
+        y += cosh(0.5680647467311558 * x);
+        y += cosh(0.3546048870425356 * x);
+        y += cosh(0.120536680255323 * x);
+        y *= 2;
+        y += cosh(x);
+        y /= 13;
+        return y;
+    }
+
+private:
+    double mBeta = 0.0;
+    double mInverseBesselBeta = 1.0;
+};
+
+} // namespace resampler
+#endif //RESAMPLER_KAISER_WINDOW_H
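The window evaluates w(x) = bessel(beta * sqrt(1 - x^2)) / bessel(beta) for x in [-1, +1]. A small sketch of sampling it across a tap range; numTaps and coefficients are placeholders, and a real low-pass design would also multiply each tap by a sinc() term:

    resampler::KaiserWindow window;
    window.setStopBandAttenuation(75.0);   // ~75 dB stop band => beta ~ 7.3
    for (int i = 0; i < numTaps; i++) {
        double x = (2.0 * i / (numTaps - 1)) - 1.0;   // map tap index onto [-1, +1]
        coefficients[i] = window(x);                  // window weight for this tap
    }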
diff --git a/media/libaaudio/src/flowgraph/resampler/LinearResampler.cpp b/media/libaaudio/src/flowgraph/resampler/LinearResampler.cpp
new file mode 100644
index 0000000..a7748c1
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/LinearResampler.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LinearResampler.h"
+
+using namespace resampler;
+
+LinearResampler::LinearResampler(const MultiChannelResampler::Builder &builder)
+        : MultiChannelResampler(builder) {
+    mPreviousFrame = std::make_unique<float[]>(getChannelCount());
+    mCurrentFrame = std::make_unique<float[]>(getChannelCount());
+}
+
+void LinearResampler::writeFrame(const float *frame) {
+    memcpy(mPreviousFrame.get(), mCurrentFrame.get(), sizeof(float) * getChannelCount());
+    memcpy(mCurrentFrame.get(), frame, sizeof(float) * getChannelCount());
+}
+
+void LinearResampler::readFrame(float *frame) {
+    float *previous = mPreviousFrame.get();
+    float *current = mCurrentFrame.get();
+    float phase = (float) getIntegerPhase() / mDenominator;
+    // interpolate each channel of the frame
+    for (int channel = 0; channel < getChannelCount(); channel++) {
+        float f0 = *previous++;
+        float f1 = *current++;
+        *frame++ = f0 + (phase * (f1 - f0));
+    }
+}
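A worked illustration of the interpolation above: with phase == 0.25 and a channel whose previous sample f0 == 0.0f and current sample f1 == 1.0f, readFrame() writes f0 + phase * (f1 - f0) == 0.25f for that channel.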
diff --git a/media/libaaudio/src/flowgraph/resampler/LinearResampler.h b/media/libaaudio/src/flowgraph/resampler/LinearResampler.h
new file mode 100644
index 0000000..6bde81d
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/LinearResampler.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_LINEAR_RESAMPLER_H
+#define OBOE_LINEAR_RESAMPLER_H
+
+#include <memory>
+#include <sys/types.h>
+#include <unistd.h>
+#include "MultiChannelResampler.h"
+
+namespace resampler {
+
+/**
+ * Simple resampler that uses bi-linear interpolation.
+ */
+class LinearResampler : public MultiChannelResampler {
+public:
+    explicit LinearResampler(const MultiChannelResampler::Builder &builder);
+
+    void writeFrame(const float *frame) override;
+
+    void readFrame(float *frame) override;
+
+private:
+    std::unique_ptr<float[]> mPreviousFrame;
+    std::unique_ptr<float[]> mCurrentFrame;
+};
+
+} // namespace resampler
+#endif //OBOE_LINEAR_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp
new file mode 100644
index 0000000..d630520
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <math.h>
+
+#include "IntegerRatio.h"
+#include "LinearResampler.h"
+#include "MultiChannelResampler.h"
+#include "PolyphaseResampler.h"
+#include "PolyphaseResamplerMono.h"
+#include "PolyphaseResamplerStereo.h"
+#include "SincResampler.h"
+#include "SincResamplerStereo.h"
+
+using namespace resampler;
+
+MultiChannelResampler::MultiChannelResampler(const MultiChannelResampler::Builder &builder)
+        : mNumTaps(builder.getNumTaps())
+        , mX(builder.getChannelCount() * builder.getNumTaps() * 2)
+        , mSingleFrame(builder.getChannelCount())
+        , mChannelCount(builder.getChannelCount())
+        {
+    // Reduce sample rates to the smallest ratio.
+    // For example 44100/48000 would become 147/160.
+    IntegerRatio ratio(builder.getInputRate(), builder.getOutputRate());
+    ratio.reduce();
+    mNumerator = ratio.getNumerator();
+    mDenominator = ratio.getDenominator();
+    mIntegerPhase = mDenominator;
+}
+
+// static factory method
+MultiChannelResampler *MultiChannelResampler::make(int32_t channelCount,
+                                                   int32_t inputRate,
+                                                   int32_t outputRate,
+                                                   Quality quality) {
+    Builder builder;
+    builder.setInputRate(inputRate);
+    builder.setOutputRate(outputRate);
+    builder.setChannelCount(channelCount);
+
+    switch (quality) {
+        case Quality::Fastest:
+            builder.setNumTaps(2);
+            break;
+        case Quality::Low:
+            builder.setNumTaps(4);
+            break;
+        case Quality::Medium:
+        default:
+            builder.setNumTaps(8);
+            break;
+        case Quality::High:
+            builder.setNumTaps(16);
+            break;
+        case Quality::Best:
+            builder.setNumTaps(32);
+            break;
+    }
+
+    // Set the cutoff frequency so that we do not get aliasing when down-sampling.
+    if (inputRate > outputRate) {
+        builder.setNormalizedCutoff(kDefaultNormalizedCutoff);
+    }
+    return builder.build();
+}
+
+MultiChannelResampler *MultiChannelResampler::Builder::build() {
+    if (getNumTaps() == 2) {
+        // Note that this does not do low pass filtering.
+        return new LinearResampler(*this);
+    }
+    IntegerRatio ratio(getInputRate(), getOutputRate());
+    ratio.reduce();
+    bool usePolyphase = (getNumTaps() * ratio.getDenominator()) <= kMaxCoefficients;
+    if (usePolyphase) {
+        if (getChannelCount() == 1) {
+            return new PolyphaseResamplerMono(*this);
+        } else if (getChannelCount() == 2) {
+            return new PolyphaseResamplerStereo(*this);
+        } else {
+            return new PolyphaseResampler(*this);
+        }
+    } else {
+        // Use a less optimized resampler that uses a float phaseIncrement.
+        // TODO mono resampler
+        if (getChannelCount() == 2) {
+            return new SincResamplerStereo(*this);
+        } else {
+            return new SincResampler(*this);
+        }
+    }
+}
+
+void MultiChannelResampler::writeFrame(const float *frame) {
+    // Move cursor before write so that cursor points to last written frame in read.
+    if (--mCursor < 0) {
+        mCursor = getNumTaps() - 1;
+    }
+    float *dest = &mX[mCursor * getChannelCount()];
+    int offset = getNumTaps() * getChannelCount();
+    for (int channel = 0; channel < getChannelCount(); channel++) {
+        // Write twice so we avoid having to wrap when reading.
+        dest[channel] = dest[channel + offset] = frame[channel];
+    }
+}
+
+float MultiChannelResampler::sinc(float radians) {
+    if (fabsf(radians) < 1.0e-9f) return 1.0f;   // avoid divide by zero
+    return sinf(radians) / radians;   // Sinc function
+}
+
+// Generate coefficients in the order they will be used by readFrame().
+// This is more complicated but readFrame() is called repeatedly and should be optimized.
+void MultiChannelResampler::generateCoefficients(int32_t inputRate,
+                                              int32_t outputRate,
+                                              int32_t numRows,
+                                              double phaseIncrement,
+                                              float normalizedCutoff) {
+    mCoefficients.resize(getNumTaps() * numRows);
+    int coefficientIndex = 0;
+    double phase = 0.0; // ranges from 0.0 to 1.0, fraction between samples
+    // Stretch the sinc function for low pass filtering.
+    const float cutoffScaler = normalizedCutoff *
+            ((outputRate < inputRate)
+             ? ((float)outputRate / inputRate)
+             : ((float)inputRate / outputRate));
+    const int numTapsHalf = getNumTaps() / 2; // numTaps must be even.
+    const float numTapsHalfInverse = 1.0f / numTapsHalf;
+    for (int i = 0; i < numRows; i++) {
+        float tapPhase = phase - numTapsHalf;
+        float gain = 0.0; // sum of raw coefficients
+        int gainCursor = coefficientIndex;
+        for (int tap = 0; tap < getNumTaps(); tap++) {
+            float radians = tapPhase * M_PI;
+
+#if MCR_USE_KAISER
+            float window = mKaiserWindow(tapPhase * numTapsHalfInverse);
+#else
+            float window = mCoshWindow(tapPhase * numTapsHalfInverse);
+#endif
+            float coefficient = sinc(radians * cutoffScaler) * window;
+            mCoefficients.at(coefficientIndex++) = coefficient;
+            gain += coefficient;
+            tapPhase += 1.0;
+        }
+        phase += phaseIncrement;
+        while (phase >= 1.0) {
+            phase -= 1.0;
+        }
+
+        // Correct for gain variations.
+        float gainCorrection = 1.0 / gain; // normalize the gain
+        for (int tap = 0; tap < getNumTaps(); tap++) {
+            mCoefficients.at(gainCursor + tap) *= gainCorrection;
+        }
+    }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.h b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.h
new file mode 100644
index 0000000..da79cad
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/MultiChannelResampler.h
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_MULTICHANNEL_RESAMPLER_H
+#define OBOE_MULTICHANNEL_RESAMPLER_H
+
+#include <memory>
+#include <vector>
+#include <sys/types.h>
+#include <unistd.h>
+
+#ifndef MCR_USE_KAISER
+// It appears from the spectrogram that the HyperbolicCosine window leads to fewer artifacts.
+// And it is faster to calculate.
+#define MCR_USE_KAISER 0
+#endif
+
+#if MCR_USE_KAISER
+#include "KaiserWindow.h"
+#else
+#include "HyperbolicCosineWindow.h"
+#endif
+
+namespace resampler {
+
+class MultiChannelResampler {
+
+public:
+
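+    /**
+     * Resampling quality. Higher quality uses more filter taps and more CPU.
+     * In make(), these map to 2, 4, 8, 16, and 32 taps respectively.
+     */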
+    enum class Quality : int32_t {
+        Fastest,
+        Low,
+        Medium,
+        High,
+        Best,
+    };
+
+    class Builder {
+    public:
+        /**
+         * Construct an optimal resampler based on the specified parameters.
+         * @return address of a resampler
+         */
+        MultiChannelResampler *build();
+
+        /**
+         * The number of taps in the resampling filter.
+         * More taps gives better quality but uses more CPU time.
+         * This typically ranges from 4 to 64. Default is 16.
+         *
+         * For polyphase filters, numTaps must be a multiple of four for loop unrolling.
+         * @param numTaps number of taps for the filter
+         * @return address of this builder for chaining calls
+         */
+        Builder *setNumTaps(int32_t numTaps) {
+            mNumTaps = numTaps;
+            return this;
+        }
+
+        /**
+         * Use 1 for mono, 2 for stereo, etc. Default is 1.
+         *
+         * @param channelCount number of channels
+         * @return address of this builder for chaining calls
+         */
+        Builder *setChannelCount(int32_t channelCount) {
+            mChannelCount = channelCount;
+            return this;
+        }
+
+        /**
+         * Default is 48000.
+         *
+         * @param inputRate sample rate of the input stream
+         * @return address of this builder for chaining calls
+         */
+        Builder *setInputRate(int32_t inputRate) {
+            mInputRate = inputRate;
+            return this;
+        }
+
+        /**
+         * Default is 48000.
+         *
+         * @param outputRate sample rate of the output stream
+         * @return address of this builder for chaining calls
+         */
+        Builder *setOutputRate(int32_t outputRate) {
+            mOutputRate = outputRate;
+            return this;
+        }
+
+        /**
+         * Set cutoff frequency relative to the Nyquist rate of the output sample rate.
+         * Set to 1.0 to match the Nyquist frequency.
+         * Set lower to reduce aliasing.
+         * Default is 0.70.
+         *
+         * @param normalizedCutoff anti-aliasing filter cutoff
+         * @return address of this builder for chaining calls
+         */
+        Builder *setNormalizedCutoff(float normalizedCutoff) {
+            mNormalizedCutoff = normalizedCutoff;
+            return this;
+        }
+
+        int32_t getNumTaps() const {
+            return mNumTaps;
+        }
+
+        int32_t getChannelCount() const {
+            return mChannelCount;
+        }
+
+        int32_t getInputRate() const {
+            return mInputRate;
+        }
+
+        int32_t getOutputRate() const {
+            return mOutputRate;
+        }
+
+        float getNormalizedCutoff() const {
+            return mNormalizedCutoff;
+        }
+
+    protected:
+        int32_t mChannelCount = 1;
+        int32_t mNumTaps = 16;
+        int32_t mInputRate = 48000;
+        int32_t mOutputRate = 48000;
+        float   mNormalizedCutoff = kDefaultNormalizedCutoff;
+    };
+
+    virtual ~MultiChannelResampler() = default;
+
+    /**
+     * Factory method for making a resampler that is optimal for the given inputs.
+     *
+     * @param channelCount number of channels, 2 for stereo
+     * @param inputRate sample rate of the input stream
+     * @param outputRate  sample rate of the output stream
+     * @param quality higher quality sounds better but uses more CPU
+     * @return an optimal resampler
+     */
+    static MultiChannelResampler *make(int32_t channelCount,
+                                       int32_t inputRate,
+                                       int32_t outputRate,
+                                       Quality quality);
+
+    bool isWriteNeeded() const {
+        return mIntegerPhase >= mDenominator;
+    }
+
+    /**
+     * Write a frame containing N samples.
+     *
+     * @param frame pointer to the first sample in a frame
+     */
+    void writeNextFrame(const float *frame) {
+        writeFrame(frame);
+        advanceWrite();
+    }
+
+    /**
+     * Read a frame containing N samples.
+     *
+     * @param frame pointer to the first sample in a frame
+     */
+    void readNextFrame(float *frame) {
+        readFrame(frame);
+        advanceRead();
+    }
+
+    int getNumTaps() const {
+        return mNumTaps;
+    }
+
+    int getChannelCount() const {
+        return mChannelCount;
+    }
+
+    static float hammingWindow(float radians, float spread);
+
+    static float sinc(float radians);
+
+protected:
+
+    explicit MultiChannelResampler(const MultiChannelResampler::Builder &builder);
+
+    /**
+     * Write a frame containing N samples.
+     * Call advanceWrite() after calling this.
+     * @param frame pointer to the first sample in a frame
+     */
+    virtual void writeFrame(const float *frame);
+
+    /**
+     * Read a frame containing N samples using interpolation.
+     * Call advanceRead() after calling this.
+     * @param frame pointer to the first sample in a frame
+     */
+    virtual void readFrame(float *frame) = 0;
+
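+    // The integer phase accumulates mNumerator (reduced input rate) on each read
+    // and is reduced by mDenominator (reduced output rate) on each write.
+    // isWriteNeeded() returns true once the phase reaches mDenominator.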
+    void advanceWrite() {
+        mIntegerPhase -= mDenominator;
+    }
+
+    void advanceRead() {
+        mIntegerPhase += mNumerator;
+    }
+
+    /**
+     * Generate the filter coefficients in optimal order.
+     * @param inputRate sample rate of the input stream
+     * @param outputRate  sample rate of the output stream
+     * @param numRows number of rows in the array that contain a set of tap coefficients
+     * @param phaseIncrement how much to increment the phase between rows
+     * @param normalizedCutoff filter cutoff frequency normalized to Nyquist rate of output
+     */
+    void generateCoefficients(int32_t inputRate,
+                              int32_t outputRate,
+                              int32_t numRows,
+                              double phaseIncrement,
+                              float normalizedCutoff);
+
+
+    int32_t getIntegerPhase() {
+        return mIntegerPhase;
+    }
+
+    static constexpr int kMaxCoefficients = 8 * 1024;
+    std::vector<float>   mCoefficients;
+
+    const int            mNumTaps;
+    int                  mCursor = 0;
+    std::vector<float>   mX;           // delayed input values for the FIR
+    std::vector<float>   mSingleFrame; // one frame for temporary use
+    int32_t              mIntegerPhase = 0;
+    int32_t              mNumerator = 0;
+    int32_t              mDenominator = 0;
+
+
+private:
+
+#if MCR_USE_KAISER
+    KaiserWindow           mKaiserWindow;
+#else
+    HyperbolicCosineWindow mCoshWindow;
+#endif
+
+    static constexpr float kDefaultNormalizedCutoff = 0.70f;
+
+    const int              mChannelCount;
+};
+
+} // namespace resampler
+#endif //OBOE_MULTICHANNEL_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.cpp b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.cpp
new file mode 100644
index 0000000..aa4ffd9
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <math.h>
+#include "IntegerRatio.h"
+#include "PolyphaseResampler.h"
+
+using namespace resampler;
+
+PolyphaseResampler::PolyphaseResampler(const MultiChannelResampler::Builder &builder)
+        : MultiChannelResampler(builder)
+        {
+    assert((getNumTaps() % 4) == 0); // Required for loop unrolling.
+
+    int32_t inputRate = builder.getInputRate();
+    int32_t outputRate = builder.getOutputRate();
+
+    int32_t numRows = mDenominator;
+    double phaseIncrement = (double) inputRate / (double) outputRate;
+    generateCoefficients(inputRate, outputRate,
+                         numRows, phaseIncrement,
+                         builder.getNormalizedCutoff());
+}
+
+void PolyphaseResampler::readFrame(float *frame) {
+    // Clear accumulator for mixing.
+    std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
+
+    // Multiply input times windowed sinc function.
+    float *coefficients = &mCoefficients[mCoefficientCursor];
+    float *xFrame = &mX[mCursor * getChannelCount()];
+    for (int i = 0; i < mNumTaps; i++) {
+        float coefficient = *coefficients++;
+        for (int channel = 0; channel < getChannelCount(); channel++) {
+            mSingleFrame[channel] += *xFrame++ * coefficient;
+        }
+    }
+
+    // Advance and wrap through coefficients.
+    mCoefficientCursor = (mCoefficientCursor + mNumTaps) % mCoefficients.size();
+
+    // Copy accumulator to output.
+    for (int channel = 0; channel < getChannelCount(); channel++) {
+        frame[channel] = mSingleFrame[channel];
+    }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.h b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.h
new file mode 100644
index 0000000..1aeb680
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResampler.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_POLYPHASE_RESAMPLER_H
+#define OBOE_POLYPHASE_RESAMPLER_H
+
+#include <memory>
+#include <vector>
+#include <sys/types.h>
+#include <unistd.h>
+#include "MultiChannelResampler.h"
+
+namespace resampler {
+/**
+ * Resampler that is optimized for a reduced ratio of sample rates.
+ * All of the coefficients for each possible phase value are pre-calculated.
+ */
+class PolyphaseResampler : public MultiChannelResampler {
+public:
+    /**
+     *
+     * @param builder builder containing the resampler parameters
+     */
+    explicit PolyphaseResampler(const MultiChannelResampler::Builder &builder);
+
+    virtual ~PolyphaseResampler() = default;
+
+    void readFrame(float *frame) override;
+
+protected:
+
+    int32_t                mCoefficientCursor = 0;
+
+};
+
+} // namespace resampler
+
+#endif //OBOE_POLYPHASE_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.cpp b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.cpp
new file mode 100644
index 0000000..c0e29b7
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include "PolyphaseResamplerMono.h"
+
+using namespace resampler;
+
+#define MONO  1
+
+PolyphaseResamplerMono::PolyphaseResamplerMono(const MultiChannelResampler::Builder &builder)
+        : PolyphaseResampler(builder) {
+    assert(builder.getChannelCount() == MONO);
+}
+
+void PolyphaseResamplerMono::writeFrame(const float *frame) {
+    // Move cursor before write so that cursor points to last written frame in read.
+    if (--mCursor < 0) {
+        mCursor = getNumTaps() - 1;
+    }
+    float *dest = &mX[mCursor * MONO];
+    const int offset = mNumTaps * MONO;
+    // Write each channel twice so we avoid having to wrap when running the FIR.
+    const float sample =  frame[0];
+    // Put ordered writes together.
+    dest[0] = sample;
+    dest[offset] = sample;
+}
+
+void PolyphaseResamplerMono::readFrame(float *frame) {
+    // Clear accumulator.
+    float sum = 0.0;
+
+    // Multiply input times precomputed windowed sinc function.
+    const float *coefficients = &mCoefficients[mCoefficientCursor];
+    float *xFrame = &mX[mCursor * MONO];
+    const int numLoops = mNumTaps >> 2; // n/4
+    for (int i = 0; i < numLoops; i++) {
+        // Manual loop unrolling, might get converted to SIMD.
+        sum += *xFrame++ * *coefficients++;
+        sum += *xFrame++ * *coefficients++;
+        sum += *xFrame++ * *coefficients++;
+        sum += *xFrame++ * *coefficients++;
+    }
+
+    mCoefficientCursor = (mCoefficientCursor + mNumTaps) % mCoefficients.size();
+
+    // Copy accumulator to output.
+    frame[0] = sum;
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.h b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.h
new file mode 100644
index 0000000..0a691a3
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerMono.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_POLYPHASE_RESAMPLER_MONO_H
+#define OBOE_POLYPHASE_RESAMPLER_MONO_H
+
+#include <sys/types.h>
+#include <unistd.h>
+#include "PolyphaseResampler.h"
+
+namespace resampler {
+
+class PolyphaseResamplerMono : public PolyphaseResampler {
+public:
+    explicit PolyphaseResamplerMono(const MultiChannelResampler::Builder &builder);
+
+    virtual ~PolyphaseResamplerMono() = default;
+
+    void writeFrame(const float *frame) override;
+
+    void readFrame(float *frame) override;
+};
+
+} // namespace resampler
+
+#endif //OBOE_POLYPHASE_RESAMPLER_MONO_H
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp
new file mode 100644
index 0000000..e4bef74
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include "PolyphaseResamplerStereo.h"
+
+using namespace resampler;
+
+#define STEREO  2
+
+PolyphaseResamplerStereo::PolyphaseResamplerStereo(const MultiChannelResampler::Builder &builder)
+        : PolyphaseResampler(builder) {
+    assert(builder.getChannelCount() == STEREO);
+}
+
+void PolyphaseResamplerStereo::writeFrame(const float *frame) {
+    // Move cursor before write so that cursor points to last written frame in read.
+    if (--mCursor < 0) {
+        mCursor = getNumTaps() - 1;
+    }
+    float *dest = &mX[mCursor * STEREO];
+    const int offset = mNumTaps * STEREO;
+    // Write each channel twice so we avoid having to wrap when running the FIR.
+    const float left =  frame[0];
+    const float right = frame[1];
+    // Put ordered writes together.
+    dest[0] = left;
+    dest[1] = right;
+    dest[offset] = left;
+    dest[1 + offset] = right;
+}
+
+void PolyphaseResamplerStereo::readFrame(float *frame) {
+    // Clear accumulators.
+    float left = 0.0;
+    float right = 0.0;
+
+    // Multiply input times precomputed windowed sinc function.
+    const float *coefficients = &mCoefficients[mCoefficientCursor];
+    float *xFrame = &mX[mCursor * STEREO];
+    const int numLoops = mNumTaps >> 2; // n/4
+    for (int i = 0; i < numLoops; i++) {
+        // Manual loop unrolling, might get converted to SIMD.
+        float coefficient = *coefficients++;
+        left += *xFrame++ * coefficient;
+        right += *xFrame++ * coefficient;
+
+        coefficient = *coefficients++; // next tap
+        left += *xFrame++ * coefficient;
+        right += *xFrame++ * coefficient;
+
+        coefficient = *coefficients++;  // next tap
+        left += *xFrame++ * coefficient;
+        right += *xFrame++ * coefficient;
+
+        coefficient = *coefficients++;  // next tap
+        left += *xFrame++ * coefficient;
+        right += *xFrame++ * coefficient;
+    }
+
+    mCoefficientCursor = (mCoefficientCursor + mNumTaps) % mCoefficients.size();
+
+    // Copy accumulators to output.
+    frame[0] = left;
+    frame[1] = right;
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.h b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.h
new file mode 100644
index 0000000..e608483
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/PolyphaseResamplerStereo.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_POLYPHASE_RESAMPLER_STEREO_H
+#define OBOE_POLYPHASE_RESAMPLER_STEREO_H
+
+#include <sys/types.h>
+#include <unistd.h>
+#include "PolyphaseResampler.h"
+
+namespace resampler {
+
+class PolyphaseResamplerStereo : public PolyphaseResampler {
+public:
+    explicit PolyphaseResamplerStereo(const MultiChannelResampler::Builder &builder);
+
+    virtual ~PolyphaseResamplerStereo() = default;
+
+    void writeFrame(const float *frame) override;
+
+    void readFrame(float *frame) override;
+};
+
+} // namespace resampler
+
+#endif //OBOE_POLYPHASE_RESAMPLER_STEREO_H
diff --git a/media/libaaudio/src/flowgraph/resampler/README.md b/media/libaaudio/src/flowgraph/resampler/README.md
new file mode 100644
index 0000000..05d8a89
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/README.md
@@ -0,0 +1,91 @@
+# Sample Rate Converter
+
+This folder contains a sample rate converter, or "resampler".
+
+The converter is based on a sinc function that has been windowed by a hyperbolic cosine.
+We found this had fewer artifacts than the more traditional Kaiser window.
+
+## Creating a Resampler
+
+Include the [main header](MultiChannelResampler.h) for the resampler.
+
+    #include "resampler/MultiChannelResampler.h"
+
+Here is an example of creating a stereo resampler that will convert from 44100 to 48000 Hz.
+Only do this once, when you open your stream. Then use the same resampler to process multiple buffers.
+
+    MultiChannelResampler *resampler = MultiChannelResampler::make(
+            2, // channel count
+            44100, // input sampleRate
+            48000, // output sampleRate
+            MultiChannelResampler::Quality::Medium); // conversion quality
+
+Possible values for quality include { Fastest, Low, Medium, High, Best }.
+Higher quality levels will sound better but consume more CPU because they have more taps in the filter.
+
+## Fractional Frame Counts
+
+Note that the number of output frames generated for a given number of input frames can vary.
+
+For example, suppose you are converting from 44100 Hz to 48000 Hz and using an input buffer with 960 frames. If you calculate the number of output frames you get:
+
+    960 * 48000 / 44100 = 1044.897959...
+
+You cannot generate a fractional number of frames. So the resampler will sometimes generate 1044 frames and sometimes 1045 frames. On average it will generate 1044.897959 frames. The resampler stores the fraction internally and keeps track of when to consume or generate a frame.
+
+You can either use a fixed number of input frames or a fixed number of output frames. The other frame count will vary.
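+
+If you need to size buffers in advance, you can estimate the output count from the rate ratio. Here is a minimal sketch; this helper is not part of the library, and the exact count produced by the resampler may differ by a frame per buffer:
+
+    int32_t estimateMaxOutputFrames(int32_t numInputFrames,
+                                    int32_t inputRate, int32_t outputRate) {
+        // Round up so the destination buffer is never too small.
+        int64_t product = (int64_t) numInputFrames * outputRate + inputRate - 1;
+        return (int32_t) (product / inputRate);
+    }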
+
+## Calling the Resampler with a fixed number of OUTPUT frames
+
+In this example, suppose we have a fixed number of output frames and a variable number of input frames.
+
+Assume you start with these variables and a method that returns the next input frame:
+
+    float *outputBuffer;     // multi-channel buffer to be filled
+    int    numOutputFrames;  // number of frames of output
+
+The resampler has a method isWriteNeeded() that tells you whether to write to or read from the resampler.
+
+    int outputFramesLeft = numOutputFrames;
+    while (outputFramesLeft > 0) {
+        if (resampler->isWriteNeeded()) {
+            const float *frame = getNextInputFrame(); // you provide this
+            resampler->writeNextFrame(frame);
+        } else {
+            resampler->readNextFrame(outputBuffer);
+            outputBuffer += channelCount;
+            outputFramesLeft--;
+        }
+    }
+
+## Calling the Resampler with a fixed number of INPUT frames
+
+In this example, suppose we have a fixed number of input frames and a variable number of output frames.
+
+Assume you start with these variables:
+
+    float *inputBuffer;     // multi-channel buffer to be consumed
+    float *outputBuffer;    // multi-channel buffer to be filled
+    int    numInputFrames;  // number of frames of input
+    int    numOutputFrames = 0;
+    int    channelCount;    // 1 for mono, 2 for stereo
+
+    int inputFramesLeft = numInputFrames;
+    while (inputFramesLeft > 0) {
+        if (resampler->isWriteNeeded()) {
+            resampler->writeNextFrame(inputBuffer);
+            inputBuffer += channelCount;
+            inputFramesLeft--;
+        } else {
+            resampler->readNextFrame(outputBuffer);
+            outputBuffer += channelCount;
+            numOutputFrames++;
+        }
+    }
+
+## Deleting the Resampler
+
+When you are done, you should delete the resampler to avoid a memory leak.
+
+    delete resampler;
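+
+Alternatively, you can let a smart pointer own the resampler so it is deleted automatically. This is only a usage suggestion (it assumes the `<memory>` header is included), not something the library requires:
+
+    std::unique_ptr<MultiChannelResampler> resampler(
+            MultiChannelResampler::make(2, 44100, 48000,
+                                        MultiChannelResampler::Quality::Medium));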
+
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp b/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp
new file mode 100644
index 0000000..5e8a9e0
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResampler.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <math.h>
+#include "SincResampler.h"
+
+using namespace resampler;
+
+SincResampler::SincResampler(const MultiChannelResampler::Builder &builder)
+        : MultiChannelResampler(builder)
+        , mSingleFrame2(builder.getChannelCount()) {
+    assert((getNumTaps() % 4) == 0); // Required for loop unrolling.
+    mNumRows = kMaxCoefficients / getNumTaps(); // no guard row needed
+    mPhaseScaler = (double) mNumRows / mDenominator;
+    double phaseIncrement = 1.0 / mNumRows;
+    generateCoefficients(builder.getInputRate(),
+                         builder.getOutputRate(),
+                         mNumRows,
+                         phaseIncrement,
+                         builder.getNormalizedCutoff());
+}
+
+void SincResampler::readFrame(float *frame) {
+    // Clear accumulator for mixing.
+    std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
+    std::fill(mSingleFrame2.begin(), mSingleFrame2.end(), 0.0);
+
+    // Determine indices into coefficients table.
+    double tablePhase = getIntegerPhase() * mPhaseScaler;
+    int index1 = static_cast<int>(floor(tablePhase));
+    if (index1 >= mNumRows) { // no guard row needed because we wrap the indices
+        tablePhase -= mNumRows;
+        index1 -= mNumRows;
+    }
+
+    int index2 = index1 + 1;
+    if (index2 >= mNumRows) { // no guard row needed because we wrap the indices
+        index2 -= mNumRows;
+    }
+
+    float *coefficients1 = &mCoefficients[index1 * getNumTaps()];
+    float *coefficients2 = &mCoefficients[index2 * getNumTaps()];
+
+    float *xFrame = &mX[mCursor * getChannelCount()];
+    for (int i = 0; i < mNumTaps; i++) {
+        float coefficient1 = *coefficients1++;
+        float coefficient2 = *coefficients2++;
+        for (int channel = 0; channel < getChannelCount(); channel++) {
+            float sample = *xFrame++;
+            mSingleFrame[channel] +=  sample * coefficient1;
+            mSingleFrame2[channel] += sample * coefficient2;
+        }
+    }
+
+    // Interpolate and copy to output.
+    float fraction = tablePhase - index1;
+    for (int channel = 0; channel < getChannelCount(); channel++) {
+        float low = mSingleFrame[channel];
+        float high = mSingleFrame2[channel];
+        frame[channel] = low + (fraction * (high - low));
+    }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResampler.h b/media/libaaudio/src/flowgraph/resampler/SincResampler.h
new file mode 100644
index 0000000..b235188
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResampler.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SINC_RESAMPLER_H
+#define OBOE_SINC_RESAMPLER_H
+
+#include <memory>
+#include <sys/types.h>
+#include <unistd.h>
+#include "MultiChannelResampler.h"
+
+namespace resampler {
+
+/**
+ * Resampler that can interpolate between coefficients.
+ * This can be used to support arbitrary ratios.
+ */
+class SincResampler : public MultiChannelResampler {
+public:
+    explicit SincResampler(const MultiChannelResampler::Builder &builder);
+
+    virtual ~SincResampler() = default;
+
+    void readFrame(float *frame) override;
+
+protected:
+
+    std::vector<float> mSingleFrame2; // for interpolation
+    int32_t            mNumRows = 0;
+    double             mPhaseScaler = 1.0;
+};
+
+} // namespace resampler
+#endif //OBOE_SINC_RESAMPLER_H
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp
new file mode 100644
index 0000000..ce00302
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <math.h>
+
+#include "SincResamplerStereo.h"
+
+using namespace resampler;
+
+#define STEREO  2
+
+SincResamplerStereo::SincResamplerStereo(const MultiChannelResampler::Builder &builder)
+        : SincResampler(builder) {
+    assert(builder.getChannelCount() == STEREO);
+}
+
+void SincResamplerStereo::writeFrame(const float *frame) {
+    // Move cursor before write so that cursor points to last written frame in read.
+    if (--mCursor < 0) {
+        mCursor = getNumTaps() - 1;
+    }
+    float *dest = &mX[mCursor * STEREO];
+    const int offset = mNumTaps * STEREO;
+    // Write each channel twice so we avoid having to wrap when running the FIR.
+    const float left =  frame[0];
+    const float right = frame[1];
+    // Put ordered writes together.
+    dest[0] = left;
+    dest[1] = right;
+    dest[offset] = left;
+    dest[1 + offset] = right;
+}
+
+// Multiply input times windowed sinc function.
+void SincResamplerStereo::readFrame(float *frame) {
+    // Clear accumulator for mixing.
+    std::fill(mSingleFrame.begin(), mSingleFrame.end(), 0.0);
+    std::fill(mSingleFrame2.begin(), mSingleFrame2.end(), 0.0);
+
+    // Determine indices into coefficients table.
+    double tablePhase = getIntegerPhase() * mPhaseScaler;
+    int index1 = static_cast<int>(floor(tablePhase));
+    float *coefficients1 = &mCoefficients[index1 * getNumTaps()];
+    int index2 = (index1 + 1);
+    if (index2 >= mNumRows) { // no guard row needed because we wrap the indices
+        index2 = 0;
+    }
+    float *coefficients2 = &mCoefficients[index2 * getNumTaps()];
+    float *xFrame = &mX[mCursor * getChannelCount()];
+    for (int i = 0; i < mNumTaps; i++) {
+        float coefficient1 = *coefficients1++;
+        float coefficient2 = *coefficients2++;
+        for (int channel = 0; channel < getChannelCount(); channel++) {
+            float sample = *xFrame++;
+            mSingleFrame[channel] +=  sample * coefficient1;
+            mSingleFrame2[channel] += sample * coefficient2;
+        }
+    }
+
+    // Interpolate and copy to output.
+    float fraction = tablePhase - index1;
+    for (int channel = 0; channel < getChannelCount(); channel++) {
+        float low = mSingleFrame[channel];
+        float high = mSingleFrame2[channel];
+        frame[channel] = low + (fraction * (high - low));
+    }
+}
diff --git a/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.h b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.h
new file mode 100644
index 0000000..7d49ec7
--- /dev/null
+++ b/media/libaaudio/src/flowgraph/resampler/SincResamplerStereo.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OBOE_SINC_RESAMPLER_STEREO_H
+#define OBOE_SINC_RESAMPLER_STEREO_H
+
+#include <sys/types.h>
+#include <unistd.h>
+#include "SincResampler.h"
+
+namespace resampler {
+
+class SincResamplerStereo : public SincResampler {
+public:
+    explicit SincResamplerStereo(const MultiChannelResampler::Builder &builder);
+
+    virtual ~SincResamplerStereo() = default;
+
+    void writeFrame(const float *frame) override;
+
+    void readFrame(float *frame) override;
+
+};
+
+} // namespace resampler
+#endif //OBOE_SINC_RESAMPLER_STEREO_H
diff --git a/media/libaaudio/tests/test_flowgraph.cpp b/media/libaaudio/tests/test_flowgraph.cpp
index 611cbf7..0792fc5 100644
--- a/media/libaaudio/tests/test_flowgraph.cpp
+++ b/media/libaaudio/tests/test_flowgraph.cpp
@@ -23,6 +23,7 @@
 #include <gtest/gtest.h>
 
 #include "flowgraph/ClipToRange.h"
+#include "flowgraph/MonoBlend.h"
 #include "flowgraph/MonoToMultiConverter.h"
 #include "flowgraph/SourceFloat.h"
 #include "flowgraph/RampLinear.h"
@@ -164,3 +165,29 @@
         EXPECT_NEAR(expected[i], output[i], tolerance);
     }
 }
+
+TEST(test_flowgraph, module_mono_blend) {
+    // Two channel to two channel with 3 inputs and outputs.
+    constexpr int numChannels = 2;
+    constexpr int numFrames = 3;
+
+    static const float input[] = {-0.7, 0.5, -0.25, 1.25, 1000, 2000};
+    static const float expected[] = {-0.1, -0.1, 0.5, 0.5, 1500, 1500};
+    float output[100];
+    SourceFloat sourceFloat{numChannels};
+    MonoBlend monoBlend{numChannels};
+    SinkFloat sinkFloat{numChannels};
+
+    sourceFloat.setData(input, numFrames);
+
+    sourceFloat.output.connect(&monoBlend.input);
+    monoBlend.output.connect(&sinkFloat.input);
+
+    int32_t numRead = sinkFloat.read(output, numFrames);
+    ASSERT_EQ(numRead, numFrames);
+    constexpr float tolerance = 0.000001f; // arbitrary
+    for (int i = 0; i < numRead; i++) {
+        EXPECT_NEAR(expected[i], output[i], tolerance);
+    }
+}
+
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index 4c83406..f81aa87 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -2301,6 +2301,8 @@
             return AUDIO_FLAG_CONTENT_SPATIALIZED;
         case media::AudioFlag::NEVER_SPATIALIZE:
             return AUDIO_FLAG_NEVER_SPATIALIZE;
+        case media::AudioFlag::CALL_REDIRECTION:
+            return AUDIO_FLAG_CALL_REDIRECTION;
     }
     return unexpected(BAD_VALUE);
 }
@@ -2342,6 +2344,8 @@
             return media::AudioFlag::CONTENT_SPATIALIZED;
         case AUDIO_FLAG_NEVER_SPATIALIZE:
             return media::AudioFlag::NEVER_SPATIALIZE;
+        case AUDIO_FLAG_CALL_REDIRECTION:
+            return media::AudioFlag::CALL_REDIRECTION;
     }
     return unexpected(BAD_VALUE);
 }
@@ -2957,11 +2961,8 @@
                                  }));
     legacy.num_gains = aidl.hal.gains.size();
 
-    media::AudioPortConfig aidlPortConfig;
-    aidlPortConfig.hal = aidl.hal.activeConfig;
-    aidlPortConfig.sys = aidl.sys.activeConfig;
     legacy.active_config = VALUE_OR_RETURN(
-            aidl2legacy_AudioPortConfig_audio_port_config(aidlPortConfig));
+            aidl2legacy_AudioPortConfig_audio_port_config(aidl.sys.activeConfig));
     legacy.ext = VALUE_OR_RETURN(
             aidl2legacy_AudioPortExt_audio_port_v7_ext(aidl.hal.ext, aidl.sys.type, aidl.sys.ext));
     return legacy;
@@ -3007,10 +3008,9 @@
                          }));
     aidl.sys.gains.resize(legacy.num_gains);
 
-    media::AudioPortConfig aidlPortConfig = VALUE_OR_RETURN(
+    aidl.sys.activeConfig = VALUE_OR_RETURN(
             legacy2aidl_audio_port_config_AudioPortConfig(legacy.active_config));
-    aidl.hal.activeConfig = aidlPortConfig.hal;
-    aidl.sys.activeConfig = aidlPortConfig.sys;
+    aidl.sys.activeConfig.hal.portId = aidl.hal.id;
     RETURN_IF_ERROR(
             legacy2aidl_AudioPortExt(legacy.ext, legacy.type, &aidl.hal.ext, &aidl.sys.ext));
     return aidl;
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 7e180a2..ab75c97 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -205,7 +205,7 @@
     ],
     apex_available: [
         "//apex_available:platform",
-        "com.android.bluetooth.updatable",
+        "com.android.bluetooth",
         "com.android.media",
         "com.android.media.swcodec",
     ],
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index ac128e6..f191c49 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -238,16 +238,18 @@
     // Otherwise the callback thread will never exit.
     stop();
     if (mAudioRecordThread != 0) {
-        mProxy->interrupt();
         mAudioRecordThread->requestExit();  // see comment in AudioRecord.h
+        mProxy->interrupt();
         mAudioRecordThread->requestExitAndWait();
         mAudioRecordThread.clear();
     }
-    // No lock here: worst case we remove a NULL callback which will be a nop
+
+    AutoMutex lock(mLock);
     if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
         // This may not stop all of these device callbacks!
         // TODO: Add some sort of protection.
         AudioSystem::removeAudioDeviceCallback(this, mInput, mPortId);
+        mDeviceCallback.clear();
     }
 }
 namespace {
@@ -1075,6 +1077,7 @@
         .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
         // the following are NOT immutable
         .set(AMEDIAMETRICS_PROP_STATE, stateToString(mActive))
+        .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
         .set(AMEDIAMETRICS_PROP_SELECTEDMICDIRECTION, (int32_t)mSelectedMicDirection)
         .set(AMEDIAMETRICS_PROP_SELECTEDMICFIELDDIRECTION, (double)mSelectedMicFieldDimension)
         .record();
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index d9fddf3..8be62ed 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -199,6 +199,7 @@
 
 #define MM_PREFIX "android.media.audiotrack." // avoid cut-n-paste errors.
 
+    // Do not change this without changing the MediaMetricsService side.
     // Java API 28 entries, do not change.
     mMetricsItem->setCString(MM_PREFIX "streamtype", toString(track->streamType()).c_str());
     mMetricsItem->setCString(MM_PREFIX "type",
@@ -214,6 +215,7 @@
     mMetricsItem->setInt32(MM_PREFIX "frameCount", (int32_t)track->mFrameCount);
     mMetricsItem->setCString(MM_PREFIX "attributes", toString(track->mAttributes).c_str());
     mMetricsItem->setCString(MM_PREFIX "logSessionId", track->mLogSessionId.c_str());
+    mMetricsItem->setInt32(MM_PREFIX "underrunFrames", (int32_t)track->getUnderrunFrames());
 }
 
 // hand the user a snapshot of the metrics.
@@ -469,12 +471,13 @@
     // Otherwise the callback thread will never exit.
     stop();
     if (mAudioTrackThread != 0) { // not thread safe
-        mProxy->interrupt();
         mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
+        mProxy->interrupt();
         mAudioTrackThread->requestExitAndWait();
         mAudioTrackThread.clear();
     }
-    // No lock here: worst case we remove a NULL callback which will be a nop
+
+    AutoMutex lock(mLock);
     if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
         // This may not stop all of these device callbacks!
         // TODO: Add some sort of protection.
@@ -550,8 +553,18 @@
           sessionId, transferType, attributionSource.uid, attributionSource.pid);
 
     mThreadCanCallJava = threadCanCallJava;
+
+    // These variables are pulled in an error report, so we initialize them early.
     mSelectedDeviceId = selectedDeviceId;
     mSessionId = sessionId;
+    mChannelMask = channelMask;
+    mFormat = format;
+    mOrigFlags = mFlags = flags;
+    mReqFrameCount = mFrameCount = frameCount;
+    mSampleRate = sampleRate;
+    mOriginalSampleRate = sampleRate;
+    mAttributes = pAttributes != nullptr ? *pAttributes : AUDIO_ATTRIBUTES_INITIALIZER;
+    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
 
     switch (transferType) {
     case TRANSFER_DEFAULT:
@@ -626,7 +639,6 @@
 
     } else {
         // stream type shouldn't be looked at, this track has audio attributes
-        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
         ALOGV("%s(): Building AudioTrack with attributes:"
                 " usage=%d content=%d flags=0x%x tags=[%s]",
                 __func__,
@@ -648,14 +660,12 @@
         status = BAD_VALUE;
         goto error;
     }
-    mFormat = format;
 
     if (!audio_is_output_channel(channelMask)) {
         errorMessage = StringPrintf("%s: Invalid channel mask %#x",  __func__, channelMask);
         status = BAD_VALUE;
         goto error;
     }
-    mChannelMask = channelMask;
     channelCount = audio_channel_count_from_out_mask(channelMask);
     mChannelCount = channelCount;
 
@@ -697,9 +707,6 @@
         status = BAD_VALUE;
         goto error;
     }
-    mSampleRate = sampleRate;
-    mOriginalSampleRate = sampleRate;
-    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
     // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
     mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
 
@@ -719,7 +726,6 @@
     mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
     mSendLevel = 0.0f;
     // mFrameCount is initialized in createTrack_l
-    mReqFrameCount = frameCount;
     if (notificationFrames >= 0) {
         mNotificationFramesReq = notificationFrames;
         mNotificationsPerBufferReq = 0;
@@ -760,7 +766,6 @@
         mClientAttributionSource.pid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(callingPid));
     }
     mAuxEffectId = 0;
-    mOrigFlags = mFlags = flags;
     mCallback = callback;
 
     if (_callback != nullptr) {
@@ -2116,6 +2121,7 @@
         .set(AMEDIAMETRICS_PROP_VOLUME_LEFT, (double)mVolume[AUDIO_INTERLEAVE_LEFT])
         .set(AMEDIAMETRICS_PROP_VOLUME_RIGHT, (double)mVolume[AUDIO_INTERLEAVE_RIGHT])
         .set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
+        .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)NO_ERROR)
         .set(AMEDIAMETRICS_PROP_AUXEFFECTID, (int32_t)mAuxEffectId)
         .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
         .set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
@@ -2155,10 +2161,11 @@
     if (status == NO_ERROR) return;
     // We report error on the native side because some callers do not come
     // from Java.
-    mediametrics::LogItem(std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + "error")
+    // Ensure these variables are initialized in set().
+    mediametrics::LogItem(AMEDIAMETRICS_KEY_AUDIO_TRACK_ERROR)
         .set(AMEDIAMETRICS_PROP_EVENT, event)
-        .set(AMEDIAMETRICS_PROP_ERROR, mediametrics::statusToErrorString(status))
-        .set(AMEDIAMETRICS_PROP_ERRORMESSAGE, message)
+        .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
+        .set(AMEDIAMETRICS_PROP_STATUSMESSAGE, message)
         .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
         .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
         .set(AMEDIAMETRICS_PROP_CONTENTTYPE, toString(mAttributes.content_type).c_str())
@@ -2166,8 +2173,10 @@
         .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
         .set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
         .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
-        .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mReqFrameCount) // requested frame count
         // the following are NOT immutable
+        // frame count is initially the requested frame count, but may be adjusted
+        // by AudioFlinger after creation.
+        .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
         .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
         .set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
         .set(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)mPlaybackRate.mPitch)
diff --git a/media/libaudioclient/AudioTrackShared.cpp b/media/libaudioclient/AudioTrackShared.cpp
index 35719be..e3b79b2 100644
--- a/media/libaudioclient/AudioTrackShared.cpp
+++ b/media/libaudioclient/AudioTrackShared.cpp
@@ -409,7 +409,7 @@
         android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
         // it seems that a FUTEX_WAKE_PRIVATE will not wake a FUTEX_WAIT, even within same process
         (void) syscall(__NR_futex, &cblk->mFutex, mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE,
-                1);
+                INT_MAX);
     }
 }
 
@@ -419,7 +419,7 @@
     if (!(android_atomic_or(CBLK_INTERRUPT, &cblk->mFlags) & CBLK_INTERRUPT)) {
         android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
         (void) syscall(__NR_futex, &cblk->mFutex, mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE,
-                1);
+                INT_MAX);
     }
 }
 
@@ -490,6 +490,8 @@
 status_t AudioTrackClientProxy::waitStreamEndDone(const struct timespec *requested)
 {
     struct timespec total;          // total elapsed time spent waiting
+    struct timespec before;
+    bool beforeIsValid = false;
     total.tv_sec = 0;
     total.tv_nsec = 0;
     audio_track_cblk_t* cblk = mCblk;
@@ -570,17 +572,38 @@
         }
         int32_t old = android_atomic_and(~CBLK_FUTEX_WAKE, &cblk->mFutex);
         if (!(old & CBLK_FUTEX_WAKE)) {
+            if (!beforeIsValid) {
+                clock_gettime(CLOCK_MONOTONIC, &before);
+                beforeIsValid = true;
+            }
             errno = 0;
             (void) syscall(__NR_futex, &cblk->mFutex,
                     mClientInServer ? FUTEX_WAIT_PRIVATE : FUTEX_WAIT, old & ~CBLK_FUTEX_WAKE, ts);
-            switch (errno) {
+            status_t error = errno; // clock_gettime can affect errno
+            {
+                struct timespec after;
+                clock_gettime(CLOCK_MONOTONIC, &after);
+                total.tv_sec += after.tv_sec - before.tv_sec;
+                // Use auto instead of long to avoid the google-runtime-int warning.
+                auto deltaNs = after.tv_nsec - before.tv_nsec;
+                if (deltaNs < 0) {
+                    deltaNs += 1000000000;
+                    total.tv_sec--;
+                }
+                if ((total.tv_nsec += deltaNs) >= 1000000000) {
+                    total.tv_nsec -= 1000000000;
+                    total.tv_sec++;
+                }
+                before = after;
+            }
+            switch (error) {
             case 0:            // normal wakeup by server, or by binderDied()
             case EWOULDBLOCK:  // benign race condition with server
             case EINTR:        // wait was interrupted by signal or other spurious wakeup
             case ETIMEDOUT:    // time-out expired
                 break;
             default:
-                status = errno;
+                status = error;
                 ALOGE("%s unexpected error %s", __func__, strerror(status));
                 goto end;
             }
@@ -747,7 +770,7 @@
             int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
             if (!(old & CBLK_FUTEX_WAKE)) {
                 (void) syscall(__NR_futex, &cblk->mFutex,
-                        mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
+                        mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, INT_MAX);
             }
         }
         mFlushed += (newFront - front) & mask;
@@ -917,7 +940,7 @@
         int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
         if (!(old & CBLK_FUTEX_WAKE)) {
             (void) syscall(__NR_futex, &cblk->mFutex,
-                    mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
+                    mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, INT_MAX);
         }
     }
 
diff --git a/media/libaudioclient/aidl/android/media/AudioFlag.aidl b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
index 91361fb..acf4e6d 100644
--- a/media/libaudioclient/aidl/android/media/AudioFlag.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
@@ -36,4 +36,5 @@
     CAPTURE_PRIVATE = 13,
     CONTENT_SPATIALIZED = 14,
     NEVER_SPATIALIZE = 15,
+    CALL_REDIRECTION = 16,
 }
diff --git a/media/libaudioclient/aidl/android/media/AudioPortSys.aidl b/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
index 27c0fe5..f3b5c19 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortSys.aidl
@@ -17,7 +17,7 @@
 package android.media;
 
 import android.media.AudioGainSys;
-import android.media.AudioPortConfigSys;
+import android.media.AudioPortConfig;
 import android.media.AudioPortExtSys;
 import android.media.AudioPortRole;
 import android.media.AudioPortType;
@@ -35,8 +35,8 @@
     AudioProfileSys[] profiles;
     /** System-only parameters for each AudioGain from 'port.gains'. */
     AudioGainSys[] gains;
-    /** System-only parameters for 'port.activeConfig'. */
-    AudioPortConfigSys activeConfig;
+    /** Current audio port configuration. */
+    AudioPortConfig activeConfig;
     /** System-only extra parameters for 'port.ext'. */
     AudioPortExtSys ext;
 }
diff --git a/media/libaudiofoundation/AudioContainers.cpp b/media/libaudiofoundation/AudioContainers.cpp
index 3df9378..553a319 100644
--- a/media/libaudiofoundation/AudioContainers.cpp
+++ b/media/libaudiofoundation/AudioContainers.cpp
@@ -70,48 +70,39 @@
     return audioDeviceOutAllBleSet;
 }
 
-bool deviceTypesToString(const DeviceTypeSet &deviceTypes, std::string &str) {
+std::string deviceTypesToString(const DeviceTypeSet &deviceTypes) {
     if (deviceTypes.empty()) {
-        str = "Empty device types";
-        return true;
+        return "Empty device types";
     }
-    bool ret = true;
-    for (auto it = deviceTypes.begin(); it != deviceTypes.end();) {
-        std::string deviceTypeStr;
-        ret = audio_is_output_device(*it) ?
-              OutputDeviceConverter::toString(*it, deviceTypeStr) :
-              InputDeviceConverter::toString(*it, deviceTypeStr);
-        if (!ret) {
-            break;
+    std::stringstream ss;
+    for (auto it = deviceTypes.begin(); it != deviceTypes.end(); ++it) {
+        if (it != deviceTypes.begin()) {
+            ss << ", ";
         }
-        str.append(deviceTypeStr);
-        if (++it != deviceTypes.end()) {
-            str.append(" , ");
+        const char* strType = audio_device_to_string(*it);
+        if (strlen(strType) != 0) {
+            ss << strType;
+        } else {
+            ss << "unknown type:0x" << std::hex << *it;
         }
     }
-    if (!ret) {
-        str = "Unknown values";
-    }
-    return ret;
+    return ss.str();
+}
+
+bool deviceTypesToString(const DeviceTypeSet &deviceTypes, std::string &str) {
+    str = deviceTypesToString(deviceTypes);
+    return true;
 }
 
 std::string dumpDeviceTypes(const DeviceTypeSet &deviceTypes) {
-    std::string ret;
-    for (auto it = deviceTypes.begin(); it != deviceTypes.end();) {
-        std::stringstream ss;
-        ss << "0x" << std::hex << (*it);
-        ret.append(ss.str());
-        if (++it != deviceTypes.end()) {
-            ret.append(" , ");
+    std::stringstream ss;
+    for (auto it = deviceTypes.begin(); it != deviceTypes.end(); ++it) {
+        if (it != deviceTypes.begin()) {
+            ss << ", ";
         }
+        ss << "0x" << std::hex << (*it);
     }
-    return ret;
-}
-
-std::string toString(const DeviceTypeSet& deviceTypes) {
-    std::string ret;
-    deviceTypesToString(deviceTypes, ret);
-    return ret;
+    return ss.str();
 }
 
 } // namespace android
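
Both rewritten helpers above build the string with a std::stringstream and emit ", " before every element except the first, avoiding the old pattern of appending a trailing separator while peeking at the next iterator. A generic sketch of that joining idiom (not the actual helpers, which additionally consult audio_device_to_string()):

    #include <set>
    #include <sstream>
    #include <string>

    // Join a set of integral values as comma-separated hex, e.g. "0x2, 0x8".
    template <typename T>
    std::string joinAsHex(const std::set<T>& values) {
        std::stringstream ss;
        for (auto it = values.begin(); it != values.end(); ++it) {
            if (it != values.begin()) {
                ss << ", ";
            }
            ss << "0x" << std::hex << *it;
        }
        return ss.str();
    }
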
diff --git a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
index 26eea87..4a7e956 100644
--- a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
+++ b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
@@ -100,10 +100,13 @@
 
 std::string AudioDeviceTypeAddr::toString(bool includeSensitiveInfo) const {
     std::stringstream sstream;
-    sstream << "type:0x" << std::hex << mType;
+    sstream << audio_device_to_string(mType);
+    if (sstream.str().empty()) {
+        sstream << "unknown type:0x" << std::hex << mType;
+    }
     // IP and MAC address are sensitive information. The sensitive information will be suppressed
     // if `includeSensitiveInfo` is false.
-    sstream << ",@:"
+    sstream << ", @:"
             << (!includeSensitiveInfo && mIsAddressSensitive ? SUPPRESSED : mAddress);
     return sstream.str();
 }
diff --git a/media/libaudiofoundation/AudioProfile.cpp b/media/libaudiofoundation/AudioProfile.cpp
index ec10bc9..9a67bb7 100644
--- a/media/libaudiofoundation/AudioProfile.cpp
+++ b/media/libaudiofoundation/AudioProfile.cpp
@@ -319,7 +319,7 @@
 {
     dst->append(base::StringPrintf("%*s- Profiles (%zu):\n", spaces - 2, "", size()));
     for (size_t i = 0; i < size(); i++) {
-        const std::string prefix = base::StringPrintf("%*s%zu. ", spaces + 1, "", i + 1);
+        const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
         dst->append(prefix);
         std::string profileStr;
         at(i)->dump(&profileStr, prefix.size());
diff --git a/media/libaudiofoundation/DeviceDescriptorBase.cpp b/media/libaudiofoundation/DeviceDescriptorBase.cpp
index 88ba544..5ffbffc 100644
--- a/media/libaudiofoundation/DeviceDescriptorBase.cpp
+++ b/media/libaudiofoundation/DeviceDescriptorBase.cpp
@@ -110,27 +110,23 @@
     return NO_ERROR;
 }
 
-void DeviceDescriptorBase::dump(std::string *dst, int spaces, int index,
+void DeviceDescriptorBase::dump(std::string *dst, int spaces,
                                 const char* extraInfo, bool verbose) const
 {
-    const std::string prefix = base::StringPrintf("%*s %d. ", spaces, "", index + 1);
-    dst->append(prefix);
     if (mId != 0) {
         dst->append(base::StringPrintf("Port ID: %d; ", mId));
     }
     if (extraInfo != nullptr) {
         dst->append(base::StringPrintf("%s; ", extraInfo));
     }
-    dst->append(base::StringPrintf("%s (%s)\n",
-                    audio_device_to_string(mDeviceTypeAddr.mType),
+    dst->append(base::StringPrintf("{%s}\n",
                     mDeviceTypeAddr.toString(true /*includeSensitiveInfo*/).c_str()));
 
     dst->append(base::StringPrintf(
-                    "%*sEncapsulation modes: %u, metadata types: %u\n",
-                    static_cast<int>(prefix.size()), "",
+                    "%*sEncapsulation modes: %u, metadata types: %u\n", spaces, "",
                     mEncapsulationModes, mEncapsulationMetadataTypes));
 
-    AudioPort::dump(dst, prefix.size(), nullptr, verbose);
+    AudioPort::dump(dst, spaces, nullptr, verbose);
 }
 
 std::string DeviceDescriptorBase::toString(bool includeSensitiveInfo) const
@@ -180,8 +176,9 @@
 
 status_t DeviceDescriptorBase::writeToParcelable(media::AudioPort* parcelable) const {
     AudioPort::writeToParcelable(parcelable);
-    AudioPortConfig::writeToParcelable(&parcelable->hal.activeConfig, useInputChannelMask());
+    AudioPortConfig::writeToParcelable(&parcelable->sys.activeConfig.hal, useInputChannelMask());
     parcelable->hal.id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
+    parcelable->sys.activeConfig.hal.portId = parcelable->hal.id;
 
     media::audio::common::AudioPortDeviceExt deviceExt;
     deviceExt.device = VALUE_OR_RETURN_STATUS(
@@ -205,7 +202,7 @@
     }
     status_t status = AudioPort::readFromParcelable(parcelable)
             ?: AudioPortConfig::readFromParcelable(
-                    parcelable.hal.activeConfig, useInputChannelMask());
+                    parcelable.sys.activeConfig.hal, useInputChannelMask());
     if (status != OK) {
         return status;
     }
diff --git a/media/libaudiofoundation/include/media/AudioContainers.h b/media/libaudiofoundation/include/media/AudioContainers.h
index 60b42fb..d352a96 100644
--- a/media/libaudiofoundation/include/media/AudioContainers.h
+++ b/media/libaudiofoundation/include/media/AudioContainers.h
@@ -131,6 +131,8 @@
     return deviceTypes;
 }
 
+std::string deviceTypesToString(const DeviceTypeSet& deviceTypes);
+
 bool deviceTypesToString(const DeviceTypeSet& deviceTypes, std::string &str);
 
 std::string dumpDeviceTypes(const DeviceTypeSet& deviceTypes);
@@ -138,7 +140,9 @@
 /**
  * Return human readable string for device types.
  */
-std::string toString(const DeviceTypeSet& deviceTypes);
+inline std::string toString(const DeviceTypeSet& deviceTypes) {
+    return deviceTypesToString(deviceTypes);
+}
 
 
 } // namespace android
diff --git a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
index b70da8a..1f0c768 100644
--- a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
+++ b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
@@ -65,7 +65,7 @@
     status_t setEncapsulationModes(uint32_t encapsulationModes);
     status_t setEncapsulationMetadataTypes(uint32_t encapsulationMetadataTypes);
 
-    void dump(std::string *dst, int spaces, int index,
+    void dump(std::string *dst, int spaces,
               const char* extraInfo = nullptr, bool verbose = true) const;
     void log() const;
 
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index 19a8b2f..61a2bf5 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -150,6 +150,7 @@
 
 bool sHasAuxChannels[PREPROC_NUM_EFFECTS] = {
         false,  // PREPROC_AGC
+        false,  // PREPROC_AGC2
         true,   // PREPROC_AEC
         true,   // PREPROC_NS
 };
diff --git a/media/libmediahelper/Android.bp b/media/libmediahelper/Android.bp
index 9b54199..a433fc6 100644
--- a/media/libmediahelper/Android.bp
+++ b/media/libmediahelper/Android.bp
@@ -20,7 +20,7 @@
     },
     apex_available: [
         "//apex_available:platform",
-        "com.android.bluetooth.updatable",
+        "com.android.bluetooth",
         "com.android.media",
         "com.android.media.swcodec",
     ],
diff --git a/media/libmediametrics/MediaMetricsItem.cpp b/media/libmediametrics/MediaMetricsItem.cpp
index a7ec975..57fc49d 100644
--- a/media/libmediametrics/MediaMetricsItem.cpp
+++ b/media/libmediametrics/MediaMetricsItem.cpp
@@ -57,18 +57,19 @@
     // This may be found in frameworks/av/media/libmediametrics/include/MediaMetricsConstants.h
     static std::unordered_map<std::string, int32_t> map{
         {"",                                      NO_ERROR},
-        {AMEDIAMETRICS_PROP_ERROR_VALUE_ARGUMENT, BAD_VALUE},
-        {AMEDIAMETRICS_PROP_ERROR_VALUE_IO,       DEAD_OBJECT},
-        {AMEDIAMETRICS_PROP_ERROR_VALUE_MEMORY,   NO_MEMORY},
-        {AMEDIAMETRICS_PROP_ERROR_VALUE_SECURITY, PERMISSION_DENIED},
-        {AMEDIAMETRICS_PROP_ERROR_VALUE_STATE,    INVALID_OPERATION},
-        {AMEDIAMETRICS_PROP_ERROR_VALUE_TIMEOUT,  WOULD_BLOCK},
-        {AMEDIAMETRICS_PROP_ERROR_VALUE_UNKNOWN,  UNKNOWN_ERROR},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_OK,       NO_ERROR},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT, BAD_VALUE},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_IO,       DEAD_OBJECT},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY,   NO_MEMORY},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY, PERMISSION_DENIED},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_STATE,    INVALID_OPERATION},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT,  WOULD_BLOCK},
+        {AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN,  UNKNOWN_ERROR},
     };
     return map;
 }
 
-status_t errorStringToStatus(const char *error) {
+status_t statusStringToStatus(const char *error) {
     const auto& map = getErrorStringMap();
     if (error == nullptr || error[0] == '\0') return NO_ERROR;
     auto it = map.find(error);
diff --git a/media/libmediametrics/include/MediaMetricsConstants.h b/media/libmediametrics/include/MediaMetricsConstants.h
index 5d0eca0..2bf72a7 100644
--- a/media/libmediametrics/include/MediaMetricsConstants.h
+++ b/media/libmediametrics/include/MediaMetricsConstants.h
@@ -61,6 +61,9 @@
 #define AMEDIAMETRICS_KEY_AUDIO_FLINGER       AMEDIAMETRICS_KEY_PREFIX_AUDIO "flinger"
 #define AMEDIAMETRICS_KEY_AUDIO_POLICY        AMEDIAMETRICS_KEY_PREFIX_AUDIO "policy"
 
+// Error keys
+#define AMEDIAMETRICS_KEY_AUDIO_TRACK_ERROR   AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK "error"
+
 /*
  * MediaMetrics Properties are unified space for consistency and readability.
  */
@@ -116,18 +119,6 @@
 #define AMEDIAMETRICS_PROP_DURATIONNS     "durationNs"     // int64 duration time span
 #define AMEDIAMETRICS_PROP_ENCODING       "encoding"       // string value of format
 
-// Error statistics
-#define AMEDIAMETRICS_PROP_ERROR          "error#"         // string, empty or one of
-                                                           // AMEDIAMETRICS_PROP_ERROR_VALUE_*
-                                                           // Used for error categorization.
-#define AMEDIAMETRICS_PROP_ERRORSUBCODE   "errorSubCode#"  // int32, specific code for error
-                                                           // used in conjunction with error#.
-#define AMEDIAMETRICS_PROP_ERRORMESSAGE   "errorMessage#"  // string, supplemental to error.
-                                                           // Arbitrary information treated as
-                                                           // informational, may be logcat msg,
-                                                           // or an exception with stack trace.
-                                                           // Treated as "debug" information.
-
 #define AMEDIAMETRICS_PROP_EVENT          "event#"         // string value (often func name)
 #define AMEDIAMETRICS_PROP_EXECUTIONTIMENS "executionTimeNs"  // time to execute the event
 
@@ -159,7 +150,17 @@
 #define AMEDIAMETRICS_PROP_STARTUPMS      "startupMs"      // double value
 // State is "ACTIVE" or "STOPPED" for AudioRecord
 #define AMEDIAMETRICS_PROP_STATE          "state"          // string
-#define AMEDIAMETRICS_PROP_STATUS         "status"         // int32 status_t
+#define AMEDIAMETRICS_PROP_STATUS         "status#"        // int32 status_t
+                                                           // AAudio uses its own status codes
+// Supplemental information to the status code.
+#define AMEDIAMETRICS_PROP_STATUSSUBCODE  "statusSubCode"  // int32, specific code
+                                                           // used in conjunction with status.
+#define AMEDIAMETRICS_PROP_STATUSMESSAGE  "statusMessage"  // string, supplemental info.
+                                                           // Arbitrary information treated as
+                                                           // informational, may be logcat msg,
+                                                           // or an exception with stack trace.
+                                                           // Treated as "debug" information.
+
 #define AMEDIAMETRICS_PROP_STREAMTYPE     "streamType"     // string (AudioTrack)
 #define AMEDIAMETRICS_PROP_THREADID       "threadId"       // int32 value io handle
 #define AMEDIAMETRICS_PROP_THROTTLEMS     "throttleMs"     // double
@@ -234,16 +235,20 @@
 // https://cs.android.com/android/platform/superproject/+/master:frameworks/native/libs/binder/include/binder/Status.h;drc=88e25c0861499ee3ab885814dddc097ab234cb7b;l=57
 // https://cs.android.com/android/platform/superproject/+/master:frameworks/base/media/java/android/media/AudioSystem.java;drc=3ac246c43294d7f7012bdcb0ccb7bae1aa695bd4;l=785
 // https://cs.android.com/android/platform/superproject/+/master:frameworks/av/media/libaaudio/include/aaudio/AAudio.h;drc=cfd3a6fa3aaaf712a890dc02452b38ef401083b8;l=120
+// https://abseil.io/docs/cpp/guides/status-codes
 
-// Error category:
-// An empty error string indicates no error.
+// Status errors:
+// An empty status string or "ok" is interpreted as no error.
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_OK                "ok"
 
 // Error category: argument
 //   IllegalArgumentException
 //   NullPointerException
 //   BAD_VALUE
+//   absl::INVALID_ARGUMENT
+//   absl::OUT_OF_RANGE
 //   Out of range, out of bounds.
-#define AMEDIAMETRICS_PROP_ERROR_VALUE_ARGUMENT           "argument"
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT          "argument"
 
 // Error category: io
 //   IOException
@@ -254,36 +259,48 @@
 //   file or ioctl failure
 //   Service, rpc, binder, or socket failure.
 //   Hardware or device failure.
-#define AMEDIAMETRICS_PROP_ERROR_VALUE_IO                 "io"
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_IO                "io"
 
 // Error category: outOfMemory
 //   OutOfMemoryException
 //   NO_MEMORY
-#define AMEDIAMETRICS_PROP_ERROR_VALUE_MEMORY             "memory"
+//   absl::RESOURCE_EXHAUSTED
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY            "memory"
 
 // Error category: security
 //   SecurityException
 //   PERMISSION_DENIED
-#define AMEDIAMETRICS_PROP_ERROR_VALUE_SECURITY           "security"
+//   absl::PERMISSION_DENIED
+//   absl::UNAUTHENTICATED
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY          "security"
 
 // Error category: state
 //   IllegalStateException
 //   UnsupportedOperationException
 //   INVALID_OPERATION
 //   NO_INIT
+//   absl::NOT_FOUND
+//   absl::ALREADY_EXISTS
+//   absl::FAILED_PRECONDITION
+//   absl::UNAVAILABLE
+//   absl::UNIMPLEMENTED
 //   Functionality not implemented (argument may or may not be correct).
 //   Call unexpected or out of order.
-#define AMEDIAMETRICS_PROP_ERROR_VALUE_STATE              "state"
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_STATE             "state"
 
 // Error category: timeout
 //   TimeoutException
 //   WOULD_BLOCK
-#define AMEDIAMETRICS_PROP_ERROR_VALUE_TIMEOUT            "timeout"
+//   absl::DEADLINE_EXCEEDED
+//   absl::ABORTED
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT           "timeout"
 
 // Error category: unknown
 //   Exception (Java specified not listed above, or custom app/service)
 //   UNKNOWN_ERROR
+//   absl::INTERNAL
+//   absl::DATA_LOSS
 //   Catch-all bucket for errors not listed above.
-#define AMEDIAMETRICS_PROP_ERROR_VALUE_UNKNOWN            "unknown"
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN           "unknown"
 
 #endif // ANDROID_MEDIA_MEDIAMETRICSCONSTANTS_H
diff --git a/media/libmediametrics/include/media/MediaMetricsItem.h b/media/libmediametrics/include/media/MediaMetricsItem.h
index f2cd505..de56665 100644
--- a/media/libmediametrics/include/media/MediaMetricsItem.h
+++ b/media/libmediametrics/include/media/MediaMetricsItem.h
@@ -27,6 +27,7 @@
 #include <variant>
 
 #include <binder/Parcel.h>
+#include <log/log.h>
 #include <utils/Errors.h>
 #include <utils/Timers.h> // nsecs_t
 
@@ -105,34 +106,34 @@
 };
 
 /*
- * Helper for error conversions
+ * Helper for status conversions
  */
 
-static inline constexpr const char* statusToErrorString(status_t status) {
+inline constexpr const char* statusToStatusString(status_t status) {
     switch (status) {
-    case NO_ERROR:
-        return "";
     case BAD_VALUE:
-        return AMEDIAMETRICS_PROP_ERROR_VALUE_ARGUMENT;
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT;
     case DEAD_OBJECT:
     case FAILED_TRANSACTION:
-        return AMEDIAMETRICS_PROP_ERROR_VALUE_IO;
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_IO;
     case NO_MEMORY:
-        return AMEDIAMETRICS_PROP_ERROR_VALUE_MEMORY;
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY;
     case PERMISSION_DENIED:
-        return AMEDIAMETRICS_PROP_ERROR_VALUE_SECURITY;
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY;
     case NO_INIT:
     case INVALID_OPERATION:
-        return AMEDIAMETRICS_PROP_ERROR_VALUE_STATE;
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_STATE;
     case WOULD_BLOCK:
-        return AMEDIAMETRICS_PROP_ERROR_VALUE_TIMEOUT;
-    case UNKNOWN_ERROR:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT;
     default:
-        return AMEDIAMETRICS_PROP_ERROR_VALUE_UNKNOWN;
+        if (status >= 0) return AMEDIAMETRICS_PROP_STATUS_VALUE_OK; // non-negative values "OK"
+        [[fallthrough]];            // negative values are errors.
+    case UNKNOWN_ERROR:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN;
     }
 }
 
-status_t errorStringToStatus(const char *error);
+status_t statusStringToStatus(const char *error);
 
 /*
  * Time printing
@@ -502,6 +503,7 @@
         do {
             if (ptr >= bufferptrmax) {
                 ALOGE("%s: buffer exceeded", __func__);
+                android_errorWriteLog(0x534e4554, "204445255");
                 return BAD_VALUE;
             }
         } while (*ptr++ != 0);
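
With the rename above, the two helpers form a round trip between status_t values and the AMEDIAMETRICS_PROP_STATUS_VALUE_* category strings. An illustrative fragment (namespace qualifiers omitted; the mappings follow the table in MediaMetricsItem.cpp):

    const char* category = statusToStatusString(DEAD_OBJECT);       // -> "io"
    status_t representative = statusStringToStatus(category);       // -> DEAD_OBJECT
    const char* ok = statusToStatusString(0 /* any value >= 0 */);  // -> "ok"
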
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index bffd7b3..6347b7a 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -16,6 +16,8 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "StagefrightRecorder"
+#define ATRACE_TAG ATRACE_TAG_VIDEO
+#include <utils/Trace.h>
 #include <inttypes.h>
 // TODO/workaround: including base logging now as it conflicts with ADebug.h
 // and it must be included first.
@@ -1856,6 +1858,7 @@
 // Set up the appropriate MediaSource depending on the chosen option
 status_t StagefrightRecorder::setupMediaSource(
                       sp<MediaSource> *mediaSource) {
+    ATRACE_CALL();
     if (mVideoSource == VIDEO_SOURCE_DEFAULT
             || mVideoSource == VIDEO_SOURCE_CAMERA) {
         sp<CameraSource> cameraSource;
@@ -1936,6 +1939,7 @@
 status_t StagefrightRecorder::setupVideoEncoder(
         const sp<MediaSource> &cameraSource,
         sp<MediaCodecSource> *source) {
+    ATRACE_CALL();
     source->clear();
 
     sp<AMessage> format = new AMessage();
@@ -2114,6 +2118,7 @@
 }
 
 status_t StagefrightRecorder::setupAudioEncoder(const sp<MediaWriter>& writer) {
+    ATRACE_CALL();
     status_t status = BAD_VALUE;
     if (OK != (status = checkAudioEncoderCapabilities())) {
         return status;
diff --git a/media/libmediatranscoding/include/media/ControllerClientInterface.h b/media/libmediatranscoding/include/media/ControllerClientInterface.h
index 9311e2e..ea63da8 100644
--- a/media/libmediatranscoding/include/media/ControllerClientInterface.h
+++ b/media/libmediatranscoding/include/media/ControllerClientInterface.h
@@ -66,7 +66,7 @@
      * Returns false if the session doesn't exist, or the client is already requesting the
      * session. Returns true otherwise.
      */
-    virtual bool addClientUid(ClientIdType clientId, SessionIdType sessionId, uid_t clientUid);
+    virtual bool addClientUid(ClientIdType clientId, SessionIdType sessionId, uid_t clientUid) = 0;
 
     /**
      * Retrieves the (unsorted) list of all clients requesting the session identified by
@@ -81,7 +81,7 @@
      * Returns false if the session doesn't exist. Returns true otherwise.
      */
     virtual bool getClientUids(ClientIdType clientId, SessionIdType sessionId,
-                               std::vector<int32_t>* out_clientUids);
+                               std::vector<int32_t>* out_clientUids) = 0;
 
 protected:
     virtual ~ControllerClientInterface() = default;
diff --git a/media/libstagefright/TEST_MAPPING b/media/libstagefright/TEST_MAPPING
index 0987a5b..7d4e168 100644
--- a/media/libstagefright/TEST_MAPPING
+++ b/media/libstagefright/TEST_MAPPING
@@ -40,6 +40,17 @@
           "exclude-filter": "android.media.audio.cts.AudioRecordTest"
         }
       ]
+    },
+    {
+      "name": "CtsMediaPlayerTestCases",
+      "options": [
+        {
+          "include-annotation": "android.platform.test.annotations.Presubmit"
+        },
+        {
+          "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+        }
+      ]
     }
   ],
   "presubmit": [
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
index fb6c4e2..bb1cb0b 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
@@ -354,7 +354,7 @@
             }
 
             if (mpeg4type->eProfile != OMX_VIDEO_MPEG4ProfileCore ||
-                mpeg4type->eLevel != OMX_VIDEO_MPEG4Level2 ||
+                mpeg4type->eLevel > OMX_VIDEO_MPEG4Level2 ||
                 (mpeg4type->nAllowedPictureTypes & OMX_VIDEO_PictureTypeB) ||
                 mpeg4type->nBFrames != 0 ||
                 mpeg4type->nIDCVLCThreshold != 0 ||
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index c2114b3..5c99cc9 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -33,7 +33,7 @@
 
 #include <media/stagefright/foundation/hexdump.h>
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 #include <binder/Parcel.h>
 #endif
 
@@ -659,7 +659,7 @@
     return s;
 }
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 // static
 sp<AMessage> AMessage::FromParcel(const Parcel &parcel, size_t maxNestingLevel) {
     int32_t what = parcel.readInt32();
@@ -825,7 +825,7 @@
         }
     }
 }
-#endif  // !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#endif  // defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 
 sp<AMessage> AMessage::changesFrom(const sp<const AMessage> &other, bool deep) const {
     if (other == NULL) {
diff --git a/media/libstagefright/foundation/AString.cpp b/media/libstagefright/foundation/AString.cpp
index b1ed077..a5e0ff8 100644
--- a/media/libstagefright/foundation/AString.cpp
+++ b/media/libstagefright/foundation/AString.cpp
@@ -27,7 +27,7 @@
 #include "ADebug.h"
 #include "AString.h"
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 #include <binder/Parcel.h>
 #endif
 
@@ -365,7 +365,7 @@
     return !strcasecmp(mData + mSize - suffixLen, suffix);
 }
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 // static
 AString AString::FromParcel(const Parcel &parcel) {
     size_t size = static_cast<size_t>(parcel.readInt32());
@@ -380,7 +380,7 @@
     }
     return err;
 }
-#endif // !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#endif // defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 
 AString AStringPrintf(const char *format, ...) {
     va_list ap;
diff --git a/media/libstagefright/foundation/MetaData.cpp b/media/libstagefright/foundation/MetaData.cpp
index 7f48cfd..77913d5 100644
--- a/media/libstagefright/foundation/MetaData.cpp
+++ b/media/libstagefright/foundation/MetaData.cpp
@@ -28,7 +28,7 @@
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/MetaData.h>
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 #include <binder/Parcel.h>
 #endif
 
@@ -48,7 +48,7 @@
 MetaData::~MetaData() {
 }
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 /* static */
 sp<MetaData> MetaData::createFromParcel(const Parcel &parcel) {
 
diff --git a/media/libstagefright/foundation/MetaDataBase.cpp b/media/libstagefright/foundation/MetaDataBase.cpp
index 3f050ea..980eb22 100644
--- a/media/libstagefright/foundation/MetaDataBase.cpp
+++ b/media/libstagefright/foundation/MetaDataBase.cpp
@@ -28,7 +28,7 @@
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/MetaDataBase.h>
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 #include <binder/Parcel.h>
 #endif
 
@@ -452,7 +452,7 @@
     }
 }
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#if defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 status_t MetaDataBase::writeToParcel(Parcel &parcel) {
     status_t ret;
     size_t numItems = mInternalData->mItems.size();
@@ -532,7 +532,7 @@
     ALOGW("no metadata in parcel");
     return UNKNOWN_ERROR;
 }
-#endif // !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#endif // defined(__ANDROID__) && !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
 
 }  // namespace android
 
diff --git a/media/libstagefright/include/media/stagefright/MediaBuffer.h b/media/libstagefright/include/media/stagefright/MediaBuffer.h
index 2c03f27..f070aac 100644
--- a/media/libstagefright/include/media/stagefright/MediaBuffer.h
+++ b/media/libstagefright/include/media/stagefright/MediaBuffer.h
@@ -105,7 +105,6 @@
         if (mMemory.get() == nullptr || mMemory->unsecurePointer() == nullptr) return 0;
         int32_t remoteRefcount =
                 reinterpret_cast<SharedControl *>(mMemory->unsecurePointer())->getRemoteRefcount();
-        // Sanity check so that remoteRefCount() is non-negative.
         return remoteRefcount >= 0 ? remoteRefcount : 0; // do not allow corrupted data.
 #else
         return 0;
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 8d527e9..6c5e6cb 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -167,7 +167,7 @@
     stubs: {
         symbol_file: "libmediandk.map.txt",
         versions: ["29"],
-    },
+    }
 }
 
 cc_library {
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 6e9945d..59c1103 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -97,6 +97,8 @@
     List<idvec_t> mIds;
     KeyedVector<String8, String8> mQueryResults;
     Vector<uint8_t> mKeyRequest;
+    String8 mDefaultUrl;
+    AMediaDrmKeyRequestType mkeyRequestType;
     Vector<uint8_t> mProvisionRequest;
     String8 mProvisionUrl;
     String8 mPropertyString;
@@ -416,6 +418,21 @@
         const AMediaDrmKeyValue *optionalParameters, size_t numOptionalParameters,
         const uint8_t **keyRequest, size_t *keyRequestSize) {
 
+    return AMediaDrm_getKeyRequestWithDefaultUrlAndType(mObj,
+        scope, init, initSize, mimeType, keyType, optionalParameters,
+        numOptionalParameters, keyRequest,
+        keyRequestSize, NULL, NULL);
+}
+
+EXPORT
+media_status_t AMediaDrm_getKeyRequestWithDefaultUrlAndType(AMediaDrm *mObj,
+        const AMediaDrmScope *scope, const uint8_t *init, size_t initSize,
+        const char *mimeType, AMediaDrmKeyType keyType,
+        const AMediaDrmKeyValue *optionalParameters,
+        size_t numOptionalParameters, const uint8_t **keyRequest,
+        size_t *keyRequestSize, const char **defaultUrl,
+        AMediaDrmKeyRequestType *keyRequestType) {
+
     if (!mObj || mObj->mDrm == NULL) {
         return AMEDIA_ERROR_INVALID_OBJECT;
     }
@@ -449,18 +466,43 @@
         mdOptionalParameters.add(String8(optionalParameters[i].mKey),
                 String8(optionalParameters[i].mValue));
     }
-    String8 defaultUrl;
-    DrmPlugin::KeyRequestType keyRequestType;
+
+    DrmPlugin::KeyRequestType requestType;
     mObj->mKeyRequest.clear();
     status_t status = mObj->mDrm->getKeyRequest(*iter, mdInit, String8(mimeType),
-            mdKeyType, mdOptionalParameters, mObj->mKeyRequest, defaultUrl,
-            &keyRequestType);
+            mdKeyType, mdOptionalParameters, mObj->mKeyRequest, mObj->mDefaultUrl,
+            &requestType);
     if (status != OK) {
         return translateStatus(status);
     } else {
         *keyRequest = mObj->mKeyRequest.array();
         *keyRequestSize = mObj->mKeyRequest.size();
+        if (defaultUrl != NULL)
+            *defaultUrl = mObj->mDefaultUrl.string();
+        switch(requestType) {
+            case DrmPlugin::kKeyRequestType_Initial:
+                mObj->mkeyRequestType = KEY_REQUEST_TYPE_INITIAL;
+                break;
+            case DrmPlugin::kKeyRequestType_Renewal:
+                mObj->mkeyRequestType = KEY_REQUEST_TYPE_RENEWAL;
+                break;
+            case DrmPlugin::kKeyRequestType_Release:
+                mObj->mkeyRequestType = KEY_REQUEST_TYPE_RELEASE;
+                break;
+            case DrmPlugin::kKeyRequestType_None:
+                mObj->mkeyRequestType = KEY_REQUEST_TYPE_NONE;
+                break;
+            case DrmPlugin::kKeyRequestType_Update:
+                mObj->mkeyRequestType = KEY_REQUEST_TYPE_UPDATE;
+                break;
+            default:
+                return AMEDIA_ERROR_UNKNOWN;
+        }
+
+        if (keyRequestType != NULL)
+            *keyRequestType = mObj->mkeyRequestType;
     }
+
     return AMEDIA_OK;
 }
 
diff --git a/media/ndk/include/media/NdkMediaDrm.h b/media/ndk/include/media/NdkMediaDrm.h
index 849a8f9..4eca3d7 100644
--- a/media/ndk/include/media/NdkMediaDrm.h
+++ b/media/ndk/include/media/NdkMediaDrm.h
@@ -112,6 +112,41 @@
 } AMediaDrmKeyType;
 
 /**
+ * Introduced in API 33.
+ */
+typedef enum AMediaDrmKeyRequestType : int32_t {
+    /**
+     * Key request type is initial license request.
+     * An initial license request is necessary to load keys.
+     */
+    KEY_REQUEST_TYPE_INITIAL,
+
+    /**
+     * Key request type is license renewal.
+     * A renewal license request is necessary to prevent the keys from expiring.
+     */
+    KEY_REQUEST_TYPE_RENEWAL,
+
+    /**
+     * Key request type is license release.
+     * A license release request indicates that keys are removed.
+     */
+    KEY_REQUEST_TYPE_RELEASE,
+
+    /**
+     * Keys are already loaded and are available for use. No license request is necessary, and
+     * no key request data is returned.
+     */
+    KEY_REQUEST_TYPE_NONE,
+
+    /**
+     * Keys have been loaded but an additional license request is needed
+     * to update their values.
+     */
+    KEY_REQUEST_TYPE_UPDATE
+} AMediaDrmKeyRequestType;
+
+/**
  *  Data type containing {key, value} pair
  */
 typedef struct AMediaDrmKeyValuePair {
@@ -248,7 +283,10 @@
  * to obtain or release keys used to decrypt encrypted content.
  * AMediaDrm_getKeyRequest is used to obtain an opaque key request byte array that
  * is delivered to the license server.  The opaque key request byte array is
- * returned in KeyRequest.data.
+ * returned in *keyRequest and the number of bytes in the request is
+ * returned in *keyRequestSize.
+ * This API has the same functionality as AMediaDrm_getKeyRequestWithDefaultUrlAndType()
+ * when defaultUrl and keyRequestType are passed in as NULL.
  *
  * After the app has received the key request response from the server,
  * it should deliver the response to the DRM engine plugin using the method
@@ -280,11 +318,14 @@
  *   by the caller
  *
  * On exit:
+ *   If this returns AMEDIA_OK,
  *   1. The keyRequest pointer will reference the opaque key request data.  It
  *       will reside in memory owned by the AMediaDrm object, and will remain
- *       accessible until the next call to AMediaDrm_getKeyRequest or until the
+ *       accessible until the next call to AMediaDrm_getKeyRequest
+ *       or AMediaDrm_getKeyRequestWithDefaultUrlAndType or until the
  *       MediaDrm object is released.
  *   2. keyRequestSize will be set to the size of the request
+ *   If this does not return AMEDIA_OK, the values of these parameters should not be used.
  *
  * Returns MEDIADRM_NOT_PROVISIONED_ERROR if reprovisioning is needed, due to a
  * problem with the device certificate.
@@ -297,6 +338,72 @@
         const uint8_t **keyRequest, size_t *keyRequestSize) __INTRODUCED_IN(21);
 
 /**
+ * A key request/response exchange occurs between the app and a license server
+ * to obtain or release keys used to decrypt encrypted content.
+ * AMediaDrm_getKeyRequest is used to obtain an opaque key request byte array that
+ * is delivered to the license server.  The opaque key request byte array is
+ * returned in *keyRequest and the number of bytes in the request is
+ * returned in *keyRequestSize.
+ *
+ * After the app has received the key request response from the server,
+ * it should deliver the response to the DRM engine plugin using the method
+ * AMediaDrm_provideKeyResponse.
+ *
+ * scope may be a sessionId or a keySetId, depending on the specified keyType.
+ * When the keyType is KEY_TYPE_STREAMING or KEY_TYPE_OFFLINE, scope should be set
+ * to the sessionId the keys will be provided to.  When the keyType is
+ * KEY_TYPE_RELEASE, scope should be set to the keySetId of the keys being released.
+ * Releasing keys from a device invalidates them for all sessions.
+ *
+ * init is container-specific data; its meaning is interpreted based on the mime type
+ * provided in the mimeType parameter.  It could contain, for example, the content
+ * ID, key ID or other data obtained from the content metadata that is required in
+ * generating the key request. init may be null when keyType is KEY_TYPE_RELEASE.
+ *
+ * initSize is the number of bytes of initData
+ *
+ * mimeType identifies the mime type of the content.
+ *
+ * keyType specifies the type of the request. The request may be to acquire keys for
+ *   streaming or offline content, or to release previously acquired keys, which are
+ *   identified by a keySetId.
+ *
+ * optionalParameters are included in the key request message to allow a client
+ *   application to provide additional message parameters to the server.
+ *
+ * numOptionalParameters indicates the number of optional parameters provided
+ *   by the caller
+ *
+ * On exit:
+ *   If this returns AMEDIA_OK,
+ *   1. The keyRequest pointer will reference the opaque key request data.  It
+ *       will reside in memory owned by the AMediaDrm object, and will remain
+ *       accessible until the next call to either AMediaDrm_getKeyRequest
+ *       or AMediaDrm_getKeyRequestWithDefaultUrlAndType or until the
+ *       MediaDrm object is released.
+ *   2. keyRequestSize will be set to the size of the request.
+ *   3. defaultUrl will be set to the recommended URL to deliver the key request.
+ *      The defaultUrl pointer will reference a NULL terminated URL string.
+ *      It will be UTF-8 encoded and have the same lifetime as the key request data
+ *      that the keyRequest pointer references. Passing in NULL means you don't need it
+ *      to be reported.
+ *   4. keyRequestType will be set to the key request type. Passing in NULL means
+ *      you don't need it to be reported.
+ *
+ * Returns MEDIADRM_NOT_PROVISIONED_ERROR if reprovisioning is needed, due to a
+ * problem with the device certificate.
+ *
+ * Available since API level 33.
+ */
+media_status_t AMediaDrm_getKeyRequestWithDefaultUrlAndType(AMediaDrm *,
+        const AMediaDrmScope *scope, const uint8_t *init, size_t initSize,
+        const char *mimeType, AMediaDrmKeyType keyType,
+        const AMediaDrmKeyValue *optionalParameters,
+        size_t numOptionalParameters, const uint8_t **keyRequest,
+        size_t *keyRequestSize, const char **defaultUrl,
+        AMediaDrmKeyRequestType *keyRequestType) __INTRODUCED_IN(__ANDROID_API_T__);
+
+/**
  * A key response is received from the license server by the app, then it is
  * provided to the DRM engine plugin using provideKeyResponse.  When the
  * response is for an offline key request, a keySetId is returned that can be
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 6f275c7..b228945 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -229,6 +229,7 @@
     AMediaDrm_decrypt;
     AMediaDrm_encrypt;
     AMediaDrm_getKeyRequest;
+    AMediaDrm_getKeyRequestWithDefaultUrlAndType; # introduced=Tiramisu
     AMediaDrm_getPropertyByteArray;
     AMediaDrm_getPropertyString;
     AMediaDrm_getProvisionRequest;
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 4f0909b..15d6d3697 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -45,6 +45,7 @@
 static const String16 sAndroidPermissionRecordAudio("android.permission.RECORD_AUDIO");
 static const String16 sModifyPhoneState("android.permission.MODIFY_PHONE_STATE");
 static const String16 sModifyAudioRouting("android.permission.MODIFY_AUDIO_ROUTING");
+static const String16 sCallAudioInterception("android.permission.CALL_AUDIO_INTERCEPTION");
 
 static String16 resolveCallingPackage(PermissionController& permissionController,
         const std::optional<String16> opPackageName, uid_t uid) {
@@ -309,6 +310,17 @@
     return ok;
 }
 
+bool callAudioInterceptionAllowed(const AttributionSourceState& attributionSource) {
+    uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
+    pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
+
+    // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
+    bool ok = PermissionCache::checkPermission(sCallAudioInterception, pid, uid);
+    if (!ok) ALOGV("%s(): android.permission.CALL_AUDIO_INTERCEPTION denied for uid %d",
+        __func__, uid);
+    return ok;
+}
+
 AttributionSourceState getCallingAttributionSource() {
     AttributionSourceState attributionSource = AttributionSourceState();
     attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(
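
callAudioInterceptionAllowed() above mirrors the existing *Allowed() helpers: a boolean gate checked before a caller may intercept call audio. A hypothetical call-site fragment (the surrounding service method and the attributionSource variable are assumed):

    // attributionSource describes the binder caller (uid/pid).
    if (!callAudioInterceptionAllowed(attributionSource)) {
        return PERMISSION_DENIED;   // reject call audio interception for this caller
    }
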
diff --git a/media/utils/TimeCheck.cpp b/media/utils/TimeCheck.cpp
index 878ae8c..2b765cc 100644
--- a/media/utils/TimeCheck.cpp
+++ b/media/utils/TimeCheck.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "TimeCheck"
 
 #include <optional>
+#include <sstream>
 
 #include <mediautils/EventLog.h>
 #include <mediautils/TimeCheck.h>
@@ -25,6 +26,15 @@
 
 namespace android {
 
+namespace {
+
+std::string formatTime(std::chrono::system_clock::time_point t) {
+    auto msSinceEpoch = std::chrono::round<std::chrono::milliseconds>(t.time_since_epoch());
+    return (std::ostringstream() << msSinceEpoch.count()).str();
+}
+
+}  // namespace
+
 // Audio HAL server pids vector used to generate audio HAL processes tombstone
 // when audioserver watchdog triggers.
 // We use a lockless storage to avoid potential deadlocks in the context of watchdog
@@ -66,15 +76,18 @@
 }
 
 TimeCheck::TimeCheck(const char* tag, uint32_t timeoutMs)
-    : mTimerHandle(getTimeCheckThread()->scheduleTask([tag] { crash(tag); },
-                                                      std::chrono::milliseconds(timeoutMs))) {}
+    : mTimerHandle(getTimeCheckThread()->scheduleTask(
+              [tag, startTime = std::chrono::system_clock::now()] { crash(tag, startTime); },
+              std::chrono::milliseconds(timeoutMs))) {}
 
 TimeCheck::~TimeCheck() {
     getTimeCheckThread()->cancelTask(mTimerHandle);
 }
 
 /* static */
-void TimeCheck::crash(const char* tag) {
+void TimeCheck::crash(const char* tag, std::chrono::system_clock::time_point startTime) {
+    std::chrono::system_clock::time_point endTime = std::chrono::system_clock::now();
+
     // Generate audio HAL processes tombstones and allow time to complete
     // before forcing restart
     std::vector<pid_t> pids = getAudioHalPids();
@@ -88,7 +101,8 @@
         ALOGI("No HAL process pid available, skipping tombstones");
     }
     LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
-    LOG_ALWAYS_FATAL("TimeCheck timeout for %s", tag);
+    LOG_ALWAYS_FATAL("TimeCheck timeout for %s (start=%s, end=%s)", tag,
+                     formatTime(startTime).c_str(), formatTime(endTime).c_str());
 }
 
 };  // namespace android
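
TimeCheck is a scoped watchdog: the constructor (tag, timeoutMs) schedules crash() and the destructor cancels it, so with this change a timeout abort now also reports the start and end wall-clock times in milliseconds since the epoch. A brief usage sketch (tag string and timeout value are illustrative):

    {
        android::TimeCheck check("IAudioFlinger::createTrack", 5000 /* ms */);
        // ... potentially blocking binder or HAL call being guarded ...
    }   // destructor cancels the scheduled crash when the call finishes in time
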
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 734313c..2fe2451 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -104,6 +104,7 @@
 bool dumpAllowed();
 bool modifyPhoneStateAllowed(const AttributionSourceState& attributionSource);
 bool bypassInterruptionPolicyAllowed(const AttributionSourceState& attributionSource);
+bool callAudioInterceptionAllowed(const AttributionSourceState& attributionSource);
 void purgePermissionCache();
 int32_t getOpForSource(audio_source_t source);
 
diff --git a/media/utils/include/mediautils/TimeCheck.h b/media/utils/include/mediautils/TimeCheck.h
index 2411f97..0d6e80d 100644
--- a/media/utils/include/mediautils/TimeCheck.h
+++ b/media/utils/include/mediautils/TimeCheck.h
@@ -38,7 +38,7 @@
   private:
     static TimerThread* getTimeCheckThread();
     static void accessAudioHalPids(std::vector<pid_t>* pids, bool update);
-    static void crash(const char* tag);
+    static void crash(const char* tag, std::chrono::system_clock::time_point startTime);
 
     const TimerThread::Handle mTimerHandle;
 };
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index ec414e0..ed4666f 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -4084,7 +4084,6 @@
     // transfer all effects one by one so that new effect chain is created on new thread with
     // correct buffer sizes and audio parameters and effect engines reconfigured accordingly
     sp<EffectChain> dstChain;
-    uint32_t strategy = 0; // prevent compiler warning
     sp<EffectModule> effect = chain->getEffectFromId_l(0);
     Vector< sp<EffectModule> > removed;
     status_t status = NO_ERROR;
@@ -4109,7 +4108,6 @@
                 status = NO_INIT;
                 break;
             }
-            strategy = dstChain->strategy();
         }
         effect = chain->getEffectFromId_l(0);
     }
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 2e9ecb1..ca7ffdb 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -3258,6 +3258,8 @@
         } else {
             mHalEffect->setDevices({mDevice});
         }
+        mHalEffect->configure();
+
         *handle = new EffectHandle(mHalEffect, nullptr, nullptr, 0 /*priority*/,
                                    mNotifyFramesProcessed);
         status = (*handle)->initCheck();
@@ -3306,8 +3308,14 @@
 }
 
 void AudioFlinger::DeviceEffectProxy::onReleasePatch(audio_patch_handle_t patchHandle) {
-    Mutex::Autolock _l(mProxyLock);
-    mEffectHandles.erase(patchHandle);
+    sp<EffectHandle> effect;
+    {
+        Mutex::Autolock _l(mProxyLock);
+        if (mEffectHandles.find(patchHandle) != mEffectHandles.end()) {
+            effect = mEffectHandles.at(patchHandle);
+            mEffectHandles.erase(patchHandle);
+        }
+    }
 }
 
 
@@ -3315,6 +3323,7 @@
 {
     Mutex::Autolock _l(mProxyLock);
     if (effect == mHalEffect) {
+        mHalEffect->release_l();
         mHalEffect.clear();
         mDevicePort.id = AUDIO_PORT_HANDLE_NONE;
     }
@@ -3462,7 +3471,7 @@
     if (proxy == nullptr) {
         return NO_INIT;
     }
-    return proxy->addEffectToHal(effect);
+    return proxy->removeEffectFromHal(effect);
 }
 
 bool AudioFlinger::DeviceEffectProxy::ProxyCallback::isOutput() const {
@@ -3514,4 +3523,22 @@
     return proxy->channelCount();
 }
 
+void AudioFlinger::DeviceEffectProxy::ProxyCallback::onEffectEnable(
+        const sp<EffectBase>& effectBase) {
+    sp<EffectModule> effect = effectBase->asEffectModule();
+    if (effect == nullptr) {
+        return;
+    }
+    effect->start();
+}
+
+void AudioFlinger::DeviceEffectProxy::ProxyCallback::onEffectDisable(
+        const sp<EffectBase>& effectBase) {
+    sp<EffectModule> effect = effectBase->asEffectModule();
+    if (effect == nullptr) {
+        return;
+    }
+    effect->stop();
+}
+
 } // namespace android
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 5ebf483..e2bea67 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -766,8 +766,8 @@
         void resetVolume() override {}
         product_strategy_t strategy() const override  { return static_cast<product_strategy_t>(0); }
         int32_t activeTrackCnt() const override { return 0; }
-        void onEffectEnable(const sp<EffectBase>& effect __unused) override {}
-        void onEffectDisable(const sp<EffectBase>& effect __unused) override {}
+        void onEffectEnable(const sp<EffectBase>& effect __unused) override;
+        void onEffectDisable(const sp<EffectBase>& effect __unused) override;
 
         wp<EffectChain> chain() const override { return nullptr; }
 
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 93118b8..45dd258 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -575,6 +575,12 @@
 
     // create a special playback track to render to playback thread.
     // this track is given the same buffer as the PatchRecord buffer
+
+    // Default behaviour is to start as soon as possible to have the lowest possible latency even if
+    // it might glitch.
+    // Disable this behavior for FM Tuner source if no fast capture/mixer available.
+    const bool isFmBridge = mAudioPatch.sources[0].ext.device.type == AUDIO_DEVICE_IN_FM_TUNER;
+    const size_t frameCountToBeReady = isFmBridge && !usePassthruPatchRecord ? frameCount / 4 : 1;
     sp<PlaybackThread::PatchTrack> tempPatchTrack = new PlaybackThread::PatchTrack(
                                            mPlayback.thread().get(),
                                            streamType,
@@ -584,7 +590,9 @@
                                            frameCount,
                                            tempRecordTrack->buffer(),
                                            tempRecordTrack->bufferSize(),
-                                           outputFlags);
+                                           outputFlags,
+                                           {} /*timeout*/,
+                                           frameCountToBeReady);
     status = mPlayback.checkTrack(tempPatchTrack.get());
     if (status != NO_ERROR) {
         return status;
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 3cce998..aecd4d3 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -193,6 +193,12 @@
        }
     }
 
+    static bool checkServerLatencySupported(
+            audio_format_t format, audio_output_flags_t flags) {
+        return audio_is_linear_pcm(format)
+                && (flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) == 0;
+    }
+
     audio_output_flags_t getOutputFlags() const { return mFlags; }
     float getSpeed() const { return mSpeed; }
 
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 43fa781..dd278f0 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -5889,6 +5889,20 @@
     return trackCount;
 }
 
+bool AudioFlinger::PlaybackThread::checkRunningTimestamp()
+{
+    uint64_t position = 0;
+    struct timespec unused;
+    const status_t ret = mOutput->getPresentationPosition(&position, &unused);
+    if (ret == NO_ERROR) {
+        if (position != mLastCheckedTimestampPosition) {
+            mLastCheckedTimestampPosition = position;
+            return true;
+        }
+    }
+    return false;
+}
+
 // isTrackAllowed_l() must be called with ThreadBase::mLock held
 bool AudioFlinger::MixerThread::isTrackAllowed_l(
         audio_channel_mask_t channelMask, audio_format_t format,
@@ -6317,19 +6331,24 @@
                 // fill a buffer, then remove it from active list.
                 // Only consider last track started for mixer state control
                 if (--(track->mRetryCount) <= 0) {
-                    ALOGV("BUFFER TIMEOUT: remove track(%d) from active list", trackId);
-                    tracksToRemove->add(track);
-                    // indicate to client process that the track was disabled because of underrun;
-                    // it will then automatically call start() when data is available
-                    track->disable();
-                    // only do hw pause when track is going to be removed due to BUFFER TIMEOUT.
-                    // unlike mixerthread, HAL can be paused for direct output
-                    ALOGW("pause because of UNDERRUN, framesReady = %zu,"
-                            "minFrames = %u, mFormat = %#x",
-                            framesReady, minFrames, mFormat);
-                    if (last && mHwSupportsPause && !mHwPaused && !mStandby) {
-                        doHwPause = true;
-                        mHwPaused = true;
+                    const bool running = checkRunningTimestamp();
+                    if (running) { // still running, give us more time.
+                        track->mRetryCount = kMaxTrackRetriesOffload;
+                    } else {
+                        ALOGV("BUFFER TIMEOUT: remove track(%d) from active list", trackId);
+                        tracksToRemove->add(track);
+                        // indicate to client process that the track was disabled because of
+                        // underrun; it will then automatically call start() when data is available
+                        track->disable();
+                        // only do hw pause when track is going to be removed due to BUFFER TIMEOUT.
+                        // unlike mixerthread, HAL can be paused for direct output
+                        ALOGW("pause because of UNDERRUN, framesReady = %zu,"
+                                "minFrames = %u, mFormat = %#x",
+                                framesReady, minFrames, mFormat);
+                        if (last && mHwSupportsPause && !mHwPaused && !mStandby) {
+                            doHwPause = true;
+                            mHwPaused = true;
+                        }
                     }
                 } else if (last) {
                     mixerStatus = MIXER_TRACKS_ENABLED;
@@ -6540,6 +6559,7 @@
 
 void AudioFlinger::DirectOutputThread::flushHw_l()
 {
+    PlaybackThread::flushHw_l();
     mOutput->flush();
     mHwPaused = false;
     mFlushPending = false;
@@ -6675,8 +6695,7 @@
 AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
         AudioStreamOut* output, audio_io_handle_t id, bool systemReady)
     :   DirectOutputThread(audioFlinger, output, id, OFFLOAD, systemReady),
-        mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true),
-        mOffloadUnderrunPosition(~0LL)
+        mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true)
 {
     //FIXME: mStandby should be set to true by ThreadBase constructor
     mStandby = true;
@@ -6893,19 +6912,7 @@
                 // No buffers for this track. Give it a few chances to
                 // fill a buffer, then remove it from active list.
                 if (--(track->mRetryCount) <= 0) {
-                    bool running = false;
-                    uint64_t position = 0;
-                    struct timespec unused;
-                    // The running check restarts the retry counter at least once.
-                    status_t ret = mOutput->stream->getPresentationPosition(&position, &unused);
-                    if (ret == NO_ERROR && position != mOffloadUnderrunPosition) {
-                        running = true;
-                        mOffloadUnderrunPosition = position;
-                    }
-                    if (ret == NO_ERROR) {
-                        ALOGVV("underrun counter, running(%d): %lld vs %lld", running,
-                                (long long)position, (long long)mOffloadUnderrunPosition);
-                    }
+                    const bool running = checkRunningTimestamp();
                     if (running) { // still running, give us more time.
                         track->mRetryCount = kMaxTrackRetriesOffload;
                     } else {
@@ -6976,7 +6983,6 @@
     mPausedBytesRemaining = 0;
     // reset bytes written count to reflect that DSP buffers are empty after flush.
     mBytesWritten = 0;
-    mOffloadUnderrunPosition = ~0LL;
 
     if (mUseAsyncWrite) {
         // discard any pending drain or write ack by incrementing sequence
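The body of the new PlaybackThread::checkRunningTimestamp() helper is not part of this diff. A minimal sketch of what it presumably does, reconstructed from the OffloadThread logic deleted above and the mLastCheckedTimestampPosition member added to Threads.h below (a sketch under those assumptions, not the actual implementation):

    bool AudioFlinger::PlaybackThread::checkRunningTimestamp()
    {
        // Differential check: ask the HAL for its presentation position and compare it
        // with the position recorded on the previous call.
        uint64_t position = 0;
        struct timespec unused;
        const status_t ret = mOutput->stream->getPresentationPosition(&position, &unused);
        if (ret == NO_ERROR && position != mLastCheckedTimestampPosition) {
            // The frame position advanced since the last check, so the HAL is still consuming data.
            mLastCheckedTimestampPosition = position;
            return true;
        }
        return false;
    }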
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 43d1055..61537a8 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1376,6 +1376,14 @@
                 struct audio_patch mDownStreamPatch;
 
                 std::atomic_bool mCheckOutputStageEffects{};
+
+                // A differential check on the timestamps to see if there is a change in the
+                // timestamp frame position since the last call to checkRunningTimestamp.
+                uint64_t mLastCheckedTimestampPosition = ~0LL;
+
+                bool checkRunningTimestamp();
+
+    virtual     void flushHw_l() { mLastCheckedTimestampPosition = ~0LL; }
 };
 
 class MixerThread : public PlaybackThread {
@@ -1493,7 +1501,7 @@
     virtual     bool        checkForNewParameter_l(const String8& keyValuePair,
                                                    status_t& status);
 
-    virtual     void        flushHw_l();
+                void        flushHw_l() override;
 
                 void        setMasterBalance(float balance) override;
 
@@ -1558,7 +1566,7 @@
     OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
                   audio_io_handle_t id, bool systemReady);
     virtual                 ~OffloadThread() {};
-    virtual     void        flushHw_l();
+                void        flushHw_l() override;
 
 protected:
     // threadLoop snippets
@@ -1575,10 +1583,6 @@
     size_t      mPausedWriteLength;     // length in bytes of write interrupted by pause
     size_t      mPausedBytesRemaining;  // bytes still waiting in mixbuffer after resume
     bool        mKeepWakeLock;          // keep wake lock while waiting for write callback
-    uint64_t    mOffloadUnderrunPosition; // Current frame position for offloaded playback
-                                          // used and valid only during underrun.  ~0 if
-                                          // no underrun has occurred during playback and
-                                          // is not reset on standby.
 };
 
 class AsyncCallbackThread : public Thread {
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 616fd78..233865f 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -713,8 +713,7 @@
         thread->mFastTrackAvailMask &= ~(1 << i);
     }
 
-    mServerLatencySupported = thread->type() == ThreadBase::MIXER
-            || thread->type() == ThreadBase::DUPLICATING;
+    mServerLatencySupported = checkServerLatencySupported(format, flags);
 #ifdef TEE_SINK
     mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
             + "_" + std::to_string(mId) + "_T");
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
index a5de655..955b0cf 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
@@ -41,7 +41,7 @@
 
     void setUid(uid_t uid) { mUid = uid; }
 
-    void dump(String8 *dst, int spaces, int index) const;
+    void dump(String8 *dst, int spaces) const;
 
     struct audio_patch mPatch;
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index e421c94..dc2403c 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -16,19 +16,21 @@
 
 #pragma once
 
-#include <vector>
-#include <map>
-#include <unistd.h>
 #include <sys/types.h>
+#include <unistd.h>
 
-#include <system/audio.h>
+#include <map>
+#include <vector>
+
+#include <android-base/stringprintf.h>
 #include <audiomanager/AudioManager.h>
 #include <media/AudioProductStrategy.h>
+#include <policy.h>
+#include <system/audio.h>
 #include <utils/Errors.h>
 #include <utils/KeyedVector.h>
 #include <utils/RefBase.h>
 #include <utils/String8.h>
-#include <policy.h>
 #include <Volume.h>
 #include "AudioPatch.h"
 #include "EffectDescriptor.h"
@@ -52,7 +54,7 @@
         mPreferredDeviceForExclusiveUse(isPreferredDeviceForExclusiveUse){}
     ~ClientDescriptor() override = default;
 
-    virtual void dump(String8 *dst, int spaces, int index) const;
+    virtual void dump(String8 *dst, int spaces) const;
     virtual std::string toShortString() const;
 
     audio_port_handle_t portId() const { return mPortId; }
@@ -100,7 +102,7 @@
     ~TrackClientDescriptor() override = default;
 
     using ClientDescriptor::dump;
-    void dump(String8 *dst, int spaces, int index) const override;
+    void dump(String8 *dst, int spaces) const override;
     std::string toShortString() const override;
 
     audio_output_flags_t flags() const { return mFlags; }
@@ -168,7 +170,7 @@
     ~RecordClientDescriptor() override = default;
 
     using ClientDescriptor::dump;
-    void dump(String8 *dst, int spaces, int index) const override;
+    void dump(String8 *dst, int spaces) const override;
 
     audio_unique_id_t riid() const { return mRIId; }
     audio_source_t source() const { return mSource; }
@@ -219,7 +221,7 @@
     void setHwOutput(const sp<HwAudioOutputDescriptor>& hwOutput);
 
     using ClientDescriptor::dump;
-    void dump(String8 *dst, int spaces, int index) const override;
+    void dump(String8 *dst, int spaces) const override;
 
  private:
     audio_patch_handle_t mPatchHandle = AUDIO_PATCH_HANDLE_NONE;
@@ -273,7 +275,9 @@
         (void)extraInfo;
         size_t index = 0;
         for (const auto& client: getClientIterable()) {
-            client->dump(dst, spaces, index++);
+            const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", ++index);
+            dst->appendFormat("%s", prefix.c_str());
+            client->dump(dst, prefix.size());
         }
     }
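These dump() signature changes drop the explicit index argument: the caller now builds a numbered prefix with StringPrintf and passes prefix.size() as the indent, so continuation lines line up under their item number. For illustration only (values invented, not captured from a device), the AudioTrack client portion of the dump produced by this loop together with the ClientDescriptor::dump() changes further down would look roughly like:

      - AudioTrack clients (2):
         1. Port ID: 43; Session ID: 13; uid 10123; State: Active
            AUDIO_FORMAT_PCM_16_BIT; 48000; Channel mask: 0x3
            Attributes: ...
         2. Port ID: 51; Session ID: 21; uid 1000; State: Inactive
            ...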
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index b444fd7..4adc920 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -91,7 +91,7 @@
 
     void setEncapsulationInfoFromHal(AudioPolicyClientInterface *clientInterface);
 
-    void dump(String8 *dst, int spaces, int index, bool verbose = true) const;
+    void dump(String8 *dst, int spaces, bool verbose = true) const;
 
 private:
     template <typename T, std::enable_if_t<std::is_same<T, struct audio_port>::value
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
index 0fe5c16..580938e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
@@ -44,7 +44,7 @@
     }
     dst->appendFormat("%*s- Audio Routes (%zu):\n", spaces - 2, "", audioRouteVector.size());
     for (size_t i = 0; i < audioRouteVector.size(); i++) {
-        const std::string prefix = base::StringPrintf("%*s%zu. ", spaces + 1, "", i + 1);
+        const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
         dst->append(prefix.c_str());
         audioRouteVector.itemAt(i)->dump(dst, prefix.size());
     }
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 1ae66de..235e4aa 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -255,32 +255,35 @@
             devices().toString(true /*includeSensitiveInfo*/).c_str());
     dst->appendFormat("%*sGlobal active count: %u\n", spaces, "", mGlobalActiveCount);
     if (!mRoutingActivities.empty()) {
-        dst->appendFormat("%*sProduct Strategies (%zu):\n", spaces, "", mRoutingActivities.size());
+        dst->appendFormat("%*s- Product Strategies (%zu):\n", spaces - 2, "",
+                mRoutingActivities.size());
         for (const auto &iter : mRoutingActivities) {
             dst->appendFormat("%*sid %d: ", spaces + 1, "", iter.first);
             iter.second.dump(dst, 0);
         }
     }
     if (!mVolumeActivities.empty()) {
-        dst->appendFormat("%*sVolume Activities (%zu):\n", spaces, "", mVolumeActivities.size());
+        dst->appendFormat("%*s- Volume Activities (%zu):\n", spaces - 2, "",
+                mVolumeActivities.size());
         for (const auto &iter : mVolumeActivities) {
             dst->appendFormat("%*sid %d: ", spaces + 1, "", iter.first);
             iter.second.dump(dst, 0);
         }
     }
     if (getClientCount() != 0) {
-        dst->appendFormat("%*sAudioTrack Clients (%zu):\n", spaces, "", getClientCount());
+        dst->appendFormat("%*s- AudioTrack clients (%zu):\n", spaces - 2, "", getClientCount());
         ClientMapHandler<TrackClientDescriptor>::dump(dst, spaces);
-        dst->append("\n");
     }
     if (!mActiveClients.empty()) {
-        dst->appendFormat("%*sAudioTrack active (stream) clients (%zu):\n", spaces, "",
+        dst->appendFormat("%*s- AudioTrack active (stream) clients (%zu):\n", spaces - 2, "",
                 mActiveClients.size());
         size_t index = 0;
         for (const auto& client : mActiveClients) {
-            client->dump(dst, spaces, index++);
+            const std::string prefix = base::StringPrintf(
+                    "%*sid %zu: ", spaces + 1, "", ++index);
+            dst->appendFormat("%s", prefix.c_str());
+            client->dump(dst, prefix.size());
         }
-        dst->append("\n");
     }
 }
 
@@ -708,7 +711,7 @@
 {
     AudioOutputDescriptor::dump(dst, spaces, extraInfo);
     dst->appendFormat("%*sSource:\n", spaces, "");
-    mSource->dump(dst, spaces, 0);
+    mSource->dump(dst, spaces);
 }
 
 void HwAudioOutputDescriptor::toAudioPortConfig(
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index d79110a..4f03db9 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -20,7 +20,9 @@
 #include "AudioPatch.h"
 #include "TypeConverter.h"
 
+#include <android-base/stringprintf.h>
 #include <log/log.h>
+#include <media/AudioDeviceTypeAddr.h>
 #include <utils/String8.h>
 
 namespace android {
@@ -37,20 +39,21 @@
 {
     for (int i = 0; i < count; ++i) {
         const audio_port_config &cfg = cfgs[i];
-        dst->appendFormat("%*s  [%s %d] ", spaces, "", prefix, i + 1);
+        dst->appendFormat("%*s[%s %d] ", spaces, "", prefix, i + 1);
         if (cfg.type == AUDIO_PORT_TYPE_DEVICE) {
-            dst->appendFormat("Device ID %d %s", cfg.id, toString(cfg.ext.device.type).c_str());
+            AudioDeviceTypeAddr device(cfg.ext.device.type, cfg.ext.device.address);
+            dst->appendFormat("Device Port ID: %d; {%s}",
+                    cfg.id, device.toString(true /*includeSensitiveInfo*/).c_str());
         } else {
-            dst->appendFormat("Mix ID %d I/O handle %d", cfg.id, cfg.ext.mix.handle);
+            dst->appendFormat("Mix Port ID: %d; I/O handle: %d;", cfg.id, cfg.ext.mix.handle);
         }
         dst->append("\n");
     }
 }
 
-void AudioPatch::dump(String8 *dst, int spaces, int index) const
+void AudioPatch::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat("%*sPatch %d: owner uid %4d, handle %2d, af handle %2d\n",
-            spaces, "", index + 1, mUid, mHandle, mAfPatchHandle);
+    dst->appendFormat("owner uid %4d; handle %2d; af handle %2d\n", mUid, mHandle, mAfPatchHandle);
     dumpPatchEndpoints(dst, spaces, "src ", mPatch.num_sources, mPatch.sources);
     dumpPatchEndpoints(dst, spaces, "sink", mPatch.num_sinks, mPatch.sinks);
 }
@@ -135,9 +138,11 @@
 
 void AudioPatchCollection::dump(String8 *dst) const
 {
-    dst->append("\nAudio Patches:\n");
+    dst->appendFormat("\n Audio Patches (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        valueAt(i)->dump(dst, 2, i);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        dst->appendFormat("%s", prefix.c_str());
+        valueAt(i)->dump(dst, prefix.size());
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index b209a88..546f56b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -675,7 +675,7 @@
 
 void AudioPolicyMixCollection::dump(String8 *dst) const
 {
-    dst->append("\nAudio Policy Mix:\n");
+    dst->append("\n Audio Policy Mix:\n");
     for (size_t i = 0; i < size(); i++) {
         itemAt(i)->dump(dst, 2, i);
     }
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index afc4d01..035bef2 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -18,9 +18,12 @@
 //#define LOG_NDEBUG 0
 
 #include <sstream>
+
+#include <android-base/stringprintf.h>
+#include <TypeConverter.h>
 #include <utils/Log.h>
 #include <utils/String8.h>
-#include <TypeConverter.h>
+
 #include "AudioOutputDescriptor.h"
 #include "AudioPatch.h"
 #include "AudioPolicyMix.h"
@@ -39,35 +42,36 @@
     return ss.str();
 }
 
-void ClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void ClientDescriptor::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat("%*sClient %d:\n", spaces, "", index+1);
-    dst->appendFormat("%*s- Port Id: %d Session Id: %d UID: %d\n", spaces, "",
-             mPortId, mSessionId, mUid);
-    dst->appendFormat("%*s- Format: %08x Sampling rate: %d Channels: %08x\n", spaces, "",
-             mConfig.format, mConfig.sample_rate, mConfig.channel_mask);
-    dst->appendFormat("%*s- Attributes: %s\n", spaces, "", toString(mAttributes).c_str());
-    dst->appendFormat("%*s- Preferred Device Id: %08x\n", spaces, "", mPreferredDeviceId);
-    dst->appendFormat("%*s- State: %s\n", spaces, "", mActive ? "Active" : "Inactive");
+    dst->appendFormat("Port ID: %d; Session ID: %d; uid %d; State: %s\n",
+            mPortId, mSessionId, mUid, mActive ? "Active" : "Inactive");
+    dst->appendFormat("%*s%s; %d; Channel mask: 0x%x\n", spaces, "",
+            audio_format_to_string(mConfig.format), mConfig.sample_rate, mConfig.channel_mask);
+    dst->appendFormat("%*sAttributes: %s\n", spaces, "", toString(mAttributes).c_str());
+    if (mPreferredDeviceId != AUDIO_PORT_HANDLE_NONE) {
+        dst->appendFormat("%*sPreferred Device Port ID: %d;\n", spaces, "", mPreferredDeviceId);
+    }
 }
 
-void TrackClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void TrackClientDescriptor::dump(String8 *dst, int spaces) const
 {
-    ClientDescriptor::dump(dst, spaces, index);
-    dst->appendFormat("%*s- Stream: %d flags: %08x\n", spaces, "", mStream, mFlags);
-    dst->appendFormat("%*s- Refcount: %d\n", spaces, "", mActivityCount);
-    dst->appendFormat("%*s- DAP Primary Mix: %p\n", spaces, "", mPrimaryMix.promote().get());
-    dst->appendFormat("%*s- DAP Secondary Outputs:\n", spaces, "");
-    for (auto desc : mSecondaryOutputs) {
-        dst->appendFormat("%*s  - %d\n", spaces, "",
-                desc.promote() == nullptr ? 0 : desc.promote()->mIoHandle);
+    ClientDescriptor::dump(dst, spaces);
+    dst->appendFormat("%*sStream: %d; Flags: %08x; Refcount: %d\n", spaces, "",
+            mStream, mFlags, mActivityCount);
+    dst->appendFormat("%*sDAP Primary Mix: %p\n", spaces, "", mPrimaryMix.promote().get());
+    if (!mSecondaryOutputs.empty()) {
+        dst->appendFormat("%*sDAP Secondary Outputs: ", spaces - 2, "");
+        for (auto desc : mSecondaryOutputs) {
+            dst->appendFormat("%d, ", desc.promote() == nullptr ? 0 : desc.promote()->mIoHandle);
+        }
+        dst->append("\n");
     }
 }
 
 std::string TrackClientDescriptor::toShortString() const
 {
     std::stringstream ss;
-
     ss << ClientDescriptor::toShortString() << " Stream: " << mStream;
     return ss.str();
 }
@@ -81,10 +85,10 @@
     }
 }
 
-void RecordClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void RecordClientDescriptor::dump(String8 *dst, int spaces) const
 {
-    ClientDescriptor::dump(dst, spaces, index);
-    dst->appendFormat("%*s- Source: %d flags: %08x\n", spaces, "", mSource, mFlags);
+    ClientDescriptor::dump(dst, spaces);
+    dst->appendFormat("%*sSource: %d; Flags: %08x\n", spaces, "", mSource, mFlags);
     mEnabledEffects.dump(dst, spaces + 2 /*spaces*/, false /*verbose*/);
 }
 
@@ -109,18 +113,21 @@
     mHwOutput = hwOutput;
 }
 
-void SourceClientDescriptor::dump(String8 *dst, int spaces, int index) const
+void SourceClientDescriptor::dump(String8 *dst, int spaces) const
 {
-    TrackClientDescriptor::dump(dst, spaces, index);
-    dst->appendFormat("%*s- Device:\n", spaces, "");
-    mSrcDevice->dump(dst, 2, 0);
+    TrackClientDescriptor::dump(dst, spaces);
+    const std::string prefix = base::StringPrintf("%*sDevice: ", spaces, "");
+    dst->appendFormat("%s", prefix.c_str());
+    mSrcDevice->dump(dst, prefix.size());
 }
 
 void SourceClientCollection::dump(String8 *dst) const
 {
-    dst->append("\nAudio sources:\n");
+    dst->appendFormat("\n Audio sources (%zu):\n", size());
     for (size_t i = 0; i < size(); i++) {
-        valueAt(i)->dump(dst, 2, i);
+        const std::string prefix = base::StringPrintf("  %zu. ", i + 1);
+        dst->appendFormat("%s", prefix.c_str());
+        valueAt(i)->dump(dst, prefix.size());
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index d76d0c2..a909331 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -19,10 +19,11 @@
 
 #include <set>
 
-#include <AudioPolicyInterface.h>
+#include <android-base/stringprintf.h>
 #include <audio_utils/string.h>
 #include <media/AudioParameter.h>
 #include <media/TypeConverter.h>
+#include <AudioPolicyInterface.h>
 #include "DeviceDescriptor.h"
 #include "TypeConverter.h"
 #include "HwModule.h"
@@ -176,7 +177,7 @@
     }
 }
 
-void DeviceDescriptor::dump(String8 *dst, int spaces, int index, bool verbose) const
+void DeviceDescriptor::dump(String8 *dst, int spaces, bool verbose) const
 {
     String8 extraInfo;
     if (!mTagName.empty()) {
@@ -184,7 +185,7 @@
     }
 
     std::string descBaseDumpStr;
-    DeviceDescriptorBase::dump(&descBaseDumpStr, spaces, index, extraInfo.string(), verbose);
+    DeviceDescriptorBase::dump(&descBaseDumpStr, spaces, extraInfo.string(), verbose);
     dst->append(descBaseDumpStr.c_str());
 }
 
@@ -449,7 +450,9 @@
     }
     dst->appendFormat("%*s%s devices (%zu):\n", spaces, "", tag.string(), size());
     for (size_t i = 0; i < size(); i++) {
-        itemAt(i)->dump(dst, spaces + 2, i, verbose);
+        const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+        dst->appendFormat("%s", prefix.c_str());
+        itemAt(i)->dump(dst, prefix.size(), verbose);
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
index 843f5da..3f9c8b0 100644
--- a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "APM::EffectDescriptor"
 //#define LOG_NDEBUG 0
 
+#include <android-base/stringprintf.h>
 #include "EffectDescriptor.h"
 #include <utils/String8.h>
 
@@ -24,13 +25,11 @@
 
 void EffectDescriptor::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat("%*sID: %d\n", spaces, "", mId);
-    dst->appendFormat("%*sI/O: %d\n", spaces, "", mIo);
-    dst->appendFormat("%*sMusic Effect: %s\n", spaces, "", isMusicEffect()? "yes" : "no");
-    dst->appendFormat("%*sSession: %d\n", spaces, "", mSession);
-    dst->appendFormat("%*sName: %s\n", spaces, "",  mDesc.name);
-    dst->appendFormat("%*s%s\n", spaces, "",  mEnabled ? "Enabled" : "Disabled");
-    dst->appendFormat("%*s%s\n", spaces, "",  mSuspended ? "Suspended" : "Active");
+    dst->appendFormat("Effect ID: %d; Attached to I/O handle: %d; Session: %d;\n",
+            mId, mIo, mSession);
+    dst->appendFormat("%*sMusic Effect? %s; \"%s\"; %s; %s\n", spaces, "",
+            isMusicEffect()? "yes" : "no", mDesc.name,
+            mEnabled ? "Enabled" : "Disabled", mSuspended ? "Suspended" : "Active");
 }
 
 EffectDescriptorCollection::EffectDescriptorCollection() :
@@ -237,10 +236,14 @@
             mTotalEffectsMemory,
             mTotalEffectsMemoryMaxUsed);
     }
-    dst->appendFormat("%*sEffects:\n", spaces, "");
-    for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("%*s- Effect %d:\n", spaces, "", keyAt(i));
-        valueAt(i)->dump(dst, spaces + 2);
+    if (size() > 0) {
+        if (spaces > 1) spaces -= 2;
+        dst->appendFormat("%*s- Effects (%zu):\n", spaces, "", size());
+        for (size_t i = 0; i < size(); i++) {
+            const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
+            dst->appendFormat("%s", prefix.c_str());
+            valueAt(i)->dump(dst, prefix.size());
+        }
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 2977f38..418b7eb 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -255,7 +255,7 @@
     if (mOutputProfiles.size()) {
         dst->appendFormat("%*s- Output MixPorts (%zu):\n", spaces - 2, "", mOutputProfiles.size());
         for (size_t i = 0; i < mOutputProfiles.size(); i++) {
-            const std::string prefix = base::StringPrintf("%*s%zu. ", spaces, "", i + 1);
+            const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
             dst->append(prefix.c_str());
             mOutputProfiles[i]->dump(dst, prefix.size());
         }
@@ -263,7 +263,7 @@
     if (mInputProfiles.size()) {
         dst->appendFormat("%*s- Input MixPorts (%zu):\n", spaces - 2, "", mInputProfiles.size());
         for (size_t i = 0; i < mInputProfiles.size(); i++) {
-            const std::string prefix = base::StringPrintf("%*s%zu. ", spaces, "", i + 1);
+            const std::string prefix = base::StringPrintf("%*s %zu. ", spaces, "", i + 1);
             dst->append(prefix.c_str());
             mInputProfiles[i]->dump(dst, prefix.size());
         }
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
index b3d144f..fbfcf72 100644
--- a/services/audiopolicy/engine/common/src/ProductStrategy.cpp
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -150,12 +150,8 @@
 void ProductStrategy::dump(String8 *dst, int spaces) const
 {
     dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
-    std::string deviceLiteral;
-    if (!deviceTypesToString(mApplicableDevices, deviceLiteral)) {
-        ALOGE("%s: failed to convert device %s",
-              __FUNCTION__, dumpDeviceTypes(mApplicableDevices).c_str());
-    }
-    dst->appendFormat("%*sSelected Device: {type:%s, @:%s}\n", spaces + 2, "",
+    std::string deviceLiteral = deviceTypesToString(mApplicableDevices);
+    dst->appendFormat("%*sSelected Device: {%s, @:%s}\n", spaces + 2, "",
                        deviceLiteral.c_str(), mDeviceAddress.c_str());
 
     for (const auto &attr : mAttributesVector) {
@@ -333,4 +329,3 @@
     dst->appendFormat("\n");
 }
 }
-
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index b0c376a..9a61a05 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -299,8 +299,13 @@
     if (device != nullptr) {
         return DeviceVector(device);
     }
+    return fromCache? getCachedDevices(strategy) : getDevicesForProductStrategy(strategy);
+}
 
-    return fromCache? mDevicesForStrategies.at(strategy) : getDevicesForProductStrategy(strategy);
+DeviceVector Engine::getCachedDevices(product_strategy_t ps) const
+{
+    return mDevicesForStrategies.find(ps) != mDevicesForStrategies.end() ?
+                mDevicesForStrategies.at(ps) : DeviceVector{};
 }
 
 DeviceVector Engine::getOutputDevicesForStream(audio_stream_type_t stream, bool fromCache) const
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
index d8e2742..f665da5 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.h
+++ b/services/audiopolicy/engineconfigurable/src/Engine.h
@@ -126,6 +126,7 @@
     status_t loadAudioPolicyEngineConfig();
 
     DeviceVector getDevicesForProductStrategy(product_strategy_t strategy) const;
+    DeviceVector getCachedDevices(product_strategy_t ps) const;
 
     /**
      * Policy Parameter Manager hidden through a wrapper.
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 22eeadd..05eae98 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -1702,6 +1702,8 @@
     // The priority is as follows:
     // 1: the output supporting haptic playback when requesting haptic playback
     // 2: the output with the highest number of requested functional flags
+    //    with tiebreak preferring the minimum number of extra functional flags
+    //    (see b/200293124, the incorrect selection of AUDIO_OUTPUT_FLAG_VOIP_RX).
     // 3: the output supporting the exact channel mask
     // 4: the output with a higher channel count than requested
     // 5: the output with a higher sampling rate than requested
@@ -1743,7 +1745,12 @@
         }
 
         // functional flags match
-        currentMatchCriteria[1] = popcount(outputDesc->mFlags & functionalFlags);
+        const int matchingFunctionalFlags =
+                __builtin_popcount(outputDesc->mFlags & functionalFlags);
+        const int totalFunctionalFlags =
+                __builtin_popcount(outputDesc->mFlags & kFunctionalFlags);
+        // Prefer matching functional flags, but subtract unnecessary functional flags.
+        currentMatchCriteria[1] = 100 * (matchingFunctionalFlags + 1) - totalFunctionalFlags;
 
         // channel mask and channel count match
         uint32_t outputChannelCount = audio_channel_count_from_out_mask(
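To make the new tiebreak concrete (numbers are illustrative only, and this assumes kFunctionalFlags includes AUDIO_OUTPUT_FLAG_VOIP_RX, as the b/200293124 reference suggests): for a request carrying no functional flags, a plain output scores 100 * (0 + 1) - 0 = 100 on this criterion while an output that also happens to carry VOIP_RX scores 100 * (0 + 1) - 1 = 99, so the output without the unnecessary flag is now preferred; an output that actually matches a requested functional flag still wins outright, since its score starts at 100 * (1 + 1) = 200 before the small penalty.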
@@ -3608,12 +3615,14 @@
     dst->appendFormat(" Communnication Strategy: %d\n", mCommunnicationStrategy);
     dst->appendFormat(" Config source: %s\n", mConfig.getSource().c_str()); // getConfig not const
 
-    mAvailableOutputDevices.dump(dst, String8("\n Available output"));
-    mAvailableInputDevices.dump(dst, String8("\n Available input"));
+    dst->append("\n");
+    mAvailableOutputDevices.dump(dst, String8("Available output"), 1);
+    dst->append("\n");
+    mAvailableInputDevices.dump(dst, String8("Available input"), 1);
     mHwModulesAll.dump(dst);
     mOutputs.dump(dst);
     mInputs.dump(dst);
-    mEffects.dump(dst);
+    mEffects.dump(dst, 1);
     mAudioPatches.dump(dst);
     mPolicyMixes.dump(dst);
     mAudioSources.dump(dst);
@@ -5471,8 +5480,7 @@
             if (!desc->isDuplicated()) {
                 // exact match on device
                 if (device_distinguishes_on_address(deviceType) && desc->supportsDevice(device)
-                        && desc->containsSingleDeviceSupportingEncodedFormats(device)
-                        && !mAvailableOutputDevices.containsAtLeastOne(desc->supportedDevices())) {
+                        && desc->containsSingleDeviceSupportingEncodedFormats(device)) {
                     outputs.add(mOutputs.keyAt(i));
                 } else if (!mAvailableOutputDevices.containsAtLeastOne(desc->supportedDevices())) {
                     ALOGV("checkOutputsForDevice(): disconnecting adding output %d",
@@ -6102,7 +6110,8 @@
             return hasVoiceStream(streams) && (outputDesc == mPrimaryOutput ||
                 outputDesc->isActive(toVolumeSource(AUDIO_STREAM_VOICE_CALL))) &&
                 (isInCall() ||
-                 mOutputs.isStrategyActiveOnSameModule(productStrategy, outputDesc));
+                 mOutputs.isStrategyActiveOnSameModule(productStrategy, outputDesc)) &&
+                !isStreamActive(AUDIO_STREAM_ENFORCED_AUDIBLE, 0);
         };
 
         // With low-latency playing on speaker, music on WFD, when the first low-latency
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index f7442cb..1fbea7d 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -79,15 +79,22 @@
         != std::end(mSupportedSystemUsages);
 }
 
-status_t AudioPolicyService::validateUsage(audio_usage_t usage) {
-     return validateUsage(usage, getCallingAttributionSource());
+status_t AudioPolicyService::validateUsage(const audio_attributes_t& attr) {
+     return validateUsage(attr, getCallingAttributionSource());
 }
 
-status_t AudioPolicyService::validateUsage(audio_usage_t usage,
+status_t AudioPolicyService::validateUsage(const audio_attributes_t& attr,
         const AttributionSourceState& attributionSource) {
-    if (isSystemUsage(usage)) {
-        if (isSupportedSystemUsage(usage)) {
-            if (!modifyAudioRoutingAllowed(attributionSource)) {
+    if (isSystemUsage(attr.usage)) {
+        if (isSupportedSystemUsage(attr.usage)) {
+            if (attr.usage == AUDIO_USAGE_CALL_ASSISTANT
+                    && ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0)) {
+                if (!callAudioInterceptionAllowed(attributionSource)) {
+                    ALOGE(("permission denied: call audio interception not allowed "
+                           "for attributionSource %s"), attributionSource.toString().c_str());
+                    return PERMISSION_DENIED;
+                }
+            } else if (!modifyAudioRoutingAllowed(attributionSource)) {
                 ALOGE(("permission denied: modify audio routing not allowed "
                        "for attributionSource %s"), attributionSource.toString().c_str());
                 return PERMISSION_DENIED;
@@ -344,7 +351,7 @@
 
     RETURN_IF_BINDER_ERROR(
             binderStatusFromStatusT(AudioValidator::validateAudioAttributes(attr, "68953950")));
-    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr.usage, attributionSource)));
+    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr, attributionSource)));
 
     ALOGV("%s()", __func__);
     Mutex::Autolock _l(mLock);
@@ -386,7 +393,12 @@
         case AudioPolicyInterface::API_OUTPUT_LEGACY:
             break;
         case AudioPolicyInterface::API_OUTPUT_TELEPHONY_TX:
-            if (!modifyPhoneStateAllowed(adjAttributionSource)) {
+            if (((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0)
+                && !callAudioInterceptionAllowed(adjAttributionSource)) {
+                ALOGE("%s() permission denied: call redirection not allowed for uid %d",
+                    __func__, adjAttributionSource.uid);
+                result = PERMISSION_DENIED;
+            } else if (!modifyPhoneStateAllowed(adjAttributionSource)) {
                 ALOGE("%s() permission denied: modify phone state not allowed for uid %d",
                     __func__, adjAttributionSource.uid);
                 result = PERMISSION_DENIED;
@@ -613,7 +625,7 @@
         adjAttributionSource.pid = callingPid;
     }
 
-    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr.usage,
+    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attr,
             adjAttributionSource)));
 
     // check calling permissions.
@@ -635,14 +647,18 @@
     }
 
     bool canCaptureOutput = captureAudioOutputAllowed(adjAttributionSource);
-    if ((inputSource == AUDIO_SOURCE_VOICE_UPLINK
-            || inputSource == AUDIO_SOURCE_VOICE_DOWNLINK
-            || inputSource == AUDIO_SOURCE_VOICE_CALL
-            || inputSource == AUDIO_SOURCE_ECHO_REFERENCE)
+    bool canInterceptCallAudio = callAudioInterceptionAllowed(adjAttributionSource);
+    bool isCallAudioSource = inputSource == AUDIO_SOURCE_VOICE_UPLINK
+             || inputSource == AUDIO_SOURCE_VOICE_DOWNLINK
+             || inputSource == AUDIO_SOURCE_VOICE_CALL;
+
+    if (isCallAudioSource && !canInterceptCallAudio && !canCaptureOutput) {
+        return binderStatusFromStatusT(PERMISSION_DENIED);
+    }
+    if (inputSource == AUDIO_SOURCE_ECHO_REFERENCE
             && !canCaptureOutput) {
         return binderStatusFromStatusT(PERMISSION_DENIED);
     }
-
     if (inputSource == AUDIO_SOURCE_FM_TUNER
         && !canCaptureOutput
         && !captureTunerAudioInputAllowed(adjAttributionSource)) {
@@ -687,23 +703,30 @@
             case AudioPolicyInterface::API_INPUT_LEGACY:
                 break;
             case AudioPolicyInterface::API_INPUT_TELEPHONY_RX:
+                if ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0
+                        && canInterceptCallAudio) {
+                    break;
+                }
                 // FIXME: use the same permission as for remote submix for now.
+                FALLTHROUGH_INTENDED;
             case AudioPolicyInterface::API_INPUT_MIX_CAPTURE:
                 if (!canCaptureOutput) {
-                    ALOGE("getInputForAttr() permission denied: capture not allowed");
+                    ALOGE("%s permission denied: capture not allowed", __func__);
                     status = PERMISSION_DENIED;
                 }
                 break;
             case AudioPolicyInterface::API_INPUT_MIX_EXT_POLICY_REROUTE:
-                if (!modifyAudioRoutingAllowed(adjAttributionSource)) {
-                    ALOGE("getInputForAttr() permission denied: modify audio routing not allowed");
+                if (!(modifyAudioRoutingAllowed(adjAttributionSource)
+                        || ((attr.flags & AUDIO_FLAG_CALL_REDIRECTION) != 0
+                            && canInterceptCallAudio))) {
+                    ALOGE("%s permission denied for remote submix capture", __func__);
                     status = PERMISSION_DENIED;
                 }
                 break;
             case AudioPolicyInterface::API_INPUT_INVALID:
             default:
-                LOG_ALWAYS_FATAL("getInputForAttr() encountered an invalid input type %d",
-                        (int)inputType);
+                LOG_ALWAYS_FATAL("%s encountered an invalid input type %d",
+                        __func__, (int)inputType);
             }
         }
 
@@ -1489,7 +1512,7 @@
         return binderStatusFromStatusT(NO_INIT);
     }
 
-    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes.usage)));
+    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes)));
 
     Mutex::Autolock _l(mLock);
     *_aidl_return = mAudioPolicyManager->isDirectOutputSupported(config, attributes);
@@ -1805,7 +1828,7 @@
         return binderStatusFromStatusT(NO_INIT);
     }
 
-    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes.usage)));
+    RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(validateUsage(attributes)));
 
     // startAudioSource should be created as the calling uid
     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 0471ddc..ef7a83b 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -729,7 +729,8 @@
         if (current->attributes.source != AUDIO_SOURCE_HOTWORD) {
             onlyHotwordActive = false;
         }
-        if (currentUid == mPhoneStateOwnerUid) {
+        if (currentUid == mPhoneStateOwnerUid &&
+                !isVirtualSource(current->attributes.source)) {
             isPhoneStateOwnerActive = true;
         }
     }
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 8a42b7c..b3ac21b 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -385,8 +385,9 @@
     app_state_t apmStatFromAmState(int amState);
 
     bool isSupportedSystemUsage(audio_usage_t usage);
-    status_t validateUsage(audio_usage_t usage);
-    status_t validateUsage(audio_usage_t usage, const AttributionSourceState& attributionSource);
+    status_t validateUsage(const audio_attributes_t& attr);
+    status_t validateUsage(const audio_attributes_t& attr,
+                           const AttributionSourceState& attributionSource);
 
     void updateUidStates();
     void updateUidStates_l() REQUIRES(mLock);
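Read together, the audio policy changes above gate AUDIO_FLAG_CALL_REDIRECTION use on the call-audio-interception permission instead of relying solely on the broader routing and phone-state permissions. A condensed restatement of the checks added in the hunks above (no behavior beyond what they already show):

    validateUsage:      CALL_ASSISTANT usage with CALL_REDIRECTION flag -> callAudioInterceptionAllowed()
                        other supported system usages                   -> modifyAudioRoutingAllowed()
    getOutputForAttr,
    TELEPHONY_TX:       CALL_REDIRECTION flag                           -> callAudioInterceptionAllowed()
                        otherwise                                       -> modifyPhoneStateAllowed()
    getInputForAttr:    VOICE_UPLINK / VOICE_DOWNLINK / VOICE_CALL      -> callAudioInterceptionAllowed()
                                                                           or captureAudioOutputAllowed()
                        TELEPHONY_RX / EXT_POLICY_REROUTE inputs with
                        CALL_REDIRECTION flag                           -> callAudioInterceptionAllowed()
                                                                           also accepted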
diff --git a/services/camera/libcameraservice/CameraFlashlight.cpp b/services/camera/libcameraservice/CameraFlashlight.cpp
index ccdd9e5..015ae2f 100644
--- a/services/camera/libcameraservice/CameraFlashlight.cpp
+++ b/services/camera/libcameraservice/CameraFlashlight.cpp
@@ -119,6 +119,59 @@
     return res;
 }
 
+status_t CameraFlashlight::turnOnTorchWithStrengthLevel(const String8& cameraId,
+            int32_t torchStrength) {
+    if (!mFlashlightMapInitialized) {
+        ALOGE("%s: findFlashUnits() must be called before this method.",
+               __FUNCTION__);
+        return NO_INIT;
+    }
+
+    ALOGV("%s: set torch strength of camera %s to %d", __FUNCTION__,
+            cameraId.string(), torchStrength);
+    status_t res = OK;
+    Mutex::Autolock l(mLock);
+
+    if (mOpenedCameraIds.indexOf(cameraId) != NAME_NOT_FOUND) {
+        ALOGE("%s: Camera device %s is in use, cannot be turned ON.",
+                __FUNCTION__, cameraId.string());
+        return -EBUSY;
+    }
+
+    if (mFlashControl == NULL) {
+        res = createFlashlightControl(cameraId);
+        if (res) {
+            return res;
+        }
+    }
+
+    res = mFlashControl->turnOnTorchWithStrengthLevel(cameraId, torchStrength);
+    return res;
+}
+
+
+status_t CameraFlashlight::getTorchStrengthLevel(const String8& cameraId,
+            int32_t* torchStrength) {
+    status_t res = OK;
+    if (!mFlashlightMapInitialized) {
+        ALOGE("%s: findFlashUnits() must be called before this method.",
+            __FUNCTION__);
+        return NO_INIT;
+    }
+
+    Mutex::Autolock l(mLock);
+
+    if (mFlashControl == NULL) {
+        res = createFlashlightControl(cameraId);
+        if (res) {
+            return res;
+        }
+    }
+
+    res = mFlashControl->getTorchStrengthLevel(cameraId, torchStrength);
+    return res;
+}
+
 status_t CameraFlashlight::findFlashUnits() {
     Mutex::Autolock l(mLock);
     status_t res;
@@ -306,6 +359,22 @@
 
     return mProviderManager->setTorchMode(cameraId.string(), enabled);
 }
+
+status_t ProviderFlashControl::turnOnTorchWithStrengthLevel(const String8& cameraId,
+            int32_t torchStrength) {
+    ALOGV("%s: change torch strength level of camera %s to %d", __FUNCTION__,
+            cameraId.string(), torchStrength);
+
+    return mProviderManager->turnOnTorchWithStrengthLevel(cameraId.string(), torchStrength);
+}
+
+status_t ProviderFlashControl::getTorchStrengthLevel(const String8& cameraId,
+            int32_t* torchStrength) {
+    ALOGV("%s: get torch strength level of camera %s", __FUNCTION__,
+            cameraId.string());
+
+    return mProviderManager->getTorchStrengthLevel(cameraId.string(), torchStrength);
+}
 // ProviderFlashControl implementation ends
 
 }
diff --git a/services/camera/libcameraservice/CameraFlashlight.h b/services/camera/libcameraservice/CameraFlashlight.h
index b97fa5f..1703ddc 100644
--- a/services/camera/libcameraservice/CameraFlashlight.h
+++ b/services/camera/libcameraservice/CameraFlashlight.h
@@ -44,6 +44,14 @@
         // set the torch mode to on or off.
         virtual status_t setTorchMode(const String8& cameraId,
                     bool enabled) = 0;
+
+        // Change the brightness level of the torch. If the torch is OFF and
+        // torchStrength >= 1, then the torch will also be turned ON.
+        virtual status_t turnOnTorchWithStrengthLevel(const String8& cameraId,
+                    int32_t torchStrength) = 0;
+
+        // Returns the torch strength level.
+        virtual status_t getTorchStrengthLevel(const String8& cameraId, int32_t* torchStrength) = 0;
 };
 
 /**
@@ -67,6 +75,12 @@
         // set the torch mode to on or off.
         status_t setTorchMode(const String8& cameraId, bool enabled);
 
+        // Change the torch strength level of the flash unit in torch mode.
+        status_t turnOnTorchWithStrengthLevel(const String8& cameraId, int32_t torchStrength);
+
+        // Get the torch strength level
+        status_t getTorchStrengthLevel(const String8& cameraId, int32_t* torchStrength);
+
         // Notify CameraFlashlight that camera service is going to open a camera
         // device. CameraFlashlight will free the resources that may cause the
         // camera open to fail. Camera service must call this function before
@@ -115,6 +129,8 @@
         // FlashControlBase
         status_t hasFlashUnit(const String8& cameraId, bool *hasFlash);
         status_t setTorchMode(const String8& cameraId, bool enabled);
+        status_t turnOnTorchWithStrengthLevel(const String8& cameraId, int32_t torchStrength);
+        status_t getTorchStrengthLevel(const String8& cameraId, int32_t* torchStrength);
 
     private:
         sp<CameraProviderManager> mProviderManager;
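On the binder surface these two operations are exposed through ICameraService (see the AIDL additions earlier in this change). A hypothetical native caller could exercise them roughly as follows, mirroring the existing setTorchMode() pattern of passing a client binder token; how the ICameraService handle is obtained is left out, and the camera id and strength value are made up:

    // Hypothetical client-side sketch; error handling trimmed.
    sp<hardware::ICameraService> cs = /* ICameraService handle for "media.camera" */;
    sp<IBinder> token = new BBinder();   // ties torch ownership to this client's lifetime
    binder::Status status =
            cs->turnOnTorchWithStrengthLevel(String16("0"), /*strengthLevel*/ 2, token);
    int32_t level = 0;
    if (status.isOk()) {
        cs->getTorchStrengthLevel(String16("0"), &level);  // read back the applied level
    }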
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 2551ea5..b3db66b 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -569,6 +569,15 @@
     onTorchStatusChangedLocked(cameraId, newStatus, systemCameraKind);
 }
 
+void CameraService::broadcastTorchStrengthLevel(const String8& cameraId,
+        int32_t newStrengthLevel) {
+    Mutex::Autolock lock(mStatusListenerLock);
+    for (auto& i : mListenerList) {
+        i->getListener()->onTorchStrengthLevelChanged(String16{cameraId},
+                newStrengthLevel);
+    }
+}
+
 void CameraService::onTorchStatusChangedLocked(const String8& cameraId,
         TorchModeStatus newStatus, SystemCameraKind systemCameraKind) {
     ALOGI("%s: Torch status changed for cameraId=%s, newStatus=%d",
@@ -804,6 +813,31 @@
     return ret;
 }
 
+Status CameraService::getTorchStrengthLevel(const String16& cameraId,
+        int32_t* torchStrength) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mServiceLock);
+    if (!mInitialized) {
+        ALOGE("%s: Camera HAL couldn't be initialized.", __FUNCTION__);
+        return STATUS_ERROR(ERROR_DISCONNECTED, "Camera HAL couldn't be initialized.");
+    }
+
+    if(torchStrength == NULL) {
+        ALOGE("%s: strength level must not be null.", __FUNCTION__);
+        return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Strength level should not be null.");
+    }
+
+    status_t res = mCameraProviderManager->getTorchStrengthLevel(String8(cameraId).string(),
+        torchStrength);
+    if (res != OK) {
+        return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION, "Unable to retrieve torch "
+            "strength level for device %s: %s (%d)", String8(cameraId).string(),
+            strerror(-res), res);
+    }
+    ALOGI("%s: Torch strength level is: %d", __FUNCTION__, *torchStrength);
+    return Status::ok();
+}
+
 String8 CameraService::getFormattedCurrentTime() {
     time_t now = time(nullptr);
     char formattedTime[64];
@@ -1848,8 +1882,10 @@
         if (mOverrideRotateAndCropMode != ANDROID_SCALER_ROTATE_AND_CROP_AUTO) {
             client->setRotateAndCropOverride(mOverrideRotateAndCropMode);
         } else if (effectiveApiLevel == API_2) {
-            client->setRotateAndCropOverride(CameraServiceProxyWrapper::getRotateAndCropOverride(
-                    clientPackageName, facing));
+
+            client->setRotateAndCropOverride(
+                    CameraServiceProxyWrapper::getRotateAndCropOverride(
+                            clientPackageName, facing, multiuser_get_user_id(clientUid)));
         }
 
         // Set camera muting behavior
@@ -1908,8 +1944,14 @@
             status_t res = NO_ERROR;
             auto clientDescriptor = mActiveClientManager.get(mInjectionInternalCamId);
             if (clientDescriptor != nullptr) {
-                BasicClient* baseClientPtr = clientDescriptor->getValue().get();
-                res = baseClientPtr->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
+                sp<BasicClient> clientSp = clientDescriptor->getValue();
+                res = checkIfInjectionCameraIsPresent(mInjectionExternalCamId, clientSp);
+                if(res != OK) {
+                    return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+                            "No camera device with ID \"%s\" currently available",
+                            mInjectionExternalCamId.string());
+                }
+                res = clientSp->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
                 if (res != OK) {
                     mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
                 }
@@ -1998,6 +2040,132 @@
     return OK;
 }
 
+Status CameraService::turnOnTorchWithStrengthLevel(const String16& cameraId, int32_t torchStrength,
+        const sp<IBinder>& clientBinder) {
+    Mutex::Autolock lock(mServiceLock);
+
+    ATRACE_CALL();
+    if (clientBinder == nullptr) {
+        ALOGE("%s: torch client binder is NULL", __FUNCTION__);
+        return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT,
+                "Torch client binder is null.");
+    }
+
+    String8 id = String8(cameraId.string());
+    int uid = CameraThreadState::getCallingUid();
+
+    if (shouldRejectSystemCameraConnection(id)) {
+        return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT, "Unable to change the strength level "
+                "for system only device %s", id.string());
+    }
+
+    // verify id is valid
+    auto state = getCameraState(id);
+    if (state == nullptr) {
+        ALOGE("%s: camera id is invalid %s", __FUNCTION__, id.string());
+        return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+            "Camera ID \"%s\" is not a valid camera ID", id.string());
+    }
+
+    StatusInternal cameraStatus = state->getStatus();
+    if (cameraStatus != StatusInternal::NOT_AVAILABLE &&
+            cameraStatus != StatusInternal::PRESENT) {
+        ALOGE("%s: camera id is invalid %s, status %d", __FUNCTION__, id.string(),
+            (int)cameraStatus);
+        return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                "Camera ID \"%s\" is not a valid camera ID", id.string());
+    }
+
+    {
+        Mutex::Autolock al(mTorchStatusMutex);
+        TorchModeStatus status;
+        status_t err = getTorchStatusLocked(id, &status);
+        if (err != OK) {
+            if (err == NAME_NOT_FOUND) {
+                return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+                        "Camera \"%s\" does not have a flash unit", id.string());
+            }
+            ALOGE("%s: getting current torch status failed for camera %s",
+                    __FUNCTION__, id.string());
+            return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+                    "Error changing torch strength level for camera \"%s\": %s (%d)",
+                    id.string(), strerror(-err), err);
+        }
+
+        if (status == TorchModeStatus::NOT_AVAILABLE) {
+            if (cameraStatus == StatusInternal::NOT_AVAILABLE) {
+                ALOGE("%s: torch mode of camera %s is not available because "
+                        "camera is in use.", __FUNCTION__, id.string());
+                return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
+                        "Torch for camera \"%s\" is not available due to an existing camera user",
+                        id.string());
+            } else {
+                ALOGE("%s: torch mode of camera %s is not available due to "
+                       "insufficient resources", __FUNCTION__, id.string());
+                return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+                        "Torch for camera \"%s\" is not available due to insufficient resources",
+                        id.string());
+            }
+        }
+    }
+
+    {
+        Mutex::Autolock al(mTorchUidMapMutex);
+        updateTorchUidMapLocked(cameraId, uid);
+    }
+    // Check if the current torch strength level is the same as the new one.
+    bool shouldSkipTorchStrengthUpdates = mCameraProviderManager->shouldSkipTorchStrengthUpdate(
+            id.string(), torchStrength);
+
+    status_t err = mFlashlight->turnOnTorchWithStrengthLevel(id, torchStrength);
+
+    if (err != OK) {
+        int32_t errorCode;
+        String8 msg;
+        switch (err) {
+            case -ENOSYS:
+                msg = String8::format("Camera \"%s\" has no flashlight.",
+                    id.string());
+                errorCode = ERROR_ILLEGAL_ARGUMENT;
+                break;
+            case -EBUSY:
+                msg = String8::format("Camera \"%s\" is in use",
+                    id.string());
+                errorCode = ERROR_CAMERA_IN_USE;
+                break;
+            default:
+                msg = String8::format("Changing torch strength level failed.");
+                errorCode = ERROR_INVALID_OPERATION;
+
+        }
+        ALOGE("%s: %s", __FUNCTION__, msg.string());
+        return STATUS_ERROR(errorCode, msg.string());
+    }
+
+    {
+        // update the link to client's death
+        // Store the last client that turns on each camera's torch mode.
+        Mutex::Autolock al(mTorchClientMapMutex);
+        ssize_t index = mTorchClientMap.indexOfKey(id);
+        if (index == NAME_NOT_FOUND) {
+            mTorchClientMap.add(id, clientBinder);
+        } else {
+            mTorchClientMap.valueAt(index)->unlinkToDeath(this);
+            mTorchClientMap.replaceValueAt(index, clientBinder);
+        }
+        clientBinder->linkToDeath(this);
+    }
+
+    int clientPid = CameraThreadState::getCallingPid();
+    const char *id_cstr = id.c_str();
+    ALOGI("%s: Torch strength for camera id %s changed to %d for client PID %d",
+            __FUNCTION__, id_cstr, torchStrength, clientPid);
+    if (!shouldSkipTorchStrengthUpdates) {
+        broadcastTorchStrengthLevel(id, torchStrength);
+    }
+    return Status::ok();
+}
+
 Status CameraService::setTorchMode(const String16& cameraId, bool enabled,
         const sp<IBinder>& clientBinder) {
     Mutex::Autolock lock(mServiceLock);
@@ -2069,13 +2237,7 @@
         // Update UID map - this is used in the torch status changed callbacks, so must be done
         // before setTorchMode
         Mutex::Autolock al(mTorchUidMapMutex);
-        if (mTorchUidMap.find(id) == mTorchUidMap.end()) {
-            mTorchUidMap[id].first = uid;
-            mTorchUidMap[id].second = uid;
-        } else {
-            // Set the pending UID
-            mTorchUidMap[id].first = uid;
-        }
+        updateTorchUidMapLocked(cameraId, uid);
     }
 
     status_t err = mFlashlight->setTorchMode(id, enabled);
@@ -2130,6 +2292,17 @@
     return Status::ok();
 }
 
+void CameraService::updateTorchUidMapLocked(const String16& cameraId, int uid) {
+    String8 id = String8(cameraId.string());
+    if (mTorchUidMap.find(id) == mTorchUidMap.end()) {
+        mTorchUidMap[id].first = uid;
+        mTorchUidMap[id].second = uid;
+    } else {
+        // Set the pending UID
+        mTorchUidMap[id].first = uid;
+    }
+}
+
 Status CameraService::notifySystemEvent(int32_t eventId,
         const std::vector<int32_t>& args) {
     const int pid = CameraThreadState::getCallingPid();
@@ -2252,9 +2425,11 @@
         if (current != nullptr) {
             const auto basicClient = current->getValue();
             if (basicClient.get() != nullptr && basicClient->canCastToApiClient(API_2)) {
-                basicClient->setRotateAndCropOverride(
-                        CameraServiceProxyWrapper::getRotateAndCropOverride(
-                            basicClient->getPackageName(), basicClient->getCameraFacing()));
+                basicClient->setRotateAndCropOverride(
+                        CameraServiceProxyWrapper::getRotateAndCropOverride(
+                                basicClient->getPackageName(),
+                                basicClient->getCameraFacing(),
+                                multiuser_get_user_id(basicClient->getClientUid())));
             }
         }
     }
@@ -2606,6 +2781,8 @@
         Mutex::Autolock lock(mInjectionParametersLock);
         mInjectionInternalCamId = String8(internalCamId);
         mInjectionExternalCamId = String8(externalCamId);
+        mInjectionStatusListener->addListener(callback);
+        *cameraInjectionSession = new CameraInjectionSession(this);
         status_t res = NO_ERROR;
         auto clientDescriptor = mActiveClientManager.get(mInjectionInternalCamId);
         // If the client already exists, we can directly connect to the camera device through the
@@ -2613,8 +2790,14 @@
         // (execute connectHelper()) before injecting the camera to the camera device.
         if (clientDescriptor != nullptr) {
             mInjectionInitPending = false;
-            BasicClient* baseClientPtr = clientDescriptor->getValue().get();
-            res = baseClientPtr->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
+            sp<BasicClient> clientSp = clientDescriptor->getValue();
+            res = checkIfInjectionCameraIsPresent(mInjectionExternalCamId, clientSp);
+            if(res != OK) {
+                return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+                        "No camera device with ID \"%s\" currently available",
+                        mInjectionExternalCamId.string());
+            }
+            res = clientSp->injectCamera(mInjectionExternalCamId, mCameraProviderManager);
             if(res != OK) {
                 mInjectionStatusListener->notifyInjectionError(mInjectionExternalCamId, res);
             }
@@ -2622,8 +2805,6 @@
             mInjectionInitPending = true;
         }
     }
-    mInjectionStatusListener->addListener(callback);
-    *cameraInjectionSession = new CameraInjectionSession(this);
 
     return binder::Status::ok();
 }
@@ -4272,12 +4453,20 @@
 
     if (dumpVector.empty()) { return; }
 
+    std::string dumpString;
+
+    String8 currentTime = getFormattedCurrentTime();
+    dumpString += "Cached @ ";
+    dumpString += currentTime.string();
+    dumpString += "\n"; // First line is the timestamp of when the client was cached.
+
+
     const String16 &packageName = client->getPackageName();
 
     String8 packageName8 = String8(packageName);
     const char *printablePackageName = packageName8.lockBuffer(packageName.size());
 
-    std::string dumpString;
+
     size_t i = dumpVector.size();
 
     // Store the string in reverse order (latest last)
@@ -5135,10 +5324,39 @@
     return mode;
 }
 
+status_t CameraService::checkIfInjectionCameraIsPresent(const String8& externalCamId,
+        sp<BasicClient> clientSp) {
+    std::unique_ptr<AutoConditionLock> lock =
+            AutoConditionLock::waitAndAcquire(mServiceLockWrapper);
+    status_t res = NO_ERROR;
+    if ((res = checkIfDeviceIsUsable(externalCamId)) != NO_ERROR) {
+        ALOGW("Device %s is not usable!", externalCamId.string());
+        mInjectionStatusListener->notifyInjectionError(
+                externalCamId, UNKNOWN_TRANSACTION);
+        clientSp->notifyError(
+                hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+                CaptureResultExtras());
+
+        // Do not hold mServiceLock while disconnecting clients, but retain the condition blocking
+        // other clients from connecting in mServiceLockWrapper if held
+        mServiceLock.unlock();
+
+        // Clear caller identity temporarily so client disconnect PID checks work correctly
+        int64_t token = CameraThreadState::clearCallingIdentity();
+        clientSp->disconnect();
+        CameraThreadState::restoreCallingIdentity(token);
+
+        // Reacquire mServiceLock
+        mServiceLock.lock();
+    }
+
+    return res;
+}
+
 void CameraService::clearInjectionParameters() {
     {
         Mutex::Autolock lock(mInjectionParametersLock);
-        mInjectionInitPending = true;
+        mInjectionInitPending = false;
         mInjectionInternalCamId = "";
     }
     mInjectionExternalCamId = "";
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index a3125c6..060f075 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -172,6 +172,12 @@
     virtual binder::Status    setTorchMode(const String16& cameraId, bool enabled,
             const sp<IBinder>& clientBinder);
 
+    virtual binder::Status    turnOnTorchWithStrengthLevel(const String16& cameraId,
+            int32_t torchStrength, const sp<IBinder>& clientBinder);
+
+    virtual binder::Status    getTorchStrengthLevel(const String16& cameraId,
+            int32_t* torchStrength);
+
     virtual binder::Status    notifySystemEvent(int32_t eventId,
             const std::vector<int32_t>& args);
 
@@ -1232,6 +1238,8 @@
             hardware::camera::common::V1_0::TorchModeStatus status,
             SystemCameraKind systemCameraKind);
 
+    void broadcastTorchStrengthLevel(const String8& cameraId, int32_t newTorchStrengthLevel);
+
     void disconnectClient(const String8& id, sp<BasicClient> clientToDisconnect);
 
     // Regular online and offline devices must not be in conflict at camera service layer.
@@ -1296,15 +1304,22 @@
             wp<CameraService> mParent;
     };
 
+    // When injecting a camera, check whether the camera being injected is currently unavailable.
+    // If it is, disconnect() is called to prevent camera access on the device.
+    status_t checkIfInjectionCameraIsPresent(const String8& externalCamId,
+            sp<BasicClient> clientSp);
+
     void clearInjectionParameters();
 
     // This is the existing camera id being replaced.
     String8 mInjectionInternalCamId;
     // This is the external camera Id replacing the internalId.
     String8 mInjectionExternalCamId;
-    bool mInjectionInitPending = true;
+    bool mInjectionInitPending = false;
     // Guard mInjectionInternalCamId and mInjectionInitPending.
     Mutex mInjectionParametersLock;
+
+    void updateTorchUidMapLocked(const String16& cameraId, int uid);
 };
 
 } // namespace android
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index eed2654..a38d7ae 100755
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -78,7 +78,8 @@
     }
 
     // Find out buffer size for JPEG
-    ssize_t maxJpegSize = device->getJpegBufferSize(params.pictureWidth, params.pictureHeight);
+    ssize_t maxJpegSize = device->getJpegBufferSize(device->infoPhysical(String8("")),
+            params.pictureWidth, params.pictureHeight);
     if (maxJpegSize <= 0) {
         ALOGE("%s: Camera %d: Jpeg buffer size (%zu) is invalid ",
                 __FUNCTION__, mId, maxJpegSize);
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 06a3d36..c454716 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -102,7 +102,8 @@
     virtual status_t dumpWatchedEventsToVector(std::vector<std::string> &out) = 0;
 
     /**
-     * The physical camera device's static characteristics metadata buffer
+     * The physical camera device's static characteristics metadata buffer, or
+     * the logical camera's static characteristics if physical id is empty.
      */
     virtual const CameraMetadata& infoPhysical(const String8& physicalId) const = 0;
 
@@ -307,7 +308,8 @@
      * Get Jpeg buffer size for a given jpeg resolution.
      * Negative values are error codes.
      */
-    virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const = 0;
+    virtual ssize_t getJpegBufferSize(const CameraMetadata &info, uint32_t width,
+            uint32_t height) const = 0;
 
     /**
      * Connect HAL notifications to a listener. Overwrites previous
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 0cce2ca..d37d717 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -20,7 +20,7 @@
 
 #include "CameraProviderManager.h"
 
-#include <android/hardware/camera/device/3.7/ICameraDevice.h>
+#include <android/hardware/camera/device/3.8/ICameraDevice.h>
 
 #include <algorithm>
 #include <chrono>
@@ -307,6 +307,50 @@
     return OK;
 }
 
+status_t CameraProviderManager::getTorchStrengthLevel(const std::string &id,
+        int32_t* torchStrength /*out*/) {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    return deviceInfo->getTorchStrengthLevel(torchStrength);
+}
+
+status_t CameraProviderManager::turnOnTorchWithStrengthLevel(const std::string &id,
+        int32_t torchStrength) {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    return deviceInfo->turnOnTorchWithStrengthLevel(torchStrength);
+}
+
+bool CameraProviderManager::shouldSkipTorchStrengthUpdate(const std::string &id,
+        int32_t torchStrength) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return true; // unknown device, skip the update
+
+    if (deviceInfo->mTorchStrengthLevel == torchStrength) {
+        ALOGV("%s: Skipping torch strength level updates prev_level: %d, new_level: %d",
+                __FUNCTION__, deviceInfo->mTorchStrengthLevel, torchStrength);
+        return true;
+    }
+    return false;
+}
+
+int32_t CameraProviderManager::getTorchDefaultStrengthLevel(const std::string &id) const {
+    std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+    auto deviceInfo = findDeviceInfoLocked(id);
+    if (deviceInfo == nullptr) return NAME_NOT_FOUND;
+
+    return deviceInfo->mTorchDefaultStrengthLevel;
+}
+
 bool CameraProviderManager::supportSetTorchMode(const std::string &id) const {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
     for (auto& provider : mProviders) {
@@ -2385,6 +2429,22 @@
         mHasFlashUnit = false;
     }
 
+    camera_metadata_entry entry =
+            mCameraCharacteristics.find(ANDROID_FLASH_INFO_STRENGTH_DEFAULT_LEVEL);
+    if (entry.count == 1) {
+        mTorchDefaultStrengthLevel = entry.data.i32[0];
+    } else {
+        mTorchDefaultStrengthLevel = 0;
+    }
+
+    entry = mCameraCharacteristics.find(ANDROID_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL);
+    if (entry.count == 1) {
+        mTorchMaximumStrengthLevel = entry.data.i32[0];
+    } else {
+        mTorchMaximumStrengthLevel = 0;
+    }
+
+    mTorchStrengthLevel = 0;
     queryPhysicalCameraIds();
 
     // Get physical camera characteristics if applicable
@@ -2468,6 +2528,80 @@
     return setTorchModeForDevice<InterfaceT>(enabled);
 }
 
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::turnOnTorchWithStrengthLevel(
+        int32_t torchStrength) {
+    const sp<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT> interface =
+        startDeviceInterface<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT>();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+    sp<hardware::camera::device::V3_8::ICameraDevice> interface_3_8 = nullptr;
+    auto castResult_3_8 = device::V3_8::ICameraDevice::castFrom(interface);
+    if (castResult_3_8.isOk()) {
+        interface_3_8 = castResult_3_8;
+    }
+
+    if (interface_3_8 == nullptr) {
+        return INVALID_OPERATION;
+    }
+
+    Status s = interface_3_8->turnOnTorchWithStrengthLevel(torchStrength);
+    if (s == Status::OK) {
+        mTorchStrengthLevel = torchStrength;
+    }
+    return mapToStatusT(s);
+}
+
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getTorchStrengthLevel(
+        int32_t *torchStrength) {
+    if (torchStrength == nullptr) {
+        return BAD_VALUE;
+    }
+    const sp<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT> interface =
+        startDeviceInterface<CameraProviderManager::ProviderInfo::DeviceInfo3::InterfaceT>();
+    if (interface == nullptr) {
+        return DEAD_OBJECT;
+    }
+    auto castResult_3_8 = device::V3_8::ICameraDevice::castFrom(interface);
+    sp<hardware::camera::device::V3_8::ICameraDevice> interface_3_8 = nullptr;
+    if (castResult_3_8.isOk()) {
+        interface_3_8 = castResult_3_8;
+    }
+
+    if (interface_3_8 == nullptr) {
+        return INVALID_OPERATION;
+    }
+
+    Status callStatus;
+    status_t res;
+    hardware::Return<void> ret = interface_3_8->getTorchStrengthLevel([&callStatus, &torchStrength]
+        (Status status, const int32_t& torchStrengthLevel) {
+        callStatus = status;
+        if (status == Status::OK) {
+             *torchStrength = torchStrengthLevel;
+        } });
+
+    if (ret.isOk()) {
+        switch (callStatus) {
+            case Status::OK:
+                // Expected case, do nothing.
+                res = OK;
+                break;
+            case Status::METHOD_NOT_SUPPORTED:
+                res = INVALID_OPERATION;
+                break;
+            default:
+                ALOGE("%s: Get torch strength level failed: %d", __FUNCTION__, callStatus);
+                res = UNKNOWN_ERROR;
+        }
+    } else {
+        ALOGE("%s: Unexpected binder error: %s", __FUNCTION__, ret.description().c_str());
+        res = UNKNOWN_ERROR;
+    }
+
+    return res;
+}
+
 status_t CameraProviderManager::ProviderInfo::DeviceInfo3::getCameraInfo(
         hardware::CameraInfo *info) const {
     if (info == nullptr) return BAD_VALUE;
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index f28d128..b03ca62 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -258,6 +258,17 @@
     bool supportSetTorchMode(const std::string &id) const;
 
     /**
+     * Check if torch strength update should be skipped or not.
+     */
+    bool shouldSkipTorchStrengthUpdate(const std::string &id, int32_t torchStrength) const;
+
+    /**
+     * Return the default torch strength level if the torch strength control
+     * feature is supported.
+     */
+    int32_t getTorchDefaultStrengthLevel(const std::string &id) const;
+
+    /**
      * Turn on or off the flashlight on a given camera device.
      * May fail if the device does not support this API, is in active use, or if the device
      * doesn't exist, etc.
@@ -265,6 +276,24 @@
     status_t setTorchMode(const std::string &id, bool enabled);
 
     /**
+     * Change the brightness level of the flash unit associated with the cameraId and
+     * set it to the value in torchStrength.
+     * If the torch is OFF and torchStrength > 0, the torch will be turned ON with the
+     * specified strength level. If the torch is ON, only the brightness level will be
+     * changed.
+     *
+     * This operation will fail if the device does not have flash unit, has flash unit
+     * but does not support this API, torchStrength is invalid or if the device doesn't
+     * exist etc.
+     */
+    status_t turnOnTorchWithStrengthLevel(const std::string &id, int32_t torchStrength);
+
+    /**
+     * Return the torch strength level of this camera device.
+     */
+    status_t getTorchStrengthLevel(const std::string &id, int32_t* torchStrength);
+
+    /**
      * Setup vendor tags for all registered providers
      */
     status_t setUpVendorTags();
@@ -475,10 +504,17 @@
             hardware::camera::common::V1_0::CameraDeviceStatus mStatus;
 
             wp<ProviderInfo> mParentProvider;
+            // Current, maximum, and default torch strength levels. Only meaningful
+            // if the torch strength control feature is supported.
+            int32_t mTorchStrengthLevel;
+            int32_t mTorchMaximumStrengthLevel;
+            int32_t mTorchDefaultStrengthLevel;
 
             bool hasFlashUnit() const { return mHasFlashUnit; }
             bool supportNativeZoomRatio() const { return mSupportNativeZoomRatio; }
             virtual status_t setTorchMode(bool enabled) = 0;
+            virtual status_t turnOnTorchWithStrengthLevel(int32_t torchStrength) = 0;
+            virtual status_t getTorchStrengthLevel(int32_t *torchStrength) = 0;
             virtual status_t getCameraInfo(hardware::CameraInfo *info) const = 0;
             virtual bool isAPI1Compatible() const = 0;
             virtual status_t dumpState(int fd) = 0;
@@ -516,8 +552,10 @@
                     mName(name), mId(id), mVersion(version), mProviderTagid(tagId),
                     mIsLogicalCamera(false), mResourceCost(resourceCost),
                     mStatus(hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT),
-                    mParentProvider(parentProvider), mHasFlashUnit(false),
-                    mSupportNativeZoomRatio(false), mPublicCameraIds(publicCameraIds) {}
+                    mParentProvider(parentProvider), mTorchStrengthLevel(0),
+                    mTorchMaximumStrengthLevel(0), mTorchDefaultStrengthLevel(0),
+                    mHasFlashUnit(false), mSupportNativeZoomRatio(false),
+                    mPublicCameraIds(publicCameraIds) {}
             virtual ~DeviceInfo();
         protected:
             bool mHasFlashUnit; // const after constructor
@@ -551,6 +589,9 @@
             typedef hardware::camera::device::V3_2::ICameraDevice InterfaceT;
 
             virtual status_t setTorchMode(bool enabled) override;
+            virtual status_t turnOnTorchWithStrengthLevel(int32_t torchStrength) override;
+            virtual status_t getTorchStrengthLevel(int32_t *torchStrength) override;
+
             virtual status_t getCameraInfo(hardware::CameraInfo *info) const override;
             virtual bool isAPI1Compatible() const override;
             virtual status_t dumpState(int fd) override;
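As a rough illustration of how the new provider-level torch strength entry points documented above compose, here is a minimal caller-side sketch. It is not part of this change; the helper name and the raw pointer are assumptions for brevity.

    // Hypothetical helper: drive the torch of camera `id` to its default strength
    // through the new CameraProviderManager API.
    static status_t setTorchToDefaultStrength(
            android::CameraProviderManager* manager, const std::string& id) {
        const int32_t level = manager->getTorchDefaultStrengthLevel(id);
        if (manager->shouldSkipTorchStrengthUpdate(id, level)) {
            return android::OK;  // already at this strength, nothing to do
        }
        return manager->turnOnTorchWithStrengthLevel(id, level);
    }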
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 3742a17..2f571a6 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -602,15 +602,16 @@
     return usage;
 }
 
-ssize_t Camera3Device::getJpegBufferSize(uint32_t width, uint32_t height) const {
+ssize_t Camera3Device::getJpegBufferSize(const CameraMetadata &info, uint32_t width,
+        uint32_t height) const {
     // Get max jpeg size (area-wise) for default sensor pixel mode
     camera3::Size maxDefaultJpegResolution =
-            SessionConfigurationUtils::getMaxJpegResolution(mDeviceInfo,
+            SessionConfigurationUtils::getMaxJpegResolution(info,
                     /*isUltraHighResolutionSensor*/false);
     // Get max jpeg size (area-wise) for max resolution sensor pixel mode / 0 if
     // not ultra high res sensor
     camera3::Size uhrMaxJpegResolution =
-            SessionConfigurationUtils::getMaxJpegResolution(mDeviceInfo,
+            SessionConfigurationUtils::getMaxJpegResolution(info,
                     /*isUltraHighResolution*/true);
     if (maxDefaultJpegResolution.width == 0) {
         ALOGE("%s: Camera %s: Can't find valid available jpeg sizes in static metadata!",
@@ -626,7 +627,7 @@
 
     // Get max jpeg buffer size
     ssize_t maxJpegBufferSize = 0;
-    camera_metadata_ro_entry jpegBufMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE);
+    camera_metadata_ro_entry jpegBufMaxSize = info.find(ANDROID_JPEG_MAX_SIZE);
     if (jpegBufMaxSize.count == 0) {
         ALOGE("%s: Camera %s: Can't find maximum JPEG size in static metadata!", __FUNCTION__,
                 mId.string());
@@ -656,9 +657,9 @@
     return jpegBufferSize;
 }
 
-ssize_t Camera3Device::getPointCloudBufferSize() const {
+ssize_t Camera3Device::getPointCloudBufferSize(const CameraMetadata &info) const {
     const int FLOATS_PER_POINT=4;
-    camera_metadata_ro_entry maxPointCount = mDeviceInfo.find(ANDROID_DEPTH_MAX_DEPTH_SAMPLES);
+    camera_metadata_ro_entry maxPointCount = info.find(ANDROID_DEPTH_MAX_DEPTH_SAMPLES);
     if (maxPointCount.count == 0) {
         ALOGE("%s: Camera %s: Can't find maximum depth point cloud size in static metadata!",
                 __FUNCTION__, mId.string());
@@ -669,14 +670,14 @@
     return maxBytesForPointCloud;
 }
 
-ssize_t Camera3Device::getRawOpaqueBufferSize(int32_t width, int32_t height,
-        bool maxResolution) const {
+ssize_t Camera3Device::getRawOpaqueBufferSize(const CameraMetadata &info, int32_t width,
+        int32_t height, bool maxResolution) const {
     const int PER_CONFIGURATION_SIZE = 3;
     const int WIDTH_OFFSET = 0;
     const int HEIGHT_OFFSET = 1;
     const int SIZE_OFFSET = 2;
     camera_metadata_ro_entry rawOpaqueSizes =
-        mDeviceInfo.find(
+        info.find(
             camera3::SessionConfigurationUtils::getAppropriateModeTag(
                     ANDROID_SENSOR_OPAQUE_RAW_SIZE,
                     maxResolution));
@@ -1477,7 +1478,7 @@
     if (format == HAL_PIXEL_FORMAT_BLOB) {
         ssize_t blobBufferSize;
         if (dataSpace == HAL_DATASPACE_DEPTH) {
-            blobBufferSize = getPointCloudBufferSize();
+            blobBufferSize = getPointCloudBufferSize(infoPhysical(physicalCameraId));
             if (blobBufferSize <= 0) {
                 SET_ERR_L("Invalid point cloud buffer size %zd", blobBufferSize);
                 return BAD_VALUE;
@@ -1485,7 +1486,7 @@
         } else if (dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS)) {
             blobBufferSize = width * height;
         } else {
-            blobBufferSize = getJpegBufferSize(width, height);
+            blobBufferSize = getJpegBufferSize(infoPhysical(physicalCameraId), width, height);
             if (blobBufferSize <= 0) {
                 SET_ERR_L("Invalid jpeg buffer size %zd", blobBufferSize);
                 return BAD_VALUE;
@@ -1499,7 +1500,8 @@
         bool maxResolution =
                 sensorPixelModesUsed.find(ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) !=
                         sensorPixelModesUsed.end();
-        ssize_t rawOpaqueBufferSize = getRawOpaqueBufferSize(width, height, maxResolution);
+        ssize_t rawOpaqueBufferSize = getRawOpaqueBufferSize(infoPhysical(physicalCameraId), width,
+                height, maxResolution);
         if (rawOpaqueBufferSize <= 0) {
             SET_ERR_L("Invalid RAW opaque buffer size %zd", rawOpaqueBufferSize);
             return BAD_VALUE;
@@ -2730,7 +2732,8 @@
                                                                 // always occupy the initial entry.
             if (outputStream->data_space == HAL_DATASPACE_V0_JFIF) {
                 bufferSizes[k] = static_cast<uint32_t>(
-                        getJpegBufferSize(outputStream->width, outputStream->height));
+                        getJpegBufferSize(infoPhysical(String8(outputStream->physical_camera_id)),
+                                outputStream->width, outputStream->height));
             } else if (outputStream->data_space ==
                     static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS)) {
                 bufferSizes[k] = outputStream->width * outputStream->height;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index d08c41f..3f069f9 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -196,9 +196,11 @@
 
     status_t prepare(int maxCount, int streamId) override;
 
-    ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const override;
-    ssize_t getPointCloudBufferSize() const;
-    ssize_t getRawOpaqueBufferSize(int32_t width, int32_t height, bool maxResolution) const;
+    ssize_t getJpegBufferSize(const CameraMetadata &info, uint32_t width,
+            uint32_t height) const override;
+    ssize_t getPointCloudBufferSize(const CameraMetadata &info) const;
+    ssize_t getRawOpaqueBufferSize(const CameraMetadata &info, int32_t width, int32_t height,
+            bool maxResolution) const;
 
     // Methods called by subclasses
     void             notifyStatus(bool idle); // updates from StatusTracker
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index 0d79b54..5e4f38a 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -1265,6 +1265,14 @@
     notify(states, &m);
 }
 
+// The buffers requested through this call are not tied to any particular
+// CaptureRequest. The HAL may use them for a frame's output buffer or for its
+// own internal use. If the HAL does use a buffer from the requested list for a
+// particular frame's output, that buffer will be returned with the
+// processCaptureResult call corresponding to the frame. The other buffers will
+// be returned through returnStreamBuffers. The buffers returned via
+// returnStreamBuffers will not have a valid timestamp (0) and will be dropped
+// by the bufferqueue.
 void requestStreamBuffers(RequestBufferStates& states,
         const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
         hardware::camera::device::V3_5::ICameraDeviceCallback::requestStreamBuffers_cb _hidl_cb) {
diff --git a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp
index 8e619e1..cca3f2e 100644
--- a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp
+++ b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.cpp
@@ -70,6 +70,11 @@
   return binder::Status::ok();
 }
 
+::android::binder::Status H2BCameraServiceListener::onTorchStrengthLevelChanged(
+    const ::android::String16&, int32_t) {
+  return binder::Status::ok();
+}
+
 } // implementation
 } // V2_0
 } // common
diff --git a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
index 7148035..7ef413f 100644
--- a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
+++ b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
@@ -54,6 +54,8 @@
 
     virtual ::android::binder::Status onTorchStatusChanged(
             int32_t status, const ::android::String16& cameraId) override;
+    virtual ::android::binder::Status onTorchStrengthLevelChanged(
+            const ::android::String16& cameraId, int32_t newStrengthLevel) override;
     virtual binder::Status onCameraAccessPrioritiesChanged() {
         // TODO: no implementation yet.
         return binder::Status::ok();
diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
index e46bf74..97d7bf4 100644
--- a/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
+++ b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
@@ -466,6 +466,12 @@
         // No op
         return binder::Status::ok();
     }
+
+    virtual binder::Status onTorchStrengthLevelChanged(const String16& /*cameraId*/,
+            int32_t /*torchStrength*/) {
+        // No op
+        return binder::Status::ok();
+    }
 };
 
 class TestCameraDeviceCallbacks : public hardware::camera2::BnCameraDeviceCallbacks {
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
index 8d170f1..8699543 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
@@ -120,11 +120,12 @@
     proxyBinder->pingForUserUpdate();
 }
 
-int CameraServiceProxyWrapper::getRotateAndCropOverride(String16 packageName, int lensFacing) {
+int CameraServiceProxyWrapper::getRotateAndCropOverride(String16 packageName, int lensFacing,
+        int userId) {
     sp<ICameraServiceProxy> proxyBinder = getCameraServiceProxy();
     if (proxyBinder == nullptr) return true;
     int ret = 0;
-    auto status = proxyBinder->getRotateAndCropOverride(packageName, lensFacing, &ret);
+    auto status = proxyBinder->getRotateAndCropOverride(packageName, lensFacing, userId, &ret);
     if (!status.isOk()) {
         ALOGE("%s: Failed during top activity orientation query: %s", __FUNCTION__,
                 status.exceptionMessage().c_str());
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
index a51e568..f701e94 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
@@ -92,7 +92,7 @@
     static void pingCameraServiceProxy();
 
     // Return the current top activity rotate and crop override.
-    static int getRotateAndCropOverride(String16 packageName, int lensFacing);
+    static int getRotateAndCropOverride(String16 packageName, int lensFacing, int userId);
 };
 
 } // android
diff --git a/services/mediametrics/Android.bp b/services/mediametrics/Android.bp
index 74e4715..c98d5fc 100644
--- a/services/mediametrics/Android.bp
+++ b/services/mediametrics/Android.bp
@@ -185,6 +185,10 @@
         "libplatformprotos",
     ],
 
+    header_libs: [
+        "libaaudio_headers",
+    ],
+
     include_dirs: [
         "system/media/audio_utils/include",
     ],
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index 270fe2f..46c701c 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -21,6 +21,7 @@
 
 #include "AudioAnalytics.h"
 
+#include <aaudio/AAudio.h>        // error codes
 #include <audio_utils/clock.h>    // clock conversions
 #include <cutils/properties.h>
 #include <statslog.h>             // statsd
@@ -64,6 +65,50 @@
     }
 }
 
+// The status variable contains status_t codes which are used by
+// the core audio framework. We also consider AAudio status codes.
+//
+// Compare with mediametrics::statusToStatusString
+//
+inline constexpr const char* extendedStatusToStatusString(status_t status) {
+    switch (status) {
+    case BAD_VALUE:           // status_t
+    case AAUDIO_ERROR_ILLEGAL_ARGUMENT:
+    case AAUDIO_ERROR_INVALID_FORMAT:
+    case AAUDIO_ERROR_INVALID_RATE:
+    case AAUDIO_ERROR_NULL:
+    case AAUDIO_ERROR_OUT_OF_RANGE:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT;
+    case DEAD_OBJECT:         // status_t
+    case FAILED_TRANSACTION:  // status_t
+    case AAUDIO_ERROR_DISCONNECTED:
+    case AAUDIO_ERROR_INVALID_HANDLE:
+    case AAUDIO_ERROR_NO_SERVICE:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_IO;
+    case NO_MEMORY:           // status_t
+    case AAUDIO_ERROR_NO_FREE_HANDLES:
+    case AAUDIO_ERROR_NO_MEMORY:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY;
+    case PERMISSION_DENIED:   // status_t
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY;
+    case INVALID_OPERATION:   // status_t
+    case NO_INIT:             // status_t
+    case AAUDIO_ERROR_INVALID_STATE:
+    case AAUDIO_ERROR_UNAVAILABLE:
+    case AAUDIO_ERROR_UNIMPLEMENTED:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_STATE;
+    case WOULD_BLOCK:         // status_t
+    case AAUDIO_ERROR_TIMEOUT:
+    case AAUDIO_ERROR_WOULD_BLOCK:
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT;
+    default:
+        if (status >= 0) return AMEDIAMETRICS_PROP_STATUS_VALUE_OK; // non-negative values are OK.
+        [[fallthrough]];            // negative values are errors.
+    case UNKNOWN_ERROR:       // status_t
+        return AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN;
+    }
+}
+
 static constexpr const auto LOG_LEVEL = android::base::VERBOSE;
 
 static constexpr int PREVIOUS_STATE_EXPIRE_SEC = 60 * 60; // 1 hour.
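The mapping above collapses both framework status_t codes and AAudio error codes into a small set of heat-map buckets. A short, purely illustrative sketch of how it would be exercised (the exact bucket strings live in MediaMetricsConstants.h and are not asserted here):

    // Hypothetical call sites, not part of this change.
    ALOGV("BAD_VALUE -> %s", extendedStatusToStatusString(BAD_VALUE));                        // argument bucket
    ALOGV("AAUDIO_ERROR_TIMEOUT -> %s", extendedStatusToStatusString(AAUDIO_ERROR_TIMEOUT));  // timeout bucket
    ALOGV("positive status -> %s", extendedStatusToStatusString(42));                         // non-negative -> OK bucket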
@@ -392,11 +437,15 @@
 {
     if (!startsWith(item->getKey(), AMEDIAMETRICS_KEY_PREFIX_AUDIO)) return BAD_VALUE;
     status_t status = mAnalyticsState->submit(item, isTrusted);
+
+    // Status is selectively authenticated.
+    processStatus(item);
+
     if (status != NO_ERROR) return status;  // may not be permitted.
 
     // Only if the item was successfully submitted (permission)
     // do we check triggered actions.
-    checkActions(item);
+    processActions(item);
     return NO_ERROR;
 }
 
@@ -430,7 +479,7 @@
     return { ss.str(), lines - ll };
 }
 
-void AudioAnalytics::checkActions(const std::shared_ptr<const mediametrics::Item>& item)
+void AudioAnalytics::processActions(const std::shared_ptr<const mediametrics::Item>& item)
 {
     auto actions = mActions.getActionsForItem(item); // internally locked.
     // Execute actions with no lock held.
@@ -439,6 +488,36 @@
     }
 }
 
+void AudioAnalytics::processStatus(const std::shared_ptr<const mediametrics::Item>& item)
+{
+    int32_t status;
+    if (!item->get(AMEDIAMETRICS_PROP_STATUS, &status)) return;
+
+    // Any record with a status will automatically be added to a heat map.
+    // Standard information.
+    const auto key = item->getKey();
+    const auto uid = item->getUid();
+
+    // from audio.track.10 ->  prefix = audio.track, suffix = 10
+    // from audio.track.error -> prefix = audio.track, suffix = error
+    const auto [prefixKey, suffixKey] = stringutils::splitPrefixKey(key);
+
+    std::string message;
+    item->get(AMEDIAMETRICS_PROP_STATUSMESSAGE, &message); // optional
+
+    int32_t subCode = 0; // not used
+    (void)item->get(AMEDIAMETRICS_PROP_STATUSSUBCODE, &subCode); // optional
+
+    std::string eventStr; // optional
+    item->get(AMEDIAMETRICS_PROP_EVENT, &eventStr);
+
+    const std::string statusString = extendedStatusToStatusString(status);
+
+    // Add to the heat map - we automatically track every item's status to see
+    // the types of errors and the frequency of errors.
+    mHeatMap.add(prefixKey, suffixKey, eventStr, statusString, uid, message, subCode);
+}
+
 // HELPER METHODS
 
 std::string AudioAnalytics::getThreadFromTrack(const std::string& track) const
diff --git a/services/mediametrics/AudioAnalytics.h b/services/mediametrics/AudioAnalytics.h
index 2b41a95..9b54cf3 100644
--- a/services/mediametrics/AudioAnalytics.h
+++ b/services/mediametrics/AudioAnalytics.h
@@ -20,6 +20,7 @@
 #include "AnalyticsActions.h"
 #include "AnalyticsState.h"
 #include "AudioPowerUsage.h"
+#include "HeatMap.h"
 #include "StatsdLog.h"
 #include "TimedAction.h"
 #include "Wrap.h"
@@ -73,11 +74,23 @@
     std::pair<std::string, int32_t> dump(
             int32_t lines = INT32_MAX, int64_t sinceNs = 0, const char *prefix = nullptr) const;
 
+    /**
+     * Returns a pair consisting of the dump string and the number of lines in the string.
+     *
+     * HeatMap dump.
+     */
+    std::pair<std::string, int32_t> dumpHeatMap(int32_t lines = INT32_MAX) const {
+        return mHeatMap.dump(lines);
+    }
+
     void clear() {
         // underlying state is locked.
         mPreviousAnalyticsState->clear();
         mAnalyticsState->clear();
 
+        // Clears the status map
+        mHeatMap.clear();
+
         // Clear power usage state.
         mAudioPowerUsage.clear();
     }
@@ -96,11 +109,18 @@
      */
 
     /**
-     * Checks for any pending actions for a particular item.
+     * Processes any pending actions for a particular item.
      *
      * \param item to check against the current AnalyticsActions.
      */
-    void checkActions(const std::shared_ptr<const mediametrics::Item>& item);
+    void processActions(const std::shared_ptr<const mediametrics::Item>& item);
+
+    /**
+     * Processes status information contained in the item.
+     *
+     * \param item to check against for status handling
+     */
+    void processStatus(const std::shared_ptr<const mediametrics::Item>& item);
 
     // HELPER METHODS
     /**
@@ -124,6 +144,9 @@
     TimedAction mTimedAction; // locked internally
     const std::shared_ptr<StatsdLog> mStatsdLog; // locked internally, ok for multiple threads.
 
+    static constexpr size_t kHeatEntries = 100;
+    HeatMap mHeatMap{kHeatEntries}; // locked internally, ok for multiple threads.
+
     // DeviceUse is a nested class which handles audio device usage accounting.
     // We define this class at the end to ensure prior variables all properly constructed.
     // TODO: Track / Thread interaction
diff --git a/services/mediametrics/AudioTypes.cpp b/services/mediametrics/AudioTypes.cpp
index 838cdd5..b67967b 100644
--- a/services/mediametrics/AudioTypes.cpp
+++ b/services/mediametrics/AudioTypes.cpp
@@ -15,8 +15,10 @@
  */
 
 #include "AudioTypes.h"
+#include "MediaMetricsConstants.h"
 #include "StringUtils.h"
 #include <media/TypeConverter.h> // requires libmedia_helper to get the Audio code.
+#include <statslog.h>            // statsd
 
 namespace android::mediametrics::types {
 
diff --git a/services/mediametrics/HeatMap.h b/services/mediametrics/HeatMap.h
new file mode 100644
index 0000000..950501a
--- /dev/null
+++ b/services/mediametrics/HeatMap.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <iomanip>
+#include <map>
+#include <sstream>
+#include "MediaMetricsConstants.h"
+
+namespace android::mediametrics {
+
+/**
+ * HeatData accumulates statistics on the status reported for a given key.
+ *
+ * HeatData is a helper class used by HeatMap to represent statistics.  We expose it
+ * here for testing purposes currently.
+ *
+ * Note: This class is not thread safe, so mutual exclusion should be obtained by the caller
+ * which in this case is HeatMap.  HeatMap getData() returns a local copy of HeatData, so use
+ * of that is thread-safe.
+ */
+class HeatData {
+    /* HeatData for a key is stored in a map based on the event (e.g. "start", "pause", "create")
+     * and then another map based on the status (e.g. "ok", "argument", "state").
+     */
+    std::map<std::string /* event */,
+             std::map<std::string /* status name */, size_t /* count, nonzero */>> mMap;
+
+public:
+    /**
+     * Add status data.
+     *
+     * \param suffix  (ignored) the suffix to the key that was stripped, if any.
+     * \param event             the event (e.g. create, start, pause, stop, etc.).
+     * \param status            the status (e.g. "ok", "argument", "state").
+     * \param uid     (ignored) the uid associated with the error.
+     * \param message (ignored) the status message, if any.
+     * \param subCode (ignored) the status subcode, if any.
+     */
+    void add(const std::string& suffix, const std::string& event, const std::string& status,
+            uid_t uid, const std::string& message, int32_t subCode) {
+        // Perhaps there could be a more detailed print.
+        (void)suffix;
+        (void)uid;
+        (void)message;
+        (void)subCode;
+        ++mMap[event][status];
+    }
+
+    /** Returns the number of distinct events that have recorded a status. */
+    size_t size() const {
+        return mMap.size();
+    }
+
+    /**
+     * Returns a deque with pairs indicating the count of Oks and Errors.
+     * The first pair is total, the other pairs are in order of mMap.
+     *
+     * Example return value of {ok, error} pairs:
+     *     total     key1      key2
+     * { { 2, 1 }, { 1, 0 }, { 1, 1 } }
+     */
+    std::deque<std::pair<size_t /* oks */, size_t /* errors */>> heatCount() const {
+        size_t totalOk = 0;
+        size_t totalError = 0;
+        std::deque<std::pair<size_t /* oks */, size_t /* errors */>> heat;
+        for (const auto &eventPair : mMap) {
+            size_t ok = 0;
+            size_t error = 0;
+            for (const auto &[name, count] : eventPair.second) {
+                if (name == AMEDIAMETRICS_PROP_STATUS_VALUE_OK) {
+                    ok += count;
+                } else {
+                    error += count;
+                }
+            }
+            totalOk += ok;
+            totalError += error;
+            heat.emplace_back(ok, error);
+        }
+        heat.emplace_front(totalOk, totalError);
+        return heat;
+    }
+
+    /** Returns the error fraction from a pair <oks, errors>, a float between 0.f and 1.f. */
+    static float fraction(const std::pair<size_t, size_t>& count) {
+        return (float)count.second / (count.first + count.second);
+    }
+
+    /** Returns the HeatMap information in a single line string. */
+    std::string dump() const {
+        const auto heat = heatCount();
+        auto it = heat.begin();
+        std::stringstream ss;
+        ss << "{ ";
+        float errorFraction = fraction(*it++);
+        if (errorFraction > 0.f) {
+            ss << std::fixed << std::setprecision(2) << errorFraction << " ";
+        }
+        for (const auto &eventPair : mMap) {
+            ss << eventPair.first << ": { ";
+            errorFraction = fraction(*it++);
+            if (errorFraction > 0.f) {
+                ss << std::fixed << std::setprecision(2) << errorFraction << " ";
+            }
+            for (const auto &[name, count]: eventPair.second) {
+                ss << "[ " << name << " : " << count << " ] ";
+            }
+            ss << "} ";
+        }
+        ss << " }";
+        return ss.str();
+    }
+};
+
+/**
+ * HeatMap is a thread-safe collection that counts activity of status errors per key.
+ *
+ * The classic heat map is a 2D picture with intensity shown by color.
+ * Here we accumulate the status results from keys to see if there are consistent
+ * failures in the system.
+ *
+ * TODO(b/210855555): Heatmap improvements.
+ *   1) heat decays in intensity in time for past events, currently we don't decay.
+ */
+
+class HeatMap {
+    const size_t mMaxSize;
+    mutable std::mutex mLock;
+    size_t mRejected GUARDED_BY(mLock) = 0;
+    std::map<std::string, HeatData> mMap GUARDED_BY(mLock);
+
+public:
+    /**
+     * Constructs a HeatMap.
+     *
+     * \param maxSize the maximum number of elements that are tracked.
+     */
+    explicit HeatMap(size_t maxSize) : mMaxSize(maxSize) {
+    }
+
+    /** Returns the number of keys. */
+    size_t size() const {
+        std::lock_guard l(mLock);
+        return mMap.size();
+    }
+
+    /** Clears error history. */
+    void clear() {
+        std::lock_guard l(mLock);
+        return mMap.clear();
+    }
+
+    /** Returns number of keys rejected due to space. */
+    size_t rejected() const {
+        std::lock_guard l(mLock);
+        return mRejected;
+    }
+
+    /** Returns a copy of the heat data associated with key. */
+    HeatData getData(const std::string& key) const {
+        std::lock_guard l(mLock);
+        return mMap.count(key) == 0 ? HeatData{} : mMap.at(key);
+    }
+
+    /**
+     * Adds a new entry.
+     * \param key               the key category (e.g. audio.track).
+     * \param suffix  (ignored) the suffix to the key that was stripped, if any.
+     * \param event             the event (e.g. create, start, pause, stop, etc.).
+     * \param status            the status (e.g. "ok", "argument", "state").
+     * \param uid     (ignored) the uid associated with the error.
+     * \param message (ignored) the status message, if any.
+     * \param subCode (ignored) the status subcode, if any.
+     */
+    void add(const std::string& key, const std::string& suffix, const std::string& event,
+            const std::string& status, uid_t uid, const std::string& message, int32_t subCode) {
+        std::lock_guard l(mLock);
+
+        // Hard limit on heat map entries.
+        // TODO: have better GC.
+        if (mMap.size() == mMaxSize && mMap.count(key) == 0) {
+            ++mRejected;
+            return;
+        }
+        mMap[key].add(suffix, event, status, uid, message, subCode);
+    }
+
+    /**
+     * Returns a pair consisting of the dump string and the number of lines in the string.
+     */
+    std::pair<std::string, int32_t> dump(int32_t lines = INT32_MAX) const {
+        std::stringstream ss;
+        int32_t ll = lines;
+        std::lock_guard l(mLock);
+        if (ll > 0) {
+            ss << "Error Heat Map (rejected: " << mRejected << "):\n";
+            --ll;
+        }
+        // TODO: restriction is implemented alphabetically not on priority.
+        for (const auto& [name, data] : mMap) {
+            if (ll <= 0) break;
+            ss << name << ": " << data.dump() << "\n";
+            --ll;
+        }
+        return { ss.str(), lines - ll };
+    }
+};
+
+} // namespace android::mediametrics
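A minimal usage sketch for the new class (the values below are illustrative; the unit test added later in this change exercises the same surface in more depth):

    // Hypothetical usage, mirroring what AudioAnalytics::processStatus() does internally.
    android::mediametrics::HeatMap heatMap{100 /* max keys */};
    heatMap.add("audio.track", "10", "start",
            AMEDIAMETRICS_PROP_STATUS_VALUE_OK, /*uid*/ 1000, /*message*/ "", /*subCode*/ 0);
    heatMap.add("audio.track", "11", "start",
            AMEDIAMETRICS_PROP_STATUS_VALUE_STATE, /*uid*/ 1000, "stream not open", 0);
    const auto [dumpString, lineCount] = heatMap.dump();  // header line plus one line per key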
diff --git a/services/mediametrics/MediaMetricsService.cpp b/services/mediametrics/MediaMetricsService.cpp
index 35e0ae4..636b343 100644
--- a/services/mediametrics/MediaMetricsService.cpp
+++ b/services/mediametrics/MediaMetricsService.cpp
@@ -319,11 +319,19 @@
                 result << "-- some lines may be truncated --\n";
             }
 
-            result << "LogSessionId:\n"
+            const int32_t heatLinesToDump = all ? INT32_MAX : 20;
+            const auto [ heatDumpString, heatLines] =
+                    mAudioAnalytics.dumpHeatMap(heatLinesToDump);
+            result << "\n" << heatDumpString;
+            if (heatLines == heatLinesToDump) {
+                result << "-- some lines may be truncated --\n";
+            }
+
+            result << "\nLogSessionId:\n"
                    << mediametrics::ValidateId::get()->dump();
 
             // Dump the statsd atoms we sent out.
-            result << "Statsd atoms:\n"
+            result << "\nStatsd atoms:\n"
                    << mStatsdLog->dumpToString("  " /* prefix */,
                            all ? STATSD_LOG_LINES_MAX : STATSD_LOG_LINES_DUMP);
         }
diff --git a/services/mediametrics/StringUtils.h b/services/mediametrics/StringUtils.h
index 01034d9..a56f5b8 100644
--- a/services/mediametrics/StringUtils.h
+++ b/services/mediametrics/StringUtils.h
@@ -167,4 +167,41 @@
     return ss.str();
 }
 
+/**
+ * Returns true if the string is non-null, not empty, and contains only digits.
+ */
+inline constexpr bool isNumeric(const char *s)
+{
+    if (s == nullptr || *s == 0) return false;
+    do {
+        if (!isdigit(*s)) return false;
+    } while (*++s != 0);
+    return true;  // all digits
+}
+
+/**
+ * Extracts out the prefix from the key, returning a pair of prefix, suffix.
+ *
+ * Usually the key is something like:
+ * Prefix.(ID)
+ *   where ID is an integer,
+ *               or "error" if the id was not returned because of failure,
+ *               or "status" if general status.
+ *
+ * Example: audio.track.10     -> prefix = audio.track, suffix = 10
+ *          audio.track.error  -> prefix = audio.track, suffix = error
+ *          audio.track.status -> prefix = audio.track, suffix = status
+ *          audio.mute         -> prefix = audio.mute,  suffix = ""
+ */
+inline std::pair<std::string /* prefix */,
+                 std::string /* suffix */> splitPrefixKey(const std::string &key)
+{
+    const size_t split = key.rfind('.');
+    const char* suffix = key.c_str() + split + 1;
+    if (*suffix && (!strcmp(suffix, "error") || !strcmp(suffix, "status") || isNumeric(suffix))) {
+        return { key.substr(0, split), suffix };
+    }
+    return { key, "" };
+}
+
 } // namespace android::mediametrics::stringutils
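The behavior documented in the splitPrefixKey() comment maps directly onto code; a small sketch (not part of the change):

    using android::mediametrics::stringutils::splitPrefixKey;
    const auto [p1, s1] = splitPrefixKey("audio.track.10");     // p1 == "audio.track", s1 == "10"
    const auto [p2, s2] = splitPrefixKey("audio.track.error");  // p2 == "audio.track", s2 == "error"
    const auto [p3, s3] = splitPrefixKey("audio.mute");         // p3 == "audio.mute",  s3 == ""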
diff --git a/services/mediametrics/statsd_audiorecord.cpp b/services/mediametrics/statsd_audiorecord.cpp
index c53b6f3..a7b045e 100644
--- a/services/mediametrics/statsd_audiorecord.cpp
+++ b/services/mediametrics/statsd_audiorecord.cpp
@@ -80,16 +80,20 @@
     }
 
     int64_t created_millis = -1;
+    // not currently sent from client.
     if (item->getInt64("android.media.audiorecord.createdMs", &created_millis)) {
         metrics_proto.set_created_millis(created_millis);
     }
 
     int64_t duration_millis = -1;
-    if (item->getInt64("android.media.audiorecord.durationMs", &duration_millis)) {
+    double durationMs = 0.;
+    if (item->getDouble("android.media.audiorecord.durationMs", &durationMs)) {
+        duration_millis = (int64_t)durationMs;
         metrics_proto.set_duration_millis(duration_millis);
     }
 
     int32_t count = -1;
+    // not currently sent from client.  (see start count instead).
     if (item->getInt32("android.media.audiorecord.n", &count)) {
         metrics_proto.set_count(count);
     }
@@ -129,7 +133,7 @@
     }
 
     int64_t start_count = -1;
-    if (item->getInt64("android.media.audiorecord.startcount", &start_count)) {
+    if (item->getInt64("android.media.audiorecord.startCount", &start_count)) {
         metrics_proto.set_start_count(start_count);
     }
 
diff --git a/services/mediametrics/statsd_audiotrack.cpp b/services/mediametrics/statsd_audiotrack.cpp
index 707effd..67514e9 100644
--- a/services/mediametrics/statsd_audiotrack.cpp
+++ b/services/mediametrics/statsd_audiotrack.cpp
@@ -56,52 +56,47 @@
     // flesh out the protobuf we'll hand off with our data
     //
 
-    // static constexpr char kAudioTrackStreamType[] = "android.media.audiotrack.streamtype";
+    // Do not change this without changing AudioTrack.cpp collection.
+
     // optional string streamType;
     std::string stream_type;
     if (item->getString("android.media.audiotrack.streamtype", &stream_type)) {
         metrics_proto.set_stream_type(stream_type);
     }
 
-    // static constexpr char kAudioTrackContentType[] = "android.media.audiotrack.type";
     // optional string contentType;
     std::string content_type;
     if (item->getString("android.media.audiotrack.type", &content_type)) {
         metrics_proto.set_content_type(content_type);
     }
 
-    // static constexpr char kAudioTrackUsage[] = "android.media.audiotrack.usage";
     // optional string trackUsage;
     std::string track_usage;
     if (item->getString("android.media.audiotrack.usage", &track_usage)) {
         metrics_proto.set_track_usage(track_usage);
     }
 
-    // static constexpr char kAudioTrackSampleRate[] = "android.media.audiotrack.samplerate";
-    // optional int32 samplerate;
+    // optional int32 sampleRate;
     int32_t sample_rate = -1;
-    if (item->getInt32("android.media.audiotrack.samplerate", &sample_rate)) {
+    if (item->getInt32("android.media.audiotrack.sampleRate", &sample_rate)) {
         metrics_proto.set_sample_rate(sample_rate);
     }
 
-    // static constexpr char kAudioTrackChannelMask[] = "android.media.audiotrack.channelmask";
     // optional int64 channelMask;
     int64_t channel_mask = -1;
-    if (item->getInt64("android.media.audiotrack.channelmask", &channel_mask)) {
+    if (item->getInt64("android.media.audiotrack.channelMask", &channel_mask)) {
         metrics_proto.set_channel_mask(channel_mask);
     }
 
-    // NB: These are not yet exposed as public Java API constants.
-    // static constexpr char kAudioTrackUnderrunFrames[] = "android.media.audiotrack.underrunframes";
-    // optional int32 underrunframes;
+    // optional int32 underrunFrames;
     int32_t underrun_frames = -1;
-    if (item->getInt32("android.media.audiotrack.underrunframes", &underrun_frames)) {
+    if (item->getInt32("android.media.audiotrack.underrunFrames", &underrun_frames)) {
         metrics_proto.set_underrun_frames(underrun_frames);
     }
 
-    // static constexpr char kAudioTrackStartupGlitch[] = "android.media.audiotrack.glitch.startup";
-    // optional int32 startupglitch;
+    // optional int32 glitch.startup;
     int32_t startup_glitch = -1;
+    // Not currently sent from client.
     if (item->getInt32("android.media.audiotrack.glitch.startup", &startup_glitch)) {
         metrics_proto.set_startup_glitch(startup_glitch);
     }
diff --git a/services/mediametrics/tests/mediametrics_tests.cpp b/services/mediametrics/tests/mediametrics_tests.cpp
index cd6af9f..102700a 100644
--- a/services/mediametrics/tests/mediametrics_tests.cpp
+++ b/services/mediametrics/tests/mediametrics_tests.cpp
@@ -1226,8 +1226,8 @@
     }
 }
 
-TEST(mediametrics_tests, ErrorConversion) {
-    constexpr status_t errors[] = {
+TEST(mediametrics_tests, StatusConversion) {
+    constexpr status_t statuses[] = {
         NO_ERROR,
         BAD_VALUE,
         DEAD_OBJECT,
@@ -1239,15 +1239,58 @@
     };
 
     auto roundTrip = [](status_t status) {
-        return android::mediametrics::errorStringToStatus(
-                android::mediametrics::statusToErrorString(status));
+        return android::mediametrics::statusStringToStatus(
+                android::mediametrics::statusToStatusString(status));
     };
 
     // Primary status error categories.
-    for (const auto error : errors) {
-        ASSERT_EQ(error, roundTrip(error));
+    for (const auto status : statuses) {
+        ASSERT_EQ(status, roundTrip(status));
     }
 
     // Status errors specially considered.
     ASSERT_EQ(DEAD_OBJECT, roundTrip(FAILED_TRANSACTION));
 }
+
+TEST(mediametrics_tests, HeatMap) {
+    constexpr size_t SIZE = 2;
+    android::mediametrics::HeatMap heatMap{SIZE};
+    constexpr uid_t UID = 0;
+    constexpr int32_t SUBCODE = 1;
+
+    ASSERT_EQ((size_t)0, heatMap.size());
+    heatMap.add("someKey", "someSuffix", "someEvent",
+            AMEDIAMETRICS_PROP_STATUS_VALUE_OK, UID, "message", SUBCODE);
+    ASSERT_EQ((size_t)1, heatMap.size());
+    heatMap.add("someKey", "someSuffix", "someEvent",
+            AMEDIAMETRICS_PROP_STATUS_VALUE_OK, UID, "message", SUBCODE);
+    heatMap.add("someKey", "someSuffix", "anotherEvent",
+            AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT, UID, "message", SUBCODE);
+    ASSERT_EQ((size_t)1, heatMap.size());
+    heatMap.add("anotherKey", "someSuffix", "someEvent",
+            AMEDIAMETRICS_PROP_STATUS_VALUE_OK, UID, "message", SUBCODE);
+    ASSERT_EQ((size_t)2, heatMap.size());
+    ASSERT_EQ((size_t)0, heatMap.rejected());
+
+    heatMap.add("thirdKey", "someSuffix", "someEvent",
+            AMEDIAMETRICS_PROP_STATUS_VALUE_OK, UID, "message", SUBCODE);
+    ASSERT_EQ((size_t)2, heatMap.size());
+    ASSERT_EQ((size_t)1, heatMap.rejected());
+
+    android::mediametrics::HeatData heatData = heatMap.getData("someKey");
+    ASSERT_EQ((size_t)2, heatData.size());
+    auto count = heatData.heatCount();
+    ASSERT_EQ((size_t)3, count.size()); // pairs in order { total, "anotherEvent", "someEvent" }
+    // check total value
+    ASSERT_EQ((size_t)2, count[0].first);  // OK
+    ASSERT_EQ((size_t)1, count[0].second); // ERROR;
+    // first key "anotherEvent"
+    ASSERT_EQ((size_t)0, count[1].first);  // OK
+    ASSERT_EQ((size_t)1, count[1].second); // ERROR;
+    // second key "someEvent"
+    ASSERT_EQ((size_t)2, count[2].first);  // OK
+    ASSERT_EQ((size_t)0, count[2].second); // ERROR;
+
+    heatMap.clear();
+    ASSERT_EQ((size_t)0, heatMap.size());
+}
diff --git a/services/oboeservice/AAudioCommandQueue.cpp b/services/oboeservice/AAudioCommandQueue.cpp
index ddaabe8..9bd18b3 100644
--- a/services/oboeservice/AAudioCommandQueue.cpp
+++ b/services/oboeservice/AAudioCommandQueue.cpp
@@ -28,6 +28,10 @@
 aaudio_result_t AAudioCommandQueue::sendCommand(std::shared_ptr<AAudioCommand> command) {
     {
         std::scoped_lock<std::mutex> _l(mLock);
+        if (!mRunning) {
+            ALOGE("Tried to send command while it was not running");
+            return AAUDIO_ERROR_INVALID_STATE;
+        }
         mCommands.push(command);
         mWaitWorkCond.notify_one();
     }
@@ -68,7 +72,7 @@
                 return !mRunning || !mCommands.empty();
             });
         }
-        if (!mCommands.empty()) {
+        if (!mCommands.empty() && mRunning) {
             command = mCommands.front();
             mCommands.pop();
         }
@@ -76,9 +80,27 @@
     return command;
 }
 
+void AAudioCommandQueue::startWaiting() {
+    std::scoped_lock<std::mutex> _l(mLock);
+    mRunning = true;
+}
+
 void AAudioCommandQueue::stopWaiting() {
     std::scoped_lock<std::mutex> _l(mLock);
     mRunning = false;
+    // Clear all commands in the queue as the command thread is stopped.
+    while (!mCommands.empty()) {
+        auto command = mCommands.front();
+        mCommands.pop();
+        std::scoped_lock<std::mutex> _cl(command->lock);
+        // If the sender is waiting for a reply, return AAUDIO_ERROR_INVALID_STATE
+        // as the command will no longer be processed.
+        if (command->isWaitingForReply) {
+            command->result = AAUDIO_ERROR_INVALID_STATE;
+            command->isWaitingForReply = false;
+            command->conditionVariable.notify_one();
+        }
+    }
     mWaitWorkCond.notify_one();
 }
 
diff --git a/services/oboeservice/AAudioCommandQueue.h b/services/oboeservice/AAudioCommandQueue.h
index 5f25507..64442a3 100644
--- a/services/oboeservice/AAudioCommandQueue.h
+++ b/services/oboeservice/AAudioCommandQueue.h
@@ -78,6 +78,12 @@
     std::shared_ptr<AAudioCommand> waitForCommand(int64_t timeoutNanos = -1);
 
     /**
+     * Start waiting for commands. Commands can only be pushed into the command queue after it
+     * starts waiting.
+     */
+    void startWaiting();
+
+    /**
      * Force stop waiting for next command
      */
     void stopWaiting();
@@ -87,7 +93,7 @@
     std::condition_variable mWaitWorkCond;
 
     std::queue<std::shared_ptr<AAudioCommand>> mCommands GUARDED_BY(mLock);
-    bool mRunning GUARDED_BY(mLock) = true;
+    bool mRunning GUARDED_BY(mLock) = false;
 };
 
 } // namespace aaudio
\ No newline at end of file
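The new startWaiting()/stopWaiting() pair bounds the window in which sendCommand() is accepted. A lifecycle sketch, borrowing the stream's START opcode and TIMEOUT_NANOS constant from AAudioServiceStreamBase purely as stand-ins:

    // Sketch only, not the service implementation.
    aaudio::AAudioCommandQueue queue;
    queue.startWaiting();                        // from here on, sendCommand() is accepted
    auto command = std::make_shared<aaudio::AAudioCommand>(
            START, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
    aaudio_result_t result = queue.sendCommand(command);  // blocks until the worker replies
    queue.stopWaiting();                         // pending waiters complete with AAUDIO_ERROR_INVALID_STATE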
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index a25a791..8b5ccaa 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -52,7 +52,6 @@
         , mAtomicStreamTimestamp()
         , mAudioService(audioService) {
     mMmapClient.attributionSource = AttributionSourceState();
-    mThreadEnabled = true;
 }
 
 AAudioServiceStreamBase::~AAudioServiceStreamBase() {
@@ -178,6 +177,7 @@
 
     // Make sure this object does not get deleted before the run() method
     // can protect it by making a strong pointer.
+    mCommandQueue.startWaiting();
     mThreadEnabled = true;
     incStrong(nullptr); // See run() method.
     result = mCommandThread.start(this);
@@ -188,14 +188,15 @@
     return result;
 
 error:
-    close();
+    closeAndClear();
+    mThreadEnabled = false;
+    mCommandQueue.stopWaiting();
+    mCommandThread.stop();
     return result;
 }
 
 aaudio_result_t AAudioServiceStreamBase::close() {
-    auto command = std::make_shared<AAudioCommand>(
-            CLOSE, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
-    aaudio_result_t result = mCommandQueue.sendCommand(command);
+    aaudio_result_t result = sendCommand(CLOSE, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
 
     // Stop the command thread as the stream is closed.
     mThreadEnabled = false;
@@ -213,25 +214,7 @@
     // This will stop the stream, just in case it was not already stopped.
     stop_l();
 
-    aaudio_result_t result = AAUDIO_OK;
-    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
-    if (endpoint == nullptr) {
-        result = AAUDIO_ERROR_INVALID_STATE;
-    } else {
-        endpoint->unregisterStream(this);
-        AAudioEndpointManager &endpointManager = AAudioEndpointManager::getInstance();
-        endpointManager.closeEndpoint(endpoint);
-
-        // AAudioService::closeStream() prevents two threads from closing at the same time.
-        mServiceEndpoint.clear(); // endpoint will hold the pointer after this method returns.
-    }
-
-    setState(AAUDIO_STREAM_STATE_CLOSED);
-
-    mediametrics::LogItem(mMetricsId)
-        .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CLOSE)
-        .record();
-    return result;
+    return closeAndClear();
 }
 
 aaudio_result_t AAudioServiceStreamBase::startDevice() {
@@ -250,9 +233,7 @@
  * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
  */
 aaudio_result_t AAudioServiceStreamBase::start() {
-    auto command = std::make_shared<AAudioCommand>(
-            START, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
-    return mCommandQueue.sendCommand(command);
+    return sendCommand(START, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
 }
 
 aaudio_result_t AAudioServiceStreamBase::start_l() {
@@ -300,9 +281,7 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::pause() {
-    auto command = std::make_shared<AAudioCommand>(
-            PAUSE, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
-    return mCommandQueue.sendCommand(command);
+    return sendCommand(PAUSE, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
 }
 
 aaudio_result_t AAudioServiceStreamBase::pause_l() {
@@ -338,9 +317,7 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::stop() {
-    auto command = std::make_shared<AAudioCommand>(
-            STOP, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
-    return mCommandQueue.sendCommand(command);
+    return sendCommand(STOP, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
 }
 
 aaudio_result_t AAudioServiceStreamBase::stop_l() {
@@ -385,9 +362,7 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::flush() {
-    auto command = std::make_shared<AAudioCommand>(
-            FLUSH, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
-    return mCommandQueue.sendCommand(command);
+    return sendCommand(FLUSH, nullptr, true /*waitForReply*/, TIMEOUT_NANOS);
 }
 
 aaudio_result_t AAudioServiceStreamBase::flush_l() {
@@ -514,8 +489,7 @@
 }
 
 void AAudioServiceStreamBase::disconnect() {
-    auto command = std::make_shared<AAudioCommand>(DISCONNECT);
-    mCommandQueue.sendCommand(command);
+    sendCommand(DISCONNECT);
 }
 
 void AAudioServiceStreamBase::disconnect_l() {
@@ -533,12 +507,10 @@
 
 aaudio_result_t AAudioServiceStreamBase::registerAudioThread(pid_t clientThreadId, int priority) {
     const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
-    auto command = std::make_shared<AAudioCommand>(
-            REGISTER_AUDIO_THREAD,
+    return sendCommand(REGISTER_AUDIO_THREAD,
             std::make_shared<RegisterAudioThreadParam>(ownerPid, clientThreadId, priority),
             true /*waitForReply*/,
             TIMEOUT_NANOS);
-    return mCommandQueue.sendCommand(command);
 }
 
 aaudio_result_t AAudioServiceStreamBase::registerAudioThread_l(
@@ -561,12 +533,10 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::unregisterAudioThread(pid_t clientThreadId) {
-    auto command = std::make_shared<AAudioCommand>(
-            UNREGISTER_AUDIO_THREAD,
+    return sendCommand(UNREGISTER_AUDIO_THREAD,
             std::make_shared<UnregisterAudioThreadParam>(clientThreadId),
             true /*waitForReply*/,
             TIMEOUT_NANOS);
-    return mCommandQueue.sendCommand(command);
 }
 
 aaudio_result_t AAudioServiceStreamBase::unregisterAudioThread_l(pid_t clientThreadId) {
@@ -682,12 +652,11 @@
  * used to communicate with the underlying HAL or Service.
  */
 aaudio_result_t AAudioServiceStreamBase::getDescription(AudioEndpointParcelable &parcelable) {
-    auto command = std::make_shared<AAudioCommand>(
+    return sendCommand(
             GET_DESCRIPTION,
             std::make_shared<GetDescriptionParam>(&parcelable),
             true /*waitForReply*/,
             TIMEOUT_NANOS);
-    return mCommandQueue.sendCommand(command);
 }
 
 aaudio_result_t AAudioServiceStreamBase::getDescription_l(AudioEndpointParcelable* parcelable) {
@@ -707,3 +676,33 @@
 void AAudioServiceStreamBase::onVolumeChanged(float volume) {
     sendServiceEvent(AAUDIO_SERVICE_EVENT_VOLUME, volume);
 }
+
+aaudio_result_t AAudioServiceStreamBase::sendCommand(aaudio_command_opcode opCode,
+                                                     std::shared_ptr<AAudioCommandParam> param,
+                                                     bool waitForReply,
+                                                     int64_t timeoutNanos) {
+    return mCommandQueue.sendCommand(std::make_shared<AAudioCommand>(
+            opCode, param, waitForReply, timeoutNanos));
+}
+
+aaudio_result_t AAudioServiceStreamBase::closeAndClear() {
+    aaudio_result_t result = AAUDIO_OK;
+    sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
+    if (endpoint == nullptr) {
+        result = AAUDIO_ERROR_INVALID_STATE;
+    } else {
+        endpoint->unregisterStream(this);
+        AAudioEndpointManager &endpointManager = AAudioEndpointManager::getInstance();
+        endpointManager.closeEndpoint(endpoint);
+
+        // AAudioService::closeStream() prevents two threads from closing at the same time.
+        mServiceEndpoint.clear(); // endpoint will hold the pointer after this method returns.
+    }
+
+    setState(AAUDIO_STREAM_STATE_CLOSED);
+
+    mediametrics::LogItem(mMetricsId)
+        .set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_CLOSE)
+        .record();
+    return result;
+}
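
sendCommand() above only packages the opcode and parameters and forwards them to the queue; the command thread that pops and executes commands is not part of this diff. A hypothetical sketch of that consuming side, assuming the CommandQueue sketch earlier in this section, with the dispatch to start_l()/pause_l()/stop_l()/close_l() abstracted behind a callback:

```cpp
// Hypothetical sketch of the consuming side of the command pattern (not part
// of this diff): pop a command, execute it, then wake the sender blocked in
// sendCommand(..., waitForReply=true).
#include <functional>
#include <memory>
#include <mutex>

void processOneCommand(CommandQueue& queue,
                       const std::function<int(const Command&)>& executeLocked) {
    std::shared_ptr<Command> command = queue.waitForCommand(/*timeoutNanos=*/ -1);
    if (command == nullptr) {
        return;                                  // queue stopped or timed out
    }

    // In the real service this dispatches on the command's opcode to the
    // corresponding *_l() method while holding the stream lock.
    const int result = executeLocked(*command);

    // Hand the result back to a sender blocked on the command's condition variable.
    std::scoped_lock<std::mutex> _cl(command->lock);
    if (command->isWaitingForReply) {
        command->result = result;
        command->isWaitingForReply = false;
        command->conditionVariable.notify_one();
    }
}
```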
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index aa8e8cf..dddd69f 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -366,6 +366,13 @@
     aaudio_result_t sendServiceEvent(aaudio_service_event_t event,
                                      double dataDouble);
 
+    aaudio_result_t sendCommand(aaudio_command_opcode opCode,
+                                std::shared_ptr<AAudioCommandParam> param = nullptr,
+                                bool waitForReply = false,
+                                int64_t timeoutNanos = 0);
+
+    aaudio_result_t closeAndClear();
+
     /**
      * @return true if the queue is getting full.
      */
diff --git a/services/tuner/hidl/TunerHidlFilter.cpp b/services/tuner/hidl/TunerHidlFilter.cpp
index 7b76093..b738b57 100644
--- a/services/tuner/hidl/TunerHidlFilter.cpp
+++ b/services/tuner/hidl/TunerHidlFilter.cpp
@@ -1036,6 +1036,8 @@
         media.streamId = static_cast<int32_t>(mediaEvent.streamId);
         media.isPtsPresent = mediaEvent.isPtsPresent;
         media.pts = static_cast<int64_t>(mediaEvent.pts);
+        media.isDtsPresent = false;
+        media.dts = static_cast<int64_t>(-1);
         media.dataLength = static_cast<int64_t>(mediaEvent.dataLength);
         media.offset = static_cast<int64_t>(mediaEvent.offset);
         media.isSecureMemory = mediaEvent.isSecureMemory;
@@ -1078,7 +1080,7 @@
         section.tableId = static_cast<int32_t>(sectionEvent.tableId);
         section.version = static_cast<int32_t>(sectionEvent.version);
         section.sectionNum = static_cast<int32_t>(sectionEvent.sectionNum);
-        section.dataLength = static_cast<int32_t>(sectionEvent.dataLength);
+        section.dataLength = static_cast<int64_t>(sectionEvent.dataLength);
 
         DemuxFilterEvent filterEvent;
         filterEvent.set<DemuxFilterEvent::section>(move(section));
@@ -1186,6 +1188,7 @@
         DemuxFilterDownloadEvent download;
 
         download.itemId = static_cast<int32_t>(downloadEvent.itemId);
+        download.downloadId = -1;
         download.itemFragmentIndex = static_cast<int32_t>(downloadEvent.itemFragmentIndex);
         download.mpuSequenceNumber = static_cast<int32_t>(downloadEvent.mpuSequenceNumber);
         download.lastItemFragmentIndex = static_cast<int32_t>(downloadEvent.lastItemFragmentIndex);
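
The tuner hunks fill in AIDL-only fields that the legacy HIDL HAL cannot report (DTS and downloadId get explicit sentinels) and widen the section dataLength to int64. A minimal sketch of that defaulting pattern, using hypothetical stand-in structs rather than the real android.hardware.tv.tuner types:

```cpp
// Minimal sketch of defaulting AIDL-only fields when converting a legacy
// HIDL event; HidlMediaEvent/AidlMediaEvent are stand-ins, not real types.
#include <cstdint>

struct HidlMediaEvent {            // fields available from the legacy HAL
    uint32_t streamId;
    bool isPtsPresent;
    uint64_t pts;
    uint32_t dataLength;
};

struct AidlMediaEvent {            // newer parcelable with extra fields
    int32_t streamId;
    bool isPtsPresent;
    int64_t pts;
    bool isDtsPresent;             // not reported by the legacy HAL
    int64_t dts;
    int64_t dataLength;            // widened from the HIDL 32-bit length
};

AidlMediaEvent convert(const HidlMediaEvent& in) {
    AidlMediaEvent out{};
    out.streamId = static_cast<int32_t>(in.streamId);
    out.isPtsPresent = in.isPtsPresent;
    out.pts = static_cast<int64_t>(in.pts);
    out.isDtsPresent = false;                    // legacy HAL cannot report DTS
    out.dts = static_cast<int64_t>(-1);          // sentinel: no DTS available
    out.dataLength = static_cast<int64_t>(in.dataLength);
    return out;
}
```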