Merge "Fix audioflinger in overflow sanitized builds."
diff --git a/camera/Android.bp b/camera/Android.bp
index c76ae50..24b3918 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -29,12 +29,7 @@
         // AIDL files for camera interfaces
         // The headers for these interfaces will be available to any modules that
         // include libcamera_client, at the path "aidl/package/path/BnFoo.h"
-        "aidl/android/hardware/ICameraService.aidl",
-        "aidl/android/hardware/ICameraServiceListener.aidl",
-        "aidl/android/hardware/ICameraServiceProxy.aidl",
-        "aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl",
-        "aidl/android/hardware/camera2/ICameraDeviceUser.aidl",
-
+        ":libcamera_client_aidl",
 
         // Source for camera interface parcelables, and manually-written interfaces
         "Camera.cpp",
@@ -81,3 +76,25 @@
     ],
 
 }
+
+// AIDL interface between camera clients and the camera service.
+filegroup {
+    name: "libcamera_client_aidl",
+    srcs: [
+        "aidl/android/hardware/ICameraService.aidl",
+        "aidl/android/hardware/ICameraServiceListener.aidl",
+        "aidl/android/hardware/ICameraServiceProxy.aidl",
+        "aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl",
+        "aidl/android/hardware/camera2/ICameraDeviceUser.aidl",
+    ],
+}
+
+// Extra AIDL files that are used by framework.jar but not libcamera_client
+// because they have hand-written native implementations.
+filegroup {
+    name: "libcamera_client_framework_aidl",
+    srcs: [
+        "aidl/android/hardware/ICamera.aidl",
+        "aidl/android/hardware/ICameraClient.aidl",
+    ],
+}
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 1793877..4f893f1 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -1635,6 +1635,28 @@
      */
     ACAMERA_CONTROL_ENABLE_ZSL =                                // byte (acamera_metadata_enum_android_control_enable_zsl_t)
             ACAMERA_CONTROL_START + 41,
+    /**
+     * <p>Whether a significant scene change is detected within the currently-set AF
+     * region(s).</p>
+     *
+     * <p>Type: int32 (acamera_metadata_enum_android_control_af_scene_change_t)</p>
+     *
+     * <p>This tag may appear in:
+     * <ul>
+     *   <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+     * </ul></p>
+     *
+     * <p>When the camera focus routine detects a change in the scene it is looking at,
+     * such as a large shift in camera viewpoint, significant motion in the scene, or a
+     * significant illumination change, this value will be set to DETECTED for a single capture
+     * result. Otherwise the value will be NOT_DETECTED. The threshold for detection is similar
+     * to what would trigger a new passive focus scan to begin in CONTINUOUS autofocus modes.</p>
+     * <p>afSceneChange may be DETECTED only if afMode is AF_MODE_CONTINUOUS_VIDEO or
+     * AF_MODE_CONTINUOUS_PICTURE. In other AF modes, afSceneChange must be NOT_DETECTED.</p>
+     * <p>This key will be available if the camera device advertises this key via {@link ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS }.</p>
+     */
+    ACAMERA_CONTROL_AF_SCENE_CHANGE =                           // int32 (acamera_metadata_enum_android_control_af_scene_change_t)
+            ACAMERA_CONTROL_START + 42,
     ACAMERA_CONTROL_END,
 
     /**
@@ -6115,6 +6137,20 @@
 
 } acamera_metadata_enum_android_control_enable_zsl_t;
 
+// ACAMERA_CONTROL_AF_SCENE_CHANGE
+typedef enum acamera_metadata_enum_acamera_control_af_scene_change {
+    /**
+     * <p>Scene change is not detected within the AF region(s).</p>
+     */
+    ACAMERA_CONTROL_AF_SCENE_CHANGE_NOT_DETECTED                     = 0,
+
+    /**
+     * <p>Scene change is detected within the AF region(s).</p>
+     */
+    ACAMERA_CONTROL_AF_SCENE_CHANGE_DETECTED                         = 1,
+
+} acamera_metadata_enum_android_control_af_scene_change_t;
+
 
 
 // ACAMERA_EDGE_MODE
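
For reference, a client would read the new tag from a capture result roughly as in the sketch below. The callback name and wiring are illustrative assumptions; ACameraMetadata_getConstEntry is the standard NDK accessor, and only the tag and enum values come from the header change above.

#include <camera/NdkCameraCaptureSession.h>
#include <camera/NdkCameraMetadata.h>

static void onCaptureCompleted(void* /*context*/,
                               ACameraCaptureSession* /*session*/,
                               ACaptureRequest* /*request*/,
                               const ACameraMetadata* result) {
    // Sketch: query ACAMERA_CONTROL_AF_SCENE_CHANGE from a capture result.
    // Only present if the device lists the key in ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS.
    ACameraMetadata_const_entry entry;
    camera_status_t status = ACameraMetadata_getConstEntry(
            result, ACAMERA_CONTROL_AF_SCENE_CHANGE, &entry);
    if (status != ACAMERA_OK || entry.count == 0) {
        return;  // key not supported or absent in this result
    }
    if (entry.data.i32[0] == ACAMERA_CONTROL_AF_SCENE_CHANGE_DETECTED) {
        // The AF routine detected a significant scene change for this frame.
    }
}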
diff --git a/drm/mediadrm/plugins/clearkey/ClearKeyDrmProperties.h b/drm/mediadrm/plugins/clearkey/ClearKeyDrmProperties.h
new file mode 100644
index 0000000..a99e174
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/ClearKeyDrmProperties.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CLEARKEY_DRM_PROPERTIES_H_
+#define CLEARKEY_DRM_PROPERTIES_H_
+
+#include <utils/String8.h>
+
+namespace clearkeydrm {
+
+static const android::String8 kVendorKey("vendor");
+static const android::String8 kVendorValue("Google");
+static const android::String8 kVersionKey("version");
+static const android::String8 kVersionValue("1.0");
+static const android::String8 kPluginDescriptionKey("description");
+static const android::String8 kPluginDescriptionValue("ClearKey CDM");
+static const android::String8 kAlgorithmsKey("algorithms");
+static const android::String8 kAlgorithmsValue("");
+static const android::String8 kListenerTestSupportKey("listenerTestSupport");
+static const android::String8 kListenerTestSupportValue("true");
+
+static const android::String8 kDeviceIdKey("deviceId");
+static const uint8_t kTestDeviceIdData[] =
+        {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+         0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
+} // namespace clearkeydrm
+
+#endif // CLEARKEY_DRM_PROPERTIES_H_
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
index ec07d87..7c43994 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
@@ -22,7 +22,7 @@
 #include <utils/StrongPointer.h>
 
 #include "DrmPlugin.h"
-
+#include "ClearKeyDrmProperties.h"
 #include "Session.h"
 
 namespace {
@@ -44,7 +44,22 @@
 
 DrmPlugin::DrmPlugin(SessionLibrary* sessionLibrary)
         : mSessionLibrary(sessionLibrary) {
+
     mPlayPolicy.clear();
+    initProperties();
+}
+
+void DrmPlugin::initProperties() {
+    mStringProperties.clear();
+    mStringProperties.add(kVendorKey, kVendorValue);
+    mStringProperties.add(kVersionKey, kVersionValue);
+    mStringProperties.add(kPluginDescriptionKey, kPluginDescriptionValue);
+    mStringProperties.add(kAlgorithmsKey, kAlgorithmsValue);
+    mStringProperties.add(kListenerTestSupportKey, kListenerTestSupportValue);
+
+    Vector<uint8_t> testDeviceId;
+    testDeviceId.appendArray(kTestDeviceIdData, sizeof(kTestDeviceIdData) / sizeof(uint8_t));
+    mByteArrayProperties.add(kDeviceIdKey, testDeviceId);
 }
 
 status_t DrmPlugin::openSession(Vector<uint8_t>& sessionId) {
@@ -122,21 +137,57 @@
     return res;
 }
 
+status_t DrmPlugin::getPropertyByteArray(
+        const String8& name, Vector<uint8_t>& value) const {
+    ssize_t index = mByteArrayProperties.indexOfKey(name);
+    if (index < 0) {
+        ALOGE("App requested unknown property: %s", name.string());
+        return android::BAD_VALUE;
+    }
+    value = mByteArrayProperties.valueAt(index);
+    return android::OK;
+}
+
+status_t DrmPlugin::setPropertyByteArray(
+        const String8& name, const Vector<uint8_t>& value) {
+    if (0 == name.compare(kDeviceIdKey)) {
+        ALOGD("Cannot set immutable property: %s", name.string());
+        return android::BAD_VALUE;
+    }
+
+    ssize_t status = mByteArrayProperties.replaceValueFor(name, value);
+    if (status >= 0) {
+        return android::OK;
+    }
+    ALOGE("Failed to set property byte array, key=%s", name.string());
+    return android::BAD_VALUE;
+}
+
 status_t DrmPlugin::getPropertyString(
         const String8& name, String8& value) const {
-    if (name == "vendor") {
-        value = "Google";
-    } else if (name == "version") {
-        value = "1.0";
-    } else if (name == "description") {
-        value = "ClearKey CDM";
-    } else if (name == "algorithms") {
-        value = "";
-    } else if (name == "listenerTestSupport") {
-        value = "true";
-    } else {
-        ALOGE("App requested unknown string property %s", name.string());
-        return android::ERROR_DRM_CANNOT_HANDLE;
+    ssize_t index = mStringProperties.indexOfKey(name);
+    if (index < 0) {
+        ALOGE("App requested unknown property: %s", name.string());
+        return android::BAD_VALUE;
+    }
+    value = mStringProperties.valueAt(index);
+    return android::OK;
+}
+
+status_t DrmPlugin::setPropertyString(
+        const String8& name, const String8& value) {
+    String8 immutableKeys;
+    immutableKeys.appendFormat("%s,%s,%s,%s",
+            kAlgorithmsKey.string(), kPluginDescriptionKey.string(),
+            kVendorKey.string(), kVersionKey.string());
+    if (immutableKeys.contains(name.string())) {
+        ALOGD("Cannot set immutable property: %s", name.string());
+        return android::BAD_VALUE;
+    }
+
+    if (mStringProperties.add(name, value) < 0) {
+        ALOGE("Failed to set property string, key=%s", name.string());
+        return android::BAD_VALUE;
     }
     return android::OK;
 }
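
In effect, the plugin now serves these properties from KeyedVector tables instead of an if/else chain. A rough usage sketch follows; the caller, helper function, and "customKey" value are hypothetical, not part of the plugin.

#include <utils/Errors.h>
#include <utils/String8.h>
#include <utils/Vector.h>
#include "DrmPlugin.h"

// Hypothetical caller exercising the new property tables.
void exerciseProperties(clearkeydrm::DrmPlugin& plugin) {
    android::String8 value;
    plugin.getPropertyString(android::String8("vendor"), value);   // OK, value == "Google"
    plugin.getPropertyString(android::String8("bogus"), value);    // BAD_VALUE, unknown key

    // Reserved keys (algorithms, description, vendor, version) reject writes.
    plugin.setPropertyString(android::String8("version"),
                             android::String8("2.0"));             // BAD_VALUE

    // Non-reserved keys are added to mStringProperties and become readable.
    plugin.setPropertyString(android::String8("customKey"),
                             android::String8("customValue"));     // OK

    // deviceId is likewise immutable for byte-array properties.
    plugin.setPropertyByteArray(android::String8("deviceId"),
                                android::Vector<uint8_t>());       // BAD_VALUE
}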
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
index f37a706..62bc86f 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
@@ -137,25 +137,13 @@
             const String8& name, String8& value) const;
 
     virtual status_t getPropertyByteArray(
-            const String8& name, Vector<uint8_t>& value) const {
-        UNUSED(name);
-        UNUSED(value);
-        return android::ERROR_DRM_CANNOT_HANDLE;
-    }
+            const String8& name, Vector<uint8_t>& value) const;
 
     virtual status_t setPropertyString(
-            const String8& name, const String8& value) {
-        UNUSED(name);
-        UNUSED(value);
-        return android::ERROR_DRM_CANNOT_HANDLE;
-    }
+            const String8& name, const String8& value);
 
     virtual status_t setPropertyByteArray(
-            const String8& name, const Vector<uint8_t>& value) {
-        UNUSED(name);
-        UNUSED(value);
-        return android::ERROR_DRM_CANNOT_HANDLE;
-    }
+            const String8& name, const Vector<uint8_t>& value);
 
     virtual status_t setCipherAlgorithm(
             const Vector<uint8_t>& sessionId, const String8& algorithm) {
@@ -242,9 +230,13 @@
     }
 
 private:
+    void initProperties();
     void setPlayPolicy();
 
-    android::KeyedVector<android::String8, android::String8> mPlayPolicy;
+    android::KeyedVector<String8, String8> mPlayPolicy;
+    android::KeyedVector<String8, String8> mStringProperties;
+    android::KeyedVector<String8, Vector<uint8_t>> mByteArrayProperties;
+
     SessionLibrary* mSessionLibrary;
 
     DISALLOW_EVIL_CONSTRUCTORS(DrmPlugin);
diff --git a/media/libaaudio/examples/input_monitor/Android.bp b/media/libaaudio/examples/input_monitor/Android.bp
index 2c3418d..d8c5843 100644
--- a/media/libaaudio/examples/input_monitor/Android.bp
+++ b/media/libaaudio/examples/input_monitor/Android.bp
@@ -2,6 +2,7 @@
     name: "input_monitor",
     gtest: false,
     srcs: ["src/input_monitor.cpp"],
+    cflags: ["-Wall", "-Werror"],
     shared_libs: ["libaaudio"],
     header_libs: ["libaaudio_example_utils"],
 }
@@ -10,6 +11,7 @@
     name: "input_monitor_callback",
     gtest: false,
     srcs: ["src/input_monitor_callback.cpp"],
+    cflags: ["-Wall", "-Werror"],
     shared_libs: ["libaaudio"],
     header_libs: ["libaaudio_example_utils"],
 }
diff --git a/media/libaaudio/examples/loopback/Android.bp b/media/libaaudio/examples/loopback/Android.bp
index 2b624a8..fa8fdc9 100644
--- a/media/libaaudio/examples/loopback/Android.bp
+++ b/media/libaaudio/examples/loopback/Android.bp
@@ -2,6 +2,7 @@
     name: "aaudio_loopback",
     gtest: false,
     srcs: ["src/loopback.cpp"],
+    cflags: ["-Wall", "-Werror"],
     shared_libs: ["libaaudio"],
     header_libs: ["libaaudio_example_utils"],
 }
diff --git a/media/libaaudio/examples/write_sine/Android.bp b/media/libaaudio/examples/write_sine/Android.bp
index f162e85..aa25e67 100644
--- a/media/libaaudio/examples/write_sine/Android.bp
+++ b/media/libaaudio/examples/write_sine/Android.bp
@@ -1,6 +1,7 @@
 cc_test {
     name: "write_sine",
     srcs: ["src/write_sine.cpp"],
+    cflags: ["-Wall", "-Werror"],
     shared_libs: ["libaaudio"],
     header_libs: ["libaaudio_example_utils"],
 }
@@ -8,6 +9,7 @@
 cc_test {
     name: "write_sine_callback",
     srcs: ["src/write_sine_callback.cpp"],
+    cflags: ["-Wall", "-Werror"],
     shared_libs: ["libaaudio"],
     header_libs: ["libaaudio_example_utils"],
 }
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 2450920..fc5830a 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -25,6 +25,7 @@
 
 #include "aaudio/AAudio.h"
 #include <aaudio/AAudioTesting.h>
+#include <math.h>
 
 #include "utility/AAudioUtilities.h"
 
@@ -50,44 +51,10 @@
     return size;
 }
 
-
 // TODO expose and call clamp16_from_float function in primitives.h
 static inline int16_t clamp16_from_float(float f) {
-    /* Offset is used to expand the valid range of [-1.0, 1.0) into the 16 lsbs of the
-     * floating point significand. The normal shift is 3<<22, but the -15 offset
-     * is used to multiply by 32768.
-     */
-    static const float offset = (float)(3 << (22 - 15));
-    /* zero = (0x10f << 22) =  0x43c00000 (not directly used) */
-    static const int32_t limneg = (0x10f << 22) /*zero*/ - 32768; /* 0x43bf8000 */
-    static const int32_t limpos = (0x10f << 22) /*zero*/ + 32767; /* 0x43c07fff */
-
-    union {
-        float f;
-        int32_t i;
-    } u;
-
-    u.f = f + offset; /* recenter valid range */
-    /* Now the valid range is represented as integers between [limneg, limpos].
-     * Clamp using the fact that float representation (as an integer) is an ordered set.
-     */
-    if (u.i < limneg)
-        u.i = -32768;
-    else if (u.i > limpos)
-        u.i = 32767;
-    return u.i; /* Return lower 16 bits, the part of interest in the significand. */
-}
-
-// Same but without clipping.
-// Convert -1.0f to +1.0f to -32768 to +32767
-static inline int16_t floatToInt16(float f) {
-    static const float offset = (float)(3 << (22 - 15));
-    union {
-        float f;
-        int32_t i;
-    } u;
-    u.f = f + offset; /* recenter valid range */
-    return u.i; /* Return lower 16 bits, the part of interest in the significand. */
+    static const float scale = 1 << 15;
+    return (int16_t) roundf(fmaxf(fminf(f * scale, scale - 1.f), -scale));
 }
 
 static float clipAndClampFloatToPcm16(float sample, float scaler) {
@@ -188,13 +155,14 @@
                        int32_t samplesPerFrame,
                        float amplitude1,
                        float amplitude2) {
-    float scaler = amplitude1 / SHORT_SCALE;
-    float delta = (amplitude2 - amplitude1) / (SHORT_SCALE * (float) numFrames);
+    // Because we are converting from int16 to int16, we do not have to scale by 1/32768.
+    float scaler = amplitude1;
+    float delta = (amplitude2 - amplitude1) / numFrames;
     for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
         for (int sampleIndex = 0; sampleIndex < samplesPerFrame; sampleIndex++) {
             // No need to clip because int16_t range is inherently limited.
             float sample =  *source++ * scaler;
-            *destination++ =  floatToInt16(sample);
+            *destination++ = (int16_t) roundf(sample);
         }
         scaler += delta;
     }
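
The new clamp16_from_float drops the float-bit-pattern union trick in favor of scale, clamp, and roundf. The standalone sketch below (local names, not part of this change) shows the same arithmetic and its saturation at the int16 limits:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

// Same arithmetic as the simplified clamp16_from_float(): scale by 2^15,
// clamp to [-32768, 32767], then round to the nearest integer.
static int16_t floatToPcm16(float f) {          // illustrative name
    static const float scale = 1 << 15;         // 32768.0f
    return (int16_t) roundf(fmaxf(fminf(f * scale, scale - 1.f), -scale));
}

int main() {
    const float samples[] = { 1.5f, 1.0f, 0.5f, 0.0f, -0.5f, -1.0f, -1.5f };
    for (float s : samples) {
        // 1.5 -> 32767, 1.0 -> 32767, 0.5 -> 16384, 0.0 -> 0,
        // -0.5 -> -16384, -1.0 -> -32768, -1.5 -> -32768
        printf("%6.2f -> %6d\n", s, floatToPcm16(s));
    }
    return 0;
}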
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 884a2b3..9f80695 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -111,3 +111,16 @@
         "libutils",
     ],
 }
+
+cc_test {
+    name: "test_aaudio_monkey",
+    defaults: ["libaaudio_tests_defaults"],
+    srcs: ["test_aaudio_monkey.cpp"],
+    header_libs: ["libaaudio_example_utils"],
+    shared_libs: [
+        "libaaudio",
+        "libbinder",
+        "libcutils",
+        "libutils",
+    ],
+}
diff --git a/media/libaaudio/tests/test_aaudio_monkey.cpp b/media/libaaudio/tests/test_aaudio_monkey.cpp
new file mode 100644
index 0000000..be54835
--- /dev/null
+++ b/media/libaaudio/tests/test_aaudio_monkey.cpp
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Try to trigger bugs by playing randomly on multiple streams.
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <vector>
+
+#include <aaudio/AAudio.h>
+#include "AAudioArgsParser.h"
+#include "AAudioExampleUtils.h"
+#include "AAudioSimplePlayer.h"
+#include "SineGenerator.h"
+
+#define DEFAULT_TIMEOUT_NANOS  (1 * NANOS_PER_SECOND)
+
+#define NUM_LOOPS          1000
+#define MAX_MICROS_DELAY   (2 * 1000 * 1000)
+
+// TODO Consider adding an input stream.
+#define PROB_START   (0.20)
+#define PROB_PAUSE   (PROB_START + 0.10)
+#define PROB_FLUSH   (PROB_PAUSE + 0.10)
+#define PROB_STOP    (PROB_FLUSH + 0.10)
+#define PROB_CLOSE   (PROB_STOP + 0.10)
+static_assert(PROB_CLOSE < 0.9, "Probability sum too high.");
+
+aaudio_data_callback_result_t AAudioMonkeyDataCallback(
+        AAudioStream *stream,
+        void *userData,
+        void *audioData,
+        int32_t numFrames);
+
+void AAudioMonkeyErrorCallbackProc(
+        AAudioStream *stream __unused,
+        void *userData __unused,
+        aaudio_result_t error) {
+    printf("Error Callback, error: %d\n",(int)error);
+}
+
+// This function is not thread safe. Only use this from a single thread.
+double nextRandomDouble() {
+    return drand48();
+}
+
+class AAudioMonkey : public AAudioSimplePlayer {
+public:
+
+    AAudioMonkey(int index, AAudioArgsParser *argParser)
+            : mArgParser(argParser)
+            , mIndex(index) {}
+
+    aaudio_result_t open() {
+        printf("Monkey # %d ---------------------------------------------- OPEN\n", mIndex);
+        double offset = mIndex * 50;
+        mSine1.setup(440.0, 48000);
+        mSine1.setSweep(300.0 + offset, 600.0 + offset, 5.0);
+        mSine2.setup(660.0, 48000);
+        mSine2.setSweep(350.0 + offset, 900.0 + offset, 7.0);
+
+        aaudio_result_t result = AAudioSimplePlayer::open(*mArgParser,
+                                      AAudioMonkeyDataCallback,
+                                      AAudioMonkeyErrorCallbackProc,
+                                      this);
+        if (result != AAUDIO_OK) {
+            printf("ERROR -  player.open() returned %d\n", result);
+        }
+
+        mArgParser->compareWithStream(getStream());
+        return result;
+    }
+
+    bool isOpen() {
+        return (getStream() != nullptr);
+    }
+
+    /**
+     *
+     * @return true if stream passes tests
+     */
+    bool validate() {
+        if (!isOpen()) return true; // closed is OK
+
+        // update and query stream state
+        aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+        aaudio_result_t result = AAudioStream_waitForStateChange(getStream(),
+            AAUDIO_STREAM_STATE_UNKNOWN, &state, 0);
+        if (result != AAUDIO_OK) {
+            printf("ERROR - AAudioStream_waitForStateChange returned %d\n", result);
+            return false;
+        }
+
+        int64_t framesRead = AAudioStream_getFramesRead(getStream());
+        int64_t framesWritten = AAudioStream_getFramesWritten(getStream());
+        int32_t xRuns = AAudioStream_getXRunCount(getStream());
+        // Print status
+        printf("%30s, framesWritten = %8lld, framesRead = %8lld, xRuns = %d\n",
+               AAudio_convertStreamStateToText(state),
+               (long long) framesWritten,
+               (long long) framesRead,
+               xRuns);
+
+        if (framesWritten < framesRead) {
+            printf("WARNING - UNDERFLOW - diff = %d !!!!!!!!!!!!\n",
+                   (int) (framesWritten - framesRead));
+        }
+        return true;
+    }
+
+    aaudio_result_t invoke() {
+        aaudio_result_t result = AAUDIO_OK;
+        if (!isOpen()) {
+            result = open();
+            if (result != AAUDIO_OK) return result;
+        }
+
+        if (!validate()) {
+            return -1;
+        }
+
+        double dice = nextRandomDouble();
+        // Select an action based on a weighted probability.
+        if (dice < PROB_START) {
+            printf("start\n");
+            result = AAudioStream_requestStart(getStream());
+        } else if (dice < PROB_PAUSE) {
+            printf("pause\n");
+            result = AAudioStream_requestPause(getStream());
+        } else if (dice < PROB_FLUSH) {
+            printf("flush\n");
+            result = AAudioStream_requestFlush(getStream());
+        } else if (dice < PROB_STOP) {
+            printf("stop\n");
+            result = AAudioStream_requestStop(getStream());
+        } else if (dice < PROB_CLOSE) {
+            printf("close\n");
+            result = close();
+        } else {
+            printf("do nothing\n");
+        }
+
+        if (result == AAUDIO_ERROR_INVALID_STATE) {
+            printf("    got AAUDIO_ERROR_INVALID_STATE - expected from a monkey\n");
+            result = AAUDIO_OK;
+        }
+        if (result == AAUDIO_OK && isOpen()) {
+            if (!validate()) {
+                result = -1;
+            }
+        }
+        return result;
+    }
+
+    aaudio_data_callback_result_t renderAudio(
+            AAudioStream *stream,
+            void *audioData,
+            int32_t numFrames) {
+
+        int32_t samplesPerFrame = AAudioStream_getChannelCount(stream);
+        // This code only plays on the first one or two channels.
+        // TODO Support arbitrary number of channels.
+        switch (AAudioStream_getFormat(stream)) {
+            case AAUDIO_FORMAT_PCM_I16: {
+                int16_t *audioBuffer = (int16_t *) audioData;
+                // Render sine waves as shorts to first channel.
+                mSine1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+                // Render sine waves to second channel if there is one.
+                if (samplesPerFrame > 1) {
+                    mSine2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+                }
+            }
+                break;
+            case AAUDIO_FORMAT_PCM_FLOAT: {
+                float *audioBuffer = (float *) audioData;
+                // Render sine waves as floats to first channel.
+                mSine1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+                // Render sine waves to second channel if there is one.
+                if (samplesPerFrame > 1) {
+                    mSine2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+                }
+            }
+                break;
+            default:
+                return AAUDIO_CALLBACK_RESULT_STOP;
+        }
+        return AAUDIO_CALLBACK_RESULT_CONTINUE;
+    }
+
+private:
+    const AAudioArgsParser  *mArgParser;
+    const int                mIndex;
+    SineGenerator            mSine1;
+    SineGenerator            mSine2;
+};
+
+// Callback function that fills the audio output buffer.
+aaudio_data_callback_result_t AAudioMonkeyDataCallback(
+        AAudioStream *stream,
+        void *userData,
+        void *audioData,
+        int32_t numFrames
+) {
+    // should not happen but just in case...
+    if (userData == nullptr) {
+        printf("ERROR - AAudioMonkeyDataCallback needs userData\n");
+        return AAUDIO_CALLBACK_RESULT_STOP;
+    }
+    AAudioMonkey *monkey = (AAudioMonkey *) userData;
+    return monkey->renderAudio(stream, audioData, numFrames);
+}
+
+
+static void usage() {
+    AAudioArgsParser::usage();
+    printf("      -i{seed}  Initial random seed\n");
+    printf("      -t{count} number of monkeys in the Troop\n");
+}
+
+int main(int argc, const char **argv) {
+    AAudioArgsParser argParser;
+    std::vector<AAudioMonkey> monkeys;
+    aaudio_result_t result;
+    int numMonkeys = 1;
+
+    // Make printf print immediately so that debug info is not stuck
+    // in a buffer if we hang or crash.
+    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+    printf("%s - Monkeys\n", argv[0]);
+
+    long int seed = (long int)getNanoseconds();  // different every time by default
+
+    for (int i = 1; i < argc; i++) {
+        const char *arg = argv[i];
+        if (argParser.parseArg(arg)) {
+            // Handle options that are not handled by the ArgParser
+            if (arg[0] == '-') {
+                char option = arg[1];
+                switch (option) {
+                    case 'i':
+                        seed = atol(&arg[2]);
+                        break;
+                    case 't':
+                        numMonkeys = atoi(&arg[2]);
+                        break;
+                    default:
+                        usage();
+                        exit(EXIT_FAILURE);
+                        break;
+                }
+            } else {
+                usage();
+                exit(EXIT_FAILURE);
+                break;
+            }
+        }
+    }
+
+    srand48(seed);
+    printf("seed = %ld, nextRandomDouble() = %f\n", seed, nextRandomDouble());
+
+    for (int m = 0; m < numMonkeys; m++) {
+        monkeys.emplace_back(m, &argParser);
+    }
+
+    for (int i = 0; i < NUM_LOOPS; i++) {
+        // pick a random monkey and invoke it
+        double dice = nextRandomDouble();
+        int monkeyIndex = floor(dice * numMonkeys);
+        printf("----------- Monkey #%d\n", monkeyIndex);
+        result = monkeys[monkeyIndex].invoke();
+        if (result != AAUDIO_OK) {
+            goto error;
+        }
+
+        // sleep some random time
+        dice = nextRandomDouble();
+        dice = dice * dice * dice; // skew towards smaller delays
+        int micros = (int) (dice * MAX_MICROS_DELAY);
+        usleep(micros);
+
+        // TODO consider making this multi-threaded, one thread per monkey, to catch more bugs
+    }
+
+    printf("PASS\n");
+    return EXIT_SUCCESS;
+
+error:
+    printf("FAIL - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+    usleep(1000 * 1000); // give me time to stop the logcat
+    return EXIT_FAILURE;
+}
+
diff --git a/media/libaaudio/tests/test_linear_ramp.cpp b/media/libaaudio/tests/test_linear_ramp.cpp
index 5c53982..93226ba 100644
--- a/media/libaaudio/tests/test_linear_ramp.cpp
+++ b/media/libaaudio/tests/test_linear_ramp.cpp
@@ -15,13 +15,13 @@
  */
 
 #include <iostream>
+#include <math.h>
 
 #include <gtest/gtest.h>
 
 #include "utility/AAudioUtilities.h"
 #include "utility/LinearRamp.h"
 
-
 TEST(test_linear_ramp, linear_ramp_segments) {
     LinearRamp ramp;
     const float source[4] = {1.0f, 1.0f, 1.0f, 1.0f };
@@ -32,40 +32,40 @@
     ramp.setLengthInFrames(8);
     ramp.setTarget(8.0f);
 
-    ASSERT_EQ(8, ramp.getLengthInFrames());
+    EXPECT_EQ(8, ramp.getLengthInFrames());
 
     bool ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
-    ASSERT_EQ(1, ramping);
-    ASSERT_EQ(0.0f, levelFrom);
-    ASSERT_EQ(4.0f, levelTo);
+    EXPECT_EQ(1, ramping);
+    EXPECT_EQ(0.0f, levelFrom);
+    EXPECT_EQ(4.0f, levelTo);
 
     AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
-    ASSERT_EQ(0.0f, destination[0]);
-    ASSERT_EQ(1.0f, destination[1]);
-    ASSERT_EQ(2.0f, destination[2]);
-    ASSERT_EQ(3.0f, destination[3]);
+    EXPECT_EQ(0.0f, destination[0]);
+    EXPECT_EQ(1.0f, destination[1]);
+    EXPECT_EQ(2.0f, destination[2]);
+    EXPECT_EQ(3.0f, destination[3]);
 
     ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
-    ASSERT_EQ(1, ramping);
-    ASSERT_EQ(4.0f, levelFrom);
-    ASSERT_EQ(8.0f, levelTo);
+    EXPECT_EQ(1, ramping);
+    EXPECT_EQ(4.0f, levelFrom);
+    EXPECT_EQ(8.0f, levelTo);
 
     AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
-    ASSERT_EQ(4.0f, destination[0]);
-    ASSERT_EQ(5.0f, destination[1]);
-    ASSERT_EQ(6.0f, destination[2]);
-    ASSERT_EQ(7.0f, destination[3]);
+    EXPECT_EQ(4.0f, destination[0]);
+    EXPECT_EQ(5.0f, destination[1]);
+    EXPECT_EQ(6.0f, destination[2]);
+    EXPECT_EQ(7.0f, destination[3]);
 
     ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
-    ASSERT_EQ(0, ramping);
-    ASSERT_EQ(8.0f, levelFrom);
-    ASSERT_EQ(8.0f, levelTo);
+    EXPECT_EQ(0, ramping);
+    EXPECT_EQ(8.0f, levelFrom);
+    EXPECT_EQ(8.0f, levelTo);
 
     AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
-    ASSERT_EQ(8.0f, destination[0]);
-    ASSERT_EQ(8.0f, destination[1]);
-    ASSERT_EQ(8.0f, destination[2]);
-    ASSERT_EQ(8.0f, destination[3]);
+    EXPECT_EQ(8.0f, destination[0]);
+    EXPECT_EQ(8.0f, destination[1]);
+    EXPECT_EQ(8.0f, destination[2]);
+    EXPECT_EQ(8.0f, destination[3]);
 
 };
 
@@ -80,29 +80,101 @@
     ramp.setLengthInFrames(4);
     ramp.setTarget(8.0f);
     ramp.forceCurrent(4.0f);
-    ASSERT_EQ(4.0f, ramp.getCurrent());
+    EXPECT_EQ(4.0f, ramp.getCurrent());
 
     bool ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
-    ASSERT_EQ(1, ramping);
-    ASSERT_EQ(4.0f, levelFrom);
-    ASSERT_EQ(8.0f, levelTo);
+    EXPECT_EQ(1, ramping);
+    EXPECT_EQ(4.0f, levelFrom);
+    EXPECT_EQ(8.0f, levelTo);
 
     AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
-    ASSERT_EQ(4.0f, destination[0]);
-    ASSERT_EQ(5.0f, destination[1]);
-    ASSERT_EQ(6.0f, destination[2]);
-    ASSERT_EQ(7.0f, destination[3]);
+    EXPECT_EQ(4.0f, destination[0]);
+    EXPECT_EQ(5.0f, destination[1]);
+    EXPECT_EQ(6.0f, destination[2]);
+    EXPECT_EQ(7.0f, destination[3]);
 
     ramping = ramp.nextSegment(4, &levelFrom, &levelTo);
-    ASSERT_EQ(0, ramping);
-    ASSERT_EQ(8.0f, levelFrom);
-    ASSERT_EQ(8.0f, levelTo);
+    EXPECT_EQ(0, ramping);
+    EXPECT_EQ(8.0f, levelFrom);
+    EXPECT_EQ(8.0f, levelTo);
 
     AAudio_linearRamp(source, destination, 4, 1, levelFrom, levelTo);
-    ASSERT_EQ(8.0f, destination[0]);
-    ASSERT_EQ(8.0f, destination[1]);
-    ASSERT_EQ(8.0f, destination[2]);
-    ASSERT_EQ(8.0f, destination[3]);
+    EXPECT_EQ(8.0f, destination[0]);
+    EXPECT_EQ(8.0f, destination[1]);
+    EXPECT_EQ(8.0f, destination[2]);
+    EXPECT_EQ(8.0f, destination[3]);
 
 };
 
+constexpr int16_t kMaxI16 = INT16_MAX;
+constexpr int16_t kMinI16 = INT16_MIN;
+constexpr int16_t kHalfI16 = 16384;
+constexpr int16_t kTenthI16 = 3277;
+
+//void AAudioConvert_floatToPcm16(const float *source,
+//                                int16_t *destination,
+//                                int32_t numSamples,
+//                                float amplitude);
+TEST(test_linear_ramp, float_to_i16) {
+    const float source[] = {12345.6f, 1.0f, 0.5f, 0.1f, 0.0f, -0.1f, -0.5f, -1.0f, -12345.6f};
+    constexpr size_t count = sizeof(source) / sizeof(source[0]);
+    int16_t destination[count];
+    const int16_t expected[count] = {kMaxI16, kMaxI16, kHalfI16, kTenthI16, 0,
+                                     -kTenthI16, -kHalfI16, kMinI16, kMinI16};
+
+    AAudioConvert_floatToPcm16(source, destination, count, 1.0f);
+    for (size_t i = 0; i < count; i++) {
+        EXPECT_EQ(expected[i], destination[i]);
+    }
+
+}
+
+//void AAudioConvert_pcm16ToFloat(const int16_t *source,
+//                                float *destination,
+//                                int32_t numSamples,
+//                                float amplitude);
+TEST(test_linear_ramp, i16_to_float) {
+    const int16_t source[] = {kMaxI16, kHalfI16, kTenthI16, 0,
+                              -kTenthI16, -kHalfI16, kMinI16};
+    constexpr size_t count = sizeof(source) / sizeof(source[0]);
+    float destination[count];
+    const float expected[count] = {(32767.0f / 32768.0f), 0.5f, 0.1f, 0.0f, -0.1f, -0.5f, -1.0f};
+
+    AAudioConvert_pcm16ToFloat(source, destination, count, 1.0f);
+    for (size_t i = 0; i < count; i++) {
+        EXPECT_NEAR(expected[i], destination[i], 0.0001f);
+    }
+
+}
+
+//void AAudio_linearRamp(const int16_t *source,
+//                       int16_t *destination,
+//                       int32_t numFrames,
+//                       int32_t samplesPerFrame,
+//                       float amplitude1,
+//                       float amplitude2);
+TEST(test_linear_ramp, ramp_i16_to_i16) {
+    const int16_t source[] = {1, 1, 1, 1, 1, 1, 1, 1};
+    constexpr size_t count = sizeof(source) / sizeof(source[0]);
+    int16_t destination[count];
+    // Ramp will sweep from -1 to almost +1
+    const int16_t expected[count] = {
+            -1, // from -1.00
+            -1, // from -0.75
+            -1, // from -0.50, round away from zero
+            0,  // from -0.25, round up to zero
+            0,  // from  0.00
+            0,  // from  0.25, round down to zero
+            1,  // from  0.50, round away from zero
+            1   // from  0.75
+    };
+
+    // sweep across zero to test symmetry
+    constexpr float amplitude1 = -1.0;
+    constexpr float amplitude2 = 1.0;
+    AAudio_linearRamp(source, destination, count, 1, amplitude1, amplitude2);
+    for (size_t i = 0; i < count; i++) {
+        EXPECT_EQ(expected[i], destination[i]);
+    }
+
+}
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 98e8d95..bedde43 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -20,7 +20,7 @@
         // The headers for these interfaces will be available to any modules that
         // include libaudioclient, at the path "aidl/package/path/BnFoo.h"
         "aidl/android/media/IAudioRecord.aidl",
-        "aidl/android/media/IPlayer.aidl",
+        ":libaudioclient_aidl",
 
         "AudioEffect.cpp",
         "AudioPolicy.cpp",
@@ -70,3 +70,11 @@
         ],
     },
 }
+
+// AIDL interface between libaudioclient and framework.jar
+filegroup {
+    name: "libaudioclient_aidl",
+    srcs: [
+        "aidl/android/media/IPlayer.aidl",
+    ],
+}
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 2432cac..741d084 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -69,8 +69,7 @@
     : mActive(false), mStatus(NO_INIT), mOpPackageName(opPackageName),
       mSessionId(AUDIO_SESSION_ALLOCATE),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
-      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE), mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
-      mPortId(AUDIO_PORT_HANDLE_NONE)
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE), mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE)
 {
 }
 
@@ -97,10 +96,9 @@
       mSessionId(AUDIO_SESSION_ALLOCATE),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
-      mProxy(NULL),
-      mPortId(AUDIO_PORT_HANDLE_NONE)
+      mProxy(NULL)
 {
-    mStatus = set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
+    (void)set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
             notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
             uid, pid, pAttributes, selectedDeviceId);
 }
@@ -151,6 +149,11 @@
         const audio_attributes_t* pAttributes,
         audio_port_handle_t selectedDeviceId)
 {
+    status_t status = NO_ERROR;
+    uint32_t channelCount;
+    pid_t callingPid;
+    pid_t myPid;
+
     ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
           "notificationFrames %u, sessionId %d, transferType %d, flags %#x, opPackageName %s "
           "uid %d, pid %d",
@@ -170,7 +173,8 @@
     case TRANSFER_CALLBACK:
         if (cbf == NULL) {
             ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
-            return BAD_VALUE;
+            status = BAD_VALUE;
+            goto exit;
         }
         break;
     case TRANSFER_OBTAIN:
@@ -178,14 +182,16 @@
         break;
     default:
         ALOGE("Invalid transfer type %d", transferType);
-        return BAD_VALUE;
+        status = BAD_VALUE;
+        goto exit;
     }
     mTransfer = transferType;
 
     // invariant that mAudioRecord != 0 is true only after set() returns successfully
     if (mAudioRecord != 0) {
         ALOGE("Track already in use");
-        return INVALID_OPERATION;
+        status = INVALID_OPERATION;
+        goto exit;
     }
 
     if (pAttributes == NULL) {
@@ -209,16 +215,18 @@
     // AudioFlinger capture only supports linear PCM
     if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
         ALOGE("Format %#x is not linear pcm", format);
-        return BAD_VALUE;
+        status = BAD_VALUE;
+        goto exit;
     }
     mFormat = format;
 
     if (!audio_is_input_channel(channelMask)) {
         ALOGE("Invalid channel mask %#x", channelMask);
-        return BAD_VALUE;
+        status = BAD_VALUE;
+        goto exit;
     }
     mChannelMask = channelMask;
-    uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
+    channelCount = audio_channel_count_from_in_mask(channelMask);
     mChannelCount = channelCount;
 
     if (audio_is_linear_pcm(format)) {
@@ -227,28 +235,24 @@
         mFrameSize = sizeof(uint8_t);
     }
 
-    // mFrameCount is initialized in openRecord_l
+    // mFrameCount is initialized in createRecord_l
     mReqFrameCount = frameCount;
 
     mNotificationFramesReq = notificationFrames;
-    // mNotificationFramesAct is initialized in openRecord_l
+    // mNotificationFramesAct is initialized in createRecord_l
 
-    if (sessionId == AUDIO_SESSION_ALLOCATE) {
-        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
-    } else {
-        mSessionId = sessionId;
-    }
+    mSessionId = sessionId;
     ALOGV("set(): mSessionId %d", mSessionId);
 
-    int callingpid = IPCThreadState::self()->getCallingPid();
-    int mypid = getpid();
-    if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
+    callingPid = IPCThreadState::self()->getCallingPid();
+    myPid = getpid();
+    if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
         mClientUid = IPCThreadState::self()->getCallingUid();
     } else {
         mClientUid = uid;
     }
-    if (pid == -1 || (callingpid != mypid)) {
-        mClientPid = callingpid;
+    if (pid == -1 || (callingPid != myPid)) {
+        mClientPid = callingPid;
     } else {
         mClientPid = pid;
     }
@@ -263,7 +267,7 @@
     }
 
     // create the IAudioRecord
-    status_t status = openRecord_l(0 /*epoch*/, mOpPackageName);
+    status = createRecord_l(0 /*epoch*/, mOpPackageName);
 
     if (status != NO_ERROR) {
         if (mAudioRecordThread != 0) {
@@ -271,10 +275,9 @@
             mAudioRecordThread->requestExitAndWait();
             mAudioRecordThread.clear();
         }
-        return status;
+        goto exit;
     }
 
-    mStatus = NO_ERROR;
     mUserData = user;
     // TODO: add audio hardware input latency here
     mLatency = (1000LL * mFrameCount) / mSampleRate;
@@ -289,7 +292,9 @@
     mFramesRead = 0;
     mFramesReadServerOffset = 0;
 
-    return NO_ERROR;
+exit:
+    mStatus = status;
+    return status;
 }
 
 // -------------------------------------------------------------------------
@@ -540,70 +545,29 @@
 }
 
 // must be called with mLock held
-status_t AudioRecord::openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName)
+status_t AudioRecord::createRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName)
 {
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
+    IAudioFlinger::CreateRecordInput input;
+    IAudioFlinger::CreateRecordOutput output;
+    audio_session_t originalSessionId;
+    sp<media::IAudioRecord> record;
+    void *iMemPointer;
+    audio_track_cblk_t* cblk;
+    status_t status;
+
     if (audioFlinger == 0) {
         ALOGE("Could not get audioflinger");
-        return NO_INIT;
+        status = NO_INIT;
+        goto exit;
     }
 
-    audio_io_handle_t input;
-
     // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
     // After fast request is denied, we will request again if IAudioRecord is re-created.
 
-    status_t status;
-
-    // Not a conventional loop, but a retry loop for at most two iterations total.
-    // Try first maybe with FAST flag then try again without FAST flag if that fails.
-    // Exits loop normally via a return at the bottom, or with error via a break.
-    // The sp<> references will be dropped when re-entering scope.
-    // The lack of indentation is deliberate, to reduce code churn and ease merges.
-    for (;;) {
-    audio_config_base_t config  = {
-            .sample_rate = mSampleRate,
-            .channel_mask = mChannelMask,
-            .format = mFormat
-        };
-    mRoutedDeviceId = mSelectedDeviceId;
-    status = AudioSystem::getInputForAttr(&mAttributes, &input,
-                                        mSessionId,
-                                        // FIXME compare to AudioTrack
-                                        mClientPid,
-                                        mClientUid,
-                                        &config,
-                                        mFlags, &mRoutedDeviceId, &mPortId);
-
-    if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE) {
-        ALOGE("Could not get audio input for session %d, record source %d, sample rate %u, "
-              "format %#x, channel mask %#x, flags %#x",
-              mSessionId, mAttributes.source, mSampleRate, mFormat, mChannelMask, mFlags);
-        return BAD_VALUE;
-    }
-
     // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
     // we must release it ourselves if anything goes wrong.
 
-#if 0
-    size_t afFrameCount;
-    status = AudioSystem::getFrameCount(input, &afFrameCount);
-    if (status != NO_ERROR) {
-        ALOGE("getFrameCount(input=%d) status %d", input, status);
-        break;
-    }
-#endif
-
-    uint32_t afSampleRate;
-    status = AudioSystem::getSamplingRate(input, &afSampleRate);
-    if (status != NO_ERROR) {
-        ALOGE("getSamplingRate(input=%d) status %d", input, status);
-        break;
-    }
-    if (mSampleRate == 0) {
-        mSampleRate = afSampleRate;
-    }
-
     // Client can only express a preference for FAST.  Server will perform additional tests.
     if (mFlags & AUDIO_INPUT_FLAG_FAST) {
         bool useCaseAllowed =
@@ -622,66 +586,41 @@
         if (!useCaseAllowed) {
             ALOGW("AUDIO_INPUT_FLAG_FAST denied, incompatible transfer = %s",
                   convertTransferToText(mTransfer));
-        }
-
-        // sample rates must also match
-        bool sampleRateAllowed = mSampleRate == afSampleRate;
-        if (!sampleRateAllowed) {
-            ALOGW("AUDIO_INPUT_FLAG_FAST denied, rates do not match %u Hz, require %u Hz",
-                  mSampleRate, afSampleRate);
-        }
-
-        bool fastAllowed = useCaseAllowed && sampleRateAllowed;
-        if (!fastAllowed) {
             mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
                     AUDIO_INPUT_FLAG_RAW));
-            AudioSystem::releaseInput(input, mSessionId);
-            continue;   // retry
         }
     }
 
-    // The notification frame count is the period between callbacks, as suggested by the client
-    // but moderated by the server.  For record, the calculations are done entirely on server side.
-    size_t notificationFrames = mNotificationFramesReq;
-    size_t frameCount = mReqFrameCount;
-
-    audio_input_flags_t flags = mFlags;
-
-    pid_t tid = -1;
+    input.attr = mAttributes;
+    input.config.sample_rate = mSampleRate;
+    input.config.channel_mask = mChannelMask;
+    input.config.format = mFormat;
+    input.clientInfo.clientUid = mClientUid;
+    input.clientInfo.clientPid = mClientPid;
+    input.clientInfo.clientTid = -1;
     if (mFlags & AUDIO_INPUT_FLAG_FAST) {
         if (mAudioRecordThread != 0) {
-            tid = mAudioRecordThread->getTid();
+            input.clientInfo.clientTid = mAudioRecordThread->getTid();
         }
     }
+    input.opPackageName = opPackageName;
 
-    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
-                                // but we will still need the original value also
-    audio_session_t originalSessionId = mSessionId;
+    input.flags = mFlags;
+    // The notification frame count is the period between callbacks, as suggested by the client
+    // but moderated by the server.  For record, the calculations are done entirely on server side.
+    input.frameCount = mReqFrameCount;
+    input.notificationFrameCount = mNotificationFramesReq;
+    input.selectedDeviceId = mSelectedDeviceId;
+    input.sessionId = mSessionId;
+    originalSessionId = mSessionId;
 
-    sp<IMemory> iMem;           // for cblk
-    sp<IMemory> bufferMem;
-    sp<media::IAudioRecord> record = audioFlinger->openRecord(input,
-                                                              mSampleRate,
-                                                              mFormat,
-                                                              mChannelMask,
-                                                              opPackageName,
-                                                              &temp,
-                                                              &flags,
-                                                              mClientPid,
-                                                              tid,
-                                                              mClientUid,
-                                                              &mSessionId,
-                                                              &notificationFrames,
-                                                              iMem,
-                                                              bufferMem,
-                                                              &status,
-                                                              mPortId);
-    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
-            "session ID changed from %d to %d", originalSessionId, mSessionId);
+    record = audioFlinger->createRecord(input,
+                                        output,
+                                        &status);
 
     if (status != NO_ERROR) {
         ALOGE("AudioFlinger could not create record track, status: %d", status);
-        break;
+        goto exit;
     }
     ALOG_ASSERT(record != 0);
 
@@ -689,41 +628,41 @@
     // so we are no longer responsible for releasing it.
 
     mAwaitBoost = false;
-    if (mFlags & AUDIO_INPUT_FLAG_FAST) {
-        if (flags & AUDIO_INPUT_FLAG_FAST) {
-            ALOGI("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu -> %zu", frameCount, temp);
-            mAwaitBoost = true;
-        } else {
-            ALOGW("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", frameCount, temp);
-            mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
-                    AUDIO_INPUT_FLAG_RAW));
-            continue;   // retry
-        }
+    if (output.flags & AUDIO_INPUT_FLAG_FAST) {
+        ALOGI("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu -> %zu",
+              mReqFrameCount, output.frameCount);
+        mAwaitBoost = true;
     }
-    mFlags = flags;
+    mFlags = output.flags;
+    mRoutedDeviceId = output.selectedDeviceId;
+    mSessionId = output.sessionId;
+    mSampleRate = output.sampleRate;
 
-    if (iMem == 0) {
+    if (output.cblk == 0) {
         ALOGE("Could not get control block");
-        return NO_INIT;
+        status = NO_INIT;
+        goto exit;
     }
-    void *iMemPointer = iMem->pointer();
+    iMemPointer = output.cblk->pointer();
     if (iMemPointer == NULL) {
         ALOGE("Could not get control block pointer");
-        return NO_INIT;
+        status = NO_INIT;
+        goto exit;
     }
-    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
+    cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
 
     // Starting address of buffers in shared memory.
     // The buffers are either immediately after the control block,
     // or in a separate area at discretion of server.
     void *buffers;
-    if (bufferMem == 0) {
+    if (output.buffers == 0) {
         buffers = cblk + 1;
     } else {
-        buffers = bufferMem->pointer();
+        buffers = output.buffers->pointer();
         if (buffers == NULL) {
             ALOGE("Could not get buffer pointer");
-            return NO_INIT;
+            status = NO_INIT;
+            goto exit;
         }
     }
 
@@ -733,43 +672,42 @@
         mDeathNotifier.clear();
     }
     mAudioRecord = record;
-    mCblkMemory = iMem;
-    mBufferMemory = bufferMem;
+    mCblkMemory = output.cblk;
+    mBufferMemory = output.buffers;
     IPCThreadState::self()->flushCommands();
 
     mCblk = cblk;
-    // note that temp is the (possibly revised) value of frameCount
-    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
-        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
+    // note that output.frameCount is the (possibly revised) value of mReqFrameCount
+    if (output.frameCount < mReqFrameCount || (mReqFrameCount == 0 && output.frameCount == 0)) {
+        ALOGW("Requested frameCount %zu but received frameCount %zu",
+              mReqFrameCount,  output.frameCount);
     }
-    frameCount = temp;
 
     // Make sure that application is notified with sufficient margin before overrun.
     // The computation is done on server side.
-    if (mNotificationFramesReq > 0 && notificationFrames != mNotificationFramesReq) {
+    if (mNotificationFramesReq > 0 && output.notificationFrameCount != mNotificationFramesReq) {
         ALOGW("Server adjusted notificationFrames from %u to %zu for frameCount %zu",
-                mNotificationFramesReq, notificationFrames, frameCount);
+                mNotificationFramesReq, output.notificationFrameCount, output.frameCount);
     }
-    mNotificationFramesAct = (uint32_t) notificationFrames;
-
+    mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
 
     //mInput != input includes the case where mInput == AUDIO_IO_HANDLE_NONE for first creation
-    if (mDeviceCallback != 0 && mInput != input) {
+    if (mDeviceCallback != 0 && mInput != output.inputId) {
         if (mInput != AUDIO_IO_HANDLE_NONE) {
             AudioSystem::removeAudioDeviceCallback(this, mInput);
         }
-        AudioSystem::addAudioDeviceCallback(this, input);
+        AudioSystem::addAudioDeviceCallback(this, output.inputId);
     }
 
     // We retain a copy of the I/O handle, but don't own the reference
-    mInput = input;
+    mInput = output.inputId;
     mRefreshRemaining = true;
 
-    mFrameCount = frameCount;
+    mFrameCount = output.frameCount;
     // If IAudioRecord is re-created, don't let the requested frameCount
     // decrease.  This can confuse clients that cache frameCount().
-    if (frameCount > mReqFrameCount) {
-        mReqFrameCount = frameCount;
+    if (mFrameCount > mReqFrameCount) {
+        mReqFrameCount = mFrameCount;
     }
 
     // update proxy
@@ -780,17 +718,9 @@
     mDeathNotifier = new DeathNotifier(this);
     IInterface::asBinder(mAudioRecord)->linkToDeath(mDeathNotifier, this);
 
-    return NO_ERROR;
-
-    // End of retry loop.
-    // The lack of indentation is deliberate, to reduce code churn and ease merges.
-    }
-
-// Arrive here on error, via a break
-    AudioSystem::releaseInput(input, mSessionId);
-    if (status == NO_ERROR) {
-        status = NO_INIT;
-    }
+exit:
+    mStatus = status;
+    // sp<IAudioRecord> track destructor will cause releaseInput() to be called by AudioFlinger
     return status;
 }
 
@@ -1222,12 +1152,12 @@
 
     mFlags = mOrigFlags;
 
-    // if the new IAudioRecord is created, openRecord_l() will modify the
+    // if the new IAudioRecord is created, createRecord_l() will modify the
     // following member variables: mAudioRecord, mCblkMemory, mCblk, mBufferMemory.
     // It will also delete the strong references on previous IAudioRecord and IMemory
     Modulo<uint32_t> position(mProxy->getPosition());
     mNewPosition = position + mUpdatePeriod;
-    status_t result = openRecord_l(position, mOpPackageName);
+    status_t result = createRecord_l(position, mOpPackageName);
     if (result == NO_ERROR) {
         if (mActive) {
             // callback thread or sync event hasn't changed
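
The set() and createRecord_l() rework above replaces scattered early returns with a single exit: label so that mStatus is assigned exactly once on every path, which is why the constructors can now call (void)set(...) instead of assigning its return value. A minimal sketch of the pattern with hypothetical names:

#include <utils/Errors.h>   // status_t, NO_ERROR, BAD_VALUE, INVALID_OPERATION, NO_INIT

class ExampleTrack {        // hypothetical stand-in for AudioRecord/AudioTrack
public:
    android::status_t set(bool badArgument, bool alreadyInUse) {
        android::status_t status = android::NO_ERROR;

        if (badArgument) {
            status = android::BAD_VALUE;
            goto exit;
        }
        if (alreadyInUse) {
            status = android::INVALID_OPERATION;
            goto exit;
        }
        // ... create the server-side track here; failures also jump to exit ...

exit:
        // The constructor can ignore set()'s return value; mStatus always
        // carries the result, matching the "(void)set(...)" call sites above.
        mStatus = status;
        return status;
    }

private:
    android::status_t mStatus = android::NO_INIT;
};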
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 30f97ac..c8fa618 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -197,7 +197,7 @@
       mPreviousSchedulingGroup(SP_DEFAULT),
       mPausedPosition(0)
 {
-    mStatus = set(streamType, sampleRate, format, channelMask,
+    (void)set(streamType, sampleRate, format, channelMask,
             frameCount, flags, cbf, user, notificationFrames,
             0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
             offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
@@ -228,7 +228,7 @@
       mPausedPosition(0),
       mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
 {
-    mStatus = set(streamType, sampleRate, format, channelMask,
+    (void)set(streamType, sampleRate, format, channelMask,
             0 /*frameCount*/, flags, cbf, user, notificationFrames,
             sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
             uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
@@ -284,6 +284,11 @@
         float maxRequiredSpeed,
         audio_port_handle_t selectedDeviceId)
 {
+    status_t status;
+    uint32_t channelCount;
+    pid_t callingPid;
+    pid_t myPid;
+
     ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
           "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
           streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
@@ -306,25 +311,29 @@
     case TRANSFER_CALLBACK:
         if (cbf == NULL || sharedBuffer != 0) {
             ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
-            return BAD_VALUE;
+            status = BAD_VALUE;
+            goto exit;
         }
         break;
     case TRANSFER_OBTAIN:
     case TRANSFER_SYNC:
         if (sharedBuffer != 0) {
             ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
-            return BAD_VALUE;
+            status = BAD_VALUE;
+            goto exit;
         }
         break;
     case TRANSFER_SHARED:
         if (sharedBuffer == 0) {
             ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
-            return BAD_VALUE;
+            status = BAD_VALUE;
+            goto exit;
         }
         break;
     default:
         ALOGE("Invalid transfer type %d", transferType);
-        return BAD_VALUE;
+        status = BAD_VALUE;
+        goto exit;
     }
     mSharedBuffer = sharedBuffer;
     mTransfer = transferType;
@@ -338,7 +347,8 @@
     // invariant that mAudioTrack != 0 is true only after set() returns successfully
     if (mAudioTrack != 0) {
         ALOGE("Track already in use");
-        return INVALID_OPERATION;
+        status = INVALID_OPERATION;
+        goto exit;
     }
 
     // handle default values first.
@@ -348,7 +358,8 @@
     if (pAttributes == NULL) {
         if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
             ALOGE("Invalid stream type %d", streamType);
-            return BAD_VALUE;
+            status = BAD_VALUE;
+            goto exit;
         }
         mStreamType = streamType;
 
@@ -380,16 +391,18 @@
     // validate parameters
     if (!audio_is_valid_format(format)) {
         ALOGE("Invalid format %#x", format);
-        return BAD_VALUE;
+        status = BAD_VALUE;
+        goto exit;
     }
     mFormat = format;
 
     if (!audio_is_output_channel(channelMask)) {
         ALOGE("Invalid channel mask %#x", channelMask);
-        return BAD_VALUE;
+        status = BAD_VALUE;
+        goto exit;
     }
     mChannelMask = channelMask;
-    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
+    channelCount = audio_channel_count_from_out_mask(channelMask);
     mChannelCount = channelCount;
 
     // force direct flag if format is not linear PCM
@@ -424,7 +437,8 @@
 
     // sampling rate must be specified for direct outputs
     if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
-        return BAD_VALUE;
+        status = BAD_VALUE;
+        goto exit;
     }
     mSampleRate = sampleRate;
     mOriginalSampleRate = sampleRate;
@@ -455,12 +469,14 @@
         if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
             ALOGE("notificationFrames=%d not permitted for non-fast track",
                     notificationFrames);
-            return BAD_VALUE;
+            status = BAD_VALUE;
+            goto exit;
         }
         if (frameCount > 0) {
             ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
                     notificationFrames, frameCount);
-            return BAD_VALUE;
+            status = BAD_VALUE;
+            goto exit;
         }
         mNotificationFramesReq = 0;
         const uint32_t minNotificationsPerBuffer = 1;
@@ -472,15 +488,15 @@
                 notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
     }
     mNotificationFramesAct = 0;
-    int callingpid = IPCThreadState::self()->getCallingPid();
-    int mypid = getpid();
-    if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
+    callingPid = IPCThreadState::self()->getCallingPid();
+    myPid = getpid();
+    if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
         mClientUid = IPCThreadState::self()->getCallingUid();
     } else {
         mClientUid = uid;
     }
-    if (pid == -1 || (callingpid != mypid)) {
-        mClientPid = callingpid;
+    if (pid == -1 || (callingPid != myPid)) {
+        mClientPid = callingPid;
     } else {
         mClientPid = pid;
     }
@@ -495,7 +511,7 @@
     }
 
     // create the IAudioTrack
-    status_t status = createTrack_l();
+    status = createTrack_l();
 
     if (status != NO_ERROR) {
         if (mAudioTrackThread != 0) {
@@ -503,10 +519,9 @@
             mAudioTrackThread->requestExitAndWait();
             mAudioTrackThread.clear();
         }
-        return status;
+        goto exit;
     }
 
-    mStatus = NO_ERROR;
     mUserData = user;
     mLoopCount = 0;
     mLoopStart = 0;
@@ -534,7 +549,10 @@
     mFramesWrittenServerOffset = 0;
     mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
     mVolumeHandler = new media::VolumeHandler();
-    return NO_ERROR;
+
+exit:
+    mStatus = status;
+    return status;
 }
 
 // -------------------------------------------------------------------------
@@ -1278,15 +1296,16 @@
 
 status_t AudioTrack::createTrack_l()
 {
+    status_t status;
+    bool callbackAdded = false;
+
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
     if (audioFlinger == 0) {
         ALOGE("Could not get audioflinger");
-        return NO_INIT;
+        status = NO_INIT;
+        goto exit;
     }
 
-    status_t status;
-    bool callbackAdded = false;
-
     {
     // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
     // After fast request is denied, we will request again if IAudioTrack is re-created.
@@ -1355,7 +1374,10 @@
 
     if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
         ALOGE("AudioFlinger could not create track, status: %d output %d", status, output.outputId);
-        goto error;
+        if (status == NO_ERROR) {
+            status = NO_INIT;
+        }
+        goto exit;
     }
     ALOG_ASSERT(track != 0);
 
@@ -1383,13 +1405,13 @@
     if (iMem == 0) {
         ALOGE("Could not get control block");
         status = NO_INIT;
-        goto error;
+        goto exit;
     }
     void *iMemPointer = iMem->pointer();
     if (iMemPointer == NULL) {
         ALOGE("Could not get control block pointer");
         status = NO_INIT;
-        goto error;
+        goto exit;
     }
     // invariant that mAudioTrack != 0 is true only after set() returns successfully
     if (mAudioTrack != 0) {
@@ -1443,7 +1465,7 @@
         if (buffers == NULL) {
             ALOGE("Could not get buffer pointer");
             status = NO_INIT;
-            goto error;
+            goto exit;
         }
     }
 
@@ -1486,17 +1508,15 @@
     mDeathNotifier = new DeathNotifier(this);
     IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
 
-    return NO_ERROR;
     }
 
-error:
-    if (callbackAdded) {
+exit:
+    if (status != NO_ERROR && callbackAdded) {
         // note: mOutput is always valid if callbackAdded is true
         AudioSystem::removeAudioDeviceCallback(this, mOutput);
     }
-    if (status == NO_ERROR) {
-        status = NO_INIT;
-    }
+
+    mStatus = status;
 
     // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
     return status;
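
The hunks above convert AudioTrack::set() and createTrack_l() to a single-exit pattern: every failure path assigns a status and jumps to an exit label where mStatus is updated, so the cached status always reflects the last attempt instead of being left stale by an early return. A minimal sketch of that shape, using a hypothetical class and the standard status codes from utils/Errors.h (not the actual AudioTrack code):

    // status_exit_sketch.cpp -- illustrative only, not part of this patch.
    #include <utils/Errors.h>   // android::status_t, NO_ERROR, BAD_VALUE, NO_INIT

    struct StatusCachingExample {                  // hypothetical class
        android::status_t mStatus = android::NO_INIT;

        android::status_t configure(int value) {
            android::status_t status;
            if (value < 0) {
                status = android::BAD_VALUE;       // every failure funnels to exit
                goto exit;
            }
            // ... further validation; each failure assigns status and jumps to exit ...
            status = android::NO_ERROR;
    exit:
            mStatus = status;                      // cached status mirrors the result
            return status;
        }
    };
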
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 5cf2bdb..56ddd4f 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -22,6 +22,7 @@
 #include <stdint.h>
 #include <sys/types.h>
 
+#include <binder/IPCThreadState.h>
 #include <binder/Parcel.h>
 
 #include "IAudioFlinger.h"
@@ -30,7 +31,7 @@
 
 enum {
     CREATE_TRACK = IBinder::FIRST_CALL_TRANSACTION,
-    OPEN_RECORD,
+    CREATE_RECORD,
     SAMPLE_RATE,
     RESERVED,   // obsolete, was CHANNEL_COUNT
     FORMAT,
@@ -130,102 +131,39 @@
         return track;
     }
 
-    virtual sp<media::IAudioRecord> openRecord(
-                                audio_io_handle_t input,
-                                uint32_t sampleRate,
-                                audio_format_t format,
-                                audio_channel_mask_t channelMask,
-                                const String16& opPackageName,
-                                size_t *pFrameCount,
-                                audio_input_flags_t *flags,
-                                pid_t pid,
-                                pid_t tid,
-                                int clientUid,
-                                audio_session_t *sessionId,
-                                size_t *notificationFrames,
-                                sp<IMemory>& cblk,
-                                sp<IMemory>& buffers,
-                                status_t *status,
-                                audio_port_handle_t portId)
+    virtual sp<media::IAudioRecord> createRecord(const CreateRecordInput& input,
+                                                 CreateRecordOutput& output,
+                                                 status_t *status)
     {
         Parcel data, reply;
         sp<media::IAudioRecord> record;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) input);
-        data.writeInt32(sampleRate);
-        data.writeInt32(format);
-        data.writeInt32(channelMask);
-        data.writeString16(opPackageName);
-        size_t frameCount = pFrameCount != NULL ? *pFrameCount : 0;
-        data.writeInt64(frameCount);
-        audio_input_flags_t lFlags = flags != NULL ? *flags : AUDIO_INPUT_FLAG_NONE;
-        data.writeInt32(lFlags);
-        data.writeInt32((int32_t) pid);
-        data.writeInt32((int32_t) tid);
-        data.writeInt32((int32_t) clientUid);
-        audio_session_t lSessionId = AUDIO_SESSION_ALLOCATE;
-        if (sessionId != NULL) {
-            lSessionId = *sessionId;
+
+        if (status == nullptr) {
+            return record;
         }
-        data.writeInt32(lSessionId);
-        data.writeInt64(notificationFrames != NULL ? *notificationFrames : 0);
-        data.writeInt32(portId);
-        cblk.clear();
-        buffers.clear();
-        status_t lStatus = remote()->transact(OPEN_RECORD, data, &reply);
+
+        input.writeToParcel(&data);
+
+        status_t lStatus = remote()->transact(CREATE_RECORD, data, &reply);
         if (lStatus != NO_ERROR) {
-            ALOGE("openRecord error: %s", strerror(-lStatus));
-        } else {
-            frameCount = reply.readInt64();
-            if (pFrameCount != NULL) {
-                *pFrameCount = frameCount;
-            }
-            lFlags = (audio_input_flags_t)reply.readInt32();
-            if (flags != NULL) {
-                *flags = lFlags;
-            }
-            lSessionId = (audio_session_t) reply.readInt32();
-            if (sessionId != NULL) {
-                *sessionId = lSessionId;
-            }
-            size_t lNotificationFrames = (size_t) reply.readInt64();
-            if (notificationFrames != NULL) {
-                *notificationFrames = lNotificationFrames;
-            }
-            lStatus = reply.readInt32();
-            record = interface_cast<media::IAudioRecord>(reply.readStrongBinder());
-            cblk = interface_cast<IMemory>(reply.readStrongBinder());
-            if (cblk != 0 && cblk->pointer() == NULL) {
-                cblk.clear();
-            }
-            buffers = interface_cast<IMemory>(reply.readStrongBinder());
-            if (buffers != 0 && buffers->pointer() == NULL) {
-                buffers.clear();
-            }
-            if (lStatus == NO_ERROR) {
-                if (record == 0) {
-                    ALOGE("openRecord should have returned an IAudioRecord");
-                    lStatus = UNKNOWN_ERROR;
-                } else if (cblk == 0) {
-                    ALOGE("openRecord should have returned a cblk");
-                    lStatus = NO_MEMORY;
-                }
-                // buffers is permitted to be 0
-            } else {
-                if (record != 0 || cblk != 0 || buffers != 0) {
-                    ALOGE("openRecord returned an IAudioRecord, cblk, "
-                          "or buffers but with status %d", lStatus);
-                }
-            }
-            if (lStatus != NO_ERROR) {
-                record.clear();
-                cblk.clear();
-                buffers.clear();
-            }
+            ALOGE("createRecord transaction error %d", lStatus);
+            *status = DEAD_OBJECT;
+            return record;
         }
-        if (status != NULL) {
-            *status = lStatus;
+        *status = reply.readInt32();
+        if (*status != NO_ERROR) {
+            ALOGE("createRecord returned error %d", *status);
+            return record;
         }
+
+        record = interface_cast<media::IAudioRecord>(reply.readStrongBinder());
+        if (record == 0) {
+            ALOGE("createRecord returned a NULL IAudioRecord with status OK");
+            *status = DEAD_OBJECT;
+            return record;
+        }
+        output.readFromParcel(&reply);
         return record;
     }
 
@@ -899,21 +837,46 @@
 status_t BnAudioFlinger::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
+    // make sure transactions reserved to AudioPolicyManager do not come from other processes
+    switch (code) {
+        case SET_STREAM_VOLUME:
+        case SET_STREAM_MUTE:
+        case SET_MODE:
+        case OPEN_OUTPUT:
+        case OPEN_DUPLICATE_OUTPUT:
+        case CLOSE_OUTPUT:
+        case SUSPEND_OUTPUT:
+        case RESTORE_OUTPUT:
+        case OPEN_INPUT:
+        case CLOSE_INPUT:
+        case INVALIDATE_STREAM:
+        case SET_VOICE_VOLUME:
+        case MOVE_EFFECTS:
+        case LOAD_HW_MODULE:
+        case LIST_AUDIO_PORTS:
+        case GET_AUDIO_PORT:
+        case CREATE_AUDIO_PATCH:
+        case RELEASE_AUDIO_PATCH:
+        case LIST_AUDIO_PATCHES:
+        case SET_AUDIO_PORT_CONFIG:
+            ALOGW("%s: transaction %d received from PID %d",
+                  __func__, code, IPCThreadState::self()->getCallingPid());
+            return INVALID_OPERATION;
+        default:
+            break;
+    }
+
     // Whitelist of relevant events to trigger log merging.
     // Log merging should activate during audio activity of any kind. These are considered the
     // most relevant events.
     // TODO should select more wisely the items from the list
     switch (code) {
         case CREATE_TRACK:
-        case OPEN_RECORD:
+        case CREATE_RECORD:
         case SET_MASTER_VOLUME:
         case SET_MASTER_MUTE:
-        case SET_STREAM_VOLUME:
-        case SET_STREAM_MUTE:
         case SET_MIC_MUTE:
         case SET_PARAMETERS:
-        case OPEN_INPUT:
-        case SET_VOICE_VOLUME:
         case CREATE_EFFECT:
         case SYSTEM_READY: {
             requestLogMerge();
@@ -922,6 +885,7 @@
         default:
             break;
     }
+
     switch (code) {
         case CREATE_TRACK: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
@@ -948,37 +912,29 @@
             output.writeToParcel(reply);
             return NO_ERROR;
         } break;
-        case OPEN_RECORD: {
+        case CREATE_RECORD: {
             CHECK_INTERFACE(IAudioFlinger, data, reply);
-            audio_io_handle_t input = (audio_io_handle_t) data.readInt32();
-            uint32_t sampleRate = data.readInt32();
-            audio_format_t format = (audio_format_t) data.readInt32();
-            audio_channel_mask_t channelMask = data.readInt32();
-            const String16& opPackageName = data.readString16();
-            size_t frameCount = data.readInt64();
-            audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
-            pid_t pid = (pid_t) data.readInt32();
-            pid_t tid = (pid_t) data.readInt32();
-            int clientUid = data.readInt32();
-            audio_session_t sessionId = (audio_session_t) data.readInt32();
-            size_t notificationFrames = data.readInt64();
-            audio_port_handle_t portId = (audio_port_handle_t) data.readInt32();
-            sp<IMemory> cblk;
-            sp<IMemory> buffers;
-            status_t status = NO_ERROR;
-            sp<media::IAudioRecord> record = openRecord(input,
-                    sampleRate, format, channelMask, opPackageName, &frameCount, &flags,
-                    pid, tid, clientUid, &sessionId, &notificationFrames, cblk, buffers,
-                    &status, portId);
+
+            CreateRecordInput input;
+            if (input.readFromParcel((Parcel*)&data) != NO_ERROR) {
+                reply->writeInt32(DEAD_OBJECT);
+                return NO_ERROR;
+            }
+
+            status_t status;
+            CreateRecordOutput output;
+
+            sp<media::IAudioRecord> record = createRecord(input,
+                                                          output,
+                                                          &status);
+
             LOG_ALWAYS_FATAL_IF((record != 0) != (status == NO_ERROR));
-            reply->writeInt64(frameCount);
-            reply->writeInt32(flags);
-            reply->writeInt32(sessionId);
-            reply->writeInt64(notificationFrames);
             reply->writeInt32(status);
+            if (status != NO_ERROR) {
+                return NO_ERROR;
+            }
             reply->writeStrongBinder(IInterface::asBinder(record));
-            reply->writeStrongBinder(IInterface::asBinder(cblk));
-            reply->writeStrongBinder(IInterface::asBinder(buffers));
+            output.writeToParcel(reply);
             return NO_ERROR;
         } break;
         case SAMPLE_RATE: {
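
The new guard at the top of BnAudioFlinger::onTransact() rejects binder transactions that are reserved for the audio policy manager, which calls AudioFlinger in-process rather than over binder; any remote caller hitting those codes is logged and refused. The shape of that gate, sketched with placeholder transaction codes (the real list is in the hunk above):

    #include <stdint.h>
    #include <binder/IPCThreadState.h>
    #include <utils/Errors.h>
    #include <utils/Log.h>

    // Placeholder codes for illustration only.
    enum { RESERVED_CODE_A = 1, RESERVED_CODE_B = 2 };

    // Returns INVALID_OPERATION for reserved codes, NO_ERROR otherwise.
    android::status_t rejectReservedTransactions(uint32_t code) {
        switch (code) {
            case RESERVED_CODE_A:
            case RESERVED_CODE_B:
                ALOGW("%s: transaction %d received from PID %d",
                      __func__, code, android::IPCThreadState::self()->getCallingPid());
                return android::INVALID_OPERATION;   // in-process callers only
            default:
                return android::NO_ERROR;
        }
    }
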
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 0397eec..53bc1b7 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -22,6 +22,7 @@
 #include <math.h>
 #include <sys/types.h>
 
+#include <binder/IPCThreadState.h>
 #include <binder/Parcel.h>
 
 #include <media/AudioEffect.h>
@@ -831,10 +832,33 @@
 
 // ----------------------------------------------------------------------
 
-
 status_t BnAudioPolicyService::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
+    // make sure transactions reserved to AudioFlinger do not come from other processes
+    switch (code) {
+        case START_OUTPUT:
+        case STOP_OUTPUT:
+        case RELEASE_OUTPUT:
+        case GET_INPUT_FOR_ATTR:
+        case START_INPUT:
+        case STOP_INPUT:
+        case RELEASE_INPUT:
+        case GET_STRATEGY_FOR_STREAM:
+        case GET_OUTPUT_FOR_EFFECT:
+        case REGISTER_EFFECT:
+        case UNREGISTER_EFFECT:
+        case SET_EFFECT_ENABLED:
+        case GET_OUTPUT_FOR_ATTR:
+        case ACQUIRE_SOUNDTRIGGER_SESSION:
+        case RELEASE_SOUNDTRIGGER_SESSION:
+            ALOGW("%s: transaction %d received from PID %d",
+                  __func__, code, IPCThreadState::self()->getCallingPid());
+            return INVALID_OPERATION;
+        default:
+            break;
+    }
+
     switch (code) {
         case SET_DEVICE_CONNECTION_STATE: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
@@ -928,6 +952,7 @@
             bool hasAttributes = data.readInt32() != 0;
             if (hasAttributes) {
                 data.read(&attr, sizeof(audio_attributes_t));
+                sanetizeAudioAttributes(&attr);
             }
             audio_session_t session = (audio_session_t)data.readInt32();
             audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
@@ -993,6 +1018,7 @@
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_attributes_t attr;
             data.read(&attr, sizeof(audio_attributes_t));
+            sanetizeAudioAttributes(&attr);
             audio_io_handle_t input = (audio_io_handle_t)data.readInt32();
             audio_session_t session = (audio_session_t)data.readInt32();
             pid_t pid = (pid_t)data.readInt32();
@@ -1368,6 +1394,7 @@
             data.read(&source, sizeof(struct audio_port_config));
             audio_attributes_t attributes;
             data.read(&attributes, sizeof(audio_attributes_t));
+            sanetizeAudioAttributes(&attributes);
             audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
             status_t status = startAudioSource(&source, &attributes, &handle);
             reply->writeInt32(status);
@@ -1418,6 +1445,15 @@
     }
 }
 
+void BnAudioPolicyService::sanetizeAudioAttributes(audio_attributes_t* attr)
+{
+    const size_t tagsMaxSize = AUDIO_ATTRIBUTES_TAGS_MAX_SIZE;
+    if (strnlen(attr->tags, tagsMaxSize) >= tagsMaxSize) {
+        android_errorWriteLog(0x534e4554, "68953950"); // SafetyNet logging
+    }
+    attr->tags[tagsMaxSize - 1] = '\0';
+}
+
 // ----------------------------------------------------------------------------
 
 } // namespace android
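
sanetizeAudioAttributes() makes sure the tags field deserialized from the incoming Parcel is NUL-terminated before it reaches the policy engine, logging to SafetyNet when a caller sent an unterminated buffer. A standalone sketch of just the truncation step (helper name is mine):

    #include <system/audio.h>   // audio_attributes_t, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE

    // Force-terminate tags so later string handling cannot read or copy past the
    // end of the fixed-size buffer, regardless of what the remote caller sent.
    static void truncateTags(audio_attributes_t* attr) {
        attr->tags[AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1] = '\0';
    }
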
diff --git a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
index 50ce78f..7572671 100644
--- a/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioRecord.aidl
@@ -16,6 +16,7 @@
 
 package android.media;
 
+/* Native code must specify namespace media (media::IAudioRecord) when referring to this class */
 interface IAudioRecord {
 
   /* After it's created the track is not active. Call start() to
diff --git a/media/libaudioclient/include/media/AudioClient.h b/media/libaudioclient/include/media/AudioClient.h
index 108e326..247af9e 100644
--- a/media/libaudioclient/include/media/AudioClient.h
+++ b/media/libaudioclient/include/media/AudioClient.h
@@ -19,12 +19,13 @@
 #define ANDROID_AUDIO_CLIENT_H
 
 #include <binder/Parcel.h>
+#include <binder/Parcelable.h>
 #include <system/audio.h>
 #include <utils/String16.h>
 
 namespace android {
 
-class AudioClient {
+class AudioClient : public Parcelable {
  public:
     AudioClient() :
         clientUid(-1), clientPid(-1), clientTid(-1), packageName("") {}
@@ -34,7 +35,7 @@
     pid_t clientTid;
     String16 packageName;
 
-    status_t readFromParcel(Parcel *parcel) {
+    status_t readFromParcel(const Parcel *parcel) override {
         clientUid = parcel->readInt32();
         clientPid = parcel->readInt32();
         clientTid = parcel->readInt32();
@@ -42,7 +43,7 @@
         return NO_ERROR;
     }
 
-    status_t writeToParcel(Parcel *parcel) const {
+    status_t writeToParcel(Parcel *parcel) const override {
         parcel->writeInt32(clientUid);
         parcel->writeInt32(clientPid);
         parcel->writeInt32(clientTid);
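
With AudioClient implementing Parcelable, client info can be flattened into a Parcel and restored symmetrically, which is what the CreateTrackInput/CreateRecordInput parcelables rely on. A small round-trip sketch (the values are illustrative, the package name hypothetical):

    #include <binder/Parcel.h>
    #include <media/AudioClient.h>
    #include <utils/String16.h>

    void audioClientRoundTrip() {
        android::AudioClient in;
        in.clientUid = 10001;                 // illustrative values only
        in.clientPid = 1234;
        in.clientTid = 1234;
        in.packageName = android::String16("com.example.app");   // hypothetical package

        android::Parcel p;
        in.writeToParcel(&p);                 // uid, pid, tid, packageName in order
        p.setDataPosition(0);

        android::AudioClient out;
        out.readFromParcel(&p);               // symmetric read restores the same fields
    }
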
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 51596a2..00c2a88 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -570,7 +570,7 @@
 
             // caller must hold lock on mLock for all _l methods
 
-            status_t openRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName);
+            status_t createRecord_l(const Modulo<uint32_t> &epoch, const String16& opPackageName);
 
             // FIXME enum is faster than strcmp() for parameter 'from'
             status_t restoreRecord_l(const char *from);
@@ -682,7 +682,6 @@
                                               // May not match the app selection depending on other
                                               // activity and connected devices
     wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
-    audio_port_handle_t    mPortId;  // unique ID allocated by audio policy
 
 };
 
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 66601da..24a6e22 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -231,7 +231,7 @@
                               audio_stream_type_t stream,
                               audio_session_t session);
 
-    // Client must successfully hand off the handle reference to AudioFlinger via openRecord(),
+    // Client must successfully hand off the handle reference to AudioFlinger via createRecord(),
     // or release it with releaseInput().
     static status_t getInputForAttr(const audio_attributes_t *attr,
                                     audio_io_handle_t *input,
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 9061c26..57d9778 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -25,6 +25,7 @@
 #include <utils/Errors.h>
 #include <binder/IInterface.h>
 #include <binder/Parcel.h>
+#include <binder/Parcelable.h>
 #include <media/AudioClient.h>
 #include <media/IAudioTrack.h>
 #include <media/IAudioFlingerClient.h>
@@ -50,9 +51,9 @@
      * when calling createTrack() including arguments that will be updated by AudioFlinger
      * and returned in CreateTrackOutput object
      */
-    class CreateTrackInput {
+    class CreateTrackInput : public Parcelable {
     public:
-        status_t readFromParcel(Parcel *parcel) {
+        status_t readFromParcel(const Parcel *parcel) override {
             /* input arguments*/
             memset(&attr, 0, sizeof(audio_attributes_t));
             if (parcel->read(&attr, sizeof(audio_attributes_t)) != NO_ERROR) {
@@ -63,7 +64,9 @@
             if (parcel->read(&config, sizeof(audio_config_t)) != NO_ERROR) {
                 return DEAD_OBJECT;
             }
-            (void)clientInfo.readFromParcel(parcel);
+            if (clientInfo.readFromParcel(parcel) != NO_ERROR) {
+                return DEAD_OBJECT;
+            }
             if (parcel->readInt32() != 0) {
                 sharedBuffer = interface_cast<IMemory>(parcel->readStrongBinder());
                 if (sharedBuffer == 0 || sharedBuffer->pointer() == NULL) {
@@ -82,7 +85,7 @@
             return NO_ERROR;
         }
 
-        status_t writeToParcel(Parcel *parcel) const {
+        status_t writeToParcel(Parcel *parcel) const override {
             /* input arguments*/
             (void)parcel->write(&attr, sizeof(audio_attributes_t));
             (void)parcel->write(&config, sizeof(audio_config_t));
@@ -125,9 +128,9 @@
      * when calling createTrack() including arguments that were passed as I/O for update by
      * CreateTrackInput.
      */
-    class CreateTrackOutput {
+    class CreateTrackOutput : public Parcelable {
     public:
-        status_t readFromParcel(Parcel *parcel) {
+        status_t readFromParcel(const Parcel *parcel) override {
             /* input/output arguments*/
             (void)parcel->read(&flags, sizeof(audio_output_flags_t));
             frameCount = parcel->readInt64();
@@ -144,7 +147,7 @@
             return NO_ERROR;
         }
 
-        status_t writeToParcel(Parcel *parcel) const {
+        status_t writeToParcel(Parcel *parcel) const override {
             /* input/output arguments*/
             (void)parcel->write(&flags, sizeof(audio_output_flags_t));
             (void)parcel->writeInt64(frameCount);
@@ -176,6 +179,140 @@
         audio_io_handle_t outputId;
     };
 
+    /* CreateRecordInput contains all input arguments sent by AudioRecord to AudioFlinger
+     * when calling createRecord() including arguments that will be updated by AudioFlinger
+     * and returned in CreateRecordOutput object
+     */
+    class CreateRecordInput : public Parcelable {
+    public:
+        status_t readFromParcel(const Parcel *parcel) override {
+            /* input arguments*/
+            memset(&attr, 0, sizeof(audio_attributes_t));
+            if (parcel->read(&attr, sizeof(audio_attributes_t)) != NO_ERROR) {
+                return DEAD_OBJECT;
+            }
+            attr.tags[AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1] = '\0';
+            memset(&config, 0, sizeof(audio_config_base_t));
+            if (parcel->read(&config, sizeof(audio_config_base_t)) != NO_ERROR) {
+                return DEAD_OBJECT;
+            }
+            if (clientInfo.readFromParcel(parcel) != NO_ERROR) {
+                return DEAD_OBJECT;
+            }
+            opPackageName = parcel->readString16();
+
+            /* input/output arguments*/
+            (void)parcel->read(&flags, sizeof(audio_input_flags_t));
+            frameCount = parcel->readInt64();
+            notificationFrameCount = parcel->readInt64();
+            (void)parcel->read(&selectedDeviceId, sizeof(audio_port_handle_t));
+            (void)parcel->read(&sessionId, sizeof(audio_session_t));
+            return NO_ERROR;
+        }
+
+        status_t writeToParcel(Parcel *parcel) const override {
+            /* input arguments*/
+            (void)parcel->write(&attr, sizeof(audio_attributes_t));
+            (void)parcel->write(&config, sizeof(audio_config_base_t));
+            (void)clientInfo.writeToParcel(parcel);
+            (void)parcel->writeString16(opPackageName);
+
+            /* input/output arguments*/
+            (void)parcel->write(&flags, sizeof(audio_input_flags_t));
+            (void)parcel->writeInt64(frameCount);
+            (void)parcel->writeInt64(notificationFrameCount);
+            (void)parcel->write(&selectedDeviceId, sizeof(audio_port_handle_t));
+            (void)parcel->write(&sessionId, sizeof(audio_session_t));
+            return NO_ERROR;
+        }
+
+        /* input */
+        audio_attributes_t attr;
+        audio_config_base_t config;
+        AudioClient clientInfo;
+        String16 opPackageName;
+
+        /* input/output */
+        audio_input_flags_t flags;
+        size_t frameCount;
+        size_t notificationFrameCount;
+        audio_port_handle_t selectedDeviceId;
+        audio_session_t sessionId;
+    };
+
+    /* CreateRecordOutput contains all output arguments returned by AudioFlinger to AudioRecord
+     * when calling createRecord() including arguments that were passed as I/O for update by
+     * CreateRecordInput.
+     */
+    class CreateRecordOutput : public Parcelable {
+    public:
+        status_t readFromParcel(const Parcel *parcel) override {
+            /* input/output arguments*/
+            (void)parcel->read(&flags, sizeof(audio_input_flags_t));
+            frameCount = parcel->readInt64();
+            notificationFrameCount = parcel->readInt64();
+            (void)parcel->read(&selectedDeviceId, sizeof(audio_port_handle_t));
+            (void)parcel->read(&sessionId, sizeof(audio_session_t));
+
+            /* output arguments*/
+            sampleRate = parcel->readUint32();
+            (void)parcel->read(&inputId, sizeof(audio_io_handle_t));
+            if (parcel->readInt32() != 0) {
+                cblk = interface_cast<IMemory>(parcel->readStrongBinder());
+                if (cblk == 0 || cblk->pointer() == NULL) {
+                    return BAD_VALUE;
+                }
+            }
+            if (parcel->readInt32() != 0) {
+                buffers = interface_cast<IMemory>(parcel->readStrongBinder());
+                if (buffers == 0 || buffers->pointer() == NULL) {
+                    return BAD_VALUE;
+                }
+            }
+            return NO_ERROR;
+        }
+
+        status_t writeToParcel(Parcel *parcel) const override {
+            /* input/output arguments*/
+            (void)parcel->write(&flags, sizeof(audio_input_flags_t));
+            (void)parcel->writeInt64(frameCount);
+            (void)parcel->writeInt64(notificationFrameCount);
+            (void)parcel->write(&selectedDeviceId, sizeof(audio_port_handle_t));
+            (void)parcel->write(&sessionId, sizeof(audio_session_t));
+
+            /* output arguments*/
+            (void)parcel->writeUint32(sampleRate);
+            (void)parcel->write(&inputId, sizeof(audio_io_handle_t));
+            if (cblk != 0) {
+                (void)parcel->writeInt32(1);
+                (void)parcel->writeStrongBinder(IInterface::asBinder(cblk));
+            } else {
+                (void)parcel->writeInt32(0);
+            }
+            if (buffers != 0) {
+                (void)parcel->writeInt32(1);
+                (void)parcel->writeStrongBinder(IInterface::asBinder(buffers));
+            } else {
+                (void)parcel->writeInt32(0);
+            }
+
+            return NO_ERROR;
+        }
+
+        /* input/output */
+        audio_input_flags_t flags;
+        size_t frameCount;
+        size_t notificationFrameCount;
+        audio_port_handle_t selectedDeviceId;
+        audio_session_t sessionId;
+
+        /* output */
+        uint32_t sampleRate;
+        audio_io_handle_t inputId;
+        sp<IMemory> cblk;
+        sp<IMemory> buffers;
+    };
+
     // invariant on exit for all APIs that return an sp<>:
     //   (return value != 0) == (*status == NO_ERROR)
 
@@ -186,26 +323,9 @@
                                         CreateTrackOutput& output,
                                         status_t *status) = 0;
 
-    virtual sp<media::IAudioRecord> openRecord(
-                                // On successful return, AudioFlinger takes over the handle
-                                // reference and will release it when the track is destroyed.
-                                // However on failure, the client is responsible for release.
-                                audio_io_handle_t input,
-                                uint32_t sampleRate,
-                                audio_format_t format,
-                                audio_channel_mask_t channelMask,
-                                const String16& callingPackage,
-                                size_t *pFrameCount,
-                                audio_input_flags_t *flags,
-                                pid_t pid,
-                                pid_t tid,  // -1 means unused, otherwise must be valid non-0
-                                int clientUid,
-                                audio_session_t *sessionId,
-                                size_t *notificationFrames,
-                                sp<IMemory>& cblk,
-                                sp<IMemory>& buffers,   // return value 0 means it follows cblk
-                                status_t *status,
-                                audio_port_handle_t portId) = 0;
+    virtual sp<media::IAudioRecord> createRecord(const CreateRecordInput& input,
+                                        CreateRecordOutput& output,
+                                        status_t *status) = 0;
 
     // FIXME Surprisingly, format/latency don't work for input handles
 
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index 7c88e57..5558b77 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -178,6 +178,8 @@
                                     const Parcel& data,
                                     Parcel* reply,
                                     uint32_t flags = 0);
+private:
+    void sanetizeAudioAttributes(audio_attributes_t* attr);
 };
 
 // ----------------------------------------------------------------------------
diff --git a/media/libeffects/config/Android.bp b/media/libeffects/config/Android.bp
index 4398a91..3e88c7c 100644
--- a/media/libeffects/config/Android.bp
+++ b/media/libeffects/config/Android.bp
@@ -5,6 +5,11 @@
 
     srcs: ["src/EffectsConfig.cpp"],
 
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
+
     shared_libs: [
         "liblog",
         "libtinyxml2",
diff --git a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c
index c290aec..7b0f341 100644
--- a/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c
+++ b/media/libeffects/lvm/lib/Eq/src/LVEQNB_Control.c
@@ -430,7 +430,15 @@
     }
 
 
-    if(bChange){
+    // During operating mode transition, there is a race condition where the mode
+    // is still LVEQNB_ON, but the effect is considered disabled in the upper layers.
+    // modeChange handles this special race condition.
+    const int /* bool */ modeChange = pParams->OperatingMode != OperatingModeSave
+            || (OperatingModeSave == LVEQNB_ON
+                    && pInstance->bInOperatingModeTransition
+                    && LVC_Mixer_GetTarget(&pInstance->BypassMixer.MixerStream[0]) == 0);
+
+    if (bChange || modeChange) {
 
         /*
          * If the sample rate has changed clear the history
@@ -462,8 +470,7 @@
             LVEQNB_SetCoefficients(pInstance);                  /* Instance pointer */
         }
 
-        if(pParams->OperatingMode != OperatingModeSave)
-        {
+        if (modeChange) {
             if(pParams->OperatingMode == LVEQNB_ON)
             {
 #ifdef BUILD_FLOAT
@@ -479,6 +486,8 @@
             else
             {
                 /* Stay on the ON operating mode until the transition is done */
+                // This may introduce a state race condition if the effect is enabled again
+                // while in transition.  This is fixed in the modeChange logic.
                 pInstance->Params.OperatingMode = LVEQNB_ON;
 #ifdef BUILD_FLOAT
                 LVC_Mixer_SetTarget(&pInstance->BypassMixer.MixerStream[0], 0.0f);
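
The comment above describes the race this fixes: an ON-to-OFF bypass ramp is still in flight, the saved parameters still say ON, and a re-enable arriving during the ramp used to be dropped because no mode change was detected. Restated as a generic predicate (names here are placeholders, not the LVM API):

    // A mode change must be recognized either when the requested mode differs, or
    // when the effect is nominally ON but a bypass ramp toward silence is still
    // in progress, so that re-enabling during the transition is not lost.
    static bool effectModeChanged(bool requestedOn, bool currentOn,
                                  bool inTransition, float bypassMixTarget) {
        return (requestedOn != currentOn)
                || (currentOn && inTransition && bypassMixTarget == 0.0f);
    }
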
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 146e9e8..8ebae11 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -3330,14 +3330,19 @@
         //ALOGV("\tEffect_process Not Calling process with %d effects enabled, %d called: Effect %d",
         //pContext->pBundledContext->NumberEffectsEnabled,
         //pContext->pBundledContext->NumberEffectsCalled, pContext->EffectType);
-        // 2 is for stereo input
+
         if (pContext->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
-            for (size_t i=0; i < outBuffer->frameCount*2; i++){
-                outBuffer->s16[i] =
-                        clamp16((LVM_INT32)outBuffer->s16[i] + (LVM_INT32)inBuffer->s16[i]);
+            for (size_t i = 0; i < outBuffer->frameCount * FCC_2; ++i){
+#ifdef NATIVE_FLOAT_BUFFER
+                outBuffer->f32[i] += inBuffer->f32[i];
+#else
+                outBuffer->s16[i] = clamp16((LVM_INT32)outBuffer->s16[i] + inBuffer->s16[i]);
+#endif
             }
         } else if (outBuffer->raw != inBuffer->raw) {
-            memcpy(outBuffer->raw, inBuffer->raw, outBuffer->frameCount*sizeof(LVM_INT16)*2);
+            memcpy(outBuffer->raw,
+                    inBuffer->raw,
+                    outBuffer->frameCount * sizeof(effect_buffer_t) * FCC_2);
         }
     }
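
The accumulate branch adds the input onto the output with saturation in the 16-bit build (or a plain float add under NATIVE_FLOAT_BUFFER), and the copy branch now sizes its memcpy with effect_buffer_t so both build variants move the right number of bytes. A self-contained sketch of the saturating 16-bit path (helper names are mine, not the bundle's):

    #include <stddef.h>
    #include <stdint.h>

    // Saturating add of one stereo buffer onto another, 16-bit PCM.
    static inline int16_t clamp16Sketch(int32_t sample) {
        if (sample > 32767) return 32767;
        if (sample < -32768) return -32768;
        return (int16_t)sample;
    }

    static void accumulateStereoS16(int16_t* out, const int16_t* in, size_t frameCount) {
        for (size_t i = 0; i < frameCount * 2 /* FCC_2: stereo */; ++i) {
            out[i] = clamp16Sketch((int32_t)out[i] + in[i]);
        }
    }
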
 
diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp
index 0e82339..c33f9f5 100644
--- a/media/libeffects/visualizer/EffectVisualizer.cpp
+++ b/media/libeffects/visualizer/EffectVisualizer.cpp
@@ -594,7 +594,7 @@
                     deltaSmpl = CAPTURE_BUF_SIZE;
                 }
 
-                int32_t capturePoint = pContext->mCaptureIdx - deltaSmpl;
+                int32_t capturePoint = (int32_t)pContext->mCaptureIdx - deltaSmpl;
                 // a negative capturePoint means we wrap the buffer.
                 if (capturePoint < 0) {
                     uint32_t size = -capturePoint;
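
The cast matters assuming mCaptureIdx is an unsigned index (as the explicit conversion suggests): without it the subtraction is performed in unsigned arithmetic, so a logically negative capture point wraps to a huge value and trips the integer-overflow sanitizer instead of taking the wrap-around branch below. A tiny illustration with assumed types, which may differ from the effect's actual declarations:

    #include <stdint.h>

    // With an unsigned left operand the subtraction wraps instead of going
    // negative; converting to a signed type first keeps the "negative means
    // wrap the capture buffer" logic well defined.
    int32_t capturePointSketch(uint32_t captureIdx, int32_t deltaSmpl) {
        return (int32_t)captureIdx - deltaSmpl;   // may legitimately be negative
    }
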
diff --git a/media/libmediametrics/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
index f7df2b4..6b063e8 100644
--- a/media/libmediametrics/MediaAnalyticsItem.cpp
+++ b/media/libmediametrics/MediaAnalyticsItem.cpp
@@ -214,12 +214,12 @@
     return mPkgName;
 }
 
-MediaAnalyticsItem &MediaAnalyticsItem::setPkgVersionCode(int32_t pkgVersionCode) {
+MediaAnalyticsItem &MediaAnalyticsItem::setPkgVersionCode(int64_t pkgVersionCode) {
     mPkgVersionCode = pkgVersionCode;
     return *this;
 }
 
-int32_t MediaAnalyticsItem::getPkgVersionCode() const {
+int64_t MediaAnalyticsItem::getPkgVersionCode() const {
     return mPkgVersionCode;
 }
 
@@ -640,7 +640,7 @@
     mPid = data.readInt32();
     mUid = data.readInt32();
     mPkgName = data.readCString();
-    mPkgVersionCode = data.readInt32();
+    mPkgVersionCode = data.readInt64();
     mSessionID = data.readInt64();
     mFinalized = data.readInt32();
     mTimestamp = data.readInt64();
@@ -687,7 +687,7 @@
     data->writeInt32(mPid);
     data->writeInt32(mUid);
     data->writeCString(mPkgName.c_str());
-    data->writeInt32(mPkgVersionCode);
+    data->writeInt64(mPkgVersionCode);
     data->writeInt64(mSessionID);
     data->writeInt32(mFinalized);
     data->writeInt64(mTimestamp);
@@ -766,7 +766,7 @@
 
     if (version >= PROTO_V1) {
         result.append(mPkgName);
-        snprintf(buffer, sizeof(buffer), ":%d:", mPkgVersionCode);
+        snprintf(buffer, sizeof(buffer), ":%"  PRId64 ":", mPkgVersionCode);
         result.append(buffer);
     }
 
diff --git a/media/libmediametrics/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
index 5f9b916..ec9b660 100644
--- a/media/libmediametrics/include/MediaAnalyticsItem.h
+++ b/media/libmediametrics/include/MediaAnalyticsItem.h
@@ -173,8 +173,8 @@
         MediaAnalyticsItem &setPkgName(AString);
         AString getPkgName() const;
 
-        MediaAnalyticsItem &setPkgVersionCode(int32_t);
-        int32_t getPkgVersionCode() const;
+        MediaAnalyticsItem &setPkgVersionCode(int64_t);
+        int64_t getPkgVersionCode() const;
 
         // our serialization code for binder calls
         int32_t writeToParcel(Parcel *);
@@ -205,7 +205,7 @@
         pid_t     mPid;
         uid_t     mUid;
         AString   mPkgName;
-        int32_t   mPkgVersionCode;
+        int64_t   mPkgVersionCode;
 
         // let's reuse a binder connection
         static sp<IMediaAnalyticsService> sAnalyticsService;
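
Widening mPkgVersionCode to int64_t has to be done consistently in the setter/getter, the Parcel serialization (writeInt64/readInt64) and the PRId64 format used for the debug string, otherwise readers and writers disagree on the wire format. A reduced sketch of the symmetric read/write pair:

    #include <inttypes.h>
    #include <stdio.h>
    #include <binder/Parcel.h>

    void writeVersionCode(android::Parcel* data, int64_t pkgVersionCode) {
        data->writeInt64(pkgVersionCode);                 // 64-bit on the wire
    }

    int64_t readVersionCode(const android::Parcel& data) {
        return data.readInt64();                          // must match the writer
    }

    void appendVersionCode(char* buffer, size_t size, int64_t pkgVersionCode) {
        snprintf(buffer, size, ":%" PRId64 ":", pkgVersionCode);
    }
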
diff --git a/media/libnblog/PerformanceAnalysis.cpp b/media/libnblog/PerformanceAnalysis.cpp
index 478c460..f09e93d 100644
--- a/media/libnblog/PerformanceAnalysis.cpp
+++ b/media/libnblog/PerformanceAnalysis.cpp
@@ -230,6 +230,7 @@
 }
 
 // rounds value to precision based on log-distance from mean
+__attribute__((no_sanitize("signed-integer-overflow")))
 inline double logRound(double x, double mean) {
     // Larger values decrease range of high resolution and prevent overflow
     // of a histogram on the console.
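
This is the other half of building cleanly with the integer-overflow sanitizers: instead of changing the arithmetic, the one function whose math is expected to exceed the checked range is annotated so that only it opts out of the runtime check. Illustrative use of the Clang function attribute (the body here is a placeholder, not logRound()):

    // Only this function is exempted from the signed-integer-overflow runtime
    // check; the rest of the translation unit remains instrumented.
    __attribute__((no_sanitize("signed-integer-overflow")))
    static int64_t unsanitizedScale(int64_t value, int64_t factor) {
        return value * factor;   // deliberately left unchecked here
    }
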
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index fa5f37ec..b529940 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -419,8 +419,11 @@
 
     *done = (++mNumFramesDecoded >= mNumFrames);
 
+    if (outputFormat == NULL) {
+        return ERROR_MALFORMED;
+    }
+
     int32_t width, height;
-    CHECK(outputFormat != NULL);
     CHECK(outputFormat->findInt32("width", &width));
     CHECK(outputFormat->findInt32("height", &height));
 
@@ -540,8 +543,11 @@
 status_t ImageDecoder::onOutputReceived(
         const sp<MediaCodecBuffer> &videoFrameBuffer,
         const sp<AMessage> &outputFormat, int64_t /*timeUs*/, bool *done) {
+    if (outputFormat == NULL) {
+        return ERROR_MALFORMED;
+    }
+
     int32_t width, height;
-    CHECK(outputFormat != NULL);
     CHECK(outputFormat->findInt32("width", &width));
     CHECK(outputFormat->findInt32("height", &height));
 
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 1fe5f60..8db00f0 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -2951,212 +2951,215 @@
             mGotStartKeyFrame = true;
         }
 ////////////////////////////////////////////////////////////////////////////////
-        if (mStszTableEntries->count() == 0) {
-            mFirstSampleTimeRealUs = systemTime() / 1000;
-            mStartTimestampUs = timestampUs;
-            mOwner->setStartTimestampUs(mStartTimestampUs);
-            previousPausedDurationUs = mStartTimestampUs;
-        }
 
-        if (mResumed) {
-            int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
-            if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
-                copy->release();
-                mSource->stop();
-                mIsMalformed = true;
-                break;
-            }
-
-            int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
-            if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
-                copy->release();
-                mSource->stop();
-                mIsMalformed = true;
-                break;
-            }
-
-            previousPausedDurationUs += pausedDurationUs - lastDurationUs;
-            mResumed = false;
-        }
-        TimestampDebugHelperEntry timestampDebugEntry;
-        timestampUs -= previousPausedDurationUs;
-        timestampDebugEntry.pts = timestampUs;
-        if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
-            copy->release();
-            mSource->stop();
-            mIsMalformed = true;
-            break;
-        }
-
-        if (mIsVideo) {
-            /*
-             * Composition time: timestampUs
-             * Decoding time: decodingTimeUs
-             * Composition time offset = composition time - decoding time
-             */
-            int64_t decodingTimeUs;
-            CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
-            decodingTimeUs -= previousPausedDurationUs;
-
-            // ensure non-negative, monotonic decoding time
-            if (mLastDecodingTimeUs < 0) {
-                decodingTimeUs = std::max((int64_t)0, decodingTimeUs);
-            } else {
-                // increase decoding time by at least the larger vaule of 1 tick and
-                // 0.1 milliseconds. This needs to take into account the possible
-                // delta adjustment in DurationTicks in below.
-                decodingTimeUs = std::max(mLastDecodingTimeUs +
-                        std::max(100, divUp(1000000, mTimeScale)), decodingTimeUs);
-            }
-
-            mLastDecodingTimeUs = decodingTimeUs;
-            timestampDebugEntry.dts = decodingTimeUs;
-            timestampDebugEntry.frameType = isSync ? "Key frame" : "Non-Key frame";
-            // Insert the timestamp into the mTimestampDebugHelper
-            if (mTimestampDebugHelper.size() >= kTimestampDebugCount) {
-                mTimestampDebugHelper.pop_front();
-            }
-            mTimestampDebugHelper.push_back(timestampDebugEntry);
-
-            cttsOffsetTimeUs =
-                    timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
-            if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
-                copy->release();
-                mSource->stop();
-                mIsMalformed = true;
-                break;
-            }
-
-            timestampUs = decodingTimeUs;
-            ALOGV("decoding time: %" PRId64 " and ctts offset time: %" PRId64,
-                timestampUs, cttsOffsetTimeUs);
-
-            // Update ctts box table if necessary
-            currCttsOffsetTimeTicks =
-                    (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
-            if (WARN_UNLESS(currCttsOffsetTimeTicks <= 0x0FFFFFFFFLL, "for %s track", trackName)) {
-                copy->release();
-                mSource->stop();
-                mIsMalformed = true;
-                break;
-            }
-
+        if (!mIsHeic) {
             if (mStszTableEntries->count() == 0) {
-                // Force the first ctts table entry to have one single entry
-                // so that we can do adjustment for the initial track start
-                // time offset easily in writeCttsBox().
-                lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
-                addOneCttsTableEntry(1, currCttsOffsetTimeTicks);
-                cttsSampleCount = 0;      // No sample in ctts box is pending
-            } else {
-                if (currCttsOffsetTimeTicks != lastCttsOffsetTimeTicks) {
-                    addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
-                    lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
-                    cttsSampleCount = 1;  // One sample in ctts box is pending
+                mFirstSampleTimeRealUs = systemTime() / 1000;
+                mStartTimestampUs = timestampUs;
+                mOwner->setStartTimestampUs(mStartTimestampUs);
+                previousPausedDurationUs = mStartTimestampUs;
+            }
+
+            if (mResumed) {
+                int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
+                if (WARN_UNLESS(durExcludingEarlierPausesUs >= 0ll, "for %s track", trackName)) {
+                    copy->release();
+                    mSource->stop();
+                    mIsMalformed = true;
+                    break;
+                }
+
+                int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
+                if (WARN_UNLESS(pausedDurationUs >= lastDurationUs, "for %s track", trackName)) {
+                    copy->release();
+                    mSource->stop();
+                    mIsMalformed = true;
+                    break;
+                }
+
+                previousPausedDurationUs += pausedDurationUs - lastDurationUs;
+                mResumed = false;
+            }
+            TimestampDebugHelperEntry timestampDebugEntry;
+            timestampUs -= previousPausedDurationUs;
+            timestampDebugEntry.pts = timestampUs;
+            if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+                copy->release();
+                mSource->stop();
+                mIsMalformed = true;
+                break;
+            }
+
+            if (mIsVideo) {
+                /*
+                 * Composition time: timestampUs
+                 * Decoding time: decodingTimeUs
+                 * Composition time offset = composition time - decoding time
+                 */
+                int64_t decodingTimeUs;
+                CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
+                decodingTimeUs -= previousPausedDurationUs;
+
+                // ensure non-negative, monotonic decoding time
+                if (mLastDecodingTimeUs < 0) {
+                    decodingTimeUs = std::max((int64_t)0, decodingTimeUs);
                 } else {
-                    ++cttsSampleCount;
+                    // increase decoding time by at least the larger value of 1 tick and
+                    // 0.1 milliseconds. This needs to take into account the possible
+                    // delta adjustment in DurationTicks below.
+                    decodingTimeUs = std::max(mLastDecodingTimeUs +
+                            std::max(100, divUp(1000000, mTimeScale)), decodingTimeUs);
                 }
-            }
 
-            // Update ctts time offset range
-            if (mStszTableEntries->count() == 0) {
-                mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
-                mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
-            } else {
-                if (currCttsOffsetTimeTicks > mMaxCttsOffsetTicks) {
-                    mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
-                } else if (currCttsOffsetTimeTicks < mMinCttsOffsetTicks) {
+                mLastDecodingTimeUs = decodingTimeUs;
+                timestampDebugEntry.dts = decodingTimeUs;
+                timestampDebugEntry.frameType = isSync ? "Key frame" : "Non-Key frame";
+                // Insert the timestamp into the mTimestampDebugHelper
+                if (mTimestampDebugHelper.size() >= kTimestampDebugCount) {
+                    mTimestampDebugHelper.pop_front();
+                }
+                mTimestampDebugHelper.push_back(timestampDebugEntry);
+
+                cttsOffsetTimeUs =
+                        timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
+                if (WARN_UNLESS(cttsOffsetTimeUs >= 0ll, "for %s track", trackName)) {
+                    copy->release();
+                    mSource->stop();
+                    mIsMalformed = true;
+                    break;
+                }
+
+                timestampUs = decodingTimeUs;
+                ALOGV("decoding time: %" PRId64 " and ctts offset time: %" PRId64,
+                    timestampUs, cttsOffsetTimeUs);
+
+                // Update ctts box table if necessary
+                currCttsOffsetTimeTicks =
+                        (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
+                if (WARN_UNLESS(currCttsOffsetTimeTicks <= 0x0FFFFFFFFLL, "for %s track", trackName)) {
+                    copy->release();
+                    mSource->stop();
+                    mIsMalformed = true;
+                    break;
+                }
+
+                if (mStszTableEntries->count() == 0) {
+                    // Force the first ctts table entry to have one single entry
+                    // so that we can do adjustment for the initial track start
+                    // time offset easily in writeCttsBox().
+                    lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+                    addOneCttsTableEntry(1, currCttsOffsetTimeTicks);
+                    cttsSampleCount = 0;      // No sample in ctts box is pending
+                } else {
+                    if (currCttsOffsetTimeTicks != lastCttsOffsetTimeTicks) {
+                        addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
+                        lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+                        cttsSampleCount = 1;  // One sample in ctts box is pending
+                    } else {
+                        ++cttsSampleCount;
+                    }
+                }
+
+                // Update ctts time offset range
+                if (mStszTableEntries->count() == 0) {
                     mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
-                    mMinCttsOffsetTimeUs = cttsOffsetTimeUs;
+                    mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
+                } else {
+                    if (currCttsOffsetTimeTicks > mMaxCttsOffsetTicks) {
+                        mMaxCttsOffsetTicks = currCttsOffsetTimeTicks;
+                    } else if (currCttsOffsetTimeTicks < mMinCttsOffsetTicks) {
+                        mMinCttsOffsetTicks = currCttsOffsetTimeTicks;
+                        mMinCttsOffsetTimeUs = cttsOffsetTimeUs;
+                    }
                 }
             }
-        }
 
-        if (mOwner->isRealTimeRecording()) {
-            if (mIsAudio) {
-                updateDriftTime(meta_data);
-            }
-        }
-
-        if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
-            copy->release();
-            mSource->stop();
-            mIsMalformed = true;
-            break;
-        }
-
-        ALOGV("%s media time stamp: %" PRId64 " and previous paused duration %" PRId64,
-                trackName, timestampUs, previousPausedDurationUs);
-        if (timestampUs > mTrackDurationUs) {
-            mTrackDurationUs = timestampUs;
-        }
-
-        // We need to use the time scale based ticks, rather than the
-        // timestamp itself to determine whether we have to use a new
-        // stts entry, since we may have rounding errors.
-        // The calculation is intended to reduce the accumulated
-        // rounding errors.
-        currDurationTicks =
-            ((timestampUs * mTimeScale + 500000LL) / 1000000LL -
-                (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
-        if (currDurationTicks < 0ll) {
-            ALOGE("do not support out of order frames (timestamp: %lld < last: %lld for %s track",
-                    (long long)timestampUs, (long long)lastTimestampUs, trackName);
-            copy->release();
-            mSource->stop();
-            mIsMalformed = true;
-            break;
-        }
-
-        // if the duration is different for this sample, see if it is close enough to the previous
-        // duration that we can fudge it and use the same value, to avoid filling the stts table
-        // with lots of near-identical entries.
-        // "close enough" here means that the current duration needs to be adjusted by less
-        // than 0.1 milliseconds
-        if (lastDurationTicks && (currDurationTicks != lastDurationTicks)) {
-            int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
-                    + (mTimeScale / 2)) / mTimeScale;
-            if (deltaUs > -100 && deltaUs < 100) {
-                // use previous ticks, and adjust timestamp as if it was actually that number
-                // of ticks
-                currDurationTicks = lastDurationTicks;
-                timestampUs += deltaUs;
-            }
-        }
-        mStszTableEntries->add(htonl(sampleSize));
-        if (mStszTableEntries->count() > 2) {
-
-            // Force the first sample to have its own stts entry so that
-            // we can adjust its value later to maintain the A/V sync.
-            if (mStszTableEntries->count() == 3 || currDurationTicks != lastDurationTicks) {
-                addOneSttsTableEntry(sampleCount, lastDurationTicks);
-                sampleCount = 1;
-            } else {
-                ++sampleCount;
+            if (mOwner->isRealTimeRecording()) {
+                if (mIsAudio) {
+                    updateDriftTime(meta_data);
+                }
             }
 
-        }
-        if (mSamplesHaveSameSize) {
-            if (mStszTableEntries->count() >= 2 && previousSampleSize != sampleSize) {
-                mSamplesHaveSameSize = false;
+            if (WARN_UNLESS(timestampUs >= 0ll, "for %s track", trackName)) {
+                copy->release();
+                mSource->stop();
+                mIsMalformed = true;
+                break;
             }
-            previousSampleSize = sampleSize;
-        }
-        ALOGV("%s timestampUs/lastTimestampUs: %" PRId64 "/%" PRId64,
-                trackName, timestampUs, lastTimestampUs);
-        lastDurationUs = timestampUs - lastTimestampUs;
-        lastDurationTicks = currDurationTicks;
-        lastTimestampUs = timestampUs;
 
-        if (isSync != 0) {
-            addOneStssTableEntry(mStszTableEntries->count());
-        }
-
-        if (mTrackingProgressStatus) {
-            if (mPreviousTrackTimeUs <= 0) {
-                mPreviousTrackTimeUs = mStartTimestampUs;
+            ALOGV("%s media time stamp: %" PRId64 " and previous paused duration %" PRId64,
+                    trackName, timestampUs, previousPausedDurationUs);
+            if (timestampUs > mTrackDurationUs) {
+                mTrackDurationUs = timestampUs;
             }
-            trackProgressStatus(timestampUs);
+
+            // We need to use the time scale based ticks, rather than the
+            // timestamp itself to determine whether we have to use a new
+            // stts entry, since we may have rounding errors.
+            // The calculation is intended to reduce the accumulated
+            // rounding errors.
+            currDurationTicks =
+                ((timestampUs * mTimeScale + 500000LL) / 1000000LL -
+                    (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
+            if (currDurationTicks < 0ll) {
+                ALOGE("do not support out of order frames (timestamp: %lld < last: %lld) for %s track",
+                        (long long)timestampUs, (long long)lastTimestampUs, trackName);
+                copy->release();
+                mSource->stop();
+                mIsMalformed = true;
+                break;
+            }
+
+            // if the duration is different for this sample, see if it is close enough to the previous
+            // duration that we can fudge it and use the same value, to avoid filling the stts table
+            // with lots of near-identical entries.
+            // "close enough" here means that the current duration needs to be adjusted by less
+            // than 0.1 milliseconds
+            if (lastDurationTicks && (currDurationTicks != lastDurationTicks)) {
+                int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
+                        + (mTimeScale / 2)) / mTimeScale;
+                if (deltaUs > -100 && deltaUs < 100) {
+                    // use previous ticks, and adjust timestamp as if it was actually that number
+                    // of ticks
+                    currDurationTicks = lastDurationTicks;
+                    timestampUs += deltaUs;
+                }
+            }
+            mStszTableEntries->add(htonl(sampleSize));
+            if (mStszTableEntries->count() > 2) {
+
+                // Force the first sample to have its own stts entry so that
+                // we can adjust its value later to maintain the A/V sync.
+                if (mStszTableEntries->count() == 3 || currDurationTicks != lastDurationTicks) {
+                    addOneSttsTableEntry(sampleCount, lastDurationTicks);
+                    sampleCount = 1;
+                } else {
+                    ++sampleCount;
+                }
+
+            }
+            if (mSamplesHaveSameSize) {
+                if (mStszTableEntries->count() >= 2 && previousSampleSize != sampleSize) {
+                    mSamplesHaveSameSize = false;
+                }
+                previousSampleSize = sampleSize;
+            }
+            ALOGV("%s timestampUs/lastTimestampUs: %" PRId64 "/%" PRId64,
+                    trackName, timestampUs, lastTimestampUs);
+            lastDurationUs = timestampUs - lastTimestampUs;
+            lastDurationTicks = currDurationTicks;
+            lastTimestampUs = timestampUs;
+
+            if (isSync != 0) {
+                addOneStssTableEntry(mStszTableEntries->count());
+            }
+
+            if (mTrackingProgressStatus) {
+                if (mPreviousTrackTimeUs <= 0) {
+                    mPreviousTrackTimeUs = mStartTimestampUs;
+                }
+                trackProgressStatus(timestampUs);
+            }
         }
         if (!hasMultipleTracks) {
             size_t bytesWritten;
@@ -4331,9 +4334,12 @@
     }
 
     // patch up the mPrimaryItemId and count items with prop associations
+    uint16_t firstVisibleItemId = 0;
     for (size_t index = 0; index < mItems.size(); index++) {
         if (mItems[index].isPrimary) {
             mPrimaryItemId = mItems[index].itemId;
+        } else if (!firstVisibleItemId && !mItems[index].isHidden) {
+            firstVisibleItemId = mItems[index].itemId;
         }
 
         if (!mItems[index].properties.empty()) {
@@ -4342,8 +4348,13 @@
     }
 
     if (mPrimaryItemId == 0) {
-        ALOGW("didn't find primary, using first item");
-        mPrimaryItemId = mItems[0].itemId;
+        if (firstVisibleItemId > 0) {
+            ALOGW("didn't find primary, using first visible item");
+            mPrimaryItemId = firstVisibleItemId;
+        } else {
+            ALOGW("no primary and no visible item, using first item");
+            mPrimaryItemId = mItems[0].itemId;
+        }
     }
 
     beginBox("meta");
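
The stts "fudging" rule added above is easy to misread, so here is a standalone sketch (plain C++, not the MPEG4Writer code itself) of the arithmetic: sample durations are computed in timescale ticks from rounded timestamps, and a duration that differs from the previous one by less than 0.1 ms is snapped back to the previous value so near-identical stts entries collapse into one. The 90 kHz timescale and the jittered ~30 fps timestamps are illustrative values only.

    // Sketch of the stts duration "fudging" rule, with illustrative inputs.
    #include <cstdint>
    #include <cstdio>

    static int64_t toTicks(int64_t timestampUs, int64_t timeScale) {
        // same rounding as the writer: convert microseconds to timescale ticks
        return (timestampUs * timeScale + 500000LL) / 1000000LL;
    }

    int main() {
        const int64_t timeScale = 90000;   // 90 kHz video timescale (illustrative)
        int64_t lastTimestampUs = 0;
        int64_t lastDurationTicks = 0;

        // Roughly 30 fps timestamps with a little jitter on the second sample.
        const int64_t inputUs[] = {33366, 66700, 100100};

        for (int64_t timestampUs : inputUs) {
            int64_t currDurationTicks =
                    toTicks(timestampUs, timeScale) - toTicks(lastTimestampUs, timeScale);
            if (lastDurationTicks && currDurationTicks != lastDurationTicks) {
                int64_t deltaUs = ((lastDurationTicks - currDurationTicks) * 1000000LL
                        + timeScale / 2) / timeScale;
                if (deltaUs > -100 && deltaUs < 100) {
                    // close enough: reuse the previous duration and nudge the timestamp
                    currDurationTicks = lastDurationTicks;
                    timestampUs += deltaUs;
                }
            }
            printf("ts=%lldus duration=%lld ticks\n",
                   (long long)timestampUs, (long long)currDurationTicks);
            lastDurationTicks = currDurationTicks;
            lastTimestampUs = timestampUs;
        }
        return 0;
    }

With these inputs the second sample works out to 3000 ticks, which is within 0.1 ms of the previous 3003, so it is snapped back and shares the previous stts entry.
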
diff --git a/media/libstagefright/codec2/Android.bp b/media/libstagefright/codec2/Android.bp
index 2f0deb4..696a062 100644
--- a/media/libstagefright/codec2/Android.bp
+++ b/media/libstagefright/codec2/Android.bp
@@ -7,6 +7,11 @@
 
     srcs: ["C2.cpp"],
 
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
+
     include_dirs: [
         "frameworks/av/media/libstagefright/codec2/include",
         "frameworks/native/include/media/hardware",
diff --git a/media/libstagefright/codec2/include/C2Component.h b/media/libstagefright/codec2/include/C2Component.h
index 2dbf7ea..a555b35 100644
--- a/media/libstagefright/codec2/include/C2Component.h
+++ b/media/libstagefright/codec2/include/C2Component.h
@@ -360,6 +360,7 @@
         C2DomainKind domain;       ///< component domain (e.g. audio or video)
         C2ComponentKind type;      ///< component type (e.g. encoder, decoder or filter)
         C2StringLiteral mediaType; ///< media type supported by the component
+        C2ComponentPriority priority; ///< priority used to determine component ordering
 
         /**
          * name alias(es) for backward compatibility.
@@ -569,7 +570,6 @@
      */
     virtual std::shared_ptr<C2ComponentInterface> intf() = 0;
 
-protected:
     virtual ~C2Component() = default;
 };
 
@@ -724,11 +724,11 @@
     /**
      * Returns the list of components supported by this component store.
      *
-     * This method may be momentarily blocking, but MUST return within 5ms.
+     * This method MUST return within 500ms.
      *
      * \retval vector of component information.
      */
-    virtual std::vector<std::shared_ptr<const C2Component::Traits>> listComponents_sm() const = 0;
+    virtual std::vector<std::shared_ptr<const C2Component::Traits>> listComponents() = 0;
 
     // -------------------------------------- UTILITY METHODS --------------------------------------
 
diff --git a/media/libstagefright/codec2/vndk/C2Store.cpp b/media/libstagefright/codec2/vndk/C2Store.cpp
index 73ffaea..460cc60 100644
--- a/media/libstagefright/codec2/vndk/C2Store.cpp
+++ b/media/libstagefright/codec2/vndk/C2Store.cpp
@@ -20,12 +20,26 @@
 #include <C2Component.h>
 #include <C2PlatformSupport.h>
 
+#define LOG_TAG "C2Store"
+#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <dlfcn.h>
+
 #include <map>
 #include <memory>
 #include <mutex>
 
 namespace android {
 
+/**
+ * The platform allocator store provides the basic allocator types for the framework, based on ion
+ * and gralloc. Allocators are not meant to be updatable.
+ *
+ * \todo Provide allocator based on ashmem
+ * \todo Move ion allocation into its HIDL or provide some mapping from memory usage to ion flags
+ * \todo Make this allocator store extendable
+ */
 class C2PlatformAllocatorStore : public C2AllocatorStore {
 public:
     enum : id_t {
@@ -37,9 +51,11 @@
         /* ionmapper */
     );
 
-    virtual c2_status_t fetchAllocator(id_t id, std::shared_ptr<C2Allocator> *const allocator) override;
+    virtual c2_status_t fetchAllocator(
+            id_t id, std::shared_ptr<C2Allocator> *const allocator) override;
 
-    virtual std::vector<std::shared_ptr<const C2Allocator::Traits>> listAllocators_nb() const override {
+    virtual std::vector<std::shared_ptr<const C2Allocator::Traits>> listAllocators_nb()
+            const override {
         return std::vector<std::shared_ptr<const C2Allocator::Traits>>(); /// \todo
     }
 
@@ -48,10 +64,10 @@
     }
 
 private:
-    // returns a shared-singleton ion allocator
+    /// returns a shared-singleton ion allocator
     std::shared_ptr<C2Allocator> fetchIonAllocator();
 
-    // returns a shared-singleton gralloc allocator
+    /// returns a shared-singleton gralloc allocator
     std::shared_ptr<C2Allocator> fetchGrallocAllocator();
 };
 
@@ -141,4 +157,385 @@
     return res;
 }
 
-} // namespace android
\ No newline at end of file
+class C2PlatformComponentStore : public C2ComponentStore {
+public:
+    virtual std::vector<std::shared_ptr<const C2Component::Traits>> listComponents() override;
+    virtual std::shared_ptr<C2ParamReflector> getParamReflector() const override;
+    virtual C2String getName() const override;
+    virtual c2_status_t querySupportedValues_nb(
+            std::vector<C2FieldSupportedValuesQuery> &fields) const override;
+    virtual c2_status_t querySupportedParams_nb(
+            std::vector<std::shared_ptr<C2ParamDescriptor>> *const params) const override;
+    virtual c2_status_t query_sm(
+            const std::vector<C2Param *const> &stackParams,
+            const std::vector<C2Param::Index> &heapParamIndices,
+            std::vector<std::unique_ptr<C2Param>> *const heapParams) const override;
+    virtual c2_status_t createInterface(
+            C2String name, std::shared_ptr<C2ComponentInterface> *const interface) override;
+    virtual c2_status_t createComponent(
+            C2String name, std::shared_ptr<C2Component> *const component) override;
+    virtual c2_status_t copyBuffer(
+            std::shared_ptr<C2GraphicBuffer> src, std::shared_ptr<C2GraphicBuffer> dst) override;
+    virtual c2_status_t config_sm(
+            const std::vector<C2Param *const> &params,
+            std::vector<std::unique_ptr<C2SettingResult>> *const failures) override;
+    virtual c2_status_t commit_sm(
+            const std::vector<C2Param *const> &params,
+            std::vector<std::unique_ptr<C2SettingResult>> *const failures) override;
+
+    C2PlatformComponentStore();
+
+    virtual ~C2PlatformComponentStore() override = default;
+
+private:
+
+    /**
+     * An object encapsulating a loaded component module.
+     *
+     * \todo provide a way to add traits to known components here to avoid loading the .so-s
+     * for listComponents
+     */
+    struct ComponentModule : public C2ComponentFactory,
+            public std::enable_shared_from_this<ComponentModule> {
+        virtual c2_status_t createComponent(
+                c2_node_id_t id, std::shared_ptr<C2Component> *component,
+                ComponentDeleter deleter = std::default_delete<C2Component>()) override;
+        virtual c2_status_t createInterface(
+                c2_node_id_t id, std::shared_ptr<C2ComponentInterface> *interface,
+                InterfaceDeleter deleter = std::default_delete<C2ComponentInterface>()) override;
+
+        /**
+         * \returns the traits of the component in this module.
+         */
+        std::shared_ptr<const C2Component::Traits> getTraits();
+
+        /**
+         * Creates an uninitialized component module.
+         *
+         * \note Only used by ComponentLoader.
+         */
+        ComponentModule()
+            : mInit(C2_NO_INIT), mLibHandle(nullptr), createFactory(nullptr),
+              destroyFactory(nullptr), mComponentFactory(nullptr) {}
+
+        /**
+         * Initializes a component module with a given library path. Must be called exactly once.
+         *
+         * \note Only used by ComponentLoader.
+         *
+         * \param libPath[in] library path (or name)
+         *
+         * \retval C2_OK        the component module has been successfully loaded
+         * \retval C2_NO_MEMORY not enough memory to load the component module
+         * \retval C2_NOT_FOUND could not locate the component module
+         * \retval C2_CORRUPTED the component module could not be loaded (unexpected)
+         * \retval C2_REFUSED   permission denied to load the component module (unexpected)
+         * \retval C2_TIMED_OUT could not load the module within the time limit (unexpected)
+         */
+        c2_status_t init(std::string libPath);
+
+        virtual ~ComponentModule() override;
+
+    protected:
+        std::recursive_mutex mLock; ///< lock protecting mTraits
+        std::shared_ptr<C2Component::Traits> mTraits; ///< cached component traits
+
+        c2_status_t mInit; ///< initialization result
+
+        void *mLibHandle; ///< loaded library handle
+        C2ComponentFactory::CreateCodec2FactoryFunc createFactory; ///< loaded create function
+        C2ComponentFactory::DestroyCodec2FactoryFunc destroyFactory; ///< loaded destroy function
+        C2ComponentFactory *mComponentFactory; ///< loaded/created component factory
+    };
+
+    /**
+     * An object encapsulating a loadable component module.
+     *
+     * \todo make this also work for enumerations
+     */
+    struct ComponentLoader {
+        /**
+         * Load the component module.
+         *
+         * This method simply returns the component module if it is already loaded, or
+         * attempts to load it if it is not.
+         *
+         * \param module[out] pointer to the shared pointer where the loaded module shall be stored.
+         *                    This will be nullptr on error.
+         *
+         * \retval C2_OK        the component module has been successfully loaded
+         * \retval C2_NO_MEMORY not enough memory to load the component module
+         * \retval C2_NOT_FOUND could not locate the component module
+         * \retval C2_CORRUPTED the component module could not be loaded
+         * \retval C2_REFUSED   permission denied to load the component module
+         */
+        c2_status_t fetchModule(std::shared_ptr<ComponentModule> *module) {
+            c2_status_t res = C2_OK;
+            std::lock_guard<std::mutex> lock(mMutex);
+            std::shared_ptr<ComponentModule> localModule = mModule.lock();
+            if (localModule == nullptr) {
+                localModule = std::make_shared<ComponentModule>();
+                res = localModule->init(mLibPath);
+                if (res == C2_OK) {
+                    mModule = localModule;
+                }
+            }
+            *module = localModule;
+            return res;
+        }
+
+        /**
+         * Creates a component loader for a specific library path (or name).
+         */
+        ComponentLoader(std::string libPath)
+            : mLibPath(libPath) {}
+
+    private:
+        std::mutex mMutex; ///< mutex guarding the module
+        std::weak_ptr<ComponentModule> mModule; ///< weak reference to the loaded module
+        std::string mLibPath; ///< library path (or name)
+    };
+
+    /**
+     * Retrieves the component loader for a component.
+     *
+     * \return a non-ref-holding pointer to the component loader.
+     *
+     * \retval C2_OK        the component loader has been successfully retrieved
+     * \retval C2_NO_MEMORY not enough memory to locate the component loader
+     * \retval C2_NOT_FOUND could not locate the component to be loaded
+     * \retval C2_CORRUPTED the component loader could not be identified due to some modules being
+     *                      corrupted (this can happen if the name does not refer to an already
+     *                      identified component but some components could not be loaded due to
+     *                      a bad library)
+     * \retval C2_REFUSED   permission denied to find the component loader for the named component
+     *                      (this can happen if the name does not refer to an already identified
+     *                      component but some components could not be loaded due to lack of
+     *                      permissions)
+     */
+    c2_status_t findComponent(C2String name, ComponentLoader **loader);
+
+    std::map<C2String, ComponentLoader> mComponents; ///< map of component names to loaders
+};
+
+c2_status_t C2PlatformComponentStore::ComponentModule::init(std::string libPath) {
+    ALOGV("in %s", __func__);
+    ALOGV("loading dll");
+    mLibHandle = dlopen(libPath.c_str(), RTLD_NOW|RTLD_NODELETE);
+    if (mLibHandle == nullptr) {
+        // could be an access/symbol issue, or the library simply not being there
+        ALOGD("could not dlopen %s: %s", libPath.c_str(), dlerror());
+        mInit = C2_CORRUPTED;
+    } else {
+        createFactory =
+            (C2ComponentFactory::CreateCodec2FactoryFunc)dlsym(mLibHandle, "CreateCodec2Factory");
+        destroyFactory =
+            (C2ComponentFactory::DestroyCodec2FactoryFunc)dlsym(mLibHandle, "DestroyCodec2Factory");
+
+        if (createFactory == nullptr || destroyFactory == nullptr) {
+            // guard against missing entry points so we never call through a null pointer
+            ALOGD("could not find factory entry points in %s", libPath.c_str());
+            mInit = C2_CORRUPTED;
+        } else {
+            mComponentFactory = createFactory();
+            if (mComponentFactory == nullptr) {
+                ALOGD("could not create factory in %s", libPath.c_str());
+                mInit = C2_NO_MEMORY;
+            } else {
+                mInit = C2_OK;
+            }
+        }
+    }
+    return mInit;
+}
+
+C2PlatformComponentStore::ComponentModule::~ComponentModule() {
+    ALOGV("in %s", __func__);
+    if (destroyFactory && mComponentFactory) {
+        destroyFactory(mComponentFactory);
+    }
+    if (mLibHandle) {
+        ALOGV("unloading dll");
+        dlclose(mLibHandle);
+    }
+}
+
+c2_status_t C2PlatformComponentStore::ComponentModule::createInterface(
+        c2_node_id_t id, std::shared_ptr<C2ComponentInterface> *interface,
+        std::function<void(::android::C2ComponentInterface*)> deleter) {
+    interface->reset();
+    if (mInit != C2_OK) {
+        return mInit;
+    }
+    std::shared_ptr<ComponentModule> module = shared_from_this();
+    c2_status_t res = mComponentFactory->createInterface(
+            id, interface, [module, deleter](C2ComponentInterface *p) mutable {
+                // capture module so that we ensure we still have it while deleting interface
+                deleter(p); // delete interface first
+                module.reset(); // remove module ref (not technically needed)
+    });
+    return res;
+}
+
+c2_status_t C2PlatformComponentStore::ComponentModule::createComponent(
+        c2_node_id_t id, std::shared_ptr<C2Component> *component,
+        std::function<void(::android::C2Component*)> deleter) {
+    component->reset();
+    if (mInit != C2_OK) {
+        return mInit;
+    }
+    std::shared_ptr<ComponentModule> module = shared_from_this();
+    c2_status_t res = mComponentFactory->createComponent(
+            id, component, [module, deleter](C2Component *p) mutable {
+                // capture module so that we ensure we still have it while deleting component
+                deleter(p); // delete component first
+                module.reset(); // remove module ref (not technically needed)
+    });
+    return res;
+}
+
+std::shared_ptr<const C2Component::Traits> C2PlatformComponentStore::ComponentModule::getTraits() {
+    std::unique_lock<std::recursive_mutex> lock(mLock);
+    if (!mTraits) {
+        std::shared_ptr<C2ComponentInterface> intf;
+        c2_status_t res = createInterface(0, &intf);
+        if (res != C2_OK) {
+            return nullptr;
+        }
+
+        std::shared_ptr<C2Component::Traits> traits(new (std::nothrow) C2Component::Traits);
+        if (traits) {
+            // traits->name = intf->getName();
+        }
+
+        mTraits = traits;
+    }
+    return mTraits;
+}
+
+C2PlatformComponentStore::C2PlatformComponentStore() {
+    // TODO: move this also into a .so so it can be updated
+    mComponents.emplace("c2.google.avc.decoder", "libstagefright_soft_c2avcdec.so");
+}
+
+c2_status_t C2PlatformComponentStore::copyBuffer(
+        std::shared_ptr<C2GraphicBuffer> src, std::shared_ptr<C2GraphicBuffer> dst) {
+    (void)src;
+    (void)dst;
+    return C2_OMITTED;
+}
+
+c2_status_t C2PlatformComponentStore::query_sm(
+        const std::vector<C2Param *const> &stackParams,
+        const std::vector<C2Param::Index> &heapParamIndices,
+        std::vector<std::unique_ptr<C2Param>> *const heapParams) const {
+    // there are no supported configs
+    (void)heapParams;
+    return stackParams.empty() && heapParamIndices.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+c2_status_t C2PlatformComponentStore::config_sm(
+        const std::vector<C2Param *const> &params,
+        std::vector<std::unique_ptr<C2SettingResult>> *const failures) {
+    // there are no supported configs
+    (void)failures;
+    return params.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+c2_status_t C2PlatformComponentStore::commit_sm(
+        const std::vector<C2Param *const> &params,
+        std::vector<std::unique_ptr<C2SettingResult>> *const failures) {
+    // there are no supported configs
+    (void)failures;
+    return params.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+std::vector<std::shared_ptr<const C2Component::Traits>> C2PlatformComponentStore::listComponents() {
+    // This method SHALL return within 500ms.
+    std::vector<std::shared_ptr<const C2Component::Traits>> list;
+    for (auto &it : mComponents) {
+        ComponentLoader &loader = it.second;
+        std::shared_ptr<ComponentModule> module;
+        c2_status_t res = loader.fetchModule(&module);
+        if (res == C2_OK) {
+            std::shared_ptr<const C2Component::Traits> traits = module->getTraits();
+            if (traits) {
+                list.push_back(traits);
+            }
+        }
+    }
+    return list;
+}
+
+c2_status_t C2PlatformComponentStore::findComponent(C2String name, ComponentLoader **loader) {
+    *loader = nullptr;
+    auto pos = mComponents.find(name);
+    // TODO: check aliases
+    if (pos == mComponents.end()) {
+        return C2_NOT_FOUND;
+    }
+    *loader = &pos->second;
+    return C2_OK;
+}
+
+c2_status_t C2PlatformComponentStore::createComponent(
+        C2String name, std::shared_ptr<C2Component> *const component) {
+    // This method SHALL return within 100ms.
+    component->reset();
+    ComponentLoader *loader;
+    c2_status_t res = findComponent(name, &loader);
+    if (res == C2_OK) {
+        std::shared_ptr<ComponentModule> module;
+        res = loader->fetchModule(&module);
+        if (res == C2_OK) {
+            // TODO: get a unique node ID
+            res = module->createComponent(0, component);
+        }
+    }
+    return res;
+}
+
+c2_status_t C2PlatformComponentStore::createInterface(
+        C2String name, std::shared_ptr<C2ComponentInterface> *const interface) {
+    // This method SHALL return within 100ms.
+    interface->reset();
+    ComponentLoader *loader;
+    c2_status_t res = findComponent(name, &loader);
+    if (res == C2_OK) {
+        std::shared_ptr<ComponentModule> module;
+        res = loader->fetchModule(&module);
+        if (res == C2_OK) {
+            // TODO: get a unique node ID
+            res = module->createInterface(0, interface);
+        }
+    }
+    return res;
+}
+
+c2_status_t C2PlatformComponentStore::querySupportedParams_nb(
+        std::vector<std::shared_ptr<C2ParamDescriptor>> *const params) const {
+    // there are no supported config params
+    (void)params;
+    return C2_OK;
+}
+
+c2_status_t C2PlatformComponentStore::querySupportedValues_nb(
+        std::vector<C2FieldSupportedValuesQuery> &fields) const {
+    // there are no supported config params
+    return fields.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+C2String C2PlatformComponentStore::getName() const {
+    return "android.componentStore.platform";
+}
+
+std::shared_ptr<C2ParamReflector> C2PlatformComponentStore::getParamReflector() const {
+    // TODO
+    return nullptr;
+}
+
+std::shared_ptr<C2ComponentStore> GetCodec2PlatformComponentStore() {
+    static std::mutex mutex;
+    static std::weak_ptr<C2ComponentStore> platformStore;
+    std::lock_guard<std::mutex> lock(mutex);
+    std::shared_ptr<C2ComponentStore> store = platformStore.lock();
+    if (store == nullptr) {
+        store = std::make_shared<C2PlatformComponentStore>();
+        platformStore = store;
+    }
+    return store;
+}
+
+} // namespace android
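
Both ComponentLoader::fetchModule() and GetCodec2PlatformComponentStore() above use the same caching idiom: a std::weak_ptr remembers the shared instance so concurrent clients reuse it, yet the object can be destroyed once the last client lets go and is transparently recreated on the next request. A minimal generic sketch of that idiom follows; the names (Expensive, fetchShared) are illustrative and not part of the Codec2 API.

    #include <memory>
    #include <mutex>

    struct Expensive {};  // stand-in for a loaded module or a component store

    std::shared_ptr<Expensive> fetchShared() {
        static std::mutex mutex;
        static std::weak_ptr<Expensive> cache;

        std::lock_guard<std::mutex> lock(mutex);
        std::shared_ptr<Expensive> instance = cache.lock();
        if (instance == nullptr) {
            instance = std::make_shared<Expensive>();  // (re)create on demand
            cache = instance;                          // remember it weakly
        }
        return instance;
    }
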
diff --git a/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h b/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h
index 8e45705..2281dab 100644
--- a/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h
+++ b/media/libstagefright/codec2/vndk/include/C2PlatformSupport.h
@@ -19,6 +19,7 @@
 
 #include <C2Component.h>
 
+#include <functional>
 #include <memory>
 
 namespace android {
@@ -64,14 +65,17 @@
  */
 class C2ComponentFactory {
 public:
+    typedef std::function<void(::android::C2Component*)> ComponentDeleter;
+    typedef std::function<void(::android::C2ComponentInterface*)> InterfaceDeleter;
+
     /**
      * Creates a component.
      *
      * This method SHALL return within 100ms.
      *
+     * \param id        component ID for the created component
      * \param component shared pointer where the created component is stored. Cleared on
      *                  failure and updated on success.
-     * \param id        component ID for the created component
      *
      * \retval C2_OK        the component was created successfully
      * \retval C2_TIMED_OUT could not create the component within the time limit (unexpected)
@@ -80,16 +84,17 @@
      * \retval C2_NO_MEMORY not enough memory to create the component
      */
     virtual c2_status_t createComponent(
-            std::shared_ptr<C2Component>* const component, c2_node_id_t id) = 0;
+            c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+            ComponentDeleter deleter = std::default_delete<C2Component>()) = 0;
 
     /**
      * Creates a component interface.
      *
      * This method SHALL return within 100ms.
      *
+     * \param id        component interface ID for the created interface
      * \param interface shared pointer where the created interface is stored. Cleared on
      *                  failure and updated on success.
-     * \param id        component interface ID for the created interface
      *
      * \retval C2_OK        the component interface was created successfully
      * \retval C2_TIMED_OUT could not create the component interface within the time limit
@@ -100,11 +105,22 @@
      * \retval C2_NO_MEMORY not enough memory to create the component interface
      */
     virtual c2_status_t createInterface(
-            std::shared_ptr<C2ComponentInterface>* const interface, c2_node_id_t id) = 0;
+            c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+            InterfaceDeleter deleter = std::default_delete<C2ComponentInterface>()) = 0;
 
     virtual ~C2ComponentFactory() = default;
+
+    typedef ::android::C2ComponentFactory* (*CreateCodec2FactoryFunc)(void);
+    typedef void (*DestroyCodec2FactoryFunc)(::android::C2ComponentFactory*);
 };
 
+/**
+ * Returns the platform component store.
+ * \retval nullptr if the platform component store could not be obtained
+ */
+std::shared_ptr<C2ComponentStore> GetCodec2PlatformComponentStore();
+
+
 } // namespace android
 
 #endif // STAGEFRIGHT_CODEC2_PLATFORM_SUPPORT_H_
diff --git a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp
index b904d26..2423629 100644
--- a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp
+++ b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.cpp
@@ -261,7 +261,7 @@
       mFrameRate(0u, 0),
       mBlocksPerSecond(0u, 0),
       mParamReflector(new ParamReflector) {
-
+    ALOGV("in %s", __func__);
     mInputPortMime = C2PortMimeConfig::input::alloc_unique(strlen(CODEC_MIME_TYPE) + 1);
     strcpy(mInputPortMime->m.mValue, CODEC_MIME_TYPE);
     mOutputPortMime = C2PortMimeConfig::output::alloc_unique(strlen(MEDIA_MIMETYPE_VIDEO_RAW) + 1);
@@ -430,6 +430,10 @@
             false, "_output_block_pools", mOutputBlockPools.get()));
 }
 
+C2SoftAvcDecIntf::~C2SoftAvcDecIntf() {
+    ALOGV("in %s", __func__);
+}
+
 C2String C2SoftAvcDecIntf::getName() const {
     return mName;
 }
@@ -653,6 +657,7 @@
       mWidth(320),
       mHeight(240),
       mInputOffset(0) {
+    ALOGV("in %s", __func__);
     GETTIME(&mTimeStart, NULL);
 
     // If input dump is enabled, then open create an empty file
@@ -661,6 +666,7 @@
 }
 
 C2SoftAvcDec::~C2SoftAvcDec() {
+    ALOGV("in %s", __func__);
     CHECK_EQ(deInitDecoder(), (status_t)OK);
 }
 
@@ -1505,14 +1511,17 @@
 class C2SoftAvcDecFactory : public C2ComponentFactory {
 public:
     virtual c2_status_t createComponent(
-            std::shared_ptr<C2Component>* const component, c2_node_id_t id) override {
-        *component = std::make_shared<C2SoftAvcDec>("avc", id);
+            c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+            std::function<void(::android::C2Component*)> deleter) override {
+        *component = std::shared_ptr<C2Component>(new C2SoftAvcDec("avc", id), deleter);
         return C2_OK;
     }
 
     virtual c2_status_t createInterface(
-            std::shared_ptr<C2ComponentInterface>* const interface, c2_node_id_t id) override {
-        *interface = std::make_shared<C2SoftAvcDecIntf>("avc", id);
+            c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+            std::function<void(::android::C2ComponentInterface*)> deleter) override {
+        *interface =
+            std::shared_ptr<C2ComponentInterface>(new C2SoftAvcDecIntf("avc", id), deleter);
         return C2_OK;
     }
 
@@ -1522,9 +1531,11 @@
 }  // namespace android
 
 extern "C" ::android::C2ComponentFactory* CreateCodec2Factory() {
+    ALOGV("in %s", __func__);
     return new ::android::C2SoftAvcDecFactory();
 }
 
 extern "C" void DestroyCodec2Factory(::android::C2ComponentFactory* factory) {
+    ALOGV("in %s", __func__);
     delete factory;
 }
diff --git a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h
index 91ec003..28f1dfd 100644
--- a/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h
+++ b/media/libstagefright/codecs/avcdec/C2SoftAvcDec.h
@@ -77,7 +77,7 @@
     };
 
     C2SoftAvcDecIntf(const char *name, c2_node_id_t id);
-    virtual ~C2SoftAvcDecIntf() = default;
+    virtual ~C2SoftAvcDecIntf() override;
 
     // From C2ComponentInterface
     virtual C2String getName() const override;
diff --git a/media/libstagefright/codecs/cmds/Android.bp b/media/libstagefright/codecs/cmds/Android.bp
index e44e53c..ad0bd2d 100644
--- a/media/libstagefright/codecs/cmds/Android.bp
+++ b/media/libstagefright/codecs/cmds/Android.bp
@@ -22,7 +22,6 @@
         "libstagefright",
         "libstagefright_codec2",
         "libstagefright_foundation",
-        "libstagefright_soft_c2avcdec",
         "libui",
         "libutils",
     ],
diff --git a/media/libstagefright/codecs/cmds/codec2.cpp b/media/libstagefright/codecs/cmds/codec2.cpp
index 8e2c4b9..1972a7a 100644
--- a/media/libstagefright/codecs/cmds/codec2.cpp
+++ b/media/libstagefright/codecs/cmds/codec2.cpp
@@ -211,10 +211,9 @@
         return;
     }
 
-    std::unique_ptr<C2ComponentFactory> factory(CreateCodec2Factory());
+    std::shared_ptr<C2ComponentStore> store = GetCodec2PlatformComponentStore();
     std::shared_ptr<C2Component> component;
-    (void)factory->createComponent(&component, 0);
-    DestroyCodec2Factory(factory.release());
+    (void)store->createComponent("c2.google.avc.decoder", &component);
 
     (void)component->setListener_sm(mListener);
     std::unique_ptr<C2PortBlockPoolsTuning::output> pools =
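
For completeness, a hedged sketch of driving the new store API from client code, in the spirit of the codec2.cpp change above: obtain the platform store, enumerate its components, and create an interface by name. Error handling is deliberately minimal, and the component name is the one registered in C2Store.cpp.

    #include <memory>

    #include <C2Component.h>
    #include <C2PlatformSupport.h>

    using namespace android;

    void probePlatformStore() {
        std::shared_ptr<C2ComponentStore> store = GetCodec2PlatformComponentStore();
        if (!store) {
            return;
        }

        // listComponents() loads each registered module and reports its traits.
        for (const std::shared_ptr<const C2Component::Traits> &traits : store->listComponents()) {
            (void)traits;  // e.g. inspect the media type or domain here
        }

        std::shared_ptr<C2ComponentInterface> intf;
        if (store->createInterface("c2.google.avc.decoder", &intf) == C2_OK) {
            // the interface can be queried without instantiating the component
        }
    }
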
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 71d625f..bc3e57c 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -898,6 +898,9 @@
         }
     }
 
+    if (meta->get() == NULL) {
+        return ERROR_MALFORMED;
+    }
     return OK;
 }
 
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index a70005e..f331dbb 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -46,6 +46,36 @@
 
 namespace android {
 
+namespace {
+// kTimestampFluctuation is an upper bound of timestamp fluctuation from the
+// source that GraphicBufferSource allows. The unit of kTimestampFluctuation is
+// frames. More specifically, GraphicBufferSource will drop a frame if
+//
+// expectedNewFrameTimestamp - actualNewFrameTimestamp <
+//     (0.5 - kTimestampFluctuation) * expectedTimePeriodBetweenFrames
+//
+// where
+// - expectedNewFrameTimestamp is the calculated ideal timestamp of the new
+//   incoming frame
+// - actualNewFrameTimestamp is the timestamp received from the source
+// - expectedTimePeriodBetweenFrames is the ideal difference of the timestamps
+//   of two adjacent frames
+//
+// See GraphicBufferSource::calculateCodecTimestamp_l() for more detail about
+// how kTimestampFluctuation is used.
+//
+// kTimestampFluctuation should be non-negative. A higher value lowers the
+// chance of dropping frames, but also allows a larger difference between the
+// source timestamp and the interpreted (snapped) timestamp.
+//
+// The value of 0.05 means that GraphicBufferSource expects the input timestamps
+// to fluctuate no more than 5% from the regular time period.
+//
+// TODO: Justify the choice of this value, or make it configurable.
+constexpr double kTimestampFluctuation = 0.05;
+}
+
 /**
  * A copiable object managing a buffer in the buffer cache managed by the producer. This object
  * holds a reference to the buffer, and maintains which buffer slot it belongs to (if any), and
@@ -732,14 +762,16 @@
             mFrameCount = 0;
         } else {
             // snap to nearest capture point
-            int64_t nFrames = std::llround(
-                    (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000);
-            if (nFrames <= 0) {
+            double nFrames = (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000;
+            if (nFrames < 0.5 - kTimestampFluctuation) {
                 // skip this frame as it's too close to previous capture
                 ALOGV("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
                 return false;
             }
-            mFrameCount += nFrames;
+            if (nFrames <= 1.0) {
+                nFrames = 1.0;
+            }
+            mFrameCount += std::llround(nFrames);
             mPrevCaptureUs = mBaseCaptureUs + std::llround(
                     mFrameCount * 1000000 / mCaptureFps);
             mPrevFrameUs = mBaseFrameUs + std::llround(
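
The snapping rule described by the kTimestampFluctuation comment and implemented in calculateCodecTimestamp_l() above comes down to a few lines of arithmetic. The following standalone sketch (not the GraphicBufferSource code itself) shows the decision in isolation: a frame is dropped when it arrives less than (0.5 - kTimestampFluctuation) of a frame period after the previous capture point; otherwise it is snapped to the nearest capture point, but never onto the same point as the previous frame.

    #include <cmath>
    #include <cstdint>

    constexpr double kTimestampFluctuation = 0.05;

    // Returns false if the frame should be dropped; otherwise advances *frameCount
    // to the capture point the frame is snapped to.
    bool snapToCapturePoint(int64_t timeUs, int64_t prevCaptureUs,
                            double captureFps, int64_t *frameCount) {
        double nFrames = (timeUs - prevCaptureUs) * captureFps / 1000000.0;
        if (nFrames < 0.5 - kTimestampFluctuation) {
            return false;  // too close to the previous capture: drop the frame
        }
        if (nFrames <= 1.0) {
            nFrames = 1.0;  // never snap onto the previous frame's capture point
        }
        *frameCount += std::llround(nFrames);
        return true;
    }

Compared with the previous llround-based rule, this keeps frames that arrive between 0.45 and 0.5 of a period after the previous capture point instead of dropping them.
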
diff --git a/media/mtp/MtpDatabase.h b/media/mtp/MtpDatabase.h
index 2395f4f..f3f9720 100644
--- a/media/mtp/MtpDatabase.h
+++ b/media/mtp/MtpDatabase.h
@@ -45,6 +45,8 @@
                                             MtpObjectFormat format,
                                             bool succeeded) = 0;
 
+    virtual void                    doScanDirectory(const char* path) = 0;
+
     virtual MtpObjectHandleList*    getObjectList(MtpStorageID storageID,
                                             MtpObjectFormat format,
                                             MtpObjectHandle parent) = 0;
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 6080868..bb0414d 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -1148,6 +1148,7 @@
     ALOGV("Copying file from %s to %s", (const char*)fromPath, (const char*)path);
     if (format == MTP_FORMAT_ASSOCIATION) {
         int ret = makeFolder((const char *)path);
+        ret += copyRecursive(fromPath, path);
         if (ret) {
             result = MTP_RESPONSE_GENERAL_ERROR;
         }
@@ -1158,6 +1159,8 @@
     }
 
     mDatabase->endSendObject(path, handle, format, result);
+    if (format == MTP_FORMAT_ASSOCIATION)
+        mDatabase->doScanDirectory(path);
     mResponse.setParameter(1, handle);
     return result;
 }
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 11dedbb..6b20bca 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -52,6 +52,7 @@
 
 enum {
     kWhatActivityNotify,
+    kWhatAsyncNotify,
     kWhatRequestActivityNotifications,
     kWhatStopActivityNotifications,
 };
@@ -88,6 +89,11 @@
     bool mRequestedActivityNotification;
     OnCodecEvent mCallback;
     void *mCallbackUserData;
+
+    sp<AMessage> mAsyncNotify;
+    mutable Mutex mAsyncCallbackLock;
+    AMediaCodecOnAsyncNotifyCallback mAsyncCallback;
+    void *mAsyncCallbackUserData;
 };
 
 CodecHandler::CodecHandler(AMediaCodec *codec) {
@@ -128,6 +134,147 @@
             break;
         }
 
+        case kWhatAsyncNotify:
+        {
+             int32_t cbID;
+             if (!msg->findInt32("callbackID", &cbID)) {
+                 ALOGE("kWhatAsyncNotify: callbackID is expected.");
+                 break;
+             }
+
+             ALOGV("kWhatAsyncNotify: cbID = %d", cbID);
+
+             switch (cbID) {
+                 case MediaCodec::CB_INPUT_AVAILABLE:
+                 {
+                     int32_t index;
+                     if (!msg->findInt32("index", &index)) {
+                         ALOGE("CB_INPUT_AVAILABLE: index is expected.");
+                         break;
+                     }
+
+                     Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+                     // dispatch only when the callback pointer itself is set
+                     if (mCodec->mAsyncCallback.onAsyncInputAvailable != NULL) {
+                         mCodec->mAsyncCallback.onAsyncInputAvailable(
+                                 mCodec,
+                                 mCodec->mAsyncCallbackUserData,
+                                 index);
+                     }
+
+                     break;
+                 }
+
+                 case MediaCodec::CB_OUTPUT_AVAILABLE:
+                 {
+                     int32_t index;
+                     size_t offset;
+                     size_t size;
+                     int64_t timeUs;
+                     int32_t flags;
+
+                     if (!msg->findInt32("index", &index)) {
+                         ALOGE("CB_OUTPUT_AVAILABLE: index is expected.");
+                         break;
+                     }
+                     if (!msg->findSize("offset", &offset)) {
+                         ALOGE("CB_OUTPUT_AVAILABLE: offset is expected.");
+                         break;
+                     }
+                     if (!msg->findSize("size", &size)) {
+                         ALOGE("CB_OUTPUT_AVAILABLE: size is expected.");
+                         break;
+                     }
+                     if (!msg->findInt64("timeUs", &timeUs)) {
+                         ALOGE("CB_OUTPUT_AVAILABLE: timeUs is expected.");
+                         break;
+                     }
+                     if (!msg->findInt32("flags", &flags)) {
+                         ALOGE("CB_OUTPUT_AVAILABLE: flags is expected.");
+                         break;
+                     }
+
+                     AMediaCodecBufferInfo bufferInfo = {
+                         (int32_t)offset,
+                         (int32_t)size,
+                         timeUs,
+                         (uint32_t)flags};
+
+                     Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+                     // dispatch only when the callback pointer itself is set
+                     if (mCodec->mAsyncCallback.onAsyncOutputAvailable != NULL) {
+                         mCodec->mAsyncCallback.onAsyncOutputAvailable(
+                                 mCodec,
+                                 mCodec->mAsyncCallbackUserData,
+                                 index,
+                                 &bufferInfo);
+                     }
+
+                     break;
+                 }
+
+                 case MediaCodec::CB_OUTPUT_FORMAT_CHANGED:
+                 {
+                     sp<AMessage> format;
+                     if (!msg->findMessage("format", &format)) {
+                         ALOGE("CB_OUTPUT_FORMAT_CHANGED: format is expected.");
+                         break;
+                     }
+
+                     AMediaFormat *aMediaFormat = AMediaFormat_fromMsg(&format);
+
+                     Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+                     // dispatch only when the callback pointer itself is set
+                     if (mCodec->mAsyncCallback.onAsyncFormatChanged != NULL) {
+                         mCodec->mAsyncCallback.onAsyncFormatChanged(
+                                 mCodec,
+                                 mCodec->mAsyncCallbackUserData,
+                                 aMediaFormat);
+                     }
+
+                     break;
+                 }
+
+                 case MediaCodec::CB_ERROR:
+                 {
+                     status_t err;
+                     int32_t actionCode;
+                     AString detail;
+                     if (!msg->findInt32("err", &err)) {
+                         ALOGE("CB_ERROR: err is expected.");
+                         break;
+                     }
+                     if (!msg->findInt32("action", &actionCode)) {
+                         ALOGE("CB_ERROR: action is expected.");
+                         break;
+                     }
+                     msg->findString("detail", &detail);
+                     ALOGE("Decoder reported error(0x%x), actionCode(%d), detail(%s)",
+                           err, actionCode, detail.c_str());
+
+                     Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
+                     // dispatch only when the callback pointer itself is set
+                     if (mCodec->mAsyncCallback.onAsyncError != NULL) {
+                         mCodec->mAsyncCallback.onAsyncError(
+                                 mCodec,
+                                 mCodec->mAsyncCallbackUserData,
+                                 translate_error(err),
+                                 actionCode,
+                                 detail.c_str());
+                     }
+
+                     break;
+                 }
+
+                 default:
+                 {
+                     ALOGE("kWhatAsyncNotify: callbackID(%d) is unexpected.", cbID);
+                     break;
+                 }
+             }
+             break;
+        }
+
         case kWhatStopActivityNotifications:
         {
             sp<AReplyToken> replyID;
@@ -162,7 +309,7 @@
     size_t res = mData->mLooper->start(
             false,      // runOnCallingThread
             true,       // canCallJava XXX
-            PRIORITY_FOREGROUND);
+            PRIORITY_AUDIO);
     if (res != OK) {
         ALOGE("Failed to start the looper");
         AMediaCodec_delete(mData);
@@ -183,6 +330,9 @@
     mData->mRequestedActivityNotification = false;
     mData->mCallback = NULL;
 
+    mData->mAsyncCallback = {};
+    mData->mAsyncCallbackUserData = NULL;
+
     return mData;
 }
 
@@ -222,6 +372,32 @@
 }
 
 EXPORT
+media_status_t AMediaCodec_getName(
+        AMediaCodec *mData,
+        char** out_name) {
+    if (out_name == NULL) {
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+
+    AString compName;
+    status_t err = mData->mCodec->getName(&compName);
+    if (err != OK) {
+        return translate_error(err);
+    }
+    *out_name = strdup(compName.c_str());
+    return AMEDIA_OK;
+}
+
+EXPORT
+void AMediaCodec_releaseName(
+        AMediaCodec * /* mData */,
+        char* name) {
+    if (name != NULL) {
+        free(name);
+    }
+}
+
+EXPORT
 media_status_t AMediaCodec_configure(
         AMediaCodec *mData,
         const AMediaFormat* format,
@@ -236,8 +412,40 @@
         surface = (Surface*) window;
     }
 
-    return translate_error(mData->mCodec->configure(nativeFormat, surface,
-            crypto ? crypto->mCrypto : NULL, flags));
+    status_t err = mData->mCodec->configure(nativeFormat, surface,
+            crypto ? crypto->mCrypto : NULL, flags);
+    if (err != OK) {
+        ALOGE("configure: err(%d), failed with format: %s",
+              err, nativeFormat->debugString(0).c_str());
+    }
+    return translate_error(err);
+}
+
+EXPORT
+media_status_t AMediaCodec_setAsyncNotifyCallback(
+        AMediaCodec *mData,
+        AMediaCodecOnAsyncNotifyCallback callback,
+        void *userdata) {
+    if (mData->mAsyncNotify == NULL && userdata != NULL) {
+        mData->mAsyncNotify = new AMessage(kWhatAsyncNotify, mData->mHandler);
+        status_t err = mData->mCodec->setCallback(mData->mAsyncNotify);
+        if (err != OK) {
+            ALOGE("setAsyncNotifyCallback: err(%d), failed to set async callback", err);
+            return translate_error(err);
+        }
+    }
+
+    Mutex::Autolock _l(mData->mAsyncCallbackLock);
+    mData->mAsyncCallback = callback;
+    mData->mAsyncCallbackUserData = userdata;
+
+    return AMEDIA_OK;
+}
+
+
+EXPORT
+media_status_t AMediaCodec_releaseCrypto(AMediaCodec *mData) {
+    return translate_error(mData->mCodec->releaseCrypto());
 }
 
 EXPORT
@@ -282,6 +490,19 @@
 
 EXPORT
 uint8_t* AMediaCodec_getInputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
+    if (mData->mAsyncNotify != NULL) {
+        // Asynchronous mode
+        sp<MediaCodecBuffer> abuf;
+        if (mData->mCodec->getInputBuffer(idx, &abuf) != 0) {
+            return NULL;
+        }
+
+        if (out_size != NULL) {
+            *out_size = abuf->capacity();
+        }
+        return abuf->data();
+    }
+
     android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
     if (mData->mCodec->getInputBuffers(&abufs) == 0) {
         size_t n = abufs.size();
@@ -304,6 +525,19 @@
 
 EXPORT
 uint8_t* AMediaCodec_getOutputBuffer(AMediaCodec *mData, size_t idx, size_t *out_size) {
+    if (mData->mAsyncNotify != NULL) {
+        // Asynchronous mode
+        sp<MediaCodecBuffer> abuf;
+        if (mData->mCodec->getOutputBuffer(idx, &abuf) != 0) {
+            return NULL;
+        }
+
+        if (out_size != NULL) {
+            *out_size = abuf->capacity();
+        }
+        return abuf->data();
+    }
+
     android::Vector<android::sp<android::MediaCodecBuffer> > abufs;
     if (mData->mCodec->getOutputBuffers(&abufs) == 0) {
         size_t n = abufs.size();
@@ -367,6 +601,13 @@
 }
 
 EXPORT
+AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec *mData) {
+    sp<AMessage> format;
+    mData->mCodec->getInputFormat(&format);
+    return AMediaFormat_fromMsg(&format);
+}
+
+EXPORT
 AMediaFormat* AMediaCodec_getBufferFormat(AMediaCodec *mData, size_t index) {
     sp<AMessage> format;
     mData->mCodec->getOutputFormat(index, &format);
@@ -542,6 +783,16 @@
     return translate_error(err);
 }
 
+EXPORT
+bool AMediaCodecActionCode_isRecoverable(int32_t actionCode) {
+    return (actionCode == ACTION_CODE_RECOVERABLE);
+}
+
+EXPORT
+bool AMediaCodecActionCode_isTransient(int32_t actionCode) {
+    return (actionCode == ACTION_CODE_TRANSIENT);
+}
+
 
 EXPORT
 void AMediaCodecCryptoInfo_setPattern(AMediaCodecCryptoInfo *info,
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index ee27520..a9025c0 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -125,6 +125,14 @@
                 ret.appendFormat("double(%f)", val);
                 break;
             }
+            case AMessage::kTypeRect:
+            {
+                int32_t left, top, right, bottom;
+                f->findRect(name, &left, &top, &right, &bottom);
+                ret.appendFormat("Rect(%" PRId32 ", %" PRId32 ", %" PRId32 ", %" PRId32 ")",
+                                 left, top, right, bottom);
+                break;
+            }
             case AMessage::kTypeString:
             {
                 AString val;
@@ -165,11 +173,22 @@
 }
 
 EXPORT
+bool AMediaFormat_getDouble(AMediaFormat* format, const char *name, double *out) {
+    return format->mFormat->findDouble(name, out);
+}
+
+EXPORT
 bool AMediaFormat_getSize(AMediaFormat* format, const char *name, size_t *out) {
     return format->mFormat->findSize(name, out);
 }
 
 EXPORT
+bool AMediaFormat_getRect(AMediaFormat* format, const char *name,
+                          int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) {
+    return format->mFormat->findRect(name, left, top, right, bottom);
+}
+
+EXPORT
 bool AMediaFormat_getBuffer(AMediaFormat* format, const char *name, void** data, size_t *outsize) {
     sp<ABuffer> buf;
     if (format->mFormat->findBuffer(name, &buf)) {
@@ -216,6 +235,22 @@
 }
 
 EXPORT
+void AMediaFormat_setDouble(AMediaFormat* format, const char* name, double value) {
+    format->mFormat->setDouble(name, value);
+}
+
+EXPORT
+void AMediaFormat_setSize(AMediaFormat* format, const char* name, size_t value) {
+    format->mFormat->setSize(name, value);
+}
+
+EXPORT
+void AMediaFormat_setRect(AMediaFormat* format, const char *name,
+                          int32_t left, int32_t top, int32_t right, int32_t bottom) {
+    format->mFormat->setRect(name, left, top, right, bottom);
+}
+
+EXPORT
 void AMediaFormat_setString(AMediaFormat* format, const char* name, const char* value) {
     // AMessage::setString() makes a copy of the string
     format->mFormat->setString(name, value, strlen(value));
@@ -233,30 +268,61 @@
 }
 
 
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR = "aac-drc-cut-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR = "aac-drc-boost-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION = "aac-drc-heavy-compression";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL = "aac-target-ref-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL = "aac-encoded-target-level";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT = "aac-max-output-channel_count";
 EXPORT const char* AMEDIAFORMAT_KEY_AAC_PROFILE = "aac-profile";
+EXPORT const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE = "aac-sbr-mode";
+EXPORT const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID = "audio-session-id";
+EXPORT const char* AMEDIAFORMAT_KEY_BITRATE_MODE = "bitrate-mode";
 EXPORT const char* AMEDIAFORMAT_KEY_BIT_RATE = "bitrate";
+EXPORT const char* AMEDIAFORMAT_KEY_CAPTURE_RATE = "capture-rate";
 EXPORT const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT = "channel-count";
 EXPORT const char* AMEDIAFORMAT_KEY_CHANNEL_MASK = "channel-mask";
 EXPORT const char* AMEDIAFORMAT_KEY_COLOR_FORMAT = "color-format";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_RANGE = "color-range";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_STANDARD = "color-standard";
+EXPORT const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER = "color-transfer";
+EXPORT const char* AMEDIAFORMAT_KEY_COMPLEXITY = "complexity";
+EXPORT const char* AMEDIAFORMAT_KEY_DISPLAY_CROP = "crop";
 EXPORT const char* AMEDIAFORMAT_KEY_DURATION = "durationUs";
 EXPORT const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL = "flac-compression-level";
 EXPORT const char* AMEDIAFORMAT_KEY_FRAME_RATE = "frame-rate";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_COLS = "grid-cols";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_HEIGHT = "grid-height";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_ROWS = "grid-rows";
+EXPORT const char* AMEDIAFORMAT_KEY_GRID_WIDTH = "grid-width";
+EXPORT const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO = "hdr-static-info";
 EXPORT const char* AMEDIAFORMAT_KEY_HEIGHT = "height";
+EXPORT const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD = "intra-refresh-period";
 EXPORT const char* AMEDIAFORMAT_KEY_IS_ADTS = "is-adts";
 EXPORT const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT = "is-autoselect";
 EXPORT const char* AMEDIAFORMAT_KEY_IS_DEFAULT = "is-default";
 EXPORT const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE = "is-forced-subtitle";
 EXPORT const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL = "i-frame-interval";
 EXPORT const char* AMEDIAFORMAT_KEY_LANGUAGE = "language";
+EXPORT const char* AMEDIAFORMAT_KEY_LATENCY = "latency";
+EXPORT const char* AMEDIAFORMAT_KEY_LEVEL = "level";
 EXPORT const char* AMEDIAFORMAT_KEY_MAX_HEIGHT = "max-height";
 EXPORT const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE = "max-input-size";
 EXPORT const char* AMEDIAFORMAT_KEY_MAX_WIDTH = "max-width";
 EXPORT const char* AMEDIAFORMAT_KEY_MIME = "mime";
+EXPORT const char* AMEDIAFORMAT_KEY_OPERATING_RATE = "operating-rate";
+EXPORT const char* AMEDIAFORMAT_KEY_PCM_ENCODING = "pcm-encoding";
+EXPORT const char* AMEDIAFORMAT_KEY_PRIORITY = "priority";
+EXPORT const char* AMEDIAFORMAT_KEY_PROFILE = "profile";
 EXPORT const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP = "push-blank-buffers-on-shutdown";
 EXPORT const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER = "repeat-previous-frame-after";
+EXPORT const char* AMEDIAFORMAT_KEY_ROTATION = "rotation-degrees";
 EXPORT const char* AMEDIAFORMAT_KEY_SAMPLE_RATE = "sample-rate";
-EXPORT const char* AMEDIAFORMAT_KEY_WIDTH = "width";
+EXPORT const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT = "slice-height";
 EXPORT const char* AMEDIAFORMAT_KEY_STRIDE = "stride";
+EXPORT const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING = "ts-schema";
+EXPORT const char* AMEDIAFORMAT_KEY_TRACK_ID = "track-id";
+EXPORT const char* AMEDIAFORMAT_KEY_WIDTH = "width";
 
 
 } // extern "C"
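
The new rect/double/size accessors above round out the AMediaFormat C API. A hedged usage sketch follows; it assumes an NDK build targeting API 28+ linked against libmediandk, and uses two of the keys exported in this file (AMEDIAFORMAT_KEY_CAPTURE_RATE and AMEDIAFORMAT_KEY_DISPLAY_CROP) purely for illustration.

    #include <cstdio>

    #include <media/NdkMediaFormat.h>

    void demoFormatAccessors() {
        AMediaFormat *format = AMediaFormat_new();

        // new setters added in this change
        AMediaFormat_setDouble(format, AMEDIAFORMAT_KEY_CAPTURE_RATE, 29.97);
        AMediaFormat_setRect(format, AMEDIAFORMAT_KEY_DISPLAY_CROP, 0, 0, 1919, 1079);

        // new getters added in this change
        double captureRate = 0.0;
        int32_t left, top, right, bottom;
        if (AMediaFormat_getDouble(format, AMEDIAFORMAT_KEY_CAPTURE_RATE, &captureRate) &&
            AMediaFormat_getRect(format, AMEDIAFORMAT_KEY_DISPLAY_CROP,
                                 &left, &top, &right, &bottom)) {
            printf("capture-rate=%f crop=(%d,%d,%d,%d)\n",
                   captureRate, left, top, right, bottom);
        }

        AMediaFormat_delete(format);
    }
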
diff --git a/media/ndk/include/media/NdkMediaCodec.h b/media/ndk/include/media/NdkMediaCodec.h
index b15de38..f4a51d0 100644
--- a/media/ndk/include/media/NdkMediaCodec.h
+++ b/media/ndk/include/media/NdkMediaCodec.h
@@ -53,11 +53,63 @@
 typedef struct AMediaCodecCryptoInfo AMediaCodecCryptoInfo;
 
 enum {
+    AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG = 2,
     AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM = 4,
+    AMEDIACODEC_BUFFER_FLAG_PARTIAL_FRAME = 8,
+
     AMEDIACODEC_CONFIGURE_FLAG_ENCODE = 1,
     AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED = -3,
     AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED = -2,
-    AMEDIACODEC_INFO_TRY_AGAIN_LATER = -1
+    AMEDIACODEC_INFO_TRY_AGAIN_LATER = -1,
+};
+
+/**
+ * Called when an input buffer becomes available.
+ * The specified index is the index of the available input buffer.
+ */
+typedef void (*AMediaCodecOnAsyncInputAvailable)(
+        AMediaCodec *codec,
+        void *userdata,
+        int32_t index);
+/**
+ * Called when an output buffer becomes available.
+ * The specified index is the index of the available output buffer.
+ * The specified bufferInfo contains information regarding the available output buffer.
+ */
+typedef void (*AMediaCodecOnAsyncOutputAvailable)(
+        AMediaCodec *codec,
+        void *userdata,
+        int32_t index,
+        AMediaCodecBufferInfo *bufferInfo);
+/**
+ * Called when the output format has changed.
+ * The specified format contains the new output format.
+ */
+typedef void (*AMediaCodecOnAsyncFormatChanged)(
+        AMediaCodec *codec,
+        void *userdata,
+        AMediaFormat *format);
+/**
+ * Called when the MediaCodec encountered an error.
+ * The specified actionCode indicates the possible actions that client can take,
+ * and it can be checked by calling AMediaCodecActionCode_isRecoverable or
+ * AMediaCodecActionCode_isTransient. If both AMediaCodecActionCode_isRecoverable()
+ * and AMediaCodecActionCode_isTransient() return false, then the codec error is fatal
+ * and the codec must be deleted.
+ * The specified detail may contain more detailed messages about this error.
+ */
+typedef void (*AMediaCodecOnAsyncError)(
+        AMediaCodec *codec,
+        void *userdata,
+        media_status_t error,
+        int32_t actionCode,
+        const char *detail);
+
+struct AMediaCodecOnAsyncNotifyCallback {
+      AMediaCodecOnAsyncInputAvailable  onAsyncInputAvailable;
+      AMediaCodecOnAsyncOutputAvailable onAsyncOutputAvailable;
+      AMediaCodecOnAsyncFormatChanged   onAsyncFormatChanged;
+      AMediaCodecOnAsyncError           onAsyncError;
 };
 
 #if __ANDROID_API__ >= 21
@@ -289,6 +341,71 @@
 
 #endif /* __ANDROID_API__ >= 26 */
 
+#if __ANDROID_API__ >= 28
+
+/**
+ * Get the component name. If the codec was created by createDecoderByType
+ * or createEncoderByType, the component chosen is not known beforehand.
+ * The caller must call AMediaCodec_releaseName to free the returned pointer.
+ */
+media_status_t AMediaCodec_getName(AMediaCodec*, char** out_name);
+
+/**
+ * Free the memory pointed to by name, which is returned by AMediaCodec_getName.
+ */
+void AMediaCodec_releaseName(AMediaCodec*, char* name);
+
+/**
+ * Set an asynchronous callback for actionable AMediaCodec events.
+ * When the asynchronous callback is enabled, the client should not call
+ * AMediaCodec_getInputBuffers(), AMediaCodec_getOutputBuffers(),
+ * AMediaCodec_dequeueInputBuffer() or AMediaCodec_dequeueOutputBuffer().
+ *
+ * Also, AMediaCodec_flush() behaves differently in asynchronous mode.
+ * After calling AMediaCodec_flush(), you must call AMediaCodec_start() to
+ * "resume" receiving input buffers, even if an input surface was created.
+ *
+ * Refer to the definition of AMediaCodecOnAsyncNotifyCallback for how each
+ * callback function is invoked and which parameters are passed.
+ * The specified userdata is the pointer passed to those callback functions
+ * when they are invoked.
+ *
+ * All callbacks are fired on one NDK internal thread.
+ * AMediaCodec_setAsyncNotifyCallback should not be called on the callback thread.
+ * No heavy-duty tasks should be performed on the callback thread.
+ */
+media_status_t AMediaCodec_setAsyncNotifyCallback(
+        AMediaCodec*,
+        AMediaCodecOnAsyncNotifyCallback callback,
+        void *userdata);
+
+/**
+ * Release the crypto if applicable.
+ */
+media_status_t AMediaCodec_releaseCrypto(AMediaCodec*);
+
+/**
+ * Call this after AMediaCodec_configure() returns successfully to get the input
+ * format accepted by the codec. Do this to determine what optional configuration
+ * parameters were supported by the codec.
+ */
+AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec*);
+
+/**
+ * Returns true if the codec cannot proceed further, but can be recovered by stopping,
+ * configuring, and starting again.
+ */
+bool AMediaCodecActionCode_isRecoverable(int32_t actionCode);
+
+/**
+ * Returns true if the codec error is a transient issue, perhaps due to
+ * resource constraints, and that the method (or encoding/decoding) may be
+ * retried at a later time.
+ */
+bool AMediaCodecActionCode_isTransient(int32_t actionCode);
+
+#endif /* __ANDROID_API__ >= 28 */
+
 typedef enum {
     AMEDIACODECRYPTOINFO_MODE_CLEAR = 0,
     AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1,
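A minimal sketch (not part of the patch) of how a client might register the asynchronous callbacks declared above, assuming API level 28; the handler names and the enableAsyncMode() helper are illustrative, not part of the NDK.

#include <media/NdkMediaCodec.h>

// Hypothetical handlers; real implementations would queue/dequeue buffers here.
static void onInputAvailable(AMediaCodec *codec, void *userdata, int32_t index) {
    (void)codec; (void)userdata; (void)index; // fill and queue the input buffer at 'index'
}
static void onOutputAvailable(AMediaCodec *codec, void *userdata, int32_t index,
                              AMediaCodecBufferInfo *bufferInfo) {
    (void)userdata; (void)bufferInfo;
    AMediaCodec_releaseOutputBuffer(codec, index, false /* render */);
}
static void onFormatChanged(AMediaCodec *codec, void *userdata, AMediaFormat *format) {
    (void)codec; (void)userdata; (void)format; // inspect the new output format
}
static void onError(AMediaCodec *codec, void *userdata, media_status_t error,
                    int32_t actionCode, const char *detail) {
    (void)codec; (void)userdata; (void)error; (void)actionCode; (void)detail;
}

static media_status_t enableAsyncMode(AMediaCodec *codec, void *userdata) {
    AMediaCodecOnAsyncNotifyCallback cb = {
        onInputAvailable, onOutputAvailable, onFormatChanged, onError };
    // Callbacks fire on one NDK internal thread, so keep the handlers lightweight.
    return AMediaCodec_setAsyncNotifyCallback(codec, cb, userdata);
}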
diff --git a/media/ndk/include/media/NdkMediaError.h b/media/ndk/include/media/NdkMediaError.h
index da61b64..e48fcbe 100644
--- a/media/ndk/include/media/NdkMediaError.h
+++ b/media/ndk/include/media/NdkMediaError.h
@@ -35,6 +35,17 @@
 typedef enum {
     AMEDIA_OK = 0,
 
+    /**
+     * This indicates that the required resource could not be allocated.
+     */
+    AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE = 1100,
+
+    /**
+     * This indicates the resource manager reclaimed the media resource used by the codec.
+     * With this error, the codec must be released, as it has moved to a terminal state.
+     */
+    AMEDIACODEC_ERROR_RECLAIMED             = 1101,
+
     AMEDIA_ERROR_BASE                  = -10000,
     AMEDIA_ERROR_UNKNOWN               = AMEDIA_ERROR_BASE,
     AMEDIA_ERROR_MALFORMED             = AMEDIA_ERROR_BASE - 1,
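A hedged sketch (not part of the patch) of how an AMediaCodecOnAsyncError handler might interpret these codes together with the action-code helpers declared in NdkMediaCodec.h; the branching policy shown is illustrative.

#include <media/NdkMediaCodec.h>
#include <media/NdkMediaError.h>

static void onAsyncError(AMediaCodec *codec, void *userdata, media_status_t error,
                         int32_t actionCode, const char *detail) {
    (void)codec; (void)userdata; (void)detail;
    if (error == AMEDIACODEC_ERROR_RECLAIMED) {
        // Resource manager reclaimed the codec: it is in a terminal state, so schedule
        // its deletion (outside this callback) and recreate it when needed.
    } else if (AMediaCodecActionCode_isTransient(actionCode)) {
        // e.g. AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE: retry the failed operation later.
    } else if (AMediaCodecActionCode_isRecoverable(actionCode)) {
        // Stop, configure and start the codec again.
    } else {
        // Fatal error: the codec must be deleted.
    }
}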
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 018ab76..b6489c7 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -51,6 +51,7 @@
 bool AMediaFormat_getInt32(AMediaFormat*, const char *name, int32_t *out);
 bool AMediaFormat_getInt64(AMediaFormat*, const char *name, int64_t *out);
 bool AMediaFormat_getFloat(AMediaFormat*, const char *name, float *out);
+bool AMediaFormat_getSize(AMediaFormat*, const char *name, size_t *out);
 /**
  * The returned data is owned by the format and remains valid as long as the named entry
  * is part of the format.
@@ -80,33 +81,75 @@
 /**
  * XXX should these be ints/enums that we look up in a table as needed?
  */
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION;
+extern const char* AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL;
+extern const char* AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL;
+extern const char* AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT;
 extern const char* AMEDIAFORMAT_KEY_AAC_PROFILE;
+extern const char* AMEDIAFORMAT_KEY_AAC_SBR_MODE;
+extern const char* AMEDIAFORMAT_KEY_AUDIO_SESSION_ID;
+extern const char* AMEDIAFORMAT_KEY_BITRATE_MODE;
 extern const char* AMEDIAFORMAT_KEY_BIT_RATE;
+extern const char* AMEDIAFORMAT_KEY_CAPTURE_RATE;
 extern const char* AMEDIAFORMAT_KEY_CHANNEL_COUNT;
 extern const char* AMEDIAFORMAT_KEY_CHANNEL_MASK;
 extern const char* AMEDIAFORMAT_KEY_COLOR_FORMAT;
+extern const char* AMEDIAFORMAT_KEY_COLOR_RANGE;
+extern const char* AMEDIAFORMAT_KEY_COLOR_STANDARD;
+extern const char* AMEDIAFORMAT_KEY_COLOR_TRANSFER;
+extern const char* AMEDIAFORMAT_KEY_COMPLEXITY;
+extern const char* AMEDIAFORMAT_KEY_DISPLAY_CROP;
 extern const char* AMEDIAFORMAT_KEY_DURATION;
 extern const char* AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL;
 extern const char* AMEDIAFORMAT_KEY_FRAME_RATE;
+extern const char* AMEDIAFORMAT_KEY_GRID_COLS;
+extern const char* AMEDIAFORMAT_KEY_GRID_HEIGHT;
+extern const char* AMEDIAFORMAT_KEY_GRID_ROWS;
+extern const char* AMEDIAFORMAT_KEY_GRID_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_HDR_STATIC_INFO;
 extern const char* AMEDIAFORMAT_KEY_HEIGHT;
+extern const char* AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD;
 extern const char* AMEDIAFORMAT_KEY_IS_ADTS;
 extern const char* AMEDIAFORMAT_KEY_IS_AUTOSELECT;
 extern const char* AMEDIAFORMAT_KEY_IS_DEFAULT;
 extern const char* AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE;
 extern const char* AMEDIAFORMAT_KEY_I_FRAME_INTERVAL;
 extern const char* AMEDIAFORMAT_KEY_LANGUAGE;
+extern const char* AMEDIAFORMAT_KEY_LATENCY;
+extern const char* AMEDIAFORMAT_KEY_LEVEL;
 extern const char* AMEDIAFORMAT_KEY_MAX_HEIGHT;
 extern const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE;
 extern const char* AMEDIAFORMAT_KEY_MAX_WIDTH;
 extern const char* AMEDIAFORMAT_KEY_MIME;
+extern const char* AMEDIAFORMAT_KEY_OPERATING_RATE;
+extern const char* AMEDIAFORMAT_KEY_PCM_ENCODING;
+extern const char* AMEDIAFORMAT_KEY_PRIORITY;
+extern const char* AMEDIAFORMAT_KEY_PROFILE;
 extern const char* AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP;
 extern const char* AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER;
+extern const char* AMEDIAFORMAT_KEY_ROTATION;
 extern const char* AMEDIAFORMAT_KEY_SAMPLE_RATE;
-extern const char* AMEDIAFORMAT_KEY_WIDTH;
+extern const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT;
 extern const char* AMEDIAFORMAT_KEY_STRIDE;
+extern const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYERING;
+extern const char* AMEDIAFORMAT_KEY_TRACK_ID;
+extern const char* AMEDIAFORMAT_KEY_WIDTH;
 
 #endif /* __ANDROID_API__ >= 21 */
 
+#if __ANDROID_API__ >= 28
+bool AMediaFormat_getDouble(AMediaFormat*, const char *name, double *out);
+bool AMediaFormat_getRect(AMediaFormat*, const char *name,
+                          int32_t *left, int32_t *top, int32_t *right, int32_t *bottom);
+
+void AMediaFormat_setDouble(AMediaFormat*, const char* name, double value);
+void AMediaFormat_setSize(AMediaFormat*, const char* name, size_t value);
+void AMediaFormat_setRect(AMediaFormat*, const char* name,
+                          int32_t left, int32_t top, int32_t right, int32_t bottom);
+#endif /* __ANDROID_API__ >= 28 */
+
 __END_DECLS
 
 #endif // _NDK_MEDIA_FORMAT_H
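A small sketch (not part of the patch) exercising the API-28 accessors added above; the specific keys and values are illustrative only.

#include <media/NdkMediaFormat.h>

static void exerciseNewAccessors() {
    AMediaFormat *fmt = AMediaFormat_new();
    AMediaFormat_setString(fmt, AMEDIAFORMAT_KEY_MIME, "video/avc");
    AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_WIDTH, 1920);
    AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_HEIGHT, 1080);
    AMediaFormat_setDouble(fmt, AMEDIAFORMAT_KEY_CAPTURE_RATE, 29.97);
    AMediaFormat_setRect(fmt, AMEDIAFORMAT_KEY_DISPLAY_CROP, 0, 0, 1919, 1079);

    int32_t left, top, right, bottom;
    if (AMediaFormat_getRect(fmt, AMEDIAFORMAT_KEY_DISPLAY_CROP,
                             &left, &top, &right, &bottom)) {
        // use the crop rectangle
    }
    double captureRate;
    if (AMediaFormat_getDouble(fmt, AMEDIAFORMAT_KEY_CAPTURE_RATE, &captureRate)) {
        // use the capture rate
    }
    AMediaFormat_delete(fmt);
}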
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index d7ad370..f2d97cd 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -26,30 +26,63 @@
     AImage_getPlaneRowStride; # introduced=24
     AImage_getTimestamp; # introduced=24
     AImage_getWidth; # introduced=24
+    AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR; # var introduced=28
+    AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR; # var introduced=28
+    AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION; # var introduced=28
+    AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL; # var introduced=28
+    AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL; # var introduced=28
+    AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT; # var introduced=28
     AMEDIAFORMAT_KEY_AAC_PROFILE; # var
+    AMEDIAFORMAT_KEY_AAC_SBR_MODE; # var introduced=28
+    AMEDIAFORMAT_KEY_AUDIO_SESSION_ID; # var introduced=28
+    AMEDIAFORMAT_KEY_BITRATE_MODE; # var introduced=28
     AMEDIAFORMAT_KEY_BIT_RATE; # var
+    AMEDIAFORMAT_KEY_CAPTURE_RATE; # var introduced=28
     AMEDIAFORMAT_KEY_CHANNEL_COUNT; # var
     AMEDIAFORMAT_KEY_CHANNEL_MASK; # var
     AMEDIAFORMAT_KEY_COLOR_FORMAT; # var
+    AMEDIAFORMAT_KEY_COLOR_RANGE; # var introduced=28
+    AMEDIAFORMAT_KEY_COLOR_STANDARD; # var introduced=28
+    AMEDIAFORMAT_KEY_COLOR_TRANSFER; # var introduced=28
+    AMEDIAFORMAT_KEY_COMPLEXITY; # var introduced=28
+    AMEDIAFORMAT_KEY_DISPLAY_CROP; # var introduced=28
     AMEDIAFORMAT_KEY_DURATION; # var
     AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL; # var
     AMEDIAFORMAT_KEY_FRAME_RATE; # var
+    AMEDIAFORMAT_KEY_GRID_COLS; # var introduced=28
+    AMEDIAFORMAT_KEY_GRID_HEIGHT; # var introduced=28
+    AMEDIAFORMAT_KEY_GRID_ROWS; # var introduced=28
+    AMEDIAFORMAT_KEY_GRID_WIDTH; # var introduced=28
+    AMEDIAFORMAT_KEY_HDR_STATIC_INFO; # var introduced=28
     AMEDIAFORMAT_KEY_HEIGHT; # var
+    AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD; # var introduced=28
     AMEDIAFORMAT_KEY_IS_ADTS; # var
     AMEDIAFORMAT_KEY_IS_AUTOSELECT; # var
     AMEDIAFORMAT_KEY_IS_DEFAULT; # var
     AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE; # var
     AMEDIAFORMAT_KEY_I_FRAME_INTERVAL; # var
     AMEDIAFORMAT_KEY_LANGUAGE; # var
+    AMEDIAFORMAT_KEY_LATENCY; # var introduced=28
+    AMEDIAFORMAT_KEY_LEVEL; # var introduced=28
     AMEDIAFORMAT_KEY_MAX_HEIGHT; # var
     AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var
     AMEDIAFORMAT_KEY_MAX_WIDTH; # var
     AMEDIAFORMAT_KEY_MIME; # var
+    AMEDIAFORMAT_KEY_OPERATING_RATE; # var introduced=28
+    AMEDIAFORMAT_KEY_PCM_ENCODING; # var introduced=28
+    AMEDIAFORMAT_KEY_PRIORITY; # var introduced=28
+    AMEDIAFORMAT_KEY_PROFILE; # var introduced=28
     AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP; # var
     AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER; # var
+    AMEDIAFORMAT_KEY_ROTATION; # var introduced=28
     AMEDIAFORMAT_KEY_SAMPLE_RATE; # var
+    AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
     AMEDIAFORMAT_KEY_STRIDE; # var
+    AMEDIAFORMAT_KEY_TEMPORAL_LAYERING; # var introduced=28
+    AMEDIAFORMAT_KEY_TRACK_ID; # var introduced=28
     AMEDIAFORMAT_KEY_WIDTH; # var
+    AMediaCodecActionCode_isRecoverable; # introduced=28
+    AMediaCodecActionCode_isTransient; # introduced=28
     AMediaCodecCryptoInfo_delete;
     AMediaCodecCryptoInfo_getClearBytes;
     AMediaCodecCryptoInfo_getEncryptedBytes;
@@ -68,12 +101,16 @@
     AMediaCodec_dequeueOutputBuffer;
     AMediaCodec_flush;
     AMediaCodec_getInputBuffer;
+    AMediaCodec_getInputFormat; # introduced=28
+    AMediaCodec_getName; # introduced=28
     AMediaCodec_getOutputBuffer;
     AMediaCodec_getOutputFormat;
     AMediaCodec_queueInputBuffer;
     AMediaCodec_queueSecureInputBuffer;
+    AMediaCodec_releaseCrypto; # introduced=28
     AMediaCodec_releaseOutputBuffer;
     AMediaCodec_releaseOutputBufferAtTime;
+    AMediaCodec_setAsyncNotifyCallback; # introduced=28
     AMediaCodec_setOutputSurface; # introduced=24
     AMediaCodec_setParameters; # introduced=26
     AMediaCodec_setInputSurface; # introduced=26
@@ -127,16 +164,21 @@
     AMediaExtractor_unselectTrack;
     AMediaFormat_delete;
     AMediaFormat_getBuffer;
+    AMediaFormat_getDouble; # introduced=28
     AMediaFormat_getFloat;
     AMediaFormat_getInt32;
     AMediaFormat_getInt64;
+    AMediaFormat_getRect; # introduced=28
     AMediaFormat_getSize;
     AMediaFormat_getString;
     AMediaFormat_new;
     AMediaFormat_setBuffer;
+    AMediaFormat_setDouble; # introduced=28
     AMediaFormat_setFloat;
     AMediaFormat_setInt32;
     AMediaFormat_setInt64;
+    AMediaFormat_setRect; # introduced=28
+    AMediaFormat_setSize; # introduced=28
     AMediaFormat_setString;
     AMediaFormat_toString;
     AMediaMuxer_addTrack;
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 9cb0357..aeb32bb 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -674,7 +674,11 @@
     audio_session_t sessionId = input.sessionId;
     if (sessionId == AUDIO_SESSION_ALLOCATE) {
         sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+    } else if (audio_unique_id_get_use(sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
+        lStatus = BAD_VALUE;
+        goto Exit;
     }
+
     output.sessionId = sessionId;
     output.outputId = AUDIO_IO_HANDLE_NONE;
     output.selectedDeviceId = input.selectedDeviceId;
@@ -1568,120 +1572,144 @@
 
 // ----------------------------------------------------------------------------
 
-sp<media::IAudioRecord> AudioFlinger::openRecord(
-        audio_io_handle_t input,
-        uint32_t sampleRate,
-        audio_format_t format,
-        audio_channel_mask_t channelMask,
-        const String16& opPackageName,
-        size_t *frameCount,
-        audio_input_flags_t *flags,
-        pid_t pid,
-        pid_t tid,
-        int clientUid,
-        audio_session_t *sessionId,
-        size_t *notificationFrames,
-        sp<IMemory>& cblk,
-        sp<IMemory>& buffers,
-        status_t *status,
-        audio_port_handle_t portId)
+sp<media::IAudioRecord> AudioFlinger::createRecord(const CreateRecordInput& input,
+                                                   CreateRecordOutput& output,
+                                                   status_t *status)
 {
     sp<RecordThread::RecordTrack> recordTrack;
     sp<RecordHandle> recordHandle;
     sp<Client> client;
     status_t lStatus;
-    audio_session_t lSessionId;
+    audio_session_t sessionId = input.sessionId;
+    audio_port_handle_t portId;
 
-    cblk.clear();
-    buffers.clear();
+    output.cblk.clear();
+    output.buffers.clear();
 
-    bool updatePid = (pid == -1);
+    bool updatePid = (input.clientInfo.clientPid == -1);
     const uid_t callingUid = IPCThreadState::self()->getCallingUid();
+    uid_t clientUid = input.clientInfo.clientUid;
     if (!isTrustedCallingUid(callingUid)) {
-        ALOGW_IF((uid_t)clientUid != callingUid,
-                "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, clientUid);
+        ALOGW_IF(clientUid != callingUid,
+                "%s uid %d tried to pass itself off as %d",
+                __FUNCTION__, callingUid, clientUid);
         clientUid = callingUid;
         updatePid = true;
     }
-
+    pid_t clientPid = input.clientInfo.clientPid;
     if (updatePid) {
         const pid_t callingPid = IPCThreadState::self()->getCallingPid();
-        ALOGW_IF(pid != -1 && pid != callingPid,
+        ALOGW_IF(clientPid != -1 && clientPid != callingPid,
                  "%s uid %d pid %d tried to pass itself off as pid %d",
-                 __func__, callingUid, callingPid, pid);
-        pid = callingPid;
+                 __func__, callingUid, callingPid, clientPid);
+        clientPid = callingPid;
     }
 
     // check calling permissions
-    if (!recordingAllowed(opPackageName, tid, clientUid)) {
-        ALOGE("openRecord() permission denied: recording not allowed");
+    if (!recordingAllowed(input.opPackageName, input.clientInfo.clientTid, clientUid)) {
+        ALOGE("createRecord() permission denied: recording not allowed");
         lStatus = PERMISSION_DENIED;
         goto Exit;
     }
-
-    // further sample rate checks are performed by createRecordTrack_l()
-    if (sampleRate == 0) {
-        ALOGE("openRecord() invalid sample rate %u", sampleRate);
-        lStatus = BAD_VALUE;
-        goto Exit;
-    }
-
     // we don't yet support anything other than linear PCM
-    if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
-        ALOGE("openRecord() invalid format %#x", format);
+    if (!audio_is_valid_format(input.config.format) || !audio_is_linear_pcm(input.config.format)) {
+        ALOGE("createRecord() invalid format %#x", input.config.format);
         lStatus = BAD_VALUE;
         goto Exit;
     }
 
     // further channel mask checks are performed by createRecordTrack_l()
-    if (!audio_is_input_channel(channelMask)) {
-        ALOGE("openRecord() invalid channel mask %#x", channelMask);
+    if (!audio_is_input_channel(input.config.channel_mask)) {
+        ALOGE("createRecord() invalid channel mask %#x", input.config.channel_mask);
         lStatus = BAD_VALUE;
         goto Exit;
     }
 
+    if (sessionId == AUDIO_SESSION_ALLOCATE) {
+        sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+    } else if (audio_unique_id_get_use(sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
+        lStatus = BAD_VALUE;
+        goto Exit;
+    }
+
+    output.sessionId = sessionId;
+    output.inputId = AUDIO_IO_HANDLE_NONE;
+    output.selectedDeviceId = input.selectedDeviceId;
+    output.flags = input.flags;
+
+    client = registerPid(clientPid);
+
+    // Not a conventional loop, but a retry loop for at most two iterations total.
+    // Try first, possibly with the FAST flag; if that fails, try again without the FAST flag.
+    // Exits the loop via break on success, or via goto Exit on error.
+    // The sp<> references will be dropped when re-entering scope.
+    // The lack of indentation is deliberate, to reduce code churn and ease merges.
+    for (;;) {
+    lStatus = AudioSystem::getInputForAttr(&input.attr, &output.inputId,
+                                      sessionId,
+                                    // FIXME compare to AudioTrack
+                                      clientPid,
+                                      clientUid,
+                                      &input.config,
+                                      output.flags, &output.selectedDeviceId, &portId);
+
     {
         Mutex::Autolock _l(mLock);
-        RecordThread *thread = checkRecordThread_l(input);
+        RecordThread *thread = checkRecordThread_l(output.inputId);
         if (thread == NULL) {
-            ALOGE("openRecord() checkRecordThread_l failed");
+            ALOGE("createRecord() checkRecordThread_l failed");
             lStatus = BAD_VALUE;
             goto Exit;
         }
 
-        client = registerPid(pid);
+        ALOGV("createRecord() lSessionId: %d input %d", sessionId, output.inputId);
 
-        if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) {
-            if (audio_unique_id_get_use(*sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
-                lStatus = BAD_VALUE;
-                goto Exit;
-            }
-            lSessionId = *sessionId;
-        } else {
-            // if no audio session id is provided, create one here
-            lSessionId = (audio_session_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
-            if (sessionId != NULL) {
-                *sessionId = lSessionId;
-            }
-        }
-        ALOGV("openRecord() lSessionId: %d input %d", lSessionId, input);
+        output.sampleRate = input.config.sample_rate;
+        output.frameCount = input.frameCount;
+        output.notificationFrameCount = input.notificationFrameCount;
 
-        recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
-                                                  frameCount, lSessionId, notificationFrames,
-                                                  clientUid, flags, tid, &lStatus, portId);
+        recordTrack = thread->createRecordTrack_l(client, &output.sampleRate,
+                                                  input.config.format, input.config.channel_mask,
+                                                  &output.frameCount, sessionId,
+                                                  &output.notificationFrameCount,
+                                                  clientUid, &output.flags,
+                                                  input.clientInfo.clientTid,
+                                                  &lStatus, portId);
         LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));
 
-        if (lStatus == NO_ERROR) {
-            // Check if one effect chain was awaiting for an AudioRecord to be created on this
-            // session and move it to this thread.
-            sp<EffectChain> chain = getOrphanEffectChain_l(lSessionId);
-            if (chain != 0) {
-                Mutex::Autolock _l(thread->mLock);
-                thread->addEffectChain_l(chain);
-            }
+        // lStatus == BAD_TYPE means FAST flag was rejected: request a new input from
+        // audio policy manager without FAST constraint
+        if (lStatus == BAD_TYPE) {
+            AudioSystem::releaseInput(output.inputId, sessionId);
+            recordTrack.clear();
+            continue;
         }
+
+        if (lStatus != NO_ERROR) {
+            recordTrack.clear();
+            goto Exit;
+        }
+
+        // Check if one effect chain was awaiting for an AudioRecord to be created on this
+        // session and move it to this thread.
+        sp<EffectChain> chain = getOrphanEffectChain_l(sessionId);
+        if (chain != 0) {
+            Mutex::Autolock _l(thread->mLock);
+            thread->addEffectChain_l(chain);
+        }
+        break;
+    }
+    // End of retry loop.
+    // The lack of indentation is deliberate, to reduce code churn and ease merges.
     }
 
+    output.cblk = recordTrack->getCblk();
+    output.buffers = recordTrack->getBuffers();
+
+    // return handle to client
+    recordHandle = new RecordHandle(recordTrack);
+
+Exit:
     if (lStatus != NO_ERROR) {
         // remove local strong reference to Client before deleting the RecordTrack so that the
         // Client destructor is called by the TrackBase destructor with mClientLock held
@@ -1691,17 +1719,8 @@
             Mutex::Autolock _cl(mClientLock);
             client.clear();
         }
-        recordTrack.clear();
-        goto Exit;
     }
 
-    cblk = recordTrack->getCblk();
-    buffers = recordTrack->getBuffers();
-
-    // return handle to client
-    recordHandle = new RecordHandle(recordTrack);
-
-Exit:
     *status = lStatus;
     return recordHandle;
 }
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 506420c..bc73ffd 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -118,23 +118,9 @@
                                         CreateTrackOutput& output,
                                         status_t *status);
 
-    virtual sp<media::IAudioRecord> openRecord(
-                                audio_io_handle_t input,
-                                uint32_t sampleRate,
-                                audio_format_t format,
-                                audio_channel_mask_t channelMask,
-                                const String16& opPackageName,
-                                size_t *pFrameCount,
-                                audio_input_flags_t *flags,
-                                pid_t pid,
-                                pid_t tid,
-                                int clientUid,
-                                audio_session_t *sessionId,
-                                size_t *notificationFrames,
-                                sp<IMemory>& cblk,
-                                sp<IMemory>& buffers,
-                                status_t *status /*non-NULL*/,
-                                audio_port_handle_t portId);
+    virtual sp<media::IAudioRecord> createRecord(const CreateRecordInput& input,
+                                                 CreateRecordOutput& output,
+                                                 status_t *status);
 
     virtual     uint32_t    sampleRate(audio_io_handle_t ioHandle) const;
     virtual     audio_format_t format(audio_io_handle_t output) const;
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index e0d0d7b..ef6e223 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -328,21 +328,21 @@
             } else {
                 {   // convert input to int16_t as effect doesn't support float.
                     if (!auxType) {
-                        if (mInBuffer16.get() == nullptr) {
-                            ALOGW("%s: mInBuffer16 is null, bypassing", __func__);
+                        if (mInConversionBuffer.get() == nullptr) {
+                            ALOGW("%s: mInConversionBuffer is null, bypassing", __func__);
                             goto data_bypass;
                         }
                         const float * const pIn = mInBuffer->audioBuffer()->f32;
-                        int16_t * const pIn16 = mInBuffer16->audioBuffer()->s16;
+                        int16_t * const pIn16 = mInConversionBuffer->audioBuffer()->s16;
                         memcpy_to_i16_from_float(
                                 pIn16, pIn, inChannelCount * mConfig.inputCfg.buffer.frameCount);
                     }
                     if (mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
-                        if (mOutBuffer16.get() == nullptr) {
-                            ALOGW("%s: mOutBuffer16 is null, bypassing", __func__);
+                        if (mOutConversionBuffer.get() == nullptr) {
+                            ALOGW("%s: mOutConversionBuffer is null, bypassing", __func__);
                             goto data_bypass;
                         }
-                        int16_t * const pOut16 = mOutBuffer16->audioBuffer()->s16;
+                        int16_t * const pOut16 = mOutConversionBuffer->audioBuffer()->s16;
                         const float * const pOut = mOutBuffer->audioBuffer()->f32;
                         memcpy_to_i16_from_float(
                                 pOut16,
@@ -354,7 +354,7 @@
                 ret = mEffectInterface->process();
 
                 {   // convert output back to float.
-                    const int16_t * const pOut16 = mOutBuffer16->audioBuffer()->s16;
+                    const int16_t * const pOut16 = mOutConversionBuffer->audioBuffer()->s16;
                     float * const pOut = mOutBuffer->audioBuffer()->f32;
                     memcpy_to_float_from_i16(
                             pOut, pOut16, outChannelCount * mConfig.outputCfg.buffer.frameCount);
@@ -906,7 +906,7 @@
     mEffectInterface->setInBuffer(buffer);
 
 #ifdef FLOAT_EFFECT_CHAIN
-    // aux effects do in place conversion to float - we don't allocate mInBuffer16 for them.
+    // aux effects do in place conversion to float - we don't allocate mInConversionBuffer.
     // Theoretically insert effects can also do in-place conversions (destroying
     // the original buffer) when the output buffer is identical to the input buffer,
     // but we don't optimize for it here.
@@ -920,17 +920,18 @@
         ALOGV("%s: setInBuffer updating for inChannels:%d inFrameCount:%zu total size:%zu",
                 __func__, inChannels, inFrameCount, size);
 
-        if (size > 0 && (mInBuffer16.get() == nullptr || size > mInBuffer16->getSize())) {
-            mInBuffer16.clear();
-            ALOGV("%s: allocating mInBuffer16 %zu", __func__, size);
-            (void)EffectBufferHalInterface::allocate(size, &mInBuffer16);
+        if (size > 0 && (mInConversionBuffer.get() == nullptr
+                || size > mInConversionBuffer->getSize())) {
+            mInConversionBuffer.clear();
+            ALOGV("%s: allocating mInConversionBuffer %zu", __func__, size);
+            (void)EffectBufferHalInterface::allocate(size, &mInConversionBuffer);
         }
-        if (mInBuffer16.get() != nullptr) {
+        if (mInConversionBuffer.get() != nullptr) {
             // FIXME: confirm buffer has enough size.
-            mInBuffer16->setFrameCount(inFrameCount);
-            mEffectInterface->setInBuffer(mInBuffer16);
+            mInConversionBuffer->setFrameCount(inFrameCount);
+            mEffectInterface->setInBuffer(mInConversionBuffer);
         } else if (size > 0) {
-            ALOGE("%s cannot create mInBuffer16", __func__);
+            ALOGE("%s cannot create mInConversionBuffer", __func__);
         }
     }
 #endif
@@ -948,7 +949,7 @@
     mEffectInterface->setOutBuffer(buffer);
 
 #ifdef FLOAT_EFFECT_CHAIN
-    // Note: Any effect that does not accumulate does not need mOutBuffer16 and
+    // Note: Any effect that does not accumulate does not need mOutConversionBuffer and
     // can do in-place conversion from int16_t to float.  We don't optimize here.
     if (!mSupportsFloat && mOutBuffer.get() != nullptr) {
         const size_t outFrameCount = mConfig.outputCfg.buffer.frameCount;
@@ -958,16 +959,17 @@
         ALOGV("%s: setOutBuffer updating for outChannels:%d outFrameCount:%zu total size:%zu",
                 __func__, outChannels, outFrameCount, size);
 
-        if (size > 0 && (mOutBuffer16.get() == nullptr || size > mOutBuffer16->getSize())) {
-            mOutBuffer16.clear();
-            ALOGV("%s: allocating mOutBuffer16 %zu", __func__, size);
-            (void)EffectBufferHalInterface::allocate(size, &mOutBuffer16);
+        if (size > 0 && (mOutConversionBuffer.get() == nullptr
+                || size > mOutConversionBuffer->getSize())) {
+            mOutConversionBuffer.clear();
+            ALOGV("%s: allocating mOutConversionBuffer %zu", __func__, size);
+            (void)EffectBufferHalInterface::allocate(size, &mOutConversionBuffer);
         }
-        if (mOutBuffer16.get() != nullptr) {
-            mOutBuffer16->setFrameCount(outFrameCount);
-            mEffectInterface->setOutBuffer(mOutBuffer16);
+        if (mOutConversionBuffer.get() != nullptr) {
+            mOutConversionBuffer->setFrameCount(outFrameCount);
+            mEffectInterface->setOutBuffer(mOutConversionBuffer);
         } else if (size > 0) {
-            ALOGE("%s cannot create mOutBuffer16", __func__);
+            ALOGE("%s cannot create mOutConversionBuffer", __func__);
         }
     }
 #endif
@@ -1241,6 +1243,20 @@
     return s;
 }
 
+static std::string dumpInOutBuffer(bool isInput, const sp<EffectBufferHalInterface> &buffer) {
+    std::stringstream ss;
+
+    if (buffer.get() == nullptr) {
+        return "nullptr"; // make different than below
+    } else if (buffer->externalData() != nullptr) {
+        ss << (isInput ? buffer->externalData() : buffer->audioBuffer()->raw)
+                << " -> "
+                << (isInput ? buffer->audioBuffer()->raw : buffer->externalData());
+    } else {
+        ss << buffer->audioBuffer()->raw;
+    }
+    return ss.str();
+}
 
 void AudioFlinger::EffectModule::dump(int fd, const Vector<String16>& args __unused)
 {
@@ -1305,19 +1321,13 @@
     result.append(buffer);
 
 #ifdef FLOAT_EFFECT_CHAIN
-    if (!mSupportsFloat) {
-        int16_t* pIn16 = mInBuffer16 != 0 ? mInBuffer16->audioBuffer()->s16 : NULL;
-        int16_t* pOut16 = mOutBuffer16 != 0 ? mOutBuffer16->audioBuffer()->s16 : NULL;
 
-        result.append("\t\t- Float and int16 buffers\n");
-        result.append("\t\t\tIn_float   In_int16   Out_float  Out_int16\n");
-        snprintf(buffer, SIZE,"\t\t\t%p %p %p %p\n",
-                mConfig.inputCfg.buffer.raw,
-                pIn16,
-                pOut16,
-                mConfig.outputCfg.buffer.raw);
-        result.append(buffer);
-    }
+    result.appendFormat("\t\t- HAL buffers:\n"
+            "\t\t\tIn(%s) InConversion(%s) Out(%s) OutConversion(%s)\n",
+            dumpInOutBuffer(true /* isInput */, mInBuffer).c_str(),
+            dumpInOutBuffer(true /* isInput */, mInConversionBuffer).c_str(),
+            dumpInOutBuffer(false /* isInput */, mOutBuffer).c_str(),
+            dumpInOutBuffer(false /* isInput */, mOutConversionBuffer).c_str());
 #endif
 
     snprintf(buffer, SIZE, "\t\t%zu Clients:\n", mHandles.size());
@@ -2161,19 +2171,6 @@
     }
 }
 
-static void dumpInOutBuffer(
-        char *dump, size_t dumpSize, bool isInput, EffectBufferHalInterface *buffer) {
-    if (buffer == nullptr) {
-        snprintf(dump, dumpSize, "%p", buffer);
-    } else if (buffer->externalData() != nullptr) {
-        snprintf(dump, dumpSize, "%p -> %p",
-                isInput ? buffer->externalData() : buffer->audioBuffer()->raw,
-                isInput ? buffer->audioBuffer()->raw : buffer->externalData());
-    } else {
-        snprintf(dump, dumpSize, "%p", buffer->audioBuffer()->raw);
-    }
-}
-
 void AudioFlinger::EffectChain::dump(int fd, const Vector<String16>& args)
 {
     const size_t SIZE = 256;
@@ -2191,15 +2188,13 @@
             result.append("\tCould not lock mutex:\n");
         }
 
-        char inBufferStr[64], outBufferStr[64];
-        dumpInOutBuffer(inBufferStr, sizeof(inBufferStr), true, mInBuffer.get());
-        dumpInOutBuffer(outBufferStr, sizeof(outBufferStr), false, mOutBuffer.get());
-        snprintf(buffer, SIZE, "\t%-*s%-*s   Active tracks:\n",
-                (int)strlen(inBufferStr), "In buffer    ",
-                (int)strlen(outBufferStr), "Out buffer      ");
-        result.append(buffer);
-        snprintf(buffer, SIZE, "\t%s   %s   %d\n", inBufferStr, outBufferStr, mActiveTrackCnt);
-        result.append(buffer);
+        const std::string inBufferStr = dumpInOutBuffer(true /* isInput */, mInBuffer);
+        const std::string outBufferStr = dumpInOutBuffer(false /* isInput */, mOutBuffer);
+        result.appendFormat("\t%-*s%-*s   Active tracks:\n",
+                (int)inBufferStr.size(), "In buffer    ",
+                (int)outBufferStr.size(), "Out buffer      ");
+        result.appendFormat("\t%s   %s   %d\n",
+                inBufferStr.c_str(), outBufferStr.c_str(), mActiveTrackCnt);
         write(fd, result.string(), result.size());
 
         for (size_t i = 0; i < numEffects; ++i) {
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 1864e0f..eea3208 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -171,8 +171,8 @@
 
 #ifdef FLOAT_EFFECT_CHAIN
     bool    mSupportsFloat;         // effect supports float processing
-    sp<EffectBufferHalInterface> mInBuffer16;  // Buffers for interacting with HAL at 16 bits
-    sp<EffectBufferHalInterface> mOutBuffer16;
+    sp<EffectBufferHalInterface> mInConversionBuffer;  // Buffers for HAL conversion if needed.
+    sp<EffectBufferHalInterface> mOutConversionBuffer;
 #endif
 };
 
diff --git a/services/audioflinger/FastMixerDumpState.cpp b/services/audioflinger/FastMixerDumpState.cpp
index 6475f22..2e4fb8c 100644
--- a/services/audioflinger/FastMixerDumpState.cpp
+++ b/services/audioflinger/FastMixerDumpState.cpp
@@ -78,7 +78,12 @@
     uint32_t bounds = mBounds;
     uint32_t newestOpen = bounds & 0xFFFF;
     uint32_t oldestClosed = bounds >> 16;
-    uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
+
+    //uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
+    uint32_t n;
+    __builtin_sub_overflow(newestOpen, oldestClosed, &n);
+    n = n & 0xFFFF;
+
     if (n > mSamplingN) {
         ALOGE("too many samples %u", n);
         n = mSamplingN;
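A minimal illustration (not part of the patch) of why the subtraction above is rewritten: the ring-buffer indices intentionally rely on modular wraparound, which an unsigned-integer-overflow sanitizer would flag; __builtin_sub_overflow performs the same wrapping subtraction explicitly, and the overflow flag is discarded.

#include <cstdint>

static uint32_t wrappedSampleCount(uint32_t newestOpen, uint32_t oldestClosed) {
    uint32_t n;
    // Equivalent to (newestOpen - oldestClosed) mod 2^32 without tripping -fsanitize=integer.
    (void)__builtin_sub_overflow(newestOpen, oldestClosed, &n);
    return n & 0xFFFF;  // only the low 16 bits carry the sample count
}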
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index b2a1e18..7636df6 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -6708,12 +6708,12 @@
 // RecordThread::createRecordTrack_l() must be called with AudioFlinger::mLock held
 sp<AudioFlinger::RecordThread::RecordTrack> AudioFlinger::RecordThread::createRecordTrack_l(
         const sp<AudioFlinger::Client>& client,
-        uint32_t sampleRate,
+        uint32_t *pSampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
         size_t *pFrameCount,
         audio_session_t sessionId,
-        size_t *notificationFrames,
+        size_t *pNotificationFrameCount,
         uid_t uid,
         audio_input_flags_t *flags,
         pid_t tid,
@@ -6721,16 +6721,30 @@
         audio_port_handle_t portId)
 {
     size_t frameCount = *pFrameCount;
+    size_t notificationFrameCount = *pNotificationFrameCount;
     sp<RecordTrack> track;
     status_t lStatus;
     audio_input_flags_t inputFlags = mInput->flags;
+    audio_input_flags_t requestedFlags = *flags;
+    uint32_t sampleRate;
+
+    lStatus = initCheck();
+    if (lStatus != NO_ERROR) {
+        ALOGE("createRecordTrack_l() audio driver not initialized");
+        goto Exit;
+    }
+
+    if (*pSampleRate == 0) {
+        *pSampleRate = mSampleRate;
+    }
+    sampleRate = *pSampleRate;
 
     // special case for FAST flag considered OK if fast capture is present
     if (hasFastCapture()) {
         inputFlags = (audio_input_flags_t)(inputFlags | AUDIO_INPUT_FLAG_FAST);
     }
 
-    // Check if requested flags are compatible with output stream flags
+    // Check if requested flags are compatible with input stream flags
     if ((*flags & inputFlags) != *flags) {
         ALOGW("createRecordTrack_l(): mismatch between requested flags (%08x) and"
                 " input flags (%08x)",
@@ -6785,12 +6799,20 @@
       }
     }
 
+    // If FAST or RAW flags were corrected, ask the caller to request a new input from audio policy
+    if ((*flags & AUDIO_INPUT_FLAG_FAST) !=
+            (requestedFlags & AUDIO_INPUT_FLAG_FAST)) {
+        *flags = (audio_input_flags_t) (*flags & ~(AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW));
+        lStatus = BAD_TYPE;
+        goto Exit;
+    }
+
     // compute track buffer size in frames, and suggest the notification frame count
     if (*flags & AUDIO_INPUT_FLAG_FAST) {
         // fast track: frame count is exactly the pipe depth
         frameCount = mPipeFramesP2;
         // ignore requested notificationFrames, and always notify exactly once every HAL buffer
-        *notificationFrames = mFrameCount;
+        notificationFrameCount = mFrameCount;
     } else {
         // not fast track: max notification period is resampled equivalent of one HAL buffer time
         //                 or 20 ms if there is a fast capture
@@ -6809,17 +6831,12 @@
         const size_t minFrameCount = maxNotificationFrames *
                 max(kMinNotifications, minNotificationsByMs);
         frameCount = max(frameCount, minFrameCount);
-        if (*notificationFrames == 0 || *notificationFrames > maxNotificationFrames) {
-            *notificationFrames = maxNotificationFrames;
+        if (notificationFrameCount == 0 || notificationFrameCount > maxNotificationFrames) {
+            notificationFrameCount = maxNotificationFrames;
         }
     }
     *pFrameCount = frameCount;
-
-    lStatus = initCheck();
-    if (lStatus != NO_ERROR) {
-        ALOGE("createRecordTrack_l() audio driver not initialized");
-        goto Exit;
-    }
+    *pNotificationFrameCount = notificationFrameCount;
 
     { // scope for mLock
         Mutex::Autolock _l(mLock);
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index c7b60d6..17f26c5 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1327,12 +1327,12 @@
 
             sp<AudioFlinger::RecordThread::RecordTrack>  createRecordTrack_l(
                     const sp<AudioFlinger::Client>& client,
-                    uint32_t sampleRate,
+                    uint32_t *pSampleRate,
                     audio_format_t format,
                     audio_channel_mask_t channelMask,
                     size_t *pFrameCount,
                     audio_session_t sessionId,
-                    size_t *notificationFrames,
+                    size_t *pNotificationFrameCount,
                     uid_t uid,
                     audio_input_flags_t *flags,
                     pid_t tid,
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index d4ce0b4..a3ea756 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -192,7 +192,7 @@
                                     // where for AudioTrack (but not AudioRecord),
                                     // 8-bit PCM samples are stored as 16-bit
     const size_t        mFrameCount;// size of track buffer given at createTrack() or
-                                    // openRecord(), and then adjusted as needed
+                                    // createRecord(), and then adjusted as needed
 
     const audio_session_t mSessionId;
     uid_t               mUid;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index b169bac..d9cd121 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -34,8 +34,8 @@
 class AudioInputDescriptor: public AudioPortConfig, public AudioSessionInfoProvider
 {
 public:
-    explicit AudioInputDescriptor(const sp<IOProfile>& profile);
-    void setIoHandle(audio_io_handle_t ioHandle);
+    explicit AudioInputDescriptor(const sp<IOProfile>& profile,
+                                  AudioPolicyClientInterface *clientInterface);
     audio_port_handle_t getId() const;
     audio_module_handle_t getModuleHandle() const;
     uint32_t getOpenRefCount() const;
@@ -73,6 +73,14 @@
 
     void setPatchHandle(audio_patch_handle_t handle);
 
+    status_t open(const audio_config_t *config,
+                  audio_devices_t device,
+                  const String8& address,
+                  audio_source_t source,
+                  audio_input_flags_t flags,
+                  audio_io_handle_t *input);
+    void close();
+
 private:
     audio_patch_handle_t          mPatchHandle;
     audio_port_handle_t           mId;
@@ -85,6 +93,7 @@
     // a particular input started and prevent preemption of this active input by this session.
     // We also inherit sessions from the preempted input to avoid a 3 way preemption loop etc...
     SortedVector<audio_session_t> mPreemptedSessions;
+    AudioPolicyClientInterface *mClientInterface;
 };
 
 class AudioInputCollection :
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index c09cb5a..0be8fc1 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -101,8 +101,6 @@
 
     status_t    dump(int fd);
 
-    void setIoHandle(audio_io_handle_t ioHandle);
-
     virtual audio_devices_t device() const;
     virtual bool sharesHwModuleWith(const sp<AudioOutputDescriptor>& outputDesc);
     virtual audio_devices_t supportedDevices();
@@ -122,6 +120,14 @@
                            const struct audio_port_config *srcConfig = NULL) const;
     virtual void toAudioPort(struct audio_port *port) const;
 
+            status_t open(const audio_config_t *config,
+                          audio_devices_t device,
+                          const String8& address,
+                          audio_stream_type_t stream,
+                          audio_output_flags_t flags,
+                          audio_io_handle_t *output);
+            void close();
+
     const sp<IOProfile> mProfile;          // I/O profile this output derives from
     audio_io_handle_t mIoHandle;           // output handle
     uint32_t mLatency;                  //
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index ec04ef7..118f0d2 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -34,7 +34,11 @@
 {
 public:
     IOProfile(const String8 &name, audio_port_role_t role)
-        : AudioPort(name, AUDIO_PORT_TYPE_MIX, role) {}
+        : AudioPort(name, AUDIO_PORT_TYPE_MIX, role),
+          maxOpenCount((role == AUDIO_PORT_ROLE_SOURCE) ? 1 : 0),
+          curOpenCount(0),
+          maxActiveCount(1),
+          curActiveCount(0) {}
 
     // For a Profile aka MixPort, tag name and name are equivalent.
     virtual const String8 getTagName() const { return getName(); }
@@ -103,6 +107,34 @@
 
     const DeviceVector &getSupportedDevices() const { return mSupportedDevices; }
 
+    bool canOpenNewIo() {
+        if (maxOpenCount == 0 || curOpenCount < maxOpenCount) {
+            return true;
+        }
+        return false;
+    }
+
+    bool canStartNewIo() {
+        if (maxActiveCount == 0 || curActiveCount < maxActiveCount) {
+            return true;
+        }
+        return false;
+    }
+
+    // Maximum number of input or output streams that can be simultaneously opened for this profile.
+    // By convention 0 means no limit. To respect legacy behavior, initialized to 1 for output
+    // profiles and 0 for input profiles
+    uint32_t     maxOpenCount;
+    // Number of streams currently opened for this profile.
+    uint32_t     curOpenCount;
+    // Maximum number of input or output streams that can be simultaneously active for this profile.
+    // By convention 0 means no limit. To respect legacy behavior, initialized to 0 for output
+    // profiles and 1 for input profiles
+    uint32_t     maxActiveCount;
+    // Number of streams currently active for this profile. This is not the number of active clients
+    // (AudioTrack or AudioRecord) but the number of active HAL streams.
+    uint32_t     curActiveCount;
+
 private:
     DeviceVector mSupportedDevices; // supported devices: this input/output can be routed from/to
 };
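A hedged sketch (not part of the patch) of how a policy-manager caller might be expected to consult the new counters before opening another HAL stream on a profile; it assumes the AOSP audiopolicy headers and namespace, openOutputForProfile() is a hypothetical helper, and the stream/flag choices are illustrative.

static audio_io_handle_t openOutputForProfile(const sp<IOProfile>& profile,
                                              AudioPolicyClientInterface *clientInterface,
                                              audio_devices_t device,
                                              const String8& address)
{
    if (!profile->canOpenNewIo()) {
        // maxOpenCount HAL streams are already open for this profile:
        // the caller should reuse an existing descriptor instead.
        return AUDIO_IO_HANDLE_NONE;
    }
    sp<SwAudioOutputDescriptor> desc = new SwAudioOutputDescriptor(profile, clientInterface);
    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
    // SwAudioOutputDescriptor::open() increments profile->curOpenCount on success.
    if (desc->open(nullptr /* default config */, device, address,
                   AUDIO_STREAM_MUSIC, AUDIO_OUTPUT_FLAG_NONE, &output) != NO_ERROR) {
        return AUDIO_IO_HANDLE_NONE;
    }
    return output;
}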
diff --git a/services/audiopolicy/common/managerdefinitions/include/Serializer.h b/services/audiopolicy/common/managerdefinitions/include/Serializer.h
index 078b582..3b0e209 100644
--- a/services/audiopolicy/common/managerdefinitions/include/Serializer.h
+++ b/services/audiopolicy/common/managerdefinitions/include/Serializer.h
@@ -92,6 +92,8 @@
         static const char name[];
         static const char role[];
         static const char flags[];
+        static const char maxOpenCount[];
+        static const char maxActiveCount[];
     };
 
     typedef IOProfile Element;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 2492ed6..46168a4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "APM::AudioInputDescriptor"
 //#define LOG_NDEBUG 0
 
+#include <AudioPolicyInterface.h>
 #include "AudioInputDescriptor.h"
 #include "IOProfile.h"
 #include "AudioGain.h"
@@ -26,10 +27,12 @@
 
 namespace android {
 
-AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile)
+AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile,
+                                           AudioPolicyClientInterface *clientInterface)
     : mIoHandle(0),
       mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL),
-      mProfile(profile), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0)
+      mProfile(profile), mPatchHandle(AUDIO_PATCH_HANDLE_NONE), mId(0),
+      mClientInterface(clientInterface)
 {
     if (profile != NULL) {
         profile->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
@@ -39,12 +42,6 @@
     }
 }
 
-void AudioInputDescriptor::setIoHandle(audio_io_handle_t ioHandle)
-{
-    mId = AudioPort::getNextUniqueId();
-    mIoHandle = ioHandle;
-}
-
 audio_module_handle_t AudioInputDescriptor::getModuleHandle() const
 {
     if (mProfile == 0) {
@@ -192,6 +189,74 @@
     return config;
 }
 
+status_t AudioInputDescriptor::open(const audio_config_t *config,
+                                       audio_devices_t device,
+                                       const String8& address,
+                                       audio_source_t source,
+                                       audio_input_flags_t flags,
+                                       audio_io_handle_t *input)
+{
+    audio_config_t lConfig;
+    if (config == nullptr) {
+        lConfig = AUDIO_CONFIG_INITIALIZER;
+        lConfig.sample_rate = mSamplingRate;
+        lConfig.channel_mask = mChannelMask;
+        lConfig.format = mFormat;
+    } else {
+        lConfig = *config;
+    }
+
+    String8 lAddress = address;
+    if (lAddress == "") {
+        const DeviceVector& supportedDevices = mProfile->getSupportedDevices();
+        const DeviceVector& devicesForType = supportedDevices.getDevicesFromType(device);
+        lAddress = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
+                  : String8("");
+    }
+
+    mDevice = device;
+
+    ALOGV("opening input for device %08x address %s profile %p name %s",
+          mDevice, lAddress.string(), mProfile.get(), mProfile->getName().string());
+
+    status_t status = mClientInterface->openInput(mProfile->getModuleHandle(),
+                                                  input,
+                                                  &lConfig,
+                                                  &mDevice,
+                                                  lAddress,
+                                                  source,
+                                                  flags);
+    LOG_ALWAYS_FATAL_IF(mDevice != device,
+                        "%s openInput returned device %08x when given device %08x",
+                        __FUNCTION__, mDevice, device);
+
+    if (status == NO_ERROR) {
+        mSamplingRate = lConfig.sample_rate;
+        mChannelMask = lConfig.channel_mask;
+        mFormat = lConfig.format;
+        mId = AudioPort::getNextUniqueId();
+        mIoHandle = *input;
+        mProfile->curOpenCount++;
+    }
+
+    return status;
+}
+
+
+void AudioInputDescriptor::close()
+{
+    if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+        mClientInterface->closeInput(mIoHandle);
+        LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
+                            __FUNCTION__, mProfile->curOpenCount);
+        if (isActive()) {
+            mProfile->curActiveCount--;
+        }
+        mProfile->curOpenCount--;
+        mIoHandle = AUDIO_IO_HANDLE_NONE;
+    }
+}
+
 status_t AudioInputDescriptor::dump(int fd)
 {
     const size_t SIZE = 256;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 4d3c3b5..f6ee1c3 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -23,6 +23,7 @@
 #include "AudioGain.h"
 #include "Volume.h"
 #include "HwModule.h"
+#include <media/AudioParameter.h>
 #include <media/AudioPolicy.h>
 
 // A device mask for all audio output devices that are considered "remote" when evaluating
@@ -231,13 +232,6 @@
     }
 }
 
-void SwAudioOutputDescriptor::setIoHandle(audio_io_handle_t ioHandle)
-{
-    mId = AudioPort::getNextUniqueId();
-    mIoHandle = ioHandle;
-}
-
-
 status_t SwAudioOutputDescriptor::dump(int fd)
 {
     const size_t SIZE = 256;
@@ -387,6 +381,96 @@
     return changed;
 }
 
+status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
+                                       audio_devices_t device,
+                                       const String8& address,
+                                       audio_stream_type_t stream,
+                                       audio_output_flags_t flags,
+                                       audio_io_handle_t *output)
+{
+    audio_config_t lConfig;
+    if (config == nullptr) {
+        lConfig = AUDIO_CONFIG_INITIALIZER;
+        lConfig.sample_rate = mSamplingRate;
+        lConfig.channel_mask = mChannelMask;
+        lConfig.format = mFormat;
+    } else {
+        lConfig = *config;
+    }
+
+    String8 lAddress = address;
+    if (lAddress == "") {
+        const DeviceVector& supportedDevices = mProfile->getSupportedDevices();
+        const DeviceVector& devicesForType = supportedDevices.getDevicesFromType(device);
+        lAddress = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
+                  : String8("");
+    }
+
+    mDevice = device;
+    // if the selected profile is offloaded and no offload info was specified,
+    // create a default one
+    if ((mProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) &&
+            lConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
+        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+        lConfig.offload_info = AUDIO_INFO_INITIALIZER;
+        lConfig.offload_info.sample_rate = lConfig.sample_rate;
+        lConfig.offload_info.channel_mask = lConfig.channel_mask;
+        lConfig.offload_info.format = lConfig.format;
+        lConfig.offload_info.stream_type = stream;
+        lConfig.offload_info.duration_us = -1;
+        lConfig.offload_info.has_video = true; // conservative
+        lConfig.offload_info.is_streaming = true; // likely
+    }
+
+    mFlags = (audio_output_flags_t)(mFlags | flags);
+
+    ALOGV("opening output for device %08x address %s profile %p name %s",
+          mDevice, lAddress.string(), mProfile.get(), mProfile->getName().string());
+
+    status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
+                                                   output,
+                                                   &lConfig,
+                                                   &mDevice,
+                                                   lAddress,
+                                                   &mLatency,
+                                                   mFlags);
+    LOG_ALWAYS_FATAL_IF(mDevice != device,
+                        "%s openOutput returned device %08x when given device %08x",
+                        __FUNCTION__, mDevice, device);
+
+    if (status == NO_ERROR) {
+        mSamplingRate = lConfig.sample_rate;
+        mChannelMask = lConfig.channel_mask;
+        mFormat = lConfig.format;
+        mId = AudioPort::getNextUniqueId();
+        mIoHandle = *output;
+        mProfile->curOpenCount++;
+    }
+
+    return status;
+}
+
+
+void SwAudioOutputDescriptor::close()
+{
+    if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+        AudioParameter param;
+        param.add(String8("closing"), String8("true"));
+        mClientInterface->setParameters(mIoHandle, param.toString());
+
+        mClientInterface->closeOutput(mIoHandle);
+
+        LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
+                            __FUNCTION__, mProfile->curOpenCount);
+        if (isActive()) {
+            mProfile->curActiveCount--;
+        }
+        mProfile->curOpenCount--;
+        mIoHandle = AUDIO_IO_HANDLE_NONE;
+    }
+}
+
+
 // HwAudioOutputDescriptor implementation
 HwAudioOutputDescriptor::HwAudioOutputDescriptor(const sp<AudioSourceDescriptor>& source,
                                                  AudioPolicyClientInterface *clientInterface)
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 74ef4ec..fc89672 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -122,6 +122,16 @@
     result.append("\n");
     write(fd, result.string(), result.size());
     mSupportedDevices.dump(fd, String8("Supported"), 4, false);
+
+    result.clear();
+    snprintf(buffer, SIZE, "\n    - maxOpenCount: %u - curOpenCount: %u\n",
+             maxOpenCount, curOpenCount);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "    - maxActiveCount: %u - curActiveCount: %u\n",
+             maxActiveCount, curActiveCount);
+    result.append(buffer);
+
+    write(fd, result.string(), result.size());
 }
 
 void IOProfile::log()
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 0908ffc..aa589f4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -217,6 +217,8 @@
 const char MixPortTraits::Attributes::name[] = "name";
 const char MixPortTraits::Attributes::role[] = "role";
 const char MixPortTraits::Attributes::flags[] = "flags";
+const char MixPortTraits::Attributes::maxOpenCount[] = "maxOpenCount";
+const char MixPortTraits::Attributes::maxActiveCount[] = "maxActiveCount";
 
 status_t MixPortTraits::deserialize(_xmlDoc *doc, const _xmlNode *child, PtrElement &mixPort,
                                     PtrSerializingCtx /*serializingContext*/)
@@ -259,6 +261,14 @@
             mixPort->setFlags(InputFlagConverter::maskFromString(flags));
         }
     }
+    string maxOpenCount = getXmlAttribute(child, Attributes::maxOpenCount);
+    if (!maxOpenCount.empty()) {
+        convertTo(maxOpenCount, mixPort->maxOpenCount);
+    }
+    string maxActiveCount = getXmlAttribute(child, Attributes::maxActiveCount);
+    if (!maxActiveCount.empty()) {
+        convertTo(maxActiveCount, mixPort->maxActiveCount);
+    }
     // Deserialize children
     AudioGainTraits::Collection gains;
     deserializeCollection<AudioGainTraits>(doc, child, gains, NULL);
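
The mixPort attributes parsed above (maxOpenCount, maxActiveCount) feed the per-profile limits that later hunks consult through IOProfile::canOpenNewIo() and IOProfile::canStartNewIo(), and that IOProfile::dump() now prints alongside curOpenCount/curActiveCount. As a rough sketch of the counting logic those calls imply — assuming the counters live on IOProfile and that a maximum of 0 means "no limit"; the member names come from this patch, but the defaults and the zero-means-unlimited rule are assumptions:

    // Sketch only -- not this patch's IOProfile implementation.
    #include <cstdint>

    struct IOProfileCountersSketch {
        uint32_t maxOpenCount = 1;      // assumed default: one open stream per profile
        uint32_t curOpenCount = 0;      // incremented in open(), decremented in close()
        uint32_t maxActiveCount = 1;    // assumed default: one active stream per profile
        uint32_t curActiveCount = 0;    // incremented in startOutput()/startInput()

        // Another stream may be opened on this profile if the open limit is not reached.
        bool canOpenNewIo() const {
            return maxOpenCount == 0 || curOpenCount < maxOpenCount;
        }

        // An already-opened stream may become active if the active limit is not reached.
        bool canStartNewIo() const {
            return maxActiveCount == 0 || curActiveCount < maxActiveCount;
        }
    };

In the AudioPolicyManager hunks below, attempts that would exceed a limit either skip the profile, fall back to a mixed output, or fail with INVALID_OPERATION when a stream tries to start.
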
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 62cbfc1..b363779 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -843,12 +843,10 @@
         flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
     }
 
-    ALOGV("getOutputForAttr() device 0x%x, samplingRate %d, format %x, channelMask %x, flags %x",
+    ALOGV("getOutputForAttr() device 0x%x, sampling rate %d, format %x, channel mask %x, flags %x",
           device, config->sample_rate, config->format, config->channel_mask, flags);
 
-    *output = getOutputForDevice(device, session, *stream,
-                                 config->sample_rate, config->format, config->channel_mask,
-                                 flags, &config->offload_info);
+    *output = getOutputForDevice(device, session, *stream, config, flags);
     if (*output == AUDIO_IO_HANDLE_NONE) {
         mOutputRoutes.removeRoute(session);
         return INVALID_OPERATION;
@@ -867,11 +865,8 @@
         audio_devices_t device,
         audio_session_t session,
         audio_stream_type_t stream,
-        uint32_t samplingRate,
-        audio_format_t format,
-        audio_channel_mask_t channelMask,
-        audio_output_flags_t flags,
-        const audio_offload_info_t *offloadInfo)
+        const audio_config_t *config,
+        audio_output_flags_t flags)
 {
     audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
     status_t status;
@@ -898,7 +893,7 @@
     if (stream == AUDIO_STREAM_TTS) {
         flags = AUDIO_OUTPUT_FLAG_TTS;
     } else if (stream == AUDIO_STREAM_VOICE_CALL &&
-               audio_is_linear_pcm(format)) {
+               audio_is_linear_pcm(config->format)) {
         flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_VOIP_RX |
                                        AUDIO_OUTPUT_FLAG_DIRECT);
         ALOGV("Set VoIP and Direct output flags for PCM format");
@@ -909,8 +904,8 @@
     // skip direct output selection if the request can obviously be attached to a mixed output
     // and not explicitly requested
     if (((flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
-            audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX &&
-            audio_channel_count_from_out_mask(channelMask) <= 2) {
+            audio_is_linear_pcm(config->format) && config->sample_rate <= SAMPLE_RATE_HZ_MAX &&
+            audio_channel_count_from_out_mask(config->channel_mask) <= 2) {
         goto non_direct_output;
     }
 
@@ -924,102 +919,58 @@
     if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
             !(mEffects.isNonOffloadableEffectEnabled() || mMasterMono)) {
         profile = getProfileForDirectOutput(device,
-                                           samplingRate,
-                                           format,
-                                           channelMask,
+                                           config->sample_rate,
+                                           config->format,
+                                           config->channel_mask,
                                            (audio_output_flags_t)flags);
     }
 
     if (profile != 0) {
-        sp<SwAudioOutputDescriptor> outputDesc = NULL;
-
         for (size_t i = 0; i < mOutputs.size(); i++) {
             sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
             if (!desc->isDuplicated() && (profile == desc->mProfile)) {
-                outputDesc = desc;
                 // reuse direct output if currently open by the same client
                 // and configured with same parameters
-                if ((samplingRate == outputDesc->mSamplingRate) &&
-                    audio_formats_match(format, outputDesc->mFormat) &&
-                    (channelMask == outputDesc->mChannelMask)) {
-                  if (session == outputDesc->mDirectClientSession) {
-                      outputDesc->mDirectOpenCount++;
-                      ALOGV("getOutputForDevice() reusing direct output %d for session %d",
-                            mOutputs.keyAt(i), session);
-                      return mOutputs.keyAt(i);
-                  } else {
-                      ALOGV("getOutputForDevice() do not reuse direct output because"
-                              "current client (%d) is not the same as requesting client (%d)",
-                            outputDesc->mDirectClientSession, session);
-                      goto non_direct_output;
-                  }
+                if ((config->sample_rate == desc->mSamplingRate) &&
+                    audio_formats_match(config->format, desc->mFormat) &&
+                    (config->channel_mask == desc->mChannelMask) &&
+                    (session == desc->mDirectClientSession)) {
+                    desc->mDirectOpenCount++;
+                    ALOGV("getOutputForDevice() reusing direct output %d for session %d",
+                        mOutputs.keyAt(i), session);
+                    return mOutputs.keyAt(i);
                 }
             }
         }
-        // close direct output if currently open and configured with different parameters
-        if (outputDesc != NULL) {
-            closeOutput(outputDesc->mIoHandle);
+
+        if (!profile->canOpenNewIo()) {
+            goto non_direct_output;
         }
 
-        // if the selected profile is offloaded and no offload info was specified,
-        // create a default one
-        audio_offload_info_t defaultOffloadInfo = AUDIO_INFO_INITIALIZER;
-        if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) && !offloadInfo) {
-            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
-            defaultOffloadInfo.sample_rate = samplingRate;
-            defaultOffloadInfo.channel_mask = channelMask;
-            defaultOffloadInfo.format = format;
-            defaultOffloadInfo.stream_type = stream;
-            defaultOffloadInfo.bit_rate = 0;
-            defaultOffloadInfo.duration_us = -1;
-            defaultOffloadInfo.has_video = true; // conservative
-            defaultOffloadInfo.is_streaming = true; // likely
-            offloadInfo = &defaultOffloadInfo;
-        }
-
-        outputDesc = new SwAudioOutputDescriptor(profile, mpClientInterface);
-        outputDesc->mDevice = device;
-        outputDesc->mLatency = 0;
-        outputDesc->mFlags = (audio_output_flags_t)(outputDesc->mFlags | flags);
-        audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-        config.sample_rate = samplingRate;
-        config.channel_mask = channelMask;
-        config.format = format;
-        if (offloadInfo != NULL) {
-            config.offload_info = *offloadInfo;
-        }
-        DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromType(device);
-        String8 address = outputDevices.size() > 0 ? outputDevices.itemAt(0)->mAddress
-                : String8("");
-        status = mpClientInterface->openOutput(profile->getModuleHandle(),
-                                               &output,
-                                               &config,
-                                               &outputDesc->mDevice,
-                                               address,
-                                               &outputDesc->mLatency,
-                                               outputDesc->mFlags);
+        sp<SwAudioOutputDescriptor> outputDesc =
+                new SwAudioOutputDescriptor(profile, mpClientInterface);
+        status = outputDesc->open(config, device, String8(""), stream, flags, &output);
 
         // only accept an output with the requested parameters
         if (status != NO_ERROR ||
-            (samplingRate != 0 && samplingRate != config.sample_rate) ||
-            (format != AUDIO_FORMAT_DEFAULT && !audio_formats_match(format, config.format)) ||
-            (channelMask != 0 && channelMask != config.channel_mask)) {
-            ALOGV("getOutputForDevice() failed opening direct output: output %d samplingRate %d %d,"
-                    "format %d %d, channelMask %04x %04x", output, samplingRate,
-                    outputDesc->mSamplingRate, format, outputDesc->mFormat, channelMask,
-                    outputDesc->mChannelMask);
+            (config->sample_rate != 0 && config->sample_rate != outputDesc->mSamplingRate) ||
+            (config->format != AUDIO_FORMAT_DEFAULT &&
+                    !audio_formats_match(config->format, outputDesc->mFormat)) ||
+            (config->channel_mask != 0 && config->channel_mask != outputDesc->mChannelMask)) {
+            ALOGV("getOutputForDevice() failed opening direct output: output %d sample rate %d %d,"
+                    "format %d %d, channel mask %04x %04x", output, config->sample_rate,
+                    outputDesc->mSamplingRate, config->format, outputDesc->mFormat,
+                    config->channel_mask, outputDesc->mChannelMask);
             if (output != AUDIO_IO_HANDLE_NONE) {
-                mpClientInterface->closeOutput(output);
+                outputDesc->close();
             }
             // fall back to mixer output if possible when the direct output could not be open
-            if (audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX) {
+            if (audio_is_linear_pcm(config->format) &&
+                    config->sample_rate <= SAMPLE_RATE_HZ_MAX) {
                 goto non_direct_output;
             }
             return AUDIO_IO_HANDLE_NONE;
         }
-        outputDesc->mSamplingRate = config.sample_rate;
-        outputDesc->mChannelMask = config.channel_mask;
-        outputDesc->mFormat = config.format;
         outputDesc->mRefCount[stream] = 0;
         outputDesc->mStopTime[stream] = 0;
         outputDesc->mDirectOpenCount = 1;
@@ -1045,18 +996,18 @@
     // open a non direct output
 
     // for non direct outputs, only PCM is supported
-    if (audio_is_linear_pcm(format)) {
+    if (audio_is_linear_pcm(config->format)) {
         // get which output is suitable for the specified stream. The actual
         // routing change will happen when startOutput() will be called
         SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
 
         // at this stage we should ignore the DIRECT flag as no direct output could be found earlier
         flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
-        output = selectOutput(outputs, flags, format);
+        output = selectOutput(outputs, flags, config->format);
     }
     ALOGW_IF((output == 0), "getOutputForDevice() could not find output for stream %d, "
-            "samplingRate %d, format %d, channels %x, flags %x",
-            stream, samplingRate, format, channelMask, flags);
+            "sampling rate %d, format %d, channels %x, flags %x",
+            stream, config->sample_rate, config->format, config->channel_mask, flags);
 
     return output;
 }
@@ -1155,6 +1106,13 @@
 
     sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
 
+    if (!outputDesc->isActive()) {
+        if (!outputDesc->mProfile->canStartNewIo()) {
+            return INVALID_OPERATION;
+        }
+        outputDesc->mProfile->curActiveCount++;
+    }
+
     // Routing?
     mOutputRoutes.incRouteActivity(session);
 
@@ -1182,6 +1140,12 @@
 
     if (status != NO_ERROR) {
         mOutputRoutes.decRouteActivity(session);
+        if (!outputDesc->isActive()) {
+            LOG_ALWAYS_FATAL_IF(outputDesc->mProfile->curActiveCount < 1,
+                                "%s invalid profile active count %u",
+                                __FUNCTION__, outputDesc->mProfile->curActiveCount);
+            outputDesc->mProfile->curActiveCount--;
+        }
         return status;
     }
     // Automatically enable the remote submix input when output is started on a re routing mix
@@ -1370,7 +1334,15 @@
         }
     }
 
-    return stopSource(outputDesc, stream, forceDeviceUpdate);
+    status_t status = stopSource(outputDesc, stream, forceDeviceUpdate);
+
+    if (status == NO_ERROR && !outputDesc->isActive()) {
+        LOG_ALWAYS_FATAL_IF(outputDesc->mProfile->curActiveCount < 1,
+                            "%s invalid profile active count %u",
+                            __FUNCTION__, outputDesc->mProfile->curActiveCount);
+        outputDesc->mProfile->curActiveCount--;
+    }
+    return status;
 }
 
 status_t AudioPolicyManager::stopSource(const sp<AudioOutputDescriptor>& outputDesc,
@@ -1473,7 +1445,7 @@
                                              input_type_t *inputType,
                                              audio_port_handle_t *portId)
 {
-    ALOGV("getInputForAttr() source %d, samplingRate %d, format %d, channelMask %x,"
+    ALOGV("getInputForAttr() source %d, sampling rate %d, format %d, channel mask %x,"
             "session %d, flags %#x",
           attr->source, config->sample_rate, config->format, config->channel_mask, session, flags);
 
@@ -1485,6 +1457,10 @@
     AudioMix *policyMix = NULL;
     DeviceVector inputDevices;
 
+    if (inputSource == AUDIO_SOURCE_DEFAULT) {
+        inputSource = AUDIO_SOURCE_MIC;
+    }
+
     // Explicit routing?
     sp<DeviceDescriptor> deviceDesc;
     if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
@@ -1541,9 +1517,6 @@
     *input = AUDIO_IO_HANDLE_NONE;
     *inputType = API_INPUT_INVALID;
 
-    if (inputSource == AUDIO_SOURCE_DEFAULT) {
-        inputSource = AUDIO_SOURCE_MIC;
-    }
     halInputSource = inputSource;
 
     // TODO: check for existing client for this port ID
@@ -1593,7 +1566,7 @@
     }
 
     *input = getInputForDevice(device, address, session, uid, inputSource,
-                               config->sample_rate, config->format, config->channel_mask, flags,
+                               config, flags,
                                policyMix);
     if (*input == AUDIO_IO_HANDLE_NONE) {
         status = INVALID_OPERATION;
@@ -1620,9 +1593,7 @@
                                                         audio_session_t session,
                                                         uid_t uid,
                                                         audio_source_t inputSource,
-                                                        uint32_t samplingRate,
-                                                        audio_format_t format,
-                                                        audio_channel_mask_t channelMask,
+                                                        const audio_config_base_t *config,
                                                         audio_input_flags_t flags,
                                                         AudioMix *policyMix)
 {
@@ -1641,16 +1612,17 @@
             halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
         }
     } else if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION &&
-               audio_is_linear_pcm(format)) {
+               audio_is_linear_pcm(config->format)) {
         flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_VOIP_TX);
     }
 
     // find a compatible input profile (not necessarily identical in parameters)
     sp<IOProfile> profile;
-    // samplingRate and flags may be updated by getInputProfile
-    uint32_t profileSamplingRate = (samplingRate == 0) ? SAMPLE_RATE_HZ_DEFAULT : samplingRate;
-    audio_format_t profileFormat = format;
-    audio_channel_mask_t profileChannelMask = channelMask;
+    // sampling rate and flags may be updated by getInputProfile
+    uint32_t profileSamplingRate = (config->sample_rate == 0) ?
+            SAMPLE_RATE_HZ_DEFAULT : config->sample_rate;
+    audio_format_t profileFormat = config->format;
+    audio_channel_mask_t profileChannelMask = config->channel_mask;
     audio_input_flags_t profileFlags = flags;
     for (;;) {
         profile = getInputProfile(device, address,
@@ -1664,12 +1636,13 @@
             profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
         } else { // fail
             ALOGW("getInputForDevice() could not find profile for device 0x%X, "
-                  "samplingRate %u, format %#x, channelMask 0x%X, flags %#x",
-                    device, samplingRate, format, channelMask, flags);
+                  "sampling rate %u, format %#x, channel mask 0x%X, flags %#x",
+                    device, config->sample_rate, config->format, config->channel_mask, flags);
             return input;
         }
     }
     // Pick input sampling rate if not specified by client
+    uint32_t samplingRate = config->sample_rate;
     if (samplingRate == 0) {
         samplingRate = profileSamplingRate;
     }
@@ -1680,14 +1653,14 @@
     }
 
     sp<AudioSession> audioSession = new AudioSession(session,
-                                                              inputSource,
-                                                              format,
-                                                              samplingRate,
-                                                              channelMask,
-                                                              flags,
-                                                              uid,
-                                                              isSoundTrigger,
-                                                              policyMix, mpClientInterface);
+                                                     inputSource,
+                                                     config->format,
+                                                     samplingRate,
+                                                     config->channel_mask,
+                                                     flags,
+                                                     uid,
+                                                     isSoundTrigger,
+                                                     policyMix, mpClientInterface);
 
 // FIXME: disable concurrent capture until UI is ready
 #if 0
@@ -1731,8 +1704,8 @@
             // can be selected.
             if (!isConcurrentSource(inputSource) &&
                     ((desc->mSamplingRate != samplingRate ||
-                    desc->mChannelMask != channelMask ||
-                    !audio_formats_match(desc->mFormat, format)) &&
+                    desc->mChannelMask != config->channel_mask ||
+                    !audio_formats_match(desc->mFormat, config->format)) &&
                     (source_priority(desc->getHighestPrioritySource(false /*activeOnly*/)) <
                      source_priority(inputSource)))) {
                 reusedInputDesc = desc;
@@ -1755,44 +1728,34 @@
     }
 #endif
 
-    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-    config.sample_rate = profileSamplingRate;
-    config.channel_mask = profileChannelMask;
-    config.format = profileFormat;
-
-    if (address == "") {
-        DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(device);
-        //   the inputs vector must be of size 1, but we don't want to crash here
-        address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress : String8("");
+    if (!profile->canOpenNewIo()) {
+        return AUDIO_IO_HANDLE_NONE;
     }
 
-    status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
-                                                   &input,
-                                                   &config,
-                                                   &device,
-                                                   address,
-                                                   halInputSource,
-                                                   profileFlags);
+    sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile, mpClientInterface);
+
+    audio_config_t lConfig = AUDIO_CONFIG_INITIALIZER;
+    lConfig.sample_rate = profileSamplingRate;
+    lConfig.channel_mask = profileChannelMask;
+    lConfig.format = profileFormat;
+
+    status_t status = inputDesc->open(&lConfig, device, address,
+            halInputSource, profileFlags, &input);
 
     // only accept input with the exact requested set of parameters
     if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE ||
-        (profileSamplingRate != config.sample_rate) ||
-        !audio_formats_match(profileFormat, config.format) ||
-        (profileChannelMask != config.channel_mask)) {
-        ALOGW("getInputForAttr() failed opening input: samplingRate %d"
-              ", format %d, channelMask %x",
-                samplingRate, format, channelMask);
+        (profileSamplingRate != lConfig.sample_rate) ||
+        !audio_formats_match(profileFormat, lConfig.format) ||
+        (profileChannelMask != lConfig.channel_mask)) {
+        ALOGW("getInputForAttr() failed opening input: sampling rate %d"
+              ", format %d, channel mask %x",
+              profileSamplingRate, profileFormat, profileChannelMask);
         if (input != AUDIO_IO_HANDLE_NONE) {
-            mpClientInterface->closeInput(input);
+            inputDesc->close();
         }
         return AUDIO_IO_HANDLE_NONE;
     }
 
-    sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile);
-    inputDesc->mSamplingRate = profileSamplingRate;
-    inputDesc->mFormat = profileFormat;
-    inputDesc->mChannelMask = profileChannelMask;
-    inputDesc->mDevice = device;
     inputDesc->mPolicyMix = policyMix;
     inputDesc->addAudioSession(session, audioSession);
 
@@ -2006,6 +1969,13 @@
         setInputDevice(input, device, true /* force */);
 
         if (inputDesc->getAudioSessionCount(true/*activeOnly*/) == 1) {
+            if (!inputDesc->mProfile->canStartNewIo()) {
+                mInputRoutes.decRouteActivity(session);
+                audioSession->changeActiveCount(-1);
+                return INVALID_OPERATION;
+            }
+            inputDesc->mProfile->curActiveCount++;
+
             // if input maps to a dynamic policy with an activity listener, notify of state change
             if ((inputDesc->mPolicyMix != NULL)
                     && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
@@ -2075,6 +2045,11 @@
         if (inputDesc->isActive()) {
             setInputDevice(input, getNewInputDevice(inputDesc), false /* force */);
         } else {
+            LOG_ALWAYS_FATAL_IF(inputDesc->mProfile->curActiveCount < 1,
+                                "%s invalid profile active count %u",
+                                __FUNCTION__, inputDesc->mProfile->curActiveCount);
+            inputDesc->mProfile->curActiveCount--;
+
             // if input maps to a dynamic policy with an activity listener, notify of state change
             if ((inputDesc->mPolicyMix != NULL)
                     && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
@@ -2169,7 +2144,7 @@
             mAudioPatches.removeItemsAt(patch_index);
             patchRemoved = true;
         }
-        mpClientInterface->closeInput(mInputs.keyAt(input_index));
+        inputDesc->close();
     }
     mInputs.clear();
     SoundTrigger::setCaptureState(false);
@@ -3632,6 +3607,12 @@
         {
             const sp<IOProfile> outProfile = mHwModules[i]->mOutputProfiles[j];
 
+            if (!outProfile->canOpenNewIo()) {
+                ALOGE("Invalid Output profile max open count %u for profile %s",
+                      outProfile->maxOpenCount, outProfile->getTagName().c_str());
+                continue;
+            }
+
             if (!outProfile->hasSupportedDevices()) {
                 ALOGW("Output profile contains no device on module %s", mHwModules[i]->getName());
                 continue;
@@ -3660,30 +3641,15 @@
             const DeviceVector &devicesForType = supportedDevices.getDevicesFromType(profileType);
             String8 address = devicesForType.size() > 0 ? devicesForType.itemAt(0)->mAddress
                     : String8("");
-
-            outputDesc->mDevice = profileType;
-            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-            config.sample_rate = outputDesc->mSamplingRate;
-            config.channel_mask = outputDesc->mChannelMask;
-            config.format = outputDesc->mFormat;
             audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-            status_t status = mpClientInterface->openOutput(outProfile->getModuleHandle(),
-                                                            &output,
-                                                            &config,
-                                                            &outputDesc->mDevice,
-                                                            address,
-                                                            &outputDesc->mLatency,
-                                                            outputDesc->mFlags);
+            status_t status = outputDesc->open(nullptr, profileType, address,
+                                           AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
 
             if (status != NO_ERROR) {
                 ALOGW("Cannot open output stream for device %08x on hw module %s",
                       outputDesc->mDevice,
                       mHwModules[i]->getName());
             } else {
-                outputDesc->mSamplingRate = config.sample_rate;
-                outputDesc->mChannelMask = config.channel_mask;
-                outputDesc->mFormat = config.format;
-
                 for (size_t k = 0; k  < supportedDevices.size(); k++) {
                     ssize_t index = mAvailableOutputDevices.indexOf(supportedDevices[k]);
                     // give a valid ID to an attached device once confirmed it is reachable
@@ -3697,11 +3663,11 @@
                 }
                 addOutput(output, outputDesc);
                 setOutputDevice(outputDesc,
-                                outputDesc->mDevice,
+                                profileType,
                                 true,
                                 0,
                                 NULL,
-                                address.string());
+                                address);
             }
         }
         // open input streams needed to access attached devices to validate
@@ -3710,6 +3676,12 @@
         {
             const sp<IOProfile> inProfile = mHwModules[i]->mInputProfiles[j];
 
+            if (!inProfile->canOpenNewIo()) {
+                ALOGE("Invalid Input profile max open count %u for profile %s",
+                      inProfile->maxOpenCount, inProfile->getTagName().c_str());
+                continue;
+            }
+
             if (!inProfile->hasSupportedDevices()) {
                 ALOGW("Input profile contains no device on module %s", mHwModules[i]->getName());
                 continue;
@@ -3722,30 +3694,15 @@
                 continue;
             }
             sp<AudioInputDescriptor> inputDesc =
-                    new AudioInputDescriptor(inProfile);
+                    new AudioInputDescriptor(inProfile, mpClientInterface);
 
-            inputDesc->mDevice = profileType;
-
-            // find the address
-            DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(profileType);
-            //   the inputs vector must be of size 1, but we don't want to crash here
-            String8 address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress
-                    : String8("");
-            ALOGV("  for input device 0x%x using address %s", profileType, address.string());
-            ALOGE_IF(inputDevices.size() == 0, "Input device list is empty!");
-
-            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-            config.sample_rate = inputDesc->mSamplingRate;
-            config.channel_mask = inputDesc->mChannelMask;
-            config.format = inputDesc->mFormat;
             audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
-            status_t status = mpClientInterface->openInput(inProfile->getModuleHandle(),
-                                                           &input,
-                                                           &config,
-                                                           &inputDesc->mDevice,
-                                                           address,
-                                                           AUDIO_SOURCE_MIC,
-                                                           AUDIO_INPUT_FLAG_NONE);
+            status_t status = inputDesc->open(nullptr,
+                                              profileType,
+                                              String8(""),
+                                              AUDIO_SOURCE_MIC,
+                                              AUDIO_INPUT_FLAG_NONE,
+                                              &input);
 
             if (status == NO_ERROR) {
                 const DeviceVector &supportedDevices = inProfile->getSupportedDevices();
@@ -3760,10 +3717,10 @@
                         }
                     }
                 }
-                mpClientInterface->closeInput(input);
+                inputDesc->close();
             } else {
                 ALOGW("Cannot open input stream for device %08x on hw module %s",
-                      inputDesc->mDevice,
+                      profileType,
                       mHwModules[i]->getName());
             }
         }
@@ -3804,10 +3761,10 @@
 AudioPolicyManager::~AudioPolicyManager()
 {
    for (size_t i = 0; i < mOutputs.size(); i++) {
-        mpClientInterface->closeOutput(mOutputs.keyAt(i));
+        mOutputs.valueAt(i)->close();
    }
    for (size_t i = 0; i < mInputs.size(); i++) {
-        mpClientInterface->closeInput(mInputs.keyAt(i));
+        mInputs.valueAt(i)->close();
    }
    mAvailableOutputDevices.clear();
    mAvailableInputDevices.clear();
@@ -3825,7 +3782,6 @@
 
 void AudioPolicyManager::addOutput(audio_io_handle_t output, const sp<SwAudioOutputDescriptor>& outputDesc)
 {
-    outputDesc->setIoHandle(output);
     mOutputs.add(output, outputDesc);
     updateMono(output); // update mono status when adding to output list
     selectOutputForMusicEffects();
@@ -3840,7 +3796,6 @@
 
 void AudioPolicyManager::addInput(audio_io_handle_t input, const sp<AudioInputDescriptor>& inputDesc)
 {
-    inputDesc->setIoHandle(input);
     mInputs.add(input, inputDesc);
     nextAudioPortGeneration();
 }
@@ -3934,30 +3889,20 @@
                 continue;
             }
 
+            if (!profile->canOpenNewIo()) {
+                ALOGW("Max Output number %u already opened for this profile %s",
+                      profile->maxOpenCount, profile->getTagName().c_str());
+                continue;
+            }
+
             ALOGV("opening output for device %08x with params %s profile %p name %s",
                   device, address.string(), profile.get(), profile->getName().string());
             desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
-            desc->mDevice = device;
-            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-            config.sample_rate = desc->mSamplingRate;
-            config.channel_mask = desc->mChannelMask;
-            config.format = desc->mFormat;
-            config.offload_info.sample_rate = desc->mSamplingRate;
-            config.offload_info.channel_mask = desc->mChannelMask;
-            config.offload_info.format = desc->mFormat;
             audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-            status_t status = mpClientInterface->openOutput(profile->getModuleHandle(),
-                                                            &output,
-                                                            &config,
-                                                            &desc->mDevice,
-                                                            address,
-                                                            &desc->mLatency,
-                                                            desc->mFlags);
-            if (status == NO_ERROR) {
-                desc->mSamplingRate = config.sample_rate;
-                desc->mChannelMask = config.channel_mask;
-                desc->mFormat = config.format;
+            status_t status = desc->open(nullptr, device, address,
+                                         AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
 
+            if (status == NO_ERROR) {
                 // Here is where the out_set_parameters() for card & device gets called
                 if (!address.isEmpty()) {
                     char *param = audio_device_address_to_parameter(device, address);
@@ -3967,27 +3912,21 @@
                 updateAudioProfiles(device, output, profile->getAudioProfiles());
                 if (!profile->hasValidAudioProfile()) {
                     ALOGW("checkOutputsForDevice() missing param");
-                    mpClientInterface->closeOutput(output);
+                    desc->close();
                     output = AUDIO_IO_HANDLE_NONE;
                 } else if (profile->hasDynamicAudioProfile()) {
-                    mpClientInterface->closeOutput(output);
+                    desc->close();
                     output = AUDIO_IO_HANDLE_NONE;
-                    profile->pickAudioProfile(config.sample_rate, config.channel_mask, config.format);
+                    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+                    profile->pickAudioProfile(
+                            config.sample_rate, config.channel_mask, config.format);
                     config.offload_info.sample_rate = config.sample_rate;
                     config.offload_info.channel_mask = config.channel_mask;
                     config.offload_info.format = config.format;
-                    status = mpClientInterface->openOutput(profile->getModuleHandle(),
-                                                           &output,
-                                                           &config,
-                                                           &desc->mDevice,
-                                                           address,
-                                                           &desc->mLatency,
-                                                           desc->mFlags);
-                    if (status == NO_ERROR) {
-                        desc->mSamplingRate = config.sample_rate;
-                        desc->mChannelMask = config.channel_mask;
-                        desc->mFormat = config.format;
-                    } else {
+
+                    status_t status = desc->open(&config, device, address, AUDIO_STREAM_DEFAULT,
+                                                 AUDIO_OUTPUT_FLAG_NONE, &output);
+                    if (status != NO_ERROR) {
                         output = AUDIO_IO_HANDLE_NONE;
                     }
                 }
@@ -4033,7 +3972,7 @@
                         } else {
                             ALOGW("checkOutputsForDevice() could not open dup output for %d and %d",
                                     mPrimaryOutput->mIoHandle, output);
-                            mpClientInterface->closeOutput(output);
+                            desc->close();
                             removeOutput(output);
                             nextAudioPortGeneration();
                             output = AUDIO_IO_HANDLE_NONE;
@@ -4161,6 +4100,7 @@
         for (ssize_t profile_index = 0; profile_index < (ssize_t)profiles.size(); profile_index++) {
 
             sp<IOProfile> profile = profiles[profile_index];
+
             // nothing to do if one input is already opened for this profile
             size_t input_index;
             for (input_index = 0; input_index < mInputs.size(); input_index++) {
@@ -4176,31 +4116,22 @@
                 continue;
             }
 
-            ALOGV("opening input for device 0x%X with params %s", device, address.string());
-            desc = new AudioInputDescriptor(profile);
-            desc->mDevice = device;
-            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-            config.sample_rate = desc->mSamplingRate;
-            config.channel_mask = desc->mChannelMask;
-            config.format = desc->mFormat;
+            if (!profile->canOpenNewIo()) {
+                ALOGW("Max Input number %u already opened for this profile %s",
+                      profile->maxOpenCount, profile->getTagName().c_str());
+                continue;
+            }
+
+            desc = new AudioInputDescriptor(profile, mpClientInterface);
             audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
-
-            ALOGV("opening inputput for device %08x with params %s profile %p name %s",
-                  desc->mDevice, address.string(), profile.get(), profile->getName().string());
-
-            status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
-                                                           &input,
-                                                           &config,
-                                                           &desc->mDevice,
-                                                           address,
-                                                           AUDIO_SOURCE_MIC,
-                                                           AUDIO_INPUT_FLAG_NONE /*FIXME*/);
+            status_t status = desc->open(nullptr,
+                                         device,
+                                         address,
+                                         AUDIO_SOURCE_MIC,
+                                         AUDIO_INPUT_FLAG_NONE,
+                                         &input);
 
             if (status == NO_ERROR) {
-                desc->mSamplingRate = config.sample_rate;
-                desc->mChannelMask = config.channel_mask;
-                desc->mFormat = config.format;
-
                 if (!address.isEmpty()) {
                     char *param = audio_device_address_to_parameter(device, address);
                     mpClientInterface->setParameters(input, String8(param));
@@ -4209,7 +4140,7 @@
                 updateAudioProfiles(device, input, profile->getAudioProfiles());
                 if (!profile->hasValidAudioProfile()) {
                     ALOGW("checkInputsForDevice() direct input missing param");
-                    mpClientInterface->closeInput(input);
+                    desc->close();
                     input = AUDIO_IO_HANDLE_NONE;
                 }
 
@@ -4317,11 +4248,8 @@
         mpClientInterface->onAudioPatchListUpdate();
     }
 
-    AudioParameter param;
-    param.add(String8("closing"), String8("true"));
-    mpClientInterface->setParameters(output, param.toString());
+    outputDesc->close();
 
-    mpClientInterface->closeOutput(output);
     removeOutput(output);
     mPreviousOutputs = mOutputs;
 }
@@ -4346,7 +4274,7 @@
         mpClientInterface->onAudioPatchListUpdate();
     }
 
-    mpClientInterface->closeInput(input);
+    inputDesc->close();
     mInputs.removeItem(input);
 }
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 11894dc..2d41bd1 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -601,20 +601,15 @@
                 audio_devices_t device,
                 audio_session_t session,
                 audio_stream_type_t stream,
-                uint32_t samplingRate,
-                audio_format_t format,
-                audio_channel_mask_t channelMask,
-                audio_output_flags_t flags,
-                const audio_offload_info_t *offloadInfo);
+                const audio_config_t *config,
+                audio_output_flags_t flags);
         // internal method to return the input handle for the given device and format
         audio_io_handle_t getInputForDevice(audio_devices_t device,
                 String8 address,
                 audio_session_t session,
                 uid_t uid,
                 audio_source_t inputSource,
-                uint32_t samplingRate,
-                audio_format_t format,
-                audio_channel_mask_t channelMask,
+                const audio_config_base_t *config,
                 audio_input_flags_t flags,
                 AudioMix *policyMix);
 
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index bd94e3e..1ee5ccf 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -278,8 +278,8 @@
         return NO_INIT;
     }
     // already checked by client, but double-check in case the client wrapper is bypassed
-    if (attr->source >= AUDIO_SOURCE_CNT && attr->source != AUDIO_SOURCE_HOTWORD &&
-        attr->source != AUDIO_SOURCE_FM_TUNER) {
+    if ((attr->source < AUDIO_SOURCE_DEFAULT || attr->source >= AUDIO_SOURCE_CNT) &&
+            attr->source != AUDIO_SOURCE_HOTWORD && attr->source != AUDIO_SOURCE_FM_TUNER) {
         return BAD_VALUE;
     }
 
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 7ec3ccb..1fbba58 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -91,9 +91,6 @@
 
 LOCAL_CFLAGS += -Wall -Wextra -Werror
 
-# Workaround for invalid unused-lambda-capture warning http://b/38349491
-LOCAL_CLANG_CFLAGS += -Wno-error=unused-lambda-capture
-
 LOCAL_MODULE:= libcameraservice
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 2cf648f..585d2eb 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -859,6 +859,12 @@
 
     outputStreams.push(getPreviewStreamId());
 
+    if (params.isDeviceZslSupported) {
+        // If device ZSL is supported, resume preview buffers that may have been paused
+        // during the last takePicture().
+        mDevice->dropStreamBuffers(false, getPreviewStreamId());
+    }
+
     if (!params.recordingHint) {
         if (!restart) {
             res = mStreamingProcessor->updatePreviewRequest(params);
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index a407d0b..910dd78 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -136,7 +136,12 @@
     const char *enddump = "\n\n";
     write(fd, enddump, strlen(enddump));
 
-    return mHardware->dump(fd, args);
+    sp<CameraHardwareInterface> hardware = mHardware;
+    if (hardware != nullptr) {
+        return hardware->dump(fd, args);
+    }
+    ALOGI("%s: camera device closed already, skip dumping", __FUNCTION__);
+    return OK;
 }
 
 // ----------------------------------------------------------------------------
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index b65f1c7..1ee216f 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -553,6 +553,12 @@
         return DONE;
     }
 
+    if (l.mParameters.isDeviceZslSupported) {
+        // If device ZSL is supported, drop all pending preview buffers to reduce the chance of
+        // rendering preview frames newer than the still frame.
+        client->getCameraDevice()->dropStreamBuffers(true, client->getPreviewStreamId());
+    }
+
     /**
      * Clear the streaming request for still-capture pictures
      *   (as opposed to i.e. video snapshots)
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 68384b0..f1f96c3 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -359,6 +359,12 @@
             const std::vector<android::camera3::OutputStreamInfo> &outputInfo,
             const std::vector<size_t> &removedSurfaceIds,
             KeyedVector<sp<Surface>, size_t> *outputMap/*out*/) = 0;
+
+    /**
+     * Drop buffers for stream of streamId if dropping is true. If dropping is false, do not
+     * drop buffers for stream of streamId.
+     */
+    virtual status_t dropStreamBuffers(bool /*dropping*/, int /*streamId*/) = 0;
 };
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index e0a2dd4..c0db8e7 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2039,6 +2039,20 @@
     return res;
 }
 
+status_t Camera3Device::dropStreamBuffers(bool dropping, int streamId) {
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    int idx = mOutputStreams.indexOfKey(streamId);
+    if (idx == NAME_NOT_FOUND) {
+        ALOGE("%s: Stream %d is not found.", __FUNCTION__, streamId);
+        return BAD_VALUE;
+    }
+
+    sp<Camera3OutputStreamInterface> stream = mOutputStreams.editValueAt(idx);
+    return stream->dropBuffers(dropping);
+}
+
 /**
  * Camera3Device private methods
  */
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 357b893..e9466ab 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -192,6 +192,12 @@
             const std::vector<size_t> &removedSurfaceIds,
             KeyedVector<sp<Surface>, size_t> *outputMap/*out*/);
 
+    /**
+     * Drop buffers for the stream with the given streamId when dropping is true; stop dropping
+     * buffers for that stream when dropping is false.
+     */
+    status_t dropStreamBuffers(bool dropping, int streamId) override;
+
   private:
 
     // internal typedefs
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index 4b36ea2..0a245c4 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -108,6 +108,10 @@
     return false;
 }
 
+status_t Camera3DummyStream::dropBuffers(bool /*dropping*/) {
+    return OK;
+}
+
 status_t Camera3DummyStream::setConsumers(const std::vector<sp<Surface>>& /*consumers*/) {
     ALOGE("%s: Stream %d: Dummy stream doesn't support set consumer surface!",
             __FUNCTION__, mId);
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 3212031..684f4b0 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -57,6 +57,12 @@
     virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
 
     /**
+     * Drop buffers if dropping is true. If dropping is false, do not drop buffers.
+     */
+    virtual status_t dropBuffers(bool /*dropping*/) override;
+
+    /**
      * Return if this output stream is for video encoding.
      */
     bool isVideoStream() const;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 329172a..e79eecc 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -44,6 +44,7 @@
         mUseBufferManager(false),
         mTimestampOffset(timestampOffset),
         mConsumerUsage(0),
+        mDropBuffers(false),
         mDequeueBufferLatency(kDequeueLatencyBinSize) {
 
     if (mConsumer == NULL) {
@@ -70,6 +71,7 @@
         mUseBufferManager(false),
         mTimestampOffset(timestampOffset),
         mConsumerUsage(0),
+        mDropBuffers(false),
         mDequeueBufferLatency(kDequeueLatencyBinSize) {
 
     if (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
@@ -100,6 +102,7 @@
         mUseBufferManager(false),
         mTimestampOffset(timestampOffset),
         mConsumerUsage(consumerUsage),
+        mDropBuffers(false),
         mDequeueBufferLatency(kDequeueLatencyBinSize) {
     // Deferred consumer only support preview surface format now.
     if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
@@ -139,6 +142,7 @@
         mUseBufferManager(false),
         mTimestampOffset(timestampOffset),
         mConsumerUsage(consumerUsage),
+        mDropBuffers(false),
         mDequeueBufferLatency(kDequeueLatencyBinSize) {
 
     if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
@@ -227,9 +231,14 @@
     /**
      * Return buffer back to ANativeWindow
      */
-    if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
+    if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR || mDropBuffers) {
         // Cancel buffer
-        ALOGW("A frame is dropped for stream %d", mId);
+        if (mDropBuffers) {
+            ALOGV("%s: Dropping a frame for stream %d.", __FUNCTION__, mId);
+        } else {
+            ALOGW("%s: A frame is dropped for stream %d due to buffer error.", __FUNCTION__, mId);
+        }
+
         res = currentConsumer->cancelBuffer(currentConsumer.get(),
                 anwBuffer,
                 anwReleaseFence);
@@ -785,6 +794,12 @@
     return res;
 }
 
+status_t Camera3OutputStream::dropBuffers(bool dropping) {
+    Mutex::Autolock l(mLock);
+    mDropBuffers = dropping;
+    return OK;
+}
+
 status_t Camera3OutputStream::notifyBufferReleased(ANativeWindowBuffer* /*anwBuffer*/) {
     return OK;
 }
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index fbb14fe..18b1901 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -166,6 +166,11 @@
     virtual status_t notifyBufferReleased(ANativeWindowBuffer *anwBuffer);
 
     /**
+     * Drop buffers if dropping is true. If dropping is false, do not drop buffers.
+     */
+    virtual status_t dropBuffers(bool dropping) override;
+
+    /**
      * Set the graphic buffer manager to get/return the stream buffers.
      *
      * It is only legal to call this method when stream is in STATE_CONSTRUCTED state.
@@ -260,6 +265,9 @@
      */
     uint64_t    mConsumerUsage;
 
+    // Whether to drop valid buffers.
+    bool mDropBuffers;
+
     /**
      * Internal Camera3Stream interface
      */
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index edfbab1..08fcf38 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -73,6 +73,11 @@
             const std::vector<OutputStreamInfo> &outputInfo,
             const std::vector<size_t> &removedSurfaceIds,
             KeyedVector<sp<Surface>, size_t> *outputMap/*out*/) = 0;
+
+    /**
+     * Drop buffers if dropping is true. If dropping is false, do not drop buffers.
+     */
+    virtual status_t dropBuffers(bool /*dropping*/) = 0;
 };
 
 } // namespace camera3
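
Taken together, the camera hunks above add a per-stream switch: Camera3OutputStream::dropBuffers() makes returnBufferCheckedLocked() cancel buffers instead of queueing them, Camera3Device::dropStreamBuffers() exposes that per stream ID, and the API1 path toggles it around a device-ZSL still capture (CaptureSequencer turns dropping on before the still request, Camera2Client turns it off when preview restarts). A small illustrative sketch of that call pattern against the new interface — the function names and the previewStreamId variable below are placeholders, not code from this patch, and the include paths assume the AOSP camera service headers are available:

    #include <utils/StrongPointer.h>
    #include "common/CameraDeviceBase.h"

    // Before issuing the still-capture request: keep preview frames that could be
    // newer than the still image from reaching the display.
    static void pausePreviewForStillCapture(
            const android::sp<android::CameraDeviceBase>& device, int previewStreamId) {
        device->dropStreamBuffers(true /*dropping*/, previewStreamId);
    }

    // When preview restarts after the capture sequence: deliver buffers normally again.
    static void resumePreviewAfterStillCapture(
            const android::sp<android::CameraDeviceBase>& device, int previewStreamId) {
        device->dropStreamBuffers(false /*dropping*/, previewStreamId);
    }
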
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
index 8444444..7f42b1b 100644
--- a/services/mediaanalytics/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -272,7 +272,7 @@
     }
 
     ALOGV("given uid %d; sanitized uid: %d sanitized pkg: %s "
-          "sanitized pkg version: %d",
+          "sanitized pkg version: %"  PRId64,
           uid_given, item->getUid(),
           item->getPkgName().c_str(),
           item->getPkgVersionCode());
@@ -856,7 +856,7 @@
     } else {
         AString pkg;
         std::string installer = "";
-        int32_t versionCode = 0;
+        int64_t versionCode = 0;
 
         struct passwd *pw = getpwuid(uid);
         if (pw) {
@@ -926,7 +926,7 @@
                 }
 
 
-                ALOGV("package '%s' installed by '%s' versioncode %d / %08x",
+                ALOGV("package '%s' installed by '%s' versioncode %"  PRId64 " / %" PRIx64,
                       pkg.c_str(), installer.c_str(), versionCode, versionCode);
 
                 if (strncmp(installer.c_str(), "com.android.", 12) == 0) {
diff --git a/services/mediaanalytics/MediaAnalyticsService.h b/services/mediaanalytics/MediaAnalyticsService.h
index 3b34f44..fce7d08 100644
--- a/services/mediaanalytics/MediaAnalyticsService.h
+++ b/services/mediaanalytics/MediaAnalyticsService.h
@@ -138,7 +138,7 @@
         uid_t uid;
         AString pkg;
         AString installer;
-        int32_t versionCode;
+        int64_t versionCode;
         nsecs_t expiration;
     };
 
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index 9348ecd..caa0703 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -26,10 +26,6 @@
 LOCAL_32_BIT_ONLY := true
 LOCAL_INIT_RC := android.hardware.media.omx@1.0-service.rc
 
-ifeq ($(PRODUCT_FULL_TREBLE),true)
-LOCAL_CFLAGS += -DUSE_VNDBINDER
-endif
-
 include $(BUILD_EXECUTABLE)
 
 # service seccomp policy
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index 6f14a42..701ca6e 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -40,10 +40,8 @@
     signal(SIGPIPE, SIG_IGN);
     SetUpMinijail(kSystemSeccompPolicyPath, kVendorSeccompPolicyPath);
 
-#ifdef USE_VNDBINDER
     android::ProcessState::initWithDriver("/dev/vndbinder");
     android::ProcessState::self()->startThreadPool();
-#endif // USE_VNDBINDER
 
     ::android::hardware::configureRpcThreadpool(64, false);
 
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 51ae665..ac3202b 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -142,7 +142,31 @@
     }
 }
 
+// Release one service reference on the stream. If a close request is pending and this was the
+// last reference, remove the stream from the tracker and close it. Returns true if closed.
+bool AAudioService::releaseStream(const sp<AAudioServiceStreamBase> &serviceStream) {
+    bool closed = false;
+    if ((serviceStream->decrementServiceReferenceCount() == 0) && serviceStream->isCloseNeeded()) {
+        // removeStreamByHandle() uses a lock so that if there are two simultaneous closes
+        // then only one will get the pointer and do the close.
+        sp<AAudioServiceStreamBase> foundStream =
+                mStreamTracker.removeStreamByHandle(serviceStream->getHandle());
+        if (foundStream.get() != nullptr) {
+            foundStream->close();
+            pid_t pid = foundStream->getOwnerProcessId();
+            AAudioClientTracker::getInstance().unregisterClientStream(pid, foundStream);
+        }
+        closed = true;
+    }
+    return closed;
+}
+
+aaudio_result_t AAudioService::checkForPendingClose(
+        const sp<AAudioServiceStreamBase> &serviceStream,
+        aaudio_result_t defaultResult) {
+    return releaseStream(serviceStream) ? AAUDIO_ERROR_INVALID_STATE : defaultResult;
+}
+
 aaudio_result_t AAudioService::closeStream(aaudio_handle_t streamHandle) {
+    ALOGD("closeStream(0x%08X)", streamHandle);
     // Check permission and ownership first.
     sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
     if (serviceStream.get() == nullptr) {
@@ -150,22 +174,13 @@
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
 
-    ALOGD("closeStream(0x%08X)", streamHandle);
-    // Remove handle from tracker so that we cannot look up the raw address any more.
-    // removeStreamByHandle() uses a lock so that if there are two simultaneous closes
-    // then only one will get the pointer and do the close.
-    serviceStream = mStreamTracker.removeStreamByHandle(streamHandle);
-    if (serviceStream.get() != nullptr) {
-        serviceStream->close();
-        pid_t pid = serviceStream->getOwnerProcessId();
-        AAudioClientTracker::getInstance().unregisterClientStream(pid, serviceStream);
-        return AAUDIO_OK;
-    } else {
-        ALOGW("closeStream(0x%0x) being handled by another thread", streamHandle);
-        return AAUDIO_ERROR_INVALID_HANDLE;
-    }
-}
+    pid_t pid = serviceStream->getOwnerProcessId();
+    AAudioClientTracker::getInstance().unregisterClientStream(pid, serviceStream);
 
+    serviceStream->setCloseNeeded(true);
+    (void) releaseStream(serviceStream);
+    return AAUDIO_OK;
+}
 
 sp<AAudioServiceStreamBase> AAudioService::convertHandleToServiceStream(
         aaudio_handle_t streamHandle) {
@@ -181,7 +196,9 @@
         if (!allowed) {
             ALOGE("AAudioService: calling uid %d cannot access stream 0x%08X owned by %d",
                   callingUserId, streamHandle, ownerUserId);
-            serviceStream = nullptr;
+            serviceStream.clear();
+        } else {
+            serviceStream->incrementServiceReferenceCount();
         }
     }
     return serviceStream;
@@ -198,7 +215,7 @@
 
     aaudio_result_t result = serviceStream->getDescription(parcelable);
     // parcelable.dump();
-    return result;
+    return checkForPendingClose(serviceStream, result);
 }
 
 aaudio_result_t AAudioService::startStream(aaudio_handle_t streamHandle) {
@@ -208,7 +225,8 @@
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
 
-    return serviceStream->start();
+    aaudio_result_t result = serviceStream->start();
+    return checkForPendingClose(serviceStream, result);
 }
 
 aaudio_result_t AAudioService::pauseStream(aaudio_handle_t streamHandle) {
@@ -218,7 +236,7 @@
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     aaudio_result_t result = serviceStream->pause();
-    return result;
+    return checkForPendingClose(serviceStream, result);
 }
 
 aaudio_result_t AAudioService::stopStream(aaudio_handle_t streamHandle) {
@@ -228,7 +246,7 @@
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     aaudio_result_t result = serviceStream->stop();
-    return result;
+    return checkForPendingClose(serviceStream, result);
 }
 
 aaudio_result_t AAudioService::flushStream(aaudio_handle_t streamHandle) {
@@ -237,48 +255,51 @@
         ALOGE("flushStream(), illegal stream handle = 0x%0x", streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
-    return serviceStream->flush();
+    aaudio_result_t result = serviceStream->flush();
+    return checkForPendingClose(serviceStream, result);
 }
 
 aaudio_result_t AAudioService::registerAudioThread(aaudio_handle_t streamHandle,
                                                    pid_t clientThreadId,
                                                    int64_t periodNanoseconds) {
+    aaudio_result_t result = AAUDIO_OK;
     sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
     if (serviceStream.get() == nullptr) {
         ALOGE("registerAudioThread(), illegal stream handle = 0x%0x", streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     if (serviceStream->getRegisteredThread() != AAudioServiceStreamBase::ILLEGAL_THREAD_ID) {
-        ALOGE("registerAudioThread(), thread already registered");
-        return AAUDIO_ERROR_INVALID_STATE;
-    }
-
-    const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
-    serviceStream->setRegisteredThread(clientThreadId);
-    int err = android::requestPriority(ownerPid, clientThreadId,
-                                       DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
-    if (err != 0){
-        ALOGE("registerAudioThread(%d) failed, errno = %d, priority = %d",
-              clientThreadId, errno, DEFAULT_AUDIO_PRIORITY);
-        return AAUDIO_ERROR_INTERNAL;
+        ALOGE("AAudioService::registerAudioThread(), thread already registered");
+        result = AAUDIO_ERROR_INVALID_STATE;
     } else {
-        return AAUDIO_OK;
+        const pid_t ownerPid = IPCThreadState::self()->getCallingPid(); // TODO review
+        serviceStream->setRegisteredThread(clientThreadId);
+        int err = android::requestPriority(ownerPid, clientThreadId,
+                                           DEFAULT_AUDIO_PRIORITY, true /* isForApp */);
+        if (err != 0) {
+            ALOGE("AAudioService::registerAudioThread(%d) failed, errno = %d, priority = %d",
+                  clientThreadId, errno, DEFAULT_AUDIO_PRIORITY);
+            result = AAUDIO_ERROR_INTERNAL;
+        }
     }
+    return checkForPendingClose(serviceStream, result);
 }
 
 aaudio_result_t AAudioService::unregisterAudioThread(aaudio_handle_t streamHandle,
                                                      pid_t clientThreadId) {
+    aaudio_result_t result = AAUDIO_OK;
     sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
     if (serviceStream.get() == nullptr) {
         ALOGE("unregisterAudioThread(), illegal stream handle = 0x%0x", streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
     if (serviceStream->getRegisteredThread() != clientThreadId) {
-        ALOGE("unregisterAudioThread(), wrong thread");
-        return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+        ALOGE("AAudioService::unregisterAudioThread(), wrong thread");
+        result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+    } else {
+        serviceStream->setRegisteredThread(0);
     }
-    serviceStream->setRegisteredThread(0);
-    return AAUDIO_OK;
+    return checkForPendingClose(serviceStream, result);
 }
 
 aaudio_result_t AAudioService::startClient(aaudio_handle_t streamHandle,
@@ -289,7 +310,8 @@
         ALOGE("startClient(), illegal stream handle = 0x%0x", streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
-    return serviceStream->startClient(client, clientHandle);
+    aaudio_result_t result = serviceStream->startClient(client, clientHandle);
+    return checkForPendingClose(serviceStream, result);
 }
 
 aaudio_result_t AAudioService::stopClient(aaudio_handle_t streamHandle,
@@ -299,5 +321,6 @@
         ALOGE("stopClient(), illegal stream handle = 0x%0x", streamHandle);
         return AAUDIO_ERROR_INVALID_HANDLE;
     }
-    return serviceStream->stopClient(clientHandle);
+    aaudio_result_t result = serviceStream->stopClient(clientHandle);
+    return checkForPendingClose(serviceStream, result);
 }
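
Taken together, the AAudioService.cpp changes follow a deferred-close scheme: convertHandleToServiceStream() takes a service reference on every successful lookup, each entry point drops it again via checkForPendingClose(), and closeStream() only marks the stream so that whichever caller releases the last reference performs the actual close. A minimal, self-contained sketch of that pattern (hypothetical names, not the real AAudio classes):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for AAudioServiceStreamBase's bookkeeping.
    class Stream {
    public:
        int32_t acquire()   { return ++mUseCount; }      // on handle lookup
        int32_t release()   { return --mUseCount; }      // on call exit
        void requestClose() { mCloseNeeded.store(true); }
        bool closeNeeded() const { return mCloseNeeded.load(); }
        void close()        { std::printf("closing stream\n"); }
    private:
        std::atomic<int32_t> mUseCount{0};
        std::atomic<bool>    mCloseNeeded{false};
    };

    // Mirrors releaseStream()/checkForPendingClose(): the last caller out closes.
    bool releaseAndMaybeClose(Stream &s) {
        if (s.release() == 0 && s.closeNeeded()) {
            s.close();
            return true;
        }
        return false;
    }

    int main() {
        Stream s;
        s.acquire();               // caller A enters, e.g., startStream()
        s.acquire();               // caller B enters closeStream()
        s.requestClose();          // B marks the stream for close
        releaseAndMaybeClose(s);   // B exits; A still holds a reference, no close
        releaseAndMaybeClose(s);   // A exits; count hits zero, close runs here
        return 0;
    }

The close therefore happens only after the in-flight call has left the stream, which avoids closing a stream while another service thread is still operating on it.
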
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index eef0824..bdd9e0b 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -94,9 +94,15 @@
             aaudio::aaudio_handle_t streamHandle);
 
 
-    android::AudioClient mAudioClient;
 
-    aaudio::AAudioStreamTracker                 mStreamTracker;
+    bool releaseStream(const sp<aaudio::AAudioServiceStreamBase> &serviceStream);
+
+    aaudio_result_t checkForPendingClose(const sp<aaudio::AAudioServiceStreamBase> &serviceStream,
+                                         aaudio_result_t defaultResult);
+
+    android::AudioClient            mAudioClient;
+
+    aaudio::AAudioStreamTracker     mStreamTracker;
 
     enum constants {
         DEFAULT_AUDIO_PRIORITY = 2
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 635b45c..53d2860 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -402,3 +402,13 @@
 void AAudioServiceStreamBase::onVolumeChanged(float volume) {
     sendServiceEvent(AAUDIO_SERVICE_EVENT_VOLUME, volume);
 }
+
+int32_t AAudioServiceStreamBase::incrementServiceReferenceCount() {
+    std::lock_guard<std::mutex> lock(mCallingCountLock);
+    return ++mCallingCount;
+}
+
+int32_t AAudioServiceStreamBase::decrementServiceReferenceCount() {
+    std::lock_guard<std::mutex> lock(mCallingCountLock);
+    return --mCallingCount;
+}
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 29987f6..5f5bb98 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -199,6 +199,26 @@
         return mFlowing;
     }
 
+    /**
+     * Atomically increment the number of active references to the stream by AAudioService.
+     * @return value after the increment
+     */
+    int32_t incrementServiceReferenceCount();
+
+    /**
+     * Atomically decrement the number of active references to the stream by AAudioService.
+     * @return value after the decrement
+     */
+    int32_t decrementServiceReferenceCount();
+
+    bool isCloseNeeded() const {
+        return mCloseNeeded.load();
+    }
+
+    void setCloseNeeded(bool needed) {
+        mCloseNeeded.store(needed);
+    }
+
 protected:
 
     /**
@@ -256,8 +276,11 @@
 
 private:
     aaudio_handle_t         mHandle = -1;
-
     bool                    mFlowing = false;
+
+    std::mutex              mCallingCountLock;
+    std::atomic<int32_t>    mCallingCount{0};
+    std::atomic<bool>       mCloseNeeded{false};
 };
 
 } /* namespace aaudio */