Merge "Notify swcodec crashes too"
diff --git a/apex/ld.config.txt b/apex/ld.config.txt
index b342206..d50b353 100644
--- a/apex/ld.config.txt
+++ b/apex/ld.config.txt
@@ -49,6 +49,8 @@
 # namespace.default.link.platform.shared_libs  = %LLNDK_LIBRARIES%
 # namespace.default.link.platform.shared_libs += %SANITIZER_RUNTIME_LIBRARIES%
 namespace.default.link.platform.shared_libs = libEGL.so:libGLESv1_CM.so:libGLESv2.so:libGLESv3.so:libRS.so:libandroid_net.so:libc.so:libclang_rt.asan-aarch64-android.so:libclang_rt.asan-arm-android.so:libclang_rt.hwasan-aarch64-android.so:libclang_rt.asan-i686-android.so:libclang_rt.asan-x86_64-android.so:libdl.so:libft2.so:liblog.so:libm.so:libmediandk.so:libnativewindow.so:libneuralnetworks.so:libsync.so:libvndksupport.so:libvulkan.so
+# FIXME: b/129552044
+namespace.default.link.platform.shared_libs += libz.so
 
 ###############################################################################
 # "platform" namespace
diff --git a/apex/manifest.json b/apex/manifest.json
index e2df3a3..c6c63f6 100644
--- a/apex/manifest.json
+++ b/apex/manifest.json
@@ -1,4 +1,4 @@
 {
   "name": "com.android.media",
-  "version": 1
+  "version": 200000000
 }
diff --git a/apex/manifest_codec.json b/apex/manifest_codec.json
index e2bceec..4f31b15 100644
--- a/apex/manifest_codec.json
+++ b/apex/manifest_codec.json
@@ -1,4 +1,4 @@
 {
   "name": "com.android.media.swcodec",
-  "version": 1
+  "version": 200000000
 }
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
index 928a6bc..1d8e8c4 100644
--- a/camera/CaptureResult.cpp
+++ b/camera/CaptureResult.cpp
@@ -39,6 +39,16 @@
     parcel->readInt64(&frameNumber);
     parcel->readInt32(&partialResultCount);
     parcel->readInt32(&errorStreamId);
+    auto physicalCameraIdPresent = parcel->readBool();
+    if (physicalCameraIdPresent) {
+        String16 cameraId;
+        status_t res = OK;
+        if ((res = parcel->readString16(&cameraId)) != OK) {
+            ALOGE("%s: Failed to read camera id: %d", __FUNCTION__, res);
+            return res;
+        }
+        errorPhysicalCameraId = cameraId;
+    }
 
     return OK;
 }
@@ -56,6 +66,16 @@
     parcel->writeInt64(frameNumber);
     parcel->writeInt32(partialResultCount);
     parcel->writeInt32(errorStreamId);
+    if (errorPhysicalCameraId.size() > 0) {
+        parcel->writeBool(true);
+        status_t res = OK;
+        if ((res = parcel->writeString16(errorPhysicalCameraId)) != OK) {
+            ALOGE("%s: Failed to write physical camera ID to parcel: %d", __FUNCTION__, res);
+            return res;
+        }
+    } else {
+        parcel->writeBool(false);
+    }
 
     return OK;
 }
diff --git a/camera/include/camera/CaptureResult.h b/camera/include/camera/CaptureResult.h
index 56fa178..ef830b5 100644
--- a/camera/include/camera/CaptureResult.h
+++ b/camera/include/camera/CaptureResult.h
@@ -70,6 +70,13 @@
     int32_t errorStreamId;
 
     /**
+     * For capture result errors, the physical camera ID in case the respective request contains
+     * a reference to a physical camera device.
+     * Empty otherwise.
+     */
+    String16  errorPhysicalCameraId;
+
+    /**
      * Constructor initializes object as invalid by setting requestId to be -1.
      */
     CaptureResultExtras()
@@ -79,7 +86,8 @@
           precaptureTriggerId(0),
           frameNumber(0),
           partialResultCount(0),
-          errorStreamId(-1) {
+          errorStreamId(-1),
+          errorPhysicalCameraId() {
     }
 
     /**
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index c9db01e..25a81eb 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -45,6 +45,7 @@
 const char* CameraDevice::kSequenceIdKey     = "SequenceId";
 const char* CameraDevice::kFrameNumberKey    = "FrameNumber";
 const char* CameraDevice::kAnwKey            = "Anw";
+const char* CameraDevice::kFailingPhysicalCameraId = "FailingPhysicalCameraId";
 
 /**
  * CameraDevice Implementation
@@ -867,10 +868,19 @@
         failure->wasImageCaptured = (errorCode ==
                 hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT);
 
-        sp<AMessage> msg = new AMessage(kWhatCaptureFail, mHandler);
+        sp<AMessage> msg = new AMessage(cbh.mIsLogicalCameraCallback ? kWhatLogicalCaptureFail :
+                kWhatCaptureFail, mHandler);
         msg->setPointer(kContextKey, cbh.mContext);
         msg->setObject(kSessionSpKey, session);
-        msg->setPointer(kCallbackFpKey, (void*) onError);
+        if (cbh.mIsLogicalCameraCallback) {
+            if (resultExtras.errorPhysicalCameraId.size() > 0) {
+                String8 cameraId(resultExtras.errorPhysicalCameraId);
+                msg->setString(kFailingPhysicalCameraId, cameraId.string(), cameraId.size());
+            }
+            msg->setPointer(kCallbackFpKey, (void*) cbh.mOnLogicalCameraCaptureFailed);
+        } else {
+            msg->setPointer(kCallbackFpKey, (void*) onError);
+        }
         msg->setObject(kCaptureRequestKey, request);
         msg->setObject(kCaptureFailureKey, failure);
         postSessionMsgAndCleanup(msg);
@@ -895,6 +905,7 @@
         case kWhatCaptureResult:
         case kWhatLogicalCaptureResult:
         case kWhatCaptureFail:
+        case kWhatLogicalCaptureFail:
         case kWhatCaptureSeqEnd:
         case kWhatCaptureSeqAbort:
         case kWhatCaptureBufferLost:
@@ -966,6 +977,7 @@
         case kWhatCaptureResult:
         case kWhatLogicalCaptureResult:
         case kWhatCaptureFail:
+        case kWhatLogicalCaptureFail:
         case kWhatCaptureSeqEnd:
         case kWhatCaptureSeqAbort:
         case kWhatCaptureBufferLost:
@@ -984,6 +996,7 @@
                 case kWhatCaptureResult:
                 case kWhatLogicalCaptureResult:
                 case kWhatCaptureFail:
+                case kWhatLogicalCaptureFail:
                 case kWhatCaptureBufferLost:
                     found = msg->findObject(kCaptureRequestKey, &obj);
                     if (!found) {
@@ -1138,6 +1151,39 @@
                     freeACaptureRequest(request);
                     break;
                 }
+                case kWhatLogicalCaptureFail:
+                {
+                    ACameraCaptureSession_logicalCamera_captureCallback_failed onFail;
+                    found = msg->findPointer(kCallbackFpKey, (void**) &onFail);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture fail callback!", __FUNCTION__);
+                        return;
+                    }
+                    if (onFail == nullptr) {
+                        return;
+                    }
+
+                    found = msg->findObject(kCaptureFailureKey, &obj);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture failure!", __FUNCTION__);
+                        return;
+                    }
+                    sp<CameraCaptureFailure> failureSp(
+                            static_cast<CameraCaptureFailure*>(obj.get()));
+                    ALogicalCameraCaptureFailure failure;
+                    AString physicalCameraId;
+                    found = msg->findString(kFailingPhysicalCameraId, &physicalCameraId);
+                    if (found && !physicalCameraId.empty()) {
+                        failure.physicalCameraId = physicalCameraId.c_str();
+                    } else {
+                        failure.physicalCameraId = nullptr;
+                    }
+                    failure.captureFailure = *failureSp;
+                    ACaptureRequest* request = allocateACaptureRequest(requestSp, mId);
+                    (*onFail)(context, session.get(), request, &failure);
+                    freeACaptureRequest(request);
+                    break;
+                }
                 case kWhatCaptureSeqEnd:
                 {
                     ACameraCaptureSession_captureCallback_sequenceEnd onSeqEnd;
@@ -1233,6 +1279,7 @@
 
     if (cbs != nullptr) {
         mOnCaptureCompleted = cbs->onCaptureCompleted;
+        mOnCaptureFailed = cbs->onCaptureFailed;
     }
 }
 
@@ -1248,6 +1295,7 @@
 
     if (lcbs != nullptr) {
         mOnLogicalCameraCaptureCompleted = lcbs->onLogicalCameraCaptureCompleted;
+        mOnLogicalCameraCaptureFailed = lcbs->onLogicalCameraCaptureFailed;
     }
 }
 
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 56741ce..c92a95f 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -214,6 +214,7 @@
         kWhatCaptureResult,    // onCaptureProgressed, onCaptureCompleted
         kWhatLogicalCaptureResult, // onLogicalCameraCaptureCompleted
         kWhatCaptureFail,      // onCaptureFailed
+        kWhatLogicalCaptureFail, // onLogicalCameraCaptureFailed
         kWhatCaptureSeqEnd,    // onCaptureSequenceCompleted
         kWhatCaptureSeqAbort,  // onCaptureSequenceAborted
         kWhatCaptureBufferLost,// onCaptureBufferLost
@@ -233,6 +234,7 @@
     static const char* kSequenceIdKey;
     static const char* kFrameNumberKey;
     static const char* kAnwKey;
+    static const char* kFailingPhysicalCameraId;
 
     class CallbackHandler : public AHandler {
       public:
@@ -281,6 +283,7 @@
             mOnCaptureProgressed = nullptr;
             mOnCaptureCompleted = nullptr;
             mOnLogicalCameraCaptureCompleted = nullptr;
+            mOnLogicalCameraCaptureFailed = nullptr;
             mOnCaptureFailed = nullptr;
             mOnCaptureSequenceCompleted = nullptr;
             mOnCaptureSequenceAborted = nullptr;
@@ -289,7 +292,6 @@
                 mContext = cbs->context;
                 mOnCaptureStarted = cbs->onCaptureStarted;
                 mOnCaptureProgressed = cbs->onCaptureProgressed;
-                mOnCaptureFailed = cbs->onCaptureFailed;
                 mOnCaptureSequenceCompleted = cbs->onCaptureSequenceCompleted;
                 mOnCaptureSequenceAborted = cbs->onCaptureSequenceAborted;
                 mOnCaptureBufferLost = cbs->onCaptureBufferLost;
@@ -305,6 +307,7 @@
         ACameraCaptureSession_captureCallback_result mOnCaptureProgressed;
         ACameraCaptureSession_captureCallback_result mOnCaptureCompleted;
         ACameraCaptureSession_logicalCamera_captureCallback_result mOnLogicalCameraCaptureCompleted;
+        ACameraCaptureSession_logicalCamera_captureCallback_failed mOnLogicalCameraCaptureFailed;
         ACameraCaptureSession_captureCallback_failed mOnCaptureFailed;
         ACameraCaptureSession_captureCallback_sequenceEnd mOnCaptureSequenceCompleted;
         ACameraCaptureSession_captureCallback_sequenceAbort mOnCaptureSequenceAborted;
diff --git a/camera/ndk/include/camera/NdkCameraCaptureSession.h b/camera/ndk/include/camera/NdkCameraCaptureSession.h
index d13a818..07176cf 100644
--- a/camera/ndk/include/camera/NdkCameraCaptureSession.h
+++ b/camera/ndk/include/camera/NdkCameraCaptureSession.h
@@ -676,6 +676,41 @@
         size_t physicalResultCount, const char** physicalCameraIds,
         const ACameraMetadata** physicalResults);
 
+/// Struct to describe a logical camera capture failure
+typedef struct ALogicalCameraCaptureFailure {
+    /**
+     * The {@link ACameraCaptureFailure} contains information about the regular capture failure
+     * of the logical camera device.
+     */
+    struct ACameraCaptureFailure captureFailure;
+
+    /**
+     * The physical camera device ID in case the capture failure comes from a capture request
+     * with configured physical camera streams for a logical camera. physicalCameraId will be set
+     * to NULL in case the capture request has no associated physical camera device.
+     *
+     */
+    const char*    physicalCameraId;
+} ALogicalCameraCaptureFailure;
+
+/**
+ * The definition of logical camera capture failure callback.
+ *
+ * @param context The optional application context provided by user in
+ *                {@link ACameraCaptureSession_captureCallbacks}.
+ * @param session The camera capture session of interest.
+ * @param request The capture request of interest. Note that this pointer points to a copy of
+ *                capture request sent by application, so the address is different to what
+ *                application sent but the content will match. This request will be freed by
+ *                framework immediately after this callback returns.
+ * @param failure The {@link ALogicalCameraCaptureFailure} describes the capture failure. The memory
+ *                is managed by camera framework. Do not access this pointer after this callback
+ *                returns.
+ */
+typedef void (*ACameraCaptureSession_logicalCamera_captureCallback_failed)(
+        void* context, ACameraCaptureSession* session,
+        ACaptureRequest* request, ALogicalCameraCaptureFailure* failure);
+
 /**
  * This has the same functionality as ACameraCaptureSession_captureCallbacks,
  * with the exception that an onLogicalCameraCaptureCompleted callback is
@@ -708,9 +743,24 @@
     ACameraCaptureSession_logicalCamera_captureCallback_result onLogicalCameraCaptureCompleted;
 
     /**
+     * This callback is called instead of {@link onLogicalCameraCaptureCompleted} when the
+     * camera device failed to produce a capture result for the
+     * request.
+     *
+     * <p>Other requests are unaffected, and some or all image buffers from
+     * the capture may have been pushed to their respective output
+     * streams.</p>
+     *
+     * <p>Note that the ACaptureRequest pointer in the callback will not match what the application
+     * submitted, but the contents of the ACaptureRequest will match the application's request.</p>
+     *
+     * @see ALogicalCameraCaptureFailure
+     */
+    ACameraCaptureSession_logicalCamera_captureCallback_failed onLogicalCameraCaptureFailed;
+
+    /**
      * Same as ACameraCaptureSession_captureCallbacks
      */
-    ACameraCaptureSession_captureCallback_failed        onCaptureFailed;
     ACameraCaptureSession_captureCallback_sequenceEnd   onCaptureSequenceCompleted;
     ACameraCaptureSession_captureCallback_sequenceAbort onCaptureSequenceAborted;
     ACameraCaptureSession_captureCallback_bufferLost    onCaptureBufferLost;
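For readers of the new NDK surface above: a minimal sketch of a handler matching the ACameraCaptureSession_logicalCamera_captureCallback_failed typedef, modeled on the vendor test added later in this change. The callback signature and the ALogicalCameraCaptureFailure fields come from the header diff; the function name, log tag, and logging macro are illustrative assumptions, not part of the patch.

```cpp
#include <camera/NdkCameraCaptureSession.h>
#include <android/log.h>

// Hypothetical tag/macro for this sketch; real clients pick their own logging.
#define DEMO_TAG "LogicalCaptureDemo"
#define DEMO_LOGE(...) __android_log_print(ANDROID_LOG_ERROR, DEMO_TAG, __VA_ARGS__)

// Sketch of an ACameraCaptureSession_logicalCamera_captureCallback_failed implementation.
static void onMyLogicalCaptureFailed(void* /*context*/, ACameraCaptureSession* /*session*/,
        ACaptureRequest* /*request*/, ALogicalCameraCaptureFailure* failure) {
    // physicalCameraId is NULL unless the failed request targeted a physical stream
    // of a logical camera, as documented in the struct above.
    if (failure->physicalCameraId != nullptr) {
        DEMO_LOGE("Capture failed on physical camera %s (sequence %d)",
                failure->physicalCameraId, failure->captureFailure.sequenceId);
    } else {
        DEMO_LOGE("Capture failed (sequence %d)", failure->captureFailure.sequenceId);
    }
    // Do not keep the failure pointer; the framework owns it and frees it after this returns.
}
```

In a client, such a function would be assigned to the onLogicalCameraCaptureFailed field of the logical-camera callbacks struct shown in the hunk above, alongside onLogicalCameraCaptureCompleted, much like the lambda added in the AImageReaderVendorTest hunk further down.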
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index d7d774b..bffab22 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -55,6 +55,7 @@
 const char* CameraDevice::kSequenceIdKey     = "SequenceId";
 const char* CameraDevice::kFrameNumberKey    = "FrameNumber";
 const char* CameraDevice::kAnwKey            = "Anw";
+const char* CameraDevice::kFailingPhysicalCameraId = "FailingPhysicalCameraId";
 
 /**
  * CameraDevice Implementation
@@ -894,10 +895,19 @@
         failure->sequenceId  = sequenceId;
         failure->wasImageCaptured = (errorCode == ErrorCode::CAMERA_RESULT);
 
-        sp<AMessage> msg = new AMessage(kWhatCaptureFail, mHandler);
+        sp<AMessage> msg = new AMessage(cbh.mIsLogicalCameraCallback ? kWhatLogicalCaptureFail :
+                kWhatCaptureFail, mHandler);
         msg->setPointer(kContextKey, cbh.mContext);
         msg->setObject(kSessionSpKey, session);
-        msg->setPointer(kCallbackFpKey, (void*) onError);
+        if (cbh.mIsLogicalCameraCallback) {
+            if (resultExtras.errorPhysicalCameraId.size() > 0) {
+                msg->setString(kFailingPhysicalCameraId, resultExtras.errorPhysicalCameraId.c_str(),
+                        resultExtras.errorPhysicalCameraId.size());
+            }
+            msg->setPointer(kCallbackFpKey, (void*) cbh.mOnLogicalCameraCaptureFailed);
+        } else {
+            msg->setPointer(kCallbackFpKey, (void*) onError);
+        }
         msg->setObject(kCaptureRequestKey, request);
         msg->setObject(kCaptureFailureKey, failure);
         postSessionMsgAndCleanup(msg);
@@ -919,6 +929,7 @@
         case kWhatCaptureResult:
         case kWhatLogicalCaptureResult:
         case kWhatCaptureFail:
+        case kWhatLogicalCaptureFail:
         case kWhatCaptureSeqEnd:
         case kWhatCaptureSeqAbort:
         case kWhatCaptureBufferLost:
@@ -990,6 +1001,7 @@
         case kWhatCaptureResult:
         case kWhatLogicalCaptureResult:
         case kWhatCaptureFail:
+        case kWhatLogicalCaptureFail:
         case kWhatCaptureSeqEnd:
         case kWhatCaptureSeqAbort:
         case kWhatCaptureBufferLost:
@@ -1009,6 +1021,7 @@
                 case kWhatCaptureResult:
                 case kWhatLogicalCaptureResult:
                 case kWhatCaptureFail:
+                case kWhatLogicalCaptureFail:
                 case kWhatCaptureBufferLost:
                     found = msg->findObject(kCaptureRequestKey, &obj);
                     if (!found) {
@@ -1161,6 +1174,39 @@
                     freeACaptureRequest(request);
                     break;
                 }
+                case kWhatLogicalCaptureFail:
+                {
+                    ACameraCaptureSession_logicalCamera_captureCallback_failed onFail;
+                    found = msg->findPointer(kCallbackFpKey, (void**) &onFail);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture fail callback!", __FUNCTION__);
+                        return;
+                    }
+                    if (onFail == nullptr) {
+                        return;
+                    }
+
+                    found = msg->findObject(kCaptureFailureKey, &obj);
+                    if (!found) {
+                        ALOGE("%s: Cannot find capture failure!", __FUNCTION__);
+                        return;
+                    }
+                    sp<CameraCaptureFailure> failureSp(
+                            static_cast<CameraCaptureFailure*>(obj.get()));
+                    ALogicalCameraCaptureFailure failure;
+                    AString physicalCameraId;
+                    found = msg->findString(kFailingPhysicalCameraId, &physicalCameraId);
+                    if (found && !physicalCameraId.empty()) {
+                        failure.physicalCameraId = physicalCameraId.c_str();
+                    } else {
+                        failure.physicalCameraId = nullptr;
+                    }
+                    failure.captureFailure = *failureSp;
+                    ACaptureRequest* request = allocateACaptureRequest(requestSp, device->getId());
+                    (*onFail)(context, session.get(), request, &failure);
+                    freeACaptureRequest(request);
+                    break;
+                }
                 case kWhatCaptureSeqEnd:
                 {
                     ACameraCaptureSession_captureCallback_sequenceEnd onSeqEnd;
@@ -1256,6 +1302,7 @@
 
     if (cbs != nullptr) {
         mOnCaptureCompleted = cbs->onCaptureCompleted;
+        mOnCaptureFailed = cbs->onCaptureFailed;
     }
 }
 
@@ -1271,6 +1318,7 @@
 
     if (lcbs != nullptr) {
         mOnLogicalCameraCaptureCompleted = lcbs->onLogicalCameraCaptureCompleted;
+        mOnLogicalCameraCaptureFailed = lcbs->onLogicalCameraCaptureFailed;
     }
 }
 
@@ -1368,8 +1416,9 @@
 CameraDevice::ServiceCallback::onDeviceError(
         ErrorCode errorCode,
         const CaptureResultExtras& resultExtras) {
-    ALOGD("Device error received, code %d, frame number %" PRId64 ", request ID %d, subseq ID %d",
-            errorCode, resultExtras.frameNumber, resultExtras.requestId, resultExtras.burstId);
+    ALOGD("Device error received, code %d, frame number %" PRId64 ", request ID %d, subseq ID %d"
+            " physical camera ID %s", errorCode, resultExtras.frameNumber, resultExtras.requestId,
+            resultExtras.burstId, resultExtras.errorPhysicalCameraId.c_str());
     auto ret = Void();
     sp<CameraDevice> dev = mDevice.promote();
     if (dev == nullptr) {
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index 47e6f56..7036017 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -242,6 +242,7 @@
         kWhatCaptureResult,    // onCaptureProgressed, onCaptureCompleted
         kWhatLogicalCaptureResult, // onLogicalCameraCaptureCompleted
         kWhatCaptureFail,      // onCaptureFailed
+        kWhatLogicalCaptureFail, // onLogicalCameraCaptureFailed
         kWhatCaptureSeqEnd,    // onCaptureSequenceCompleted
         kWhatCaptureSeqAbort,  // onCaptureSequenceAborted
         kWhatCaptureBufferLost,// onCaptureBufferLost
@@ -261,6 +262,7 @@
     static const char* kSequenceIdKey;
     static const char* kFrameNumberKey;
     static const char* kAnwKey;
+    static const char* kFailingPhysicalCameraId;
 
     class CallbackHandler : public AHandler {
       public:
@@ -307,6 +309,7 @@
             mOnCaptureProgressed = nullptr;
             mOnCaptureCompleted = nullptr;
             mOnLogicalCameraCaptureCompleted = nullptr;
+            mOnLogicalCameraCaptureFailed = nullptr;
             mOnCaptureFailed = nullptr;
             mOnCaptureSequenceCompleted = nullptr;
             mOnCaptureSequenceAborted = nullptr;
@@ -315,7 +318,6 @@
                 mContext = cbs->context;
                 mOnCaptureStarted = cbs->onCaptureStarted;
                 mOnCaptureProgressed = cbs->onCaptureProgressed;
-                mOnCaptureFailed = cbs->onCaptureFailed;
                 mOnCaptureSequenceCompleted = cbs->onCaptureSequenceCompleted;
                 mOnCaptureSequenceAborted = cbs->onCaptureSequenceAborted;
                 mOnCaptureBufferLost = cbs->onCaptureBufferLost;
@@ -332,6 +334,7 @@
         ACameraCaptureSession_captureCallback_result mOnCaptureProgressed;
         ACameraCaptureSession_captureCallback_result mOnCaptureCompleted;
         ACameraCaptureSession_logicalCamera_captureCallback_result mOnLogicalCameraCaptureCompleted;
+        ACameraCaptureSession_logicalCamera_captureCallback_failed mOnLogicalCameraCaptureFailed;
         ACameraCaptureSession_captureCallback_failed mOnCaptureFailed;
         ACameraCaptureSession_captureCallback_sequenceEnd mOnCaptureSequenceCompleted;
         ACameraCaptureSession_captureCallback_sequenceAbort mOnCaptureSequenceAborted;
diff --git a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
index c51f93b..7368775 100644
--- a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
+++ b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
@@ -323,7 +323,13 @@
             }
             ch->mCompletedCaptureCallbackCount++;
         },
-        nullptr, // onCaptureFailed
+        [] (void * /*ctx*/, ACameraCaptureSession* /*session*/, ACaptureRequest* /*request*/,
+                ALogicalCameraCaptureFailure* failure) {
+            if (failure->physicalCameraId) {
+                ALOGD("%s: Physical camera id: %s result failure", __FUNCTION__,
+                        failure->physicalCameraId);
+            }
+        },
         nullptr, // onCaptureSequenceCompleted
         nullptr, // onCaptureSequenceAborted
         nullptr, // onCaptureBufferLost
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index c7c42eb..49d921f 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -40,7 +40,8 @@
 // V0.4.01 = add -h hang option
 //           fix -n option to set output buffer for -tm
 //           plot first glitch
-#define APP_VERSION             "0.4.01"
+// V0.4.02 = allow -n0 for minimal buffer size
+#define APP_VERSION             "0.4.02"
 
 // Tag for machine readable results as property = value pairs
 #define RESULT_TAG              "RESULT: "
@@ -535,7 +536,7 @@
     printf("INPUT  stream ----------------------------------------\n");
     // Use different parameters for the input.
     argParser.setDeviceId(requestedInputDeviceId);
-    argParser.setNumberOfBursts(AAUDIO_UNSPECIFIED);
+    argParser.setNumberOfBursts(AAudioParameters::kDefaultNumberOfBursts);
     argParser.setFormat(requestedInputFormat);
     argParser.setPerformanceMode(inputPerformanceLevel);
     argParser.setChannelCount(requestedInputChannelCount);
diff --git a/media/libaaudio/examples/utils/AAudioArgsParser.h b/media/libaaudio/examples/utils/AAudioArgsParser.h
index 755ecc5..9115778 100644
--- a/media/libaaudio/examples/utils/AAudioArgsParser.h
+++ b/media/libaaudio/examples/utils/AAudioArgsParser.h
@@ -251,6 +251,8 @@
         }
     }
 
+    static constexpr int32_t   kDefaultNumberOfBursts = 2;
+
 private:
     int32_t                    mChannelCount    = AAUDIO_UNSPECIFIED;
     aaudio_format_t            mFormat          = AAUDIO_FORMAT_UNSPECIFIED;
@@ -266,7 +268,7 @@
     aaudio_input_preset_t      mInputPreset     = AAUDIO_UNSPECIFIED;
     aaudio_allowed_capture_policy_t mAllowedCapturePolicy     = AAUDIO_UNSPECIFIED;
 
-    int32_t                    mNumberOfBursts  = AAUDIO_UNSPECIFIED;
+    int32_t                    mNumberOfBursts  = kDefaultNumberOfBursts;
     int32_t                    mFramesPerCallback = AAUDIO_UNSPECIFIED;
 };
 
@@ -386,7 +388,7 @@
         printf("          1 = _NEVER, never use MMAP\n");
         printf("          2 = _AUTO, use MMAP if available, default for -m with no number\n");
         printf("          3 = _ALWAYS, use MMAP or fail\n");
-        printf("      -n{numberOfBursts} for setBufferSize\n");
+        printf("      -n{numberOfBursts} for setBufferSize, default %d\n", kDefaultNumberOfBursts);
         printf("      -p{performanceMode} set output AAUDIO_PERFORMANCE_MODE*, default NONE\n");
         printf("          n for _NONE\n");
         printf("          l for _LATENCY\n");
@@ -460,17 +462,28 @@
                getFormat(), AAudioStream_getFormat(stream));
 
         int32_t framesPerBurst = AAudioStream_getFramesPerBurst(stream);
-        int32_t sizeFrames = AAudioStream_getBufferSizeInFrames(stream);
         printf("  Buffer:       burst     = %d\n", framesPerBurst);
+
+        int32_t sizeFrames = AAudioStream_getBufferSizeInFrames(stream);
         if (framesPerBurst > 0) {
-            printf("  Buffer:       size      = %d = (%d * %d) + %d\n",
+            int32_t requestedSize = getNumberOfBursts() * framesPerBurst;
+            printf("  BufferSize:   requested = %4d, actual = %4d = (%d * %d) + %d\n",
+                   requestedSize,
                    sizeFrames,
                    (sizeFrames / framesPerBurst),
                    framesPerBurst,
                    (sizeFrames % framesPerBurst));
+        } else {
+            printf("  BufferSize:    %d\n", sizeFrames);
         }
-        printf("  Capacity:     requested = %d, actual = %d\n", getBufferCapacity(),
-               AAudioStream_getBufferCapacityInFrames(stream));
+
+        int32_t capacityFrames = AAudioStream_getBufferCapacityInFrames(stream);
+        printf("  Capacity:     requested = %4d, actual = %4d = (%d * %d) + %d\n",
+               getBufferCapacity(),
+               capacityFrames,
+               (capacityFrames / framesPerBurst),
+               framesPerBurst,
+               (capacityFrames % framesPerBurst));
 
         printf("  CallbackSize: requested = %d, actual = %d\n", getFramesPerCallback(),
                AAudioStream_getFramesPerDataCallback(stream));
diff --git a/media/libaaudio/examples/utils/AAudioSimplePlayer.h b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
index 4373fa9..fd1fc45 100644
--- a/media/libaaudio/examples/utils/AAudioSimplePlayer.h
+++ b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
@@ -120,10 +120,9 @@
 
         if (result == AAUDIO_OK) {
             int32_t sizeInBursts = parameters.getNumberOfBursts();
-            if (sizeInBursts > 0) {
-                int32_t framesPerBurst = AAudioStream_getFramesPerBurst(mStream);
-                AAudioStream_setBufferSizeInFrames(mStream, sizeInBursts * framesPerBurst);
-            }
+            int32_t framesPerBurst = AAudioStream_getFramesPerBurst(mStream);
+            int32_t bufferSizeFrames = sizeInBursts * framesPerBurst;
+            AAudioStream_setBufferSizeInFrames(mStream, bufferSizeFrames);
         }
 
         AAudioStreamBuilder_delete(builder);
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index 2b05f10..ca60233 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -31,7 +31,7 @@
 #include "AAudioSimplePlayer.h"
 #include "AAudioArgsParser.h"
 
-#define APP_VERSION  "0.1.6"
+#define APP_VERSION  "0.1.7"
 
 constexpr int32_t kDefaultHangTimeMSec = 10;
 
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index e359c1c..d628bf7 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -422,6 +422,10 @@
 
 aaudio_result_t AudioStreamTrack::setBufferSize(int32_t requestedFrames)
 {
+    // Do not ask for less than one burst.
+    if (requestedFrames < getFramesPerBurst()) {
+        requestedFrames = getFramesPerBurst();
+    }
     ssize_t result = mAudioTrack->setBufferSizeInFrames(requestedFrames);
     if (result < 0) {
         return AAudioConvert_androidToAAudioResult(result);
diff --git a/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp b/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp
index 0b883f1..c03c6ed 100644
--- a/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp
+++ b/media/libeffects/dynamicsproc/EffectDynamicsProcessing.cpp
@@ -25,6 +25,7 @@
 #include <new>
 
 #include <log/log.h>
+#include <sys/param.h>
 
 #include <audio_effects/effect_dynamicsprocessing.h>
 #include <dsp/DPBase.h>
@@ -225,10 +226,6 @@
     } //switch
 }
 
-static inline bool isPowerOf2(unsigned long n) {
-    return (n & (n - 1)) == 0;
-}
-
 void DP_configureVariant(DynamicsProcessingContext *pContext, int newVariant) {
     ALOGV("DP_configureVariant %d", newVariant);
     switch(newVariant) {
@@ -242,7 +239,7 @@
                 desiredBlock);
         if (desiredBlock < minBlockSize) {
             currentBlock = minBlockSize;
-        } else if (!isPowerOf2(desiredBlock)) {
+        } else if (!powerof2(desiredBlock)) {
             //find next highest power of 2.
             currentBlock = 1 << (32 - __builtin_clz(desiredBlock));
         }
@@ -1297,4 +1294,3 @@
 };
 
 }; // extern "C"
-
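Both this file and DPFrequency.cpp below drop their local isPowerOf2 helpers in favor of powerof2() from <sys/param.h>, while keeping the same next-power-of-two rounding via __builtin_clz. A standalone sketch of that arithmetic, with an illustrative block size not taken from the effect code:

```cpp
#include <sys/param.h>   // powerof2()
#include <cstdio>

int main() {
    unsigned int desiredBlock = 1000;   // example value: not a power of two
    unsigned int currentBlock = desiredBlock;
    if (!powerof2(desiredBlock)) {
        // Same rounding as in DP_configureVariant() above: next highest power of 2.
        currentBlock = 1u << (32 - __builtin_clz(desiredBlock));
    }
    std::printf("%u -> %u\n", desiredBlock, currentBlock);   // prints "1000 -> 1024"
    return 0;
}
```

In the patched code the __builtin_clz branch is only reached after the minimum-block-size check, so its argument is never zero (where __builtin_clz would be undefined).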
diff --git a/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp b/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp
index d06fd70..1f53978 100644
--- a/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp
+++ b/media/libeffects/dynamicsproc/dsp/DPFrequency.cpp
@@ -20,6 +20,7 @@
 #include <log/log.h>
 #include "DPFrequency.h"
 #include <algorithm>
+#include <sys/param.h>
 
 namespace dp_fx {
 
@@ -30,10 +31,6 @@
 #define CIRCULAR_BUFFER_UPSAMPLE 4  //4 times buffer size
 
 static constexpr float MIN_ENVELOPE = 1e-6f; //-120 dB
-//helper functionS
-static inline bool isPowerOf2(unsigned long n) {
-    return (n & (n - 1)) == 0;
-}
 static constexpr float EPSILON = 0.0000001f;
 
 static inline bool isZero(float f) {
@@ -151,7 +148,7 @@
     } else if (mBlockSize < MIN_BLOCKSIZE) {
         mBlockSize = MIN_BLOCKSIZE;
     } else {
-        if (!isPowerOf2(blockSize)) {
+        if (!powerof2(blockSize)) {
             //find next highest power of 2.
             mBlockSize = 1 << (32 - __builtin_clz(blockSize));
         }
diff --git a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
index 59586e0..fbfdd4d 100644
--- a/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
+++ b/media/libeffects/lvm/lib/Common/lib/LVM_Types.h
@@ -94,6 +94,7 @@
 
 typedef     int32_t             LVM_INT32;          /* Signed 32-bit word */
 typedef     uint32_t            LVM_UINT32;         /* Unsigned 32-bit word */
+typedef     int64_t             LVM_INT64;          /* Signed 64-bit word */
 
 #ifdef BUILD_FLOAT
 
diff --git a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Process.c b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Process.c
index ea5f74a..61899fe 100644
--- a/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Process.c
+++ b/media/libeffects/lvm/lib/SpectrumAnalyzer/src/LVPSA_Process.c
@@ -22,6 +22,17 @@
 
 #define LVM_MININT_32   0x80000000
 
+static LVM_INT32 mult32x32in32_shiftr(LVM_INT32 a, LVM_INT32 b, LVM_INT32 c) {
+  LVM_INT64 result = ((LVM_INT64)a * b) >> c;
+
+  if (result >= INT32_MAX) {
+    return INT32_MAX;
+  } else if (result <= INT32_MIN) {
+    return INT32_MIN;
+  } else {
+    return (LVM_INT32)result;
+  }
+}
 
 /************************************************************************************/
 /*                                                                                  */
@@ -123,10 +134,10 @@
 
     if(pLVPSA_Inst->pSpectralDataBufferWritePointer != pWrite_Save)
     {
-        MUL32x32INTO32((AudioTime + (LVM_INT32)((LVM_INT32)pLVPSA_Inst->LocalSamplesCount*1000)),
-                        (LVM_INT32)LVPSA_SampleRateInvTab[pLVPSA_Inst->CurrentParams.Fs],
-                        AudioTimeInc,
-                        LVPSA_FsInvertShift)
+        AudioTimeInc = mult32x32in32_shiftr(
+                (AudioTime + ((LVM_INT32)pLVPSA_Inst->LocalSamplesCount * 1000)),
+                (LVM_INT32)LVPSA_SampleRateInvTab[pLVPSA_Inst->CurrentParams.Fs],
+                LVPSA_FsInvertShift);
         pLVPSA_Inst->SpectralDataBufferAudioTime = AudioTime + AudioTimeInc;
     }
 
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index a52da45..1ec419a 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -71,6 +71,7 @@
 static const uint8_t kNalUnitTypePicParamSet = 0x08;
 static const int64_t kInitialDelayTimeUs     = 700000LL;
 static const int64_t kMaxMetadataSize = 0x4000000LL;   // 64MB max per-frame metadata size
+static const int64_t kMaxCttsOffsetTimeUs = 30 * 60 * 1000000LL;  // 30 minutes
 
 static const char kMetaKey_Version[]    = "com.android.version";
 static const char kMetaKey_Manufacturer[]      = "com.android.manufacturer";
@@ -136,13 +137,6 @@
     void resetInternal();
 
 private:
-    enum {
-        // TODO: need to increase this considering the bug
-        // about camera app not sending video frames continuously?
-        kMaxCttsOffsetTimeUs = 1000000LL,  // 1 second
-        kSampleArraySize = 1000,
-    };
-
     // A helper class to handle faster write box with table entries
     template<class TYPE, unsigned ENTRY_SIZE>
     // ENTRY_SIZE: # of values in each entry
@@ -2920,14 +2914,18 @@
 }
 
 void MPEG4Writer::Track::dumpTimeStamps() {
-    ALOGE("Dumping %s track's last 10 frames timestamp and frame type ", getTrackType());
-    std::string timeStampString;
-    for (std::list<TimestampDebugHelperEntry>::iterator entry = mTimestampDebugHelper.begin();
-            entry != mTimestampDebugHelper.end(); ++entry) {
-        timeStampString += "(" + std::to_string(entry->pts)+
-                "us, " + std::to_string(entry->dts) + "us " + entry->frameType + ") ";
+    if (!mTimestampDebugHelper.empty()) {
+        std::string timeStampString = "Dumping " + std::string(getTrackType()) + " track's last " +
+                                      std::to_string(mTimestampDebugHelper.size()) +
+                                      " frames' timestamps (pts, dts) and frame type: ";
+        for (const TimestampDebugHelperEntry& entry : mTimestampDebugHelper) {
+            timeStampString += "\n(" + std::to_string(entry.pts) + "us, " +
+                               std::to_string(entry.dts) + "us " + entry.frameType + ") ";
+        }
+        ALOGE("%s", timeStampString.c_str());
+    } else {
+        ALOGE("0 frames to dump timestamps in %s track", getTrackType());
     }
-    ALOGE("%s", timeStampString.c_str());
 }
 
 status_t MPEG4Writer::Track::threadEntry() {
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index 803155d..6f19023 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -82,10 +82,6 @@
         kWhatSwitch                          = 'swch',
     };
 
-    enum {
-        kMaxCttsOffsetTimeUs = 1000000LL,  // 1 second
-    };
-
     int  mFd;
     int mNextFd;
     sp<MetaData> mStartMeta;
diff --git a/media/mediaserver/mediaserver.rc b/media/mediaserver/mediaserver.rc
index 8cfcd79..f6c325c 100644
--- a/media/mediaserver/mediaserver.rc
+++ b/media/mediaserver/mediaserver.rc
@@ -2,7 +2,5 @@
     class main
     user media
     group audio camera inet net_bt net_bt_admin net_bw_acct drmrpc mediadrm
-    # TODO(b/123275379): Remove updatable when http://aosp/878198 has landed
-    updatable
     ioprio rt 4
     writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 984d9fe..b51c570 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -786,12 +786,8 @@
     }
 }
 
-void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args __unused)
+void AudioFlinger::ThreadBase::dump(int fd, const Vector<String16>& args)
 {
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
     dprintf(fd, "\n%s thread %p, name %s, tid %d, type %d (%s):\n", isOutput() ? "Output" : "Input",
             this, mThreadName, getTid(), type(), threadTypeToString(type()));
 
@@ -800,6 +796,21 @@
         dprintf(fd, "  Thread may be deadlocked\n");
     }
 
+    dumpBase_l(fd, args);
+    dumpInternals_l(fd, args);
+    dumpTracks_l(fd, args);
+    dumpEffectChains_l(fd, args);
+
+    if (locked) {
+        mLock.unlock();
+    }
+
+    dprintf(fd, "  Local log:\n");
+    mLocalLog.dump(fd, "   " /* prefix */, 40 /* lines */);
+}
+
+void AudioFlinger::ThreadBase::dumpBase_l(int fd, const Vector<String16>& args __unused)
+{
     dprintf(fd, "  I/O handle: %d\n", mId);
     dprintf(fd, "  Standby: %s\n", mStandby ? "yes" : "no");
     dprintf(fd, "  Sample rate: %u Hz\n", mSampleRate);
@@ -814,6 +825,8 @@
     dprintf(fd, "  Pending config events:");
     size_t numConfig = mConfigEvents.size();
     if (numConfig) {
+        const size_t SIZE = 256;
+        char buffer[SIZE];
         for (size_t i = 0; i < numConfig; i++) {
             mConfigEvents[i]->dump(buffer, SIZE);
             dprintf(fd, "\n    %s", buffer);
@@ -858,17 +871,12 @@
                 isOutput() ? "write" : "read",
                 mLatencyMs.toString().c_str());
     }
-
-    if (locked) {
-        mLock.unlock();
-    }
 }
 
-void AudioFlinger::ThreadBase::dumpEffectChains(int fd, const Vector<String16>& args)
+void AudioFlinger::ThreadBase::dumpEffectChains_l(int fd, const Vector<String16>& args)
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
-    String8 result;
 
     size_t numEffectChains = mEffectChains.size();
     snprintf(buffer, SIZE, "  %zu Effect Chains\n", numEffectChains);
@@ -1819,16 +1827,24 @@
     free(mEffectBuffer);
 }
 
-void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args)
+// Thread virtuals
+
+void AudioFlinger::PlaybackThread::onFirstRef()
 {
-    dumpInternals(fd, args);
-    dumpTracks(fd, args);
-    dumpEffectChains(fd, args);
-    dprintf(fd, "  Local log:\n");
-    mLocalLog.dump(fd, "   " /* prefix */, 40 /* lines */);
+    run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO);
 }
 
-void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args __unused)
+// ThreadBase virtuals
+void AudioFlinger::PlaybackThread::preExit()
+{
+    ALOGV("  preExit()");
+    // FIXME this is using hard-coded strings but in the future, this functionality will be
+    //       converted to use audio HAL extensions required to support tunneling
+    status_t result = mOutput->stream->setParameters(String8("exiting=1"));
+    ALOGE_IF(result != OK, "Error when setting parameters on exit: %d", result);
+}
+
+void AudioFlinger::PlaybackThread::dumpTracks_l(int fd, const Vector<String16>& args __unused)
 {
     String8 result;
 
@@ -1893,10 +1909,8 @@
     write(fd, result.string(), result.size());
 }
 
-void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
+void AudioFlinger::PlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
 {
-    dumpBase(fd, args);
-
     dprintf(fd, "  Master mute: %s\n", mMasterMute ? "on" : "off");
     if (mHapticChannelMask != AUDIO_CHANNEL_NONE) {
         dprintf(fd, "  Haptic channel mask: %#x (%s)\n", mHapticChannelMask,
@@ -1927,23 +1941,6 @@
     }
 }
 
-// Thread virtuals
-
-void AudioFlinger::PlaybackThread::onFirstRef()
-{
-    run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO);
-}
-
-// ThreadBase virtuals
-void AudioFlinger::PlaybackThread::preExit()
-{
-    ALOGV("  preExit()");
-    // FIXME this is using hard-coded strings but in the future, this functionality will be
-    //       converted to use audio HAL extensions required to support tunneling
-    status_t result = mOutput->stream->setParameters(String8("exiting=1"));
-    ALOGE_IF(result != OK, "Error when setting parameters on exit: %d", result);
-}
-
 // PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held
 sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
         const sp<AudioFlinger::Client>& client,
@@ -5350,9 +5347,9 @@
 }
 
 
-void AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& args)
+void AudioFlinger::MixerThread::dumpInternals_l(int fd, const Vector<String16>& args)
 {
-    PlaybackThread::dumpInternals(fd, args);
+    PlaybackThread::dumpInternals_l(fd, args);
     dprintf(fd, "  Thread throttle time (msecs): %u\n", mThreadThrottleTimeMs);
     dprintf(fd, "  AudioMixer tracks: %s\n", mAudioMixer->trackNames().c_str());
     dprintf(fd, "  Master mono: %s\n", mMasterMono ? "on" : "off");
@@ -5426,9 +5423,9 @@
 {
 }
 
-void AudioFlinger::DirectOutputThread::dumpInternals(int fd, const Vector<String16>& args)
+void AudioFlinger::DirectOutputThread::dumpInternals_l(int fd, const Vector<String16>& args)
 {
-    PlaybackThread::dumpInternals(fd, args);
+    PlaybackThread::dumpInternals_l(fd, args);
     dprintf(fd, "  Master balance: %f  Left: %f  Right: %f\n",
             mMasterBalance.load(), mMasterBalanceLeft, mMasterBalanceRight);
 }
@@ -6443,9 +6440,9 @@
     }
 }
 
-void AudioFlinger::DuplicatingThread::dumpInternals(int fd, const Vector<String16>& args __unused)
+void AudioFlinger::DuplicatingThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
 {
-    MixerThread::dumpInternals(fd, args);
+    MixerThread::dumpInternals_l(fd, args);
 
     std::stringstream ss;
     const size_t numTracks = mOutputTracks.size();
@@ -7775,19 +7772,8 @@
     }
 }
 
-void AudioFlinger::RecordThread::dump(int fd, const Vector<String16>& args)
+void AudioFlinger::RecordThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
 {
-    dumpInternals(fd, args);
-    dumpTracks(fd, args);
-    dumpEffectChains(fd, args);
-    dprintf(fd, "  Local log:\n");
-    mLocalLog.dump(fd, "   " /* prefix */, 40 /* lines */);
-}
-
-void AudioFlinger::RecordThread::dumpInternals(int fd, const Vector<String16>& args)
-{
-    dumpBase(fd, args);
-
     AudioStreamIn *input = mInput;
     audio_input_flags_t flags = input != NULL ? input->flags : AUDIO_INPUT_FLAG_NONE;
     dprintf(fd, "  AudioStreamIn: %p flags %#x (%s)\n",
@@ -7814,7 +7800,7 @@
     copy->dump(fd);
 }
 
-void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args __unused)
+void AudioFlinger::RecordThread::dumpTracks_l(int fd, const Vector<String16>& args __unused)
 {
     String8 result;
     size_t numtracks = mTracks.size();
@@ -9079,19 +9065,8 @@
     }
 }
 
-void AudioFlinger::MmapThread::dump(int fd, const Vector<String16>& args)
+void AudioFlinger::MmapThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
 {
-    dumpInternals(fd, args);
-    dumpTracks(fd, args);
-    dumpEffectChains(fd, args);
-    dprintf(fd, "  Local log:\n");
-    mLocalLog.dump(fd, "   " /* prefix */, 40 /* lines */);
-}
-
-void AudioFlinger::MmapThread::dumpInternals(int fd, const Vector<String16>& args)
-{
-    dumpBase(fd, args);
-
     dprintf(fd, "  Attributes: content type %d usage %d source %d\n",
             mAttr.content_type, mAttr.usage, mAttr.source);
     dprintf(fd, "  Session: %d port Id: %d\n", mSessionId, mPortId);
@@ -9100,7 +9075,7 @@
     }
 }
 
-void AudioFlinger::MmapThread::dumpTracks(int fd, const Vector<String16>& args __unused)
+void AudioFlinger::MmapThread::dumpTracks_l(int fd, const Vector<String16>& args __unused)
 {
     String8 result;
     size_t numtracks = mActiveTracks.size();
@@ -9323,9 +9298,9 @@
     }
 }
 
-void AudioFlinger::MmapPlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
+void AudioFlinger::MmapPlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args)
 {
-    MmapThread::dumpInternals(fd, args);
+    MmapThread::dumpInternals_l(fd, args);
 
     dprintf(fd, "  Stream type: %d Stream volume: %f HAL volume: %f Stream mute %d\n",
             mStreamType, mStreamVolume, mHalVolFloat, mStreamMute);
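The Threads.cpp hunks above (together with the Threads.h hunks that follow) reshape AudioFlinger's dump plumbing into a template-method form: ThreadBase::dump() becomes the shared entry point that locks the thread (the real code try-locks and prints a "may be deadlocked" note if it cannot), prints the common header, then calls the _l-suffixed hooks (dumpBase_l, dumpInternals_l, dumpTracks_l, dumpEffectChains_l) under that lock, so subclasses now override only the hooks instead of re-implementing dump(). A minimal standalone sketch of that shape, with illustrative class names and a plain lock_guard standing in for the try-lock:

```cpp
#include <cstdio>
#include <mutex>

// Illustrative names only; the real classes are AudioFlinger::ThreadBase and its subclasses.
class ThreadBaseSketch {
public:
    virtual ~ThreadBaseSketch() = default;

    void dump(int fd) {                            // shared entry point, not overridden
        std::lock_guard<std::mutex> guard(mLock);  // real code try-locks and warns on deadlock
        dumpBase_l(fd);
        dumpInternals_l(fd);                       // subclass hooks all run under the same lock
        dumpTracks_l(fd);
    }

protected:
    virtual void dumpInternals_l(int fd) { (void)fd; }   // default: nothing extra
    virtual void dumpTracks_l(int fd)    { (void)fd; }

private:
    void dumpBase_l(int fd) { dprintf(fd, "  common thread state\n"); }
    std::mutex mLock;
};

class PlaybackThreadSketch : public ThreadBaseSketch {
protected:
    void dumpInternals_l(int fd) override { dprintf(fd, "  playback internals\n"); }
    void dumpTracks_l(int fd)    override { dprintf(fd, "  playback tracks\n"); }
};
```

This is why the per-subclass dump()/dumpInternals()/dumpTracks() declarations are removed from Threads.h below: each subclass keeps only its *_l overrides, and the locking plus the local-log tail live in one place.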
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index e5abce7..18cb361 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -43,9 +43,6 @@
 
     virtual status_t    readyToRun();
 
-    void dumpBase(int fd, const Vector<String16>& args);
-    void dumpEffectChains(int fd, const Vector<String16>& args);
-
     void clearPowerManager();
 
     // base for record and playback
@@ -418,7 +415,7 @@
 
                 bool                isMsdDevice() const { return mIsMsdDevice; }
 
-    virtual     void                dump(int fd, const Vector<String16>& args) = 0;
+                void                dump(int fd, const Vector<String16>& args);
 
                 // deliver stats to mediametrics.
                 void                sendStatistics(bool force);
@@ -470,6 +467,11 @@
                                 return INVALID_OPERATION;
                             }
 
+    virtual     void        dumpInternals_l(int fd __unused, const Vector<String16>& args __unused)
+                            { }
+    virtual     void        dumpTracks_l(int fd __unused, const Vector<String16>& args __unused) { }
+
+
     friend class AudioFlinger;      // for mEffectChains
 
                 const type_t            mType;
@@ -657,6 +659,10 @@
                 };
 
                 SimpleLog mLocalLog;
+
+private:
+                void dumpBase_l(int fd, const Vector<String16>& args);
+                void dumpEffectChains_l(int fd, const Vector<String16>& args);
 };
 
 class VolumeInterface {
@@ -709,8 +715,6 @@
                    audio_io_handle_t id, audio_devices_t device, type_t type, bool systemReady);
     virtual             ~PlaybackThread();
 
-                void        dump(int fd, const Vector<String16>& args) override;
-
     // Thread virtuals
     virtual     bool        threadLoop();
 
@@ -760,6 +764,9 @@
                                 mActiveTracks.updatePowerState(this, true /* force */);
                             }
 
+                void        dumpInternals_l(int fd, const Vector<String16>& args) override;
+                void        dumpTracks_l(int fd, const Vector<String16>& args) override;
+
 public:
 
     virtual     status_t    initCheck() const { return (mOutput == NULL) ? NO_INIT : NO_ERROR; }
@@ -1009,9 +1016,6 @@
     void        updateMetadata_l() final;
     virtual void sendMetadataToBackend_l(const StreamOutHalInterface::SourceMetadata& metadata);
 
-    virtual void dumpInternals(int fd, const Vector<String16>& args);
-    void        dumpTracks(int fd, const Vector<String16>& args);
-
     // The Tracks class manages tracks added and removed from the Thread.
     template <typename T>
     class Tracks {
@@ -1166,7 +1170,6 @@
 
     virtual     bool        checkForNewParameter_l(const String8& keyValuePair,
                                                    status_t& status);
-    virtual     void        dumpInternals(int fd, const Vector<String16>& args);
 
     virtual     bool        isTrackAllowed_l(
                                     audio_channel_mask_t channelMask, audio_format_t format,
@@ -1185,6 +1188,8 @@
         }
     }
 
+                void        dumpInternals_l(int fd, const Vector<String16>& args) override;
+
     // threadLoop snippets
     virtual     ssize_t     threadLoop_write();
     virtual     void        threadLoop_standby();
@@ -1266,8 +1271,6 @@
     virtual     bool        checkForNewParameter_l(const String8& keyValuePair,
                                                    status_t& status);
 
-                void        dumpInternals(int fd, const Vector<String16>& args) override;
-
     virtual     void        flushHw_l();
 
                 void        setMasterBalance(float balance) override;
@@ -1278,6 +1281,8 @@
     virtual     uint32_t    suspendSleepTimeUs() const;
     virtual     void        cacheParameters_l();
 
+                void        dumpInternals_l(int fd, const Vector<String16>& args) override;
+
     // threadLoop snippets
     virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
     virtual     void        threadLoop_mix();
@@ -1397,8 +1402,6 @@
     virtual                 ~DuplicatingThread();
 
     // Thread virtuals
-    virtual     void        dumpInternals(int fd, const Vector<String16>& args) override;
-
                 void        addOutputTrack(MixerThread* thread);
                 void        removeOutputTrack(MixerThread* thread);
                 uint32_t    waitTimeMs() const { return mWaitTimeMs; }
@@ -1407,6 +1410,7 @@
                         const StreamOutHalInterface::SourceMetadata& metadata) override;
 protected:
     virtual     uint32_t    activeSleepTimeUs() const;
+                void        dumpInternals_l(int fd, const Vector<String16>& args) override;
 
 private:
                 bool        outputsReady(const SortedVector< sp<OutputTrack> > &outputTracks);
@@ -1512,9 +1516,6 @@
     void        destroyTrack_l(const sp<RecordTrack>& track);
     void        removeTrack_l(const sp<RecordTrack>& track);
 
-    void        dumpInternals(int fd, const Vector<String16>& args);
-    void        dumpTracks(int fd, const Vector<String16>& args);
-
     // Thread virtuals
     virtual bool        threadLoop();
     virtual void        preExit();
@@ -1551,7 +1552,6 @@
             // return true if the caller should then do it's part of the stopping process
             bool        stop(RecordTrack* recordTrack);
 
-            void        dump(int fd, const Vector<String16>& args) override;
             AudioStreamIn* clearInput();
             virtual sp<StreamHalInterface> stream() const;
 
@@ -1619,6 +1619,11 @@
                             return audio_is_input_device(
                                     mInDevice & mTimestampCorrectedDevices);
                         }
+
+protected:
+            void        dumpInternals_l(int fd, const Vector<String16>& args) override;
+            void        dumpTracks_l(int fd, const Vector<String16>& args) override;
+
 private:
             // Enter standby if not already in standby, and set mStandby flag
             void    standbyIfNotAlreadyInStandby();
@@ -1768,11 +1773,9 @@
                 // Sets the UID records silence
     virtual     void        setRecordSilenced(uid_t uid __unused, bool silenced __unused) {}
 
-                void        dump(int fd, const Vector<String16>& args) override;
-    virtual     void        dumpInternals(int fd, const Vector<String16>& args);
-                void        dumpTracks(int fd, const Vector<String16>& args);
-
  protected:
+                void        dumpInternals_l(int fd, const Vector<String16>& args) override;
+                void        dumpTracks_l(int fd, const Vector<String16>& args) override;
 
                 audio_attributes_t      mAttr;
                 audio_session_t         mSessionId;
@@ -1822,8 +1825,6 @@
     virtual     void        checkSilentMode_l();
                 void        processVolume_l() override;
 
-    virtual     void        dumpInternals(int fd, const Vector<String16>& args);
-
     virtual     bool        isOutput() const override { return true; }
 
                 void        updateMetadata_l() override;
@@ -1831,6 +1832,7 @@
     virtual     void        toAudioPortConfig(struct audio_port_config *config);
 
 protected:
+                void        dumpInternals_l(int fd, const Vector<String16>& args) override;
 
                 audio_stream_type_t         mStreamType;
                 float                       mMasterVolume;
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index cc43fe6..33e506f 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -135,11 +135,18 @@
     /**
      * @brief filter the devices supported by this collection against another collection
      * @param devices to filter against
-     * @return
+     * @return a filtered DeviceVector
      */
     DeviceVector filter(const DeviceVector &devices) const;
 
     /**
+     * @brief filter the devices supported by this collection before sending
+     * them to the Engine via the AudioPolicyManagerObserver interface
+     * @return a filtered DeviceVector
+     */
+    DeviceVector filterForEngine() const;
+
+    /**
      * @brief merge two vectors. As SortedVector Implementation is buggy (it does not check the size
      * of the destination vector, only of the source, it provides a safe implementation
      * @param devices source device vector to merge with
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index f02db6a9..8f15016 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -47,32 +47,29 @@
 
     int indexCriterion = 0;
     for (const auto &criterion : mCriteria) {
-        dst->appendFormat("%*s- Criterion %d:\n", spaces + 2, "", indexCriterion++);
+        dst->appendFormat("%*s- Criterion %d: ", spaces + 2, "", indexCriterion++);
 
-        std::string usageLiteral;
-        if (!UsageTypeConverter::toString(criterion.mValue.mUsage, usageLiteral)) {
-            ALOGE("%s: failed to convert usage %d", __FUNCTION__, criterion.mValue.mUsage);
-            return;
+        std::string ruleType, ruleValue;
+        bool unknownRule = !RuleTypeConverter::toString(criterion.mRule, ruleType);
+        switch (criterion.mRule & ~RULE_EXCLUSION_MASK) { // no need to match RULE_EXCLUDE_...
+        case RULE_MATCH_ATTRIBUTE_USAGE:
+            UsageTypeConverter::toString(criterion.mValue.mUsage, ruleValue);
+            break;
+        case RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET:
+            SourceTypeConverter::toString(criterion.mValue.mSource, ruleValue);
+            break;
+        case RULE_MATCH_UID:
+            ruleValue = std::to_string(criterion.mValue.mUid);
+            break;
+        default:
+            unknownRule = true;
         }
-        dst->appendFormat("%*s- Usage:%s\n", spaces + 4, "", usageLiteral.c_str());
 
-        if (mMixType == MIX_TYPE_RECORDERS) {
-            std::string sourceLiteral;
-            if (!SourceTypeConverter::toString(criterion.mValue.mSource, sourceLiteral)) {
-                ALOGE("%s: failed to convert source %d", __FUNCTION__, criterion.mValue.mSource);
-                return;
-            }
-            dst->appendFormat("%*s- Source:%s\n", spaces + 4, "", sourceLiteral.c_str());
-
+        if (!unknownRule) {
+            dst->appendFormat("%s %s\n", ruleType.c_str(), ruleValue.c_str());
+        } else {
+            dst->appendFormat("Unknown rule type value 0x%x\n", criterion.mRule);
         }
-        dst->appendFormat("%*s- Uid:%d\n", spaces + 4, "", criterion.mValue.mUid);
-
-        std::string ruleLiteral;
-        if (!RuleTypeConverter::toString(criterion.mRule, ruleLiteral)) {
-            ALOGE("%s: failed to convert source %d", __FUNCTION__,criterion.mRule);
-            return;
-        }
-        dst->appendFormat("%*s- Rule:%s\n", spaces + 4, "", ruleLiteral.c_str());
     }
 }
 
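
(Illustrative sketch, not part of the patch.) With the rewrite above, each criterion now dumps on a single line: the rule name from RuleTypeConverter followed by a value whose converter is selected by the rule type with the exclusion bit masked out. A minimal standalone C++ sketch of the resulting line format, with the converter lookups replaced by fixed, purely illustrative strings:

// Standalone illustration of the per-criterion dump line; the real code uses
// RuleTypeConverter / UsageTypeConverter / SourceTypeConverter lookups instead
// of the hard-coded strings below.
#include <cstdio>

int main() {
    int spaces = 0, index = 0;
    const char* ruleType  = "RULE_MATCH_ATTRIBUTE_USAGE";   // stand-in for RuleTypeConverter output
    const char* ruleValue = "AUDIO_USAGE_MEDIA";             // stand-in for UsageTypeConverter output
    // Mirrors the two appendFormat() calls, e.g.
    //   "  - Criterion 0: RULE_MATCH_ATTRIBUTE_USAGE AUDIO_USAGE_MEDIA"
    std::printf("%*s- Criterion %d: %s %s\n", spaces + 2, "", index, ruleType, ruleValue);
    return 0;
}
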
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 1a74f48..ecd5b34 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -399,6 +399,18 @@
     return filter(devices).size() == devices.size();
 }
 
+DeviceVector DeviceVector::filterForEngine() const
+{
+    DeviceVector filteredDevices;
+    for (const auto &device : *this) {
+        if (audio_is_remote_submix_device(device->type()) && device->address() != "0") {
+            continue;
+        }
+        filteredDevices.add(device);
+    }
+    return filteredDevices;
+}
+
 void DeviceDescriptor::log() const
 {
     ALOGI("Device id:%d type:0x%08X:%s, addr:%s", mId,  mDeviceType,
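
(Illustrative sketch, not part of the patch.) The DeviceVector::filterForEngine() added above hides remote submix devices with a non-default address (anything other than "0"), i.e. the endpoints created for dynamic audio policy mixes, from the engine. A minimal standalone sketch of the same predicate, using hypothetical stand-in types rather than the real DeviceDescriptor/DeviceVector classes:

// Standalone sketch: models the filtering rule used by DeviceVector::filterForEngine().
// Device, the remoteSubmix flag and the sample data are hypothetical stand-ins for the
// real DeviceDescriptor / audio_is_remote_submix_device().
#include <iostream>
#include <string>
#include <vector>

struct Device {
    bool remoteSubmix;    // stand-in for audio_is_remote_submix_device(type)
    std::string address;  // "0" is the default submix address
    std::string name;
};

// Keep a device unless it is a remote submix with a non-default address,
// i.e. one created for a dynamic audio policy mix.
std::vector<Device> filterForEngine(const std::vector<Device>& in) {
    std::vector<Device> out;
    for (const auto& d : in) {
        if (d.remoteSubmix && d.address != "0") continue;
        out.push_back(d);
    }
    return out;
}

int main() {
    std::vector<Device> all = {
        {false, "",     "speaker"},
        {true,  "0",    "submix-default"},
        {true,  "mix1", "submix-dynamic"},   // hidden from the engine
    };
    for (const auto& d : filterForEngine(all)) std::cout << d.name << "\n";
    return 0;
}
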
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index 7c76d8a..2b5455e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -52,7 +52,6 @@
 
 template <>
 const RuleTypeConverter::Table RuleTypeConverter::mTable[] = {
-    MAKE_STRING_FROM_ENUM(RULE_EXCLUSION_MASK),
     MAKE_STRING_FROM_ENUM(RULE_MATCH_ATTRIBUTE_USAGE),
     MAKE_STRING_FROM_ENUM(RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET),
     MAKE_STRING_FROM_ENUM(RULE_MATCH_UID),
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
index 43ba625..ebd82a7 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
@@ -46,9 +46,9 @@
 
     virtual const AudioInputCollection &getInputs() const = 0;
 
-    virtual const DeviceVector &getAvailableOutputDevices() const = 0;
+    virtual const DeviceVector getAvailableOutputDevices() const = 0;
 
-    virtual const DeviceVector &getAvailableInputDevices() const = 0;
+    virtual const DeviceVector getAvailableInputDevices() const = 0;
 
     virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const = 0;
 
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index 89aaa84..e59d983 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -207,7 +207,7 @@
         ALOGE("%s: Trying to get device on invalid strategy %d", __FUNCTION__, ps);
         return {};
     }
-    const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+    const DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
     const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
     uint32_t availableOutputDevicesType = availableOutputDevices.types();
 
@@ -272,7 +272,7 @@
         return DeviceVector(preferredDevice);
     }
     product_strategy_t strategy = getProductStrategyForAttributes(attributes);
-    const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+    const DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
     const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
     //
     // @TODO: what is the priority of explicit routing? Shall it be considered first as it used to
@@ -298,7 +298,7 @@
                                                          sp<AudioPolicyMix> *mix) const
 {
     const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
-    const auto &availableInputDevices = getApmObserver()->getAvailableInputDevices();
+    const auto availableInputDevices = getApmObserver()->getAvailableInputDevices();
     const auto &inputs = getApmObserver()->getInputs();
     std::string address;
     //
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 66a6965..592a0b9 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -502,8 +502,8 @@
 
 audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) const
 {
-    const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
-    const DeviceVector &availableInputDevices = getApmObserver()->getAvailableInputDevices();
+    const DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+    const DeviceVector availableInputDevices = getApmObserver()->getAvailableInputDevices();
     const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
     audio_devices_t availableDeviceTypes = availableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
 
@@ -704,7 +704,7 @@
         return DeviceVector(preferredDevice);
     }
     product_strategy_t strategy = getProductStrategyForAttributes(attributes);
-    const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+    const DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
     const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
     //
     // @TODO: what is the priority of explicit routing? Shall it be considered first as it used to
@@ -730,7 +730,7 @@
                                                          sp<AudioPolicyMix> *mix) const
 {
     const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
-    const auto &availableInputDevices = getApmObserver()->getAvailableInputDevices();
+    const auto availableInputDevices = getApmObserver()->getAvailableInputDevices();
     const auto &inputs = getApmObserver()->getInputs();
     std::string address;
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index a12bdaa..b9cd7d0 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -423,8 +423,7 @@
     if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
 
     // Check if the device is currently connected
-    DeviceVector availableDevices = getAvailableOutputDevices();
-    DeviceVector deviceList = availableDevices.getDevicesFromTypeMask(device);
+    DeviceVector deviceList = mAvailableOutputDevices.getDevicesFromTypeMask(device);
     if (deviceList.empty()) {
         // Nothing to do: device is not connected
         return NO_ERROR;
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 26208c8..a700868 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -342,13 +342,13 @@
         {
             return mInputs;
         }
-        virtual const DeviceVector &getAvailableOutputDevices() const
+        virtual const DeviceVector getAvailableOutputDevices() const
         {
-            return mAvailableOutputDevices;
+            return mAvailableOutputDevices.filterForEngine();
         }
-        virtual const DeviceVector &getAvailableInputDevices() const
+        virtual const DeviceVector getAvailableInputDevices() const
         {
-            return mAvailableInputDevices;
+            return mAvailableInputDevices.filterForEngine();
         }
         virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const
         {
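
(Illustrative sketch, not part of the patch.) The getters above switch from returning a const DeviceVector& to returning DeviceVector by value because they now hand back the temporary produced by filterForEngine(); a getter cannot safely return a reference to that temporary, which is also why the Engine call sites earlier in this patch store plain values. A minimal standalone sketch of the hazard and the by-value alternative, using illustrative names only:

// Standalone sketch: why the observer getters return by value once they filter.
// DeviceVectorModel and Observer are illustrative stand-ins, not the real classes.
#include <vector>

using DeviceVectorModel = std::vector<int>;

struct Observer {
    DeviceVectorModel mAvailable{1, 2, 3};

    // Returning a reference to the temporary produced by a filter would dangle:
    //   const DeviceVectorModel& getAvailable() const { return makeFiltered(); } // UB
    // Returning by value keeps the filtered copy alive in the caller:
    DeviceVectorModel getAvailable() const {
        DeviceVectorModel filtered;
        for (int d : mAvailable) if (d != 2) filtered.push_back(d);
        return filtered;
    }
};

int main() {
    Observer o;
    const DeviceVectorModel devices = o.getAvailable(); // safe copy/move, no dangling
    return static_cast<int>(devices.size());
}
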
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 22e09e4..bc0dafe 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1062,14 +1062,18 @@
             nsecs_t waitDuration = kBaseGetBufferWait + getExpectedInFlightDuration();
             status_t res = outputStream->getBuffer(&sb, waitDuration);
             if (res != OK) {
-                ALOGE("%s: Can't get output buffer for stream %d: %s (%d)",
-                        __FUNCTION__, streamId, strerror(-res), res);
                 if (res == NO_INIT || res == DEAD_OBJECT) {
+                    ALOGV("%s: Can't get output buffer for stream %d: %s (%d)",
+                            __FUNCTION__, streamId, strerror(-res), res);
                     bufRet.val.error(StreamBufferRequestError::STREAM_DISCONNECTED);
-                } else if (res == TIMED_OUT || res == NO_MEMORY) {
-                    bufRet.val.error(StreamBufferRequestError::NO_BUFFER_AVAILABLE);
                 } else {
-                    bufRet.val.error(StreamBufferRequestError::UNKNOWN_ERROR);
+                    ALOGE("%s: Can't get output buffer for stream %d: %s (%d)",
+                            __FUNCTION__, streamId, strerror(-res), res);
+                    if (res == TIMED_OUT || res == NO_MEMORY) {
+                        bufRet.val.error(StreamBufferRequestError::NO_BUFFER_AVAILABLE);
+                    } else {
+                        bufRet.val.error(StreamBufferRequestError::UNKNOWN_ERROR);
+                    }
                 }
                 currentReqSucceeds = false;
                 break;
@@ -3154,9 +3158,10 @@
 
         // Note: stream may be deallocated at this point, if this buffer was
         // the last reference to it.
-        if (res != OK) {
-            ALOGE("Can't return buffer to its stream: %s (%d)",
-                strerror(-res), res);
+        if (res == NO_INIT || res == DEAD_OBJECT) {
+            ALOGV("Can't return buffer to its stream: %s (%d)", strerror(-res), res);
+        } else if (res != OK) {
+            ALOGE("Can't return buffer to its stream: %s (%d)", strerror(-res), res);
         }
 
         // Long processing consumers can cause returnBuffer timeout for shared stream
@@ -3777,10 +3782,12 @@
             hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR;
 
     int streamId = 0;
+    String16 physicalCameraId;
     if (msg.error_stream != NULL) {
         Camera3Stream *stream =
                 Camera3Stream::cast(msg.error_stream);
         streamId = stream->getId();
+        physicalCameraId = String16(stream->physicalCameraId());
     }
     ALOGV("Camera %s: %s: HAL error, frame %d, stream %d: %d",
             mId.string(), __FUNCTION__, msg.frame_number,
@@ -3802,13 +3809,29 @@
                     InFlightRequest &r = mInFlightMap.editValueAt(idx);
                     r.requestStatus = msg.error_code;
                     resultExtras = r.resultExtras;
-                    if (hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT == errorCode
+                    bool logicalDeviceResultError = false;
+                    if (hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT ==
+                            errorCode) {
+                        if (physicalCameraId.size() > 0) {
+                            String8 cameraId(physicalCameraId);
+                            if (r.physicalCameraIds.find(cameraId) == r.physicalCameraIds.end()) {
+                                ALOGE("%s: Reported result failure for physical camera device: %s "
+                                        "which is not part of the respective request!",
+                                        __FUNCTION__, cameraId.string());
+                                break;
+                            }
+                            resultExtras.errorPhysicalCameraId = physicalCameraId;
+                        } else {
+                            logicalDeviceResultError = true;
+                        }
+                    }
+
+                    if (logicalDeviceResultError
                             ||  hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST ==
                             errorCode) {
                         r.skipResultMetadata = true;
                     }
-                    if (hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT ==
-                            errorCode) {
+                    if (logicalDeviceResultError) {
                         // In case of missing result check whether the buffers
                         // returned. If they returned, then remove inflight
                         // request.
@@ -5580,7 +5603,7 @@
 
             if (mUseHalBufManager) {
                 if (outputStream->isAbandoned()) {
-                    ALOGE("%s: stream %d is abandoned.", __FUNCTION__, streamId);
+                    ALOGV("%s: stream %d is abandoned, skipping request", __FUNCTION__, streamId);
                     return TIMED_OUT;
                 }
                 // HAL will request buffer through requestStreamBuffer API
@@ -5598,7 +5621,7 @@
                     // Can't get output buffer from gralloc queue - this could be due to
                     // abandoned queue or other consumer misbehavior, so not a fatal
                     // error
-                    ALOGE("RequestThread: Can't get output buffer, skipping request:"
+                    ALOGV("RequestThread: Can't get output buffer, skipping request:"
                             " %s (%d)", strerror(-res), res);
 
                     return TIMED_OUT;
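
(Illustrative sketch, not part of the patch.) For ERROR_CAMERA_RESULT the error-notification hunk above distinguishes three cases: a reported physical camera id that is not part of the request (the notification is dropped), a valid physical camera id (recorded in resultExtras.errorPhysicalCameraId), and no physical camera id at all (treated as a logical-device result error, which skips result metadata and may remove the in-flight entry). A condensed standalone sketch of that classification, using simplified stand-in types rather than the real InFlightRequest/CaptureResultExtras:

// Standalone sketch of the ERROR_CAMERA_RESULT classification added above.
#include <set>
#include <string>

struct ResultExtras { std::string errorPhysicalCameraId; };  // stand-in for CaptureResultExtras

enum class ResultErrorKind {
    DropNotification,    // physical id reported but not part of the request
    PhysicalResultError, // recorded in extras; the logical result may still arrive
    LogicalResultError   // no physical id: skip metadata, may remove in-flight entry
};

ResultErrorKind classifyResultError(const std::string& physicalCameraId,
                                    const std::set<std::string>& requestPhysicalIds,
                                    ResultExtras& extras) {
    if (!physicalCameraId.empty()) {
        if (requestPhysicalIds.count(physicalCameraId) == 0) {
            return ResultErrorKind::DropNotification;
        }
        extras.errorPhysicalCameraId = physicalCameraId;
        return ResultErrorKind::PhysicalResultError;
    }
    return ResultErrorKind::LogicalResultError;
}

int main() {
    ResultExtras extras;
    auto kind = classifyResultError("2", {"2", "3"}, extras);
    return kind == ResultErrorKind::PhysicalResultError ? 0 : 1;
}
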
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index baba856..1c77581 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -233,6 +233,7 @@
      * queueBuffer
      */
     sp<ANativeWindow> currentConsumer = mConsumer;
+    StreamState state = mState;
     mLock.unlock();
 
     ANativeWindowBuffer *anwBuffer = container_of(buffer.buffer, ANativeWindowBuffer, handle);
@@ -244,7 +245,7 @@
         if (mDropBuffers) {
             ALOGV("%s: Dropping a frame for stream %d.", __FUNCTION__, mId);
         } else if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
-            ALOGW("%s: A frame is dropped for stream %d due to buffer error.", __FUNCTION__, mId);
+            ALOGV("%s: A frame is dropped for stream %d due to buffer error.", __FUNCTION__, mId);
         } else {
             ALOGE("%s: Stream %d: timestamp shouldn't be 0", __FUNCTION__, mId);
         }
@@ -252,7 +253,7 @@
         res = currentConsumer->cancelBuffer(currentConsumer.get(),
                 anwBuffer,
                 anwReleaseFence);
-        if (res != OK) {
+        if (shouldLogError(res, state)) {
             ALOGE("%s: Stream %d: Error cancelling buffer to native window:"
                   " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
         }
@@ -284,9 +285,9 @@
         }
 
         res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence, surface_ids);
-        if (res != OK) {
-            ALOGE("%s: Stream %d: Error queueing buffer to native window: "
-                  "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
+        if (shouldLogError(res, state)) {
+            ALOGE("%s: Stream %d: Error queueing buffer to native window:"
+                  " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
         }
     }
     mLock.lock();
@@ -534,10 +535,11 @@
             // successful return.
             *anb = gb.get();
             res = mConsumer->attachBuffer(*anb);
-            if (res != OK) {
+            if (shouldLogError(res, mState)) {
                 ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
                         __FUNCTION__, mId, strerror(-res), res);
-
+            }
+            if (res != OK) {
                 checkRetAndSetAbandonedLocked(res);
                 return res;
             }
@@ -592,9 +594,10 @@
                 ALOGV("Stream %d: Attached new buffer", getId());
 
                 if (res != OK) {
-                    ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
-                            __FUNCTION__, mId, strerror(-res), res);
-
+                    if (shouldLogError(res, mState)) {
+                        ALOGE("%s: Stream %d: Can't attach the output buffer to this surface:"
+                                " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
+                    }
                     checkRetAndSetAbandonedLocked(res);
                     return res;
                 }
@@ -604,9 +607,10 @@
                 return res;
             }
         } else if (res != OK) {
-            ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
-                    __FUNCTION__, mId, strerror(-res), res);
-
+            if (shouldLogError(res, mState)) {
+                ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
+                        __FUNCTION__, mId, strerror(-res), res);
+            }
             checkRetAndSetAbandonedLocked(res);
             return res;
         }
@@ -639,6 +643,16 @@
     }
 }
 
+bool Camera3OutputStream::shouldLogError(status_t res, StreamState state) {
+    if (res == OK) {
+        return false;
+    }
+    if ((res == DEAD_OBJECT || res == NO_INIT) && state == STATE_ABANDONED) {
+        return false;
+    }
+    return true;
+}
+
 status_t Camera3OutputStream::disconnectLocked() {
     status_t res;
 
@@ -838,7 +852,9 @@
         ALOGW("%s: the released buffer has already been freed by the buffer queue!", __FUNCTION__);
     } else if (res != OK) {
         // Treat other errors as abandonment
-        ALOGE("%s: detach next buffer failed: %s (%d).", __FUNCTION__, strerror(-res), res);
+        if (shouldLogError(res, mState)) {
+            ALOGE("%s: detach next buffer failed: %s (%d).", __FUNCTION__, strerror(-res), res);
+        }
         mState = STATE_ABANDONED;
         return res;
     }
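
(Illustrative sketch, not part of the patch.) The Camera3OutputStream hunks above gate the existing ALOGE calls behind shouldLogError() so that DEAD_OBJECT/NO_INIT failures on a stream already in STATE_ABANDONED no longer flood the log; note that the state is snapshotted (StreamState state = mState) before the lock is dropped so the check sees a consistent value. A small standalone sketch of the pattern; only the predicate mirrors shouldLogError(), the status codes and types are stand-ins:

// Standalone sketch of the "log only if it is unexpected" pattern.
#include <cstdio>

using status_t = int;
constexpr status_t OK = 0, DEAD_OBJECT = -32, NO_INIT = -19;  // illustrative values
enum StreamState { STATE_CONFIGURED, STATE_ABANDONED };

static bool shouldLogError(status_t res, StreamState state) {
    if (res == OK) return false;
    // An abandoned consumer is expected to fail with DEAD_OBJECT/NO_INIT; stay quiet.
    if ((res == DEAD_OBJECT || res == NO_INIT) && state == STATE_ABANDONED) return false;
    return true;
}

int main() {
    StreamState state = STATE_ABANDONED;  // snapshot taken while the lock is held
    status_t res = DEAD_OBJECT;           // e.g. returned by queueBuffer/cancelBuffer
    if (shouldLogError(res, state)) {
        std::fprintf(stderr, "Error returning buffer: %d\n", res);
    }
    return 0;
}
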
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 30fc2f7..729c655 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -317,6 +317,10 @@
     // Check return status of IGBP calls and set abandoned state accordingly
     void checkRetAndSetAbandonedLocked(status_t res);
 
+    // If the status indicates an abandoned stream, only log when state hasn't been updated to
+    // STATE_ABANDONED
+    static bool shouldLogError(status_t res, StreamState state);
+
     static const int32_t kDequeueLatencyBinSize = 5; // in ms
     CameraLatencyHistogram mDequeueBufferLatency;
 
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 5eb6a23..3d21029 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -458,7 +458,7 @@
     // Zero for formats with fixed buffer size for given dimensions.
     const size_t mMaxSize;
 
-    enum {
+    enum StreamState {
         STATE_ERROR,
         STATE_CONSTRUCTED,
         STATE_IN_CONFIG,
diff --git a/services/camera/libcameraservice/hidl/Convert.cpp b/services/camera/libcameraservice/hidl/Convert.cpp
index c2ed23a..866c3b5 100644
--- a/services/camera/libcameraservice/hidl/Convert.cpp
+++ b/services/camera/libcameraservice/hidl/Convert.cpp
@@ -157,6 +157,8 @@
     hCaptureResultExtras.frameNumber = captureResultExtras.frameNumber;
     hCaptureResultExtras.partialResultCount = captureResultExtras.partialResultCount;
     hCaptureResultExtras.errorStreamId = captureResultExtras.errorStreamId;
+    hCaptureResultExtras.errorPhysicalCameraId = hidl_string(String8(
+            captureResultExtras.errorPhysicalCameraId).string());
     return hCaptureResultExtras;
 }
 
diff --git a/services/mediaextractor/mediaextractor.rc b/services/mediaextractor/mediaextractor.rc
index 6b2d0a5..5fc2941 100644
--- a/services/mediaextractor/mediaextractor.rc
+++ b/services/mediaextractor/mediaextractor.rc
@@ -2,7 +2,5 @@
     class main
     user mediaex
     group drmrpc mediadrm
-    # TODO(b/123275379): Remove updatable when http://aosp/878198 has landed
-    updatable
     ioprio rt 4
     writepid /dev/cpuset/foreground/tasks