Merge "Fix typo in the output binary" into qt-dev
am: aba76c4d48

Change-Id: Ic4993bedf522a1cc986aeb747bb3a1514c970ec1
diff --git a/apex/ld.config.txt b/apex/ld.config.txt
index a5937fd..af8ec06 100644
--- a/apex/ld.config.txt
+++ b/apex/ld.config.txt
@@ -37,9 +37,11 @@
 
 namespace.platform.isolated = true
 
-namespace.platform.search.paths = /system/${LIB}
+namespace.platform.search.paths  = /system/${LIB}
+namespace.platform.search.paths += /apex/com.android.runtime/${LIB}
 namespace.platform.asan.search.paths  = /data/asan/system/${LIB}
 namespace.platform.asan.search.paths +=           /system/${LIB}
+namespace.platform.asan.search.paths += /apex/com.android.runtime/${LIB}
 
 # /system/lib/libc.so, etc. are symlinks to /apex/com.android.runtime/lib/bionic/libc.so, etc.
 # Add /apex/... path to the permitted paths because the linker uses realpath(3)
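
The two new search.paths entries work together with the permitted-path comment above: the bionic libraries under /system/${LIB} are symlinks into the runtime APEX, and the linker canonicalizes a library path with realpath(3) before checking it against the namespace's permitted paths. A minimal standalone sketch of that canonicalization (the device paths are illustrative assumptions, not part of this change):

// Illustrative sketch only: the dynamic linker canonicalizes a library path
// with realpath(3) before matching it against the namespace's permitted paths,
// which is why the /apex/... prefix must be permitted as well.
#include <climits>
#include <cstdio>
#include <cstdlib>

int main() {
    char resolved[PATH_MAX];
    // Hypothetical device path; /system/lib64/libc.so is assumed to be a
    // symlink into /apex/com.android.runtime/lib64/bionic/.
    if (realpath("/system/lib64/libc.so", resolved) != nullptr) {
        std::printf("%s\n", resolved);  // e.g. /apex/com.android.runtime/lib64/bionic/libc.so
    }
    return 0;
}
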
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index 3328a85..9e034c4 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -138,6 +138,7 @@
 
   private:
     friend ACameraCaptureSession;
+    friend ACameraDevice;
 
     camera_status_t checkCameraClosedOrErrorLocked() const;
 
@@ -387,7 +388,6 @@
             mDevice(new android::acam::CameraDevice(id, cb, std::move(chars), this)) {}
 
     ~ACameraDevice();
-
     /*******************
      * NDK public APIs *
      *******************/
diff --git a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
index 37de30a..7ab0124 100644
--- a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
+++ b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
@@ -24,6 +24,7 @@
 #include <algorithm>
 #include <mutex>
 #include <string>
+#include <variant>
 #include <vector>
 #include <stdio.h>
 #include <stdio.h>
@@ -49,6 +50,7 @@
 static constexpr int kTestImageFormat = AIMAGE_FORMAT_YUV_420_888;
 
 using android::hardware::camera::common::V1_0::helper::VendorTagDescriptorCache;
+using ConfiguredWindows = std::set<native_handle_t *>;
 
 class CameraHelper {
    public:
@@ -60,9 +62,12 @@
         const char* physicalCameraId;
         native_handle_t* anw;
     };
-    int initCamera(native_handle_t* imgReaderAnw,
+
+    // Retaining the error code in case the caller needs to analyze it.
+    std::variant<int, ConfiguredWindows> initCamera(native_handle_t* imgReaderAnw,
             const std::vector<PhysicalImgReaderInfo>& physicalImgReaders,
             bool usePhysicalSettings) {
+        ConfiguredWindows configuredWindows;
         if (imgReaderAnw == nullptr) {
             ALOGE("Cannot initialize camera before image reader get initialized.");
             return -1;
@@ -78,7 +83,7 @@
         ret = ACameraManager_openCamera(mCameraManager, mCameraId, &mDeviceCb, &mDevice);
         if (ret != AMEDIA_OK || mDevice == nullptr) {
             ALOGE("Failed to open camera, ret=%d, mDevice=%p.", ret, mDevice);
-            return -1;
+            return ret;
         }
 
         // Create capture session
@@ -97,8 +102,9 @@
             ALOGE("ACaptureSessionOutputContainer_add failed, ret=%d", ret);
             return ret;
         }
-
+        configuredWindows.insert(mImgReaderAnw);
         std::vector<const char*> idPointerList;
+        std::set<const native_handle_t*> physicalStreamMap;
         for (auto& physicalStream : physicalImgReaders) {
             ACaptureSessionOutput* sessionOutput = nullptr;
             ret = ACaptureSessionPhysicalOutput_create(physicalStream.anw,
@@ -112,21 +118,25 @@
                 ALOGE("ACaptureSessionOutputContainer_add failed, ret=%d", ret);
                 return ret;
             }
-            mExtraOutputs.push_back(sessionOutput);
+            ret = ACameraDevice_isSessionConfigurationSupported(mDevice, mOutputs);
+            if (ret != ACAMERA_OK && ret != ACAMERA_ERROR_UNSUPPORTED_OPERATION) {
+                ALOGW("ACameraDevice_isSessionConfigurationSupported failed, ret=%d camera id %s",
+                      ret, mCameraId);
+                ACaptureSessionOutputContainer_remove(mOutputs, sessionOutput);
+                ACaptureSessionOutput_free(sessionOutput);
+                continue;
+            }
+            configuredWindows.insert(physicalStream.anw);
             // Assume there is at most one physical stream per physical camera.
             mPhysicalCameraIds.push_back(physicalStream.physicalCameraId);
             idPointerList.push_back(physicalStream.physicalCameraId);
+            physicalStreamMap.insert(physicalStream.anw);
+            mSessionPhysicalOutputs.push_back(sessionOutput);
         }
         ACameraIdList cameraIdList;
         cameraIdList.numCameras = idPointerList.size();
         cameraIdList.cameraIds = idPointerList.data();
 
-        ret = ACameraDevice_isSessionConfigurationSupported(mDevice, mOutputs);
-        if (ret != ACAMERA_OK && ret != ACAMERA_ERROR_UNSUPPORTED_OPERATION) {
-            ALOGE("ACameraDevice_isSessionConfigurationSupported failed, ret=%d", ret);
-            return ret;
-        }
-
         ret = ACameraDevice_createCaptureSession(mDevice, mOutputs, &mSessionCb, &mSession);
         if (ret != AMEDIA_OK) {
             ALOGE("ACameraDevice_createCaptureSession failed, ret=%d", ret);
@@ -157,6 +167,10 @@
         }
 
         for (auto& physicalStream : physicalImgReaders) {
+            if (physicalStreamMap.find(physicalStream.anw) == physicalStreamMap.end()) {
+                ALOGI("Skipping physicalStream anw=%p", physicalStream.anw);
+                continue;
+            }
             ACameraOutputTarget* outputTarget = nullptr;
             ret = ACameraOutputTarget_create(physicalStream.anw, &outputTarget);
             if (ret != AMEDIA_OK) {
@@ -168,11 +182,11 @@
                 ALOGE("ACaptureRequest_addTarget failed, ret=%d", ret);
                 return ret;
             }
-            mReqExtraOutputs.push_back(outputTarget);
+            mReqPhysicalOutputs.push_back(outputTarget);
         }
 
         mIsCameraReady = true;
-        return 0;
+        return configuredWindows;
     }
 
 
@@ -184,10 +198,10 @@
             ACameraOutputTarget_free(mReqImgReaderOutput);
             mReqImgReaderOutput = nullptr;
         }
-        for (auto& outputTarget : mReqExtraOutputs) {
+        for (auto& outputTarget : mReqPhysicalOutputs) {
             ACameraOutputTarget_free(outputTarget);
         }
-        mReqExtraOutputs.clear();
+        mReqPhysicalOutputs.clear();
         if (mStillRequest) {
             ACaptureRequest_free(mStillRequest);
             mStillRequest = nullptr;
@@ -201,10 +215,10 @@
             ACaptureSessionOutput_free(mImgReaderOutput);
             mImgReaderOutput = nullptr;
         }
-        for (auto& extraOutput : mExtraOutputs) {
+        for (auto& extraOutput : mSessionPhysicalOutputs) {
             ACaptureSessionOutput_free(extraOutput);
         }
-        mExtraOutputs.clear();
+        mSessionPhysicalOutputs.clear();
         if (mOutputs) {
             ACaptureSessionOutputContainer_free(mOutputs);
             mOutputs = nullptr;
@@ -262,13 +276,13 @@
     // Capture session
     ACaptureSessionOutputContainer* mOutputs = nullptr;
     ACaptureSessionOutput* mImgReaderOutput = nullptr;
-    std::vector<ACaptureSessionOutput*> mExtraOutputs;
+    std::vector<ACaptureSessionOutput*> mSessionPhysicalOutputs;
 
     ACameraCaptureSession* mSession = nullptr;
     // Capture request
     ACaptureRequest* mStillRequest = nullptr;
     ACameraOutputTarget* mReqImgReaderOutput = nullptr;
-    std::vector<ACameraOutputTarget*> mReqExtraOutputs;
+    std::vector<ACameraOutputTarget*> mReqPhysicalOutputs;
 
     bool mIsCameraReady = false;
     const char* mCameraId;
@@ -581,9 +595,11 @@
         }
 
         CameraHelper cameraHelper(id, mCameraManager);
-        ret = cameraHelper.initCamera(testCase.getNativeWindow(),
-                {}/*physicalImageReaders*/, false/*usePhysicalSettings*/);
-        if (ret < 0) {
+        std::variant<int, ConfiguredWindows> retInit =
+                cameraHelper.initCamera(testCase.getNativeWindow(), {}/*physicalImageReaders*/,
+                                        false/*usePhysicalSettings*/);
+        int *retp = std::get_if<int>(&retInit);
+        if (retp) {
             ALOGE("Unable to initialize camera helper");
             return false;
         }
@@ -751,10 +767,15 @@
         physicalImgReaderInfo.push_back({physicalCameraIds[0], testCases[1]->getNativeWindow()});
         physicalImgReaderInfo.push_back({physicalCameraIds[1], testCases[2]->getNativeWindow()});
 
-        int ret = cameraHelper.initCamera(testCases[0]->getNativeWindow(),
-                physicalImgReaderInfo, usePhysicalSettings);
-        ASSERT_EQ(ret, 0);
-
+        std::variant<int, ConfiguredWindows> retInit =
+                cameraHelper.initCamera(testCases[0]->getNativeWindow(), physicalImgReaderInfo,
+                                        usePhysicalSettings);
+        int *retp = std::get_if<int>(&retInit);
+        ASSERT_EQ(retp, nullptr);
+        ConfiguredWindows *configuredWindowsp = std::get_if<ConfiguredWindows>(&retInit);
+        ASSERT_NE(configuredWindowsp, nullptr);
+        ASSERT_LE(configuredWindowsp->size(), testCases.size());
+        int ret = 0;
         if (!cameraHelper.isCameraReady()) {
             ALOGW("Camera is not ready after successful initialization. It's either due to camera "
                   "on board lacks BACKWARDS_COMPATIBLE capability or the device does not have "
@@ -776,9 +797,15 @@
                 break;
             }
         }
-        ASSERT_EQ(testCases[0]->getAcquiredImageCount(), pictureCount);
-        ASSERT_EQ(testCases[1]->getAcquiredImageCount(), pictureCount);
-        ASSERT_EQ(testCases[2]->getAcquiredImageCount(), pictureCount);
+        for(auto &testCase : testCases) {
+            auto it = configuredWindowsp->find(testCase->getNativeWindow());
+            if (it == configuredWindowsp->end()) {
+                continue;
+            }
+            ALOGI("Testing window %p", testCase->getNativeWindow());
+            ASSERT_EQ(testCase->getAcquiredImageCount(), pictureCount);
+        }
+
         ASSERT_TRUE(cameraHelper.checkCallbacks(pictureCount));
 
         ACameraMetadata_free(staticMetadata);
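
initCamera() above now returns std::variant<int, ConfiguredWindows>: the int alternative preserves the original error code, while the set reports which windows were actually configured, since unsupported physical streams are skipped instead of failing the whole call. A self-contained sketch of the caller-side pattern the tests use, with simplified stand-in types (no real camera objects involved):

#include <cstdio>
#include <set>
#include <variant>

// Simplified stand-ins for the test's types; the real code uses
// native_handle_t* windows and camera/media status codes.
using Window = int;
using ConfiguredWindows = std::set<Window>;

std::variant<int, ConfiguredWindows> initCameraSketch(bool fail) {
    if (fail) {
        return -1;                   // error path: keep the status code
    }
    return ConfiguredWindows{1, 2};  // success path: windows actually configured
}

int main() {
    auto result = initCameraSketch(/*fail=*/false);
    if (const int* err = std::get_if<int>(&result)) {
        std::printf("initCamera failed, err=%d\n", *err);
    } else {
        const auto& windows = std::get<ConfiguredWindows>(result);
        // May be a subset of the requested streams, so tests only assert on
        // windows that are present in the set.
        std::printf("%zu window(s) configured\n", windows.size());
    }
    return 0;
}
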
diff --git a/media/codec2/sfplugin/Codec2InfoBuilder.cpp b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
index c54c601..6b75eba 100644
--- a/media/codec2/sfplugin/Codec2InfoBuilder.cpp
+++ b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
@@ -194,7 +194,8 @@
     // TODO: get this from intf() as well, but how do we map them to
     // MediaCodec color formats?
     bool encoder = trait.kind == C2Component::KIND_ENCODER;
-    if (mediaType.find("video") != std::string::npos) {
+    if (mediaType.find("video") != std::string::npos
+            || mediaType.find("image") != std::string::npos) {
         // vendor video codecs prefer opaque format
         if (trait.name.find("android") == std::string::npos) {
             caps->addColorFormat(COLOR_FormatSurface);
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index 747d0e1..d26b352 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -108,17 +108,24 @@
     case STATE_RUNNING:
         if (nanosDelta < expectedNanosDelta) {
             // Earlier than expected timestamp.
-            // This data is probably more accurate so use it.
-            // or we may be drifting due to a slow HW clock.
-//            ALOGD("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
-//                 (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
+            // This data is probably more accurate, so use it.
+            // Or we may be drifting due to a fast HW clock.
+//            int microsDelta = (int) (nanosDelta / 1000);
+//            int expectedMicrosDelta = (int) (expectedNanosDelta / 1000);
+//            ALOGD("processTimestamp() - STATE_RUNNING - %7d < %7d so %4d micros EARLY",
+//                 microsDelta, expectedMicrosDelta, (expectedMicrosDelta - microsDelta));
+
             setPositionAndTime(framePosition, nanoTime);
         } else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
             // Later than expected timestamp.
-//            ALOGD("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
-//                 (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
-//                 (int) (mMaxLatenessInNanos / 1000));
-            setPositionAndTime(framePosition - mFramesPerBurst,  nanoTime - mMaxLatenessInNanos);
+//            int microsDelta = (int) (nanosDelta / 1000);
+//            int expectedMicrosDeadline = (int) ((expectedNanosDelta + mMaxLatenessInNanos) / 1000);
+//            ALOGD("processTimestamp() - STATE_RUNNING - %7d > %7d so %4d micros LATE",
+//                  microsDelta, expectedMicrosDeadline, (microsDelta - expectedMicrosDeadline));
+
+            // When we are late, it may be because of preemption in the kernel, or
+            // we may be drifting due to a slow HW clock.
+            setPositionAndTime(framePosition,  nanoTime - mMaxLatenessInNanos);
         }
         break;
     default:
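
The change to the late-timestamp branch keeps the reported framePosition and only pulls the anchor time back by mMaxLatenessInNanos, instead of also stepping the position back by one burst. A toy model of the two branches with simplified member names and made-up numbers:

#include <cstdint>
#include <cstdio>

// Toy model of the STATE_RUNNING branches above (names and values are
// simplified stand-ins for the IsochronousClockModel members).
struct ClockModelSketch {
    int64_t maxLatenessNanos = 1000000;  // assumed allowance, ~1 ms
    int64_t anchorPosition = 0;
    int64_t anchorTimeNanos = 0;

    void process(int64_t framePosition, int64_t nanoTime, int64_t expectedNanoTime) {
        if (nanoTime < expectedNanoTime) {
            // Early: probably a more accurate measurement, or a fast HW clock.
            anchorPosition = framePosition;
            anchorTimeNanos = nanoTime;
        } else if (nanoTime > expectedNanoTime + maxLatenessNanos) {
            // Late: kernel preemption or a slow HW clock; keep the position
            // but credit only the allowed lateness to the model.
            anchorPosition = framePosition;
            anchorTimeNanos = nanoTime - maxLatenessNanos;
        }
    }
};

int main() {
    ClockModelSketch model;
    model.process(/*framePosition=*/4800,
                  /*nanoTime=*/11500000,
                  /*expectedNanoTime=*/10000000);
    // Lateness of 1.5 ms is clamped to the 1 ms allowance: anchor time 10.5 ms.
    std::printf("anchor position %lld, anchor time %lld ns\n",
                (long long) model.anchorPosition, (long long) model.anchorTimeNanos);
    return 0;
}
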
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 3fbbc09..10dda19 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -302,6 +302,8 @@
         for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
             pContext->pBundledContext->bandGaindB[i] = EQNB_5BandSoftPresets[i];
         }
+        pContext->pBundledContext->effectProcessCalled      = 0;
+        pContext->pBundledContext->effectInDrain            = 0;
 
         ALOGV("\tEffectCreate - Calling LvmBundle_init");
         ret = LvmBundle_init(pContext);
@@ -394,6 +396,8 @@
 
     // Clear the instantiated flag for the effect
     // protect against the case where an effect is un-instantiated without being disabled
+
+    int &effectInDrain = pContext->pBundledContext->effectInDrain;
     if(pContext->EffectType == LVM_BASS_BOOST) {
         ALOGV("\tEffectRelease LVM_BASS_BOOST Clearing global intstantiated flag");
         pSessionContext->bBassInstantiated = LVM_FALSE;
@@ -418,12 +422,16 @@
     } else if(pContext->EffectType == LVM_VOLUME) {
         ALOGV("\tEffectRelease LVM_VOLUME Clearing global intstantiated flag");
         pSessionContext->bVolumeInstantiated = LVM_FALSE;
-        if (pContext->pBundledContext->bVolumeEnabled == LVM_TRUE){
+        // There is no samplesToExitCount for volume, so we also use the drain flag to check
+        // whether we should decrement the number of effects enabled.
+        if (pContext->pBundledContext->bVolumeEnabled == LVM_TRUE
+                || (effectInDrain & 1 << LVM_VOLUME) != 0) {
             pContext->pBundledContext->NumberEffectsEnabled--;
         }
     } else {
         ALOGV("\tLVM_ERROR : EffectRelease : Unsupported effect\n\n\n\n\n\n\n");
     }
+    effectInDrain &= ~(1 << pContext->EffectType); // no need to drain if released
 
     // Disable effect, in this case ignore errors (return codes)
     // if an effect has already been disabled
@@ -3124,8 +3132,9 @@
 
 int Effect_setEnabled(EffectContext *pContext, bool enabled)
 {
-    ALOGV("\tEffect_setEnabled() type %d, enabled %d", pContext->EffectType, enabled);
-
+    ALOGV("%s effectType %d, enabled %d, currently enabled %d", __func__,
+            pContext->EffectType, enabled, pContext->pBundledContext->NumberEffectsEnabled);
+    int &effectInDrain = pContext->pBundledContext->effectInDrain;
     if (enabled) {
         // Bass boost or Virtualizer can be temporarily disabled if playing over device speaker due
         // to their nature.
@@ -3139,6 +3148,7 @@
                 if(pContext->pBundledContext->SamplesToExitCountBb <= 0){
                     pContext->pBundledContext->NumberEffectsEnabled++;
                 }
+                effectInDrain &= ~(1 << LVM_BASS_BOOST);
                 pContext->pBundledContext->SamplesToExitCountBb =
                      (LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
                 pContext->pBundledContext->bBassEnabled = LVM_TRUE;
@@ -3152,6 +3162,7 @@
                 if(pContext->pBundledContext->SamplesToExitCountEq <= 0){
                     pContext->pBundledContext->NumberEffectsEnabled++;
                 }
+                effectInDrain &= ~(1 << LVM_EQUALIZER);
                 pContext->pBundledContext->SamplesToExitCountEq =
                      (LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
                 pContext->pBundledContext->bEqualizerEnabled = LVM_TRUE;
@@ -3164,6 +3175,7 @@
                 if(pContext->pBundledContext->SamplesToExitCountVirt <= 0){
                     pContext->pBundledContext->NumberEffectsEnabled++;
                 }
+                effectInDrain &= ~(1 << LVM_VIRTUALIZER);
                 pContext->pBundledContext->SamplesToExitCountVirt =
                      (LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
                 pContext->pBundledContext->bVirtualizerEnabled = LVM_TRUE;
@@ -3174,7 +3186,10 @@
                     ALOGV("\tEffect_setEnabled() LVM_VOLUME is already enabled");
                     return -EINVAL;
                 }
-                pContext->pBundledContext->NumberEffectsEnabled++;
+                if ((effectInDrain & 1 << LVM_VOLUME) == 0) {
+                    pContext->pBundledContext->NumberEffectsEnabled++;
+                }
+                effectInDrain &= ~(1 << LVM_VOLUME);
                 pContext->pBundledContext->bVolumeEnabled = LVM_TRUE;
                 break;
             default:
@@ -3192,6 +3207,7 @@
                     return -EINVAL;
                 }
                 pContext->pBundledContext->bBassEnabled = LVM_FALSE;
+                effectInDrain |= 1 << LVM_BASS_BOOST;
                 break;
             case LVM_EQUALIZER:
                 if (pContext->pBundledContext->bEqualizerEnabled == LVM_FALSE) {
@@ -3199,6 +3215,7 @@
                     return -EINVAL;
                 }
                 pContext->pBundledContext->bEqualizerEnabled = LVM_FALSE;
+                effectInDrain |= 1 << LVM_EQUALIZER;
                 break;
             case LVM_VIRTUALIZER:
                 if (pContext->pBundledContext->bVirtualizerEnabled == LVM_FALSE) {
@@ -3206,6 +3223,7 @@
                     return -EINVAL;
                 }
                 pContext->pBundledContext->bVirtualizerEnabled = LVM_FALSE;
+                effectInDrain |= 1 << LVM_VIRTUALIZER;
                 break;
             case LVM_VOLUME:
                 if (pContext->pBundledContext->bVolumeEnabled == LVM_FALSE) {
@@ -3213,6 +3231,7 @@
                     return -EINVAL;
                 }
                 pContext->pBundledContext->bVolumeEnabled = LVM_FALSE;
+                effectInDrain |= 1 << LVM_VOLUME;
                 break;
             default:
                 ALOGV("\tEffect_setEnabled() invalid effect type");
@@ -3283,6 +3302,38 @@
         ALOGV("\tLVM_ERROR : Effect_process() ERROR NULL INPUT POINTER OR FRAME COUNT IS WRONG");
         return -EINVAL;
     }
+
+    int &effectProcessCalled = pContext->pBundledContext->effectProcessCalled;
+    int &effectInDrain = pContext->pBundledContext->effectInDrain;
+    if ((effectProcessCalled & 1 << pContext->EffectType) != 0) {
+        ALOGW("Effect %d already called", pContext->EffectType);
+        const int undrainedEffects = effectInDrain & ~effectProcessCalled;
+        if ((undrainedEffects & 1 << LVM_BASS_BOOST) != 0) {
+            ALOGW("Draining BASS_BOOST");
+            pContext->pBundledContext->SamplesToExitCountBb = 0;
+            --pContext->pBundledContext->NumberEffectsEnabled;
+            effectInDrain &= ~(1 << LVM_BASS_BOOST);
+        }
+        if ((undrainedEffects & 1 << LVM_EQUALIZER) != 0) {
+            ALOGW("Draining EQUALIZER");
+            pContext->pBundledContext->SamplesToExitCountEq = 0;
+            --pContext->pBundledContext->NumberEffectsEnabled;
+            effectInDrain &= ~(1 << LVM_EQUALIZER);
+        }
+        if ((undrainedEffects & 1 << LVM_VIRTUALIZER) != 0) {
+            ALOGW("Draining VIRTUALIZER");
+            pContext->pBundledContext->SamplesToExitCountVirt = 0;
+            --pContext->pBundledContext->NumberEffectsEnabled;
+            effectInDrain &= ~(1 << LVM_VIRTUALIZER);
+        }
+        if ((undrainedEffects & 1 << LVM_VOLUME) != 0) {
+            ALOGW("Draining VOLUME");
+            --pContext->pBundledContext->NumberEffectsEnabled;
+            effectInDrain &= ~(1 << LVM_VOLUME);
+        }
+    }
+    effectProcessCalled |= 1 << pContext->EffectType;
+
     if ((pContext->pBundledContext->bBassEnabled == LVM_FALSE)&&
         (pContext->EffectType == LVM_BASS_BOOST)){
         //ALOGV("\tEffect_process() LVM_BASS_BOOST Effect is not enabled");
@@ -3291,9 +3342,12 @@
             //ALOGV("\tEffect_process: Waiting to turn off BASS_BOOST, %d samples left",
             //    pContext->pBundledContext->SamplesToExitCountBb);
         }
-        if(pContext->pBundledContext->SamplesToExitCountBb <= 0) {
+        if (pContext->pBundledContext->SamplesToExitCountBb <= 0) {
             status = -ENODATA;
-            pContext->pBundledContext->NumberEffectsEnabled--;
+            if ((effectInDrain & 1 << LVM_BASS_BOOST) != 0) {
+                pContext->pBundledContext->NumberEffectsEnabled--;
+                effectInDrain &= ~(1 << LVM_BASS_BOOST);
+            }
             ALOGV("\tEffect_process() this is the last frame for LVM_BASS_BOOST");
         }
     }
@@ -3301,7 +3355,10 @@
         (pContext->EffectType == LVM_VOLUME)){
         //ALOGV("\tEffect_process() LVM_VOLUME Effect is not enabled");
         status = -ENODATA;
-        pContext->pBundledContext->NumberEffectsEnabled--;
+        if ((effectInDrain & 1 << LVM_VOLUME) != 0) {
+            pContext->pBundledContext->NumberEffectsEnabled--;
+            effectInDrain &= ~(1 << LVM_VOLUME);
+        }
     }
     if ((pContext->pBundledContext->bEqualizerEnabled == LVM_FALSE)&&
         (pContext->EffectType == LVM_EQUALIZER)){
@@ -3311,9 +3368,12 @@
             //ALOGV("\tEffect_process: Waiting to turn off EQUALIZER, %d samples left",
             //    pContext->pBundledContext->SamplesToExitCountEq);
         }
-        if(pContext->pBundledContext->SamplesToExitCountEq <= 0) {
+        if (pContext->pBundledContext->SamplesToExitCountEq <= 0) {
             status = -ENODATA;
-            pContext->pBundledContext->NumberEffectsEnabled--;
+            if ((effectInDrain & 1 << LVM_EQUALIZER) != 0) {
+                pContext->pBundledContext->NumberEffectsEnabled--;
+                effectInDrain &= ~(1 << LVM_EQUALIZER);
+            }
             ALOGV("\tEffect_process() this is the last frame for LVM_EQUALIZER");
         }
     }
@@ -3326,9 +3386,12 @@
             //ALOGV("\tEffect_process: Waiting for to turn off VIRTUALIZER, %d samples left",
             //    pContext->pBundledContext->SamplesToExitCountVirt);
         }
-        if(pContext->pBundledContext->SamplesToExitCountVirt <= 0) {
+        if (pContext->pBundledContext->SamplesToExitCountVirt <= 0) {
             status = -ENODATA;
-            pContext->pBundledContext->NumberEffectsEnabled--;
+            if ((effectInDrain & 1 << LVM_VIRTUALIZER) != 0) {
+                pContext->pBundledContext->NumberEffectsEnabled--;
+                effectInDrain &= ~(1 << LVM_VIRTUALIZER);
+            }
             ALOGV("\tEffect_process() this is the last frame for LVM_VIRTUALIZER");
         }
     }
@@ -3337,8 +3400,18 @@
         pContext->pBundledContext->NumberEffectsCalled++;
     }
 
-    if(pContext->pBundledContext->NumberEffectsCalled ==
-       pContext->pBundledContext->NumberEffectsEnabled){
+    if (pContext->pBundledContext->NumberEffectsCalled >=
+            pContext->pBundledContext->NumberEffectsEnabled) {
+
+        // We expect the # effects called to be equal to # effects enabled in sequence (including
+        // draining effects).  Warn if this is not the case due to inconsistent calls.
+        ALOGW_IF(pContext->pBundledContext->NumberEffectsCalled >
+                pContext->pBundledContext->NumberEffectsEnabled,
+                "%s Number of effects called %d is greater than number of effects enabled %d",
+                __func__, pContext->pBundledContext->NumberEffectsCalled,
+                pContext->pBundledContext->NumberEffectsEnabled);
+        effectProcessCalled = 0; // reset our consistency check.
+
         //ALOGV("\tEffect_process     Calling process with %d effects enabled, %d called: Effect %d",
         //pContext->pBundledContext->NumberEffectsEnabled,
         //pContext->pBundledContext->NumberEffectsCalled, pContext->EffectType);
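
The drain bookkeeping above relies on two bitmasks: effectInDrain marks effects that were disabled but whose NumberEffectsEnabled decrement is still pending, and effectProcessCalled detects a repeated process() call so pending drains can be resolved. A standalone model of how effectInDrain keeps the enabled count consistent across enable/disable/drain transitions (the effect bit positions are assumptions mirroring the 1 << EffectType usage; the effectProcessCalled check is omitted):

#include <cstdio>

// Standalone model of the drain bookkeeping (illustration only; the effect
// bit positions below are assumptions mirroring the 1 << EffectType usage).
enum Effect { BASS_BOOST = 0, VIRTUALIZER = 1, EQUALIZER = 2, VOLUME = 3 };

struct BundleSketch {
    int numberEffectsEnabled = 0;
    int effectInDrain = 0;        // bit set => disabled, decrement still pending

    void setEnabled(Effect e, bool enabled) {
        if (enabled) {
            if ((effectInDrain & (1 << e)) == 0) {
                ++numberEffectsEnabled;   // not counted yet
            }
            effectInDrain &= ~(1 << e);   // cancel any pending drain
        } else {
            effectInDrain |= 1 << e;      // drain completes on a later process()
        }
    }

    void drain(Effect e) {                // called when the effect finishes draining
        if (effectInDrain & (1 << e)) {
            --numberEffectsEnabled;
            effectInDrain &= ~(1 << e);
        }
    }
};

int main() {
    BundleSketch b;
    b.setEnabled(VOLUME, true);    // enabled: count = 1
    b.setEnabled(VOLUME, false);   // disabled: count stays 1 until drained
    b.setEnabled(VOLUME, true);    // re-enabled while draining: no double count
    b.setEnabled(VOLUME, false);
    b.drain(VOLUME);               // drain completes exactly once: count = 0
    b.drain(VOLUME);               // second drain is a no-op
    std::printf("enabled effects: %d\n", b.numberEffectsEnabled);  // 0
    return 0;
}
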
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
index 6af4554..e4aacd0 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
@@ -110,6 +110,14 @@
 #ifdef SUPPORT_MC
     LVM_INT32                       ChMask;
 #endif
+
+    /* Bitmask indicating whether a drain is in progress after disabling the effect.
+       The bit corresponding to an effect is set by 1 << lvm_effect_en. */
+    int                             effectInDrain;
+
+    /* Bitmask indicating whether process() was called for a particular effect.
+       The bit corresponding to an effect is set by 1 << lvm_effect_en. */
+    int                             effectProcessCalled;
 };
 
 /* SessionContext : One session */
diff --git a/media/libstagefright/timedtext/TextDescriptions2.cpp b/media/libstagefright/timedtext/TextDescriptions2.cpp
index fd42d3a..f48eacc 100644
--- a/media/libstagefright/timedtext/TextDescriptions2.cpp
+++ b/media/libstagefright/timedtext/TextDescriptions2.cpp
@@ -145,7 +145,7 @@
         tmpData += 8;
         size_t remaining = size - 8;
 
-        if (chunkSize <= 8 || size < chunkSize) {
+        if (size < chunkSize) {
             return OK;
         }
         switch(chunkType) {
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index ce408be..bcd351d 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -3956,6 +3956,32 @@
     return INVALID_OPERATION;
 }
 
+// For dedicated VoIP outputs, let the HAL apply the stream volume. Track volume is
+// still applied by the mixer.
+// All tracks attached to a mixer with flag VOIP_RX are tied to the same
+// stream type STREAM_VOICE_CALL, so this will only change the HAL volume once even
+// if more than one track is active.
+status_t AudioFlinger::PlaybackThread::handleVoipVolume_l(float *volume)
+{
+    status_t result = NO_ERROR;
+    if ((mOutput->flags & AUDIO_OUTPUT_FLAG_VOIP_RX) != 0) {
+        if (*volume != mLeftVolFloat) {
+            result = mOutput->stream->setVolume(*volume, *volume);
+            ALOGE_IF(result != OK,
+                     "Error when setting output stream volume: %d", result);
+            if (result == NO_ERROR) {
+                mLeftVolFloat = *volume;
+            }
+        }
+        // If the stream volume was successfully sent to the HAL, mLeftVolFloat == *volume here
+        // and we remove the stream volume contribution from the software volume.
+        if (mLeftVolFloat == *volume) {
+            *volume = 1.0f;
+        }
+    }
+    return result;
+}
+
 status_t AudioFlinger::MixerThread::createAudioPatch_l(const struct audio_patch *patch,
                                                           audio_patch_handle_t *handle)
 {
@@ -4758,22 +4784,25 @@
                     // no acknowledgement required for newly active tracks
                 }
                 sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
+                float volume;
+                if (track->isPlaybackRestricted() || mStreamTypes[track->streamType()].mute) {
+                    volume = 0.f;
+                } else {
+                    volume = masterVolume * mStreamTypes[track->streamType()].volume;
+                }
+
+                handleVoipVolume_l(&volume);
+
                 // cache the combined master volume and stream type volume for fast mixer; this
                 // lacks any synchronization or barrier so VolumeProvider may read a stale value
                 const float vh = track->getVolumeHandler()->getVolume(
-                        proxy->framesReleased()).first;
-                float volume;
-                if (track->isPlaybackRestricted()) {
-                    volume = 0.f;
-                } else {
-                    volume = masterVolume
-                        * mStreamTypes[track->streamType()].volume
-                        * vh;
-                }
+                    proxy->framesReleased()).first;
+                volume *= vh;
                 track->mCachedVolume = volume;
                 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
                 float vlf = volume * float_from_gain(gain_minifloat_unpack_left(vlr));
                 float vrf = volume * float_from_gain(gain_minifloat_unpack_right(vlr));
+
                 track->setFinalVolume((vlf + vrf) / 2.f);
                 ++fastTracks;
             } else {
@@ -4916,20 +4945,22 @@
             uint32_t vl, vr;       // in U8.24 integer format
             float vlf, vrf, vaf;   // in [0.0, 1.0] float format
             // read original volumes with volume control
-            float typeVolume = mStreamTypes[track->streamType()].volume;
-            float v = masterVolume * typeVolume;
+            float v = masterVolume * mStreamTypes[track->streamType()].volume;
             // Always fetch volumeshaper volume to ensure state is updated.
             const sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
             const float vh = track->getVolumeHandler()->getVolume(
                     track->mAudioTrackServerProxy->framesReleased()).first;
 
-            if (track->isPausing() || mStreamTypes[track->streamType()].mute
-                    || track->isPlaybackRestricted()) {
+            if (mStreamTypes[track->streamType()].mute || track->isPlaybackRestricted()) {
+                v = 0;
+            }
+
+            handleVoipVolume_l(&v);
+
+            if (track->isPausing()) {
                 vl = vr = 0;
                 vlf = vrf = vaf = 0.;
-                if (track->isPausing()) {
-                    track->setPaused();
-                }
+                track->setPaused();
             } else {
                 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
                 vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
@@ -4981,25 +5012,6 @@
                 track->mHasVolumeController = false;
             }
 
-            // For dedicated VoIP outputs, let the HAL apply the stream volume. Track volume is
-            // still applied by the mixer.
-            if ((mOutput->flags & AUDIO_OUTPUT_FLAG_VOIP_RX) != 0) {
-                v = mStreamTypes[track->streamType()].mute ? 0.0f : v;
-                if (v != mLeftVolFloat) {
-                    status_t result = mOutput->stream->setVolume(v, v);
-                    ALOGE_IF(result != OK, "Error when setting output stream volume: %d", result);
-                    if (result == OK) {
-                        mLeftVolFloat = v;
-                    }
-                }
-                // if stream volume was successfully sent to the HAL, mLeftVolFloat == v here and we
-                // remove stream volume contribution from software volume.
-                if (v != 0.0f && mLeftVolFloat == v) {
-                   vlf = min(1.0f, vlf / v);
-                   vrf = min(1.0f, vrf / v);
-                   vaf = min(1.0f, vaf / v);
-               }
-            }
             // XXX: these things DON'T need to be done each time
             mAudioMixer->setBufferProvider(trackId, track);
             mAudioMixer->enable(trackId);
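
handleVoipVolume_l() factors the VoIP stream-volume handling out of the per-track loop: when the output carries AUDIO_OUTPUT_FLAG_VOIP_RX and the HAL accepts the stream volume, the software volume is collapsed to 1.0 so only per-track gains remain in the mixer. A reduced sketch of that flow (the HAL call is assumed to succeed and the names are simplified stand-ins):

#include <cstdio>

// Reduced model of the new volume flow in prepareTracks_l (illustration only).
struct VoipOutputSketch {
    bool voipRx = true;          // stands in for AUDIO_OUTPUT_FLAG_VOIP_RX
    float halVolume = -1.f;      // stands in for mLeftVolFloat

    // Mirrors handleVoipVolume_l(): push the stream volume to the HAL and
    // remove it from the software (mixer) volume.
    void handleVoipVolume(float* volume) {
        if (!voipRx) return;
        if (*volume != halVolume) {
            halVolume = *volume;  // stream->setVolume(v, v) assumed to return OK
        }
        if (halVolume == *volume) {
            *volume = 1.0f;       // HAL now owns the stream contribution
        }
    }
};

int main() {
    VoipOutputSketch out;
    float masterVolume = 1.0f, streamVolume = 0.5f, shaperVolume = 0.8f;

    float v = masterVolume * streamVolume;   // as computed before handleVoipVolume_l
    out.handleVoipVolume(&v);
    v *= shaperVolume;                       // per-track contributions stay in software

    std::printf("HAL volume %.2f, mixer volume %.2f\n", out.halVolume, v);
    return 0;
}
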
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 336c2b4..fc8aa13 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -747,6 +747,7 @@
                 // is safe to do so. That will drop the final ref count and destroy the tracks.
     virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
                 void        removeTracks_l(const Vector< sp<Track> >& tracksToRemove);
+                status_t    handleVoipVolume_l(float *volume);
 
     // StreamOutHalInterfaceCallback implementation
     virtual     void        onWriteReady();
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 3e62102..048d0e6 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -1149,6 +1149,8 @@
                 clientPid,
                 states[states.size() - 1]);
 
+        resource_policy::ClientPriority clientPriority = clientDescriptor->getPriority();
+
         // Find clients that would be evicted
         auto evicted = mActiveClientManager.wouldEvict(clientDescriptor);
 
@@ -1166,8 +1168,7 @@
             String8 msg = String8::format("%s : DENIED connect device %s client for package %s "
                     "(PID %d, score %d state %d) due to eviction policy", curTime.string(),
                     cameraId.string(), packageName.string(), clientPid,
-                    priorityScores[priorityScores.size() - 1],
-                    states[states.size() - 1]);
+                    clientPriority.getScore(), clientPriority.getState());
 
             for (auto& i : incompatibleClients) {
                 msg.appendFormat("\n   - Blocked by existing device %s client for package %s"
@@ -1212,9 +1213,8 @@
                     i->getKey().string(), String8{clientSp->getPackageName()}.string(),
                     i->getOwnerId(), i->getPriority().getScore(),
                     i->getPriority().getState(), cameraId.string(),
-                    packageName.string(), clientPid,
-                    priorityScores[priorityScores.size() - 1],
-                    states[states.size() - 1]));
+                    packageName.string(), clientPid, clientPriority.getScore(),
+                    clientPriority.getState()));
 
             // Notify the client of disconnection
             clientSp->notifyError(hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
@@ -1348,14 +1348,19 @@
     Status ret = Status::ok();
     String8 id = String8(cameraId);
     sp<CameraDeviceClient> client = nullptr;
-
+    String16 clientPackageNameAdj = clientPackageName;
+    if (hardware::IPCThreadState::self()->isServingCall()) {
+        std::string vendorClient =
+                StringPrintf("vendor.client.pid<%d>", CameraThreadState::getCallingPid());
+        clientPackageNameAdj = String16(vendorClient.c_str());
+    }
     ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
             /*api1CameraId*/-1,
-            CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName,
+            CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageNameAdj,
             clientUid, USE_CALLING_PID, API_2, /*shimUpdateOnly*/ false, /*out*/client);
 
     if(!ret.isOk()) {
-        logRejected(id, CameraThreadState::getCallingPid(), String8(clientPackageName),
+        logRejected(id, CameraThreadState::getCallingPid(), String8(clientPackageNameAdj),
                 ret.toString8());
         return ret;
     }
@@ -2368,11 +2373,7 @@
         }
         mClientPackageName = packages[0];
     }
-    if (hardware::IPCThreadState::self()->isServingCall()) {
-        std::string vendorClient =
-                StringPrintf("vendor.client.pid<%d>", CameraThreadState::getCallingPid());
-        mClientPackageName = String16(vendorClient.c_str());
-    } else {
+    if (!hardware::IPCThreadState::self()->isServingCall()) {
         mAppOpsManager = std::make_unique<AppOpsManager>();
     }
 }
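
connectDevice() now substitutes a synthetic package name when hardware::IPCThreadState::self()->isServingCall() reports a vendor caller, before invoking connectHelper(), so the same adjusted name flows into the connection and into logRejected(). A trivial sketch of how that name is built (the format string comes from the change; the PID is illustrative):

#include <cstdio>
#include <string>

// Sketch of the synthetic client package name used for vendor callers.
std::string vendorClientName(int callingPid) {
    char buf[64];
    std::snprintf(buf, sizeof(buf), "vendor.client.pid<%d>", callingPid);
    return buf;
}

int main() {
    std::printf("%s\n", vendorClientName(/*callingPid=*/4321).c_str());
    // Prints: vendor.client.pid<4321>
    return 0;
}
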
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 9771f9e..a1a4958 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -29,6 +29,9 @@
 #define CLOGE(fmt, ...) ALOGE("Camera %s: %s: " fmt, mId.string(), __FUNCTION__, \
             ##__VA_ARGS__)
 
+#define CLOGW(fmt, ...) ALOGW("Camera %s: %s: " fmt, mId.string(), __FUNCTION__, \
+            ##__VA_ARGS__)
+
 // Convenience macros for transitioning to the error state
 #define SET_ERR(fmt, ...) setErrorState(   \
     "%s: " fmt, __FUNCTION__,              \
@@ -3267,14 +3270,19 @@
         ALOGVV("%s: removed frame %d from InFlightMap", __FUNCTION__, frameNumber);
      }
 
-    // Sanity check - if we have too many in-flight frames, something has
-    // likely gone wrong
-    if (!mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() > kInFlightWarnLimit) {
-        CLOGE("In-flight list too large: %zu", mInFlightMap.size());
-    } else if (mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() >
-            kInFlightWarnLimitHighSpeed) {
-        CLOGE("In-flight list too large for high speed configuration: %zu",
-                mInFlightMap.size());
+    // Sanity check - if we have too many in-flight frames with long total inflight duration,
+    // something has likely gone wrong. This might still be legitimate only if the application
+    // sends in a long burst of long-exposure requests.
+    if (mExpectedInflightDuration > kMinWarnInflightDuration) {
+        if (!mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() > kInFlightWarnLimit) {
+            CLOGW("In-flight list too large: %zu, total inflight duration %" PRIu64,
+                    mInFlightMap.size(), mExpectedInflightDuration);
+        } else if (mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() >
+                kInFlightWarnLimitHighSpeed) {
+            CLOGW("In-flight list too large for high speed configuration: %zu,"
+                    "total inflight duration %" PRIu64,
+                    mInFlightMap.size(), mExpectedInflightDuration);
+        }
     }
 }
 
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 6e8ac84..cae34ce 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -227,6 +227,7 @@
     static const size_t        kDumpLockAttempts  = 10;
     static const size_t        kDumpSleepDuration = 100000; // 0.10 sec
     static const nsecs_t       kActiveTimeout     = 500000000;  // 500 ms
+    static const nsecs_t       kMinWarnInflightDuration = 5000000000; // 5 s
     static const size_t        kInFlightWarnLimit = 30;
     static const size_t        kInFlightWarnLimitHighSpeed = 256; // batch size 32 * pipe depth 8
     static const nsecs_t       kDefaultExpectedDuration = 100000000; // 100 ms
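
Together with the Camera3Device.cpp hunk above, the warning is now gated on the accumulated expected in-flight duration as well as the in-flight map size, so a deep queue of short requests no longer logs an error. A small sketch of the combined condition using the constants added here (the non-high-speed limit is shown):

#include <cstdint>
#include <cstdio>

// Sketch of the relaxed in-flight sanity check (constants mirror Camera3Device.h;
// the normal, non-high-speed limit is used here).
static const uint64_t kMinWarnInflightDuration = 5000000000ull;  // 5 s
static const size_t   kInFlightWarnLimit = 30;

bool shouldWarn(size_t inFlightCount, uint64_t expectedInflightDuration) {
    return expectedInflightDuration > kMinWarnInflightDuration &&
           inFlightCount > kInFlightWarnLimit;
}

int main() {
    // Long burst of long-exposure requests: large count and large duration -> warn.
    std::printf("%d\n", shouldWarn(40, 6000000000ull));
    // Deep queue of short requests that will drain quickly -> stay quiet.
    std::printf("%d\n", shouldWarn(40, 1000000000ull));
    return 0;
}
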