Merge "Use returned config to retry opening MMAP stream when fails."
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index b277bed..79b1263 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -13,6 +13,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+#include <algorithm>
+#include <cstdint>
+#include <string_view>
+#include <type_traits>
 
 #include <assert.h>
 #include <ctype.h>
@@ -85,6 +88,7 @@
 using android::Vector;
 using android::sp;
 using android::status_t;
+using android::SurfaceControl;
 
 using android::INVALID_OPERATION;
 using android::NAME_NOT_FOUND;
@@ -100,7 +104,6 @@
 static const uint32_t kFallbackHeight = 720;
 static const char* kMimeTypeAvc = "video/avc";
 static const char* kMimeTypeApplicationOctetstream = "application/octet-stream";
-static const char* kWinscopeMagicString = "#VV1NSC0PET1ME!#";
 
 // Command-line parameters.
 static bool gVerbose = false;           // chatty on stdout
@@ -339,13 +342,20 @@
 static status_t prepareVirtualDisplay(
         const ui::DisplayState& displayState,
         const sp<IGraphicBufferProducer>& bufferProducer,
-        sp<IBinder>* pDisplayHandle) {
+        sp<IBinder>* pDisplayHandle, sp<SurfaceControl>* mirrorRoot) {
     sp<IBinder> dpy = SurfaceComposerClient::createDisplay(
             String8("ScreenRecorder"), false /*secure*/);
     SurfaceComposerClient::Transaction t;
     t.setDisplaySurface(dpy, bufferProducer);
     setDisplayProjection(t, dpy, displayState);
-    t.setDisplayLayerStack(dpy, displayState.layerStack);
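+    // Move the virtual display onto its own randomly chosen layer stack so that
+    // it only shows the mirror hierarchy attached below, rather than capturing
+    // the source display's layer stack directly.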
+    ui::LayerStack layerStack = ui::LayerStack::fromValue(std::rand());
+    t.setDisplayLayerStack(dpy, layerStack);
+    *mirrorRoot = SurfaceComposerClient::getDefault()->mirrorDisplay(gPhysicalDisplayId);
+    if (*mirrorRoot == nullptr) {
+        ALOGE("Failed to create a mirror for screenrecord");
+        return UNKNOWN_ERROR;
+    }
+    t.setLayerStack(*mirrorRoot, layerStack);
     t.apply();
 
     *pDisplayHandle = dpy;
@@ -354,14 +364,15 @@
 }
 
 /*
- * Writes an unsigned integer byte-by-byte in little endian order regardless
+ * Writes an unsigned or signed integer byte-by-byte in little-endian order regardless
  * of the platform endianness.
  */
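+// For example, writeValueLE<uint32_t>(0x12345678, buf) stores the bytes
+// {0x78, 0x56, 0x34, 0x12} regardless of the host byte order.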
-template <typename UINT>
-static void writeValueLE(UINT value, uint8_t* buffer) {
-    for (int i = 0; i < sizeof(UINT); ++i) {
-        buffer[i] = static_cast<uint8_t>(value);
-        value >>= 8;
+template <typename T>
+static void writeValueLE(T value, uint8_t* buffer) {
+    std::remove_const_t<T> temp = value;
+    for (int i = 0; i < sizeof(T); ++i) {
+        buffer[i] = static_cast<std::uint8_t>(temp & 0xff);
+        temp >>= 8;
     }
 }
 
@@ -377,16 +388,18 @@
  * - for every frame its presentation time relative to the elapsed realtime clock in microseconds
  *   (as little endian uint64).
  */
-static status_t writeWinscopeMetadata(const Vector<int64_t>& timestamps,
+static status_t writeWinscopeMetadataLegacy(const Vector<int64_t>& timestamps,
         const ssize_t metaTrackIdx, AMediaMuxer *muxer) {
-    ALOGV("Writing metadata");
+    static constexpr auto kWinscopeMagicStringLegacy = "#VV1NSC0PET1ME!#";
+
+    ALOGV("Writing winscope metadata legacy");
     int64_t systemTimeToElapsedTimeOffsetMicros = (android::elapsedRealtimeNano()
         - systemTime(SYSTEM_TIME_MONOTONIC)) / 1000;
     sp<ABuffer> buffer = new ABuffer(timestamps.size() * sizeof(int64_t)
-        + sizeof(uint32_t) + strlen(kWinscopeMagicString));
+        + sizeof(uint32_t) + strlen(kWinscopeMagicStringLegacy));
     uint8_t* pos = buffer->data();
-    strcpy(reinterpret_cast<char*>(pos), kWinscopeMagicString);
-    pos += strlen(kWinscopeMagicString);
+    strcpy(reinterpret_cast<char*>(pos), kWinscopeMagicStringLegacy);
+    pos += strlen(kWinscopeMagicStringLegacy);
     writeValueLE<uint32_t>(timestamps.size(), pos);
     pos += sizeof(uint32_t);
     for (size_t idx = 0; idx < timestamps.size(); ++idx) {
@@ -395,10 +408,68 @@
         pos += sizeof(uint64_t);
     }
     AMediaCodecBufferInfo bufferInfo = {
-        0,
+        0 /* offset */,
         static_cast<int32_t>(buffer->size()),
-        timestamps[0],
-        0
+        timestamps[0] /* presentationTimeUs */,
+        0 /* flags */
+    };
+    return AMediaMuxer_writeSampleData(muxer, metaTrackIdx, buffer->data(), &bufferInfo);
+}
+
+/*
+ * Saves metadata needed by Winscope to synchronize the screen recording playback with other traces.
+ *
+ * The metadata (version 1) is written as a binary array with the following format:
+ * - the Winscope magic string (#VV1NSC0PET1ME2#, 16B)
+ * - the metadata version number (4B)
+ * - the realtime-to-monotonic time offset in nanoseconds (8B)
+ * - the recorded frames count (8B)
+ * - for each recorded frame:
+ *     - the system time in the monotonic clock timebase in nanoseconds (8B)
+ *
+ * All numbers are little-endian encoded.
+ */
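+// For example, a recording with 2 frames produces a metadata sample of
+// 16 + 4 + 8 + 8 + 2 * 8 = 52 bytes.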
+static status_t writeWinscopeMetadata(const Vector<std::int64_t>& timestampsMonotonicUs,
+        const ssize_t metaTrackIdx, AMediaMuxer *muxer) {
+    ALOGV("Writing winscope metadata");
+
+    static constexpr auto kWinscopeMagicString = std::string_view {"#VV1NSC0PET1ME2#"};
+    static constexpr std::uint32_t metadataVersion = 1;
+    const std::int64_t realToMonotonicTimeOffsetNs =
+            systemTime(SYSTEM_TIME_REALTIME) - systemTime(SYSTEM_TIME_MONOTONIC);
+    const std::uint32_t framesCount = static_cast<std::uint32_t>(timestampsMonotonicUs.size());
+
+    sp<ABuffer> buffer = new ABuffer(
+        kWinscopeMagicString.size() +
+        sizeof(decltype(metadataVersion)) +
+        sizeof(decltype(realToMonotonicTimeOffsetNs)) +
+        sizeof(decltype(framesCount)) +
+        framesCount * sizeof(std::uint64_t)
+    );
+    std::uint8_t* pos = buffer->data();
+
+    std::copy(kWinscopeMagicString.cbegin(), kWinscopeMagicString.cend(), pos);
+    pos += kWinscopeMagicString.size();
+
+    writeValueLE(metadataVersion, pos);
+    pos += sizeof(decltype(metadataVersion));
+
+    writeValueLE(realToMonotonicTimeOffsetNs, pos);
+    pos += sizeof(decltype(realToMonotonicTimeOffsetNs));
+
+    writeValueLE(framesCount, pos);
+    pos += sizeof(decltype(framesCount));
+
+    for (const auto timestampMonotonicUs : timestampsMonotonicUs) {
+        writeValueLE<std::uint64_t>(timestampMonotonicUs * 1000, pos);
+        pos += sizeof(std::uint64_t);
+    }
+
+    AMediaCodecBufferInfo bufferInfo = {
+        0 /* offset */,
+        static_cast<std::int32_t>(buffer->size()),
+        timestampsMonotonicUs[0] /* presentationTimeUs */,
+        0 /* flags */
     };
     return AMediaMuxer_writeSampleData(muxer, metaTrackIdx, buffer->data(), &bufferInfo);
 }
@@ -418,11 +489,12 @@
     static int kTimeout = 250000;   // be responsive on signal
     status_t err;
     ssize_t trackIdx = -1;
+    ssize_t metaLegacyTrackIdx = -1;
     ssize_t metaTrackIdx = -1;
     uint32_t debugNumFrames = 0;
     int64_t startWhenNsec = systemTime(CLOCK_MONOTONIC);
     int64_t endWhenNsec = startWhenNsec + seconds_to_nanoseconds(gTimeLimitSec);
-    Vector<int64_t> timestamps;
+    Vector<int64_t> timestampsMonotonicUs;
     bool firstFrame = true;
 
     assert((rawFp == NULL && muxer != NULL) || (rawFp != NULL && muxer == NULL));
@@ -520,9 +592,9 @@
                     sp<ABuffer> buffer = new ABuffer(
                             buffers[bufIndex]->data(), buffers[bufIndex]->size());
                     AMediaCodecBufferInfo bufferInfo = {
-                        0,
+                        0 /* offset */,
                         static_cast<int32_t>(buffer->size()),
-                        ptsUsec,
+                        ptsUsec /* presentationTimeUs */,
                         flags
                     };
                     err = AMediaMuxer_writeSampleData(muxer, trackIdx, buffer->data(), &bufferInfo);
@@ -532,7 +604,7 @@
                         return err;
                     }
                     if (gOutputFormat == FORMAT_MP4) {
-                        timestamps.add(ptsUsec);
+                        timestampsMonotonicUs.add(ptsUsec);
                     }
                 }
                 debugNumFrames++;
@@ -565,6 +637,7 @@
                     if (gOutputFormat == FORMAT_MP4) {
                         AMediaFormat *metaFormat = AMediaFormat_new();
                         AMediaFormat_setString(metaFormat, AMEDIAFORMAT_KEY_MIME, kMimeTypeApplicationOctetstream);
+                        metaLegacyTrackIdx = AMediaMuxer_addTrack(muxer, metaFormat);
                         metaTrackIdx = AMediaMuxer_addTrack(muxer, metaFormat);
                         AMediaFormat_delete(metaFormat);
                     }
@@ -604,10 +677,16 @@
                         systemTime(CLOCK_MONOTONIC) - startWhenNsec));
         fflush(stdout);
     }
-    if (metaTrackIdx >= 0 && !timestamps.isEmpty()) {
-        err = writeWinscopeMetadata(timestamps, metaTrackIdx, muxer);
+    if (metaLegacyTrackIdx >= 0 && metaTrackIdx >= 0 && !timestampsMonotonicUs.isEmpty()) {
+        err = writeWinscopeMetadataLegacy(timestampsMonotonicUs, metaLegacyTrackIdx, muxer);
         if (err != NO_ERROR) {
-            fprintf(stderr, "Failed writing metadata to muxer (err=%d)\n", err);
+            fprintf(stderr, "Failed writing legacy winscope metadata to muxer (err=%d)\n", err);
+            return err;
+        }
+
+        err = writeWinscopeMetadata(timestampsMonotonicUs, metaTrackIdx, muxer);
+        if (err != NO_ERROR) {
+            fprintf(stderr, "Failed writing winscope metadata to muxer (err=%d)\n", err);
             return err;
         }
     }
@@ -656,6 +735,23 @@
     return num & ~1;
 }
 
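+/*
+ * Bundles the recording resources so that every return path tears them down
+ * automatically (display, overlay, then encoder) via the destructor.
+ */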
+struct RecordingData {
+    sp<MediaCodec> encoder;
+    // Virtual display handle.
+    sp<IBinder> dpy;
+
+    sp<Overlay> overlay;
+
+    ~RecordingData() {
+        if (dpy != nullptr) SurfaceComposerClient::destroyDisplay(dpy);
+        if (overlay != nullptr) overlay->stop();
+        if (encoder != nullptr) {
+            encoder->stop();
+            encoder->release();
+        }
+    }
+};
+
 /*
  * Main "do work" start point.
  *
@@ -713,12 +809,12 @@
         gVideoHeight = floorToEven(layerStackSpaceRect.getHeight());
     }
 
+    RecordingData recordingData = RecordingData();
     // Configure and start the encoder.
-    sp<MediaCodec> encoder;
     sp<FrameOutput> frameOutput;
     sp<IGraphicBufferProducer> encoderInputSurface;
     if (gOutputFormat != FORMAT_FRAMES && gOutputFormat != FORMAT_RAW_FRAMES) {
-        err = prepareEncoder(displayMode.refreshRate, &encoder, &encoderInputSurface);
+        err = prepareEncoder(displayMode.refreshRate, &recordingData.encoder, &encoderInputSurface);
 
         if (err != NO_ERROR && !gSizeSpecified) {
             // fallback is defined for landscape; swap if we're in portrait
@@ -731,7 +827,8 @@
                         gVideoWidth, gVideoHeight, newWidth, newHeight);
                 gVideoWidth = newWidth;
                 gVideoHeight = newHeight;
-                err = prepareEncoder(displayMode.refreshRate, &encoder, &encoderInputSurface);
+                err = prepareEncoder(displayMode.refreshRate, &recordingData.encoder,
+                                      &encoderInputSurface);
             }
         }
         if (err != NO_ERROR) return err;
@@ -758,13 +855,11 @@
 
     // Configure optional overlay.
     sp<IGraphicBufferProducer> bufferProducer;
-    sp<Overlay> overlay;
     if (gWantFrameTime) {
         // Send virtual display frames to an external texture.
-        overlay = new Overlay(gMonotonicTime);
-        err = overlay->start(encoderInputSurface, &bufferProducer);
+        recordingData.overlay = new Overlay(gMonotonicTime);
+        err = recordingData.overlay->start(encoderInputSurface, &bufferProducer);
         if (err != NO_ERROR) {
-            if (encoder != NULL) encoder->release();
             return err;
         }
         if (gVerbose) {
@@ -776,11 +871,13 @@
         bufferProducer = encoderInputSurface;
     }
 
+    // We need to hold a reference to mirrorRoot during the entire recording to ensure it's not
+    // cleaned up by SurfaceFlinger. When the reference is dropped, SurfaceFlinger will delete
+    // the resource.
+    sp<SurfaceControl> mirrorRoot;
     // Configure virtual display.
-    sp<IBinder> dpy;
-    err = prepareVirtualDisplay(displayState, bufferProducer, &dpy);
+    err = prepareVirtualDisplay(displayState, bufferProducer, &recordingData.dpy, &mirrorRoot);
     if (err != NO_ERROR) {
-        if (encoder != NULL) encoder->release();
         return err;
     }
 
@@ -820,7 +917,6 @@
         case FORMAT_RAW_FRAMES: {
             rawFp = prepareRawOutput(fileName);
             if (rawFp == NULL) {
-                if (encoder != NULL) encoder->release();
                 return -1;
             }
             break;
@@ -861,7 +957,8 @@
         }
     } else {
         // Main encoder loop.
-        err = runEncoder(encoder, muxer, rawFp, display, dpy, displayState.orientation);
+        err = runEncoder(recordingData.encoder, muxer, rawFp, display, recordingData.dpy,
+                         displayState.orientation);
         if (err != NO_ERROR) {
             fprintf(stderr, "Encoder failed (err=%d)\n", err);
             // fall through to cleanup
@@ -875,9 +972,6 @@
 
     // Shut everything down, starting with the producer side.
     encoderInputSurface = NULL;
-    SurfaceComposerClient::destroyDisplay(dpy);
-    if (overlay != NULL) overlay->stop();
-    if (encoder != NULL) encoder->stop();
     if (muxer != NULL) {
         // If we don't stop muxer explicitly, i.e. let the destructor run,
         // it may hang (b/11050628).
@@ -885,7 +979,6 @@
     } else if (rawFp != stdout) {
         fclose(rawFp);
     }
-    if (encoder != NULL) encoder->release();
 
     return err;
 }
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index 4dec57f..d234f21 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -102,6 +102,29 @@
             .withSetter(Hdr10PlusInfoOutputSetter)
             .build());
 
+    // default static info
+    C2HdrStaticMetadataStruct defaultStaticInfo{};
+    helper->addStructDescriptors<C2MasteringDisplayColorVolumeStruct, C2ColorXyStruct>();
+    addParameter(
+        DefineParam(mHdrStaticInfo, C2_PARAMKEY_HDR_STATIC_INFO)
+            .withDefault(new C2StreamHdrStaticInfo::output(0u, defaultStaticInfo))
+            .withFields({
+                C2F(mHdrStaticInfo, mastering.red.x).inRange(0, 1),
+                C2F(mHdrStaticInfo, mastering.red.y).inRange(0, 1),
+                C2F(mHdrStaticInfo, mastering.green.x).inRange(0, 1),
+                C2F(mHdrStaticInfo, mastering.green.y).inRange(0, 1),
+                C2F(mHdrStaticInfo, mastering.blue.x).inRange(0, 1),
+                C2F(mHdrStaticInfo, mastering.blue.y).inRange(0, 1),
+                C2F(mHdrStaticInfo, mastering.white.x).inRange(0, 1),
+                C2F(mHdrStaticInfo, mastering.white.y).inRange(0, 1),
+                C2F(mHdrStaticInfo, mastering.maxLuminance).inRange(0, 65535),
+                C2F(mHdrStaticInfo, mastering.minLuminance).inRange(0, 6.5535),
+                C2F(mHdrStaticInfo, maxCll).inRange(0, 65535),
+                C2F(mHdrStaticInfo, maxFall).inRange(0, 65535)
+            })
+            .withSetter(HdrStaticInfoSetter)
+            .build());
+
     addParameter(
         DefineParam(mMaxSize, C2_PARAMKEY_MAX_PICTURE_SIZE)
             .withDefault(new C2StreamMaxPictureSizeTuning::output(0u, 320, 240))
@@ -331,6 +354,47 @@
   // unsafe getters
   std::shared_ptr<C2StreamPixelFormatInfo::output> getPixelFormat_l() const { return mPixelFormat; }
 
+  static C2R HdrStaticInfoSetter(bool mayBlock, C2P<C2StreamHdrStaticInfo::output> &me) {
+    (void)mayBlock;
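+    // Clamp out-of-range values instead of rejecting the config: chromaticity
+    // coordinates to [0, 1], minLuminance to 6.5535, and maxLuminance/maxCll/
+    // maxFall to 65535, matching the ranges declared for the parameter above.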
+    if (me.v.mastering.red.x > 1) {
+      me.set().mastering.red.x = 1;
+    }
+    if (me.v.mastering.red.y > 1) {
+      me.set().mastering.red.y = 1;
+    }
+    if (me.v.mastering.green.x > 1) {
+      me.set().mastering.green.x = 1;
+    }
+    if (me.v.mastering.green.y > 1) {
+      me.set().mastering.green.y = 1;
+    }
+    if (me.v.mastering.blue.x > 1) {
+      me.set().mastering.blue.x = 1;
+    }
+    if (me.v.mastering.blue.y > 1) {
+      me.set().mastering.blue.y = 1;
+    }
+    if (me.v.mastering.white.x > 1) {
+      me.set().mastering.white.x = 1;
+    }
+    if (me.v.mastering.white.y > 1) {
+      me.set().mastering.white.y = 1;
+    }
+    if (me.v.mastering.maxLuminance > 65535.0) {
+      me.set().mastering.maxLuminance = 65535.0;
+    }
+    if (me.v.mastering.minLuminance > 6.5535) {
+      me.set().mastering.minLuminance = 6.5535;
+    }
+    if (me.v.maxCll > 65535.0) {
+      me.set().maxCll = 65535.0;
+    }
+    if (me.v.maxFall > 65535.0) {
+      me.set().maxFall = 65535.0;
+    }
+    return C2R::Ok();
+  }
+
  private:
   std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
   std::shared_ptr<C2StreamPictureSizeInfo::output> mSize;
@@ -343,6 +407,7 @@
   std::shared_ptr<C2StreamColorAspectsInfo::output> mColorAspects;
   std::shared_ptr<C2StreamHdr10PlusInfo::input> mHdr10PlusInfoInput;
   std::shared_ptr<C2StreamHdr10PlusInfo::output> mHdr10PlusInfoOutput;
+  std::shared_ptr<C2StreamHdrStaticInfo::output> mHdrStaticInfo;
 };
 
 C2SoftGav1Dec::C2SoftGav1Dec(const char *name, c2_node_id_t id,
@@ -558,6 +623,76 @@
   }
 }
 
+void C2SoftGav1Dec::getHDRStaticParams(const libgav1::DecoderBuffer *buffer,
+                                       const std::unique_ptr<C2Work> &work) {
+  C2StreamHdrStaticMetadataInfo::output hdrStaticMetadataInfo{};
+  bool infoPresent = false;
+  if (buffer->has_hdr_mdcv) {
+    // hdr_mdcv.primary_chromaticity_* values are in 0.16 fixed-point format.
+    hdrStaticMetadataInfo.mastering.red.x = buffer->hdr_mdcv.primary_chromaticity_x[0] / 65536.0;
+    hdrStaticMetadataInfo.mastering.red.y = buffer->hdr_mdcv.primary_chromaticity_y[0] / 65536.0;
+
+    hdrStaticMetadataInfo.mastering.green.x = buffer->hdr_mdcv.primary_chromaticity_x[1] / 65536.0;
+    hdrStaticMetadataInfo.mastering.green.y = buffer->hdr_mdcv.primary_chromaticity_y[1] / 65536.0;
+
+    hdrStaticMetadataInfo.mastering.blue.x = buffer->hdr_mdcv.primary_chromaticity_x[2] / 65536.0;
+    hdrStaticMetadataInfo.mastering.blue.y = buffer->hdr_mdcv.primary_chromaticity_y[2] / 65536.0;
+
+    // hdr_mdcv.white_point_chromaticity_* values are in 0.16 fixed-point format.
+    hdrStaticMetadataInfo.mastering.white.x = buffer->hdr_mdcv.white_point_chromaticity_x / 65536.0;
+    hdrStaticMetadataInfo.mastering.white.y = buffer->hdr_mdcv.white_point_chromaticity_y / 65536.0;
+
+    // hdr_mdcv.luminance_max is in 24.8 fixed-point format.
+    hdrStaticMetadataInfo.mastering.maxLuminance = buffer->hdr_mdcv.luminance_max / 256.0;
+    // hdr_mdcv.luminance_min is in 18.14 format.
+    hdrStaticMetadataInfo.mastering.minLuminance = buffer->hdr_mdcv.luminance_min / 16384.0;
+    infoPresent = true;
+  }
+
+  if (buffer->has_hdr_cll) {
+    hdrStaticMetadataInfo.maxCll = buffer->hdr_cll.max_cll;
+    hdrStaticMetadataInfo.maxFall = buffer->hdr_cll.max_fall;
+    infoPresent = true;
+  }
+  // config if static info has changed
+  if (infoPresent && !(hdrStaticMetadataInfo == mHdrStaticMetadataInfo)) {
+    mHdrStaticMetadataInfo = hdrStaticMetadataInfo;
+    work->worklets.front()->output.configUpdate.push_back(C2Param::Copy(mHdrStaticMetadataInfo));
+  }
+}
+
+void C2SoftGav1Dec::getHDR10PlusInfoData(const libgav1::DecoderBuffer *buffer,
+                                         const std::unique_ptr<C2Work> &work) {
+  if (buffer->has_itut_t35) {
+    std::vector<uint8_t> payload;
+    size_t payloadSize = buffer->itut_t35.payload_size;
+    if (payloadSize > 0) {
+      payload.push_back(buffer->itut_t35.country_code);
+      if (buffer->itut_t35.country_code == 0xFF) {
+        payload.push_back(buffer->itut_t35.country_code_extension_byte);
+      }
+      payload.insert(payload.end(), buffer->itut_t35.payload_bytes,
+                     buffer->itut_t35.payload_bytes + buffer->itut_t35.payload_size);
+    }
+
+    std::unique_ptr<C2StreamHdr10PlusInfo::output> hdr10PlusInfo =
+            C2StreamHdr10PlusInfo::output::AllocUnique(payload.size());
+    if (!hdr10PlusInfo) {
+      ALOGE("Hdr10PlusInfo allocation failed");
+      mSignalledError = true;
+      work->result = C2_NO_MEMORY;
+      return;
+    }
+    memcpy(hdr10PlusInfo->m.value, payload.data(), payload.size());
+
+    // config if hdr10Plus info has changed
+    if (nullptr == mHdr10PlusInfo || !(*hdr10PlusInfo == *mHdr10PlusInfo)) {
+      mHdr10PlusInfo = std::move(hdr10PlusInfo);
+      work->worklets.front()->output.configUpdate.push_back(C2Param::Copy(*mHdr10PlusInfo));
+    }
+  }
+}
+
 void C2SoftGav1Dec::getVuiParams(const libgav1::DecoderBuffer *buffer) {
     VuiColorAspects vuiColorAspects;
     vuiColorAspects.primaries = buffer->color_primary;
@@ -633,6 +768,9 @@
   }
 
   getVuiParams(buffer);
+  getHDRStaticParams(buffer, work);
+  getHDR10PlusInfoData(buffer, work);
+
   if (!(buffer->image_format == libgav1::kImageFormatYuv420 ||
         buffer->image_format == libgav1::kImageFormatMonochrome400)) {
     ALOGE("image_format %d not supported", buffer->image_format);
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.h b/media/codec2/components/gav1/C2SoftGav1Dec.h
index 3d4db55..e51c511 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.h
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.h
@@ -61,6 +61,9 @@
   bool mSignalledOutputEos;
   bool mSignalledError;
 
+  C2StreamHdrStaticMetadataInfo::output mHdrStaticMetadataInfo;
+  std::unique_ptr<C2StreamHdr10PlusInfo::output> mHdr10PlusInfo = nullptr;
+
   // Color aspects. These are ISO values and are meant to detect changes in aspects to avoid
   // converting them to C2 values for each frame
   struct VuiColorAspects {
@@ -86,6 +89,10 @@
   nsecs_t mTimeEnd = 0;    // Time at the end of decode()
 
   bool initDecoder();
+  void getHDRStaticParams(const libgav1::DecoderBuffer *buffer,
+                  const std::unique_ptr<C2Work> &work);
+  void getHDR10PlusInfoData(const libgav1::DecoderBuffer *buffer,
+                  const std::unique_ptr<C2Work> &work);
   void getVuiParams(const libgav1::DecoderBuffer *buffer);
   void destroyDecoder();
   void finishWork(uint64_t index, const std::unique_ptr<C2Work>& work,
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.cpp b/media/codec2/components/hevc/C2SoftHevcDec.cpp
index 5a660c5..a27c218 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcDec.cpp
@@ -917,7 +917,8 @@
         if (0 < ps_decode_op->u4_pic_wd && 0 < ps_decode_op->u4_pic_ht) {
             if (mHeaderDecoded == false) {
                 mHeaderDecoded = true;
-                setParams(ALIGN128(ps_decode_op->u4_pic_wd), IVD_DECODE_FRAME);
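+                // Keep mStride in sync with the aligned width configured in the
+                // decoder so that output buffers are read with the same stride.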
+                mStride = ALIGN128(ps_decode_op->u4_pic_wd);
+                setParams(mStride, IVD_DECODE_FRAME);
             }
             if (ps_decode_op->u4_pic_wd != mWidth ||  ps_decode_op->u4_pic_ht != mHeight) {
                 mWidth = ps_decode_op->u4_pic_wd;
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 400a81b..9ca24aa 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -1173,7 +1173,10 @@
  * in the streams current data format to the audioData buffer.
  *
  * For an input stream, this function should read and process numFrames of data
- * from the audioData buffer.
+ * from the audioData buffer. The data in the audioData buffer must not be modified
+ * directly. Instead, it should be copied to another buffer before doing any modification.
+ * In many cases, writing to the audioData buffer of an input stream will result in a
+ * native exception.
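+ *
+ * For example, assuming an AAUDIO_FORMAT_PCM_I16 stream and a caller-owned
+ * localBuffer, copy the samples out before processing them:
+ *
+ *     memcpy(localBuffer, audioData,
+ *            numFrames * AAudioStream_getChannelCount(stream) * sizeof(int16_t));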
  *
  * The audio data is passed through the buffer. So do NOT call AAudioStream_read() or
  * AAudioStream_write() on the stream that is making the callback.
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 08c76aa..1c414ec 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -26,8 +26,11 @@
 #include <android/media/AudioVibratorInfo.h>
 #include <android/media/BnAudioFlingerClient.h>
 #include <android/media/BnAudioPolicyServiceClient.h>
+#include <android/media/EffectDescriptor.h>
 #include <android/media/INativeSpatializerCallback.h>
 #include <android/media/ISpatializer.h>
+#include <android/media/RecordClientInfo.h>
+#include <android/media/audio/common/AudioConfigBase.h>
 #include <android/media/audio/common/AudioMMapPolicyInfo.h>
 #include <android/media/audio/common/AudioMMapPolicyType.h>
 #include <android/media/audio/common/AudioPort.h>
diff --git a/media/libaudioclient/tests/audioeffect_tests.cpp b/media/libaudioclient/tests/audioeffect_tests.cpp
index 93fe306..e6149e4 100644
--- a/media/libaudioclient/tests/audioeffect_tests.cpp
+++ b/media/libaudioclient/tests/audioeffect_tests.cpp
@@ -19,19 +19,43 @@
 
 #include <gtest/gtest.h>
 #include <media/AudioEffect.h>
+#include <system/audio_effects/effect_hapticgenerator.h>
+#include <system/audio_effects/effect_spatializer.h>
 #include <system/audio_effects/effect_visualizer.h>
 
 #include "audio_test_utils.h"
 
 using namespace android;
 
+class AudioEffectCallback : public AudioEffect::IAudioEffectCallback {
+  public:
+    bool receivedFramesProcessed = false;
+
+    void onFramesProcessed(int32_t framesProcessed) override {
+        ALOGE("number of frames processed %d", framesProcessed);
+        receivedFramesProcessed = true;
+    }
+};
+
 static constexpr int kDefaultInputEffectPriority = -1;
 static constexpr int kDefaultOutputEffectPriority = 0;
 
 static const char* gPackageName = "AudioEffectTest";
 
-bool isEffectExistsOnAudioSession(const effect_uuid_t* type, int priority,
-                                  audio_session_t sessionId) {
+bool doesDeviceSupportLowLatencyMode(std::vector<struct audio_port_v7>& ports) {
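+    // A source mix port whose active config has AUDIO_OUTPUT_FLAG_FAST set
+    // indicates that a fast (low latency) output path is available.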
+    for (const auto& port : ports) {
+        if (port.role == AUDIO_PORT_ROLE_SOURCE && port.type == AUDIO_PORT_TYPE_MIX) {
+            if ((port.active_config.flags.output & AUDIO_OUTPUT_FLAG_FAST) != 0) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+sp<AudioEffect> createEffect(const effect_uuid_t* type, const effect_uuid_t* uuid = nullptr,
+                             int priority = 0, audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
+                             const wp<AudioEffectCallback>& callback = nullptr) {
     std::string packageName{gPackageName};
     AttributionSourceState attributionSource;
     attributionSource.packageName = packageName;
@@ -39,11 +63,19 @@
     attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
     attributionSource.token = sp<BBinder>::make();
     sp<AudioEffect> effect = new AudioEffect(attributionSource);
-    effect->set(type, nullptr /* uid */, priority, nullptr /* callback */, sessionId);
-    return effect->initCheck() == ALREADY_EXISTS;
+    effect->set(type, uuid, priority, callback, sessionId, AUDIO_IO_HANDLE_NONE,
+                {} /* device */, false /* probe */,
+                (callback != nullptr) /* notifyFramesProcessed */);
+    return effect;
 }
 
-bool isEffectDefaultOnRecord(const effect_uuid_t* type, const sp<AudioRecord>& audioRecord) {
+status_t isEffectExistsOnAudioSession(const effect_uuid_t* type, const effect_uuid_t* uuid,
+                                      int priority, audio_session_t sessionId) {
+    sp<AudioEffect> effect = createEffect(type, uuid, priority, sessionId);
+    return effect->initCheck();
+}
+
+bool isEffectDefaultOnRecord(const effect_uuid_t* type, const effect_uuid_t* uuid,
+                             const sp<AudioRecord>& audioRecord) {
     effect_descriptor_t descriptors[AudioEffect::kMaxPreProcessing];
     uint32_t numEffects = AudioEffect::kMaxPreProcessing;
     status_t ret = AudioEffect::queryDefaultPreProcessing(audioRecord->getSessionId(), descriptors,
@@ -52,7 +84,8 @@
         return false;
     }
     for (int i = 0; i < numEffects; i++) {
-        if (memcmp(&descriptors[i].type, type, sizeof(effect_uuid_t)) == 0) {
+        if ((memcmp(&descriptors[i].type, type, sizeof(effect_uuid_t)) == 0) &&
+            (memcmp(&descriptors[i].uuid, uuid, sizeof(effect_uuid_t)) == 0)) {
             return true;
         }
     }
@@ -61,11 +94,11 @@
 
 void listEffectsAvailable(std::vector<effect_descriptor_t>& descriptors) {
     uint32_t numEffects = 0;
-    if (NO_ERROR == AudioEffect::queryNumberEffects(&numEffects)) {
-        for (auto i = 0; i < numEffects; i++) {
-            effect_descriptor_t des;
-            if (NO_ERROR == AudioEffect::queryEffect(i, &des)) descriptors.push_back(des);
-        }
+    ASSERT_EQ(NO_ERROR, AudioEffect::queryNumberEffects(&numEffects));
+    for (auto i = 0; i < numEffects; i++) {
+        effect_descriptor_t des;
+        ASSERT_EQ(NO_ERROR, AudioEffect::queryEffect(i, &des));
+        descriptors.push_back(des);
     }
 }
 
@@ -81,11 +114,31 @@
     return ((descriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY);
 }
 
+bool isPostproc(effect_descriptor_t& descriptor) {
+    return ((descriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC);
+}
+
 bool isFastCompatible(effect_descriptor_t& descriptor) {
     return !(((descriptor.flags & EFFECT_FLAG_HW_ACC_MASK) == 0) &&
              ((descriptor.flags & EFFECT_FLAG_NO_PROCESS) == 0));
 }
 
+bool isSpatializer(effect_descriptor_t& descriptor) {
+    return (memcmp(&descriptor.type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0);
+}
+
+bool isHapticGenerator(effect_descriptor_t& descriptor) {
+    return (memcmp(&descriptor.type, FX_IID_HAPTICGENERATOR, sizeof(effect_uuid_t)) == 0);
+}
+
+std::tuple<std::string, std::string> typeAndUuidToString(const effect_descriptor_t& desc) {
+    char type[512];
+    AudioEffect::guidToString(&desc.type, type, sizeof(type));
+    char uuid[512];
+    AudioEffect::guidToString(&desc.uuid, uuid, sizeof(uuid));
+    return std::make_tuple(type, uuid);
+}
+
 // UNIT TESTS
 TEST(AudioEffectTest, getEffectDescriptor) {
     effect_uuid_t randomType = {
@@ -99,7 +152,7 @@
                                                                EFFECT_FLAG_TYPE_MASK, &descriptor));
 
     std::vector<effect_descriptor_t> descriptors;
-    listEffectsAvailable(descriptors);
+    ASSERT_NO_FATAL_FAILURE(listEffectsAvailable(descriptors));
 
     for (auto i = 0; i < descriptors.size(); i++) {
         EXPECT_EQ(NO_ERROR,
@@ -124,15 +177,7 @@
 }
 
 TEST(AudioEffectTest, DISABLED_GetSetParameterForEffect) {
-    std::string packageName{gPackageName};
-    AttributionSourceState attributionSource;
-    attributionSource.packageName = packageName;
-    attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
-    attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
-    attributionSource.token = sp<BBinder>::make();
-    sp<AudioEffect> visualizer = new AudioEffect(attributionSource);
-    ASSERT_NE(visualizer, nullptr) << "effect not created";
-    visualizer->set(SL_IID_VISUALIZATION);
+    sp<AudioEffect> visualizer = createEffect(SL_IID_VISUALIZATION);
     status_t status = visualizer->initCheck();
     ASSERT_TRUE(status == NO_ERROR || status == ALREADY_EXISTS) << "Init check error";
     ASSERT_EQ(NO_ERROR, visualizer->setEnabled(true)) << "visualizer not enabled";
@@ -195,51 +240,58 @@
     sp<AudioCapture> capture = nullptr;
 
     std::vector<effect_descriptor_t> descriptors;
-    listEffectsAvailable(descriptors);
+    ASSERT_NO_FATAL_FAILURE(listEffectsAvailable(descriptors));
     for (auto i = 0; i < descriptors.size(); i++) {
         if (isPreprocessing(descriptors[i])) {
             capture = new AudioCapture(AUDIO_SOURCE_MIC, sampleRate, format, channelMask);
             ASSERT_NE(capture, nullptr) << "Unable to create Record Application";
             EXPECT_EQ(NO_ERROR, capture->create());
             EXPECT_EQ(NO_ERROR, capture->start());
-            if (!isEffectDefaultOnRecord(&descriptors[i].type, capture->getAudioRecordHandle())) {
+            if (!isEffectDefaultOnRecord(&descriptors[i].type, &descriptors[i].uuid,
+                                         capture->getAudioRecordHandle())) {
                 selectedEffect = i;
                 break;
             }
         }
     }
     if (selectedEffect == -1) GTEST_SKIP() << " expected at least one preprocessing effect";
-    effect_uuid_t selectedEffectType = descriptors[selectedEffect].type;
 
-    char type[512];
-    AudioEffect::guidToString(&selectedEffectType, type, sizeof(type));
-
+    effect_uuid_t* selectedEffectType = &descriptors[selectedEffect].type;
+    effect_uuid_t* selectedEffectUuid = &descriptors[selectedEffect].uuid;
+    auto [type, uuid] = typeAndUuidToString(descriptors[selectedEffect]);
     capture = new AudioCapture(AUDIO_SOURCE_MIC, sampleRate, format, channelMask);
     ASSERT_NE(capture, nullptr) << "Unable to create Record Application";
     EXPECT_EQ(NO_ERROR, capture->create());
     EXPECT_EQ(NO_ERROR, capture->start());
-    EXPECT_FALSE(isEffectDefaultOnRecord(&selectedEffectType, capture->getAudioRecordHandle()))
+    EXPECT_FALSE(isEffectDefaultOnRecord(selectedEffectType, selectedEffectUuid,
+                                         capture->getAudioRecordHandle()))
             << "Effect should not have been default on record. " << type;
-    EXPECT_FALSE(isEffectExistsOnAudioSession(&selectedEffectType, kDefaultInputEffectPriority - 1,
-                                              capture->getAudioRecordHandle()->getSessionId()))
+    EXPECT_EQ(NO_ERROR,
+              isEffectExistsOnAudioSession(selectedEffectType, selectedEffectUuid,
+                                           kDefaultInputEffectPriority - 1,
+                                           capture->getAudioRecordHandle()->getSessionId()))
             << "Effect should not have been added. " << type;
     EXPECT_EQ(OK, capture->audioProcess());
     EXPECT_EQ(OK, capture->stop());
 
     String16 name{gPackageName};
     audio_unique_id_t effectId;
-    status_t status = AudioEffect::addSourceDefaultEffect(
-            type, name, nullptr, kDefaultInputEffectPriority, AUDIO_SOURCE_MIC, &effectId);
+    status_t status = AudioEffect::addSourceDefaultEffect(type.c_str(), name, uuid.c_str(),
+                                                          kDefaultInputEffectPriority,
+                                                          AUDIO_SOURCE_MIC, &effectId);
     EXPECT_EQ(NO_ERROR, status) << "Adding default effect failed: " << type;
 
     capture = new AudioCapture(AUDIO_SOURCE_MIC, sampleRate, format, channelMask);
     ASSERT_NE(capture, nullptr) << "Unable to create Record Application";
     EXPECT_EQ(NO_ERROR, capture->create());
     EXPECT_EQ(NO_ERROR, capture->start());
-    EXPECT_TRUE(isEffectDefaultOnRecord(&selectedEffectType, capture->getAudioRecordHandle()))
+    EXPECT_TRUE(isEffectDefaultOnRecord(selectedEffectType, selectedEffectUuid,
+                                        capture->getAudioRecordHandle()))
             << "Effect should have been default on record. " << type;
-    EXPECT_TRUE(isEffectExistsOnAudioSession(&selectedEffectType, kDefaultInputEffectPriority - 1,
-                                             capture->getAudioRecordHandle()->getSessionId()))
+    EXPECT_EQ(ALREADY_EXISTS,
+              isEffectExistsOnAudioSession(selectedEffectType, selectedEffectUuid,
+                                           kDefaultInputEffectPriority - 1,
+                                           capture->getAudioRecordHandle()->getSessionId()))
             << "Effect should have been added. " << type;
     EXPECT_EQ(OK, capture->audioProcess());
     EXPECT_EQ(OK, capture->stop());
@@ -250,86 +302,258 @@
     ASSERT_NE(capture, nullptr) << "Unable to create Record Application";
     EXPECT_EQ(NO_ERROR, capture->create());
     EXPECT_EQ(NO_ERROR, capture->start());
-    EXPECT_FALSE(isEffectDefaultOnRecord(&selectedEffectType, capture->getAudioRecordHandle()))
+    EXPECT_FALSE(isEffectDefaultOnRecord(selectedEffectType, selectedEffectUuid,
+                                         capture->getAudioRecordHandle()))
             << "Effect should not have been default on record. " << type;
-    EXPECT_FALSE(isEffectExistsOnAudioSession(&selectedEffectType, kDefaultInputEffectPriority - 1,
-                                              capture->getAudioRecordHandle()->getSessionId()))
+    EXPECT_EQ(NO_ERROR,
+              isEffectExistsOnAudioSession(selectedEffectType, selectedEffectUuid,
+                                           kDefaultInputEffectPriority - 1,
+                                           capture->getAudioRecordHandle()->getSessionId()))
             << "Effect should not have been added. " << type;
     EXPECT_EQ(OK, capture->audioProcess());
     EXPECT_EQ(OK, capture->stop());
 }
 
-TEST(AudioEffectTest, ManageStreamDefaultEffects) {
+TEST(AudioEffectTest, AuxEffectSanityTest) {
     int32_t selectedEffect = -1;
-
     std::vector<effect_descriptor_t> descriptors;
-    listEffectsAvailable(descriptors);
+    ASSERT_NO_FATAL_FAILURE(listEffectsAvailable(descriptors));
     for (auto i = 0; i < descriptors.size(); i++) {
         if (isAux(descriptors[i])) {
             selectedEffect = i;
             break;
         }
     }
-    if (selectedEffect == -1) GTEST_SKIP() << " expected at least one Aux effect";
+    if (selectedEffect == -1) GTEST_SKIP() << "expected at least one aux effect";
     effect_uuid_t* selectedEffectType = &descriptors[selectedEffect].type;
+    effect_uuid_t* selectedEffectUuid = &descriptors[selectedEffect].uuid;
+    auto [type, uuid] = typeAndUuidToString(descriptors[selectedEffect]);
+    String16 name{gPackageName};
+    audio_session_t sessionId =
+            (audio_session_t)AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+    sp<AudioEffect> audioEffect = createEffect(selectedEffectType, selectedEffectUuid,
+                                               kDefaultInputEffectPriority, sessionId);
+    EXPECT_EQ(NO_INIT, audioEffect->initCheck())
+            << "error, creating auxiliary effect (" << type << ") on session id " << (int)sessionId
+            << " succeeded unexpectedly";
+    audio_unique_id_t id;
+    status_t status = AudioEffect::addStreamDefaultEffect(
+            type.c_str(), name, uuid.c_str(), kDefaultOutputEffectPriority, AUDIO_USAGE_MEDIA, &id);
+    if (status == NO_ERROR) {
+        EXPECT_EQ(NO_ERROR, AudioEffect::removeStreamDefaultEffect(id));
+        EXPECT_NE(NO_ERROR, status) << "error, adding auxiliary effect (" << type
+                                    << ") as stream default effect succeeded unexpectedly";
+    }
+}
 
-    char type[512];
-    AudioEffect::guidToString(selectedEffectType, type, sizeof(type));
+class AudioPlaybackEffectTest : public ::testing::TestWithParam<bool> {
+  public:
+    AudioPlaybackEffectTest() : mSelectFastMode(GetParam()) {}
+
+    const bool mSelectFastMode;
+
+    bool mIsFastCompatibleEffect;
+    effect_uuid_t mType;
+    effect_uuid_t mUuid;
+    std::string mTypeStr;
+    std::string mUuidStr;
+
+    void SetUp() override {
+        if (mSelectFastMode) {
+            std::vector<struct audio_port_v7> ports;
+            ASSERT_EQ(OK, listAudioPorts(ports));
+            if (!doesDeviceSupportLowLatencyMode(ports)) {
+                GTEST_SKIP() << "device does not support low latency mode";
+            }
+        }
+
+        int32_t selectedEffect = -1;
+        std::vector<effect_descriptor_t> descriptors;
+        ASSERT_NO_FATAL_FAILURE(listEffectsAvailable(descriptors));
+        for (auto i = 0; i < descriptors.size(); i++) {
+            if (isSpatializer(descriptors[i])) continue;
+            if (isHapticGenerator(descriptors[i]) && !AudioSystem::isHapticPlaybackSupported())
+                continue;
+            if (!isInsert(descriptors[i])) continue;
+            selectedEffect = i;
+            mIsFastCompatibleEffect = isFastCompatible(descriptors[i]);
+            // in fast mode, pick fast compatible effect if available
+            if (mSelectFastMode == mIsFastCompatibleEffect) break;
+        }
+        if (selectedEffect == -1) {
+            GTEST_SKIP() << "expected at least one valid effect";
+        }
+
+        mType = descriptors[selectedEffect].type;
+        mUuid = descriptors[selectedEffect].uuid;
+        std::tie(mTypeStr, mUuidStr) = typeAndUuidToString(descriptors[selectedEffect]);
+    }
+};
+
+TEST_P(AudioPlaybackEffectTest, StreamDefaultEffectTest) {
+    SCOPED_TRACE(testing::Message()
+                 << "\n selected effect type is :: " << mTypeStr
+                 << "\n selected effect uuid is :: " << mUuidStr
+                 << "\n audiotrack output flag : " << (mSelectFastMode ? "fast" : "default")
+                 << "\n audio effect is fast compatible : "
+                 << (mIsFastCompatibleEffect ? "yes" : "no"));
+
+    // In fast mode the effect is only expected to attach when it is fast compatible.
+    bool compatCheck = !mSelectFastMode || mIsFastCompatibleEffect;
+
     // create track
     audio_attributes_t attributes;
     attributes.usage = AUDIO_USAGE_MEDIA;
     attributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;
     auto playback = sp<AudioPlayback>::make(
-            44100 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
-            AUDIO_OUTPUT_FLAG_NONE, AUDIO_SESSION_NONE, AudioTrack::TRANSFER_SHARED, &attributes);
+            0 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+            mSelectFastMode ? AUDIO_OUTPUT_FLAG_FAST : AUDIO_OUTPUT_FLAG_NONE, AUDIO_SESSION_NONE,
+            AudioTrack::TRANSFER_SHARED, &attributes);
     ASSERT_NE(nullptr, playback);
     ASSERT_EQ(NO_ERROR, playback->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"));
     EXPECT_EQ(NO_ERROR, playback->create());
     EXPECT_EQ(NO_ERROR, playback->start());
-    EXPECT_FALSE(isEffectExistsOnAudioSession(selectedEffectType, kDefaultOutputEffectPriority - 1,
-                                              playback->getAudioTrackHandle()->getSessionId()))
-            << "Effect should not have been added. " << type;
+    EXPECT_EQ(compatCheck ? NO_ERROR : NO_INIT,
+              isEffectExistsOnAudioSession(&mType, &mUuid, kDefaultOutputEffectPriority - 1,
+                                           playback->getAudioTrackHandle()->getSessionId()))
+            << "Effect should not have been added. " << mTypeStr;
     EXPECT_EQ(NO_ERROR, playback->waitForConsumption());
     playback->stop();
     playback.clear();
 
     String16 name{gPackageName};
     audio_unique_id_t id;
-    status_t status = AudioEffect::addStreamDefaultEffect(
-            type, name, nullptr, kDefaultOutputEffectPriority, AUDIO_USAGE_MEDIA, &id);
-    EXPECT_EQ(NO_ERROR, status) << "Adding default effect failed: " << type;
+    status_t status = AudioEffect::addStreamDefaultEffect(mTypeStr.c_str(), name, mUuidStr.c_str(),
+                                                          kDefaultOutputEffectPriority,
+                                                          AUDIO_USAGE_MEDIA, &id);
+    EXPECT_EQ(NO_ERROR, status) << "Adding default effect failed: " << mTypeStr;
 
     playback = sp<AudioPlayback>::make(
-            44100 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
-            AUDIO_OUTPUT_FLAG_NONE, AUDIO_SESSION_NONE, AudioTrack::TRANSFER_SHARED, &attributes);
+            0 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+            mSelectFastMode ? AUDIO_OUTPUT_FLAG_FAST : AUDIO_OUTPUT_FLAG_NONE, AUDIO_SESSION_NONE,
+            AudioTrack::TRANSFER_SHARED, &attributes);
     ASSERT_NE(nullptr, playback);
     ASSERT_EQ(NO_ERROR, playback->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"));
     EXPECT_EQ(NO_ERROR, playback->create());
-    float level = 0.2f, levelGot;
-    playback->getAudioTrackHandle()->setAuxEffectSendLevel(level);
     EXPECT_EQ(NO_ERROR, playback->start());
-    EXPECT_TRUE(isEffectExistsOnAudioSession(selectedEffectType, kDefaultOutputEffectPriority - 1,
-                                             playback->getAudioTrackHandle()->getSessionId()))
-            << "Effect should have been added. " << type;
+    // If effect chosen is not compatible with the session, then effect won't be applied
+    EXPECT_EQ(compatCheck ? ALREADY_EXISTS : NO_INIT,
+              isEffectExistsOnAudioSession(&mType, &mUuid, kDefaultOutputEffectPriority - 1,
+                                           playback->getAudioTrackHandle()->getSessionId()))
+            << "Effect should have been added. " << mTypeStr;
     EXPECT_EQ(NO_ERROR, playback->waitForConsumption());
-    playback->getAudioTrackHandle()->getAuxEffectSendLevel(&levelGot);
-    EXPECT_EQ(level, levelGot);
+    if (mSelectFastMode) {
+        EXPECT_EQ(AUDIO_OUTPUT_FLAG_FAST,
+                  playback->getAudioTrackHandle()->getFlags() & AUDIO_OUTPUT_FLAG_FAST);
+    }
     playback->stop();
     playback.clear();
 
     status = AudioEffect::removeStreamDefaultEffect(id);
     EXPECT_EQ(NO_ERROR, status);
     playback = sp<AudioPlayback>::make(
-            44100 /*sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
-            AUDIO_OUTPUT_FLAG_NONE, AUDIO_SESSION_NONE, AudioTrack::TRANSFER_SHARED, &attributes);
+            0 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+            mSelectFastMode ? AUDIO_OUTPUT_FLAG_FAST : AUDIO_OUTPUT_FLAG_NONE, AUDIO_SESSION_NONE,
+            AudioTrack::TRANSFER_SHARED, &attributes);
     ASSERT_NE(nullptr, playback);
     ASSERT_EQ(NO_ERROR, playback->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"));
     EXPECT_EQ(NO_ERROR, playback->create());
     EXPECT_EQ(NO_ERROR, playback->start());
-    EXPECT_FALSE(isEffectExistsOnAudioSession(selectedEffectType, kDefaultOutputEffectPriority - 1,
-                                              playback->getAudioTrackHandle()->getSessionId()))
-            << "Effect should not have been added. " << type;
+    EXPECT_EQ(compatCheck ? NO_ERROR : NO_INIT,
+              isEffectExistsOnAudioSession(&mType, &mUuid, kDefaultOutputEffectPriority - 1,
+                                           playback->getAudioTrackHandle()->getSessionId()))
+            << "Effect should not have been added. " << mTypeStr;
     EXPECT_EQ(NO_ERROR, playback->waitForConsumption());
     playback->stop();
     playback.clear();
 }
+
+TEST_P(AudioPlaybackEffectTest, CheckOutputFlagCompatibility) {
+    SCOPED_TRACE(testing::Message()
+                 << "\n selected effect type is :: " << mTypeStr
+                 << "\n selected effect uuid is :: " << mUuidStr
+                 << "\n audiotrack output flag : " << (mSelectFastMode ? "fast" : "default")
+                 << "\n audio effect is fast compatible : "
+                 << (mIsFastCompatibleEffect ? "yes" : "no"));
+
+    audio_attributes_t attributes;
+    attributes.usage = AUDIO_USAGE_MEDIA;
+    attributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;
+    audio_session_t sessionId =
+            (audio_session_t)AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+    sp<AudioEffectCallback> cb = sp<AudioEffectCallback>::make();
+    sp<AudioEffect> audioEffect =
+            createEffect(&mType, &mUuid, kDefaultOutputEffectPriority, sessionId, cb);
+    ASSERT_EQ(OK, audioEffect->initCheck());
+    ASSERT_EQ(NO_ERROR, audioEffect->setEnabled(true));
+    auto playback = sp<AudioPlayback>::make(
+            0 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_MONO,
+            mSelectFastMode ? AUDIO_OUTPUT_FLAG_FAST : AUDIO_OUTPUT_FLAG_NONE, sessionId,
+            AudioTrack::TRANSFER_SHARED, &attributes);
+    ASSERT_NE(nullptr, playback);
+    ASSERT_EQ(NO_ERROR, playback->loadResource("/data/local/tmp/bbb_1ch_8kHz_s16le.raw"));
+    EXPECT_EQ(NO_ERROR, playback->create());
+    EXPECT_EQ(NO_ERROR, playback->start());
+
+    EXPECT_EQ(ALREADY_EXISTS, isEffectExistsOnAudioSession(
+                                      &mType, &mUuid, kDefaultOutputEffectPriority - 1, sessionId))
+            << "Effect should have been added. " << mTypeStr;
+    if (mSelectFastMode) {
+        EXPECT_EQ(mIsFastCompatibleEffect ? AUDIO_OUTPUT_FLAG_FAST : 0,
+                  playback->getAudioTrackHandle()->getFlags() & AUDIO_OUTPUT_FLAG_FAST);
+    }
+    EXPECT_EQ(NO_ERROR, playback->waitForConsumption());
+    EXPECT_EQ(NO_ERROR, playback->getAudioTrackHandle()->attachAuxEffect(0));
+    playback->stop();
+    playback.clear();
+    EXPECT_TRUE(cb->receivedFramesProcessed)
+            << "AudioEffect frames processed callback not received";
+}
+
+INSTANTIATE_TEST_SUITE_P(EffectParameterizedTests, AudioPlaybackEffectTest, ::testing::Bool());
+
+TEST(AudioEffectTest, TestHapticEffect) {
+    if (!AudioSystem::isHapticPlaybackSupported())
+        GTEST_SKIP() << "Haptic playback is not supported";
+    int32_t selectedEffect = -1;
+    std::vector<effect_descriptor_t> descriptors;
+    ASSERT_NO_FATAL_FAILURE(listEffectsAvailable(descriptors));
+    for (auto i = 0; i < descriptors.size(); i++) {
+        if (!isHapticGenerator(descriptors[i])) continue;
+        selectedEffect = i;
+        break;
+    }
+    if (selectedEffect == -1) GTEST_SKIP() << "expected at least one valid effect";
+
+    effect_uuid_t* selectedEffectType = &descriptors[selectedEffect].type;
+    effect_uuid_t* selectedEffectUuid = &descriptors[selectedEffect].uuid;
+    auto [type, uuid] = typeAndUuidToString(descriptors[selectedEffect]);
+
+    SCOPED_TRACE(testing::Message() << "\n selected effect type is :: " << type
+                                    << "\n selected effect uuid is :: " << uuid);
+
+    audio_attributes_t attributes;
+    attributes.usage = AUDIO_USAGE_MEDIA;
+    attributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;
+    audio_session_t sessionId =
+            (audio_session_t)AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+    sp<AudioEffectCallback> cb = sp<AudioEffectCallback>::make();
+    sp<AudioEffect> audioEffect = createEffect(selectedEffectType, selectedEffectUuid,
+                                               kDefaultOutputEffectPriority, sessionId, cb);
+    ASSERT_EQ(OK, audioEffect->initCheck());
+    ASSERT_EQ(NO_ERROR, audioEffect->setEnabled(true));
+    auto playback = sp<AudioPlayback>::make(0 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT,
+                                            AUDIO_CHANNEL_OUT_STEREO, AUDIO_OUTPUT_FLAG_NONE,
+                                            sessionId, AudioTrack::TRANSFER_SHARED, &attributes);
+    ASSERT_NE(nullptr, playback);
+    ASSERT_EQ(NO_ERROR, playback->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"));
+    EXPECT_EQ(NO_ERROR, playback->create());
+    EXPECT_EQ(NO_ERROR, playback->start());
+    EXPECT_TRUE(isEffectExistsOnAudioSession(selectedEffectType, selectedEffectUuid,
+                                             kDefaultOutputEffectPriority - 1, sessionId))
+            << "Effect should have been added. " << type;
+    EXPECT_EQ(NO_ERROR, playback->waitForConsumption());
+    playback->stop();
+    playback.clear();
+    EXPECT_TRUE(cb->receivedFramesProcessed)
+            << "AudioEffect frames processed callback not received";
+}
diff --git a/media/libstagefright/data/media_codecs_sw.xml b/media/libstagefright/data/media_codecs_sw.xml
index fb4f9a0..d667685 100644
--- a/media/libstagefright/data/media_codecs_sw.xml
+++ b/media/libstagefright/data/media_codecs_sw.xml
@@ -73,7 +73,7 @@
         <MediaCodec name="c2.android.raw.decoder" type="audio/raw">
             <Alias name="OMX.google.raw.decoder" />
             <Limit name="channel-count" max="8" />
-            <Limit name="sample-rate" ranges="8000-96000" />
+            <Limit name="sample-rate" ranges="8000-192000" />
             <Limit name="bitrate" range="1-10000000" />
         </MediaCodec>
         <MediaCodec name="c2.android.flac.decoder" type="audio/flac">
diff --git a/media/libstagefright/omx/OMXStore.cpp b/media/libstagefright/omx/OMXStore.cpp
index 4827d9e..0906433 100644
--- a/media/libstagefright/omx/OMXStore.cpp
+++ b/media/libstagefright/omx/OMXStore.cpp
@@ -140,7 +140,8 @@
 
         Vector<String8> roles;
         OMX_ERRORTYPE err = plugin->getRolesOfComponent(name, &roles);
-        if (err == OMX_ErrorNone) {
+        static_assert(std::string_view("OMX.google.").size() == 11);
+        if (err == OMX_ErrorNone && strncmp(name, "OMX.google.", 11) == 0) {
             bool skip = false;
             for (String8 role : roles) {
                 if (role.find("video_decoder") != -1 || role.find("video_encoder") != -1) {
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 7495082..ed31c02 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include <charconv>
+#include <optional>
+#include <vector>
 #include <inttypes.h>
 #include <mutex>
 #include <set>
@@ -324,29 +325,61 @@
             }
 
             AMessage::Type type;
-            int64_t mediaTimeUs, systemNano;
-            size_t index = 0;
+            size_t n = data->countEntries();
 
-            // TODO. This code has dependency with MediaCodec::CreateFramesRenderedMessage.
-            for (size_t ix = 0; ix < data->countEntries(); ix++) {
-                AString name = data->getEntryNameAt(ix, &type);
-                if (name.startsWith(AStringPrintf("%zu-media-time-us", index).c_str())) {
-                    AMessage::ItemData data = msg->getEntryAt(index);
-                    data.find(&mediaTimeUs);
-                } else if (name.startsWith(AStringPrintf("%zu-system-nano", index).c_str())) {
-                    AMessage::ItemData data = msg->getEntryAt(index);
-                    data.find(&systemNano);
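+            // Entry names are formatted as "<index>-media-time-us" and
+            // "<index>-system-nano" (see MediaCodec::CreateFramesRenderedMessage);
+            // collect both timestamps per index before invoking the callback.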
+            thread_local std::vector<std::optional<int64_t>> mediaTimesInUs;
+            thread_local std::vector<std::optional<int64_t>> systemTimesInNs;
+            mediaTimesInUs.resize(n);
+            systemTimesInNs.resize(n);
+            std::fill_n(mediaTimesInUs.begin(), n, std::nullopt);
+            std::fill_n(systemTimesInNs.begin(), n, std::nullopt);
+            for (size_t i = 0; i < n; i++) {
+                AString name = data->getEntryNameAt(i, &type);
+                if (name.endsWith("-media-time-us")) {
+                    int64_t mediaTimeUs;
+                    AMessage::ItemData itemData = data->getEntryAt(i);
+                    itemData.find(&mediaTimeUs);
 
-                    Mutex::Autolock _l(mCodec->mFrameRenderedCallbackLock);
-                    if (mCodec->mFrameRenderedCallback != NULL) {
+                    int index = -1;
+                    std::from_chars_result result = std::from_chars(
+                            name.c_str(), name.c_str() + name.find("-"), index);
+                    if (result.ec == std::errc() && 0 <= index && index < n) {
+                        mediaTimesInUs[index] = mediaTimeUs;
+                    } else {
+                        std::error_code ec = std::make_error_code(result.ec);
+                        ALOGE("Unexpected media time index: #%d with value %lldus (err=%d %s)",
+                              index, (long long)mediaTimeUs, ec.value(), ec.message().c_str());
+                    }
+                } else if (name.endsWith("-system-nano")) {
+                    int64_t systemNano;
+                    AMessage::ItemData itemData = data->getEntryAt(i);
+                    itemData.find(&systemNano);
+
+                    int index = -1;
+                    std::from_chars_result result = std::from_chars(
+                            name.c_str(), name.c_str() + name.find("-"), index);
+                    if (result.ec == std::errc() && 0 <= index && index < n) {
+                        systemTimesInNs[index] = systemNano;
+                    } else {
+                        std::error_code ec = std::make_error_code(result.ec);
+                        ALOGE("Unexpected system time index: #%d with value %lldns (err=%d %s)",
+                              index, (long long)systemNano, ec.value(), ec.message().c_str());
+                    }
+                }
+            }
+
+            Mutex::Autolock _l(mCodec->mFrameRenderedCallbackLock);
+            if (mCodec->mFrameRenderedCallback != NULL) {
+                for (size_t i = 0; i < n; ++i) {
+                    if (mediaTimesInUs[i] && systemTimesInNs[i]) {
                         mCodec->mFrameRenderedCallback(
                                 mCodec,
                                 mCodec->mFrameRenderedCallbackUserData,
-                                mediaTimeUs,
-                                systemNano);
+                                mediaTimesInUs[i].value(),
+                                systemTimesInNs[i].value());
+                    } else {
+                        break;
                     }
-
-                    index++;
                 }
             }
             break;
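
The rewritten handler above no longer assumes the message entries arrive in index order: CreateFramesRenderedMessage emits keys of the form "<index>-media-time-us" / "<index>-system-nano", and the index is now recovered from the numeric key prefix with std::from_chars instead of being inferred from iteration order. A minimal standalone sketch of that parsing step, using std::string in place of AString and a hypothetical parseKeyIndex() helper:

    #include <charconv>
    #include <cstddef>
    #include <optional>
    #include <string>

    // Recover the frame index from a key such as "3-media-time-us".
    // Returns nullopt on a malformed or out-of-range prefix, mirroring the
    // error branch in the hunk above.
    static std::optional<size_t> parseKeyIndex(const std::string& name, size_t n) {
        size_t dash = name.find('-');
        if (dash == std::string::npos) return std::nullopt;
        int index = -1;
        std::from_chars_result res =
                std::from_chars(name.data(), name.data() + dash, index);
        if (res.ec != std::errc() || index < 0 || (size_t)index >= n) {
            return std::nullopt;
        }
        return (size_t)index;
    }

    // parseKeyIndex("3-media-time-us", 8)  -> 3
    // parseKeyIndex("x-media-time-us", 8)  -> nullopt
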
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 77e2421..745dbf2 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -5376,12 +5376,6 @@
                     volume = masterVolume * mStreamTypes[track->streamType()].volume;
                 }
 
-                track->processMuteEvent_l(mAudioFlinger->getOrCreateAudioManager(),
-                    /*muteState=*/{masterVolume == 0.f,
-                                   mStreamTypes[track->streamType()].volume == 0.f,
-                                   mStreamTypes[track->streamType()].mute,
-                                   track->isPlaybackRestricted()});
-
                 handleVoipVolume_l(&volume);
 
                 // cache the combined master volume and stream type volume for fast mixer; this
@@ -5391,8 +5385,19 @@
                 volume *= vh;
                 track->mCachedVolume = volume;
                 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
-                float vlf = volume * float_from_gain(gain_minifloat_unpack_left(vlr));
-                float vrf = volume * float_from_gain(gain_minifloat_unpack_right(vlr));
+                float vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
+                float vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
+
+                track->processMuteEvent_l(mAudioFlinger->getOrCreateAudioManager(),
+                    /*muteState=*/{masterVolume == 0.f,
+                                   mStreamTypes[track->streamType()].volume == 0.f,
+                                   mStreamTypes[track->streamType()].mute,
+                                   track->isPlaybackRestricted(),
+                                   vlf == 0.f && vrf == 0.f,
+                                   vh == 0.f});
+
+                vlf *= volume;
+                vrf *= volume;
 
                 track->setFinalVolume((vlf + vrf) / 2.f);
                 ++fastTracks;
@@ -5546,12 +5551,6 @@
                 v = 0;
             }
 
-            track->processMuteEvent_l(mAudioFlinger->getOrCreateAudioManager(),
-                /*muteState=*/{masterVolume == 0.f,
-                               mStreamTypes[track->streamType()].volume == 0.f,
-                               mStreamTypes[track->streamType()].mute,
-                               track->isPlaybackRestricted()});
-
             handleVoipVolume_l(&v);
 
             if (track->isPausing()) {
@@ -5571,6 +5570,15 @@
                     ALOGV("Track right volume out of range: %.3g", vrf);
                     vrf = GAIN_FLOAT_UNITY;
                 }
+
+                track->processMuteEvent_l(mAudioFlinger->getOrCreateAudioManager(),
+                    /*muteState=*/{masterVolume == 0.f,
+                                   mStreamTypes[track->streamType()].volume == 0.f,
+                                   mStreamTypes[track->streamType()].mute,
+                                   track->isPlaybackRestricted(),
+                                   vlf == 0.f && vrf == 0.f,
+                                   vh == 0.f});
+
                 // now apply the master volume and stream type volume and shaper volume
                 vlf *= v * vh;
                 vrf *= v * vh;
@@ -6157,28 +6165,32 @@
 {
     float left, right;
 
     // Ensure volumeshaper state always advances even when muted.
     const sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
     const auto [shaperVolume, shaperActive] = track->getVolumeHandler()->getVolume(
             proxy->framesReleased());
     mVolumeShaperActive = shaperActive;
 
+    gain_minifloat_packed_t vlr = proxy->getVolumeLR();
+    left = float_from_gain(gain_minifloat_unpack_left(vlr));
+    right = float_from_gain(gain_minifloat_unpack_right(vlr));
+
+    const bool clientVolumeMute = (left == 0.f && right == 0.f);
+
     if (mMasterMute || mStreamTypes[track->streamType()].mute || track->isPlaybackRestricted()) {
         left = right = 0;
     } else {
         float typeVolume = mStreamTypes[track->streamType()].volume;
         const float v = mMasterVolume * typeVolume * shaperVolume;
 
-        gain_minifloat_packed_t vlr = proxy->getVolumeLR();
-        left = float_from_gain(gain_minifloat_unpack_left(vlr));
         if (left > GAIN_FLOAT_UNITY) {
             left = GAIN_FLOAT_UNITY;
         }
-        left *= v * mMasterBalanceLeft; // DirectOutputThread balance applied as track volume
-        right = float_from_gain(gain_minifloat_unpack_right(vlr));
         if (right > GAIN_FLOAT_UNITY) {
             right = GAIN_FLOAT_UNITY;
         }
+
+        left *= v * mMasterBalanceLeft; // DirectOutputThread balance applied as track volume
         right *= v * mMasterBalanceRight;
     }
 
@@ -6186,7 +6198,9 @@
         /*muteState=*/{mMasterMute,
                        mStreamTypes[track->streamType()].volume == 0.f,
                        mStreamTypes[track->streamType()].mute,
-                       track->isPlaybackRestricted()});
+                       track->isPlaybackRestricted(),
+                       clientVolumeMute,
+                       shaperVolume == 0.f});
 
     if (lastTrack) {
         track->setFinalVolume((left + right) / 2.f);
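
Each Threads.cpp hunk above moves processMuteEvent_l below the point where the raw client gains (vlf/vrf) and the VolumeShaper gain are unpacked, because the mute state now carries two additional flags derived from those values. A hedged sketch of the expanded state; the struct and function names here are illustrative stand-ins, not the real AudioFlinger definitions:

    // Illustrative stand-in for the expanded mute state implied by the hunks
    // above; field names are guesses, not the real type.
    struct TrackMuteStateSketch {
        bool masterMute;          // master volume is 0 (or mMasterMute set)
        bool streamVolumeZero;    // stream type volume == 0.f
        bool streamMuted;         // stream type mute flag
        bool playbackRestricted;  // track->isPlaybackRestricted()
        bool clientVolumeZero;    // new: raw client L/R gains both 0.f
        bool shaperVolumeZero;    // new: VolumeShaper gain ramped to 0.f
    };

    // The two new flags must be computed from the *raw* client gains, i.e.
    // before master/stream volume is multiplied in; this is why the
    // processMuteEvent_l calls were moved later in each code path.
    static TrackMuteStateSketch makeMuteState(
            bool masterMute, float streamVolume, bool streamMuted,
            bool restricted, float vlf, float vrf, float shaperVolume) {
        return {masterMute, streamVolume == 0.f, streamMuted, restricted,
                vlf == 0.f && vrf == 0.f, shaperVolume == 0.f};
    }
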
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index e12d8be..5a1ee9b 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -331,7 +331,7 @@
     status_t res =
             gbLocker.lockAsync(
                     GraphicBuffer::USAGE_SW_READ_OFTEN | GraphicBuffer::USAGE_SW_WRITE_RARELY,
-                    &mapped, fenceFd.get());
+                    &mapped, fenceFd.release());
     if (res != OK) {
         ALOGE("%s: Failed to lock the buffer: %s (%d)", __FUNCTION__, strerror(-res), res);
         return res;
@@ -507,10 +507,6 @@
         mStreamUnpreparable = true;
     }
 
-    if (res != OK) {
-        close(anwReleaseFence);
-    }
-
     *releaseFenceOut = releaseFence;
 
     return res;
@@ -1309,7 +1305,7 @@
     void* mapped = nullptr;
     base::unique_fd fenceFd(dup(fence));
     status_t res = graphicBuffer->lockAsync(GraphicBuffer::USAGE_SW_READ_OFTEN, &mapped,
-            fenceFd.get());
+            fenceFd.release());
     if (res != OK) {
         ALOGE("%s: Failed to lock the buffer: %s (%d)", __FUNCTION__, strerror(-res), res);
         return;
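
Both Camera3OutputStream changes fix the same descriptor-ownership bug: the switch from fenceFd.get() to fenceFd.release() implies that lockAsync() takes ownership of (and eventually closes) the fence fd, so base::unique_fd's destructor must not close the same descriptor a second time. A small sketch of the rule, with a hypothetical consumeFd() standing in for lockAsync():

    #include <unistd.h>
    #include <android-base/unique_fd.h>

    // Hypothetical callee that takes ownership of fd and closes it,
    // the way lockAsync() consumes the fence fd.
    static void consumeFd(int fd) {
        if (fd >= 0) close(fd);
    }

    static void handOff(int fence) {
        android::base::unique_fd fenceFd(dup(fence));
        consumeFd(fenceFd.release());  // ownership transferred exactly once
        // consumeFd(fenceFd.get());   // BUG: fd closed twice (callee + dtor)
    }
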
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index b4610bc..4d18876 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -271,10 +271,9 @@
             snprintf(buffer, SIZE, "        Id: %lld\n", (long long)infos[j].clientId);
             result.append(buffer);
 
-            std::string clientName;
-            Status status = infos[j].client->getName(&clientName);
-            if (!status.isOk()) {
-                clientName = "<unknown client>";
+            std::string clientName = "<unknown client>";
+            if (infos[j].client != nullptr) {
+                infos[j].client->getName(&clientName);
             }
             snprintf(buffer, SIZE, "        Name: %s\n", clientName.c_str());
             result.append(buffer);
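
The ResourceManagerService change hardens the dump path against clients that have already died: the fallback name is installed first, and the remote getName() call is attempted only when the client proxy is non-null. A minimal sketch of the pattern, with illustrative stand-in types:

    #include <memory>
    #include <string>

    // Stand-in for the remote client interface; getName() mirrors the
    // out-parameter style of the real binder call.
    struct ClientStub {
        bool getName(std::string* out) const { *out = "codec.client"; return true; }
    };

    static std::string clientNameForDump(const std::shared_ptr<ClientStub>& client) {
        std::string name = "<unknown client>";  // fallback survives a dead client
        if (client != nullptr) {
            client->getName(&name);             // only call into a live client
        }
        return name;
    }
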