Merge "CCodecBuffers: fix null buffer PCM conversion"
diff --git a/apex/manifest.json b/apex/manifest.json
index 5d72031..4b75b04 100644
--- a/apex/manifest.json
+++ b/apex/manifest.json
@@ -1,6 +1,6 @@
 {
   "name": "com.android.media",
-  "version": 319999900,
+  "version": 339990000,
   "requireNativeLibs": [
     "libandroid.so",
     "libbinder_ndk.so",
diff --git a/apex/manifest_codec.json b/apex/manifest_codec.json
index b0d962d..fbcbb69 100644
--- a/apex/manifest_codec.json
+++ b/apex/manifest_codec.json
@@ -1,6 +1,6 @@
 {
   "name": "com.android.media.swcodec",
-  "version": 319999900,
+  "version": 339990000,
   "requireNativeLibs": [
     ":sphal"
   ]
diff --git a/media/TEST_MAPPING b/media/TEST_MAPPING
index 41fe080..3e79897 100644
--- a/media/TEST_MAPPING
+++ b/media/TEST_MAPPING
@@ -1,24 +1,7 @@
 // for frameworks/av/media
 {
-    "presubmit-large": [
-        // runs whenever we change something in this tree
-        {
-            "name": "CtsMediaCodecTestCases",
-            "options": [
-                {
-                    "include-filter": "android.media.codec.cts.EncodeDecodeTest"
-                }
-            ]
-        },
-        {
-            "name": "CtsMediaCodecTestCases",
-            "options": [
-                {
-                    "include-filter": "android.media.codec.cts.DecodeEditEncodeTest"
-                }
-            ]
-        }
-    ],
+    // TODO (b/229286407) Add EncodeDecodeTest and DecodeEditEncodeTest to
+    // presubmit-large once issues in cuttlefish are fixed
     "presubmit": [
         {
             "name": "GtsMediaTestCases",
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index 39bbe1c..971b009 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -539,9 +539,10 @@
 
     std::shared_ptr<C2GraphicBlock> block;
     uint32_t format = HAL_PIXEL_FORMAT_YV12;
+    std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects;
     if (img->fmt == AOM_IMG_FMT_I42016) {
         IntfImpl::Lock lock = mIntf->lock();
-        std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects = mIntf->getDefaultColorAspects_l();
+        defaultColorAspects = mIntf->getDefaultColorAspects_l();
 
         if (defaultColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
             defaultColorAspects->matrix == C2Color::MATRIX_BT2020 &&
@@ -590,7 +591,8 @@
         if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
             convertYUV420Planar16ToY410OrRGBA1010102(
                     (uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2, srcUStride / 2,
-                    srcVStride / 2, dstYStride / sizeof(uint32_t), mWidth, mHeight);
+                    srcVStride / 2, dstYStride / sizeof(uint32_t), mWidth, mHeight,
+                    std::static_pointer_cast<const C2ColorAspectsStruct>(defaultColorAspects));
         } else {
             convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
                                         srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride,
diff --git a/media/codec2/components/avc/C2SoftAvcEnc.cpp b/media/codec2/components/avc/C2SoftAvcEnc.cpp
index d65ffa5..b2dbb1b 100644
--- a/media/codec2/components/avc/C2SoftAvcEnc.cpp
+++ b/media/codec2/components/avc/C2SoftAvcEnc.cpp
@@ -1515,7 +1515,8 @@
             vPlane = uPlane + yPlaneSize / 4;
             yStride = width;
             uStride = vStride = yStride / 2;
-            ConvertRGBToPlanarYUV(yPlane, yStride, height, conversionBuffer.size(), *input);
+            ConvertRGBToPlanarYUV(yPlane, yStride, height, conversionBuffer.size(), *input,
+                                  mColorAspects->matrix, mColorAspects->range);
             break;
         }
         case C2PlanarLayout::TYPE_YUV: {
diff --git a/media/codec2/components/base/Android.bp b/media/codec2/components/base/Android.bp
index 160e250..809f942 100644
--- a/media/codec2/components/base/Android.bp
+++ b/media/codec2/components/base/Android.bp
@@ -27,6 +27,11 @@
         "libsfplugin_ccodec_utils",
     ],
 
+    header_libs: [
+        "libarect_headers",
+        "libnativewindow_headers",
+    ],
+
     shared_libs: [
         "libcutils", // for properties
         "liblog", // for ALOG
@@ -61,6 +66,11 @@
         "libsfplugin_ccodec_utils",
     ],
 
+    header_libs: [
+        "libarect_headers",
+        "libnativewindow_headers",
+    ],
+
     shared_libs: [
         "libcodec2_soft_common",
         "libcutils", // for properties
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index 1eec8f9..e81b70a 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "SimpleC2Component"
 #include <log/log.h>
 
+#include <android/hardware_buffer.h>
 #include <cutils/properties.h>
 #include <media/stagefright/foundation/AMessage.h>
 
@@ -26,19 +27,14 @@
 #include <C2Config.h>
 #include <C2Debug.h>
 #include <C2PlatformSupport.h>
+#include <Codec2BufferUtils.h>
+#include <Codec2CommonUtils.h>
 #include <SimpleC2Component.h>
 
 namespace android {
 constexpr uint8_t kNeutralUVBitDepth8 = 128;
 constexpr uint16_t kNeutralUVBitDepth10 = 512;
 
-bool isAtLeastT() {
-    char deviceCodeName[PROP_VALUE_MAX];
-    __system_property_get("ro.build.version.codename", deviceCodeName);
-    return android_get_device_api_level() >= __ANDROID_API_T__ ||
-           !strcmp(deviceCodeName, "Tiramisu");
-}
-
 void convertYUV420Planar8ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint8_t *srcY,
                                 const uint8_t *srcU, const uint8_t *srcV, size_t srcYStride,
                                 size_t srcUStride, size_t srcVStride, size_t dstYStride,
@@ -137,11 +133,126 @@
         dst += dstStride * 2;
     }
 }
+
+namespace {
+
+static C2ColorAspectsStruct FillMissingColorAspects(
+        std::shared_ptr<const C2ColorAspectsStruct> aspects,
+        int32_t width, int32_t height) {
+    C2ColorAspectsStruct _aspects;
+    if (aspects) {
+        _aspects = *aspects;
+    }
+
+    // use matrix for conversion
+    if (_aspects.matrix == C2Color::MATRIX_UNSPECIFIED) {
+        // if not specified, deduce matrix from primaries
+        if (_aspects.primaries == C2Color::PRIMARIES_UNSPECIFIED) {
+            // if those are also not specified, deduce primaries first from transfer, then from
+            // width and height
+            if (_aspects.transfer == C2Color::TRANSFER_ST2084
+                    || _aspects.transfer == C2Color::TRANSFER_HLG) {
+                _aspects.primaries = C2Color::PRIMARIES_BT2020;
+            } else if (width >= 3840 || height >= 3840 || width * (int64_t)height >= 3840 * 1634) {
+                // TODO: stagefright defaults to BT.2020 for UHD, but perhaps we should default to
+                // BT.709 for non-HDR 10-bit UHD content
+                // (see media/libstagefright/foundation/ColorUtils.cpp)
+                _aspects.primaries = C2Color::PRIMARIES_BT2020;
+            } else if ((width <= 720 && height <= 576)
+                    || (height <= 720 && width <= 576)) {
+                // note: it does not actually matter whether to use 525 or 625 here as the
+                // conversion is the same
+                _aspects.primaries = C2Color::PRIMARIES_BT601_625;
+            } else {
+                _aspects.primaries = C2Color::PRIMARIES_BT709;
+            }
+        }
+
+        switch (_aspects.primaries) {
+        case C2Color::PRIMARIES_BT601_525:
+        case C2Color::PRIMARIES_BT601_625:
+            _aspects.matrix = C2Color::MATRIX_BT601;
+            break;
+
+        case C2Color::PRIMARIES_BT709:
+            _aspects.matrix = C2Color::MATRIX_BT709;
+            break;
+
+        case C2Color::PRIMARIES_BT2020:
+        default:
+            _aspects.matrix = C2Color::MATRIX_BT2020;
+        }
+    }
+
+    return _aspects;
+}
+
+// matrix conversion coefficients
+// (see media/libstagefright/colorconverter/ColorConverter.cpp for more details)
+struct Coeffs {
+    int32_t _y, _r_v, _g_u, _g_v, _b_u, _c16;
+};
+
+static const struct Coeffs GetCoeffsForAspects(const C2ColorAspectsStruct &aspects) {
+    bool isFullRange = aspects.range == C2Color::RANGE_FULL;
+
+    switch (aspects.matrix) {
+    case C2Color::MATRIX_BT601:
+        /**
+         * BT.601:  K_R = 0.299;  K_B = 0.114
+         */
+        if (isFullRange) {
+            return Coeffs { 1024, 1436, 352, 731, 1815, 0 };
+        } else {
+            return Coeffs { 1196, 1639, 402, 835, 2072, 64 };
+        }
+        break;
+
+    case C2Color::MATRIX_BT709:
+        /**
+         * BT.709:  K_R = 0.2126;  K_B = 0.0722
+         */
+        if (isFullRange) {
+            return Coeffs { 1024, 1613, 192, 479, 1900, 0 };
+        } else {
+            return Coeffs { 1196, 1841, 219, 547, 2169, 64 };
+        }
+        break;
+
+    case C2Color::MATRIX_BT2020:
+    default:
+        /**
+         * BT.2020:  K_R = 0.2627;  K_B = 0.0593
+         */
+        if (isFullRange) {
+            return Coeffs { 1024, 1510, 169, 585, 1927, 0 };
+        } else {
+            return Coeffs { 1196, 1724, 192, 668, 2200, 64 };
+        }
+    }
+}
+
+}
+
 #define CLIP3(min, v, max) (((v) < (min)) ? (min) : (((max) > (v)) ? (v) : (max)))
-void convertYUV420Planar16ToRGBA1010102(uint32_t *dst, const uint16_t *srcY, const uint16_t *srcU,
-                                        const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
-                                        size_t srcVStride, size_t dstStride, size_t width,
-                                        size_t height) {
+void convertYUV420Planar16ToRGBA1010102(
+        uint32_t *dst, const uint16_t *srcY, const uint16_t *srcU,
+        const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
+        size_t srcVStride, size_t dstStride, size_t width,
+        size_t height,
+        std::shared_ptr<const C2ColorAspectsStruct> aspects) {
+
+    C2ColorAspectsStruct _aspects = FillMissingColorAspects(aspects, width, height);
+
+    struct Coeffs coeffs = GetCoeffsForAspects(_aspects);
+
+    int32_t _y = coeffs._y;
+    int32_t _b_u = coeffs._b_u;
+    int32_t _neg_g_u = -coeffs._g_u;
+    int32_t _neg_g_v = -coeffs._g_v;
+    int32_t _r_v = coeffs._r_v;
+    int32_t _c16 = coeffs._c16;
+
     // Converting two lines at a time, slightly faster
     for (size_t y = 0; y < height; y += 2) {
         uint32_t *dstTop = (uint32_t *)dst;
@@ -151,25 +262,6 @@
         uint16_t *uSrc = (uint16_t *)srcU;
         uint16_t *vSrc = (uint16_t *)srcV;
 
-        // BT.2020 Limited Range conversion
-
-        // B = 1.168  *(Y - 64) + 2.148  *(U - 512)
-        // G = 1.168  *(Y - 64) - 0.652  *(V - 512) - 0.188  *(U - 512)
-        // R = 1.168  *(Y - 64) + 1.683  *(V - 512)
-
-        // B = 1196/1024  *(Y - 64) + 2200/1024  *(U - 512)
-        // G = .................... -  668/1024  *(V - 512) - 192/1024  *(U - 512)
-        // R = .................... + 1723/1024  *(V - 512)
-
-        // min_B = (1196  *(- 64) + 2200  *(- 512)) / 1024 = -1175
-        // min_G = (1196  *(- 64) - 668  *(1023 - 512) - 192  *(1023 - 512)) / 1024 = -504
-        // min_R = (1196  *(- 64) + 1723  *(- 512)) / 1024 = -937
-
-        // max_B = (1196  *(1023 - 64) + 2200  *(1023 - 512)) / 1024 = 2218
-        // max_G = (1196  *(1023 - 64) - 668  *(- 512) - 192  *(- 512)) / 1024 = 1551
-        // max_R = (1196  *(1023 - 64) + 1723  *(1023 - 512)) / 1024 = 1980
-
-        int32_t mY = 1196, mU_B = 2200, mV_G = -668, mV_R = 1723, mU_G = -192;
         for (size_t x = 0; x < width; x += 2) {
             int32_t u, v, y00, y01, y10, y11;
             u = *uSrc - 512;
@@ -177,22 +269,22 @@
             v = *vSrc - 512;
             vSrc += 1;
 
-            y00 = *ySrcTop - 64;
+            y00 = *ySrcTop - _c16;
             ySrcTop += 1;
-            y01 = *ySrcTop - 64;
+            y01 = *ySrcTop - _c16;
             ySrcTop += 1;
-            y10 = *ySrcBot - 64;
+            y10 = *ySrcBot - _c16;
             ySrcBot += 1;
-            y11 = *ySrcBot - 64;
+            y11 = *ySrcBot - _c16;
             ySrcBot += 1;
 
-            int32_t u_b = u * mU_B;
-            int32_t u_g = u * mU_G;
-            int32_t v_g = v * mV_G;
-            int32_t v_r = v * mV_R;
+            int32_t u_b = u * _b_u;
+            int32_t u_g = u * _neg_g_u;
+            int32_t v_g = v * _neg_g_v;
+            int32_t v_r = v * _r_v;
 
             int32_t yMult, b, g, r;
-            yMult = y00 * mY;
+            yMult = y00 * _y + 512;
             b = (yMult + u_b) / 1024;
             g = (yMult + v_g + u_g) / 1024;
             r = (yMult + v_r) / 1024;
@@ -201,7 +293,7 @@
             r = CLIP3(0, r, 1023);
             *dstTop++ = 3 << 30 | (b << 20) | (g << 10) | r;
 
-            yMult = y01 * mY;
+            yMult = y01 * _y + 512;
             b = (yMult + u_b) / 1024;
             g = (yMult + v_g + u_g) / 1024;
             r = (yMult + v_r) / 1024;
@@ -210,7 +302,7 @@
             r = CLIP3(0, r, 1023);
             *dstTop++ = 3 << 30 | (b << 20) | (g << 10) | r;
 
-            yMult = y10 * mY;
+            yMult = y10 * _y + 512;
             b = (yMult + u_b) / 1024;
             g = (yMult + v_g + u_g) / 1024;
             r = (yMult + v_r) / 1024;
@@ -219,7 +311,7 @@
             r = CLIP3(0, r, 1023);
             *dstBot++ = 3 << 30 | (b << 20) | (g << 10) | r;
 
-            yMult = y11 * mY;
+            yMult = y11 * _y + 512;
             b = (yMult + u_b) / 1024;
             g = (yMult + v_g + u_g) / 1024;
             r = (yMult + v_r) / 1024;
@@ -236,19 +328,21 @@
     }
 }
 
-void convertYUV420Planar16ToY410OrRGBA1010102(uint32_t *dst, const uint16_t *srcY,
-                                              const uint16_t *srcU, const uint16_t *srcV,
-                                              size_t srcYStride, size_t srcUStride,
-                                              size_t srcVStride, size_t dstStride, size_t width,
-                                              size_t height) {
+void convertYUV420Planar16ToY410OrRGBA1010102(
+        uint32_t *dst, const uint16_t *srcY,
+        const uint16_t *srcU, const uint16_t *srcV,
+        size_t srcYStride, size_t srcUStride,
+        size_t srcVStride, size_t dstStride, size_t width, size_t height,
+        std::shared_ptr<const C2ColorAspectsStruct> aspects) {
     if (isAtLeastT()) {
         convertYUV420Planar16ToRGBA1010102(dst, srcY, srcU, srcV, srcYStride, srcUStride,
-                                           srcVStride, dstStride, width, height);
+                                           srcVStride, dstStride, width, height, aspects);
     } else {
         convertYUV420Planar16ToY410(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride,
                                     dstStride, width, height);
     }
 }
+
 void convertYUV420Planar16ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint16_t *srcY,
                                  const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
                                  size_t srcUStride, size_t srcVStride, size_t dstYStride,
@@ -885,25 +979,14 @@
     // Save supported hal pixel formats for bit depth of 10, the first time this is called
     if (!mBitDepth10HalPixelFormats.size()) {
         std::vector<int> halPixelFormats;
-        if (isAtLeastT()) {
-            halPixelFormats.push_back(HAL_PIXEL_FORMAT_YCBCR_P010);
-        }
+        halPixelFormats.push_back(HAL_PIXEL_FORMAT_YCBCR_P010);
+
        // since allowRGBA1010102 can change in each call, but mBitDepth10HalPixelFormats
         // is populated only once, allowRGBA1010102 is not considered at this stage.
         halPixelFormats.push_back(HAL_PIXEL_FORMAT_RGBA_1010102);
 
         for (int halPixelFormat : halPixelFormats) {
-            std::shared_ptr<C2GraphicBlock> block;
-
-            uint32_t gpuConsumerFlags = halPixelFormat == HAL_PIXEL_FORMAT_RGBA_1010102
-                                                ? C2AndroidMemoryUsage::HW_TEXTURE_READ
-                                                : 0;
-            C2MemoryUsage usage = {C2MemoryUsage::CPU_READ | gpuConsumerFlags,
-                                   C2MemoryUsage::CPU_WRITE};
-            // TODO(b/214411172) Use AHardwareBuffer_isSupported once it supports P010
-            c2_status_t status =
-                    mOutputBlockPool->fetchGraphicBlock(320, 240, halPixelFormat, usage, &block);
-            if (status == C2_OK) {
+            if (isHalPixelFormatSupported((AHardwareBuffer_Format)halPixelFormat)) {
                 mBitDepth10HalPixelFormats.push_back(halPixelFormat);
             }
         }
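
For reference, the Q10 fixed-point entries returned by GetCoeffsForAspects() above follow
directly from the matrix luma weights (K_R, K_B) and the 10-bit limited/full-range excursions.
A standalone sketch (not part of this change) that reproduces the BT.709 limited-range row:

    // Sketch: derive {_y, _r_v, _g_u, _g_v, _b_u, _c16} for BT.709 limited range.
    #include <cmath>
    #include <cstdio>

    int main() {
        const double kR = 0.2126, kB = 0.0722;  // BT.709 luma weights
        const double kG = 1.0 - kR - kB;
        const double yScale = 1023.0 / 876.0;   // 10-bit limited luma range [64, 940]
        const double cScale = 1023.0 / 896.0;   // 10-bit limited chroma range [64, 960]
        auto q = [](double v) { return std::lround(v * 1024.0); };  // Q10 fixed point

        std::printf("_y   = %ld\n", q(yScale));                           // 1196
        std::printf("_r_v = %ld\n", q(2 * (1 - kR) * cScale));            // 1841
        std::printf("_g_u = %ld\n", q(2 * kB * (1 - kB) / kG * cScale));  // 219
        std::printf("_g_v = %ld\n", q(2 * kR * (1 - kR) / kG * cScale));  // 547
        std::printf("_b_u = %ld\n", q(2 * (1 - kB) * cScale));            // 2169
        std::printf("_c16 = %d\n", 64);                                   // 10-bit black offset
        return 0;
    }
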
diff --git a/media/codec2/components/base/include/SimpleC2Component.h b/media/codec2/components/base/include/SimpleC2Component.h
index 3172f29..7600c5b 100644
--- a/media/codec2/components/base/include/SimpleC2Component.h
+++ b/media/codec2/components/base/include/SimpleC2Component.h
@@ -26,28 +26,35 @@
 #include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/foundation/Mutexed.h>
 
+struct C2ColorAspectsStruct;
+
 namespace android {
-bool isAtLeastT();
+
 void convertYUV420Planar8ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint8_t *srcY,
                                 const uint8_t *srcU, const uint8_t *srcV, size_t srcYStride,
                                 size_t srcUStride, size_t srcVStride, size_t dstYStride,
                                 size_t dstUVStride, uint32_t width, uint32_t height,
                                 bool isMonochrome = false);
-void convertYUV420Planar16ToY410OrRGBA1010102(uint32_t *dst, const uint16_t *srcY,
-                                              const uint16_t *srcU, const uint16_t *srcV,
-                                              size_t srcYStride, size_t srcUStride,
-                                              size_t srcVStride, size_t dstStride, size_t width,
-                                              size_t height);
+
+void convertYUV420Planar16ToY410OrRGBA1010102(
+        uint32_t *dst, const uint16_t *srcY,
+        const uint16_t *srcU, const uint16_t *srcV,
+        size_t srcYStride, size_t srcUStride,
+        size_t srcVStride, size_t dstStride, size_t width, size_t height,
+        std::shared_ptr<const C2ColorAspectsStruct> aspects = nullptr);
+
 void convertYUV420Planar16ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint16_t *srcY,
                                  const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
                                  size_t srcUStride, size_t srcVStride, size_t dstYStride,
                                  size_t dstUVStride, size_t width, size_t height,
                                  bool isMonochrome = false);
+
 void convertYUV420Planar16ToP010(uint16_t *dstY, uint16_t *dstUV, const uint16_t *srcY,
                                  const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
                                  size_t srcUStride, size_t srcVStride, size_t dstYStride,
                                  size_t dstUVStride, size_t width, size_t height,
                                  bool isMonochrome = false);
+
 class SimpleC2Component
         : public C2Component, public std::enable_shared_from_this<SimpleC2Component> {
 public:
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index 701c22c..dc467c8 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -21,6 +21,7 @@
 #include <C2Debug.h>
 #include <C2PlatformSupport.h>
 #include <Codec2BufferUtils.h>
+#include <Codec2CommonUtils.h>
 #include <Codec2Mapper.h>
 #include <SimpleC2Interface.h>
 #include <log/log.h>
@@ -190,7 +191,7 @@
               .build());
 
     std::vector<uint32_t> pixelFormats = {HAL_PIXEL_FORMAT_YCBCR_420_888};
-    if (isAtLeastT()) {
+    if (isHalPixelFormatSupported((AHardwareBuffer_Format)HAL_PIXEL_FORMAT_YCBCR_P010)) {
         pixelFormats.push_back(HAL_PIXEL_FORMAT_YCBCR_P010);
     }
     // TODO: support more formats?
@@ -635,10 +636,10 @@
 
   std::shared_ptr<C2GraphicBlock> block;
   uint32_t format = HAL_PIXEL_FORMAT_YV12;
+  std::shared_ptr<C2StreamColorAspectsInfo::output> codedColorAspects;
   if (buffer->bitdepth == 10) {
     IntfImpl::Lock lock = mIntf->lock();
-    std::shared_ptr<C2StreamColorAspectsInfo::output> codedColorAspects =
-        mIntf->getColorAspects_l();
+    codedColorAspects = mIntf->getColorAspects_l();
     bool allowRGBA1010102 = false;
     if (codedColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
         codedColorAspects->matrix == C2Color::MATRIX_BT2020 &&
@@ -712,9 +713,11 @@
     const uint16_t *srcV = (const uint16_t *)buffer->plane[2];
 
     if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
-        convertYUV420Planar16ToY410OrRGBA1010102((uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
-                                                 srcUStride / 2, srcVStride / 2,
-                                                 dstYStride / sizeof(uint32_t), mWidth, mHeight);
+        convertYUV420Planar16ToY410OrRGBA1010102(
+                (uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
+                srcUStride / 2, srcVStride / 2,
+                dstYStride / sizeof(uint32_t), mWidth, mHeight,
+                std::static_pointer_cast<const C2ColorAspectsStruct>(codedColorAspects));
     } else if (format == HAL_PIXEL_FORMAT_YCBCR_P010) {
         convertYUV420Planar16ToP010((uint16_t *)dstY, (uint16_t *)dstU, srcY, srcU, srcV,
                                     srcYStride / 2, srcUStride / 2, srcVStride / 2, dstYStride / 2,
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.cpp b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
index 4f5caec..9e64868 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
@@ -902,7 +902,8 @@
             yStride = width;
             uStride = vStride = yStride / 2;
             ConvertRGBToPlanarYUV(yPlane, yStride, height,
-                                  conversionBuffer.size(), *input);
+                                  conversionBuffer.size(), *input,
+                                  mColorAspects->matrix, mColorAspects->range);
             break;
         }
         case C2PlanarLayout::TYPE_YUV: {
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index c2ccfa0..b50fe7d 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -25,6 +25,7 @@
 #include <C2Debug.h>
 #include <C2PlatformSupport.h>
 #include <Codec2BufferUtils.h>
+#include <Codec2CommonUtils.h>
 #include <SimpleC2Interface.h>
 
 #include "C2SoftVpxDec.h"
@@ -219,7 +220,7 @@
         // TODO: support more formats?
         std::vector<uint32_t> pixelFormats = {HAL_PIXEL_FORMAT_YCBCR_420_888};
 #ifdef VP9
-        if (isAtLeastT()) {
+        if (isHalPixelFormatSupported((AHardwareBuffer_Format)HAL_PIXEL_FORMAT_YCBCR_P010)) {
             pixelFormats.push_back(HAL_PIXEL_FORMAT_YCBCR_P010);
         }
 #endif
@@ -687,9 +688,10 @@
 
     std::shared_ptr<C2GraphicBlock> block;
     uint32_t format = HAL_PIXEL_FORMAT_YV12;
+    std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects;
     if (img->fmt == VPX_IMG_FMT_I42016) {
         IntfImpl::Lock lock = mIntf->lock();
-        std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects = mIntf->getDefaultColorAspects_l();
+        defaultColorAspects = mIntf->getDefaultColorAspects_l();
         bool allowRGBA1010102 = false;
         if (defaultColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
             defaultColorAspects->matrix == C2Color::MATRIX_BT2020 &&
@@ -759,11 +761,14 @@
                 queue->entries.push_back(
                         [dstY, srcY, srcU, srcV,
                          srcYStride, srcUStride, srcVStride, dstYStride,
-                         width = mWidth, height = std::min(mHeight - i, kHeight)] {
+                         width = mWidth, height = std::min(mHeight - i, kHeight),
+                         defaultColorAspects] {
                             convertYUV420Planar16ToY410OrRGBA1010102(
                                     (uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
                                     srcUStride / 2, srcVStride / 2, dstYStride / sizeof(uint32_t),
-                                    width, height);
+                                    width, height,
+                                    std::static_pointer_cast<const C2ColorAspectsStruct>(
+                                            defaultColorAspects));
                         });
                 srcY += srcYStride / 2 * kHeight;
                 srcU += srcUStride / 2 * (kHeight / 2);
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.cpp b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
index 617769b..f99ee24 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
@@ -733,8 +733,14 @@
     switch (layout.type) {
         case C2PlanarLayout::TYPE_RGB:
         case C2PlanarLayout::TYPE_RGBA: {
+            std::shared_ptr<C2StreamColorAspectsInfo::output> colorAspects;
+            {
+                IntfImpl::Lock lock = mIntf->lock();
+                colorAspects = mIntf->getCodedColorAspects_l();
+            }
             ConvertRGBToPlanarYUV(mConversionBuffer.data(), stride, vstride,
-                                  mConversionBuffer.size(), *rView.get());
+                                  mConversionBuffer.size(), *rView.get(),
+                                  colorAspects->matrix, colorAspects->range);
             vpx_img_wrap(&raw_frame, VPX_IMG_FMT_I420, width, height,
                          mStrideAlign, mConversionBuffer.data());
             break;
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.h b/media/codec2/components/vpx/C2SoftVpxEnc.h
index e296c8f..714fadb 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.h
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.h
@@ -265,6 +265,9 @@
     std::shared_ptr<C2StreamTemporalLayeringTuning::output> getTemporalLayers_l() const {
         return mLayering;
     }
+    std::shared_ptr<C2StreamColorAspectsInfo::output> getCodedColorAspects_l() const {
+        return mCodedColorAspects;
+    }
     uint32_t getSyncFramePeriod() const;
     static C2R ColorAspectsSetter(bool mayBlock, C2P<C2StreamColorAspectsInfo::input> &me);
     static C2R CodedColorAspectsSetter(bool mayBlock, C2P<C2StreamColorAspectsInfo::output> &me,
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 70e742c..fca8f4f 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -60,6 +60,7 @@
     enum drc_effect_type_t : int32_t;       ///< DRC effect type
     enum drc_album_mode_t : int32_t;        ///< DRC album mode
     enum hdr_dynamic_metadata_type_t : uint32_t;  ///< HDR dynamic metadata type
+    enum hdr_format_t : uint32_t;           ///< HDR format
     enum intra_refresh_mode_t : uint32_t;   ///< intra refresh modes
     enum level_t : uint32_t;                ///< coding level
     enum ordinal_key_t : uint32_t;          ///< work ordering keys
@@ -192,10 +193,9 @@
     kParamIndexPictureType,
     // deprecated
     kParamIndexHdr10PlusMetadata,
-
     kParamIndexPictureQuantization,
-
     kParamIndexHdrDynamicMetadata,
+    kParamIndexHdrFormat,
 
     /* ------------------------------------ video components ------------------------------------ */
 
@@ -1664,6 +1664,34 @@
 constexpr char C2_PARAMKEY_INPUT_HDR_DYNAMIC_INFO[] = "input.hdr-dynamic-info";
 constexpr char C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO[] = "output.hdr-dynamic-info";
 
+/**
+ * HDR Format
+ */
+C2ENUM(C2Config::hdr_format_t, uint32_t,
+    UNKNOWN,     ///< HDR format not known (default)
+    SDR,         ///< not HDR (SDR)
+    HLG,         ///< HLG
+    HDR10,       ///< HDR10
+    HDR10_PLUS,  ///< HDR10+
+);
+
+/**
+ * HDR Format Info
+ *
+ * This information may be present during configuration to allow encoders to
+ * prepare encoding certain HDR formats. When this information is not present
+ * before start, encoders should determine the HDR format based on the available
+ * HDR metadata on the first input frame.
+ *
+ * While this information is optional, it is not a hint. When present, encoders
+ * that do not support dynamic reconfiguration do not need to switch to the HDR
+ * format based on the metadata on the first input frame.
+ */
+typedef C2StreamParam<C2Info, C2SimpleValueStruct<C2EasyEnum<C2Config::hdr_format_t>>,
+                kParamIndexHdrFormat>
+        C2StreamHdrFormatInfo;
+constexpr char C2_PARAMKEY_HDR_FORMAT[] = "coded.hdr-format";
+
 /* ------------------------------------ block-based coding ----------------------------------- */
 
 /**
diff --git a/media/codec2/hidl/1.0/utils/types.cpp b/media/codec2/hidl/1.0/utils/types.cpp
index 72f7c43..5c24bd7 100644
--- a/media/codec2/hidl/1.0/utils/types.cpp
+++ b/media/codec2/hidl/1.0/utils/types.cpp
@@ -26,6 +26,7 @@
 #include <C2BlockInternal.h>
 #include <C2Buffer.h>
 #include <C2Component.h>
+#include <C2FenceFactory.h>
 #include <C2Param.h>
 #include <C2ParamInternal.h>
 #include <C2PlatformSupport.h>
@@ -759,17 +760,14 @@
 // Note: File descriptors are not duplicated. The original file descriptor must
 // not be closed before the transaction is complete.
 bool objcpy(hidl_handle* d, const C2Fence& s) {
-    (void)s; // TODO: implement s.fd()
-    int fenceFd = -1;
     d->setTo(nullptr);
-    if (fenceFd >= 0) {
-        native_handle_t *handle = native_handle_create(1, 0);
-        if (!handle) {
-            LOG(ERROR) << "Failed to create a native handle.";
-            return false;
-        }
-        handle->data[0] = fenceFd;
+    native_handle_t* handle = _C2FenceFactory::CreateNativeHandle(s);
+    if (handle) {
         d->setTo(handle, true /* owns */);
+//  } else if (!s.ready()) {
+//      // TODO: we should wait for unmarshallable fences but this may not be
+//      // the best place for it. We can safely ignore here as at this time
+//      // all fences used here are marshallable.
     }
     return true;
 }
@@ -1184,9 +1182,8 @@
 // Note: File descriptors are not duplicated. The original file descriptor must
 // not be closed before the transaction is complete.
 bool objcpy(C2Fence* d, const hidl_handle& s) {
-    // TODO: Implement.
-    (void)s;
-    *d = C2Fence();
+    const native_handle_t* handle = s.getNativeHandle();
+    *d = _C2FenceFactory::CreateFromNativeHandle(handle);
     return true;
 }
 
diff --git a/media/codec2/hidl/plugin/samples/SampleFilterPlugin.cpp b/media/codec2/hidl/plugin/samples/SampleFilterPlugin.cpp
index b942be7..5c13b0e 100644
--- a/media/codec2/hidl/plugin/samples/SampleFilterPlugin.cpp
+++ b/media/codec2/hidl/plugin/samples/SampleFilterPlugin.cpp
@@ -807,7 +807,8 @@
     // affectedParams
     {
         C2StreamHdrStaticInfo::output::PARAM_TYPE,
-        C2StreamHdr10PlusInfo::output::PARAM_TYPE,
+        C2StreamHdr10PlusInfo::output::PARAM_TYPE,  // will be deprecated
+        C2StreamHdrDynamicMetadataInfo::output::PARAM_TYPE,
         C2StreamColorAspectsInfo::output::PARAM_TYPE,
     },
 };
diff --git a/media/codec2/sfplugin/Android.bp b/media/codec2/sfplugin/Android.bp
index 39fa4fc..134bc53 100644
--- a/media/codec2/sfplugin/Android.bp
+++ b/media/codec2/sfplugin/Android.bp
@@ -37,6 +37,10 @@
         "media_ndk_headers",
     ],
 
+    static_libs: [
+        "SurfaceFlingerProperties",
+    ],
+
     shared_libs: [
         "android.hardware.cas.native@1.0",
         "android.hardware.drm@1.0",
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index c0a6816..94aa4ae 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -2396,7 +2396,8 @@
                         C2StreamColorAspectsInfo::output::PARAM_TYPE,
                         C2StreamDataSpaceInfo::output::PARAM_TYPE,
                         C2StreamHdrStaticInfo::output::PARAM_TYPE,
-                        C2StreamHdr10PlusInfo::output::PARAM_TYPE,
+                        C2StreamHdr10PlusInfo::output::PARAM_TYPE,  // will be deprecated
+                        C2StreamHdrDynamicMetadataInfo::output::PARAM_TYPE,
                         C2StreamPixelAspectRatioInfo::output::PARAM_TYPE,
                         C2StreamSurfaceScalingInfo::output::PARAM_TYPE
                     };
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 62a1d02..a00adde 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -880,6 +880,19 @@
         return UNKNOWN_ERROR;
     }
     const C2ConstGraphicBlock &block = blocks.front();
+    C2Fence c2fence = block.fence();
+    sp<Fence> fence = Fence::NO_FENCE;
+    // TODO: it's not sufficient to just check isHW() and then construct android::fence from it.
+    // Once C2Fence::type() is added, check the exact C2Fence type
+    if (c2fence.isHW()) {
+        int fenceFd = c2fence.fd();
+        fence = sp<Fence>::make(fenceFd);
+        if (!fence) {
+            ALOGE("[%s] Failed to allocate a fence", mName);
+            close(fenceFd);
+            return NO_MEMORY;
+        }
+    }
 
     // TODO: revisit this after C2Fence implementation.
     android::IGraphicBufferProducer::QueueBufferInput qbi(
@@ -892,7 +905,7 @@
                  blocks.front().crop().bottom()),
             videoScalingMode,
             transform,
-            Fence::NO_FENCE, 0);
+            fence, 0);
     if (hdrStaticInfo || hdrDynamicInfo) {
         HdrMetadata hdr;
         if (hdrStaticInfo) {
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 0f998dd..ae14ae7 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -400,10 +400,10 @@
     // Rotation
     // Note: SDK rotation is clock-wise, while C2 rotation is counter-clock-wise
     add(ConfigMapper(KEY_ROTATION, C2_PARAMKEY_VUI_ROTATION, "value")
-        .limitTo(D::VIDEO & D::CODED)
+        .limitTo((D::VIDEO | D::IMAGE) & D::CODED)
         .withMappers(negate, negate));
     add(ConfigMapper(KEY_ROTATION, C2_PARAMKEY_ROTATION, "value")
-        .limitTo(D::VIDEO & D::RAW)
+        .limitTo((D::VIDEO | D::IMAGE) & D::RAW)
         .withMappers(negate, negate));
 
     // android 'video-scaling'
@@ -513,6 +513,9 @@
     add(ConfigMapper("cta861.max-fall", C2_PARAMKEY_HDR_STATIC_INFO, "max-fall")
         .limitTo((D::VIDEO | D::IMAGE) & D::RAW));
 
+    add(ConfigMapper(C2_PARAMKEY_HDR_FORMAT, C2_PARAMKEY_HDR_FORMAT, "value")
+        .limitTo((D::VIDEO | D::IMAGE) & D::CODED & D::CONFIG));
+
     add(ConfigMapper(std::string(KEY_FEATURE_) + FEATURE_SecurePlayback,
                      C2_PARAMKEY_SECURE_MODE, "value"));
 
@@ -612,10 +615,30 @@
     add(ConfigMapper("csd-0",           C2_PARAMKEY_INIT_DATA,       "value")
         .limitTo(D::OUTPUT & D::READ));
 
-    add(ConfigMapper(KEY_HDR10_PLUS_INFO, C2_PARAMKEY_INPUT_HDR10_PLUS_INFO, "value")
+    deprecated(ConfigMapper(KEY_HDR10_PLUS_INFO, C2_PARAMKEY_INPUT_HDR10_PLUS_INFO, "value")
         .limitTo(D::VIDEO & D::PARAM & D::INPUT));
 
-    add(ConfigMapper(KEY_HDR10_PLUS_INFO, C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO, "value")
+    deprecated(ConfigMapper(KEY_HDR10_PLUS_INFO, C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO, "value")
+        .limitTo(D::VIDEO & D::OUTPUT));
+
+    add(ConfigMapper(
+            std::string(C2_PARAMKEY_INPUT_HDR_DYNAMIC_INFO) + ".type",
+            C2_PARAMKEY_INPUT_HDR_DYNAMIC_INFO, "type")
+        .limitTo(D::VIDEO & D::PARAM & D::INPUT));
+
+    add(ConfigMapper(
+            std::string(C2_PARAMKEY_INPUT_HDR_DYNAMIC_INFO) + ".data",
+            C2_PARAMKEY_INPUT_HDR_DYNAMIC_INFO, "data")
+        .limitTo(D::VIDEO & D::PARAM & D::INPUT));
+
+    add(ConfigMapper(
+            std::string(C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO) + ".type",
+            C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO, "type")
+        .limitTo(D::VIDEO & D::OUTPUT));
+
+    add(ConfigMapper(
+            std::string(C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO) + ".data",
+            C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO, "data")
         .limitTo(D::VIDEO & D::OUTPUT));
 
     add(ConfigMapper(C2_PARAMKEY_TEMPORAL_LAYERING, C2_PARAMKEY_TEMPORAL_LAYERING, "")
@@ -1551,6 +1574,22 @@
             msg->removeEntryAt(msg->findEntryByName("cta861.max-cll"));
             msg->removeEntryAt(msg->findEntryByName("cta861.max-fall"));
         }
+
+        // HDR dynamic info
+        std::string keyPrefix = input ? C2_PARAMKEY_INPUT_HDR_DYNAMIC_INFO
+                                      : C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO;
+        std::string typeKey = keyPrefix + ".type";
+        std::string dataKey = keyPrefix + ".data";
+        int32_t type;
+        sp<ABuffer> data;
+        if (msg->findInt32(typeKey.c_str(), &type)
+                && msg->findBuffer(dataKey.c_str(), &data)) {
+            if (type == HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40) {
+                msg->setBuffer(KEY_HDR10_PLUS_INFO, data);
+                msg->removeEntryAt(msg->findEntryByName(typeKey.c_str()));
+                msg->removeEntryAt(msg->findEntryByName(dataKey.c_str()));
+            }
+        }
     }
 
     ALOGV("converted to SDK values as %s", msg->debugString().c_str());
@@ -1634,6 +1673,27 @@
                 params->setFloat(C2_PARAMKEY_INPUT_TIME_STRETCH, captureRate / frameRate);
             }
         }
+
+        // add HDR format for video encoding
+        if (configDomain == IS_CONFIG) {
+            // don't assume here that transfer is set for HDR, only require it for HLG
+            int transfer = 0;
+            params->findInt32(KEY_COLOR_TRANSFER, &transfer);
+
+            int profile;
+            if (params->findInt32(KEY_PROFILE, &profile)) {
+                std::shared_ptr<C2Mapper::ProfileLevelMapper> mapper =
+                    C2Mapper::GetProfileLevelMapper(mCodingMediaType);
+                C2Config::hdr_format_t c2 = C2Config::hdr_format_t::UNKNOWN;
+                if (mapper && mapper->mapHdrFormat(profile, &c2)) {
+                    if (c2 == C2Config::hdr_format_t::HLG &&
+                        transfer != COLOR_TRANSFER_HLG) {
+                        c2 = C2Config::hdr_format_t::UNKNOWN;
+                    }
+                    params->setInt32(C2_PARAMKEY_HDR_FORMAT, c2);
+                }
+            }
+        }
     }
 
     {   // reflect temporal layering into a binary blob
@@ -1731,6 +1791,16 @@
                 params->setFloat("cta861.max-fall", meta->sType1.mMaxFrameAverageLightLevel);
             }
         }
+
+        sp<ABuffer> hdrDynamicInfo;
+        if (params->findBuffer(KEY_HDR10_PLUS_INFO, &hdrDynamicInfo)) {
+            for (const std::string &prefix : { C2_PARAMKEY_INPUT_HDR_DYNAMIC_INFO,
+                                               C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO }) {
+                params->setInt32((prefix + ".type").c_str(),
+                                 HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40);
+                params->setBuffer((prefix + ".data").c_str(), hdrDynamicInfo);
+            }
+        }
     }
 
     // this is to verify that we set proper signedness for standard parameters
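
For reference, a hedged sketch (not part of this change) of a configure()-time format that
exercises the new HDR-format deduction above; key and constant names are from
MediaCodecConstants.h:

    #include <media/stagefright/foundation/AMessage.h>
    #include <media/stagefright/MediaCodecConstants.h>

    using namespace android;

    sp<AMessage> makeHlgEncoderFormat() {
        sp<AMessage> format = new AMessage;
        format->setString(KEY_MIME, MIMETYPE_VIDEO_HEVC);
        format->setInt32(KEY_PROFILE, HEVCProfileMain10);          // 10-bit profile
        format->setInt32(KEY_COLOR_TRANSFER, COLOR_TRANSFER_HLG);  // required for HLG
        // With this config, mapHdrFormat(HEVCProfileMain10) yields HLG and the
        // transfer check passes, so C2_PARAMKEY_HDR_FORMAT is configured as HLG.
        return format;
    }
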
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index c2405e8..6084ee3 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -1178,9 +1178,6 @@
     }
     if (dynamicInfo && *dynamicInfo && dynamicInfo->flexCount() > 0) {
         ALOGV("Setting dynamic HDR info as gralloc4 metadata");
-        hidl_vec<uint8_t> vec;
-        vec.resize(dynamicInfo->flexCount());
-        memcpy(vec.data(), dynamicInfo->m.data, dynamicInfo->flexCount());
         std::optional<IMapper4::MetadataType> metadataType;
         switch (dynamicInfo->m.type_) {
         case C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_10:
@@ -1190,12 +1187,20 @@
             metadataType = MetadataType_Smpte2094_40;
             break;
         }
+
         if (metadataType) {
-            Return<Error4> ret = mapper->set(buffer.get(), *metadataType, vec);
-            if (!ret.isOk()) {
-                err = C2_REFUSED;
-            } else if (ret != Error4::NONE) {
-                err = C2_CORRUPTED;
+            std::vector<uint8_t> smpte2094_40;
+            smpte2094_40.resize(dynamicInfo->flexCount());
+            memcpy(smpte2094_40.data(), dynamicInfo->m.data, dynamicInfo->flexCount());
+
+            hidl_vec<uint8_t> vec;
+            if (gralloc4::encodeSmpte2094_40({ smpte2094_40 }, &vec) == OK) {
+                Return<Error4> ret = mapper->set(buffer.get(), *metadataType, vec);
+                if (!ret.isOk()) {
+                    err = C2_REFUSED;
+                } else if (ret != Error4::NONE) {
+                    err = C2_CORRUPTED;
+                }
             }
         } else {
             err = C2_BAD_VALUE;
diff --git a/media/codec2/sfplugin/Codec2InfoBuilder.cpp b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
index 2b8a160..3f9a40d 100644
--- a/media/codec2/sfplugin/Codec2InfoBuilder.cpp
+++ b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
@@ -33,6 +33,7 @@
 #include <OMX_Video.h>
 #include <OMX_VideoExt.h>
 #include <OMX_AsString.h>
+#include <SurfaceFlingerProperties.sysprop.h>
 
 #include <android/hardware/media/omx/1.0/IOmx.h>
 #include <android/hardware/media/omx/1.0/IOmxObserver.h>
@@ -136,7 +137,9 @@
                 continue;
             }
             switch (type.coreIndex()) {
-            case C2StreamHdr10PlusInfo::CORE_INDEX:
+            case C2StreamHdrDynamicMetadataInfo::CORE_INDEX:
+                [[fallthrough]];
+            case C2StreamHdr10PlusInfo::CORE_INDEX:  // will be deprecated
                 supportsHdr10Plus = true;
                 break;
             case C2StreamHdrStaticInfo::CORE_INDEX:
@@ -148,14 +151,21 @@
         }
     }
 
-    // For VP9/AV1, the static info is always propagated by framework.
+    // VP9 does not support HDR metadata in the bitstream and static metadata
+    // can always be carried by the framework. (The framework does not propagate
+    // dynamic metadata as that needs to be frame accurate.)
     supportsHdr |= (mediaType == MIMETYPE_VIDEO_VP9);
-    supportsHdr |= (mediaType == MIMETYPE_VIDEO_AV1);
 
     // HDR support implies 10-bit support.
     // TODO: directly check this from the component interface
     supports10Bit = (supportsHdr || supportsHdr10Plus);
 
+    // If the device doesn't support HDR display, then no codec on the device
+    // can advertise support for HDR profiles.
+    // Default to true to maintain backward compatibility
+    auto ret = sysprop::SurfaceFlingerProperties::has_HDR_display();
+    bool hasHDRDisplay = ret.has_value() ? *ret : true;
+
     bool added = false;
 
     for (C2Value::Primitive profile : profileQuery[0].values.values) {
@@ -181,8 +191,8 @@
         if (mapper && mapper->mapProfile(pl.profile, &sdkProfile)
                 && mapper->mapLevel(pl.level, &sdkLevel)) {
             caps->addProfileLevel((uint32_t)sdkProfile, (uint32_t)sdkLevel);
-            // also list HDR profiles if component supports HDR
-            if (supportsHdr) {
+            // also list HDR profiles if component supports HDR and device has HDR display
+            if (supportsHdr && hasHDRDisplay) {
                 auto hdrMapper = C2Mapper::GetHdrProfileLevelMapper(trait.mediaType);
                 if (hdrMapper && hdrMapper->mapProfile(pl.profile, &sdkProfile)) {
                     caps->addProfileLevel((uint32_t)sdkProfile, (uint32_t)sdkLevel);
diff --git a/media/codec2/sfplugin/utils/Android.bp b/media/codec2/sfplugin/utils/Android.bp
index 2f4d6b1..5c2f110 100644
--- a/media/codec2/sfplugin/utils/Android.bp
+++ b/media/codec2/sfplugin/utils/Android.bp
@@ -15,6 +15,7 @@
 
     srcs: [
         "Codec2BufferUtils.cpp",
+        "Codec2CommonUtils.cpp",
         "Codec2Mapper.cpp",
     ],
 
diff --git a/media/codec2/sfplugin/utils/Codec2CommonUtils.cpp b/media/codec2/sfplugin/utils/Codec2CommonUtils.cpp
new file mode 100644
index 0000000..ef5800d
--- /dev/null
+++ b/media/codec2/sfplugin/utils/Codec2CommonUtils.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Codec2BufferUtils"
+#define ATRACE_TAG  ATRACE_TAG_VIDEO
+#include <utils/Log.h>
+
+#include <android/hardware_buffer.h>
+#include <android-base/properties.h>
+#include <cutils/properties.h>
+#include <media/hardware/HardwareAPI.h>
+#include <system/graphics.h>
+
+#include <C2Debug.h>
+
+#include "Codec2CommonUtils.h"
+
+namespace android {
+
+bool isAtLeastT() {
+    char deviceCodeName[PROP_VALUE_MAX];
+    __system_property_get("ro.build.version.codename", deviceCodeName);
+    return android_get_device_api_level() >= __ANDROID_API_T__ ||
+           !strcmp(deviceCodeName, "Tiramisu");
+}
+
+bool isVendorApiOrFirstApiAtLeastT() {
+    // The first SDK the device shipped with.
+    static const int32_t kProductFirstApiLevel =
+        base::GetIntProperty<int32_t>("ro.product.first_api_level", 0);
+
+    // GRF devices (introduced in Android 11) list the first and possibly the current api levels
+    // to signal which VSR requirements they conform to even if the first device SDK was higher.
+    static const int32_t kBoardFirstApiLevel =
+        base::GetIntProperty<int32_t>("ro.board.first_api_level", 0);
+    static const int32_t kBoardApiLevel =
+        base::GetIntProperty<int32_t>("ro.board.api_level", 0);
+
+    // For non-GRF devices, use the first SDK version by the product.
+    static const int32_t kFirstApiLevel =
+        kBoardApiLevel != 0 ? kBoardApiLevel :
+        kBoardFirstApiLevel != 0 ? kBoardFirstApiLevel :
+        kProductFirstApiLevel;
+
+    return kFirstApiLevel >= __ANDROID_API_T__;
+}
+
+bool isHalPixelFormatSupported(AHardwareBuffer_Format format) {
+    // HAL_PIXEL_FORMAT_YCBCR_P010 requirement was added in T VSR, although it could have been
+    // supported prior to this.
+    //
+    // Unfortunately, we cannot detect if P010 is properly supported using AHardwareBuffer
+    // API alone. For now limit P010 to devices that launched with Android T or known to conform
+    // to Android T VSR (as opposed to simply limiting to a T vendor image).
+    if (format == (AHardwareBuffer_Format)HAL_PIXEL_FORMAT_YCBCR_P010 &&
+            !isVendorApiOrFirstApiAtLeastT()) {
+        return false;
+    }
+
+    const AHardwareBuffer_Desc desc = {
+            .width = 320,
+            .height = 240,
+            .format = format,
+            .layers = 1,
+            .usage = AHARDWAREBUFFER_USAGE_CPU_READ_RARELY |
+                     AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN |
+                     AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
+                     AHARDWAREBUFFER_USAGE_COMPOSER_OVERLAY,
+            .stride = 0,
+            .rfu0 = 0,
+            .rfu1 = 0,
+    };
+
+    return AHardwareBuffer_isSupported(&desc);
+}
+
+}  // namespace android
diff --git a/media/codec2/sfplugin/utils/Codec2CommonUtils.h b/media/codec2/sfplugin/utils/Codec2CommonUtils.h
new file mode 100644
index 0000000..98dd65b
--- /dev/null
+++ b/media/codec2/sfplugin/utils/Codec2CommonUtils.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2022, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CODEC2_COMMON_UTILS_H_
+#define CODEC2_COMMON_UTILS_H_
+
+#include <android/hardware_buffer.h>
+
+namespace android {
+
+bool isAtLeastT();
+
+bool isVendorApiOrFirstApiAtLeastT();
+
+/**
+ * Check if a given pixel format is supported.
+ * Enums listed in android_pixel_format_t, android_pixel_format_v1_1_t,
+ * and so on can be passed, as these enums have equivalent definitions in
+ * AHardwareBuffer_Format as well.
+ */
+bool isHalPixelFormatSupported(AHardwareBuffer_Format format);
+
+} // namespace android
+
+#endif  // CODEC2_COMMON_UTILS_H_
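
For reference, a hedged usage sketch (not part of this change) mirroring how the software
decoders above now gate P010 on actual gralloc support rather than on the platform release:

    #include <cstdint>
    #include <vector>
    #include <system/graphics.h>
    #include <Codec2CommonUtils.h>

    std::vector<uint32_t> supportedOutputPixelFormats() {
        std::vector<uint32_t> pixelFormats = {HAL_PIXEL_FORMAT_YCBCR_420_888};
        if (android::isHalPixelFormatSupported(
                (AHardwareBuffer_Format)HAL_PIXEL_FORMAT_YCBCR_P010)) {
            pixelFormats.push_back(HAL_PIXEL_FORMAT_YCBCR_P010);
        }
        return pixelFormats;
    }
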
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index 3a94016..c606d6f 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -276,6 +276,13 @@
     { C2Config::PROFILE_HEVC_MAIN_10, HEVCProfileMain10HDR10Plus },
 };
 
+ALookup<C2Config::hdr_format_t, int32_t> sHevcHdrFormats = {
+    { C2Config::hdr_format_t::SDR, HEVCProfileMain },
+    { C2Config::hdr_format_t::HLG, HEVCProfileMain10 },
+    { C2Config::hdr_format_t::HDR10, HEVCProfileMain10HDR10 },
+    { C2Config::hdr_format_t::HDR10_PLUS, HEVCProfileMain10HDR10Plus },
+};
+
 ALookup<C2Config::level_t, int32_t> sMpeg2Levels = {
     { C2Config::LEVEL_MP2V_LOW,         MPEG2LevelLL },
     { C2Config::LEVEL_MP2V_MAIN,        MPEG2LevelML },
@@ -365,6 +372,17 @@
     { C2Config::PROFILE_VP9_3, VP9Profile3HDR10Plus },
 };
 
+ALookup<C2Config::hdr_format_t, int32_t> sVp9HdrFormats = {
+    { C2Config::hdr_format_t::SDR, VP9Profile0 },
+    { C2Config::hdr_format_t::SDR, VP9Profile1 },
+    { C2Config::hdr_format_t::HLG, VP9Profile2 },
+    { C2Config::hdr_format_t::HLG, VP9Profile3 },
+    { C2Config::hdr_format_t::HDR10, VP9Profile2HDR },
+    { C2Config::hdr_format_t::HDR10, VP9Profile3HDR },
+    { C2Config::hdr_format_t::HDR10_PLUS, VP9Profile2HDR10Plus },
+    { C2Config::hdr_format_t::HDR10_PLUS, VP9Profile3HDR10Plus },
+};
+
 ALookup<C2Config::level_t, int32_t> sAv1Levels = {
     { C2Config::LEVEL_AV1_2,    AV1Level2  },
     { C2Config::LEVEL_AV1_2_1,  AV1Level21 },
@@ -411,6 +429,13 @@
     { C2Config::PROFILE_AV1_0, AV1ProfileMain10HDR10Plus },
 };
 
+ALookup<C2Config::hdr_format_t, int32_t> sAv1HdrFormats = {
+    { C2Config::hdr_format_t::SDR, AV1ProfileMain8 },
+    { C2Config::hdr_format_t::HLG, AV1ProfileMain10 },
+    { C2Config::hdr_format_t::HDR10, AV1ProfileMain10HDR10 },
+    { C2Config::hdr_format_t::HDR10_PLUS, AV1ProfileMain10HDR10Plus },
+};
+
 // HAL_PIXEL_FORMAT_* -> COLOR_Format*
 ALookup<uint32_t, int32_t> sPixelFormats = {
     { HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, COLOR_FormatSurface },
@@ -494,6 +519,10 @@
     virtual bool simpleMap(int32_t from, C2Config::profile_t *to) {
         return sAacProfiles.map(from, to);
     }
+    // AAC does not have HDR format
+    virtual bool mapHdrFormat(int32_t, C2Config::hdr_format_t*) override {
+        return false;
+    }
 };
 
 struct AvcProfileLevelMapper : ProfileLevelMapperHelper {
@@ -524,6 +553,12 @@
     virtual bool simpleMap(int32_t from, C2Config::profile_t *to) {
         return sDolbyVisionProfiles.map(from, to);
     }
+    // Dolby Vision is always HDR and the profile is fully expressive, so use the
+    // unknown HDR format.
+    virtual bool mapHdrFormat(int32_t, C2Config::hdr_format_t *to) override {
+        *to = C2Config::hdr_format_t::UNKNOWN;
+        return true;
+    }
 };
 
 struct H263ProfileLevelMapper : ProfileLevelMapperHelper {
@@ -562,6 +597,9 @@
                      mIsHdr ? sHevcHdrProfiles.map(from, to) :
                               sHevcProfiles.map(from, to);
     }
+    virtual bool mapHdrFormat(int32_t from, C2Config::hdr_format_t *to) override {
+        return sHevcHdrFormats.map(from, to);
+    }
 
 private:
     bool mIsHdr;
@@ -640,6 +678,9 @@
                      mIsHdr ? sVp9HdrProfiles.map(from, to) :
                               sVp9Profiles.map(from, to);
     }
+    virtual bool mapHdrFormat(int32_t from, C2Config::hdr_format_t *to) override {
+        return sVp9HdrFormats.map(from, to);
+    }
 
 private:
     bool mIsHdr;
@@ -669,6 +710,9 @@
                           mIsHdr ? sAv1HdrProfiles.map(from, to) :
                                    sAv1Profiles.map(from, to);
     }
+    virtual bool mapHdrFormat(int32_t from, C2Config::hdr_format_t *to) override {
+        return sAv1HdrFormats.map(from, to);
+    }
 
 private:
     bool mIsHdr;
@@ -678,6 +722,13 @@
 
 } // namespace
 
+// the default mapper is used for media types that do not support HDR
+bool C2Mapper::ProfileLevelMapper::mapHdrFormat(int32_t, C2Config::hdr_format_t *to) {
+    // by default map all (including vendor) profiles to SDR
+    *to = C2Config::hdr_format_t::SDR;
+    return true;
+}
+
 // static
 std::shared_ptr<C2Mapper::ProfileLevelMapper>
 C2Mapper::GetProfileLevelMapper(std::string mediaType) {
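
For reference, a hedged sketch (not part of this change; the helper name is illustrative) of
how a caller can query the HDR format implied by an SDK profile through the new hook:

    #include <memory>
    #include <string>
    #include <media/stagefright/MediaCodecConstants.h>
    #include <Codec2Mapper.h>

    using namespace android;

    C2Config::hdr_format_t hdrFormatForProfile(const std::string &mediaType, int32_t sdkProfile) {
        std::shared_ptr<C2Mapper::ProfileLevelMapper> mapper =
                C2Mapper::GetProfileLevelMapper(mediaType);
        C2Config::hdr_format_t fmt = C2Config::hdr_format_t::UNKNOWN;
        if (mapper && mapper->mapHdrFormat(sdkProfile, &fmt)) {
            // Per the tables above: HEVCProfileMain -> SDR, HEVCProfileMain10 -> HLG,
            // HEVCProfileMain10HDR10 -> HDR10, HEVCProfileMain10HDR10Plus -> HDR10_PLUS.
            return fmt;
        }
        return C2Config::hdr_format_t::UNKNOWN;  // e.g. the AAC mapper returns false
    }
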
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.h b/media/codec2/sfplugin/utils/Codec2Mapper.h
index 33d305e..c8e9e13 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.h
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.h
@@ -34,6 +34,16 @@
             virtual bool mapProfile(int32_t, C2Config::profile_t*) = 0;
             virtual bool mapLevel(C2Config::level_t, int32_t*) = 0;
             virtual bool mapLevel(int32_t, C2Config::level_t*) = 0;
+
+            /**
+             * Mapper method that maps a MediaCodec profile to the supported
+             * HDR format for that profile. Since 10-bit profiles are used for
+             * HLG, this method will return HLG for all 10-bit profiles, but
+             * the caller should also verify that the transfer function is
+             * indeed HLG.
+             */
+            // not an abstract method as we have a default implementation for SDR
+            virtual bool mapHdrFormat(int32_t, C2Config::hdr_format_t *hdr);
             virtual ~ProfileLevelMapper() = default;
         };
 
diff --git a/media/codec2/vndk/C2Fence.cpp b/media/codec2/vndk/C2Fence.cpp
index 9c5183e..de18df89 100644
--- a/media/codec2/vndk/C2Fence.cpp
+++ b/media/codec2/vndk/C2Fence.cpp
@@ -16,13 +16,24 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "C2FenceFactory"
+#include <cutils/native_handle.h>
 #include <utils/Log.h>
+#include <ui/Fence.h>
 
 #include <C2FenceFactory.h>
 #include <C2SurfaceSyncObj.h>
 
+#define MAX_FENCE_FDS 1
+
 class C2Fence::Impl {
 public:
+    enum type_t : uint32_t {
+        INVALID_FENCE,
+        NULL_FENCE,
+        SURFACE_FENCE,
+        SYNC_FENCE,
+    };
+
     virtual c2_status_t wait(c2_nsecs_t timeoutNs) = 0;
 
     virtual bool valid() const = 0;
@@ -33,9 +44,26 @@
 
     virtual bool isHW() const = 0;
 
+    virtual type_t type() const = 0;
+
+    /**
+     * Create a native handle for the fence so it can be marshalled.
+     * The native handle must store fence type in the first integer.
+     *
+     * \return a valid native handle if the fence can be marshalled, otherwise return null.
+     */
+    virtual native_handle_t *createNativeHandle() const = 0;
+
     virtual ~Impl() = default;
 
     Impl() = default;
+
+    static type_t GetTypeFromNativeHandle(const native_handle_t* nh) {
+        if (nh && nh->numFds >= 0 && nh->numFds <= MAX_FENCE_FDS && nh->numInts > 0) {
+            return static_cast<type_t>(nh->data[nh->numFds]);
+        }
+        return INVALID_FENCE;
+    }
 };
 
 c2_status_t C2Fence::wait(c2_nsecs_t timeoutNs) {
@@ -115,6 +143,15 @@
         return false;
     }
 
+    virtual type_t type() const {
+        return SURFACE_FENCE;
+    }
+
+    virtual native_handle_t *createNativeHandle() const {
+        ALOG_ASSERT(false, "Cannot create native handle from surface fence");
+        return nullptr;
+    }
+
     virtual ~SurfaceFenceImpl() {};
 
     SurfaceFenceImpl(std::shared_ptr<C2SurfaceSyncMemory> syncMem, uint32_t waitId) :
@@ -143,3 +180,119 @@
     }
     return C2Fence();
 }
+
+using namespace android;
+
+class _C2FenceFactory::SyncFenceImpl : public C2Fence::Impl {
+public:
+    virtual c2_status_t wait(c2_nsecs_t timeoutNs) {
+        c2_nsecs_t timeoutMs = timeoutNs / 1000000; // convert nanoseconds to milliseconds
+        if (timeoutMs > INT_MAX) {
+            timeoutMs = INT_MAX;
+        }
+
+        switch (mFence->wait((int)timeoutMs)) {
+            case NO_ERROR:
+                return C2_OK;
+            case -ETIME:
+                return C2_TIMED_OUT;
+            default:
+                return C2_CORRUPTED;
+        }
+    }
+
+    virtual bool valid() const {
+        return mFence->getStatus() != Fence::Status::Invalid;
+    }
+
+    virtual bool ready() const {
+        return mFence->getStatus() == Fence::Status::Signaled;
+    }
+
+    virtual int fd() const {
+        return mFence->dup();
+    }
+
+    virtual bool isHW() const {
+        return true;
+    }
+
+    virtual type_t type() const {
+        return SYNC_FENCE;
+    }
+
+    virtual native_handle_t *createNativeHandle() const {
+        native_handle_t* nh = native_handle_create(1, 1);
+        if (!nh) {
+            ALOGE("Failed to allocate native handle for sync fence");
+            return nullptr;
+        }
+        nh->data[0] = fd();
+        nh->data[1] = type();
+        return nh;
+    }
+
+    virtual ~SyncFenceImpl() {};
+
+    SyncFenceImpl(int fenceFd) :
+            mFence(sp<Fence>::make(fenceFd)) {}
+
+    static std::shared_ptr<SyncFenceImpl> CreateFromNativeHandle(const native_handle_t* nh) {
+        if (!nh || nh->numFds != 1 || nh->numInts != 1) {
+            ALOGE("Invalid handle for sync fence");
+            return nullptr;
+        }
+        int fd = dup(nh->data[0]);
+        std::shared_ptr<SyncFenceImpl> p = std::make_shared<SyncFenceImpl>(fd);
+        if (!p) {
+            ALOGE("Failed to allocate sync fence impl");
+            close(fd);
+        }
+        return p;
+    }
+
+private:
+    const sp<Fence> mFence;
+};
+
+C2Fence _C2FenceFactory::CreateSyncFence(int fenceFd) {
+    std::shared_ptr<C2Fence::Impl> p;
+    if (fenceFd >= 0) {
+        p = std::make_shared<_C2FenceFactory::SyncFenceImpl>(fenceFd);
+        if (!p) {
+            ALOGE("Failed to allocate sync fence impl");
+            close(fenceFd);
+        } else if (!p->valid()) {
+            // drop invalid fences; the fd is owned (and closed) by the impl
+            p.reset();
+        }
+    } else {
+        ALOGE("Create sync fence from invalid fd");
+    }
+    return C2Fence(p);
+}
+
+native_handle_t* _C2FenceFactory::CreateNativeHandle(const C2Fence& fence) {
+    return fence.mImpl ? fence.mImpl->createNativeHandle() : nullptr;
+}
+
+C2Fence _C2FenceFactory::CreateFromNativeHandle(const native_handle_t* handle) {
+    if (!handle) {
+        return C2Fence();
+    }
+    C2Fence::Impl::type_t type = C2Fence::Impl::GetTypeFromNativeHandle(handle);
+    std::shared_ptr<C2Fence::Impl> p;
+    switch (type) {
+        case C2Fence::Impl::SYNC_FENCE:
+            p = SyncFenceImpl::CreateFromNativeHandle(handle);
+            break;
+        default:
+            ALOGW("Unsupported fence type %d", type);
+            break;
+    }
+    if (p && !p->valid()) {
+        p.reset();
+    }
+    return C2Fence(p);
+}
+
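For clarity, a small sketch restating the marshalling layout that SyncFenceImpl::createNativeHandle() and C2Fence::Impl::GetTypeFromNativeHandle() agree on: the fds come first, and the fence type is stored in the first integer slot (data[numFds]). The helper name is illustrative and assumes <cutils/native_handle.h>.

    #include <stdint.h>
    #include <cutils/native_handle.h>

    // Illustrative only: build a handle with the layout used above for sync fences,
    // i.e. one fd followed by one integer carrying the fence type.
    static native_handle_t *makeSyncFenceLikeHandle(int dupedFenceFd, uint32_t fenceType) {
        native_handle_t *nh = native_handle_create(1 /* numFds */, 1 /* numInts */);
        if (nh == nullptr) {
            return nullptr;
        }
        nh->data[0] = dupedFenceFd;  // fds occupy data[0..numFds-1]
        nh->data[1] = fenceType;     // type lives at data[numFds], as GetTypeFromNativeHandle expects
        return nh;
    }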
diff --git a/media/codec2/vndk/include/C2FenceFactory.h b/media/codec2/vndk/include/C2FenceFactory.h
index d4bed26..4944115 100644
--- a/media/codec2/vndk/include/C2FenceFactory.h
+++ b/media/codec2/vndk/include/C2FenceFactory.h
@@ -28,6 +28,7 @@
 struct _C2FenceFactory {
 
     class SurfaceFenceImpl;
+    class SyncFenceImpl;
 
     /*
      * Create C2Fence for BufferQueueBased blockpool.
@@ -38,6 +39,30 @@
     static C2Fence CreateSurfaceFence(
             std::shared_ptr<C2SurfaceSyncMemory> syncMem,
             uint32_t waitId);
+
+    /*
+     * Create C2Fence from a fence file fd.
+     *
+     * \param fenceFd           Fence file descriptor.
+     *                          It will be owned and closed by the returned fence object.
+     */
+    static C2Fence CreateSyncFence(int fenceFd);
+
+    /**
+     * Create a native handle from a fence for marshalling.
+     *
+     * \return a non-null pointer if the fence can be marshalled, otherwise nullptr.
+     */
+    static native_handle_t* CreateNativeHandle(const C2Fence& fence);
+
+    /*
+     * Create C2Fence from a native handle.
+     *
+     * \param handle           A native handle representing a fence.
+     *                         The fd in the native handle will be duplicated, so the caller
+     *                         still owns the handle and has to close it.
+     */
+    static C2Fence CreateFromNativeHandle(const native_handle_t* handle);
 };
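A minimal usage sketch for the new factory entry points, under the assumption that the caller already owns a sync fence fd; the function name and the 1 ms wait are illustrative.

    #include <cutils/native_handle.h>
    #include <C2FenceFactory.h>

    static void marshalAndRestoreSyncFence(int ownedSyncFenceFd) {
        // CreateSyncFence() takes ownership of (and eventually closes) the fd.
        C2Fence fence = _C2FenceFactory::CreateSyncFence(ownedSyncFenceFd);

        // Marshal: returns nullptr for fence types that cannot be marshalled,
        // e.g. surface fences.
        native_handle_t *nh = _C2FenceFactory::CreateNativeHandle(fence);
        if (nh == nullptr) {
            return;
        }

        // Unmarshal: the fd inside nh is duplicated, so this side still owns the
        // handle and has to close and delete it.
        C2Fence restored = _C2FenceFactory::CreateFromNativeHandle(nh);
        native_handle_close(nh);
        native_handle_delete(nh);

        (void)restored.wait(1000000 /* 1 ms, in ns */);
    }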
 
 
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index eccbf46..5ca874e 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -1969,26 +1969,8 @@
             }
 
             if (chunk_type == FOURCC("fLaC")) {
-
-                // From https://github.com/xiph/flac/blob/master/doc/isoflac.txt
-                // 4 for mime, 4 for blockType and BlockLen, 34 for metadata
-                uint8_t flacInfo[4 + 4 + 34];
-                // skipping dFla, version
-                data_offset += sizeof(buffer) + 12;
-                size_t flacOffset = 4;
-                // Add flaC header mime type to CSD
-                strncpy((char *)flacInfo, "fLaC", 4);
-                if (mDataSource->readAt(
-                        data_offset, flacInfo + flacOffset, sizeof(flacInfo) - flacOffset) <
-                        (ssize_t)sizeof(flacInfo) - flacOffset) {
-                    return ERROR_IO;
-                }
-                data_offset += sizeof(flacInfo) - flacOffset;
-
-                AMediaFormat_setBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_0, flacInfo,
-                                       sizeof(flacInfo));
+                data_offset += sizeof(buffer);
                 *offset = data_offset;
-                CHECK_EQ(*offset, stop_offset);
             }
 
             while (*offset < stop_offset) {
@@ -2521,6 +2503,35 @@
             break;
         }
 
+        case FOURCC("dfLa"):
+        {
+            *offset += chunk_size;
+
+            // From https://github.com/xiph/flac/blob/master/doc/isoflac.txt
+            // 4 for mediaType, 4 for blockType and BlockLen, 34 for metadata
+            uint8_t flacInfo[4 + 4 + 34];
+
+            if (chunk_data_size != sizeof(flacInfo)) {
+                return ERROR_MALFORMED;
+            }
+
+            data_offset += 4;
+            size_t flacOffset = 4;
+            // Add flaC header mediaType to CSD
+            strncpy((char *)flacInfo, "fLaC", 4);
+
+            ssize_t bytesToRead = sizeof(flacInfo) - flacOffset;
+            if (mDataSource->readAt(
+                    data_offset, flacInfo + flacOffset, bytesToRead) < bytesToRead) {
+                return ERROR_IO;
+            }
+
+            data_offset += bytesToRead;
+            AMediaFormat_setBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_0, flacInfo,
+                                    sizeof(flacInfo));
+            break;
+        }
+
         case FOURCC("avcC"):
         {
             *offset += chunk_size;
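For reference, a sketch of the CSD-0 buffer the new 'dfLa' case assembles: a synthesized 4-byte 'fLaC' marker followed by the 38 bytes (METADATA_BLOCK_HEADER plus STREAMINFO) copied from the box after its 4-byte version/flags field. The helper below only restates that layout and is not extractor code.

    #include <stdint.h>
    #include <string.h>

    // 4 bytes "fLaC" + 4 bytes block header + 34 bytes STREAMINFO, as in flacInfo above.
    constexpr size_t kFlacCsdSize = 4 + 4 + 34;

    // Illustrative only: dfLaPayload points just past the box's 4-byte version/flags field.
    static bool buildFlacCsd(const uint8_t *dfLaPayload, size_t payloadSize,
                             uint8_t out[kFlacCsdSize]) {
        if (payloadSize < kFlacCsdSize - 4) {
            return false;  // malformed: need block header + STREAMINFO
        }
        memcpy(out, "fLaC", 4);                          // synthesized marker
        memcpy(out + 4, dfLaPayload, kFlacCsdSize - 4);  // copied box contents
        return true;
    }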
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 382f4a7..6f1974e 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -4099,26 +4099,29 @@
                 break;
             }
 
-            if (asyncNotify != nullptr) {
-                if (mSurface != NULL) {
-                    if (!mReleaseSurface) {
-                        uint64_t usage = 0;
-                        if (mSurface->getConsumerUsage(&usage) != OK) {
-                            usage = 0;
-                        }
-                        mReleaseSurface.reset(new ReleaseSurface(usage));
+            bool forceSync = false;
+            if (asyncNotify != nullptr && mSurface != NULL) {
+                if (!mReleaseSurface) {
+                    uint64_t usage = 0;
+                    if (mSurface->getConsumerUsage(&usage) != OK) {
+                        usage = 0;
                     }
-                    if (mSurface != mReleaseSurface->getSurface()) {
-                        status_t err = connectToSurface(mReleaseSurface->getSurface());
-                        ALOGW_IF(err != OK, "error connecting to release surface: err = %d", err);
-                        if (err == OK && !(mFlags & kFlagUsesSoftwareRenderer)) {
-                            err = mCodec->setSurface(mReleaseSurface->getSurface());
-                            ALOGW_IF(err != OK, "error setting release surface: err = %d", err);
-                        }
-                        if (err == OK) {
-                            (void)disconnectFromSurface();
-                            mSurface = mReleaseSurface->getSurface();
-                        }
+                    mReleaseSurface.reset(new ReleaseSurface(usage));
+                }
+                if (mSurface != mReleaseSurface->getSurface()) {
+                    status_t err = connectToSurface(mReleaseSurface->getSurface());
+                    ALOGW_IF(err != OK, "error connecting to release surface: err = %d", err);
+                    if (err == OK && !(mFlags & kFlagUsesSoftwareRenderer)) {
+                        err = mCodec->setSurface(mReleaseSurface->getSurface());
+                        ALOGW_IF(err != OK, "error setting release surface: err = %d", err);
+                    }
+                    if (err == OK) {
+                        (void)disconnectFromSurface();
+                        mSurface = mReleaseSurface->getSurface();
+                    } else {
+                        // We were not able to switch the surface, so force
+                        // synchronous release.
+                        forceSync = true;
                     }
                 }
             }
@@ -4142,8 +4145,10 @@
             }
 
             if (asyncNotify != nullptr) {
-                mResourceManagerProxy->markClientForPendingRemoval();
-                postPendingRepliesAndDeferredMessages("kWhatRelease:async");
+                if (!forceSync) {
+                    mResourceManagerProxy->markClientForPendingRemoval();
+                    postPendingRepliesAndDeferredMessages("kWhatRelease:async");
+                }
                 asyncNotifyPost.clear();
                 mAsyncReleaseCompleteNotification = asyncNotify;
             }
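A condensed, standalone restatement of the decision added above, purely for review: when an async release cannot move the codec onto the placeholder release surface, the release falls back to the synchronous path instead of being marked for pending removal. The helper name and parameters are illustrative.

    // Illustrative only.
    static bool releaseCompletesAsynchronously(bool asyncRequested, bool hasSurface,
                                               bool switchedToReleaseSurface) {
        bool forceSync = asyncRequested && hasSurface && !switchedToReleaseSurface;
        return asyncRequested && !forceSync;
    }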
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 4b6470a..a443ed9 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -126,14 +126,10 @@
     }
 }
 
-static bool isHdr(const sp<AMessage> &format) {
-    // if CSD specifies HDR transfer(s), we assume HDR. Otherwise, if it specifies non-HDR
-    // transfers, we must assume non-HDR. This is because CSD trumps any color-transfer key
-    // in the format.
-    int32_t isHdr;
-    if (format->findInt32("android._is-hdr", &isHdr)) {
-        return isHdr;
-    }
+/**
+ * Returns true if, and only if, the given format corresponds to HDR10 or HDR10+.
+ */
+static bool isHdr10or10Plus(const sp<AMessage> &format) {
 
     // if user/container supplied HDR static info without transfer set, assume true
     if ((format->contains("hdr-static-info") || format->contains("hdr10-plus-info"))
@@ -143,8 +139,7 @@
     // otherwise, verify that an HDR transfer function is set
     int32_t transfer;
     if (format->findInt32("color-transfer", &transfer)) {
-        return transfer == ColorUtils::kColorTransferST2084
-                || transfer == ColorUtils::kColorTransferHLG;
+        return transfer == ColorUtils::kColorTransferST2084;
     }
     return false;
 }
@@ -419,8 +414,12 @@
     }
 
     // bump to HDR profile
-    if (isHdr(format) && codecProfile == HEVCProfileMain10) {
-        codecProfile = HEVCProfileMain10HDR10;
+    if (isHdr10or10Plus(format) && codecProfile == HEVCProfileMain10) {
+        if (format->contains("hdr10-plus-info")) {
+            codecProfile = HEVCProfileMain10HDR10Plus;
+        } else {
+            codecProfile = HEVCProfileMain10HDR10;
+        }
     }
 
     format->setInt32("profile", codecProfile);
@@ -615,16 +614,25 @@
                         { 3, VP9Profile3 },
                     };
 
-                    const static ALookup<int32_t, int32_t> toHdr {
+                    const static ALookup<int32_t, int32_t> toHdr10 {
                         { VP9Profile2, VP9Profile2HDR },
                         { VP9Profile3, VP9Profile3HDR },
                     };
 
+                    const static ALookup<int32_t, int32_t> toHdr10Plus {
+                        { VP9Profile2, VP9Profile2HDR10Plus },
+                        { VP9Profile3, VP9Profile3HDR10Plus },
+                    };
+
                     int32_t profile;
                     if (profiles.map(data[0], &profile)) {
                         // convert to HDR profile
-                        if (isHdr(format)) {
-                            toHdr.lookup(profile, &profile);
+                        if (isHdr10or10Plus(format)) {
+                            if (format->contains("hdr10-plus-info")) {
+                                toHdr10Plus.lookup(profile, &profile);
+                            } else {
+                                toHdr10.lookup(profile, &profile);
+                            }
                         }
 
                         format->setInt32("profile", profile);
@@ -684,7 +692,7 @@
     int32_t profile;
     if (profiles.map(std::make_pair(highBitDepth, profileData), &profile)) {
         // bump to HDR profile
-        if (isHdr(format) && profile == AV1ProfileMain10) {
+        if (isHdr10or10Plus(format) && profile == AV1ProfileMain10) {
             if (format->contains("hdr10-plus-info")) {
                 profile = AV1ProfileMain10HDR10Plus;
             } else {
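A simplified sketch of the HDR10/HDR10+ profile selection the Utils.cpp changes now apply consistently across HEVC, VP9 and AV1: isHdr10or10Plus() keys off supplied HDR metadata (when no transfer is set) or an ST 2084 transfer, and the presence of hdr10-plus-info picks the HDR10+ variant of the profile. The enum and helper are illustrative, not code from this patch.

    // Illustrative only; the real code maps to per-codec profile constants.
    enum class HdrVariant { None, Hdr10, Hdr10Plus };

    static HdrVariant classifyHdrVariant(bool hasHdrStaticInfo, bool hasHdr10PlusInfo,
                                         bool hasColorTransfer, bool transferIsSt2084) {
        // Mirrors isHdr10or10Plus(): metadata without a transfer counts as HDR10(+);
        // otherwise the transfer must be ST 2084 (PQ). HLG no longer qualifies.
        bool isHdr10or10Plus =
                ((hasHdrStaticInfo || hasHdr10PlusInfo) && !hasColorTransfer) ||
                (hasColorTransfer && transferIsSt2084);
        if (!isHdr10or10Plus) {
            return HdrVariant::None;
        }
        return hasHdr10PlusInfo ? HdrVariant::Hdr10Plus : HdrVariant::Hdr10;
    }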
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index de418da..f7fd5d6 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -4091,6 +4091,26 @@
         // Verify buffer caches
         std::vector<uint64_t> bufIds(offlineStream.circulatingBufferIds.begin(),
                 offlineStream.circulatingBufferIds.end());
+        {
+            // Due to timing it is possible that there are no remaining pending capture
+            // requests that could update the buffer caches on the HAL side. This can result in a
+            // buffer cache mismatch between the service and the HAL and must be accounted for.
+            std::lock_guard<std::mutex> l(mFreedBuffersLock);
+            for (const auto& it : mFreedBuffers) {
+                if (it.first == id) {
+                    ALOGV("%s: stream ID %d buffer id %" PRIu64 " cache removal still pending",
+                            __FUNCTION__, id, it.second);
+                    const auto& cachedEntry = std::find(bufIds.begin(), bufIds.end(), it.second);
+                    if (cachedEntry != bufIds.end()) {
+                        bufIds.erase(cachedEntry);
+                    } else {
+                        ALOGE("%s: stream ID %d buffer id %" PRIu64 " cache removal still pending "
+                                "however buffer is no longer in the offline stream info!",
+                                __FUNCTION__, id, it.second);
+                    }
+                }
+            }
+        }
         if (!verifyBufferIds(id, bufIds)) {
             ALOGE("%s: stream ID %d buffer cache records mismatch!", __FUNCTION__, id);
             return UNKNOWN_ERROR;
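A self-contained sketch of the pruning step added above: buffer ids whose cache removal is still pending for the offline stream are dropped from the candidate list before verifyBufferIds() runs. Types and names are illustrative; the real code iterates mFreedBuffers under mFreedBuffersLock.

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    // Illustrative only: freedBuffers holds (streamId, bufferId) pairs with pending removals.
    static void prunePendingRemovals(int streamId,
                                     const std::vector<std::pair<int, uint64_t>> &freedBuffers,
                                     std::vector<uint64_t> *bufIds) {
        for (const auto &entry : freedBuffers) {
            if (entry.first != streamId) {
                continue;
            }
            auto it = std::find(bufIds->begin(), bufIds->end(), entry.second);
            if (it != bufIds->end()) {
                bufIds->erase(it);  // the HAL no longer caches this buffer
            }
        }
    }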