Merge "Add bugreport mode for debugging onboarding." into main
diff --git a/cmds/dumpstate/DumpstateUtil.cpp b/cmds/dumpstate/DumpstateUtil.cpp
index 4842312..615701c 100644
--- a/cmds/dumpstate/DumpstateUtil.cpp
+++ b/cmds/dumpstate/DumpstateUtil.cpp
@@ -207,9 +207,7 @@
 int PropertiesHelper::dry_run_ = -1;
 int PropertiesHelper::unroot_ = -1;
 int PropertiesHelper::parallel_run_ = -1;
-#if !defined(__ANDROID_VNDK__)
 int PropertiesHelper::strict_run_ = -1;
-#endif
 
 bool PropertiesHelper::IsUserBuild() {
     if (build_type_.empty()) {
@@ -240,7 +238,6 @@
     return parallel_run_ == 1;
 }
 
-#if !defined(__ANDROID_VNDK__)
 bool PropertiesHelper::IsStrictRun() {
     if (strict_run_ == -1) {
         // Defaults to using stricter timeouts.
@@ -248,7 +245,6 @@
     }
     return strict_run_ == 1;
 }
-#endif
 
 int DumpFileToFd(int out_fd, const std::string& title, const std::string& path) {
     android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(path.c_str(), O_RDONLY | O_NONBLOCK | O_CLOEXEC)));
diff --git a/cmds/dumpstate/DumpstateUtil.h b/cmds/dumpstate/DumpstateUtil.h
index 6049e3e..9e955e3 100644
--- a/cmds/dumpstate/DumpstateUtil.h
+++ b/cmds/dumpstate/DumpstateUtil.h
@@ -198,18 +198,14 @@
      * will default to true. This results in shortened timeouts for flaky
      * sections.
      */
-#if !defined(__ANDROID_VNDK__)
     static bool IsStrictRun();
-#endif
 
   private:
     static std::string build_type_;
     static int dry_run_;
     static int unroot_;
     static int parallel_run_;
-#if !defined(__ANDROID_VNDK__)
     static int strict_run_;
-#endif
 };
 
 /*
diff --git a/cmds/installd/dexopt.h b/cmds/installd/dexopt.h
index 5cf402c..df02588 100644
--- a/cmds/installd/dexopt.h
+++ b/cmds/installd/dexopt.h
@@ -18,6 +18,7 @@
 #define DEXOPT_H_
 
 #include "installd_constants.h"
+#include "unique_file.h"
 
 #include <sys/types.h>
 
@@ -156,6 +157,10 @@
 // artifacts.
 int get_odex_visibility(const char* apk_path, const char* instruction_set, const char* oat_dir);
 
+UniqueFile maybe_open_reference_profile(const std::string& pkgname, const std::string& dex_path,
+                                        const char* profile_name, bool profile_guided,
+                                        bool is_public, int uid, bool is_secondary_dex);
+
 }  // namespace installd
 }  // namespace android
 
diff --git a/cmds/installd/otapreopt.cpp b/cmds/installd/otapreopt.cpp
index 7cabdb0..27ae8f6 100644
--- a/cmds/installd/otapreopt.cpp
+++ b/cmds/installd/otapreopt.cpp
@@ -14,20 +14,21 @@
  ** limitations under the License.
  */
 
-#include <algorithm>
 #include <inttypes.h>
-#include <limits>
-#include <random>
-#include <regex>
 #include <selinux/android.h>
 #include <selinux/avc.h>
 #include <stdlib.h>
 #include <string.h>
 #include <sys/capability.h>
+#include <sys/mman.h>
 #include <sys/prctl.h>
 #include <sys/stat.h>
-#include <sys/mman.h>
 #include <sys/wait.h>
+#include <algorithm>
+#include <iterator>
+#include <limits>
+#include <random>
+#include <regex>
 
 #include <android-base/logging.h>
 #include <android-base/macros.h>
@@ -47,6 +48,7 @@
 #include "otapreopt_parameters.h"
 #include "otapreopt_utils.h"
 #include "system_properties.h"
+#include "unique_file.h"
 #include "utils.h"
 
 #ifndef LOG_TAG
@@ -87,6 +89,9 @@
 static_assert(DEXOPT_MASK           == (0x3dfe | DEXOPT_IDLE_BACKGROUND_JOB),
               "DEXOPT_MASK unexpected.");
 
+constexpr const char* kAotCompilerFilters[]{
+        "space-profile", "space", "speed-profile", "speed", "everything-profile", "everything",
+};
 
 template<typename T>
 static constexpr bool IsPowerOfTwo(T x) {
@@ -415,6 +420,32 @@
         return (strcmp(arg, "!") == 0) ? nullptr : arg;
     }
 
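+    // Returns true if this dexopt request would actually AOT-compile code: the compiler
+    // filter must be one of the AOT filters above and, for profile-guided compilation,
+    // a non-empty reference profile must be present.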
+    bool IsAotCompilation() const {
+        if (std::find(std::begin(kAotCompilerFilters), std::end(kAotCompilerFilters),
+                      parameters_.compiler_filter) == std::end(kAotCompilerFilters)) {
+            return false;
+        }
+
+        int dexopt_flags = parameters_.dexopt_flags;
+        bool profile_guided = (dexopt_flags & DEXOPT_PROFILE_GUIDED) != 0;
+        bool is_secondary_dex = (dexopt_flags & DEXOPT_SECONDARY_DEX) != 0;
+        bool is_public = (dexopt_flags & DEXOPT_PUBLIC) != 0;
+
+        if (profile_guided) {
+            UniqueFile reference_profile =
+                    maybe_open_reference_profile(parameters_.pkgName, parameters_.apk_path,
+                                                 parameters_.profile_name, profile_guided,
+                                                 is_public, parameters_.uid, is_secondary_dex);
+            struct stat sbuf;
+            if (reference_profile.fd() == -1 ||
+                (fstat(reference_profile.fd(), &sbuf) != -1 && sbuf.st_size == 0)) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
     bool ShouldSkipPreopt() const {
         // There's one thing we have to be careful about: we may/will be asked to compile an app
         // living in the system image. This may be a valid request - if the app wasn't compiled,
@@ -439,9 +470,12 @@
         //       (This is ugly as it's the only thing where we need to understand the contents
         //        of parameters_, but it beats postponing the decision or using the call-
         //        backs to do weird things.)
+
+        // In addition, no need to preopt for "verify". The existing vdex files in the OTA package
+        // and the /data partition will still be usable after the OTA update is applied.
         const char* apk_path = parameters_.apk_path;
         CHECK(apk_path != nullptr);
-        if (StartsWith(apk_path, android_root_)) {
+        if (StartsWith(apk_path, android_root_) || !IsAotCompilation()) {
             const char* last_slash = strrchr(apk_path, '/');
             if (last_slash != nullptr) {
                 std::string path(apk_path, last_slash - apk_path + 1);
@@ -471,13 +505,18 @@
     // TODO(calin): embed the profile name in the parameters.
     int Dexopt() {
         std::string error;
+
+        int dexopt_flags = parameters_.dexopt_flags;
+        // Make sure dex2oat is run with background priority.
+        dexopt_flags |= DEXOPT_BOOTCOMPLETE | DEXOPT_IDLE_BACKGROUND_JOB;
+
         int res = dexopt(parameters_.apk_path,
                          parameters_.uid,
                          parameters_.pkgName,
                          parameters_.instruction_set,
                          parameters_.dexopt_needed,
                          parameters_.oat_dir,
-                         parameters_.dexopt_flags,
+                         dexopt_flags,
                          parameters_.compiler_filter,
                          parameters_.volume_uuid,
                          parameters_.shared_libraries,
@@ -521,61 +560,6 @@
         return Dexopt();
     }
 
-    ////////////////////////////////////
-    // Helpers, mostly taken from ART //
-    ////////////////////////////////////
-
-    // Choose a random relocation offset. Taken from art/runtime/gc/image_space.cc.
-    static int32_t ChooseRelocationOffsetDelta(int32_t min_delta, int32_t max_delta) {
-        constexpr size_t kPageSize = PAGE_SIZE;
-        static_assert(IsPowerOfTwo(kPageSize), "page size must be power of two");
-        CHECK_EQ(min_delta % kPageSize, 0u);
-        CHECK_EQ(max_delta % kPageSize, 0u);
-        CHECK_LT(min_delta, max_delta);
-
-        std::default_random_engine generator;
-        generator.seed(GetSeed());
-        std::uniform_int_distribution<int32_t> distribution(min_delta, max_delta);
-        int32_t r = distribution(generator);
-        if (r % 2 == 0) {
-            r = RoundUp(r, kPageSize);
-        } else {
-            r = RoundDown(r, kPageSize);
-        }
-        CHECK_LE(min_delta, r);
-        CHECK_GE(max_delta, r);
-        CHECK_EQ(r % kPageSize, 0u);
-        return r;
-    }
-
-    static uint64_t GetSeed() {
-#ifdef __BIONIC__
-        // Bionic exposes arc4random, use it.
-        uint64_t random_data;
-        arc4random_buf(&random_data, sizeof(random_data));
-        return random_data;
-#else
-#error "This is only supposed to run with bionic. Otherwise, implement..."
-#endif
-    }
-
-    void AddCompilerOptionFromSystemProperty(const char* system_property,
-            const char* prefix,
-            bool runtime,
-            std::vector<std::string>& out) const {
-        const std::string* value = system_properties_.GetProperty(system_property);
-        if (value != nullptr) {
-            if (runtime) {
-                out.push_back("--runtime-arg");
-            }
-            if (prefix != nullptr) {
-                out.push_back(StringPrintf("%s%s", prefix, value->c_str()));
-            } else {
-                out.push_back(*value);
-            }
-        }
-    }
-
     static constexpr const char* kBootClassPathPropertyName = "BOOTCLASSPATH";
     static constexpr const char* kAndroidRootPathPropertyName = "ANDROID_ROOT";
     static constexpr const char* kAndroidDataPathPropertyName = "ANDROID_DATA";
diff --git a/cmds/installd/otapreopt_chroot.cpp b/cmds/installd/otapreopt_chroot.cpp
index c86993c..c40caf5 100644
--- a/cmds/installd/otapreopt_chroot.cpp
+++ b/cmds/installd/otapreopt_chroot.cpp
@@ -19,9 +19,12 @@
 #include <sys/mount.h>
 #include <sys/stat.h>
 #include <sys/wait.h>
+#include <unistd.h>
 
+#include <algorithm>
 #include <array>
 #include <fstream>
+#include <iostream>
 #include <sstream>
 
 #include <android-base/file.h>
@@ -29,6 +32,7 @@
 #include <android-base/macros.h>
 #include <android-base/scopeguard.h>
 #include <android-base/stringprintf.h>
+#include <android-base/strings.h>
 #include <android-base/unique_fd.h>
 #include <libdm/dm.h>
 #include <selinux/android.h>
@@ -37,7 +41,7 @@
 #include "otapreopt_utils.h"
 
 #ifndef LOG_TAG
-#define LOG_TAG "otapreopt"
+#define LOG_TAG "otapreopt_chroot"
 #endif
 
 using android::base::StringPrintf;
@@ -49,20 +53,22 @@
 // so just try the possibilities one by one.
 static constexpr std::array kTryMountFsTypes = {"ext4", "erofs"};
 
-static void CloseDescriptor(int fd) {
-    if (fd >= 0) {
-        int result = close(fd);
-        UNUSED(result);  // Ignore result. Printing to logcat will open a new descriptor
-                         // that we do *not* want.
-    }
-}
-
 static void CloseDescriptor(const char* descriptor_string) {
     int fd = -1;
     std::istringstream stream(descriptor_string);
     stream >> fd;
     if (!stream.fail()) {
-        CloseDescriptor(fd);
+        if (fd >= 0) {
+            if (close(fd) < 0) {
+                PLOG(ERROR) << "Failed to close " << fd;
+            }
+        }
+    }
+}
+
+static void SetCloseOnExec(int fd) {
+    if (fcntl(fd, F_SETFD, FD_CLOEXEC) < 0) {
+        PLOG(ERROR) << "Failed to set FD_CLOEXEC on " << fd;
     }
 }
 
@@ -129,24 +135,39 @@
 }
 
 // Entry for otapreopt_chroot. Expected parameters are:
-//   [cmd] [status-fd] [target-slot] "dexopt" [dexopt-params]
-// The file descriptor denoted by status-fd will be closed. The rest of the parameters will
-// be passed on to otapreopt in the chroot.
+//
+//   [cmd] [status-fd] [target-slot-suffix]
+//
+// The file descriptor denoted by status-fd will be closed. Dexopt commands of
+// the form
+//
+//   "dexopt" [dexopt-params]
+//
+// are then read from stdin until EOF and passed on to /system/bin/otapreopt one
+// by one. After each call a line with the current command count is written to
+// stdout and flushed.
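+//
+// For example, one exchange might look like this (values are illustrative):
+//
+//   stdin:  dexopt <dexopt-params for one package>
+//   stdout: 1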
 static int otapreopt_chroot(const int argc, char **arg) {
     // Validate arguments
-    // We need the command, status channel and target slot, at a minimum.
-    if(argc < 3) {
-        PLOG(ERROR) << "Not enough arguments.";
+    if (argc == 2 && std::string_view(arg[1]) == "--version") {
+        // Accept a single --version flag, to allow the script to tell this binary
+        // from the earlier one.
+        std::cout << "2" << std::endl;
+        return 0;
+    }
+    if (argc != 3) {
+        LOG(ERROR) << "Wrong number of arguments: " << argc;
         exit(208);
     }
-    // Close all file descriptors. They are coming from the caller, we do not want to pass them
-    // on across our fork/exec into a different domain.
-    // 1) Default descriptors.
-    CloseDescriptor(STDIN_FILENO);
-    CloseDescriptor(STDOUT_FILENO);
-    CloseDescriptor(STDERR_FILENO);
-    // 2) The status channel.
-    CloseDescriptor(arg[1]);
+    const char* status_fd = arg[1];
+    const char* slot_suffix = arg[2];
+
+    // Set O_CLOEXEC on standard fds. They are coming from the caller, we do not
+    // want to pass them on across our fork/exec into a different domain.
+    SetCloseOnExec(STDIN_FILENO);
+    SetCloseOnExec(STDOUT_FILENO);
+    SetCloseOnExec(STDERR_FILENO);
+    // Close the status channel.
+    CloseDescriptor(status_fd);
 
     // We need to run the otapreopt tool from the postinstall partition. As such, set up a
     // mount namespace and change root.
@@ -185,20 +206,20 @@
     //  2) We're in a mount namespace here, so when we die, this will be cleaned up.
     //  3) Ignore errors. Printing anything at this stage will open a file descriptor
     //     for logging.
-    if (!ValidateTargetSlotSuffix(arg[2])) {
-        LOG(ERROR) << "Target slot suffix not legal: " << arg[2];
+    if (!ValidateTargetSlotSuffix(slot_suffix)) {
+        LOG(ERROR) << "Target slot suffix not legal: " << slot_suffix;
         exit(207);
     }
-    TryExtraMount("vendor", arg[2], "/postinstall/vendor");
+    TryExtraMount("vendor", slot_suffix, "/postinstall/vendor");
 
     // Try to mount the product partition. update_engine doesn't do this for us, but we
     // want it for product APKs. Same notes as vendor above.
-    TryExtraMount("product", arg[2], "/postinstall/product");
+    TryExtraMount("product", slot_suffix, "/postinstall/product");
 
     // Try to mount the system_ext partition. update_engine doesn't do this for
     // us, but we want it for system_ext APKs. Same notes as vendor and product
     // above.
-    TryExtraMount("system_ext", arg[2], "/postinstall/system_ext");
+    TryExtraMount("system_ext", slot_suffix, "/postinstall/system_ext");
 
     constexpr const char* kPostInstallLinkerconfig = "/postinstall/linkerconfig";
     // Try to mount /postinstall/linkerconfig. we will set it up after performing the chroot
@@ -329,30 +350,37 @@
         exit(218);
     }
 
-    // Now go on and run otapreopt.
+    // Now go on and read dexopt lines from stdin and pass them on to otapreopt.
 
-    // Incoming:  cmd + status-fd + target-slot + cmd...      | Incoming | = argc
-    // Outgoing:  cmd             + target-slot + cmd...      | Outgoing | = argc - 1
-    std::vector<std::string> cmd;
-    cmd.reserve(argc);
-    cmd.push_back("/system/bin/otapreopt");
+    int count = 1;
+    for (std::array<char, 1000> linebuf;
+         std::cin.clear(), std::cin.getline(&linebuf[0], linebuf.size()); ++count) {
+        // Subtract one from gcount() since getline() counts the newline.
+        std::string line(&linebuf[0], std::cin.gcount() - 1);
 
-    // The first parameter is the status file descriptor, skip.
-    for (size_t i = 2; i < static_cast<size_t>(argc); ++i) {
-        cmd.push_back(arg[i]);
+        if (std::cin.fail()) {
+            LOG(ERROR) << "Command exceeds max length " << linebuf.size() << " - skipped: " << line;
+            continue;
+        }
+
+        std::vector<std::string> tokenized_line = android::base::Tokenize(line, " ");
+        std::vector<std::string> cmd{"/system/bin/otapreopt", slot_suffix};
+        std::move(tokenized_line.begin(), tokenized_line.end(), std::back_inserter(cmd));
+
+        LOG(INFO) << "Command " << count << ": " << android::base::Join(cmd, " ");
+
+        // Fork and execute otapreopt in its own process.
+        std::string error_msg;
+        bool exec_result = Exec(cmd, &error_msg);
+        if (!exec_result) {
+            LOG(ERROR) << "Running otapreopt failed: " << error_msg;
+        }
+
+        // Print the count to stdout and flush to indicate progress.
+        std::cout << count << std::endl;
     }
 
-    // Fork and execute otapreopt in its own process.
-    std::string error_msg;
-    bool exec_result = Exec(cmd, &error_msg);
-    if (!exec_result) {
-        LOG(ERROR) << "Running otapreopt failed: " << error_msg;
-    }
-
-    if (!exec_result) {
-        exit(213);
-    }
-
+    LOG(INFO) << "No more dexopt commands";
     return 0;
 }
 
diff --git a/cmds/installd/otapreopt_script.sh b/cmds/installd/otapreopt_script.sh
index db5c34e..28bd793 100644
--- a/cmds/installd/otapreopt_script.sh
+++ b/cmds/installd/otapreopt_script.sh
@@ -16,7 +16,9 @@
 # limitations under the License.
 #
 
-# This script will run as a postinstall step to drive otapreopt.
+# This script runs as a postinstall step to drive otapreopt. It comes with the
+# OTA package, but runs /system/bin/otapreopt_chroot in the (old) active system
+# image. See system/extras/postinst/postinst.sh for some docs.
 
 TARGET_SLOT="$1"
 STATUS_FD="$2"
@@ -31,12 +33,11 @@
 
 BOOT_COMPLETE=$(getprop $BOOT_PROPERTY_NAME)
 if [ "$BOOT_COMPLETE" != "1" ] ; then
-  echo "Error: boot-complete not detected."
+  echo "$0: Error: boot-complete not detected."
   # We must return 0 to not block sideload.
   exit 0
 fi
 
-
 # Compute target slot suffix.
 # TODO: Once bootctl is not restricted, we should query from there. Or get this from
 #       update_engine as a parameter.
@@ -45,45 +46,63 @@
 elif [ "$TARGET_SLOT" = "1" ] ; then
   TARGET_SLOT_SUFFIX="_b"
 else
-  echo "Unknown target slot $TARGET_SLOT"
+  echo "$0: Unknown target slot $TARGET_SLOT"
   exit 1
 fi
 
+if [ "$(/system/bin/otapreopt_chroot --version)" != 2 ]; then
+  # We require an updated chroot wrapper that reads dexopt commands from stdin.
+  # Even if we kept compat with the old binary, the OTA preopt wouldn't work due
+  # to missing sepolicy rules, so there's no use spending time trying to dexopt
+  # (b/291974157).
+  echo "$0: Current system image is too old to work with OTA preopt - skipping."
+  exit 0
+fi
 
 PREPARE=$(cmd otadexopt prepare)
 # Note: Ignore preparation failures. Step and done will fail and exit this.
 #       This is necessary to support suspends - the OTA service will keep
 #       the state around for us.
 
-PROGRESS=$(cmd otadexopt progress)
-print -u${STATUS_FD} "global_progress $PROGRESS"
-
-i=0
-while ((i<MAXIMUM_PACKAGES)) ; do
+# Create an array with all dexopt commands in advance, to know how many there are.
+otadexopt_cmds=()
+while (( ${#otadexopt_cmds[@]} < MAXIMUM_PACKAGES )) ; do
   DONE=$(cmd otadexopt done)
   if [ "$DONE" = "OTA complete." ] ; then
     break
   fi
-
-  DEXOPT_PARAMS=$(cmd otadexopt next)
-
-  /system/bin/otapreopt_chroot $STATUS_FD $TARGET_SLOT_SUFFIX $DEXOPT_PARAMS >&- 2>&-
-
-  PROGRESS=$(cmd otadexopt progress)
-  print -u${STATUS_FD} "global_progress $PROGRESS"
-
-  sleep 1
-  i=$((i+1))
+  otadexopt_cmds+=("$(cmd otadexopt next)")
 done
 
 DONE=$(cmd otadexopt done)
+cmd otadexopt cleanup
+
+echo "$0: Using streaming otapreopt_chroot on ${#otadexopt_cmds[@]} packages"
+
+function print_otadexopt_cmds {
+  for cmd in "${otadexopt_cmds[@]}" ; do
+    print "$cmd"
+  done
+}
+
+function report_progress {
+  while read count ; do
+    # mksh can't do floating point arithmetic, so emulate a fixed point calculation.
+    (( permilles = 1000 * count / ${#otadexopt_cmds[@]} ))
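+    # e.g. with 8 packages and count=3: permilles=375, printed as "global_progress 0.375".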
+    printf 'global_progress %d.%03d\n' $((permilles / 1000)) $((permilles % 1000)) >&${STATUS_FD}
+  done
+}
+
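+# Feed all dexopt commands to otapreopt_chroot on stdin; the per-command counts it
+# writes to stdout drive report_progress.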
+print_otadexopt_cmds | \
+  /system/bin/otapreopt_chroot $STATUS_FD $TARGET_SLOT_SUFFIX | \
+  report_progress
+
 if [ "$DONE" = "OTA incomplete." ] ; then
-  echo "Incomplete."
+  echo "$0: Incomplete."
 else
-  echo "Complete or error."
+  echo "$0: Complete or error."
 fi
 
 print -u${STATUS_FD} "global_progress 1.0"
-cmd otadexopt cleanup
 
 exit 0
diff --git a/cmds/installd/run_dex2oat.cpp b/cmds/installd/run_dex2oat.cpp
index 4221a3a..7648265 100644
--- a/cmds/installd/run_dex2oat.cpp
+++ b/cmds/installd/run_dex2oat.cpp
@@ -208,36 +208,13 @@
     }
 
     // Compute compiler filter.
-    {
-        std::string dex2oat_compiler_filter_arg;
-        {
-            // If we are booting without the real /data, don't spend time compiling.
-            std::string vold_decrypt = GetProperty("vold.decrypt", "");
-            bool skip_compilation = vold_decrypt == "trigger_restart_min_framework" ||
-                    vold_decrypt == "1";
-
-            bool have_dex2oat_relocation_skip_flag = false;
-            if (skip_compilation) {
-                dex2oat_compiler_filter_arg = "--compiler-filter=extract";
-                have_dex2oat_relocation_skip_flag = true;
-            } else if (compiler_filter != nullptr) {
-                dex2oat_compiler_filter_arg = StringPrintf("--compiler-filter=%s",
-                                                           compiler_filter);
-            }
-            if (have_dex2oat_relocation_skip_flag) {
-                AddRuntimeArg("-Xnorelocate");
-            }
-        }
-
-        if (dex2oat_compiler_filter_arg.empty()) {
-            dex2oat_compiler_filter_arg = MapPropertyToArg("dalvik.vm.dex2oat-filter",
-                                                           "--compiler-filter=%s");
-        }
-        AddArg(dex2oat_compiler_filter_arg);
-
-        if (compilation_reason != nullptr) {
-            AddArg(std::string("--compilation-reason=") + compilation_reason);
-        }
+    if (compiler_filter != nullptr) {
+        AddArg(StringPrintf("--compiler-filter=%s", compiler_filter));
+    } else {
+        AddArg(MapPropertyToArg("dalvik.vm.dex2oat-filter", "--compiler-filter=%s"));
+    }
+    if (compilation_reason != nullptr) {
+        AddArg(std::string("--compilation-reason=") + compilation_reason);
     }
 
     AddArg(MapPropertyToArg("dalvik.vm.dex2oat-max-image-block-size",
diff --git a/cmds/installd/run_dex2oat_test.cpp b/cmds/installd/run_dex2oat_test.cpp
index 304ba7b..56f84a5 100644
--- a/cmds/installd/run_dex2oat_test.cpp
+++ b/cmds/installd/run_dex2oat_test.cpp
@@ -441,24 +441,6 @@
     VerifyExpectedFlags();
 }
 
-TEST_F(RunDex2OatTest, SkipRelocationInMinFramework) {
-    setSystemProperty("vold.decrypt", "trigger_restart_min_framework");
-    CallRunDex2Oat(RunDex2OatArgs::MakeDefaultTestArgs());
-
-    SetExpectedFlagUsed("--compiler-filter", "=extract");
-    SetExpectedFlagUsed("-Xnorelocate", "");
-    VerifyExpectedFlags();
-}
-
-TEST_F(RunDex2OatTest, SkipRelocationIfDecryptedWithFullDiskEncryption) {
-    setSystemProperty("vold.decrypt", "1");
-    CallRunDex2Oat(RunDex2OatArgs::MakeDefaultTestArgs());
-
-    SetExpectedFlagUsed("--compiler-filter", "=extract");
-    SetExpectedFlagUsed("-Xnorelocate", "");
-    VerifyExpectedFlags();
-}
-
 TEST_F(RunDex2OatTest, DalvikVmDex2oatFilter) {
     setSystemProperty("dalvik.vm.dex2oat-filter", "speed");
     auto args = RunDex2OatArgs::MakeDefaultTestArgs();
diff --git a/data/etc/Android.bp b/data/etc/Android.bp
index 2143d93..c962c15 100644
--- a/data/etc/Android.bp
+++ b/data/etc/Android.bp
@@ -263,8 +263,8 @@
 }
 
 prebuilt_etc {
-    name: "android.hardware.threadnetwork.prebuilt.xml",
-    src: "android.hardware.threadnetwork.xml",
+    name: "android.hardware.thread_network.prebuilt.xml",
+    src: "android.hardware.thread_network.xml",
     defaults: ["frameworks_native_data_etc_defaults"],
 }
 
@@ -341,6 +341,12 @@
 }
 
 prebuilt_etc {
+    name: "android.software.opengles.deqp.level-latest.prebuilt.xml",
+    src: "android.software.opengles.deqp.level-latest.xml",
+    defaults: ["frameworks_native_data_etc_defaults"],
+}
+
+prebuilt_etc {
     name: "android.software.sip.voip.prebuilt.xml",
     src: "android.software.sip.voip.xml",
     defaults: ["frameworks_native_data_etc_defaults"],
@@ -371,6 +377,12 @@
 }
 
 prebuilt_etc {
+    name: "android.software.vulkan.deqp.level-latest.prebuilt.xml",
+    src: "android.software.vulkan.deqp.level-latest.xml",
+    defaults: ["frameworks_native_data_etc_defaults"],
+}
+
+prebuilt_etc {
     name: "aosp_excluded_hardware.prebuilt.xml",
     src: "aosp_excluded_hardware.xml",
     defaults: ["frameworks_native_data_etc_defaults"],
diff --git a/data/etc/android.hardware.threadnetwork.xml b/data/etc/android.hardware.thread_network.xml
similarity index 83%
rename from data/etc/android.hardware.threadnetwork.xml
rename to data/etc/android.hardware.thread_network.xml
index 9cbdc90..b116ed6 100644
--- a/data/etc/android.hardware.threadnetwork.xml
+++ b/data/etc/android.hardware.thread_network.xml
@@ -13,7 +13,7 @@
      See the License for the specific language governing permissions and
      limitations under the License.
 -->
-<!-- Adds the feature indicating support for the ThreadNetwork API -->
+<!-- Adds the feature indicating support for the Thread networking protocol -->
 <permissions>
-    <feature name="android.hardware.threadnetwork" />
+    <feature name="android.hardware.thread_network" />
 </permissions>
diff --git a/data/etc/android.hardware.threadnetwork.xml b/data/etc/android.software.opengles.deqp.level-latest.xml
similarity index 68%
copy from data/etc/android.hardware.threadnetwork.xml
copy to data/etc/android.software.opengles.deqp.level-latest.xml
index 9cbdc90..bd15eb6 100644
--- a/data/etc/android.hardware.threadnetwork.xml
+++ b/data/etc/android.software.opengles.deqp.level-latest.xml
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="utf-8"?>
-<!-- Copyright (C) 2023 The Android Open Source Project
+<!-- Copyright 2023 The Android Open Source Project
 
      Licensed under the Apache License, Version 2.0 (the "License");
      you may not use this file except in compliance with the License.
@@ -13,7 +13,9 @@
      See the License for the specific language governing permissions and
      limitations under the License.
 -->
-<!-- Adds the feature indicating support for the ThreadNetwork API -->
+
+<!-- This is the standard feature indicating that the device passes OpenGL ES
+     dEQP tests associated with the most recent level for this Android version. -->
 <permissions>
-    <feature name="android.hardware.threadnetwork" />
+    <feature name="android.software.opengles.deqp.level" version="132580097" />
 </permissions>
diff --git a/data/etc/android.hardware.threadnetwork.xml b/data/etc/android.software.vulkan.deqp.level-latest.xml
similarity index 68%
copy from data/etc/android.hardware.threadnetwork.xml
copy to data/etc/android.software.vulkan.deqp.level-latest.xml
index 9cbdc90..87be070 100644
--- a/data/etc/android.hardware.threadnetwork.xml
+++ b/data/etc/android.software.vulkan.deqp.level-latest.xml
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="utf-8"?>
-<!-- Copyright (C) 2023 The Android Open Source Project
+<!-- Copyright 2023 The Android Open Source Project
 
      Licensed under the Apache License, Version 2.0 (the "License");
      you may not use this file except in compliance with the License.
@@ -13,7 +13,9 @@
      See the License for the specific language governing permissions and
      limitations under the License.
 -->
-<!-- Adds the feature indicating support for the ThreadNetwork API -->
+
+<!-- This is the standard feature indicating that the device passes Vulkan
+     dEQP tests associated with the most recent level for this Android version. -->
 <permissions>
-    <feature name="android.hardware.threadnetwork" />
+    <feature name="android.software.vulkan.deqp.level" version="132580097" />
 </permissions>
diff --git a/include/android/performance_hint.h b/include/android/performance_hint.h
index cedd361..ba8b02d 100644
--- a/include/android/performance_hint.h
+++ b/include/android/performance_hint.h
@@ -14,6 +14,23 @@
  * limitations under the License.
  */
 
+/**
+ * @defgroup APerformanceHint Performance Hint Manager
+ *
+ * APerformanceHint allows apps to create performance hint sessions for groups
+ * of threads, and provide hints to the system about the workload of those threads,
+ * to help the system more accurately allocate power for them. It is the NDK
+ * counterpart to the Java PerformanceHintManager SDK API.
+ *
+ * @{
+ */
+
+/**
+ * @file performance_hint.h
+ * @brief API for creating and managing a hint session.
+ */
+
+
 #ifndef ANDROID_NATIVE_PERFORMANCE_HINT_H
 #define ANDROID_NATIVE_PERFORMANCE_HINT_H
 
@@ -48,7 +65,7 @@
  * An opaque type representing a handle to a performance hint manager.
  * It must be released after use.
  *
- * <p>To use:<ul>
+ * To use:<ul>
  *    <li>Obtain the performance hint manager instance by calling
  *        {@link APerformanceHint_getManager} function.</li>
  *    <li>Create an {@link APerformanceHintSession} with
@@ -61,50 +78,43 @@
 /**
  * An opaque type representing a handle to a performance hint session.
  * A session can only be acquired from a {@link APerformanceHintManager}
- * with {@link APerformanceHint_getPreferredUpdateRateNanos}. It must be
+ * with {@link APerformanceHint_createSession}. It must be
  * freed with {@link APerformanceHint_closeSession} after use.
  *
  * A Session represents a group of threads with an inter-related workload such that hints for
  * their performance should be considered as a unit. The threads in a given session should be
- * long-life and not created or destroyed dynamically.
+ * long-lived and not created or destroyed dynamically.
  *
- * <p>Each session is expected to have a periodic workload with a target duration for each
- * cycle. The cycle duration is likely greater than the target work duration to allow other
- * parts of the pipeline to run within the available budget. For example, a renderer thread may
- * work at 60hz in order to produce frames at the display's frame but have a target work
- * duration of only 6ms.</p>
+ * The work duration API can be used with periodic workloads to dynamically adjust thread
+ * performance and keep the work on schedule while optimizing the available power budget.
+ * When using the work duration API, the starting target duration should be specified
+ * while creating the session, and can later be adjusted with
+ * {@link APerformanceHint_updateTargetWorkDuration}. While using the work duration
+ * API, the client is expected to call {@link APerformanceHint_reportActualWorkDuration} each
+ * cycle to report the actual time taken to complete to the system.
  *
- * <p>After each cycle of work, the client is expected to use
- * {@link APerformanceHint_reportActualWorkDuration} to report the actual time taken to
- * complete.</p>
- *
- * <p>To use:<ul>
- *    <li>Update a sessions target duration for each cycle of work
- *        with  {@link APerformanceHint_updateTargetWorkDuration}.</li>
- *    <li>Report the actual duration for the last cycle of work with
- *        {@link APerformanceHint_reportActualWorkDuration}.</li>
- *    <li>Release the session instance with
- *        {@link APerformanceHint_closeSession}.</li></ul></p>
+ * All timings should be from `std::chrono::steady_clock` or `clock_gettime(CLOCK_MONOTONIC, ...)`
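+ *
+ * A rough usage sketch (error handling omitted; local variable names are illustrative):
+ *
+ *     APerformanceHintManager* manager = APerformanceHint_getManager();
+ *     APerformanceHintSession* session = APerformanceHint_createSession(
+ *             manager, threadIds, threadCount, targetWorkDurationNanos);
+ *     // Each work cycle:
+ *     APerformanceHint_reportActualWorkDuration(session, actualDurationNanos);
+ *     // If the target changes:
+ *     APerformanceHint_updateTargetWorkDuration(session, newTargetDurationNanos);
+ *     // On teardown:
+ *     APerformanceHint_closeSession(session);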
  */
 typedef struct APerformanceHintSession APerformanceHintSession;
 
 /**
   * Acquire an instance of the performance hint manager.
   *
-  * @return manager instance on success, nullptr on failure.
+  * @return APerformanceHintManager instance on success, nullptr on failure.
   */
 APerformanceHintManager* APerformanceHint_getManager() __INTRODUCED_IN(__ANDROID_API_T__);
 
 /**
  * Creates a session for the given set of threads and sets their initial target work
  * duration.
+ *
  * @param manager The performance hint manager instance.
  * @param threadIds The list of threads to be associated with this session. They must be part of
- *     this app's thread group.
- * @param size the size of threadIds.
- * @param initialTargetWorkDurationNanos The desired duration in nanoseconds for the new session.
- *     This must be positive.
- * @return manager instance on success, nullptr on failure.
+ *     this process' thread group.
+ * @param size The size of the list of threadIds.
+ * @param initialTargetWorkDurationNanos The target duration in nanoseconds for the new session.
+ *     This must be positive if using the work duration API, or 0 otherwise.
+ * @return APerformanceHintSession instance on success, nullptr on failure.
  */
 APerformanceHintSession* APerformanceHint_createSession(
         APerformanceHintManager* manager,
@@ -124,8 +134,8 @@
  * Updates this session's target duration for each cycle of work.
  *
  * @param session The performance hint session instance to update.
- * @param targetDurationNanos the new desired duration in nanoseconds. This must be positive.
- * @return 0 on success
+ * @param targetDurationNanos The new desired duration in nanoseconds. This must be positive.
+ * @return 0 on success.
  *         EINVAL if targetDurationNanos is not positive.
  *         EPIPE if communication with the system service has failed.
  */
@@ -136,14 +146,13 @@
 /**
  * Reports the actual duration for the last cycle of work.
  *
- * <p>The system will attempt to adjust the core placement of the threads within the thread
- * group and/or the frequency of the core on which they are run to bring the actual duration
- * close to the target duration.</p>
+ * The system will attempt to adjust the scheduling and performance of the
+ * threads within the thread group to bring the actual duration close to the target duration.
  *
  * @param session The performance hint session instance to update.
- * @param actualDurationNanos how long the thread group took to complete its last task in
- *     nanoseconds. This must be positive.
- * @return 0 on success
+ * @param actualDurationNanos The duration of time the thread group took to complete its last
+ *     task in nanoseconds. This must be positive.
+ * @return 0 on success.
  *         EINVAL if actualDurationNanos is not positive.
  *         EPIPE if communication with the system service has failed.
  */
@@ -164,12 +173,13 @@
  * Set a list of threads to the performance hint session. This operation will replace
  * the current list of threads with the given list of threads.
  *
- * @param session The performance hint session instance for the threads.
+ * @param session The performance hint session instance to update.
  * @param threadIds The list of threads to be associated with this session. They must be part of
  *     this app's thread group.
- * @param size the size of the list of threadIds.
+ * @param size The size of the list of threadIds.
  * @return 0 on success.
- *         EINVAL if the list of thread ids is empty or if  any of the thread ids is not part of the thread group.
+ *         EINVAL if the list of thread ids is empty or if any of the thread ids are not part of
+ *             the thread group.
  *         EPIPE if communication with the system service has failed.
  *         EPERM if any thread id doesn't belong to the application.
  */
@@ -178,6 +188,21 @@
         const pid_t* threadIds,
         size_t size) __INTRODUCED_IN(__ANDROID_API_U__);
 
+/**
+ * This tells the session that these threads can be
+ * safely scheduled to prefer power efficiency over performance.
+ *
+ * @param session The performance hint session instance to update.
+ * @param enabled The flag which sets whether this session will use power-efficient scheduling.
+ * @return 0 on success.
+ *         EPIPE if communication with the system service has failed.
+ */
+int APerformanceHint_setPreferPowerEfficiency(
+        APerformanceHintSession* session,
+        bool enabled) __INTRODUCED_IN(__ANDROID_API_V__);
+
 __END_DECLS
 
 #endif // ANDROID_NATIVE_PERFORMANCE_HINT_H
+
+/** @} */
\ No newline at end of file
diff --git a/include/input/VelocityTracker.h b/include/input/VelocityTracker.h
index b58feac..2e99495 100644
--- a/include/input/VelocityTracker.h
+++ b/include/input/VelocityTracker.h
@@ -16,6 +16,7 @@
 
 #pragma once
 
+#include <android/os/IInputConstants.h>
 #include <input/Input.h>
 #include <input/RingBuffer.h>
 #include <utils/BitSet.h>
@@ -35,19 +36,20 @@
     static const size_t MAX_DEGREE = 4;
 
     enum class Strategy : int32_t {
-        DEFAULT = -1,
-        MIN = 0,
-        IMPULSE = 0,
-        LSQ1 = 1,
-        LSQ2 = 2,
-        LSQ3 = 3,
-        WLSQ2_DELTA = 4,
-        WLSQ2_CENTRAL = 5,
-        WLSQ2_RECENT = 6,
-        INT1 = 7,
-        INT2 = 8,
-        LEGACY = 9,
+        DEFAULT = android::os::IInputConstants::VELOCITY_TRACKER_STRATEGY_DEFAULT,
+        IMPULSE = android::os::IInputConstants::VELOCITY_TRACKER_STRATEGY_IMPULSE,
+        LSQ1 = android::os::IInputConstants::VELOCITY_TRACKER_STRATEGY_LSQ1,
+        LSQ2 = android::os::IInputConstants::VELOCITY_TRACKER_STRATEGY_LSQ2,
+        LSQ3 = android::os::IInputConstants::VELOCITY_TRACKER_STRATEGY_LSQ3,
+        WLSQ2_DELTA = android::os::IInputConstants::VELOCITY_TRACKER_STRATEGY_WLSQ2_DELTA,
+        WLSQ2_CENTRAL = android::os::IInputConstants::VELOCITY_TRACKER_STRATEGY_WLSQ2_CENTRAL,
+        WLSQ2_RECENT = android::os::IInputConstants::VELOCITY_TRACKER_STRATEGY_WLSQ2_RECENT,
+        INT1 = android::os::IInputConstants::VELOCITY_TRACKER_STRATEGY_INT1,
+        INT2 = android::os::IInputConstants::VELOCITY_TRACKER_STRATEGY_INT2,
+        LEGACY = android::os::IInputConstants::VELOCITY_TRACKER_STRATEGY_LEGACY,
+        MIN = IMPULSE,
         MAX = LEGACY,
+        ftl_last = LEGACY,
     };
 
     /*
@@ -81,8 +83,6 @@
     // TODO(b/32830165): support axis-specific strategies.
     VelocityTracker(const Strategy strategy = Strategy::DEFAULT);
 
-    ~VelocityTracker();
-
     /** Return true if the axis is supported for velocity tracking, false otherwise. */
     static bool isAxisSupported(int32_t axis);
 
diff --git a/libs/binder/Android.bp b/libs/binder/Android.bp
index 3f1fc33..6c2b313 100644
--- a/libs/binder/Android.bp
+++ b/libs/binder/Android.bp
@@ -190,6 +190,9 @@
         "-performance-move-const-arg", // b/273486801
         "portability*",
     ],
+    lto: {
+        thin: true,
+    },
 }
 
 cc_library_headers {
diff --git a/libs/binder/MemoryHeapBase.cpp b/libs/binder/MemoryHeapBase.cpp
index 3da06ba..fc273e0 100644
--- a/libs/binder/MemoryHeapBase.cpp
+++ b/libs/binder/MemoryHeapBase.cpp
@@ -73,8 +73,8 @@
         ALOGV("MemoryHeapBase: Attempting to force MemFD");
         fd = memfd_create_region(name ? name : "MemoryHeapBase", size);
         if (fd < 0 || (mapfd(fd, true, size) != NO_ERROR)) return;
-        const int SEAL_FLAGS = ((mFlags & READ_ONLY) ? F_SEAL_FUTURE_WRITE : 0) |
-                ((mFlags & MEMFD_ALLOW_SEALING_FLAG) ? 0 : F_SEAL_SEAL);
+        const int SEAL_FLAGS = ((mFlags & READ_ONLY) ? F_SEAL_FUTURE_WRITE : 0) | F_SEAL_GROW |
+                F_SEAL_SHRINK | ((mFlags & MEMFD_ALLOW_SEALING_FLAG) ? 0 : F_SEAL_SEAL);
         if (SEAL_FLAGS && (fcntl(fd, F_ADD_SEALS, SEAL_FLAGS) == -1)) {
             ALOGE("MemoryHeapBase: MemFD %s sealing with flags %x failed with error  %s", name,
                   SEAL_FLAGS, strerror(errno));
diff --git a/libs/binder/rust/Android.bp b/libs/binder/rust/Android.bp
index d36ebac..672d6cf 100644
--- a/libs/binder/rust/Android.bp
+++ b/libs/binder/rust/Android.bp
@@ -97,34 +97,12 @@
     crate_name: "binder_ndk_bindgen",
     wrapper_src: "sys/BinderBindings.hpp",
     source_stem: "bindings",
-    bindgen_flags: [
+    bindgen_flag_files: [
         // Unfortunately the only way to specify the rust_non_exhaustive enum
         // style for a type is to make it the default
-        "--default-enum-style",
-        "rust_non_exhaustive",
         // and then specify constified enums for the enums we don't want
         // rustified
-        "--constified-enum",
-        "android::c_interface::consts::.*",
-
-        "--allowlist-type",
-        "android::c_interface::.*",
-        "--allowlist-type",
-        "AStatus",
-        "--allowlist-type",
-        "AIBinder_Class",
-        "--allowlist-type",
-        "AIBinder",
-        "--allowlist-type",
-        "AIBinder_Weak",
-        "--allowlist-type",
-        "AIBinder_DeathRecipient",
-        "--allowlist-type",
-        "AParcel",
-        "--allowlist-type",
-        "binder_status_t",
-        "--allowlist-function",
-        ".*",
+        "libbinder_ndk_bindgen_flags.txt",
     ],
     shared_libs: [
         "libbinder_ndk",
diff --git a/libs/binder/rust/libbinder_ndk_bindgen_flags.txt b/libs/binder/rust/libbinder_ndk_bindgen_flags.txt
new file mode 100644
index 0000000..551c59f
--- /dev/null
+++ b/libs/binder/rust/libbinder_ndk_bindgen_flags.txt
@@ -0,0 +1,11 @@
+--default-enum-style=rust_non_exhaustive
+--constified-enum=android::c_interface::consts::.*
+--allowlist-type=android::c_interface::.*
+--allowlist-type=AStatus
+--allowlist-type=AIBinder_Class
+--allowlist-type=AIBinder
+--allowlist-type=AIBinder_Weak
+--allowlist-type=AIBinder_DeathRecipient
+--allowlist-type=AParcel
+--allowlist-type=binder_status_t
+--allowlist-function=.*
diff --git a/libs/binder/rust/src/error.rs b/libs/binder/rust/src/error.rs
index 8d9ce0e..eb04cc3 100644
--- a/libs/binder/rust/src/error.rs
+++ b/libs/binder/rust/src/error.rs
@@ -370,6 +370,94 @@
     }
 }
 
+/// A conversion from `std::result::Result<T, E>` to `binder::Result<T>`. If this type is `Ok(T)`,
+/// it's returned as is. If this type is `Err(E)`, `E` is converted into `Status` which can be
+/// either a general binder exception, or a service-specific exception.
+///
+/// # Examples
+///
+/// ```
+/// // std::io::Error is formatted as the exception's message
+/// fn file_exists(name: &str) -> binder::Result<bool> {
+///     let metadata = std::fs::metadata(name)
+///         .or_service_specific_exception(NOT_FOUND)?;
+///     Ok(metadata.is_file())
+/// }
+///
+/// // A custom function is used to create the exception's message
+/// fn file_exists(name: &str) -> binder::Result<bool> {
+///     let metadata = std::fs::metadata(name)
+///         .or_service_specific_exception_with(NOT_FOUND,
+///             |e| format!("file {} not found: {:?}", name, e))?;
+///     Ok(metadata.is_file())
+/// }
+///
+/// // anyhow::Error is formatted as the exception's message
+/// use anyhow::{Context, Result};
+/// fn file_exists(name: &str) -> binder::Result<bool> {
+///     let metadata = std::fs::metadata(name)
+///         .context("failed to read file metadata")
+///         .or_service_specific_exception(NOT_FOUND)?;
+///     Ok(metadata.is_file())
+/// }
+///
+/// // General binder exceptions can be created similarly
+/// fn file_exists(name: &str) -> binder::Result<bool> {
+///     let metadata = std::fs::metadata(name)
+///         .or_binder_exception(ExceptionCode::ILLEGAL_ARGUMENT)?;
+///     Ok(metadata.is_file())
+/// }
+/// ```
+pub trait IntoBinderResult<T, E> {
+    /// Converts the embedded error into a general binder exception of code `exception`. The
+    /// message of the exception is set by formatting the error for debugging.
+    fn or_binder_exception(self, exception: ExceptionCode) -> result::Result<T, Status>;
+
+    /// Converts the embedded error into a general binder exception of code `exception`. The
+    /// message of the exception is set by lazily evaluating the `op` function.
+    fn or_binder_exception_with<M: AsRef<str>, O: FnOnce(E) -> M>(
+        self,
+        exception: ExceptionCode,
+        op: O,
+    ) -> result::Result<T, Status>;
+
+    /// Converts the embedded error into a service-specific binder exception. `error_code` is used
+    /// to distinguish different service-specific binder exceptions. The message of the exception
+    /// is set by formatting the error for debugging.
+    fn or_service_specific_exception(self, error_code: i32) -> result::Result<T, Status>;
+
+    /// Converts the embedded error into a service-specific binder exception. `error_code` is used
+    /// to distinguish different service-specific binder exceptions. The message of the exception
+    /// is set by lazily evaluating the `op` function.
+    fn or_service_specific_exception_with<M: AsRef<str>, O: FnOnce(E) -> M>(
+        self,
+        error_code: i32,
+        op: O,
+    ) -> result::Result<T, Status>;
+}
+
+impl<T, E: std::fmt::Debug> IntoBinderResult<T, E> for result::Result<T, E> {
+    fn or_binder_exception(self, exception: ExceptionCode) -> result::Result<T, Status> {
+        self.or_binder_exception_with(exception, |e| format!("{:?}", e))
+    }
+
+    fn or_binder_exception_with<M: AsRef<str>, O: FnOnce(E) -> M>(
+        self,
+        exception: ExceptionCode,
+        op: O,
+    ) -> result::Result<T, Status> {
+        self.map_err(|e| Status::new_exception_str(exception, Some(op(e))))
+    }
+
+    fn or_service_specific_exception(self, error_code: i32) -> result::Result<T, Status> {
+        self.or_service_specific_exception_with(error_code, |e| format!("{:?}", e))
+    }
+
+    fn or_service_specific_exception_with<M: AsRef<str>, O: FnOnce(E) -> M>(
+        self,
+        error_code: i32,
+        op: O,
+    ) -> result::Result<T, Status> {
+        self.map_err(|e| Status::new_service_specific_error_str(error_code, Some(op(e))))
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -406,4 +494,66 @@
         assert_eq!(status.service_specific_error(), 0);
         assert_eq!(status.get_description(), "Status(-5, EX_ILLEGAL_STATE): ''".to_string());
     }
+
+    #[test]
+    fn convert_to_service_specific_exception() {
+        let res: std::result::Result<(), Status> =
+            Err("message").or_service_specific_exception(-42);
+
+        assert!(res.is_err());
+        let status = res.unwrap_err();
+        assert_eq!(status.exception_code(), ExceptionCode::SERVICE_SPECIFIC);
+        assert_eq!(status.service_specific_error(), -42);
+        assert_eq!(
+            status.get_description(),
+            "Status(-8, EX_SERVICE_SPECIFIC): '-42: \"message\"'".to_string()
+        );
+    }
+
+    #[test]
+    fn convert_to_service_specific_exception_with() {
+        let res: std::result::Result<(), Status> = Err("message")
+            .or_service_specific_exception_with(-42, |e| format!("outer message: {:?}", e));
+
+        assert!(res.is_err());
+        let status = res.unwrap_err();
+        assert_eq!(status.exception_code(), ExceptionCode::SERVICE_SPECIFIC);
+        assert_eq!(status.service_specific_error(), -42);
+        assert_eq!(
+            status.get_description(),
+            "Status(-8, EX_SERVICE_SPECIFIC): '-42: outer message: \"message\"'".to_string()
+        );
+    }
+
+    #[test]
+    fn convert_to_binder_exception() {
+        let res: std::result::Result<(), Status> =
+            Err("message").or_binder_exception(ExceptionCode::ILLEGAL_STATE);
+
+        assert!(res.is_err());
+        let status = res.unwrap_err();
+        assert_eq!(status.exception_code(), ExceptionCode::ILLEGAL_STATE);
+        assert_eq!(status.service_specific_error(), 0);
+        assert_eq!(
+            status.get_description(),
+            "Status(-5, EX_ILLEGAL_STATE): '\"message\"'".to_string()
+        );
+    }
+
+    #[test]
+    fn convert_to_binder_exception_with() {
+        let res: std::result::Result<(), Status> = Err("message")
+            .or_binder_exception_with(ExceptionCode::ILLEGAL_STATE, |e| {
+                format!("outer message: {:?}", e)
+            });
+
+        assert!(res.is_err());
+        let status = res.unwrap_err();
+        assert_eq!(status.exception_code(), ExceptionCode::ILLEGAL_STATE);
+        assert_eq!(status.service_specific_error(), 0);
+        assert_eq!(
+            status.get_description(),
+            "Status(-5, EX_ILLEGAL_STATE): 'outer message: \"message\"'".to_string()
+        );
+    }
 }
diff --git a/libs/binder/rust/src/lib.rs b/libs/binder/rust/src/lib.rs
index 0c8b48f..8841fe6 100644
--- a/libs/binder/rust/src/lib.rs
+++ b/libs/binder/rust/src/lib.rs
@@ -106,7 +106,7 @@
 
 pub use crate::binder_async::{BinderAsyncPool, BoxFuture};
 pub use binder::{BinderFeatures, FromIBinder, IBinder, Interface, Strong, Weak};
-pub use error::{ExceptionCode, Status, StatusCode};
+pub use error::{ExceptionCode, IntoBinderResult, Status, StatusCode};
 pub use native::{
     add_service, force_lazy_services_persist, is_handling_transaction, register_lazy_service,
     LazyServiceGuard,
diff --git a/libs/binder/tests/Android.bp b/libs/binder/tests/Android.bp
index 41856f9..cd3e7c0 100644
--- a/libs/binder/tests/Android.bp
+++ b/libs/binder/tests/Android.bp
@@ -77,6 +77,8 @@
     static_libs: [
         "binderRecordReplayTestIface-cpp",
         "binderReadParcelIface-cpp",
+        "libbinder_random_parcel_seeds",
+        "libbinder_random_parcel",
     ],
     test_suites: ["general-tests"],
     require_root: true,
diff --git a/libs/binder/tests/binderMemoryHeapBaseUnitTest.cpp b/libs/binder/tests/binderMemoryHeapBaseUnitTest.cpp
index 278dd2b..140270f 100644
--- a/libs/binder/tests/binderMemoryHeapBaseUnitTest.cpp
+++ b/libs/binder/tests/binderMemoryHeapBaseUnitTest.cpp
@@ -37,7 +37,8 @@
     ASSERT_NE(mHeap.get(), nullptr);
     int fd = mHeap->getHeapID();
     EXPECT_NE(fd, -1);
-    EXPECT_EQ(fcntl(fd, F_GET_SEALS), F_SEAL_SEAL);
+    EXPECT_EQ(fcntl(fd, F_GET_SEALS), F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL);
+    EXPECT_EQ(ftruncate(fd, 4096), -1);
 }
 
 TEST(MemoryHeapBase, MemfdUnsealed) {
@@ -48,7 +49,8 @@
     ASSERT_NE(mHeap.get(), nullptr);
     int fd = mHeap->getHeapID();
     EXPECT_NE(fd, -1);
-    EXPECT_EQ(fcntl(fd, F_GET_SEALS), 0);
+    EXPECT_EQ(fcntl(fd, F_GET_SEALS), F_SEAL_GROW | F_SEAL_SHRINK);
+    EXPECT_EQ(ftruncate(fd, 4096), -1);
 }
 
 TEST(MemoryHeapBase, MemfdSealedProtected) {
@@ -59,7 +61,9 @@
     ASSERT_NE(mHeap.get(), nullptr);
     int fd = mHeap->getHeapID();
     EXPECT_NE(fd, -1);
-    EXPECT_EQ(fcntl(fd, F_GET_SEALS), F_SEAL_SEAL | F_SEAL_FUTURE_WRITE);
+    EXPECT_EQ(fcntl(fd, F_GET_SEALS),
+              F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL | F_SEAL_FUTURE_WRITE);
+    EXPECT_EQ(ftruncate(fd, 4096), -1);
 }
 
 TEST(MemoryHeapBase, MemfdUnsealedProtected) {
@@ -71,7 +75,8 @@
     ASSERT_NE(mHeap.get(), nullptr);
     int fd = mHeap->getHeapID();
     EXPECT_NE(fd, -1);
-    EXPECT_EQ(fcntl(fd, F_GET_SEALS), F_SEAL_FUTURE_WRITE);
+    EXPECT_EQ(fcntl(fd, F_GET_SEALS), F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_FUTURE_WRITE);
+    EXPECT_EQ(ftruncate(fd, 4096), -1);
 }
 
 #else
diff --git a/libs/binder/tests/binderRecordReplayTest.cpp b/libs/binder/tests/binderRecordReplayTest.cpp
index 17d5c8a..6773c95 100644
--- a/libs/binder/tests/binderRecordReplayTest.cpp
+++ b/libs/binder/tests/binderRecordReplayTest.cpp
@@ -15,6 +15,7 @@
  */
 
 #include <BnBinderRecordReplayTest.h>
+#include <android-base/file.h>
 #include <android-base/logging.h>
 #include <android-base/unique_fd.h>
 #include <binder/Binder.h>
@@ -23,6 +24,11 @@
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <binder/RecordedTransaction.h>
+
+#include <fuzzbinder/libbinder_driver.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <fuzzseeds/random_parcel_seeds.h>
+
 #include <gtest/gtest.h>
 
 #include <sys/prctl.h>
@@ -30,6 +36,7 @@
 #include "parcelables/SingleDataParcelable.h"
 
 using namespace android;
+using android::generateSeedsFromRecording;
 using android::binder::Status;
 using android::binder::debug::RecordedTransaction;
 using parcelables::SingleDataParcelable;
@@ -84,6 +91,44 @@
     GENERATE_GETTER_SETTER(SingleDataParcelableArray, std::vector<SingleDataParcelable>);
 };
 
+std::vector<uint8_t> retrieveData(base::borrowed_fd fd) {
+    struct stat fdStat;
+    EXPECT_TRUE(fstat(fd.get(), &fdStat) != -1);
+    EXPECT_TRUE(fdStat.st_size != 0);
+
+    std::vector<uint8_t> buffer(fdStat.st_size);
+    auto readResult = android::base::ReadFully(fd, buffer.data(), fdStat.st_size);
+    EXPECT_TRUE(readResult != 0);
+    return std::move(buffer);
+}
+
+void replayFuzzService(const sp<BpBinder>& binder, const RecordedTransaction& transaction) {
+    base::unique_fd seedFd(open("/data/local/tmp/replayFuzzService",
+                                O_RDWR | O_CREAT | O_CLOEXEC | O_TRUNC, 0666));
+    ASSERT_TRUE(seedFd.ok());
+
+    // generate corpus from this transaction.
+    generateSeedsFromRecording(seedFd, transaction);
+
+    // Read the data which has been written to seed corpus
+    ASSERT_EQ(0, lseek(seedFd.get(), 0, SEEK_SET));
+    std::vector<uint8_t> seedData = retrieveData(seedFd);
+
+    // use fuzzService to replay the corpus
+    FuzzedDataProvider provider(seedData.data(), seedData.size());
+    fuzzService(binder, std::move(provider));
+}
+
+void replayBinder(const sp<BpBinder>& binder, const RecordedTransaction& transaction) {
+    // TODO: move logic to replay RecordedTransaction into RecordedTransaction
+    Parcel data;
+    data.setData(transaction.getDataParcel().data(), transaction.getDataParcel().dataSize());
+    auto result = binder->transact(transaction.getCode(), data, nullptr, transaction.getFlags());
+
+    // make sure recording does the thing we expect it to do
+    EXPECT_EQ(OK, result);
+}
+
 class BinderRecordReplayTest : public ::testing::Test {
 public:
     void SetUp() override {
@@ -98,48 +143,46 @@
     template <typename T, typename U>
     void recordReplay(Status (IBinderRecordReplayTest::*set)(T), U recordedValue,
                       Status (IBinderRecordReplayTest::*get)(U*), U changedValue) {
-        base::unique_fd fd(open("/data/local/tmp/binderRecordReplayTest.rec",
-                                O_RDWR | O_CREAT | O_CLOEXEC, 0666));
-        ASSERT_TRUE(fd.ok());
+        auto replayFunctions = {&replayBinder, &replayFuzzService};
+        for (auto replayFunc : replayFunctions) {
+            base::unique_fd fd(open("/data/local/tmp/binderRecordReplayTest.rec",
+                                    O_RDWR | O_CREAT | O_CLOEXEC, 0666));
+            ASSERT_TRUE(fd.ok());
 
-        // record a transaction
-        mBpBinder->startRecordingBinder(fd);
-        auto status = (*mInterface.*set)(recordedValue);
-        EXPECT_TRUE(status.isOk());
-        mBpBinder->stopRecordingBinder();
+            // record a transaction
+            mBpBinder->startRecordingBinder(fd);
+            auto status = (*mInterface.*set)(recordedValue);
+            EXPECT_TRUE(status.isOk());
+            mBpBinder->stopRecordingBinder();
 
-        // test transaction does the thing we expect it to do
-        U output;
-        status = (*mInterface.*get)(&output);
-        EXPECT_TRUE(status.isOk());
-        EXPECT_EQ(output, recordedValue);
+            // test transaction does the thing we expect it to do
+            U output;
+            status = (*mInterface.*get)(&output);
+            EXPECT_TRUE(status.isOk());
+            EXPECT_EQ(output, recordedValue);
 
-        // write over the existing state
-        status = (*mInterface.*set)(changedValue);
-        EXPECT_TRUE(status.isOk());
+            // write over the existing state
+            status = (*mInterface.*set)(changedValue);
+            EXPECT_TRUE(status.isOk());
 
-        status = (*mInterface.*get)(&output);
-        EXPECT_TRUE(status.isOk());
+            status = (*mInterface.*get)(&output);
+            EXPECT_TRUE(status.isOk());
 
-        EXPECT_EQ(output, changedValue);
+            EXPECT_EQ(output, changedValue);
 
-        // replay transaction
-        ASSERT_EQ(0, lseek(fd.get(), 0, SEEK_SET));
-        std::optional<RecordedTransaction> transaction = RecordedTransaction::fromFile(fd);
-        ASSERT_NE(transaction, std::nullopt);
+            // replay transaction
+            ASSERT_EQ(0, lseek(fd.get(), 0, SEEK_SET));
+            std::optional<RecordedTransaction> transaction = RecordedTransaction::fromFile(fd);
+            ASSERT_NE(transaction, std::nullopt);
 
-        // TODO: move logic to replay RecordedTransaction into RecordedTransaction
-        Parcel data;
-        data.setData(transaction->getDataParcel().data(), transaction->getDataParcel().dataSize());
-        auto result =
-                mBpBinder->transact(transaction->getCode(), data, nullptr, transaction->getFlags());
+            const RecordedTransaction& recordedTransaction = *transaction;
+            // call replay function with recorded transaction
+            (*replayFunc)(mBpBinder, recordedTransaction);
 
-        // make sure recording does the thing we expect it to do
-        EXPECT_EQ(OK, result);
-
-        status = (*mInterface.*get)(&output);
-        EXPECT_TRUE(status.isOk());
-        EXPECT_EQ(output, recordedValue);
+            status = (*mInterface.*get)(&output);
+            EXPECT_TRUE(status.isOk());
+            EXPECT_EQ(output, recordedValue);
+        }
     }
 
 private:
diff --git a/libs/binder/tests/parcel_fuzzer/Android.bp b/libs/binder/tests/parcel_fuzzer/Android.bp
index 35866ad..383795e 100644
--- a/libs/binder/tests/parcel_fuzzer/Android.bp
+++ b/libs/binder/tests/parcel_fuzzer/Android.bp
@@ -104,3 +104,43 @@
     local_include_dirs: ["include_random_parcel"],
     export_include_dirs: ["include_random_parcel"],
 }
+
+cc_library {
+    name: "libbinder_random_parcel_seeds",
+    host_supported: true,
+    vendor_available: true,
+    target: {
+        darwin: {
+            enabled: false,
+        },
+    },
+    srcs: [
+        "random_parcel_seeds.cpp",
+    ],
+    shared_libs: [
+        "libbase",
+        "libbinder",
+        "libbinder_ndk",
+        "libcutils",
+        "libutils",
+    ],
+    local_include_dirs: [
+        "include_random_parcel_seeds",
+    ],
+    export_include_dirs: ["include_random_parcel_seeds"],
+}
+
+cc_binary_host {
+    name: "binder2corpus",
+    static_libs: [
+        "libbinder_random_parcel_seeds",
+    ],
+    srcs: [
+        "binder2corpus/binder2corpus.cpp",
+    ],
+    shared_libs: [
+        "libbase",
+        "libbinder",
+        "libutils",
+    ],
+}
diff --git a/libs/binder/tests/parcel_fuzzer/binder2corpus/README.md b/libs/binder/tests/parcel_fuzzer/binder2corpus/README.md
new file mode 100644
index 0000000..59bf9f3
--- /dev/null
+++ b/libs/binder/tests/parcel_fuzzer/binder2corpus/README.md
@@ -0,0 +1,31 @@
+# binder2corpus
+
+This tool converts recordings generated by the record_binder tool into fuzzer seeds for fuzzService.
+
+# Steps to add a corpus
+
+## Start recording the service binder
+ex. record_binder start manager
+
+## Run test on device or keep device idle
+ex. atest servicemanager_test
+
+## Stop the recording
+ex. record_binder stop manager
+
+## Pull the recording on host
+Recordings are written on the device at /data/local/recordings/<service_name>; use adb pull to copy them to the host.
+Use the inspect command of record_binder to check whether any transactions were captured.
+ex. record_binder inspect manager
+
+## Run the corpus generator tool
+binder2corpus <recording_path> <dir_to_write_corpus>
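+
+For example (with illustrative paths):
+ex. binder2corpus ./manager_recording servicemanager_fuzzer_corpus/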
+
+## Build fuzzer and sync data directory
+ex. m servicemanager_fuzzer && adb sync data
+
+## Push the corpus to the device
+ex. adb push servicemanager_fuzzer_corpus/ /data/fuzz/x86_64/servicemanager_fuzzer/
+
+## Run fuzzer with corpus directory as argument
+ex. adb shell /data/fuzz/x86_64/servicemanager_fuzzer/servicemanager_fuzzer /data/fuzz/x86_64/servicemanager_fuzzer/servicemanager_fuzzer_corpus
\ No newline at end of file
diff --git a/libs/binder/tests/parcel_fuzzer/binder2corpus/binder2corpus.cpp b/libs/binder/tests/parcel_fuzzer/binder2corpus/binder2corpus.cpp
new file mode 100644
index 0000000..c0fdaea
--- /dev/null
+++ b/libs/binder/tests/parcel_fuzzer/binder2corpus/binder2corpus.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/file.h>
+#include <android-base/logging.h>
+#include <android-base/unique_fd.h>
+#include <binder/RecordedTransaction.h>
+
+#include <fuzzseeds/random_parcel_seeds.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+
+#include <iostream>
+
+using android::generateSeedsFromRecording;
+using android::status_t;
+using android::base::unique_fd;
+using android::binder::debug::RecordedTransaction;
+
+status_t generateCorpus(const char* recordingPath, const char* corpusDir) {
+    unique_fd fd(open(recordingPath, O_RDONLY));
+    if (!fd.ok()) {
+        std::cerr << "Failed to open recording file at path " << recordingPath
+                  << " with error: " << strerror(errno) << '\n';
+        return android::BAD_VALUE;
+    }
+
+    if (auto res = mkdir(corpusDir, 0766); res != 0) {
+        std::cerr << "Failed to create corpus directory at path " << corpusDir
+                  << ". Delete the directory if it already exists." << std::endl;
+        return android::BAD_VALUE;
+    }
+
+    int transactionNumber = 0;
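+    // Convert each recorded transaction into its own seed file, transaction_<N>, in the corpus
+    // directory.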
+    while (auto transaction = RecordedTransaction::fromFile(fd)) {
+        ++transactionNumber;
+        std::string filePath = std::string(corpusDir) + std::string("/transaction_") +
+                std::to_string(transactionNumber);
+        constexpr int openFlags = O_WRONLY | O_CREAT | O_BINARY | O_CLOEXEC;
+        android::base::unique_fd corpusFd(open(filePath.c_str(), openFlags, 0666));
+        if (!corpusFd.ok()) {
+            std::cerr << "Failed to open corpus file at path " << filePath
+                      << " with error: " << strerror(errno) << std::endl;
+            return android::UNKNOWN_ERROR;
+        }
+        generateSeedsFromRecording(corpusFd, transaction.value());
+    }
+
+    if (transactionNumber == 0) {
+        std::cerr << "No valid transactions were found in recording file: " << recordingPath
+                  << std::endl;
+        return android::BAD_VALUE;
+    }
+
+    return android::NO_ERROR;
+}
+
+void printHelp(const char* toolName) {
+    std::cout << "Usage: \n\n"
+              << toolName
+              << " <recording_path> <destination_directory> \n\n*Use the "
+                 "record_binder tool to record binder transactions."
+              << std::endl;
+}
+
+int main(int argc, char** argv) {
+    if (argc != 3) {
+        printHelp(argv[0]);
+        return 1;
+    }
+    const char* sourcePath = argv[1];
+    const char* corpusDir = argv[2];
+    if (android::NO_ERROR != generateCorpus(sourcePath, corpusDir)) {
+        std::cerr << "Failed to generate fuzzer corpus." << std::endl;
+        return 1;
+    }
+    return 0;
+}
diff --git a/libs/binder/tests/parcel_fuzzer/include_random_parcel_seeds/fuzzseeds/random_parcel_seeds.h b/libs/binder/tests/parcel_fuzzer/include_random_parcel_seeds/fuzzseeds/random_parcel_seeds.h
new file mode 100644
index 0000000..5755239
--- /dev/null
+++ b/libs/binder/tests/parcel_fuzzer/include_random_parcel_seeds/fuzzseeds/random_parcel_seeds.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/file.h>
+#include <android-base/hex.h>
+#include <android-base/logging.h>
+
+#include <binder/Binder.h>
+#include <binder/Parcel.h>
+#include <binder/RecordedTransaction.h>
+
+#include <private/android_filesystem_config.h>
+
+#include <vector>
+
+using android::Parcel;
+using android::base::HexString;
+using std::vector;
+
+namespace android {
+namespace impl {
+// Computes the bytes such that, if they are passed to FuzzedDataProvider and
+// provider.ConsumeIntegralInRange<T>(min, max) is called, the call returns val.
+template <typename T>
+void writeReversedBuffer(std::vector<std::byte>& integralBuffer, T min, T max, T val);
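+// For example (illustrative): for ConsumeIntegralInRange<uint32_t>(0, 100) to return 42, the
+// single byte 0x2A is produced, since FuzzedDataProvider consumes one byte per 8 bits of the
+// range and reads those bytes from the end of its data.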
+
+// Calls writeReversedBuffer above with the numeric limits of type T as min and max. This method
+// is the reverse of ConsumeIntegral<T>() in FuzzedDataProvider.
+template <typename T>
+void writeReversedBuffer(std::vector<std::byte>& integralBuffer, T val);
+} // namespace impl
+void generateSeedsFromRecording(base::borrowed_fd fd,
+                                const binder::debug::RecordedTransaction& transaction);
+} // namespace android
diff --git a/libs/binder/tests/parcel_fuzzer/libbinder_driver.cpp b/libs/binder/tests/parcel_fuzzer/libbinder_driver.cpp
index b268c5d..93ac116 100644
--- a/libs/binder/tests/parcel_fuzzer/libbinder_driver.cpp
+++ b/libs/binder/tests/parcel_fuzzer/libbinder_driver.cpp
@@ -35,6 +35,11 @@
             .extraFds = {},
     };
 
+    // Reserved bytes so that we don't have to change fuzzers and seed corpus if
+    // we introduce anything new in fuzzService.
+    std::vector<uint8_t> reservedBytes = provider.ConsumeBytes<uint8_t>(8);
+    (void)reservedBytes;
+
     // always refresh the calling identity, because we sometimes set it below, but also,
     // the code we're fuzzing might reset it
     IPCThreadState::self()->clearCallingIdentity();
@@ -55,8 +60,15 @@
 
     while (provider.remaining_bytes() > 0) {
         // Most of the AIDL services will have small set of transaction codes.
-        uint32_t code = provider.ConsumeBool() ? provider.ConsumeIntegral<uint32_t>()
-                                               : provider.ConsumeIntegralInRange<uint32_t>(0, 100);
+        // TODO(b/295942369) : Add remaining transact codes from IBinder.h
+        uint32_t code = provider.ConsumeBool()
+                ? provider.ConsumeIntegral<uint32_t>()
+                : provider.PickValueInArray<int64_t>(
+                          {provider.ConsumeIntegralInRange<uint32_t>(0, 100),
+                           IBinder::DUMP_TRANSACTION, IBinder::PING_TRANSACTION,
+                           IBinder::SHELL_COMMAND_TRANSACTION, IBinder::INTERFACE_TRANSACTION,
+                           IBinder::SYSPROPS_TRANSACTION, IBinder::EXTENSION_TRANSACTION,
+                           IBinder::TWEET_TRANSACTION, IBinder::LIKE_TRANSACTION});
         uint32_t flags = provider.ConsumeIntegral<uint32_t>();
         Parcel data;
         // for increased fuzz coverage
diff --git a/libs/binder/tests/parcel_fuzzer/random_parcel_seeds.cpp b/libs/binder/tests/parcel_fuzzer/random_parcel_seeds.cpp
new file mode 100644
index 0000000..9e3e2ab
--- /dev/null
+++ b/libs/binder/tests/parcel_fuzzer/random_parcel_seeds.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/file.h>
+#include <android-base/logging.h>
+
+#include <binder/RecordedTransaction.h>
+
+#include <fuzzseeds/random_parcel_seeds.h>
+
+using android::base::WriteFully;
+
+namespace android {
+namespace impl {
+template <typename T>
+std::vector<uint8_t> reverseBytes(T min, T max, T val) {
+    uint64_t range = static_cast<uint64_t>(max) - min;
+    uint64_t result = val - min;
+    size_t offset = 0;
+
+    std::vector<uint8_t> reverseData;
+    uint8_t reversed = 0;
+    reversed |= result;
+
+    while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0) {
+        reverseData.push_back(reversed);
+        reversed = 0;
+        reversed |= (result >> CHAR_BIT);
+        result = result >> CHAR_BIT;
+        offset += CHAR_BIT;
+    }
+
+    return reverseData;
+}
+
+template <typename T>
+void writeReversedBuffer(std::vector<uint8_t>& integralBuffer, T min, T max, T val) {
+    std::vector<uint8_t> reversedData = reverseBytes(min, max, val);
+    // ConsumeIntegral calls read the buffer from the end. Keep inserting at the front of the
+    // buffer so that the order of fuzzService operations matches the order of seed generation,
+    // which keeps the code readable.
+    integralBuffer.insert(integralBuffer.begin(), reversedData.begin(), reversedData.end());
+}
+
+template <typename T>
+void writeReversedBuffer(std::vector<uint8_t>& integralBuffer, T val) {
+    // For ConsumeIntegral<T>() calls, FuzzedDataProvider uses the numeric limits of T as the
+    // min and max of the range.
+    writeReversedBuffer(integralBuffer, std::numeric_limits<T>::min(),
+                        std::numeric_limits<T>::max(), val);
+}
+
+} // namespace impl
+
+void generateSeedsFromRecording(base::borrowed_fd fd,
+                                const binder::debug::RecordedTransaction& transaction) {
+    // Write Reserved bytes for future use
+    std::vector<uint8_t> reservedBytes(8);
+    CHECK(WriteFully(fd, reservedBytes.data(), reservedBytes.size())) << fd.get();
+
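+    // The corpus file written below is laid out, in order, as: the reserved bytes above, the
+    // recorded parcel data, the fillParcelBuffer, and finally the integralBuffer. fuzzService's
+    // FuzzedDataProvider consumes integral values from the end of its data, so the
+    // integralBuffer (written last) is consumed first.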
+    std::vector<uint8_t> integralBuffer;
+
+    // Write UID array : Array elements are initialized in the order that they are declared
+    // UID array index 2 element
+    // int64_t aidRoot = 0;
+    impl::writeReversedBuffer(integralBuffer, static_cast<int64_t>(AID_ROOT) << 32,
+                              static_cast<int64_t>(AID_USER) << 32,
+                              static_cast<int64_t>(AID_ROOT) << 32);
+
+    // UID array index 3 element
+    impl::writeReversedBuffer(integralBuffer, static_cast<int64_t>(AID_ROOT) << 32);
+
+    // always pick AID_ROOT -> index 0
+    size_t uidIndex = 0;
+    impl::writeReversedBuffer(integralBuffer, static_cast<size_t>(0), static_cast<size_t>(3),
+                              uidIndex);
+
+    // Never set uid in seed corpus
+    uint8_t writeUid = 0;
+    impl::writeReversedBuffer(integralBuffer, writeUid);
+
+    // Select the random-code branch; the code itself will come from the recorded transaction
+    uint8_t selectCode = 1;
+    impl::writeReversedBuffer(integralBuffer, selectCode);
+
+    // Get from recorded transaction
+    uint32_t code = transaction.getCode();
+    impl::writeReversedBuffer(integralBuffer, code);
+
+    // Get from recorded transaction
+    uint32_t flags = transaction.getFlags();
+    impl::writeReversedBuffer(integralBuffer, flags);
+
+    // always fuzz primary binder
+    size_t extraBindersIndex = 0;
+    impl::writeReversedBuffer(integralBuffer, static_cast<size_t>(0), static_cast<size_t>(0),
+                              extraBindersIndex);
+
+    const Parcel& dataParcel = transaction.getDataParcel();
+
+    // This buffer holds the bytes which will be used for fillRandomParcel API
+    std::vector<uint8_t> fillParcelBuffer;
+
+    // Don't take rpc path
+    uint8_t rpcBranch = 0;
+    impl::writeReversedBuffer(fillParcelBuffer, rpcBranch);
+
+    // Implicit branch on this path -> options->writeHeader(p, provider)
+    uint8_t writeHeaderInternal = 0;
+    impl::writeReversedBuffer(fillParcelBuffer, writeHeaderInternal);
+
+    // Choose to write data in parcel
+    size_t fillFuncIndex = 0;
+    impl::writeReversedBuffer(fillParcelBuffer, static_cast<size_t>(0), static_cast<size_t>(2),
+                              fillFuncIndex);
+
+    // Write parcel data size from recorded transaction
+    size_t toWrite = transaction.getDataParcel().dataBufferSize();
+    impl::writeReversedBuffer(fillParcelBuffer, static_cast<size_t>(0), toWrite, toWrite);
+
+    // Write parcel data with size toWrite from the recorded transaction
+    CHECK(WriteFully(fd, dataParcel.data(), toWrite)) << fd.get();
+
+    // Write fill parcel buffer size in integralBuffer so that fuzzService knows the data size
+    size_t subDataSize = toWrite + fillParcelBuffer.size();
+    impl::writeReversedBuffer(integralBuffer, static_cast<size_t>(0), subDataSize, subDataSize);
+
+    // Write fill parcel buffer
+    CHECK(WriteFully(fd, fillParcelBuffer.data(), fillParcelBuffer.size())) << fd.get();
+
+    // Write the integralBuffer to data
+    CHECK(WriteFully(fd, integralBuffer.data(), integralBuffer.size())) << fd.get();
+}
+} // namespace android
diff --git a/libs/binder/tests/parcel_fuzzer/test_fuzzer/Android.bp b/libs/binder/tests/parcel_fuzzer/test_fuzzer/Android.bp
index 96092b1..690c39a 100644
--- a/libs/binder/tests/parcel_fuzzer/test_fuzzer/Android.bp
+++ b/libs/binder/tests/parcel_fuzzer/test_fuzzer/Android.bp
@@ -36,8 +36,8 @@
         triage_assignee: "waghpawan@google.com",
 
         // This fuzzer should be used only test fuzzService locally
-        fuzz_on_haiku_host: true,
-        fuzz_on_haiku_device: true,
+        fuzz_on_haiku_host: false,
+        fuzz_on_haiku_device: false,
     },
 }
 
diff --git a/libs/binder/tests/parcel_fuzzer/test_fuzzer/TestServiceFuzzer.cpp b/libs/binder/tests/parcel_fuzzer/test_fuzzer/TestServiceFuzzer.cpp
index 46205d7..ba1a6a1 100644
--- a/libs/binder/tests/parcel_fuzzer/test_fuzzer/TestServiceFuzzer.cpp
+++ b/libs/binder/tests/parcel_fuzzer/test_fuzzer/TestServiceFuzzer.cpp
@@ -33,6 +33,8 @@
     ON_KNOWN_UID,
     ON_SYSTEM_AID,
     ON_ROOT_AID,
+    ON_DUMP_TRANSACT,
+    ON_SHELL_CMD_TRANSACT,
 };
 
 // This service is to verify that fuzzService is functioning properly
@@ -92,6 +94,16 @@
         return Status::ok();
     }
 
+    status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) override {
+        if (mCrash == CrashType::ON_DUMP_TRANSACT && code == DUMP_TRANSACTION) {
+            LOG_ALWAYS_FATAL("Expected crash, DUMP.");
+        } else if (mCrash == CrashType::ON_SHELL_CMD_TRANSACT &&
+                   code == SHELL_COMMAND_TRANSACTION) {
+            LOG_ALWAYS_FATAL("Expected crash, SHELL_CMD.");
+        }
+        return BnTestService::onTransact(code, data, reply, flags);
+    }
+
 private:
     CrashType mCrash;
 };
@@ -121,6 +133,10 @@
         gCrashType = CrashType::ON_ROOT_AID;
     } else if (arg == "BINDER") {
         gCrashType = CrashType::ON_BINDER;
+    } else if (arg == "DUMP") {
+        gCrashType = CrashType::ON_DUMP_TRANSACT;
+    } else if (arg == "SHELL_CMD") {
+        gCrashType = CrashType::ON_SHELL_CMD_TRANSACT;
     } else {
         printf("INVALID ARG\n");
         exit(0); // success because this is a crash test
diff --git a/libs/binder/tests/parcel_fuzzer/test_fuzzer/run_fuzz_service_test.sh b/libs/binder/tests/parcel_fuzzer/test_fuzzer/run_fuzz_service_test.sh
index 25906d8..c447bff 100755
--- a/libs/binder/tests/parcel_fuzzer/test_fuzzer/run_fuzz_service_test.sh
+++ b/libs/binder/tests/parcel_fuzzer/test_fuzzer/run_fuzz_service_test.sh
@@ -27,7 +27,7 @@
     exit 1
 fi
 
-for CRASH_TYPE in PLAIN KNOWN_UID AID_SYSTEM AID_ROOT BINDER; do
+for CRASH_TYPE in PLAIN KNOWN_UID AID_SYSTEM AID_ROOT BINDER DUMP SHELL_CMD; do
     echo "INFO: Running fuzzer : test_service_fuzzer_should_crash $CRASH_TYPE"
 
     ./test_service_fuzzer_should_crash "$CRASH_TYPE" -max_total_time=30 &>"$FUZZER_OUT"
diff --git a/libs/bufferstreams/rust/Android.bp b/libs/bufferstreams/rust/Android.bp
index 95a85b5..ff95148 100644
--- a/libs/bufferstreams/rust/Android.bp
+++ b/libs/bufferstreams/rust/Android.bp
@@ -17,7 +17,8 @@
     crate_name: "bufferstreams",
     srcs: ["src/lib.rs"],
     edition: "2021",
-    vendor_available: true,
-    host_supported: true,
+    rlibs: [
+        "libnativewindow_rs",
+    ],
     min_sdk_version: "30",
 }
diff --git a/libs/bufferstreams/rust/src/lib.rs b/libs/bufferstreams/rust/src/lib.rs
index 51f1c73..1d321c8 100644
--- a/libs/bufferstreams/rust/src/lib.rs
+++ b/libs/bufferstreams/rust/src/lib.rs
@@ -14,9 +14,143 @@
 
 //! libbufferstreams: Reactive Streams for Graphics Buffers
 
+use nativewindow::*;
+use std::sync::{Arc, Weak};
+use std::time::Instant;
+
 /// This function will print Hello World.
 #[no_mangle]
 pub extern "C" fn hello() -> bool {
     println!("Hello world.");
     true
 }
+
+/// BufferPublishers provide buffers to BufferSubscribers. Depending on the
+/// particular object in question, these could be allocated locally or provided
+/// over IPC.
+///
+/// BufferPublishers are required to adhere to the following, based on the
+/// reactive streams specification:
+/// *   The total number of on_next's signalled by a Publisher to a Subscriber
+/// MUST be less than or equal to the total number of elements requested by that
+/// Subscriber's Subscription at all times.
+/// *   A Publisher MAY signal fewer on_next than requested and terminate the
+/// Subscription by calling on_complete or on_error.
+/// *   on_subscribe, on_next, on_error and on_complete signaled to a Subscriber
+/// MUST be signaled serially.
+/// *   If a Publisher fails it MUST signal an on_error.
+/// *   If a Publisher terminates successfully (finite stream) it MUST signal an
+/// on_complete.
+/// *   If a Publisher signals either on_error or on_complete on a Subscriber,
+/// that Subscriber’s Subscription MUST be considered cancelled.
+/// *   Once a terminal state has been signaled (on_error, on_complete) it is
+/// REQUIRED that no further signals occur.
+/// *   If a Subscription is cancelled its Subscriber MUST eventually stop being
+///  signaled.
+/// *  A Publisher MAY support multiple Subscribers and decides whether each
+/// Subscription is unicast or multicast.
+pub trait BufferPublisher {
+    /// This function will create the subscription between the publisher and
+    /// the subscriber.
+    fn subscribe(&self, subscriber: Weak<dyn BufferSubscriber>);
+}
+
+/// BufferSubscribers can subscribe to BufferPublishers. They can request Frames
+/// via the BufferSubscription they get from the publisher, then receive Frames
+/// via on_next.
+///
+/// BufferSubscribers are required to adhere to the following, based on the
+/// reactive streams specification:
+/// *   The total number of on_next's signalled by a Publisher to a Subscriber
+/// MUST be less than or equal to the total number of elements requested by that
+/// Subscriber's Subscription at all times.
+/// *   A Publisher MAY signal fewer on_next than requested and terminate the
+/// Subscription by calling on_complete or on_error.
+/// *   on_subscribe, on_next, on_error and on_complete signaled to a Subscriber
+/// MUST be signaled serially.
+/// *   If a Publisher fails it MUST signal an on_error.
+/// *   If a Publisher terminates successfully (finite stream) it MUST signal an
+/// on_complete.
+/// *   If a Publisher signals either on_error or on_complete on a Subscriber,
+/// that Subscriber’s Subscription MUST be considered cancelled.
+/// *   Once a terminal state has been signaled (on_error, on_complete) it is
+/// REQUIRED that no further signals occur.
+/// *   If a Subscription is cancelled its Subscriber MUST eventually stop being
+/// signaled.
+/// *   Publisher.subscribe MAY be called as many times as wanted but MUST be
+/// with a different Subscriber each time.
+/// *   A Publisher MAY support multiple Subscribers and decides whether each
+/// Subscription is unicast or multicast.
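+///
+/// A minimal, illustrative subscriber (not part of this crate; the names below are
+/// hypothetical) could look like this sketch:
+///
+/// ```ignore
+/// struct LoggingSubscriber;
+///
+/// impl BufferSubscriber for LoggingSubscriber {
+///     fn on_subscribe(&self, subscription: Arc<dyn BufferSubscription>) {
+///         // Ask the publisher for the first frame.
+///         subscription.request(1);
+///     }
+///     fn on_next(&self, frame: Frame) {
+///         println!("received a frame presented at {:?}", frame.present_time);
+///     }
+///     fn on_error(&self, error: BufferError) {
+///         eprintln!("stream error: {error}");
+///     }
+///     fn on_complete(&self) {}
+/// }
+/// ```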
+pub trait BufferSubscriber {
+    /// This function will be called at the beginning of the subscription.
+    fn on_subscribe(&self, subscription: Arc<dyn BufferSubscription>);
+    /// This function will be called for each buffer that comes in.
+    fn on_next(&self, frame: Frame);
+    /// This function will be called in case of an error.
+    fn on_error(&self, error: BufferError);
+    /// This function will be called on finite streams when done.
+    fn on_complete(&self);
+}
+
+/// BufferSubscriptions serve as the bridge between BufferPublishers and
+/// BufferSubscribers. BufferSubscribers receive a BufferSubscription when they
+/// subscribe to a BufferPublisher via on_subscribe.
+/// This object is to be used by the BufferSubscriber to cancel its subscription
+/// or request more buffers.
+///
+/// BufferSubscriptions are required to adhere to the following, based on the
+/// reactive streams specification:
+/// *   Subscription.request and Subscription.cancel MUST only be called inside
+/// of its Subscriber context.
+/// *   The Subscription MUST allow the Subscriber to call Subscription.request
+/// synchronously from within on_next or on_subscribe.
+/// *   Subscription.request MUST place an upper bound on possible synchronous
+/// recursion between Publisher and Subscriber.
+/// *   Subscription.request SHOULD respect the responsivity of its caller by
+/// returning in a timely manner.
+/// *   Subscription.cancel MUST respect the responsivity of its caller by
+/// returning in a timely manner, MUST be idempotent and MUST be thread-safe.
+/// *   After the Subscription is cancelled, additional
+/// Subscription.request(n: u64) MUST be NOPs.
+/// *   After the Subscription is cancelled, additional Subscription.cancel()
+/// MUST be NOPs.
+/// *   While the Subscription is not cancelled, Subscription.request(n: u64)
+/// MUST register the given number of additional elements to be produced to the
+/// respective subscriber.
+/// *   While the Subscription is not cancelled, Subscription.request(n: u64)
+/// MUST signal on_error if the argument is <= 0. The cause message SHOULD
+/// explain that non-positive request signals are illegal.
+/// *  While the Subscription is not cancelled, Subscription.request(n: u64)
+/// MAY synchronously call on_next on this (or other) subscriber(s).
+/// *  While the Subscription is not cancelled, Subscription.request(n: u64)
+/// MAY synchronously call on_complete or on_error on this (or other)
+/// subscriber(s).
+/// *  While the Subscription is not cancelled, Subscription.cancel() MUST
+/// request the Publisher to eventually stop signaling its Subscriber. The
+/// operation is NOT REQUIRED to affect the Subscription immediately.
+/// *  While the Subscription is not cancelled, Subscription.cancel() MUST
+/// request the Publisher to eventually drop any references to the corresponding
+/// subscriber.
+/// *  While the Subscription is not cancelled, calling Subscription.cancel MAY
+/// cause the Publisher, if stateful, to transition into the shut-down state if
+/// no other Subscription exists at this point.
+/// *  Calling Subscription.cancel MUST return normally.
+/// *  Calling Subscription.request MUST return normally.
+pub trait BufferSubscription {
+    /// Request that the publisher produce up to `n` more buffers for this subscriber.
+    fn request(&self, n: u64);
+    /// Cancel the subscription; the publisher should eventually stop signalling this subscriber.
+    fn cancel(&self);
+}
+/// Type used to describe errors produced by subscriptions.
+type BufferError = Box<dyn std::error::Error + Send + Sync + 'static>;
+
+/// Struct used to contain the buffer.
+pub struct Frame {
+    /// A handle to the C buffer interface.
+    pub buffer: AHardwareBuffer,
+    /// The time at which the buffer was dispatched.
+    pub present_time: Instant,
+    /// A fence used for reading/writing safely.
+    pub fence: i32,
+}
diff --git a/libs/graphicsenv/GraphicsEnv.cpp b/libs/graphicsenv/GraphicsEnv.cpp
index 715822b..732ca36 100644
--- a/libs/graphicsenv/GraphicsEnv.cpp
+++ b/libs/graphicsenv/GraphicsEnv.cpp
@@ -512,7 +512,11 @@
     return mShouldUseAngle;
 }
 
-void GraphicsEnv::setAngleInfo(const std::string& path, const bool shouldUseSystemAngle,
+// Set ANGLE information.
+// If path is "system", it means system ANGLE must be used for the process.
+// If shouldUseNativeDriver is true, it means native GLES drivers must be used for the process.
+// If path is set to nonempty and shouldUseNativeDriver is true, ANGLE will be used regardless.
+void GraphicsEnv::setAngleInfo(const std::string& path, const bool shouldUseNativeDriver,
                                const std::string& packageName,
                                const std::vector<std::string> eglFeatures) {
     if (mShouldUseAngle) {
@@ -529,8 +533,13 @@
     mAnglePath = std::move(path);
     ALOGV("setting app package name to '%s'", packageName.c_str());
     mPackageName = std::move(packageName);
-    mShouldUseAngle = true;
-    mShouldUseSystemAngle = shouldUseSystemAngle;
+    if (mAnglePath == "system") {
+        mShouldUseSystemAngle = true;
+    }
+    if (!mAnglePath.empty()) {
+        mShouldUseAngle = true;
+    }
+    mShouldUseNativeDriver = shouldUseNativeDriver;
 }
 
 std::string& GraphicsEnv::getPackageName() {
@@ -607,6 +616,10 @@
     return mShouldUseSystemAngle;
 }
 
+bool GraphicsEnv::shouldUseNativeDriver() {
+    return mShouldUseNativeDriver;
+}
+
 /**
  * APIs for debuggable layers
  */
diff --git a/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h b/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h
index fbf2902..6cce3f6 100644
--- a/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h
+++ b/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h
@@ -108,7 +108,10 @@
     // (libraries must be stored uncompressed and page aligned); such elements
     // in the search path must have a '!' after the zip filename, e.g.
     //     /system/app/ANGLEPrebuilt/ANGLEPrebuilt.apk!/lib/arm64-v8a
-    void setAngleInfo(const std::string& path, const bool useSystemAngle,
+    // If the search path is "system", then it means the system ANGLE should be used.
+    // If shouldUseNativeDriver is true, it means native GLES drivers must be used for the process.
+    // If path is set to nonempty and shouldUseNativeDriver is true, ANGLE will be used regardless.
+    void setAngleInfo(const std::string& path, const bool shouldUseNativeDriver,
                       const std::string& packageName, const std::vector<std::string> eglFeatures);
     // Get the ANGLE driver namespace.
     android_namespace_t* getAngleNamespace();
@@ -118,6 +121,7 @@
     // Set the persist.graphics.egl system property value.
     void nativeToggleAngleAsSystemDriver(bool enabled);
     bool shouldUseSystemAngle();
+    bool shouldUseNativeDriver();
 
     /*
      * Apis for debug layer
@@ -175,6 +179,8 @@
     bool mShouldUseAngle = false;
     // Whether loader should load system ANGLE.
     bool mShouldUseSystemAngle = false;
+    // Whether loader should load native GLES driver.
+    bool mShouldUseNativeDriver = false;
     // ANGLE namespace.
     android_namespace_t* mAngleNamespace = nullptr;
 
diff --git a/libs/gui/Android.bp b/libs/gui/Android.bp
index d7e7eb8..298838d 100644
--- a/libs/gui/Android.bp
+++ b/libs/gui/Android.bp
@@ -62,6 +62,7 @@
     name: "guiconstants_aidl",
     srcs: [
         "android/gui/DropInputMode.aidl",
+        "android/gui/StalledTransactionInfo.aidl",
         "android/**/TouchOcclusionMode.aidl",
     ],
 }
@@ -140,6 +141,7 @@
         "android/gui/IWindowInfosListener.aidl",
         "android/gui/IWindowInfosPublisher.aidl",
         "android/gui/IWindowInfosReportedListener.aidl",
+        "android/gui/StalledTransactionInfo.aidl",
         "android/gui/WindowInfo.aidl",
         "android/gui/WindowInfosUpdate.aidl",
     ],
diff --git a/libs/gui/BLASTBufferQueue.cpp b/libs/gui/BLASTBufferQueue.cpp
index 5c324b2..207fa4f 100644
--- a/libs/gui/BLASTBufferQueue.cpp
+++ b/libs/gui/BLASTBufferQueue.cpp
@@ -303,13 +303,8 @@
                 // frame numbers that were in a sync. We remove the frame from mSyncedFrameNumbers
                 // set and then check if it's empty. If there are no more pending syncs, we can
                 // proceed with flushing the shadow queue.
-                // We also want to check if mSyncTransaction is null because it's possible another
-                // sync request came in while waiting, but it hasn't started processing yet. In that
-                // case, we don't actually want to flush the frames in between since they will get
-                // processed and merged with the sync transaction and released earlier than if they
-                // were sent to SF
                 mSyncedFrameNumbers.erase(currFrameNumber);
-                if (mSyncedFrameNumbers.empty() && mSyncTransaction == nullptr) {
+                if (mSyncedFrameNumbers.empty()) {
                     flushShadowQueue();
                 }
             } else {
diff --git a/libs/gui/OWNERS b/libs/gui/OWNERS
index 826a418..070f6bf 100644
--- a/libs/gui/OWNERS
+++ b/libs/gui/OWNERS
@@ -1,3 +1,5 @@
+# Bug component: 1075131
+
 chrisforbes@google.com
 jreck@google.com
 
diff --git a/libs/gui/SurfaceComposerClient.cpp b/libs/gui/SurfaceComposerClient.cpp
index db99726..7840120 100644
--- a/libs/gui/SurfaceComposerClient.cpp
+++ b/libs/gui/SurfaceComposerClient.cpp
@@ -59,7 +59,7 @@
 #include <private/gui/ComposerServiceAIDL.h>
 
 // This server size should always be smaller than the server cache size
-#define BUFFER_CACHE_MAX_SIZE 64
+#define BUFFER_CACHE_MAX_SIZE 4096
 
 namespace android {
 
@@ -450,7 +450,9 @@
             callbackFunction(transactionStats.latchTime, transactionStats.presentFence,
                              surfaceControlStats);
         }
+    }
 
+    for (const auto& transactionStats : listenerStats.transactionStats) {
         for (const auto& surfaceStats : transactionStats.surfaceStats) {
             // The callbackMap contains the SurfaceControl object, which we need to look up the
             // layerId. Since we don't know which callback contains the SurfaceControl, iterate
@@ -1306,6 +1308,13 @@
     return status.isOk() ? display : nullptr;
 }
 
+std::optional<gui::StalledTransactionInfo> SurfaceComposerClient::getStalledTransactionInfo(
+        pid_t pid) {
+    std::optional<gui::StalledTransactionInfo> result;
+    ComposerServiceAIDL::getComposerService()->getStalledTransactionInfo(pid, &result);
+    return result;
+}
+
 void SurfaceComposerClient::Transaction::setAnimationTransaction() {
     mAnimation = true;
 }
@@ -2513,38 +2522,41 @@
         outInfo->secure = ginfo.secure;
         outInfo->installOrientation = static_cast<ui::Rotation>(ginfo.installOrientation);
 
-        DeviceProductInfo info;
-        std::optional<gui::DeviceProductInfo> dpi = ginfo.deviceProductInfo;
-        gui::DeviceProductInfo::ManufactureOrModelDate& date = dpi->manufactureOrModelDate;
-        info.name = dpi->name;
-        if (dpi->manufacturerPnpId.size() > 0) {
-            // copid from PnpId = std::array<char, 4> in ui/DeviceProductInfo.h
-            constexpr int kMaxPnpIdSize = 4;
-            size_t count = std::max<size_t>(kMaxPnpIdSize, dpi->manufacturerPnpId.size());
-            std::copy_n(dpi->manufacturerPnpId.begin(), count, info.manufacturerPnpId.begin());
-        }
-        if (dpi->relativeAddress.size() > 0) {
-            std::copy(dpi->relativeAddress.begin(), dpi->relativeAddress.end(),
-                      std::back_inserter(info.relativeAddress));
-        }
-        info.productId = dpi->productId;
-        if (date.getTag() == Tag::modelYear) {
-            DeviceProductInfo::ModelYear modelYear;
-            modelYear.year = static_cast<uint32_t>(date.get<Tag::modelYear>().year);
-            info.manufactureOrModelDate = modelYear;
-        } else if (date.getTag() == Tag::manufactureYear) {
-            DeviceProductInfo::ManufactureYear manufactureYear;
-            manufactureYear.year = date.get<Tag::manufactureYear>().modelYear.year;
-            info.manufactureOrModelDate = manufactureYear;
-        } else if (date.getTag() == Tag::manufactureWeekAndYear) {
-            DeviceProductInfo::ManufactureWeekAndYear weekAndYear;
-            weekAndYear.year =
-                    date.get<Tag::manufactureWeekAndYear>().manufactureYear.modelYear.year;
-            weekAndYear.week = date.get<Tag::manufactureWeekAndYear>().week;
-            info.manufactureOrModelDate = weekAndYear;
-        }
+        if (const std::optional<gui::DeviceProductInfo> dpi = ginfo.deviceProductInfo) {
+            DeviceProductInfo info;
+            info.name = dpi->name;
+            if (dpi->manufacturerPnpId.size() > 0) {
+                // copied from PnpId = std::array<char, 4> in ui/DeviceProductInfo.h
+                constexpr int kMaxPnpIdSize = 4;
+                size_t count = std::max<size_t>(kMaxPnpIdSize, dpi->manufacturerPnpId.size());
+                std::copy_n(dpi->manufacturerPnpId.begin(), count, info.manufacturerPnpId.begin());
+            }
+            if (dpi->relativeAddress.size() > 0) {
+                std::copy(dpi->relativeAddress.begin(), dpi->relativeAddress.end(),
+                          std::back_inserter(info.relativeAddress));
+            }
+            info.productId = dpi->productId;
 
-        outInfo->deviceProductInfo = info;
+            const gui::DeviceProductInfo::ManufactureOrModelDate& date =
+                    dpi->manufactureOrModelDate;
+            if (date.getTag() == Tag::modelYear) {
+                DeviceProductInfo::ModelYear modelYear;
+                modelYear.year = static_cast<uint32_t>(date.get<Tag::modelYear>().year);
+                info.manufactureOrModelDate = modelYear;
+            } else if (date.getTag() == Tag::manufactureYear) {
+                DeviceProductInfo::ManufactureYear manufactureYear;
+                manufactureYear.year = date.get<Tag::manufactureYear>().modelYear.year;
+                info.manufactureOrModelDate = manufactureYear;
+            } else if (date.getTag() == Tag::manufactureWeekAndYear) {
+                DeviceProductInfo::ManufactureWeekAndYear weekAndYear;
+                weekAndYear.year =
+                        date.get<Tag::manufactureWeekAndYear>().manufactureYear.modelYear.year;
+                weekAndYear.week = date.get<Tag::manufactureWeekAndYear>().week;
+                info.manufactureOrModelDate = weekAndYear;
+            }
+
+            outInfo->deviceProductInfo = info;
+        }
     }
     return statusTFromBinderStatus(status);
 }
diff --git a/libs/gui/aidl/android/gui/ISurfaceComposer.aidl b/libs/gui/aidl/android/gui/ISurfaceComposer.aidl
index 539a1c1..e1726b7 100644
--- a/libs/gui/aidl/android/gui/ISurfaceComposer.aidl
+++ b/libs/gui/aidl/android/gui/ISurfaceComposer.aidl
@@ -46,6 +46,7 @@
 import android.gui.OverlayProperties;
 import android.gui.PullAtomData;
 import android.gui.ARect;
+import android.gui.StalledTransactionInfo;
 import android.gui.StaticDisplayInfo;
 import android.gui.WindowInfosListenerInfo;
 
@@ -230,20 +231,20 @@
      * The subregion can be optionally rotated.  It will also be scaled to
      * match the size of the output buffer.
      */
-    void captureDisplay(in DisplayCaptureArgs args, IScreenCaptureListener listener);
+    oneway void captureDisplay(in DisplayCaptureArgs args, IScreenCaptureListener listener);
 
     /**
      * Capture the specified screen. This requires the READ_FRAME_BUFFER
      * permission.
      */
-    void captureDisplayById(long displayId, IScreenCaptureListener listener);
+    oneway void captureDisplayById(long displayId, IScreenCaptureListener listener);
 
     /**
      * Capture a subtree of the layer hierarchy, potentially ignoring the root node.
      * This requires READ_FRAME_BUFFER permission. This function will fail if there
      * is a secure window on screen
      */
-    void captureLayers(in LayerCaptureArgs args, IScreenCaptureListener listener);
+    oneway void captureLayers(in LayerCaptureArgs args, IScreenCaptureListener listener);
 
     /**
      * Clears the frame statistics for animations.
@@ -280,8 +281,6 @@
      */
     List<LayerDebugInfo> getLayerDebugInfo();
 
-    boolean getColorManagement();
-
     /**
      * Gets the composition preference of the default data space and default pixel format,
      * as well as the wide color gamut data space and wide color gamut pixel format.
@@ -507,4 +506,10 @@
     void removeWindowInfosListener(IWindowInfosListener windowInfosListener);
 
     OverlayProperties getOverlaySupport();
+
+    /**
+     * Returns an instance of StalledTransactionInfo if a transaction from the passed pid has not
+     * been applied in SurfaceFlinger due to an unsignaled fence. Otherwise, null is returned.
+     */
+    @nullable StalledTransactionInfo getStalledTransactionInfo(int pid);
 }
diff --git a/libs/gui/android/gui/StalledTransactionInfo.aidl b/libs/gui/android/gui/StalledTransactionInfo.aidl
new file mode 100644
index 0000000..e6aa9bd
--- /dev/null
+++ b/libs/gui/android/gui/StalledTransactionInfo.aidl
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.gui;
+
+/** @hide */
+parcelable StalledTransactionInfo {
+    String layerName;
+    long bufferId;
+    long frameNumber;
+}
\ No newline at end of file
diff --git a/libs/gui/fuzzer/Android.bp b/libs/gui/fuzzer/Android.bp
index 073cc08..cd738ac 100644
--- a/libs/gui/fuzzer/Android.bp
+++ b/libs/gui/fuzzer/Android.bp
@@ -24,6 +24,7 @@
 
 cc_defaults {
     name: "libgui_fuzzer_defaults",
+    defaults: ["android.hardware.power-ndk_shared"],
     static_libs: [
         "android.hidl.token@1.0-utils",
         "libbinder_random_parcel",
@@ -46,7 +47,6 @@
         "android.hardware.configstore-utils",
         "android.hardware.graphics.bufferqueue@1.0",
         "android.hardware.graphics.bufferqueue@2.0",
-        "android.hardware.power-V4-ndk",
         "android.hidl.token@1.0",
         "libSurfaceFlingerProp",
         "libgui",
diff --git a/libs/gui/fuzzer/libgui_fuzzer_utils.h b/libs/gui/fuzzer/libgui_fuzzer_utils.h
index 4c7d056..a381687 100644
--- a/libs/gui/fuzzer/libgui_fuzzer_utils.h
+++ b/libs/gui/fuzzer/libgui_fuzzer_utils.h
@@ -110,7 +110,6 @@
                 (override));
     MOCK_METHOD(binder::Status, onPullAtom, (int32_t, gui::PullAtomData*), (override));
     MOCK_METHOD(binder::Status, getLayerDebugInfo, (std::vector<gui::LayerDebugInfo>*), (override));
-    MOCK_METHOD(binder::Status, getColorManagement, (bool*), (override));
     MOCK_METHOD(binder::Status, getCompositionPreference, (gui::CompositionPreference*),
                 (override));
     MOCK_METHOD(binder::Status, getDisplayedContentSamplingAttributes,
@@ -158,6 +157,8 @@
     MOCK_METHOD(binder::Status, removeWindowInfosListener, (const sp<gui::IWindowInfosListener>&),
                 (override));
     MOCK_METHOD(binder::Status, getOverlaySupport, (gui::OverlayProperties*), (override));
+    MOCK_METHOD(binder::Status, getStalledTransactionInfo,
+                (int32_t, std::optional<gui::StalledTransactionInfo>*), (override));
 };
 
 class FakeBnSurfaceComposerClient : public gui::BnSurfaceComposerClient {
diff --git a/libs/gui/include/gui/SurfaceComposerClient.h b/libs/gui/include/gui/SurfaceComposerClient.h
index 3cf57b1..dbcbd3b 100644
--- a/libs/gui/include/gui/SurfaceComposerClient.h
+++ b/libs/gui/include/gui/SurfaceComposerClient.h
@@ -371,6 +371,10 @@
     //! Get token for a physical display given its stable ID
     static sp<IBinder> getPhysicalDisplayToken(PhysicalDisplayId displayId);
 
+    // Returns StalledTransactionInfo if a transaction from the provided pid has not been applied
+    // due to an unsignaled fence.
+    static std::optional<gui::StalledTransactionInfo> getStalledTransactionInfo(pid_t pid);
+
     struct SCHash {
         std::size_t operator()(const sp<SurfaceControl>& sc) const {
             return std::hash<SurfaceControl *>{}(sc.get());
diff --git a/libs/gui/tests/BLASTBufferQueue_test.cpp b/libs/gui/tests/BLASTBufferQueue_test.cpp
index cce2892..9618502 100644
--- a/libs/gui/tests/BLASTBufferQueue_test.cpp
+++ b/libs/gui/tests/BLASTBufferQueue_test.cpp
@@ -351,24 +351,6 @@
     sp<CountProducerListener> mProducerListener;
 };
 
-// Helper class to ensure the provided BBQ frame number has been committed in SurfaceFlinger.
-class BBQSyncHelper {
-public:
-    BBQSyncHelper(BLASTBufferQueueHelper& bbqHelper, uint64_t frameNumber) {
-        t.addTransactionCompletedCallback(callbackHelper.function, callbackHelper.getContext());
-        bbqHelper.mergeWithNextTransaction(&t, frameNumber);
-    }
-
-    void wait() {
-        CallbackData callbackData;
-        callbackHelper.getCallbackData(&callbackData);
-    }
-
-private:
-    Transaction t;
-    CallbackHelper callbackHelper;
-};
-
 TEST_F(BLASTBufferQueueTest, CreateBLASTBufferQueue) {
     // create BLASTBufferQueue adapter associated with this surface
     BLASTBufferQueueHelper adapter(mSurfaceControl, mDisplayWidth, mDisplayHeight);
@@ -440,7 +422,6 @@
     BLASTBufferQueueHelper adapter(mSurfaceControl, mDisplayWidth, mDisplayHeight);
     sp<IGraphicBufferProducer> igbProducer;
     setUpProducer(adapter, igbProducer);
-    BBQSyncHelper syncHelper{adapter, 1};
 
     int slot;
     sp<Fence> fence;
@@ -466,7 +447,8 @@
     igbProducer->queueBuffer(slot, input, &qbOutput);
     ASSERT_NE(ui::Transform::ROT_INVALID, qbOutput.transformHint);
 
-    syncHelper.wait();
+    // ensure the buffer queue transaction has been committed
+    Transaction().apply(true /* synchronous */);
 
     // capture screen and verify that it is red
     ASSERT_EQ(NO_ERROR, captureDisplay(mCaptureArgs, mCaptureResults));
@@ -526,7 +508,6 @@
     BLASTBufferQueueHelper adapter(mSurfaceControl, mDisplayWidth, mDisplayHeight);
     sp<IGraphicBufferProducer> igbProducer;
     setUpProducer(adapter, igbProducer);
-    BBQSyncHelper syncHelper{adapter, 1};
     int slot;
     sp<Fence> fence;
     sp<GraphicBuffer> buf;
@@ -551,7 +532,9 @@
     igbProducer->queueBuffer(slot, input, &qbOutput);
     ASSERT_NE(ui::Transform::ROT_INVALID, qbOutput.transformHint);
 
-    syncHelper.wait();
+    // ensure the buffer queue transaction has been committed
+    Transaction().apply(true /* synchronous */);
+
     // capture screen and verify that it is red
     ASSERT_EQ(NO_ERROR, captureDisplay(mCaptureArgs, mCaptureResults));
 
@@ -582,7 +565,6 @@
     BLASTBufferQueueHelper adapter(mSurfaceControl, bufferSideLength, bufferSideLength);
     sp<IGraphicBufferProducer> igbProducer;
     setUpProducer(adapter, igbProducer);
-    BBQSyncHelper syncHelper{adapter, 1};
     int slot;
     sp<Fence> fence;
     sp<GraphicBuffer> buf;
@@ -611,7 +593,9 @@
     igbProducer->queueBuffer(slot, input, &qbOutput);
     ASSERT_NE(ui::Transform::ROT_INVALID, qbOutput.transformHint);
 
-    syncHelper.wait();
+    // ensure the buffer queue transaction has been committed
+    Transaction().apply(true /* synchronous */);
+
     // capture screen and verify that it is red
     ASSERT_EQ(NO_ERROR, captureDisplay(mCaptureArgs, mCaptureResults));
     ASSERT_NO_FATAL_FAILURE(checkScreenCapture(r, g, b,
@@ -642,7 +626,6 @@
     BLASTBufferQueueHelper adapter(mSurfaceControl, windowSize.getWidth(), windowSize.getHeight());
     sp<IGraphicBufferProducer> igbProducer;
     setUpProducer(adapter, igbProducer);
-    BBQSyncHelper syncHelper{adapter, 1};
     int slot;
     sp<Fence> fence;
     sp<GraphicBuffer> buf;
@@ -675,7 +658,8 @@
     igbProducer->queueBuffer(slot, input, &qbOutput);
     ASSERT_NE(ui::Transform::ROT_INVALID, qbOutput.transformHint);
 
-    syncHelper.wait();
+    // ensure the buffer queue transaction has been committed
+    Transaction().apply(true /* synchronous */);
 
     ASSERT_EQ(NO_ERROR, captureDisplay(mCaptureArgs, mCaptureResults));
 
@@ -710,7 +694,6 @@
     BLASTBufferQueueHelper adapter(mSurfaceControl, windowSize.getWidth(), windowSize.getHeight());
     sp<IGraphicBufferProducer> igbProducer;
     setUpProducer(adapter, igbProducer);
-    BBQSyncHelper syncHelper{adapter, 1};
     int slot;
     sp<Fence> fence;
     sp<GraphicBuffer> buf;
@@ -743,7 +726,8 @@
     igbProducer->queueBuffer(slot, input, &qbOutput);
     ASSERT_NE(ui::Transform::ROT_INVALID, qbOutput.transformHint);
 
-    syncHelper.wait();
+    // ensure the buffer queue transaction has been committed
+    Transaction().apply(true /* synchronous */);
 
     ASSERT_EQ(NO_ERROR, captureDisplay(mCaptureArgs, mCaptureResults));
     // Verify cropped region is scaled correctly.
@@ -769,7 +753,6 @@
     sp<IGraphicBufferProducer> igbProducer;
     setUpProducer(adapter, igbProducer);
     {
-        BBQSyncHelper syncHelper{adapter, 1};
         int slot;
         sp<Fence> fence;
         sp<GraphicBuffer> buf;
@@ -791,7 +774,9 @@
                                                        NATIVE_WINDOW_SCALING_MODE_FREEZE, 0,
                                                        Fence::NO_FENCE);
         igbProducer->queueBuffer(slot, input, &qbOutput);
-        syncHelper.wait();
+
+        // ensure the buffer queue transaction has been committed
+        Transaction().apply(true /* synchronous */);
     }
     // capture screen and verify that it is red
     ASSERT_EQ(NO_ERROR, captureDisplay(mCaptureArgs, mCaptureResults));
@@ -804,7 +789,6 @@
     adapter.update(mSurfaceControl, mDisplayWidth, mDisplayHeight / 2);
 
     {
-        BBQSyncHelper syncHelper{adapter, 1};
         int slot;
         sp<Fence> fence;
         sp<GraphicBuffer> buf;
@@ -827,7 +811,8 @@
                                                        NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW,
                                                        0, Fence::NO_FENCE);
         igbProducer->queueBuffer(slot, input, &qbOutput);
-        syncHelper.wait();
+        // ensure the buffer queue transaction has been committed
+        Transaction().apply(true /* synchronous */);
     }
     // capture screen and verify that it is red
     ASSERT_EQ(NO_ERROR, captureDisplay(mCaptureArgs, mCaptureResults));
@@ -1348,43 +1333,6 @@
     ASSERT_EQ(queuesToNativeWindow, 1);
 }
 
-// Test a slow producer doesn't hold up a faster producer from the same client. Essentially tests
-// BBQ uses separate transaction queues.
-TEST_F(BLASTBufferQueueTest, OutOfOrderTransactionTest) {
-    sp<SurfaceControl> bgSurface =
-            mClient->createSurface(String8("BGTest"), 0, 0, PIXEL_FORMAT_RGBA_8888,
-                                   ISurfaceComposerClient::eFXSurfaceBufferState);
-    ASSERT_NE(nullptr, bgSurface.get());
-    Transaction t;
-    t.setLayerStack(bgSurface, ui::DEFAULT_LAYER_STACK)
-            .show(bgSurface)
-            .setDataspace(bgSurface, ui::Dataspace::V0_SRGB)
-            .setLayer(bgSurface, std::numeric_limits<int32_t>::max() - 1)
-            .apply();
-
-    BLASTBufferQueueHelper slowAdapter(mSurfaceControl, mDisplayWidth, mDisplayHeight);
-    sp<IGraphicBufferProducer> slowIgbProducer;
-    setUpProducer(slowAdapter, slowIgbProducer);
-    nsecs_t presentTimeDelay = std::chrono::nanoseconds(500ms).count();
-    queueBuffer(slowIgbProducer, 0 /* r */, 255 /* g */, 0 /* b */, presentTimeDelay);
-
-    BLASTBufferQueueHelper fastAdapter(bgSurface, mDisplayWidth, mDisplayHeight);
-    sp<IGraphicBufferProducer> fastIgbProducer;
-    setUpProducer(fastAdapter, fastIgbProducer);
-    uint8_t r = 255;
-    uint8_t g = 0;
-    uint8_t b = 0;
-    queueBuffer(fastIgbProducer, r, g, b, 0 /* presentTimeDelay */);
-    fastAdapter.waitForCallbacks();
-
-    // capture screen and verify that it is red
-    ASSERT_EQ(NO_ERROR, captureDisplay(mCaptureArgs, mCaptureResults));
-
-    ASSERT_NO_FATAL_FAILURE(
-            checkScreenCapture(r, g, b,
-                               {0, 0, (int32_t)mDisplayWidth, (int32_t)mDisplayHeight / 2}));
-}
-
 TEST_F(BLASTBufferQueueTest, TransformHint) {
     // Transform hint is provided to BBQ via the surface control passed by WM
     mSurfaceControl->setTransformHint(ui::Transform::ROT_90);
@@ -1457,7 +1405,7 @@
         igbProducer->queueBuffer(slot, input, &qbOutput);
         ASSERT_NE(ui::Transform::ROT_INVALID, qbOutput.transformHint);
 
-        adapter.waitForCallbacks();
+        Transaction().apply(true /* synchronous */);
         ASSERT_EQ(NO_ERROR, captureDisplay(mCaptureArgs, mCaptureResults));
 
         switch (tr) {
diff --git a/libs/gui/tests/OWNERS b/libs/gui/tests/OWNERS
index 156efdb..48cd30d 100644
--- a/libs/gui/tests/OWNERS
+++ b/libs/gui/tests/OWNERS
@@ -1,3 +1,6 @@
 # Android > Android OS & Apps > Framework (Java + Native) > Window Manager > Surfaces
 # Bug component: 316245 = per-file BLASTBufferQueue_test.cpp, DisplayInfo_test.cpp, EndToEndNativeInputTest.cpp, WindowInfos_test.cpp
 # Buganizer template url: https://b.corp.google.com/issues/new?component=316245&template=1018194 = per-file BLASTBufferQueue_test.cpp, DisplayInfo_test.cpp, EndToEndNativeInputTest.cpp, WindowInfos_test.cpp
+
+# Android > Android OS & Apps > graphics > Core Graphics Stack (CoGS)
+# Bug component: 1075130
diff --git a/libs/gui/tests/Surface_test.cpp b/libs/gui/tests/Surface_test.cpp
index 567604d..30b7a9f 100644
--- a/libs/gui/tests/Surface_test.cpp
+++ b/libs/gui/tests/Surface_test.cpp
@@ -879,10 +879,6 @@
         return binder::Status::ok();
     }
 
-    binder::Status getColorManagement(bool* /*outGetColorManagement*/) override {
-        return binder::Status::ok();
-    }
-
     binder::Status getCompositionPreference(gui::CompositionPreference* /*outPref*/) override {
         return binder::Status::ok();
     }
@@ -1016,6 +1012,11 @@
         return binder::Status::ok();
     }
 
+    binder::Status getStalledTransactionInfo(
+            int32_t /*pid*/, std::optional<gui::StalledTransactionInfo>* /*result*/) override {
+        return binder::Status::ok();
+    }
+
 protected:
     IBinder* onAsBinder() override { return nullptr; }
 
diff --git a/libs/input/InputTransport.cpp b/libs/input/InputTransport.cpp
index d9b7700..3446540 100644
--- a/libs/input/InputTransport.cpp
+++ b/libs/input/InputTransport.cpp
@@ -75,10 +75,20 @@
 
 /**
  * Log debug messages about touch event resampling.
- * Enable this via "adb shell setprop log.tag.InputTransportResampling DEBUG" (requires restart)
+ *
+ * Enable this via "adb shell setprop log.tag.InputTransportResampling DEBUG".
+ * This requires a restart on non-debuggable (e.g. user) builds, but should take effect immediately
+ * on debuggable builds (e.g. userdebug).
  */
-const bool DEBUG_RESAMPLING =
-        __android_log_is_loggable(ANDROID_LOG_DEBUG, LOG_TAG "Resampling", ANDROID_LOG_INFO);
+bool debugResampling() {
+    if (!IS_DEBUGGABLE_BUILD) {
+        static const bool DEBUG_TRANSPORT_RESAMPLING =
+                __android_log_is_loggable(ANDROID_LOG_DEBUG, LOG_TAG "Resampling",
+                                          ANDROID_LOG_INFO);
+        return DEBUG_TRANSPORT_RESAMPLING;
+    }
+    return __android_log_is_loggable(ANDROID_LOG_DEBUG, LOG_TAG "Resampling", ANDROID_LOG_INFO);
+}
 
 } // namespace
 
@@ -1158,7 +1168,7 @@
                     state.recentCoordinatesAreIdentical(id)) {
                 PointerCoords& msgCoords = msg.body.motion.pointers[i].coords;
                 const PointerCoords& resampleCoords = state.lastResample.getPointerById(id);
-                ALOGD_IF(DEBUG_RESAMPLING, "[%d] - rewrite (%0.3f, %0.3f), old (%0.3f, %0.3f)", id,
+                ALOGD_IF(debugResampling(), "[%d] - rewrite (%0.3f, %0.3f), old (%0.3f, %0.3f)", id,
                          resampleCoords.getX(), resampleCoords.getY(), msgCoords.getX(),
                          msgCoords.getY());
                 msgCoords.setAxisValue(AMOTION_EVENT_AXIS_X, resampleCoords.getX());
@@ -1181,13 +1191,13 @@
 
     ssize_t index = findTouchState(event->getDeviceId(), event->getSource());
     if (index < 0) {
-        ALOGD_IF(DEBUG_RESAMPLING, "Not resampled, no touch state for device.");
+        ALOGD_IF(debugResampling(), "Not resampled, no touch state for device.");
         return;
     }
 
     TouchState& touchState = mTouchStates[index];
     if (touchState.historySize < 1) {
-        ALOGD_IF(DEBUG_RESAMPLING, "Not resampled, no history for device.");
+        ALOGD_IF(debugResampling(), "Not resampled, no history for device.");
         return;
     }
 
@@ -1197,7 +1207,7 @@
     for (size_t i = 0; i < pointerCount; i++) {
         uint32_t id = event->getPointerId(i);
         if (!current->idBits.hasBit(id)) {
-            ALOGD_IF(DEBUG_RESAMPLING, "Not resampled, missing id %d", id);
+            ALOGD_IF(debugResampling(), "Not resampled, missing id %d", id);
             return;
         }
     }
@@ -1213,7 +1223,7 @@
         other = &future;
         nsecs_t delta = future.eventTime - current->eventTime;
         if (delta < RESAMPLE_MIN_DELTA) {
-            ALOGD_IF(DEBUG_RESAMPLING, "Not resampled, delta time is too small: %" PRId64 " ns.",
+            ALOGD_IF(debugResampling(), "Not resampled, delta time is too small: %" PRId64 " ns.",
                      delta);
             return;
         }
@@ -1224,17 +1234,17 @@
         other = touchState.getHistory(1);
         nsecs_t delta = current->eventTime - other->eventTime;
         if (delta < RESAMPLE_MIN_DELTA) {
-            ALOGD_IF(DEBUG_RESAMPLING, "Not resampled, delta time is too small: %" PRId64 " ns.",
+            ALOGD_IF(debugResampling(), "Not resampled, delta time is too small: %" PRId64 " ns.",
                      delta);
             return;
         } else if (delta > RESAMPLE_MAX_DELTA) {
-            ALOGD_IF(DEBUG_RESAMPLING, "Not resampled, delta time is too large: %" PRId64 " ns.",
+            ALOGD_IF(debugResampling(), "Not resampled, delta time is too large: %" PRId64 " ns.",
                      delta);
             return;
         }
         nsecs_t maxPredict = current->eventTime + min(delta / 2, RESAMPLE_MAX_PREDICTION);
         if (sampleTime > maxPredict) {
-            ALOGD_IF(DEBUG_RESAMPLING,
+            ALOGD_IF(debugResampling(),
                      "Sample time is too far in the future, adjusting prediction "
                      "from %" PRId64 " to %" PRId64 " ns.",
                      sampleTime - current->eventTime, maxPredict - current->eventTime);
@@ -1242,7 +1252,7 @@
         }
         alpha = float(current->eventTime - sampleTime) / delta;
     } else {
-        ALOGD_IF(DEBUG_RESAMPLING, "Not resampled, insufficient data.");
+        ALOGD_IF(debugResampling(), "Not resampled, insufficient data.");
         return;
     }
 
@@ -1284,13 +1294,13 @@
             resampledCoords.setAxisValue(AMOTION_EVENT_AXIS_Y,
                                          lerp(currentCoords.getY(), otherCoords.getY(), alpha));
             resampledCoords.isResampled = true;
-            ALOGD_IF(DEBUG_RESAMPLING,
+            ALOGD_IF(debugResampling(),
                      "[%d] - out (%0.3f, %0.3f), cur (%0.3f, %0.3f), "
                      "other (%0.3f, %0.3f), alpha %0.3f",
                      id, resampledCoords.getX(), resampledCoords.getY(), currentCoords.getX(),
                      currentCoords.getY(), otherCoords.getX(), otherCoords.getY(), alpha);
         } else {
-            ALOGD_IF(DEBUG_RESAMPLING, "[%d] - out (%0.3f, %0.3f), cur (%0.3f, %0.3f)", id,
+            ALOGD_IF(debugResampling(), "[%d] - out (%0.3f, %0.3f), cur (%0.3f, %0.3f)", id,
                      resampledCoords.getX(), resampledCoords.getY(), currentCoords.getX(),
                      currentCoords.getY());
         }
diff --git a/libs/input/InputVerifier.cpp b/libs/input/InputVerifier.cpp
index 341eb6f..6c602e0 100644
--- a/libs/input/InputVerifier.cpp
+++ b/libs/input/InputVerifier.cpp
@@ -31,7 +31,7 @@
 // --- InputVerifier ---
 
 InputVerifier::InputVerifier(const std::string& name)
-      : mVerifier(android::input::verifier::create(name)){};
+      : mVerifier(android::input::verifier::create(rust::String::lossy(name))){};
 
 Result<void> InputVerifier::processMovement(DeviceId deviceId, int32_t action,
                                             uint32_t pointerCount,
diff --git a/libs/input/VelocityTracker.cpp b/libs/input/VelocityTracker.cpp
index 8704eee..116b778 100644
--- a/libs/input/VelocityTracker.cpp
+++ b/libs/input/VelocityTracker.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "VelocityTracker"
 
 #include <android-base/logging.h>
+#include <ftl/enum.h>
 #include <inttypes.h>
 #include <limits.h>
 #include <math.h>
@@ -148,27 +149,19 @@
 VelocityTracker::VelocityTracker(const Strategy strategy)
       : mLastEventTime(0), mCurrentPointerIdBits(0), mOverrideStrategy(strategy) {}
 
-VelocityTracker::~VelocityTracker() {
-}
-
 bool VelocityTracker::isAxisSupported(int32_t axis) {
     return DEFAULT_STRATEGY_BY_AXIS.find(axis) != DEFAULT_STRATEGY_BY_AXIS.end();
 }
 
 void VelocityTracker::configureStrategy(int32_t axis) {
     const bool isDifferentialAxis = DIFFERENTIAL_AXES.find(axis) != DIFFERENTIAL_AXES.end();
-
-    std::unique_ptr<VelocityTrackerStrategy> createdStrategy;
-    if (mOverrideStrategy != VelocityTracker::Strategy::DEFAULT) {
-        createdStrategy = createStrategy(mOverrideStrategy, /*deltaValues=*/isDifferentialAxis);
+    if (isDifferentialAxis || mOverrideStrategy == VelocityTracker::Strategy::DEFAULT) {
+        // Do not allow overrides of strategies for differential axes, for now.
+        mConfiguredStrategies[axis] = createStrategy(DEFAULT_STRATEGY_BY_AXIS.at(axis),
+                                                     /*deltaValues=*/isDifferentialAxis);
     } else {
-        createdStrategy = createStrategy(DEFAULT_STRATEGY_BY_AXIS.at(axis),
-                                         /*deltaValues=*/isDifferentialAxis);
+        mConfiguredStrategies[axis] = createStrategy(mOverrideStrategy, /*deltaValues=*/false);
     }
-
-    LOG_ALWAYS_FATAL_IF(createdStrategy == nullptr,
-                        "Could not create velocity tracker strategy for axis '%" PRId32 "'!", axis);
-    mConfiguredStrategies[axis] = std::move(createdStrategy);
 }
 
 std::unique_ptr<VelocityTrackerStrategy> VelocityTracker::createStrategy(
@@ -216,6 +209,9 @@
         default:
             break;
     }
+    LOG(FATAL) << "Invalid strategy: " << ftl::enum_string(strategy)
+               << ", deltaValues = " << deltaValues;
+
     return nullptr;
 }
 
@@ -272,12 +268,10 @@
     mConfiguredStrategies[axis]->addMovement(eventTime, pointerId, position);
 
     if (DEBUG_VELOCITY) {
-        ALOGD("VelocityTracker: addMovement eventTime=%" PRId64 ", pointerId=%" PRId32
-              ", activePointerId=%s",
-              eventTime, pointerId, toString(mActivePointerId).c_str());
-
-        ALOGD("  %d: axis=%d, position=%0.3f, velocity=%s", pointerId, axis, position,
-              toString(getVelocity(axis, pointerId)).c_str());
+        LOG(INFO) << "VelocityTracker: addMovement axis=" << MotionEvent::getLabel(axis)
+                  << ", eventTime=" << eventTime << ", pointerId=" << pointerId
+                  << ", activePointerId=" << toString(mActivePointerId) << ", position=" << position
+                  << ", velocity=" << toString(getVelocity(axis, pointerId));
     }
 }
 
diff --git a/libs/input/android/os/IInputConstants.aidl b/libs/input/android/os/IInputConstants.aidl
index dab843b..8f6f95b 100644
--- a/libs/input/android/os/IInputConstants.aidl
+++ b/libs/input/android/os/IInputConstants.aidl
@@ -57,4 +57,88 @@
 
     /* The default pointer acceleration value. */
     const int DEFAULT_POINTER_ACCELERATION = 3;
+
+    /**
+     * Use the default Velocity Tracker Strategy. Different axes may use different default
+     * strategies.
+     */
+    const int VELOCITY_TRACKER_STRATEGY_DEFAULT = -1;
+
+    /**
+     * Velocity Tracker Strategy: Impulse.
+     * Physical model of pushing an object.  Quality: VERY GOOD.
+     * Works with duplicate coordinates, unclean finger liftoff.
+     */
+    const int VELOCITY_TRACKER_STRATEGY_IMPULSE = 0;
+
+    /**
+     * Velocity Tracker Strategy: LSQ1.
+     * 1st order least squares.  Quality: POOR.
+     * Frequently underfits the touch data especially when the finger accelerates
+     * or changes direction.  Often underestimates velocity.  The direction
+     * is overly influenced by historical touch points.
+     */
+    const int VELOCITY_TRACKER_STRATEGY_LSQ1 = 1;
+
+    /**
+     * Velocity Tracker Strategy: LSQ2.
+     * 2nd order least squares.  Quality: VERY GOOD.
+     * Pretty much ideal, but can be confused by certain kinds of touch data,
+     * particularly if the panel has a tendency to generate delayed,
+     * duplicate or jittery touch coordinates when the finger is released.
+     */
+    const int VELOCITY_TRACKER_STRATEGY_LSQ2 = 2;
+
+    /**
+     * Velocity Tracker Strategy: LSQ3.
+     * 3rd order least squares.  Quality: UNUSABLE.
+     * Frequently overfits the touch data yielding wildly divergent estimates
+     * of the velocity when the finger is released.
+     */
+    const int VELOCITY_TRACKER_STRATEGY_LSQ3 = 3;
+
+    /**
+     * Velocity Tracker Strategy: WLSQ2_DELTA.
+     * 2nd order weighted least squares, delta weighting.  Quality: EXPERIMENTAL
+     */
+    const int VELOCITY_TRACKER_STRATEGY_WLSQ2_DELTA = 4;
+
+    /**
+     * Velocity Tracker Strategy: WLSQ2_CENTRAL.
+     * 2nd order weighted least squares, central weighting.  Quality: EXPERIMENTAL
+     */
+    const int VELOCITY_TRACKER_STRATEGY_WLSQ2_CENTRAL = 5;
+
+    /**
+     * Velocity Tracker Strategy: WLSQ2_RECENT.
+     * 2nd order weighted least squares, recent weighting.  Quality: EXPERIMENTAL
+     */
+    const int VELOCITY_TRACKER_STRATEGY_WLSQ2_RECENT = 6;
+
+    /**
+     * Velocity Tracker Strategy: INT1.
+     * 1st order integrating filter.  Quality: GOOD.
+     * Not as good as 'lsq2' because it cannot estimate acceleration but it is
+     * more tolerant of errors.  Like 'lsq1', this strategy tends to underestimate
+     * the velocity of a fling but this strategy tends to respond to changes in
+     * direction more quickly and accurately.
+     */
+    const int VELOCITY_TRACKER_STRATEGY_INT1 = 7;
+
+    /**
+     * Velocity Tracker Strategy: INT2.
+     * 2nd order integrating filter.  Quality: EXPERIMENTAL.
+     * For comparison purposes only.  Unlike 'int1' this strategy can compensate
+     * for acceleration but it typically overestimates the effect.
+     */
+    const int VELOCITY_TRACKER_STRATEGY_INT2 = 8;
+
+    /**
+     * Velocity Tracker Strategy: Legacy.
+     * Legacy velocity tracker algorithm.  Quality: POOR.
+     * For comparison purposes only.  This algorithm is strongly influenced by
+     * old data points, consistently underestimates velocity and takes a very long
+     * time to adjust to changes in direction.
+     */
+    const int VELOCITY_TRACKER_STRATEGY_LEGACY = 9;
 }
diff --git a/libs/input/rust/lib.rs b/libs/input/rust/lib.rs
index 688d941..1d3c434 100644
--- a/libs/input/rust/lib.rs
+++ b/libs/input/rust/lib.rs
@@ -23,6 +23,7 @@
 pub use input_verifier::InputVerifier;
 
 #[cxx::bridge(namespace = "android::input")]
+#[allow(unsafe_op_in_unsafe_fn)]
 mod ffi {
     #[namespace = "android"]
     unsafe extern "C++" {
diff --git a/libs/input/tests/Android.bp b/libs/input/tests/Android.bp
index cadac88..86b996b 100644
--- a/libs/input/tests/Android.bp
+++ b/libs/input/tests/Android.bp
@@ -18,6 +18,7 @@
         "InputDevice_test.cpp",
         "InputEvent_test.cpp",
         "InputPublisherAndConsumer_test.cpp",
+        "InputVerifier_test.cpp",
         "MotionPredictor_test.cpp",
         "RingBuffer_test.cpp",
         "TfLiteMotionPredictor_test.cpp",
diff --git a/libs/input/tests/InputVerifier_test.cpp b/libs/input/tests/InputVerifier_test.cpp
new file mode 100644
index 0000000..e24fa6e
--- /dev/null
+++ b/libs/input/tests/InputVerifier_test.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <input/InputVerifier.h>
+#include <string>
+
+namespace android {
+
+TEST(InputVerifierTest, CreationWithInvalidUtfStringDoesNotCrash) {
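+    // 0xC0 0x80 is an invalid (overlong) UTF-8 sequence; the verifier name is converted with
+    // rust::String::lossy, so construction must not crash.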
+    constexpr char bytes[] = {static_cast<char>(0xC0), static_cast<char>(0x80)};
+    const std::string name(bytes, sizeof(bytes));
+    InputVerifier verifier(name);
+}
+
+} // namespace android
diff --git a/libs/input/tests/VelocityTracker_test.cpp b/libs/input/tests/VelocityTracker_test.cpp
index ffebff1..1c8ec90 100644
--- a/libs/input/tests/VelocityTracker_test.cpp
+++ b/libs/input/tests/VelocityTracker_test.cpp
@@ -278,6 +278,11 @@
         const std::vector<std::pair<std::chrono::nanoseconds, float>>& motions,
         std::optional<float> targetVelocity) {
     checkVelocity(computeVelocity(strategy, motions, AMOTION_EVENT_AXIS_SCROLL), targetVelocity);
+    // The LSQ2 strategy is not compatible with AXIS_SCROLL. In that case, we should fall back
+    // to a strategy that supports differential axes.
+    checkVelocity(computeVelocity(VelocityTracker::Strategy::LSQ2, motions,
+                                  AMOTION_EVENT_AXIS_SCROLL),
+                  targetVelocity);
 }
 
 static void computeAndCheckQuadraticVelocity(const std::vector<PlanarMotionEventEntry>& motions,
diff --git a/libs/nativewindow/rust/src/lib.rs b/libs/nativewindow/rust/src/lib.rs
index a5bcc62..a2ec57c 100644
--- a/libs/nativewindow/rust/src/lib.rs
+++ b/libs/nativewindow/rust/src/lib.rs
@@ -61,6 +61,7 @@
     /// program termination.
     ///
     /// Available since API level 26.
+    #[inline]
     pub fn new(
         width: u32,
         height: u32,
@@ -199,6 +200,7 @@
     #[test]
     #[should_panic]
     fn take_from_raw_panics_on_null() {
+        // SAFETY: Passing a null pointer is safe, it should just panic.
         unsafe { AHardwareBuffer::take_from_raw(ptr::null_mut()) };
     }
 
@@ -216,9 +218,13 @@
         };
         let mut raw_buffer_ptr = ptr::null_mut();
 
+        // SAFETY: The pointers are valid because they come from references, and
+        // `AHardwareBuffer_allocate` doesn't retain them after it returns.
         let status = unsafe { ffi::AHardwareBuffer_allocate(&buffer_desc, &mut raw_buffer_ptr) };
         assert_eq!(status, 0);
 
+        // SAFETY: The pointer must be valid because it was just allocated successfully, and we
+        // don't use it after calling this.
         let buffer = unsafe { AHardwareBuffer::take_from_raw(raw_buffer_ptr as *mut c_void) };
         assert_eq!(buffer.width(), 1024);
     }
diff --git a/libs/nativewindow/tests/benchmark/Android.bp b/libs/nativewindow/tests/benchmark/Android.bp
new file mode 100644
index 0000000..6f844cf
--- /dev/null
+++ b/libs/nativewindow/tests/benchmark/Android.bp
@@ -0,0 +1,50 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_defaults {
+    name: "nativewindow_benchmark_defaults_cc",
+    shared_libs: ["libnativewindow"],
+    static_libs: [
+        "libbase",
+        "libgoogle-benchmark-main",
+    ],
+    test_suites: [
+        "device-tests",
+        "NativeWindowBenchmarks",
+    ],
+}
+
+cc_benchmark {
+    name: "nativewindow_buffer_benchmarks_cc",
+    srcs: ["buffer_benchmarks.cc"],
+    defaults: ["nativewindow_benchmark_defaults_cc"],
+}
+
+rust_defaults {
+    name: "nativewindow_benchmark_defaults_rs",
+    rustlibs: [
+        "libnativewindow_rs",
+        "libcriterion",
+    ],
+    test_suites: [
+        "device-tests",
+        "NativeWindowBenchmarks",
+    ],
+}
+
+rust_benchmark {
+    name: "nativewindow_buffer_benchmarks_rs",
+    srcs: ["buffer_benchmarks.rs"],
+    defaults: ["nativewindow_benchmark_defaults_rs"],
+}
diff --git a/libs/nativewindow/tests/benchmark/README.md b/libs/nativewindow/tests/benchmark/README.md
new file mode 100644
index 0000000..7eae538
--- /dev/null
+++ b/libs/nativewindow/tests/benchmark/README.md
@@ -0,0 +1,22 @@
+# libnativewindow Benchmarks
+
+This directory contains benchmarks for the C++ and Rust variants of
+libnativewindow.
+
+## Running
+
+Getting statistics from Rust benchmarks directly through tradefed is currently a
+little tricky. As a workaround, use atest to build and push the binaries, then run
+the benchmarks by hand to collect the stats.
+
+```
+  $ atest nativewindow_buffer_benchmarks_rs nativewindow_buffer_benchmarks_cc -d
+  $ adb shell /data/local/tmp/nativewindow_buffer_benchmarks_cc/x86_64/nativewindow_buffer_benchmarks_cc
+  $ adb shell /data/local/tmp/nativewindow_buffer_benchmarks_rs/x86_64/nativewindow_buffer_benchmarks_rs --bench
+```
+
+## Results
+
+On a remote emulator, the Rust and C++ benchmark results are roughly equivalent:
+allocating and deallocating a 720p buffer takes ~2.3ms in each case.
diff --git a/libs/nativewindow/tests/benchmark/buffer_benchmarks.cc b/libs/nativewindow/tests/benchmark/buffer_benchmarks.cc
new file mode 100644
index 0000000..9b31993
--- /dev/null
+++ b/libs/nativewindow/tests/benchmark/buffer_benchmarks.cc
@@ -0,0 +1,74 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <android-base/macros.h>
+#include <android/hardware_buffer.h>
+#include <benchmark/benchmark.h>
+
+constexpr AHardwareBuffer_Desc k720pDesc = {.width = 1280,
+                                            .height = 720,
+                                            .layers = 1,
+                                            .format = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM,
+                                            .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN,
+                                            .stride = 0};
+
+static void BM_BufferAllocationDeallocation(benchmark::State& state) {
+    AHardwareBuffer* buffer = nullptr;
+    for (auto _ : state) {
+        int status = AHardwareBuffer_allocate(&k720pDesc, &buffer);
+        if (UNLIKELY(status != 0)) {
+            state.SkipWithError("Unable to allocate buffer.");
+        }
+        AHardwareBuffer_release(buffer);
+        buffer = nullptr;
+    }
+}
+BENCHMARK(BM_BufferAllocationDeallocation);
+
+static void BM_AHardwareBuffer_Id(benchmark::State& state) {
+    AHardwareBuffer* buffer = nullptr;
+    int status = AHardwareBuffer_allocate(&k720pDesc, &buffer);
+    if (UNLIKELY(status != 0)) {
+        state.SkipWithError("Unable to allocate buffer.");
+    }
+
+    for (auto _ : state) {
+        uint64_t id = 0;
+        int status = AHardwareBuffer_getId(buffer, &id);
+        if (UNLIKELY(status != 0)) {
+            state.SkipWithError("Unable to get ID.");
+        }
+    }
+
+    AHardwareBuffer_release(buffer);
+}
+BENCHMARK(BM_AHardwareBuffer_Id);
+
+static void BM_AHardwareBuffer_Desc(benchmark::State& state) {
+    AHardwareBuffer* buffer = nullptr;
+    int status = AHardwareBuffer_allocate(&k720pDesc, &buffer);
+    if (UNLIKELY(status != 0)) {
+        state.SkipWithError("Unable to allocate buffer.");
+    }
+
+    for (auto _ : state) {
+        AHardwareBuffer_Desc desc = {};
+        AHardwareBuffer_describe(buffer, &desc);
+    }
+
+    AHardwareBuffer_release(buffer);
+}
+BENCHMARK(BM_AHardwareBuffer_Desc);
+
+BENCHMARK_MAIN();
diff --git a/libs/nativewindow/tests/benchmark/buffer_benchmarks.rs b/libs/nativewindow/tests/benchmark/buffer_benchmarks.rs
new file mode 100644
index 0000000..fbd49c0
--- /dev/null
+++ b/libs/nativewindow/tests/benchmark/buffer_benchmarks.rs
@@ -0,0 +1,60 @@
+// Copyright (C) 2023 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Benchmark for libnativewindow AHardwareBuffer bindings
+
+#![allow(dead_code)]
+#![allow(missing_docs)]
+
+use criterion::*;
+use nativewindow::*;
+
+#[inline]
+fn create_720p_buffer() -> AHardwareBuffer {
+    AHardwareBuffer::new(
+        1280,
+        720,
+        1,
+        AHardwareBuffer_Format::AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM,
+        AHardwareBuffer_UsageFlags::AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN,
+    )
+    .unwrap()
+}
+
+fn criterion_benchmark(c: &mut Criterion) {
+    c.bench_function("allocate_deallocate", |b| {
+        b.iter(|| {
+            let buffer = create_720p_buffer();
+            drop(buffer);
+        })
+    });
+
+    let buffer = create_720p_buffer();
+    c.bench_with_input(BenchmarkId::new("id", "buffer"), &buffer, |b, buffer| {
+        b.iter(|| {
+            buffer.id();
+        })
+    });
+
+    // This benchmark exercises getters that need to fetch data via an
+    // underlying call to AHardwareBuffer_describe.
+    c.bench_with_input(BenchmarkId::new("desc", "buffer"), &buffer, |b, buffer| {
+        b.iter(|| {
+            buffer.width();
+        })
+    });
+}
+
+criterion_group!(benches, criterion_benchmark);
+criterion_main!(benches);
diff --git a/libs/renderengine/OWNERS b/libs/renderengine/OWNERS
index 5d23a5e..66e1aa1 100644
--- a/libs/renderengine/OWNERS
+++ b/libs/renderengine/OWNERS
@@ -1,3 +1,5 @@
+# Bug component: 1075131
+
 adyabr@google.com
 alecmouri@google.com
 djsollen@google.com
diff --git a/libs/renderengine/benchmark/RenderEngineBench.cpp b/libs/renderengine/benchmark/RenderEngineBench.cpp
index bd7b617..791d4c9 100644
--- a/libs/renderengine/benchmark/RenderEngineBench.cpp
+++ b/libs/renderengine/benchmark/RenderEngineBench.cpp
@@ -121,7 +121,6 @@
                         .setSupportsBackgroundBlur(true)
                         .setContextPriority(RenderEngine::ContextPriority::REALTIME)
                         .setRenderEngineType(type)
-                        .setUseColorManagerment(true)
                         .build();
     return RenderEngine::create(args);
 }
diff --git a/libs/renderengine/gl/GLESRenderEngine.cpp b/libs/renderengine/gl/GLESRenderEngine.cpp
index 0d7df10..a512b9a 100644
--- a/libs/renderengine/gl/GLESRenderEngine.cpp
+++ b/libs/renderengine/gl/GLESRenderEngine.cpp
@@ -389,7 +389,6 @@
         mVpWidth(0),
         mVpHeight(0),
         mFramebufferImageCacheSize(args.imageCacheSize),
-        mUseColorManagement(args.useColorManagement),
         mPrecacheToneMapperShaderOnly(args.precacheToneMapperShaderOnly) {
     glGetIntegerv(GL_MAX_TEXTURE_SIZE, &mMaxTextureSize);
     glGetIntegerv(GL_MAX_VIEWPORT_DIMS, mMaxViewportDims);
@@ -410,33 +409,31 @@
 
     // mColorBlindnessCorrection = M;
 
-    if (mUseColorManagement) {
-        const ColorSpace srgb(ColorSpace::sRGB());
-        const ColorSpace displayP3(ColorSpace::DisplayP3());
-        const ColorSpace bt2020(ColorSpace::BT2020());
+    const ColorSpace srgb(ColorSpace::sRGB());
+    const ColorSpace displayP3(ColorSpace::DisplayP3());
+    const ColorSpace bt2020(ColorSpace::BT2020());
 
-        // no chromatic adaptation needed since all color spaces use D65 for their white points.
-        mSrgbToXyz = mat4(srgb.getRGBtoXYZ());
-        mDisplayP3ToXyz = mat4(displayP3.getRGBtoXYZ());
-        mBt2020ToXyz = mat4(bt2020.getRGBtoXYZ());
-        mXyzToSrgb = mat4(srgb.getXYZtoRGB());
-        mXyzToDisplayP3 = mat4(displayP3.getXYZtoRGB());
-        mXyzToBt2020 = mat4(bt2020.getXYZtoRGB());
+    // no chromatic adaptation needed since all color spaces use D65 for their white points.
+    mSrgbToXyz = mat4(srgb.getRGBtoXYZ());
+    mDisplayP3ToXyz = mat4(displayP3.getRGBtoXYZ());
+    mBt2020ToXyz = mat4(bt2020.getRGBtoXYZ());
+    mXyzToSrgb = mat4(srgb.getXYZtoRGB());
+    mXyzToDisplayP3 = mat4(displayP3.getXYZtoRGB());
+    mXyzToBt2020 = mat4(bt2020.getXYZtoRGB());
 
-        // Compute sRGB to Display P3 and BT2020 transform matrix.
-        // NOTE: For now, we are limiting output wide color space support to
-        // Display-P3 and BT2020 only.
-        mSrgbToDisplayP3 = mXyzToDisplayP3 * mSrgbToXyz;
-        mSrgbToBt2020 = mXyzToBt2020 * mSrgbToXyz;
+    // Compute sRGB to Display P3 and BT2020 transform matrix.
+    // NOTE: For now, we are limiting output wide color space support to
+    // Display-P3 and BT2020 only.
+    mSrgbToDisplayP3 = mXyzToDisplayP3 * mSrgbToXyz;
+    mSrgbToBt2020 = mXyzToBt2020 * mSrgbToXyz;
 
-        // Compute Display P3 to sRGB and BT2020 transform matrix.
-        mDisplayP3ToSrgb = mXyzToSrgb * mDisplayP3ToXyz;
-        mDisplayP3ToBt2020 = mXyzToBt2020 * mDisplayP3ToXyz;
+    // Compute Display P3 to sRGB and BT2020 transform matrix.
+    mDisplayP3ToSrgb = mXyzToSrgb * mDisplayP3ToXyz;
+    mDisplayP3ToBt2020 = mXyzToBt2020 * mDisplayP3ToXyz;
 
-        // Compute BT2020 to sRGB and Display P3 transform matrix
-        mBt2020ToSrgb = mXyzToSrgb * mBt2020ToXyz;
-        mBt2020ToDisplayP3 = mXyzToDisplayP3 * mBt2020ToXyz;
-    }
+    // Compute BT2020 to sRGB and Display P3 transform matrix
+    mBt2020ToSrgb = mXyzToSrgb * mBt2020ToXyz;
+    mBt2020ToDisplayP3 = mXyzToDisplayP3 * mBt2020ToXyz;
 
     char value[PROPERTY_VALUE_MAX];
     property_get("debug.egl.traceGpuCompletion", value, "0");
@@ -518,7 +515,7 @@
 
 std::future<void> GLESRenderEngine::primeCache() {
     ProgramCache::getInstance().primeCache(mInProtectedContext ? mProtectedEGLContext : mEGLContext,
-                                           mUseColorManagement, mPrecacheToneMapperShaderOnly);
+                                           mPrecacheToneMapperShaderOnly);
     return {};
 }
 
@@ -1251,7 +1248,6 @@
             texture.setFiltering(layer.source.buffer.useTextureFiltering);
 
             texture.setDimensions(gBuf->getWidth(), gBuf->getHeight());
-            setSourceY410BT2020(layer.source.buffer.isY410BT2020);
 
             renderengine::Mesh::VertexArray<vec2> texCoords(mesh.getTexCoordArray<vec2>());
             texCoords[0] = vec2(0.0, 0.0);
@@ -1294,7 +1290,6 @@
         // Cleanup if there's a buffer source
         if (layer.source.buffer.buffer != nullptr) {
             disableBlending();
-            setSourceY410BT2020(false);
             disableTexturing();
         }
     }
@@ -1357,10 +1352,6 @@
     }
 }
 
-void GLESRenderEngine::setSourceY410BT2020(bool enable) {
-    mState.isY410BT2020 = enable;
-}
-
 void GLESRenderEngine::setSourceDataSpace(Dataspace source) {
     mDataSpace = source;
 }
@@ -1450,99 +1441,95 @@
     // BT2020 data space, in that case, the output data space is set to be
     // BT2020_HLG or BT2020_PQ respectively. In GPU fall back we need
     // to respect this and convert non-HDR content to HDR format.
-    if (mUseColorManagement) {
-        Dataspace inputStandard = static_cast<Dataspace>(mDataSpace & Dataspace::STANDARD_MASK);
-        Dataspace inputTransfer = static_cast<Dataspace>(mDataSpace & Dataspace::TRANSFER_MASK);
-        Dataspace outputStandard =
-                static_cast<Dataspace>(mOutputDataSpace & Dataspace::STANDARD_MASK);
-        Dataspace outputTransfer =
-                static_cast<Dataspace>(mOutputDataSpace & Dataspace::TRANSFER_MASK);
-        bool needsXYZConversion = needsXYZTransformMatrix();
+    Dataspace inputStandard = static_cast<Dataspace>(mDataSpace & Dataspace::STANDARD_MASK);
+    Dataspace inputTransfer = static_cast<Dataspace>(mDataSpace & Dataspace::TRANSFER_MASK);
+    Dataspace outputStandard = static_cast<Dataspace>(mOutputDataSpace & Dataspace::STANDARD_MASK);
+    Dataspace outputTransfer = static_cast<Dataspace>(mOutputDataSpace & Dataspace::TRANSFER_MASK);
+    bool needsXYZConversion = needsXYZTransformMatrix();
 
-        // NOTE: if the input standard of the input dataspace is not STANDARD_DCI_P3 or
-        // STANDARD_BT2020, it will be  treated as STANDARD_BT709
-        if (inputStandard != Dataspace::STANDARD_DCI_P3 &&
-            inputStandard != Dataspace::STANDARD_BT2020) {
-            inputStandard = Dataspace::STANDARD_BT709;
+    // NOTE: if the input standard of the input dataspace is not STANDARD_DCI_P3 or
+    // STANDARD_BT2020, it will be treated as STANDARD_BT709
+    if (inputStandard != Dataspace::STANDARD_DCI_P3 &&
+        inputStandard != Dataspace::STANDARD_BT2020) {
+        inputStandard = Dataspace::STANDARD_BT709;
+    }
+
+    if (needsXYZConversion) {
+        // The supported input color spaces are standard RGB, Display P3 and BT2020.
+        switch (inputStandard) {
+            case Dataspace::STANDARD_DCI_P3:
+                managedState.inputTransformMatrix = mDisplayP3ToXyz;
+                break;
+            case Dataspace::STANDARD_BT2020:
+                managedState.inputTransformMatrix = mBt2020ToXyz;
+                break;
+            default:
+                managedState.inputTransformMatrix = mSrgbToXyz;
+                break;
         }
 
-        if (needsXYZConversion) {
-            // The supported input color spaces are standard RGB, Display P3 and BT2020.
-            switch (inputStandard) {
-                case Dataspace::STANDARD_DCI_P3:
-                    managedState.inputTransformMatrix = mDisplayP3ToXyz;
-                    break;
-                case Dataspace::STANDARD_BT2020:
-                    managedState.inputTransformMatrix = mBt2020ToXyz;
-                    break;
-                default:
-                    managedState.inputTransformMatrix = mSrgbToXyz;
-                    break;
-            }
-
-            // The supported output color spaces are BT2020, Display P3 and standard RGB.
-            switch (outputStandard) {
-                case Dataspace::STANDARD_BT2020:
-                    managedState.outputTransformMatrix = mXyzToBt2020;
-                    break;
-                case Dataspace::STANDARD_DCI_P3:
-                    managedState.outputTransformMatrix = mXyzToDisplayP3;
-                    break;
-                default:
-                    managedState.outputTransformMatrix = mXyzToSrgb;
-                    break;
-            }
-        } else if (inputStandard != outputStandard) {
-            // At this point, the input data space and output data space could be both
-            // HDR data spaces, but they match each other, we do nothing in this case.
-            // In addition to the case above, the input data space could be
-            // - scRGB linear
-            // - scRGB non-linear
-            // - sRGB
-            // - Display P3
-            // - BT2020
-            // The output data spaces could be
-            // - sRGB
-            // - Display P3
-            // - BT2020
-            switch (outputStandard) {
-                case Dataspace::STANDARD_BT2020:
-                    if (inputStandard == Dataspace::STANDARD_BT709) {
-                        managedState.outputTransformMatrix = mSrgbToBt2020;
-                    } else if (inputStandard == Dataspace::STANDARD_DCI_P3) {
-                        managedState.outputTransformMatrix = mDisplayP3ToBt2020;
-                    }
-                    break;
-                case Dataspace::STANDARD_DCI_P3:
-                    if (inputStandard == Dataspace::STANDARD_BT709) {
-                        managedState.outputTransformMatrix = mSrgbToDisplayP3;
-                    } else if (inputStandard == Dataspace::STANDARD_BT2020) {
-                        managedState.outputTransformMatrix = mBt2020ToDisplayP3;
-                    }
-                    break;
-                default:
-                    if (inputStandard == Dataspace::STANDARD_DCI_P3) {
-                        managedState.outputTransformMatrix = mDisplayP3ToSrgb;
-                    } else if (inputStandard == Dataspace::STANDARD_BT2020) {
-                        managedState.outputTransformMatrix = mBt2020ToSrgb;
-                    }
-                    break;
-            }
+        // The supported output color spaces are BT2020, Display P3 and standard RGB.
+        switch (outputStandard) {
+            case Dataspace::STANDARD_BT2020:
+                managedState.outputTransformMatrix = mXyzToBt2020;
+                break;
+            case Dataspace::STANDARD_DCI_P3:
+                managedState.outputTransformMatrix = mXyzToDisplayP3;
+                break;
+            default:
+                managedState.outputTransformMatrix = mXyzToSrgb;
+                break;
         }
-
-        // we need to convert the RGB value to linear space and convert it back when:
-        // - there is a color matrix that is not an identity matrix, or
-        // - there is an output transform matrix that is not an identity matrix, or
-        // - the input transfer function doesn't match the output transfer function.
-        if (managedState.hasColorMatrix() || managedState.hasOutputTransformMatrix() ||
-            inputTransfer != outputTransfer) {
-            managedState.inputTransferFunction =
-                    Description::dataSpaceToTransferFunction(inputTransfer);
-            managedState.outputTransferFunction =
-                    Description::dataSpaceToTransferFunction(outputTransfer);
+    } else if (inputStandard != outputStandard) {
+        // At this point, the input data space and output data space could be both
+        // HDR data spaces, but they match each other, we do nothing in this case.
+        // In addition to the case above, the input data space could be
+        // - scRGB linear
+        // - scRGB non-linear
+        // - sRGB
+        // - Display P3
+        // - BT2020
+        // The output data spaces could be
+        // - sRGB
+        // - Display P3
+        // - BT2020
+        switch (outputStandard) {
+            case Dataspace::STANDARD_BT2020:
+                if (inputStandard == Dataspace::STANDARD_BT709) {
+                    managedState.outputTransformMatrix = mSrgbToBt2020;
+                } else if (inputStandard == Dataspace::STANDARD_DCI_P3) {
+                    managedState.outputTransformMatrix = mDisplayP3ToBt2020;
+                }
+                break;
+            case Dataspace::STANDARD_DCI_P3:
+                if (inputStandard == Dataspace::STANDARD_BT709) {
+                    managedState.outputTransformMatrix = mSrgbToDisplayP3;
+                } else if (inputStandard == Dataspace::STANDARD_BT2020) {
+                    managedState.outputTransformMatrix = mBt2020ToDisplayP3;
+                }
+                break;
+            default:
+                if (inputStandard == Dataspace::STANDARD_DCI_P3) {
+                    managedState.outputTransformMatrix = mDisplayP3ToSrgb;
+                } else if (inputStandard == Dataspace::STANDARD_BT2020) {
+                    managedState.outputTransformMatrix = mBt2020ToSrgb;
+                }
+                break;
         }
     }
 
+    // we need to convert the RGB value to linear space and convert it back when:
+    // - there is a color matrix that is not an identity matrix, or
+    // - there is an output transform matrix that is not an identity matrix, or
+    // - the input transfer function doesn't match the output transfer function.
+    if (managedState.hasColorMatrix() || managedState.hasOutputTransformMatrix() ||
+        inputTransfer != outputTransfer) {
+        managedState.inputTransferFunction =
+                Description::dataSpaceToTransferFunction(inputTransfer);
+        managedState.outputTransferFunction =
+                Description::dataSpaceToTransferFunction(outputTransfer);
+    }
+
     ProgramCache::getInstance().useProgram(mInProtectedContext ? mProtectedEGLContext : mEGLContext,
                                            managedState);
 
@@ -1553,7 +1540,7 @@
         glDrawArrays(mesh.getPrimitive(), 0, mesh.getVertexCount());
     }
 
-    if (mUseColorManagement && outputDebugPPMs) {
+    if (outputDebugPPMs) {
         static uint64_t managedColorFrameCount = 0;
         std::ostringstream out;
         out << "/data/texture_out" << managedColorFrameCount++;
diff --git a/libs/renderengine/gl/GLESRenderEngine.h b/libs/renderengine/gl/GLESRenderEngine.h
index 402ff52..f5368d4 100644
--- a/libs/renderengine/gl/GLESRenderEngine.h
+++ b/libs/renderengine/gl/GLESRenderEngine.h
@@ -183,7 +183,6 @@
     void setupCornerRadiusCropSize(float width, float height);
 
     // HDR and color management related functions and state
-    void setSourceY410BT2020(bool enable);
     void setSourceDataSpace(ui::Dataspace source);
     void setOutputDataSpace(ui::Dataspace dataspace);
     void setDisplayMaxLuminance(const float maxLuminance);
@@ -237,10 +236,6 @@
     // Current output dataspace of the render engine
     ui::Dataspace mOutputDataSpace = ui::Dataspace::UNKNOWN;
 
-    // Whether device supports color management, currently color management
-    // supports sRGB, DisplayP3 color spaces.
-    const bool mUseColorManagement = false;
-
     // Whether only shaders performing tone mapping from HDR to SDR will be generated on
     // primeCache().
     const bool mPrecacheToneMapperShaderOnly = false;
diff --git a/libs/renderengine/gl/ProgramCache.cpp b/libs/renderengine/gl/ProgramCache.cpp
index f7f2d54..96ccf5c 100644
--- a/libs/renderengine/gl/ProgramCache.cpp
+++ b/libs/renderengine/gl/ProgramCache.cpp
@@ -77,8 +77,7 @@
     return f;
 }
 
-void ProgramCache::primeCache(
-        EGLContext context, bool useColorManagement, bool toneMapperShaderOnly) {
+void ProgramCache::primeCache(EGLContext context, bool toneMapperShaderOnly) {
     auto& cache = mCaches[context];
     uint32_t shaderCount = 0;
 
@@ -98,9 +97,6 @@
             shaderKey.set(Key::INPUT_TF_MASK, (i & 1) ?
                     Key::INPUT_TF_HLG : Key::INPUT_TF_ST2084);
 
-            // Cache Y410 input on or off
-            shaderKey.set(Key::Y410_BT2020_MASK, (i & 2) ?
-                    Key::Y410_BT2020_ON : Key::Y410_BT2020_OFF);
             if (cache.count(shaderKey) == 0) {
                 cache.emplace(shaderKey, generateProgram(shaderKey));
                 shaderCount++;
@@ -129,27 +125,24 @@
     }
 
     // Prime for sRGB->P3 conversion
-    if (useColorManagement) {
-        Key shaderKey;
-        shaderKey.set(Key::BLEND_MASK | Key::OUTPUT_TRANSFORM_MATRIX_MASK | Key::INPUT_TF_MASK |
-                              Key::OUTPUT_TF_MASK,
-                      Key::BLEND_PREMULT | Key::OUTPUT_TRANSFORM_MATRIX_ON | Key::INPUT_TF_SRGB |
-                              Key::OUTPUT_TF_SRGB);
-        for (int i = 0; i < 16; i++) {
-            shaderKey.set(Key::OPACITY_MASK,
-                          (i & 1) ? Key::OPACITY_OPAQUE : Key::OPACITY_TRANSLUCENT);
-            shaderKey.set(Key::ALPHA_MASK, (i & 2) ? Key::ALPHA_LT_ONE : Key::ALPHA_EQ_ONE);
+    Key shaderKey;
+    shaderKey.set(Key::BLEND_MASK | Key::OUTPUT_TRANSFORM_MATRIX_MASK | Key::INPUT_TF_MASK |
+                          Key::OUTPUT_TF_MASK,
+                  Key::BLEND_PREMULT | Key::OUTPUT_TRANSFORM_MATRIX_ON | Key::INPUT_TF_SRGB |
+                          Key::OUTPUT_TF_SRGB);
+    for (int i = 0; i < 16; i++) {
+        shaderKey.set(Key::OPACITY_MASK, (i & 1) ? Key::OPACITY_OPAQUE : Key::OPACITY_TRANSLUCENT);
+        shaderKey.set(Key::ALPHA_MASK, (i & 2) ? Key::ALPHA_LT_ONE : Key::ALPHA_EQ_ONE);
 
-            // Cache rounded corners
-            shaderKey.set(Key::ROUNDED_CORNERS_MASK,
-                          (i & 4) ? Key::ROUNDED_CORNERS_ON : Key::ROUNDED_CORNERS_OFF);
+        // Cache rounded corners
+        shaderKey.set(Key::ROUNDED_CORNERS_MASK,
+                      (i & 4) ? Key::ROUNDED_CORNERS_ON : Key::ROUNDED_CORNERS_OFF);
 
-            // Cache texture off option for window transition
-            shaderKey.set(Key::TEXTURE_MASK, (i & 8) ? Key::TEXTURE_EXT : Key::TEXTURE_OFF);
-            if (cache.count(shaderKey) == 0) {
-                cache.emplace(shaderKey, generateProgram(shaderKey));
-                shaderCount++;
-            }
+        // Cache texture off option for window transition
+        shaderKey.set(Key::TEXTURE_MASK, (i & 8) ? Key::TEXTURE_EXT : Key::TEXTURE_OFF);
+        if (cache.count(shaderKey) == 0) {
+            cache.emplace(shaderKey, generateProgram(shaderKey));
+            shaderCount++;
         }
     }
 
@@ -161,13 +154,11 @@
 ProgramCache::Key ProgramCache::computeKey(const Description& description) {
     Key needs;
     needs.set(Key::TEXTURE_MASK,
-              !description.textureEnabled
-                      ? Key::TEXTURE_OFF
+              !description.textureEnabled ? Key::TEXTURE_OFF
                       : description.texture.getTextureTarget() == GL_TEXTURE_EXTERNAL_OES
-                              ? Key::TEXTURE_EXT
-                              : description.texture.getTextureTarget() == GL_TEXTURE_2D
-                                      ? Key::TEXTURE_2D
-                                      : Key::TEXTURE_OFF)
+                      ? Key::TEXTURE_EXT
+                      : description.texture.getTextureTarget() == GL_TEXTURE_2D ? Key::TEXTURE_2D
+                                                                                : Key::TEXTURE_OFF)
             .set(Key::ALPHA_MASK, (description.color.a < 1) ? Key::ALPHA_LT_ONE : Key::ALPHA_EQ_ONE)
             .set(Key::BLEND_MASK,
                  description.isPremultipliedAlpha ? Key::BLEND_PREMULT : Key::BLEND_NORMAL)
@@ -186,8 +177,6 @@
             .set(Key::ROUNDED_CORNERS_MASK,
                  description.cornerRadius > 0 ? Key::ROUNDED_CORNERS_ON : Key::ROUNDED_CORNERS_OFF)
             .set(Key::SHADOW_MASK, description.drawShadows ? Key::SHADOW_ON : Key::SHADOW_OFF);
-    needs.set(Key::Y410_BT2020_MASK,
-              description.isY410BT2020 ? Key::Y410_BT2020_ON : Key::Y410_BT2020_OFF);
 
     if (needs.hasTransformMatrix() ||
         (description.inputTransferFunction != description.outputTransferFunction)) {
@@ -650,20 +639,6 @@
         fs << "uniform vec4 color;";
     }
 
-    if (needs.isY410BT2020()) {
-        fs << R"__SHADER__(
-            vec3 convertY410BT2020(const vec3 color) {
-                const vec3 offset = vec3(0.0625, 0.5, 0.5);
-                const mat3 transform = mat3(
-                    vec3(1.1678,  1.1678, 1.1678),
-                    vec3(   0.0, -0.1878, 2.1481),
-                    vec3(1.6836, -0.6523,   0.0));
-                // Y is in G, U is in R, and V is in B
-                return clamp(transform * (color.grb - offset), 0.0, 1.0);
-            }
-            )__SHADER__";
-    }
-
     if (needs.hasTransformMatrix() || (needs.getInputTF() != needs.getOutputTF()) ||
         needs.hasDisplayColorMatrix()) {
         if (needs.needsToneMapping()) {
@@ -730,9 +705,6 @@
     } else {
         if (needs.isTexturing()) {
             fs << "gl_FragColor = texture2D(sampler, outTexCoords);";
-            if (needs.isY410BT2020()) {
-                fs << "gl_FragColor.rgb = convertY410BT2020(gl_FragColor.rgb);";
-            }
         } else {
             fs << "gl_FragColor.rgb = color.rgb;";
             fs << "gl_FragColor.a = 1.0;";
diff --git a/libs/renderengine/gl/ProgramCache.h b/libs/renderengine/gl/ProgramCache.h
index 535d21c..83fef8e 100644
--- a/libs/renderengine/gl/ProgramCache.h
+++ b/libs/renderengine/gl/ProgramCache.h
@@ -108,11 +108,6 @@
             OUTPUT_TF_ST2084 = 2 << OUTPUT_TF_SHIFT,
             OUTPUT_TF_HLG = 3 << OUTPUT_TF_SHIFT,
 
-            Y410_BT2020_SHIFT = 12,
-            Y410_BT2020_MASK = 1 << Y410_BT2020_SHIFT,
-            Y410_BT2020_OFF = 0 << Y410_BT2020_SHIFT,
-            Y410_BT2020_ON = 1 << Y410_BT2020_SHIFT,
-
             SHADOW_SHIFT = 13,
             SHADOW_MASK = 1 << SHADOW_SHIFT,
             SHADOW_OFF = 0 << SHADOW_SHIFT,
@@ -180,7 +175,6 @@
             outputTF >>= Key::OUTPUT_TF_SHIFT;
             return inputTF != outputTF;
         }
-        inline bool isY410BT2020() const { return (mKey & Y410_BT2020_MASK) == Y410_BT2020_ON; }
 
         // for use by std::unordered_map
 
@@ -195,7 +189,7 @@
     ~ProgramCache() = default;
 
     // Generate shaders to populate the cache
-    void primeCache(const EGLContext context, bool useColorManagement, bool toneMapperShaderOnly);
+    void primeCache(const EGLContext context, bool toneMapperShaderOnly);
 
     size_t getSize(const EGLContext context) { return mCaches[context].size(); }
 
diff --git a/libs/renderengine/include/renderengine/LayerSettings.h b/libs/renderengine/include/renderengine/LayerSettings.h
index b3a617c..b501c40 100644
--- a/libs/renderengine/include/renderengine/LayerSettings.h
+++ b/libs/renderengine/include/renderengine/LayerSettings.h
@@ -64,9 +64,6 @@
     // overrides the alpha channel of the buffer.
     bool isOpaque = false;
 
-    // HDR color-space setting for Y410.
-    bool isY410BT2020 = false;
-
     float maxLuminanceNits = 0.0;
 };
 
@@ -189,8 +186,7 @@
             lhs.useTextureFiltering == rhs.useTextureFiltering &&
             lhs.textureTransform == rhs.textureTransform &&
             lhs.usePremultipliedAlpha == rhs.usePremultipliedAlpha &&
-            lhs.isOpaque == rhs.isOpaque && lhs.isY410BT2020 == rhs.isY410BT2020 &&
-            lhs.maxLuminanceNits == rhs.maxLuminanceNits;
+            lhs.isOpaque == rhs.isOpaque && lhs.maxLuminanceNits == rhs.maxLuminanceNits;
 }
 
 static inline bool operator==(const Geometry& lhs, const Geometry& rhs) {
@@ -247,7 +243,6 @@
     PrintMatrix(settings.textureTransform, os);
     *os << "\n    .usePremultipliedAlpha = " << settings.usePremultipliedAlpha;
     *os << "\n    .isOpaque = " << settings.isOpaque;
-    *os << "\n    .isY410BT2020 = " << settings.isY410BT2020;
     *os << "\n    .maxLuminanceNits = " << settings.maxLuminanceNits;
     *os << "\n}";
 }
diff --git a/libs/renderengine/include/renderengine/RenderEngine.h b/libs/renderengine/include/renderengine/RenderEngine.h
index 0d910c9..83af252 100644
--- a/libs/renderengine/include/renderengine/RenderEngine.h
+++ b/libs/renderengine/include/renderengine/RenderEngine.h
@@ -271,14 +271,13 @@
 
 private:
     // must be created by Builder via constructor with full argument list
-    RenderEngineCreationArgs(int _pixelFormat, uint32_t _imageCacheSize, bool _useColorManagement,
+    RenderEngineCreationArgs(int _pixelFormat, uint32_t _imageCacheSize,
                              bool _enableProtectedContext, bool _precacheToneMapperShaderOnly,
                              bool _supportsBackgroundBlur,
                              RenderEngine::ContextPriority _contextPriority,
                              RenderEngine::RenderEngineType _renderEngineType)
           : pixelFormat(_pixelFormat),
             imageCacheSize(_imageCacheSize),
-            useColorManagement(_useColorManagement),
             enableProtectedContext(_enableProtectedContext),
             precacheToneMapperShaderOnly(_precacheToneMapperShaderOnly),
             supportsBackgroundBlur(_supportsBackgroundBlur),
@@ -298,10 +297,6 @@
         this->imageCacheSize = imageCacheSize;
         return *this;
     }
-    Builder& setUseColorManagerment(bool useColorManagement) {
-        this->useColorManagement = useColorManagement;
-        return *this;
-    }
     Builder& setEnableProtectedContext(bool enableProtectedContext) {
         this->enableProtectedContext = enableProtectedContext;
         return *this;
@@ -323,16 +318,15 @@
         return *this;
     }
     RenderEngineCreationArgs build() const {
-        return RenderEngineCreationArgs(pixelFormat, imageCacheSize, useColorManagement,
-                                        enableProtectedContext, precacheToneMapperShaderOnly,
-                                        supportsBackgroundBlur, contextPriority, renderEngineType);
+        return RenderEngineCreationArgs(pixelFormat, imageCacheSize, enableProtectedContext,
+                                        precacheToneMapperShaderOnly, supportsBackgroundBlur,
+                                        contextPriority, renderEngineType);
     }
 
 private:
     // 1 means RGBA_8888
     int pixelFormat = 1;
     uint32_t imageCacheSize = 0;
-    bool useColorManagement = true;
     bool enableProtectedContext = false;
     bool precacheToneMapperShaderOnly = false;
     bool supportsBackgroundBlur = false;
diff --git a/libs/renderengine/include/renderengine/private/Description.h b/libs/renderengine/include/renderengine/private/Description.h
index fa6ec10..2873ad7 100644
--- a/libs/renderengine/include/renderengine/private/Description.h
+++ b/libs/renderengine/include/renderengine/private/Description.h
@@ -64,9 +64,6 @@
     // color used when texturing is disabled or when setting alpha.
     half4 color;
 
-    // true if the sampled pixel values are in Y410/BT2020 rather than RGBA
-    bool isY410BT2020 = false;
-
     // transfer functions for the input/output
     TransferFunction inputTransferFunction = TransferFunction::LINEAR;
     TransferFunction outputTransferFunction = TransferFunction::LINEAR;
diff --git a/libs/renderengine/skia/AutoBackendTexture.cpp b/libs/renderengine/skia/AutoBackendTexture.cpp
index dad3c19..90dcae4 100644
--- a/libs/renderengine/skia/AutoBackendTexture.cpp
+++ b/libs/renderengine/skia/AutoBackendTexture.cpp
@@ -23,7 +23,7 @@
 #include <SkImage.h>
 #include <include/gpu/ganesh/SkImageGanesh.h>
 #include <include/gpu/ganesh/SkSurfaceGanesh.h>
-
+#include <include/gpu/ganesh/gl/GrGLBackendSurface.h>
 #include <android/hardware_buffer.h>
 #include "ColorSpaces.h"
 #include "log/log_main.h"
@@ -40,13 +40,44 @@
     AHardwareBuffer_Desc desc;
     AHardwareBuffer_describe(buffer, &desc);
     bool createProtectedImage = 0 != (desc.usage & AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT);
-    GrBackendFormat backendFormat =
-            GrAHardwareBufferUtils::GetBackendFormat(context, buffer, desc.format, false);
-    mBackendTexture =
-            GrAHardwareBufferUtils::MakeBackendTexture(context, buffer, desc.width, desc.height,
-                                                       &mDeleteProc, &mUpdateProc, &mImageCtx,
-                                                       createProtectedImage, backendFormat,
-                                                       isOutputBuffer);
+    GrBackendFormat backendFormat;
+
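+    // Pick the AHardwareBuffer import path that matches the active Ganesh backend.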
+    GrBackendApi backend = context->backend();
+    if (backend == GrBackendApi::kOpenGL) {
+        backendFormat =
+                GrAHardwareBufferUtils::GetGLBackendFormat(context, desc.format, false);
+        mBackendTexture =
+                GrAHardwareBufferUtils::MakeGLBackendTexture(context,
+                                                             buffer,
+                                                             desc.width,
+                                                             desc.height,
+                                                             &mDeleteProc,
+                                                             &mUpdateProc,
+                                                             &mImageCtx,
+                                                             createProtectedImage,
+                                                             backendFormat,
+                                                             isOutputBuffer);
+    } else if (backend == GrBackendApi::kVulkan) {
+        backendFormat =
+                GrAHardwareBufferUtils::GetVulkanBackendFormat(context,
+                                                               buffer,
+                                                               desc.format,
+                                                               false);
+        mBackendTexture =
+                GrAHardwareBufferUtils::MakeVulkanBackendTexture(context,
+                                                                 buffer,
+                                                                 desc.width,
+                                                                 desc.height,
+                                                                 &mDeleteProc,
+                                                                 &mUpdateProc,
+                                                                 &mImageCtx,
+                                                                 createProtectedImage,
+                                                                 backendFormat,
+                                                                 isOutputBuffer);
+    } else {
+        LOG_ALWAYS_FATAL("Unexpected backend %d", static_cast<int>(backend));
+    }
+
     mColorType = GrAHardwareBufferUtils::GetSkColorTypeFromBufferFormat(desc.format);
     if (!mBackendTexture.isValid() || !desc.width || !desc.height) {
         LOG_ALWAYS_FATAL("Failed to create a valid texture. [%p]:[%d,%d] isProtected:%d "
@@ -94,7 +125,7 @@
     switch (tex.backend()) {
         case GrBackendApi::kOpenGL: {
             GrGLTextureInfo textureInfo;
-            bool retrievedTextureInfo = tex.getGLTextureInfo(&textureInfo);
+            bool retrievedTextureInfo = GrBackendTextures::GetGLTextureInfo(tex, &textureInfo);
             LOG_ALWAYS_FATAL("%s isTextureValid:%d dataspace:%d"
                              "\n\tGrBackendTexture: (%i x %i) hasMipmaps: %i isProtected: %i "
                              "texType: %i\n\t\tGrGLTextureInfo: success: %i fTarget: %u fFormat: %u"
diff --git a/libs/renderengine/skia/SkiaGLRenderEngine.cpp b/libs/renderengine/skia/SkiaGLRenderEngine.cpp
index ff598e7..92181d8 100644
--- a/libs/renderengine/skia/SkiaGLRenderEngine.cpp
+++ b/libs/renderengine/skia/SkiaGLRenderEngine.cpp
@@ -251,14 +251,13 @@
 SkiaGLRenderEngine::SkiaGLRenderEngine(const RenderEngineCreationArgs& args, EGLDisplay display,
                                        EGLContext ctxt, EGLSurface placeholder,
                                        EGLContext protectedContext, EGLSurface protectedPlaceholder)
-      : SkiaRenderEngine(args.renderEngineType,
-                         static_cast<PixelFormat>(args.pixelFormat),
-                         args.useColorManagement, args.supportsBackgroundBlur),
+      : SkiaRenderEngine(args.renderEngineType, static_cast<PixelFormat>(args.pixelFormat),
+                         args.supportsBackgroundBlur),
         mEGLDisplay(display),
         mEGLContext(ctxt),
         mPlaceholderSurface(placeholder),
         mProtectedEGLContext(protectedContext),
-        mProtectedPlaceholderSurface(protectedPlaceholder) { }
+        mProtectedPlaceholderSurface(protectedPlaceholder) {}
 
 SkiaGLRenderEngine::~SkiaGLRenderEngine() {
     finishRenderingAndAbandonContext();
diff --git a/libs/renderengine/skia/SkiaRenderEngine.cpp b/libs/renderengine/skia/SkiaRenderEngine.cpp
index 29d8ba7..2225d5f 100644
--- a/libs/renderengine/skia/SkiaRenderEngine.cpp
+++ b/libs/renderengine/skia/SkiaRenderEngine.cpp
@@ -269,10 +269,8 @@
 }
 
 SkiaRenderEngine::SkiaRenderEngine(RenderEngineType type, PixelFormat pixelFormat,
-                                   bool useColorManagement, bool supportsBackgroundBlur)
-      : RenderEngine(type),
-        mDefaultPixelFormat(pixelFormat),
-        mUseColorManagement(useColorManagement) {
+                                   bool supportsBackgroundBlur)
+      : RenderEngine(type), mDefaultPixelFormat(pixelFormat) {
     if (supportsBackgroundBlur) {
         ALOGD("Background Blurs Enabled");
         mBlurFilter = new KawaseBlurFilter();
@@ -511,7 +509,8 @@
         auto effect =
                 shaders::LinearEffect{.inputDataspace = parameters.layer.sourceDataspace,
                                       .outputDataspace = parameters.outputDataSpace,
-                                      .undoPremultipliedAlpha = parameters.undoPremultipliedAlpha};
+                                      .undoPremultipliedAlpha = parameters.undoPremultipliedAlpha,
+                                      .fakeOutputDataspace = parameters.fakeOutputDataspace};
 
         auto effectIter = mRuntimeEffects.find(effect);
         sk_sp<SkRuntimeEffect> runtimeEffect = nullptr;
@@ -907,12 +906,14 @@
                 (display.outputDataspace & ui::Dataspace::TRANSFER_MASK) ==
                         static_cast<int32_t>(ui::Dataspace::TRANSFER_SRGB);
 
-        const ui::Dataspace runtimeEffectDataspace = !dimInLinearSpace && isExtendedHdr
+        const bool useFakeOutputDataspaceForRuntimeEffect = !dimInLinearSpace && isExtendedHdr;
+
+        const ui::Dataspace fakeDataspace = useFakeOutputDataspaceForRuntimeEffect
                 ? static_cast<ui::Dataspace>(
                           (display.outputDataspace & ui::Dataspace::STANDARD_MASK) |
                           ui::Dataspace::TRANSFER_GAMMA2_2 |
                           (display.outputDataspace & ui::Dataspace::RANGE_MASK))
-                : display.outputDataspace;
+                : ui::Dataspace::UNKNOWN;
 
         // If the input dataspace is range extended, the output dataspace transfer is sRGB
         // and dimmingStage is GAMMA_OETF, dim in linear space instead, and
@@ -923,8 +924,7 @@
         // luminance in linear space, which color pipelines request GAMMA_OETF break
         // without a gamma 2.2 fixup.
         const bool requiresLinearEffect = layer.colorTransform != mat4() ||
-                (mUseColorManagement &&
-                 needsToneMapping(layer.sourceDataspace, display.outputDataspace)) ||
+                (needsToneMapping(layer.sourceDataspace, display.outputDataspace)) ||
                 (dimInLinearSpace && !equalsWithinMargin(1.f, layerDimmingRatio)) ||
                 (!dimInLinearSpace && isExtendedHdr);
 
@@ -935,10 +935,7 @@
             continue;
         }
 
-        // If color management is disabled, then mark the source image with the same colorspace as
-        // the destination surface so that Skia's color management is a no-op.
-        const ui::Dataspace layerDataspace =
-                !mUseColorManagement ? display.outputDataspace : layer.sourceDataspace;
+        const ui::Dataspace layerDataspace = layer.sourceDataspace;
 
         SkPaint paint;
         if (layer.source.buffer.buffer) {
@@ -1019,7 +1016,8 @@
                                                   .layerDimmingRatio = dimInLinearSpace
                                                           ? layerDimmingRatio
                                                           : 1.f,
-                                                  .outputDataSpace = runtimeEffectDataspace}));
+                                                  .outputDataSpace = display.outputDataspace,
+                                                  .fakeOutputDataspace = fakeDataspace}));
 
             // Turn on dithering when dimming beyond this (arbitrary) threshold...
             static constexpr float kDimmingThreshold = 0.2f;
@@ -1083,7 +1081,8 @@
                                                   .undoPremultipliedAlpha = false,
                                                   .requiresLinearEffect = requiresLinearEffect,
                                                   .layerDimmingRatio = layerDimmingRatio,
-                                                  .outputDataSpace = runtimeEffectDataspace}));
+                                                  .outputDataSpace = display.outputDataspace,
+                                                  .fakeOutputDataspace = fakeDataspace}));
         }
 
         if (layer.disableBlending) {
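Note on the dataspace plumbing above: instead of handing the runtime effect a doctored output dataspace, each layer now always carries the real display.outputDataspace plus a separate fakeOutputDataspace that stays UNKNOWN unless the gamma 2.2 dimming fixup applies. A minimal standalone sketch of the same bit composition; the mask values below are invented stand-ins for the real ui::Dataspace constants.

    // Sketch of composing the gamma-2.2 "fake" dataspace above; mask values are invented.
    #include <cstdint>
    #include <cstdio>

    enum DataspaceBits : uint32_t {          // hypothetical stand-ins for ui::Dataspace fields
        kStandardMask    = 0x0000ffff,
        kTransferMask    = 0x00ff0000,
        kRangeMask       = 0xff000000,
        kTransferGamma22 = 0x00050000,
        kUnknown         = 0,
    };

    static uint32_t fakeDataspaceFor(uint32_t outputDataspace, bool dimInLinearSpace,
                                     bool isExtendedHdr) {
        const bool useFake = !dimInLinearSpace && isExtendedHdr;
        if (!useFake) return kUnknown;  // common case: no override
        // Keep the standard (primaries) and range bits, force a gamma 2.2 transfer.
        return (outputDataspace & kStandardMask) | kTransferGamma22 |
                (outputDataspace & kRangeMask);
    }

    int main() {
        const uint32_t display = 0x11020001;  // arbitrary packed dataspace for the demo
        std::printf("fake=0x%08x\n",
                    static_cast<unsigned>(fakeDataspaceFor(display, /*dimInLinearSpace=*/false,
                                                           /*isExtendedHdr=*/true)));
        return 0;
    }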
diff --git a/libs/renderengine/skia/SkiaRenderEngine.h b/libs/renderengine/skia/SkiaRenderEngine.h
index 6457bfa..7b4a0a0 100644
--- a/libs/renderengine/skia/SkiaRenderEngine.h
+++ b/libs/renderengine/skia/SkiaRenderEngine.h
@@ -59,10 +59,7 @@
 class SkiaRenderEngine : public RenderEngine {
 public:
     static std::unique_ptr<SkiaRenderEngine> create(const RenderEngineCreationArgs& args);
-    SkiaRenderEngine(RenderEngineType type,
-                     PixelFormat pixelFormat,
-                     bool useColorManagement,
-                     bool supportsBackgroundBlur);
+    SkiaRenderEngine(RenderEngineType type, PixelFormat pixelFormat, bool supportsBackgroundBlur);
     ~SkiaRenderEngine() override;
 
     std::future<void> primeCache() override final;
@@ -157,11 +154,11 @@
         bool requiresLinearEffect;
         float layerDimmingRatio;
         const ui::Dataspace outputDataSpace;
+        const ui::Dataspace fakeOutputDataspace;
     };
     sk_sp<SkShader> createRuntimeEffectShader(const RuntimeEffectShaderParameters&);
 
     const PixelFormat mDefaultPixelFormat;
-    const bool mUseColorManagement;
 
     // Identifier used for various mappings of layers to various
     // textures or shaders
diff --git a/libs/renderengine/skia/SkiaVkRenderEngine.cpp b/libs/renderengine/skia/SkiaVkRenderEngine.cpp
index c16586b..6ecc6ab 100644
--- a/libs/renderengine/skia/SkiaVkRenderEngine.cpp
+++ b/libs/renderengine/skia/SkiaVkRenderEngine.cpp
@@ -592,7 +592,7 @@
 
 SkiaVkRenderEngine::SkiaVkRenderEngine(const RenderEngineCreationArgs& args)
       : SkiaRenderEngine(args.renderEngineType, static_cast<PixelFormat>(args.pixelFormat),
-                         args.useColorManagement, args.supportsBackgroundBlur) {}
+                         args.supportsBackgroundBlur) {}
 
 SkiaVkRenderEngine::~SkiaVkRenderEngine() {
     finishRenderingAndAbandonContext();
diff --git a/libs/renderengine/skia/filters/KawaseBlurFilter.cpp b/libs/renderengine/skia/filters/KawaseBlurFilter.cpp
index 0c7335c..7bf2b0c 100644
--- a/libs/renderengine/skia/filters/KawaseBlurFilter.cpp
+++ b/libs/renderengine/skia/filters/KawaseBlurFilter.cpp
@@ -61,25 +61,7 @@
 
 // Draws the given runtime shader on a GPU (Ganesh) surface and returns the result as an
 // SkImage.
-static sk_sp<SkImage> makeImage(GrRecordingContext* context, SkRuntimeShaderBuilder* builder,
-                                const SkImageInfo& resultInfo) {
-    if (resultInfo.alphaType() == kUnpremul_SkAlphaType ||
-        resultInfo.alphaType() == kUnknown_SkAlphaType) {
-        return nullptr;
-    }
-    constexpr int kSampleCount = 1;
-    constexpr bool kMipmapped = false;
-
-    sk_sp<SkSurface> surface = SkSurfaces::RenderTarget(context,
-                                                        skgpu::Budgeted::kYes,
-                                                        resultInfo,
-                                                        kSampleCount,
-                                                        kTopLeft_GrSurfaceOrigin,
-                                                        nullptr,
-                                                        kMipmapped);
-    if (!surface) {
-        return nullptr;
-    }
+static sk_sp<SkImage> makeImage(SkSurface* surface, SkRuntimeShaderBuilder* builder) {
     sk_sp<SkShader> shader = builder->makeShader(nullptr);
     if (!shader) {
         return nullptr;
@@ -97,11 +79,16 @@
                                           const SkRect& blurRect) const {
     LOG_ALWAYS_FATAL_IF(context == nullptr, "%s: Needs GPU context", __func__);
     LOG_ALWAYS_FATAL_IF(input == nullptr, "%s: Invalid input image", __func__);
+
+    if (blurRadius == 0) {
+        return input;
+    }
+
     // Kawase is an approximation of Gaussian, but it behaves differently from it.
     // A radius transformation is required for approximating them, and also to introduce
     // non-integer steps, necessary to smoothly interpolate large radii.
     float tmpRadius = (float)blurRadius / 2.0f;
-    float numberOfPasses = std::min(kMaxPasses, (uint32_t)ceil(tmpRadius));
+    uint32_t numberOfPasses = std::min(kMaxPasses, (uint32_t)ceil(tmpRadius));
     float radiusByPasses = tmpRadius / (float)numberOfPasses;
 
     // create blur surface with the bit depth and colorspace of the original surface
@@ -121,15 +108,33 @@
             input->makeShader(SkTileMode::kClamp, SkTileMode::kClamp, linear, blurMatrix);
     blurBuilder.uniform("in_blurOffset") = radiusByPasses * kInputScale;
 
-    sk_sp<SkImage> tmpBlur = makeImage(context, &blurBuilder, scaledInfo);
+    constexpr int kSampleCount = 1;
+    constexpr bool kMipmapped = false;
+    constexpr SkSurfaceProps* kProps = nullptr;
+    sk_sp<SkSurface> surface =
+            SkSurfaces::RenderTarget(context, skgpu::Budgeted::kYes, scaledInfo, kSampleCount,
+                                     kTopLeft_GrSurfaceOrigin, kProps, kMipmapped);
+    LOG_ALWAYS_FATAL_IF(!surface, "%s: Failed to create surface for blurring!", __func__);
+    sk_sp<SkImage> tmpBlur = makeImage(surface.get(), &blurBuilder);
 
-    // And now we'll build our chain of scaled blur stages
-    for (auto i = 1; i < numberOfPasses; i++) {
-        LOG_ALWAYS_FATAL_IF(tmpBlur == nullptr, "%s: tmpBlur is null for pass %d", __func__, i);
-        blurBuilder.child("child") =
-                tmpBlur->makeShader(SkTileMode::kClamp, SkTileMode::kClamp, linear);
-        blurBuilder.uniform("in_blurOffset") = (float) i * radiusByPasses * kInputScale;
-        tmpBlur = makeImage(context, &blurBuilder, scaledInfo);
+    // And now we'll build our chain of scaled blur stages. If there is more than one pass,
+    // create a second surface and ping pong between them.
+    sk_sp<SkSurface> surfaceTwo;
+    if (numberOfPasses <= 1) {
+        LOG_ALWAYS_FATAL_IF(tmpBlur == nullptr, "%s: tmpBlur is null", __func__);
+    } else {
+        surfaceTwo = surface->makeSurface(scaledInfo);
+        LOG_ALWAYS_FATAL_IF(!surfaceTwo, "%s: Failed to create second blur surface!", __func__);
+
+        for (auto i = 1; i < numberOfPasses; i++) {
+            LOG_ALWAYS_FATAL_IF(tmpBlur == nullptr, "%s: tmpBlur is null for pass %d", __func__, i);
+            blurBuilder.child("child") =
+                    tmpBlur->makeShader(SkTileMode::kClamp, SkTileMode::kClamp, linear);
+            blurBuilder.uniform("in_blurOffset") = (float) i * radiusByPasses * kInputScale;
+            tmpBlur = makeImage(surfaceTwo.get(), &blurBuilder);
+            using std::swap;
+            swap(surface, surfaceTwo);
+        }
     }
 
     return tmpBlur;
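For readers unfamiliar with the ping-pong pattern the new loop relies on, here is a standalone sketch of the same idea with plain buffers instead of SkSurface/SkImage. Only kMaxPasses, the radius-to-pass mapping, and the swap mirror the hunk above; the box-blur kernel and buffer sizes are purely illustrative.

    // Standalone sketch of the ping-pong pass chain used above (illustrative only).
    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    static constexpr uint32_t kMaxPasses = 4;  // mirrors the filter's pass cap

    // Hypothetical single pass: a 3-tap box blur standing in for the Kawase shader.
    static void blurPass(const std::vector<float>& src, std::vector<float>& dst) {
        const size_t n = src.size();
        for (size_t i = 0; i < n; ++i) {
            const float l = src[i == 0 ? 0 : i - 1];
            const float r = src[i + 1 == n ? n - 1 : i + 1];
            dst[i] = (l + src[i] + r) / 3.0f;
        }
    }

    int main() {
        const uint32_t blurRadius = 7;
        // Same radius-to-pass mapping as the filter: radius/2, capped at kMaxPasses.
        const uint32_t numberOfPasses =
                std::min(kMaxPasses, static_cast<uint32_t>(std::ceil(blurRadius / 2.0f)));

        std::vector<float> ping(64, 0.0f);
        ping[32] = 1.0f;                       // an impulse to blur
        std::vector<float> pong(ping.size());  // second buffer, the "other" surface

        for (uint32_t i = 0; i < numberOfPasses; ++i) {
            blurPass(ping, pong);   // read from one buffer, write into the other
            std::swap(ping, pong);  // ping-pong: the result becomes the next input
        }
        std::printf("passes=%u center=%f\n", static_cast<unsigned>(numberOfPasses), ping[32]);
        return 0;
    }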
diff --git a/libs/renderengine/tests/RenderEngineTest.cpp b/libs/renderengine/tests/RenderEngineTest.cpp
index f3f2da8..1ad0fa6 100644
--- a/libs/renderengine/tests/RenderEngineTest.cpp
+++ b/libs/renderengine/tests/RenderEngineTest.cpp
@@ -109,7 +109,6 @@
     virtual renderengine::RenderEngine::RenderEngineType type() = 0;
     virtual std::unique_ptr<renderengine::RenderEngine> createRenderEngine() = 0;
     virtual bool typeSupported() = 0;
-    virtual bool useColorManagement() const = 0;
 };
 
 class SkiaVkRenderEngineFactory : public RenderEngineFactory {
@@ -130,13 +129,11 @@
                 renderengine::RenderEngineCreationArgs::Builder()
                         .setPixelFormat(static_cast<int>(ui::PixelFormat::RGBA_8888))
                         .setImageCacheSize(1)
-                        .setUseColorManagerment(false)
                         .setEnableProtectedContext(false)
                         .setPrecacheToneMapperShaderOnly(false)
                         .setSupportsBackgroundBlur(true)
                         .setContextPriority(renderengine::RenderEngine::ContextPriority::MEDIUM)
                         .setRenderEngineType(type())
-                        .setUseColorManagerment(useColorManagement())
                         .build();
         return renderengine::skia::SkiaVkRenderEngine::create(reCreationArgs);
     }
@@ -144,14 +141,9 @@
     bool typeSupported() override {
         return skia::SkiaVkRenderEngine::canSupportSkiaVkRenderEngine();
     }
-    bool useColorManagement() const override { return false; }
     void skip() { GTEST_SKIP(); }
 };
 
-class SkiaVkCMRenderEngineFactory : public SkiaVkRenderEngineFactory {
-public:
-    bool useColorManagement() const override { return true; }
-};
 class SkiaGLESRenderEngineFactory : public RenderEngineFactory {
 public:
     std::string name() override { return "SkiaGLRenderEngineFactory"; }
@@ -170,13 +162,11 @@
                         .setSupportsBackgroundBlur(true)
                         .setContextPriority(renderengine::RenderEngine::ContextPriority::MEDIUM)
                         .setRenderEngineType(type())
-                        .setUseColorManagerment(useColorManagement())
                         .build();
         return renderengine::skia::SkiaGLRenderEngine::create(reCreationArgs);
     }
 
     bool typeSupported() override { return true; }
-    bool useColorManagement() const override { return false; }
 };
 
 class SkiaGLESCMRenderEngineFactory : public RenderEngineFactory {
@@ -197,13 +187,11 @@
                         .setSupportsBackgroundBlur(true)
                         .setContextPriority(renderengine::RenderEngine::ContextPriority::MEDIUM)
                         .setRenderEngineType(type())
-                        .setUseColorManagerment(useColorManagement())
                         .build();
         return renderengine::skia::SkiaGLRenderEngine::create(reCreationArgs);
     }
 
     bool typeSupported() override { return true; }
-    bool useColorManagement() const override { return true; }
 };
 
 class RenderEngineTest : public ::testing::TestWithParam<std::shared_ptr<RenderEngineFactory>> {
@@ -1559,9 +1547,7 @@
 
 INSTANTIATE_TEST_SUITE_P(PerRenderEngineType, RenderEngineTest,
                          testing::Values(std::make_shared<SkiaGLESRenderEngineFactory>(),
-                                         std::make_shared<SkiaGLESCMRenderEngineFactory>(),
-                                         std::make_shared<SkiaVkRenderEngineFactory>(),
-                                         std::make_shared<SkiaVkCMRenderEngineFactory>()));
+                                         std::make_shared<SkiaVkRenderEngineFactory>()));
 
 TEST_P(RenderEngineTest, drawLayers_noLayersToDraw) {
     if (!GetParam()->typeSupported()) {
@@ -1745,7 +1731,7 @@
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransform_sourceDataspace) {
     const auto& renderEngineFactory = GetParam();
     // skip for non color management
-    if (!renderEngineFactory->typeSupported() || !renderEngineFactory->useColorManagement()) {
+    if (!renderEngineFactory->typeSupported()) {
         GTEST_SKIP();
     }
 
@@ -1756,7 +1742,7 @@
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransform_outputDataspace) {
     const auto& renderEngineFactory = GetParam();
     // skip for non color management
-    if (!renderEngineFactory->typeSupported() || !renderEngineFactory->useColorManagement()) {
+    if (!renderEngineFactory->typeSupported()) {
         GTEST_SKIP();
     }
 
@@ -1895,7 +1881,7 @@
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransformAndSourceDataspace_opaqueBufferSource) {
     const auto& renderEngineFactory = GetParam();
     // skip for non color management
-    if (!renderEngineFactory->typeSupported() || !renderEngineFactory->useColorManagement()) {
+    if (!renderEngineFactory->typeSupported()) {
         GTEST_SKIP();
     }
 
@@ -1906,7 +1892,7 @@
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransformAndOutputDataspace_opaqueBufferSource) {
     const auto& renderEngineFactory = GetParam();
     // skip for non color management
-    if (!renderEngineFactory->typeSupported() || !renderEngineFactory->useColorManagement()) {
+    if (!renderEngineFactory->typeSupported()) {
         GTEST_SKIP();
     }
 
@@ -2045,7 +2031,7 @@
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransformAndSourceDataspace_bufferSource) {
     const auto& renderEngineFactory = GetParam();
     // skip for non color management
-    if (!renderEngineFactory->typeSupported() || !renderEngineFactory->useColorManagement()) {
+    if (!renderEngineFactory->typeSupported()) {
         GTEST_SKIP();
     }
 
@@ -2056,7 +2042,7 @@
 TEST_P(RenderEngineTest, drawLayers_fillBufferColorTransformAndOutputDataspace_bufferSource) {
     const auto& renderEngineFactory = GetParam();
     // skip for non color management
-    if (!renderEngineFactory->typeSupported() || !renderEngineFactory->useColorManagement()) {
+    if (!renderEngineFactory->typeSupported()) {
         GTEST_SKIP();
     }
 
@@ -2592,10 +2578,6 @@
         GTEST_SKIP();
     }
 
-    if (!GetParam()->useColorManagement()) {
-        GTEST_SKIP();
-    }
-
     initializeRenderEngine();
 
     const ui::Dataspace dataspace = ui::Dataspace::V0_SRGB;
@@ -3017,15 +2999,11 @@
     std::vector<renderengine::LayerSettings> layers{greenLayer};
     invokeDraw(display, layers);
 
-    if (GetParam()->useColorManagement()) {
-        expectBufferColor(rect, 117, 251, 76, 255);
-    } else {
-        expectBufferColor(rect, 0, 255, 0, 255);
-    }
+    expectBufferColor(rect, 117, 251, 76, 255);
 }
 
 TEST_P(RenderEngineTest, test_tonemapPQMatches) {
-    if (!GetParam()->typeSupported() || !GetParam()->useColorManagement()) {
+    if (!GetParam()->typeSupported()) {
         GTEST_SKIP();
     }
 
@@ -3042,7 +3020,7 @@
 }
 
 TEST_P(RenderEngineTest, test_tonemapHLGMatches) {
-    if (!GetParam()->typeSupported() || !GetParam()->useColorManagement()) {
+    if (!GetParam()->typeSupported()) {
         GTEST_SKIP();
     }
 
@@ -3262,9 +3240,9 @@
         fut.wait();
     }
 
-    const int minimumExpectedShadersCompiled = GetParam()->useColorManagement() ? 60 : 30;
+    static constexpr int kMinimumExpectedShadersCompiled = 60;
     ASSERT_GT(static_cast<skia::SkiaGLRenderEngine*>(mRE.get())->reportShadersCompiled(),
-              minimumExpectedShadersCompiled);
+              kMinimumExpectedShadersCompiled);
 }
 } // namespace renderengine
 } // namespace android
diff --git a/libs/shaders/shaders.cpp b/libs/shaders/shaders.cpp
index c85517a..ef039e5 100644
--- a/libs/shaders/shaders.cpp
+++ b/libs/shaders/shaders.cpp
@@ -168,8 +168,8 @@
 void generateOETF(std::string& shader) {
     // Only support gamma 2.2 for now
     shader.append(R"(
-        float OETF(float3 linear) {
-            return sign(linear) * pow(abs(linear), (1.0 / 2.2));
+        float3 OETF(float3 linear) {
+            return sign(linear) * pow(abs(linear), float3(1.0 / 2.2));
         }
     )");
 }
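The shader fix above makes OETF return a float3 and applies the pow per channel. A scalar C++ equivalent of the corrected gamma 2.2 encode, purely as an illustration:

    // C++ equivalent of the corrected per-channel gamma 2.2 OETF shader above.
    #include <cmath>
    #include <cstdio>

    struct Float3 { float r, g, b; };

    static float oetfChannel(float linear) {
        // sign() * pow(abs(x), 1/2.2), applied independently per channel.
        const float s = linear < 0.0f ? -1.0f : 1.0f;
        return s * std::pow(std::fabs(linear), 1.0f / 2.2f);
    }

    static Float3 oetf(Float3 linear) {
        return {oetfChannel(linear.r), oetfChannel(linear.g), oetfChannel(linear.b)};
    }

    int main() {
        Float3 out = oetf({0.25f, 0.5f, 1.0f});
        std::printf("%.3f %.3f %.3f\n", out.r, out.g, out.b);
        return 0;
    }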
diff --git a/libs/ultrahdr/fuzzer/ultrahdr_dec_fuzzer.cpp b/libs/ultrahdr/fuzzer/ultrahdr_dec_fuzzer.cpp
index ad1d57a..f1f4035 100644
--- a/libs/ultrahdr/fuzzer/ultrahdr_dec_fuzzer.cpp
+++ b/libs/ultrahdr/fuzzer/ultrahdr_dec_fuzzer.cpp
@@ -54,7 +54,7 @@
     std::cout << "input buffer size " << jpegImgR.length << std::endl;
     std::cout << "image dimensions " << info.width << " x " << info.height << std::endl;
 #endif
-    size_t outSize = info.width * info.height * ((of == ULTRAHDR_OUTPUT_SDR) ? 4 : 8);
+    size_t outSize = info.width * info.height * ((of == ULTRAHDR_OUTPUT_HDR_LINEAR) ? 8 : 4);
     jpegr_uncompressed_struct decodedJpegR;
     auto decodedRaw = std::make_unique<uint8_t[]>(outSize);
     decodedJpegR.data = decodedRaw.get();
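The new outSize expression allocates 4 bytes per pixel for every output format except ULTRAHDR_OUTPUT_HDR_LINEAR, which decodes to 8-byte half-float pixels; the previous expression special-cased only SDR and so over-allocated for the 10-bit HDR outputs. A throwaway sketch of that sizing rule, with a made-up enum standing in for ultrahdr_output_format:

    // Sketch of the per-format output sizing used above; the enum is an assumption
    // standing in for ultrahdr_output_format.
    #include <cstddef>
    #include <cstdio>

    enum Format { kSdr, kHdrLinear, kHdrPq, kHdrHlg };  // hypothetical mirror of the enum

    static size_t bytesPerPixel(Format of) {
        // Only the linear-HDR path decodes to RGBA_F16 (8 bytes); SDR and the
        // 10-bit HDR outputs (RGBA_8888 / RGBA_1010102) use 4 bytes per pixel.
        return of == kHdrLinear ? 8 : 4;
    }

    int main() {
        const size_t width = 640, height = 480;
        std::printf("SDR buffer: %zu bytes\n", width * height * bytesPerPixel(kSdr));
        std::printf("HDR linear buffer: %zu bytes\n", width * height * bytesPerPixel(kHdrLinear));
        return 0;
    }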
diff --git a/libs/ultrahdr/fuzzer/ultrahdr_enc_fuzzer.cpp b/libs/ultrahdr/fuzzer/ultrahdr_enc_fuzzer.cpp
index bbe58e0..bf9b031 100644
--- a/libs/ultrahdr/fuzzer/ultrahdr_enc_fuzzer.cpp
+++ b/libs/ultrahdr/fuzzer/ultrahdr_enc_fuzzer.cpp
@@ -23,22 +23,12 @@
 
 // User include files
 #include "ultrahdr/gainmapmath.h"
+#include "ultrahdr/jpegdecoderhelper.h"
 #include "ultrahdr/jpegencoderhelper.h"
 #include "utils/Log.h"
 
 using namespace android::ultrahdr;
 
-// constants
-const int kMinWidth = 8;
-const int kMaxWidth = 7680;
-
-const int kMinHeight = 8;
-const int kMaxHeight = 4320;
-
-const int kScaleFactor = 4;
-
-const int kJpegBlock = 16;
-
 // Color gamuts for image data, sync with ultrahdr.h
 const int kCgMin = ULTRAHDR_COLORGAMUT_UNSPECIFIED + 1;
 const int kCgMax = ULTRAHDR_COLORGAMUT_MAX;
@@ -60,7 +50,7 @@
     UltraHdrEncFuzzer(const uint8_t* data, size_t size) : mFdp(data, size){};
     void process();
     void fillP010Buffer(uint16_t* data, int width, int height, int stride);
-    void fill420Buffer(uint8_t* data, int size);
+    void fill420Buffer(uint8_t* data, int width, int height, int stride);
 
 private:
     FuzzedDataProvider mFdp;
@@ -70,11 +60,12 @@
     uint16_t* tmp = data;
     std::vector<uint16_t> buffer(16);
     for (int i = 0; i < buffer.size(); i++) {
-        buffer[i] = mFdp.ConsumeIntegralInRange<int>(0, (1 << 10) - 1);
+        buffer[i] = (mFdp.ConsumeIntegralInRange<int>(0, (1 << 10) - 1)) << 6;
     }
     for (int j = 0; j < height; j++) {
         for (int i = 0; i < width; i += buffer.size()) {
-            memcpy(data + i, buffer.data(), std::min((int)buffer.size(), (width - i)));
+            memcpy(tmp + i, buffer.data(),
+                   std::min((int)buffer.size(), (width - i)) * sizeof(*data));
             std::shuffle(buffer.begin(), buffer.end(),
                          std::default_random_engine(std::random_device{}()));
         }
@@ -82,13 +73,18 @@
     }
 }
 
-void UltraHdrEncFuzzer::fill420Buffer(uint8_t* data, int size) {
+void UltraHdrEncFuzzer::fill420Buffer(uint8_t* data, int width, int height, int stride) {
+    uint8_t* tmp = data;
     std::vector<uint8_t> buffer(16);
     mFdp.ConsumeData(buffer.data(), buffer.size());
-    for (int i = 0; i < size; i += buffer.size()) {
-        memcpy(data + i, buffer.data(), std::min((int)buffer.size(), (size - i)));
-        std::shuffle(buffer.begin(), buffer.end(),
-                     std::default_random_engine(std::random_device{}()));
+    for (int j = 0; j < height; j++) {
+        for (int i = 0; i < width; i += buffer.size()) {
+            memcpy(tmp + i, buffer.data(),
+                   std::min((int)buffer.size(), (width - i)) * sizeof(*data));
+            std::shuffle(buffer.begin(), buffer.end(),
+                         std::default_random_engine(std::random_device{}()));
+        }
+        tmp += stride;
     }
 }
 
@@ -129,9 +125,10 @@
         int height = mFdp.ConsumeIntegralInRange<int>(kMinHeight, kMaxHeight);
         height = (height >> 1) << 1;
 
-        std::unique_ptr<uint16_t[]> bufferY = nullptr;
-        std::unique_ptr<uint16_t[]> bufferUV = nullptr;
-        std::unique_ptr<uint8_t[]> yuv420ImgRaw = nullptr;
+        std::unique_ptr<uint16_t[]> bufferYHdr = nullptr;
+        std::unique_ptr<uint16_t[]> bufferUVHdr = nullptr;
+        std::unique_ptr<uint8_t[]> bufferYSdr = nullptr;
+        std::unique_ptr<uint8_t[]> bufferUVSdr = nullptr;
         std::unique_ptr<uint8_t[]> grayImgRaw = nullptr;
         if (muxSwitch != 4) {
             // init p010 image
@@ -145,30 +142,29 @@
             int bppP010 = 2;
             if (isUVContiguous) {
                 size_t p010Size = yStride * height * 3 / 2;
-                bufferY = std::make_unique<uint16_t[]>(p010Size);
-                p010Img.data = bufferY.get();
+                bufferYHdr = std::make_unique<uint16_t[]>(p010Size);
+                p010Img.data = bufferYHdr.get();
                 p010Img.chroma_data = nullptr;
                 p010Img.chroma_stride = 0;
-                fillP010Buffer(bufferY.get(), width, height, yStride);
-                fillP010Buffer(bufferY.get() + yStride * height, width, height / 2, yStride);
+                fillP010Buffer(bufferYHdr.get(), width, height, yStride);
+                fillP010Buffer(bufferYHdr.get() + yStride * height, width, height / 2, yStride);
             } else {
                 int uvStride = mFdp.ConsumeIntegralInRange<int>(width, width + 128);
                 size_t p010YSize = yStride * height;
-                bufferY = std::make_unique<uint16_t[]>(p010YSize);
-                p010Img.data = bufferY.get();
-                fillP010Buffer(bufferY.get(), width, height, yStride);
+                bufferYHdr = std::make_unique<uint16_t[]>(p010YSize);
+                p010Img.data = bufferYHdr.get();
+                fillP010Buffer(bufferYHdr.get(), width, height, yStride);
                 size_t p010UVSize = uvStride * p010Img.height / 2;
-                bufferUV = std::make_unique<uint16_t[]>(p010UVSize);
-                p010Img.chroma_data = bufferUV.get();
+                bufferUVHdr = std::make_unique<uint16_t[]>(p010UVSize);
+                p010Img.chroma_data = bufferUVHdr.get();
                 p010Img.chroma_stride = uvStride;
-                fillP010Buffer(bufferUV.get(), width, height / 2, uvStride);
+                fillP010Buffer(bufferUVHdr.get(), width, height / 2, uvStride);
             }
         } else {
-            int map_width = width / kScaleFactor;
-            int map_height = height / kScaleFactor;
-            map_width = static_cast<size_t>(floor((map_width + kJpegBlock - 1) / kJpegBlock)) *
-                    kJpegBlock;
-            map_height = ((map_height + 1) >> 1) << 1;
+            size_t map_width = static_cast<size_t>(
+                    floor((width + kMapDimensionScaleFactor - 1) / kMapDimensionScaleFactor));
+            size_t map_height = static_cast<size_t>(
+                    floor((height + kMapDimensionScaleFactor - 1) / kMapDimensionScaleFactor));
             // init 400 image
             grayImg.width = map_width;
             grayImg.height = map_height;
@@ -177,7 +173,7 @@
             const size_t graySize = map_width * map_height;
             grayImgRaw = std::make_unique<uint8_t[]>(graySize);
             grayImg.data = grayImgRaw.get();
-            fill420Buffer(grayImgRaw.get(), graySize);
+            fill420Buffer(grayImgRaw.get(), map_width, map_height, map_width);
             grayImg.chroma_data = nullptr;
             grayImg.luma_stride = 0;
             grayImg.chroma_stride = 0;
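The gain-map dimensions are now derived by ceiling-dividing the full-resolution width and height by kMapDimensionScaleFactor, rather than flooring and then padding back up to a JPEG block. A quick standalone check of that arithmetic (input values are arbitrary):

    // Worked check of the gain-map sizing above (ceiling division by the scale factor).
    #include <cstdio>

    static const size_t kMapDimensionScaleFactor = 4;  // matches ultrahdr/jpegr.h in this change

    static size_t mapDim(size_t imageDim) {
        // Equivalent to floor((d + f - 1) / f), i.e. ceil(d / f) in integer math.
        return (imageDim + kMapDimensionScaleFactor - 1) / kMapDimensionScaleFactor;
    }

    int main() {
        std::printf("width 1920 -> %zu, width 1921 -> %zu\n", mapDim(1920), mapDim(1921));  // 480, 481
        return 0;
    }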
@@ -185,17 +181,38 @@
 
         if (muxSwitch > 0) {
             // init 420 image
+            bool isUVContiguous = mFdp.ConsumeBool();
+            bool hasYStride = mFdp.ConsumeBool();
+            int yStride = hasYStride ? mFdp.ConsumeIntegralInRange<int>(width, width + 128) : width;
             yuv420Img.width = width;
             yuv420Img.height = height;
             yuv420Img.colorGamut = yuv420Cg;
-
-            const size_t yuv420Size = (yuv420Img.width * yuv420Img.height * 3) / 2;
-            yuv420ImgRaw = std::make_unique<uint8_t[]>(yuv420Size);
-            yuv420Img.data = yuv420ImgRaw.get();
-            fill420Buffer(yuv420ImgRaw.get(), yuv420Size);
-            yuv420Img.chroma_data = nullptr;
-            yuv420Img.luma_stride = 0;
-            yuv420Img.chroma_stride = 0;
+            yuv420Img.luma_stride = hasYStride ? yStride : 0;
+            if (isUVContiguous) {
+                size_t yuv420Size = yStride * height * 3 / 2;
+                bufferYSdr = std::make_unique<uint8_t[]>(yuv420Size);
+                yuv420Img.data = bufferYSdr.get();
+                yuv420Img.chroma_data = nullptr;
+                yuv420Img.chroma_stride = 0;
+                fill420Buffer(bufferYSdr.get(), width, height, yStride);
+                fill420Buffer(bufferYSdr.get() + yStride * height, width / 2, height / 2,
+                              yStride / 2);
+                fill420Buffer(bufferYSdr.get() + yStride * height * 5 / 4, width / 2, height / 2,
+                              yStride / 2);
+            } else {
+                int uvStride = mFdp.ConsumeIntegralInRange<int>(width / 2, width / 2 + 128);
+                size_t yuv420YSize = yStride * height;
+                bufferYSdr = std::make_unique<uint8_t[]>(yuv420YSize);
+                yuv420Img.data = bufferYSdr.get();
+                fill420Buffer(bufferYSdr.get(), width, height, yStride);
+                size_t yuv420UVSize = uvStride * yuv420Img.height / 2 * 2;
+                bufferUVSdr = std::make_unique<uint8_t[]>(yuv420UVSize);
+                yuv420Img.chroma_data = bufferUVSdr.get();
+                yuv420Img.chroma_stride = uvStride;
+                fill420Buffer(bufferUVSdr.get(), width / 2, height / 2, uvStride);
+                fill420Buffer(bufferUVSdr.get() + uvStride * height / 2, width / 2, height / 2,
+                              uvStride);
+            }
         }
 
         // dest
@@ -212,6 +229,8 @@
         std::cout << "p010 luma stride " << p010Img.luma_stride << std::endl;
         std::cout << "p010 chroma stride " << p010Img.chroma_stride << std::endl;
         std::cout << "420 color gamut " << yuv420Img.colorGamut << std::endl;
+        std::cout << "420 luma stride " << yuv420Img.luma_stride << std::endl;
+        std::cout << "420 chroma stride " << yuv420Img.chroma_stride << std::endl;
         std::cout << "quality factor " << quality << std::endl;
 #endif
 
@@ -226,8 +245,19 @@
         } else {
             // compressed img
             JpegEncoderHelper encoder;
-            if (encoder.compressImage(yuv420Img.data, yuv420Img.width, yuv420Img.height, quality,
-                                      nullptr, 0)) {
+            struct jpegr_uncompressed_struct yuv420ImgCopy = yuv420Img;
+            if (yuv420ImgCopy.luma_stride == 0) yuv420ImgCopy.luma_stride = yuv420Img.width;
+            if (!yuv420ImgCopy.chroma_data) {
+                uint8_t* data = reinterpret_cast<uint8_t*>(yuv420Img.data);
+                yuv420ImgCopy.chroma_data = data + yuv420Img.luma_stride * yuv420Img.height;
+                yuv420ImgCopy.chroma_stride = yuv420Img.luma_stride >> 1;
+            }
+
+            if (encoder.compressImage(reinterpret_cast<uint8_t*>(yuv420ImgCopy.data),
+                                      reinterpret_cast<uint8_t*>(yuv420ImgCopy.chroma_data),
+                                      yuv420ImgCopy.width, yuv420ImgCopy.height,
+                                      yuv420ImgCopy.luma_stride, yuv420ImgCopy.chroma_stride,
+                                      quality, nullptr, 0)) {
                 jpegImg.length = encoder.getCompressedImageSize();
                 jpegImg.maxLength = jpegImg.length;
                 jpegImg.data = encoder.getCompressedImagePtr();
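When the caller leaves chroma_data unset, the harness above derives it from the luma plane before invoking the new compressImage signature. A minimal standalone version of that derivation; the struct and field names below only mimic jpegr_uncompressed_struct.

    // Sketch of deriving a missing chroma pointer/stride from the luma plane, as above.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Yuv420Handle {           // illustrative stand-in for jpegr_uncompressed_struct
        uint8_t* data = nullptr;    // luma plane
        uint8_t* chroma_data = nullptr;
        size_t width = 0, height = 0;
        size_t luma_stride = 0, chroma_stride = 0;
    };

    static void fillDefaults(Yuv420Handle& img) {
        if (img.luma_stride == 0) img.luma_stride = img.width;
        if (img.chroma_data == nullptr) {
            // Chroma is assumed to follow the luma plane, at half the luma stride.
            img.chroma_data = img.data + img.luma_stride * img.height;
            img.chroma_stride = img.luma_stride >> 1;
        }
    }

    int main() {
        std::vector<uint8_t> buf(8 * 4 * 3 / 2, 0);
        Yuv420Handle img;
        img.data = buf.data();
        img.width = 8;
        img.height = 4;
        fillDefaults(img);
        std::printf("luma_stride=%zu chroma_stride=%zu chroma_offset=%td\n",
                    img.luma_stride, img.chroma_stride, img.chroma_data - img.data);
        return 0;
    }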
@@ -242,14 +272,15 @@
                 } else if (muxSwitch == 4) { // api 4
                     jpegImgR.length = 0;
                     JpegEncoderHelper gainMapEncoder;
-                    if (gainMapEncoder.compressImage(grayImg.data, grayImg.width, grayImg.height,
-                                                     quality, nullptr, 0, true)) {
+                    if (gainMapEncoder.compressImage(reinterpret_cast<uint8_t*>(grayImg.data),
+                                                     nullptr, grayImg.width, grayImg.height,
+                                                     grayImg.width, 0, quality, nullptr, 0)) {
                         jpegGainMap.length = gainMapEncoder.getCompressedImageSize();
                         jpegGainMap.maxLength = jpegImg.length;
                         jpegGainMap.data = gainMapEncoder.getCompressedImagePtr();
                         jpegGainMap.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
                         ultrahdr_metadata_struct metadata;
-                        metadata.version = "1.0";
+                        metadata.version = kJpegrVersion;
                         if (tf == ULTRAHDR_TF_HLG) {
                             metadata.maxContentBoost = kHlgMaxNits / kSdrWhiteNits;
                         } else if (tf == ULTRAHDR_TF_PQ) {
@@ -274,7 +305,8 @@
             jpegr_info_struct info{0, 0, &iccData, &exifData};
             status = jpegHdr.getJPEGRInfo(&jpegImgR, &info);
             if (status == android::OK) {
-                size_t outSize = info.width * info.height * ((of == ULTRAHDR_OUTPUT_SDR) ? 4 : 8);
+                size_t outSize =
+                        info.width * info.height * ((of == ULTRAHDR_OUTPUT_HDR_LINEAR) ? 8 : 4);
                 jpegr_uncompressed_struct decodedJpegR;
                 auto decodedRaw = std::make_unique<uint8_t[]>(outSize);
                 decodedJpegR.data = decodedRaw.get();
diff --git a/libs/ultrahdr/gainmapmath.cpp b/libs/ultrahdr/gainmapmath.cpp
index 8015a4e..ae9c4ca 100644
--- a/libs/ultrahdr/gainmapmath.cpp
+++ b/libs/ultrahdr/gainmapmath.cpp
@@ -168,7 +168,7 @@
 
 // See IEC 61966-2-1, Equations F.5 and F.6.
 float srgbInvOetfLUT(float e_gamma) {
-  uint32_t value = static_cast<uint32_t>(e_gamma * kSrgbInvOETFNumEntries);
+  uint32_t value = static_cast<uint32_t>(e_gamma * (kSrgbInvOETFNumEntries - 1) + 0.5);
   //TODO() : Remove once conversion modules have appropriate clamping in place
   value = CLIP3(value, 0, kSrgbInvOETFNumEntries - 1);
   return kSrgbInvOETF[value];
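These LUT lookups now scale by (N - 1) and round to nearest instead of truncating e * N, so an input of exactly 1.0 lands on the last table entry rather than one past it (which the CLIP3 previously had to catch). A tiny standalone check with an invented table size:

    // Sketch of the LUT indexing change above; the table size is made up.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    static constexpr uint32_t kNumEntries = 256;  // hypothetical LUT size

    static uint32_t oldIndex(float e) {        // truncates; e == 1.0f yields 256 (out of range)
        return static_cast<uint32_t>(e * kNumEntries);
    }

    static uint32_t newIndex(float e) {        // rounds to the nearest valid entry
        uint32_t v = static_cast<uint32_t>(e * (kNumEntries - 1) + 0.5f);
        return std::min(v, kNumEntries - 1);   // same effect as the CLIP3 in the source
    }

    int main() {
        const float samples[] = {0.0f, 0.5f, 1.0f};
        for (float e : samples) {
            std::printf("e=%.2f old=%u new=%u\n", e, oldIndex(e), newIndex(e));
        }
        return 0;
    }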
@@ -288,7 +288,7 @@
 }
 
 float hlgOetfLUT(float e) {
-  uint32_t value = static_cast<uint32_t>(e * kHlgOETFNumEntries);
+  uint32_t value = static_cast<uint32_t>(e * (kHlgOETFNumEntries - 1) + 0.5);
   //TODO() : Remove once conversion modules have appropriate clamping in place
   value = CLIP3(value, 0, kHlgOETFNumEntries - 1);
 
@@ -315,7 +315,7 @@
 }
 
 float hlgInvOetfLUT(float e_gamma) {
-  uint32_t value = static_cast<uint32_t>(e_gamma * kHlgInvOETFNumEntries);
+  uint32_t value = static_cast<uint32_t>(e_gamma * (kHlgInvOETFNumEntries - 1) + 0.5);
   //TODO() : Remove once conversion modules have appropriate clamping in place
   value = CLIP3(value, 0, kHlgInvOETFNumEntries - 1);
 
@@ -344,7 +344,7 @@
 }
 
 float pqOetfLUT(float e) {
-  uint32_t value = static_cast<uint32_t>(e * kPqOETFNumEntries);
+  uint32_t value = static_cast<uint32_t>(e * (kPqOETFNumEntries - 1) + 0.5);
   //TODO() : Remove once conversion modules have appropriate clamping in place
   value = CLIP3(value, 0, kPqOETFNumEntries - 1);
 
@@ -376,7 +376,7 @@
 }
 
 float pqInvOetfLUT(float e_gamma) {
-  uint32_t value = static_cast<uint32_t>(e_gamma * kPqInvOETFNumEntries);
+  uint32_t value = static_cast<uint32_t>(e_gamma * (kPqInvOETFNumEntries - 1) + 0.5);
   //TODO() : Remove once conversion modules have appropriate clamping in place
   value = CLIP3(value, 0, kPqInvOETFNumEntries - 1);
 
@@ -531,21 +531,21 @@
 
   Color new_uv = (yuv1 + yuv2 + yuv3 + yuv4) / 4.0f;
 
-  size_t pixel_y1_idx =  x_chroma * 2      +  y_chroma * 2      * image->width;
-  size_t pixel_y2_idx = (x_chroma * 2 + 1) +  y_chroma * 2      * image->width;
-  size_t pixel_y3_idx =  x_chroma * 2      + (y_chroma * 2 + 1) * image->width;
-  size_t pixel_y4_idx = (x_chroma * 2 + 1) + (y_chroma * 2 + 1) * image->width;
+  size_t pixel_y1_idx =  x_chroma * 2      +  y_chroma * 2      * image->luma_stride;
+  size_t pixel_y2_idx = (x_chroma * 2 + 1) +  y_chroma * 2      * image->luma_stride;
+  size_t pixel_y3_idx =  x_chroma * 2      + (y_chroma * 2 + 1) * image->luma_stride;
+  size_t pixel_y4_idx = (x_chroma * 2 + 1) + (y_chroma * 2 + 1) * image->luma_stride;
 
   uint8_t& y1_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y1_idx];
   uint8_t& y2_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y2_idx];
   uint8_t& y3_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y3_idx];
   uint8_t& y4_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y4_idx];
 
-  size_t pixel_count = image->width * image->height;
-  size_t pixel_uv_idx = x_chroma + y_chroma * (image->width / 2);
+  size_t pixel_count = image->chroma_stride * image->height / 2;
+  size_t pixel_uv_idx = x_chroma + y_chroma * (image->chroma_stride);
 
-  uint8_t& u_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count + pixel_uv_idx];
-  uint8_t& v_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count * 5 / 4 + pixel_uv_idx];
+  uint8_t& u_uint = reinterpret_cast<uint8_t*>(image->chroma_data)[pixel_uv_idx];
+  uint8_t& v_uint = reinterpret_cast<uint8_t*>(image->chroma_data)[pixel_count + pixel_uv_idx];
 
   y1_uint = static_cast<uint8_t>(CLIP3((yuv1.y * 255.0f + 0.5f), 0, 255));
   y2_uint = static_cast<uint8_t>(CLIP3((yuv2.y * 255.0f + 0.5f), 0, 255));
@@ -598,14 +598,18 @@
 }
 
 Color getYuv420Pixel(jr_uncompressed_ptr image, size_t x, size_t y) {
-  size_t pixel_count = image->width * image->height;
+  uint8_t* luma_data = reinterpret_cast<uint8_t*>(image->data);
+  size_t luma_stride = image->luma_stride;
+  uint8_t* chroma_data = reinterpret_cast<uint8_t*>(image->chroma_data);
+  size_t chroma_stride = image->chroma_stride;
 
-  size_t pixel_y_idx = x + y * image->width;
-  size_t pixel_uv_idx = x / 2 + (y / 2) * (image->width / 2);
+  size_t offset_cr = chroma_stride * (image->height / 2);
+  size_t pixel_y_idx = x + y * luma_stride;
+  size_t pixel_chroma_idx = x / 2 + (y / 2) * chroma_stride;
 
-  uint8_t y_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y_idx];
-  uint8_t u_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count + pixel_uv_idx];
-  uint8_t v_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count * 5 / 4 + pixel_uv_idx];
+  uint8_t y_uint = luma_data[pixel_y_idx];
+  uint8_t u_uint = chroma_data[pixel_chroma_idx];
+  uint8_t v_uint = chroma_data[offset_cr + pixel_chroma_idx];
 
   // 128 bias for UV given we are using jpeglib; see:
   // https://github.com/kornelski/libjpeg/blob/master/structure.doc
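getYuv420Pixel above now addresses the luma and chroma planes through luma_stride / chroma_stride and a separate chroma_data pointer instead of assuming tightly packed planes hanging off a single data pointer. A minimal sketch of the same addressing against a synthetic image; the struct and field names merely mimic jpegr_uncompressed_struct.

    // Minimal sketch of stride-aware YUV420 addressing, mirroring getYuv420Pixel above.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Yuv420 {                    // hypothetical stand-in for jpegr_uncompressed_struct
        std::vector<uint8_t> luma;
        std::vector<uint8_t> chroma;   // U plane followed by V plane, both chroma_stride wide
        size_t width, height, luma_stride, chroma_stride;
    };

    static void getPixel(const Yuv420& img, size_t x, size_t y,
                         uint8_t* outY, uint8_t* outU, uint8_t* outV) {
        const size_t offset_cr = img.chroma_stride * (img.height / 2);  // start of V plane
        const size_t y_idx = x + y * img.luma_stride;
        const size_t c_idx = x / 2 + (y / 2) * img.chroma_stride;
        *outY = img.luma[y_idx];
        *outU = img.chroma[c_idx];
        *outV = img.chroma[offset_cr + c_idx];
    }

    int main() {
        Yuv420 img;
        img.width = 4;
        img.height = 4;
        img.luma_stride = 8;       // stride wider than width, as the new code allows
        img.chroma_stride = 4;
        img.luma.assign(img.luma_stride * img.height, 90);
        img.chroma.assign(img.chroma_stride * img.height, 128);  // U then V, height/2 rows each
        uint8_t yv, uv, vv;
        getPixel(img, 3, 2, &yv, &uv, &vv);
        std::printf("Y=%u U=%u V=%u\n", yv, uv, vv);
        return 0;
    }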
@@ -615,20 +619,10 @@
 }
 
 Color getP010Pixel(jr_uncompressed_ptr image, size_t x, size_t y) {
-  size_t luma_stride = image->luma_stride;
-  size_t chroma_stride = image->chroma_stride;
   uint16_t* luma_data = reinterpret_cast<uint16_t*>(image->data);
+  size_t luma_stride = image->luma_stride == 0 ? image->width : image->luma_stride;
   uint16_t* chroma_data = reinterpret_cast<uint16_t*>(image->chroma_data);
-
-  if (luma_stride == 0) {
-    luma_stride = image->width;
-  }
-  if (chroma_stride == 0) {
-    chroma_stride = luma_stride;
-  }
-  if (chroma_data == nullptr) {
-    chroma_data = &reinterpret_cast<uint16_t*>(image->data)[luma_stride * image->height];
-  }
+  size_t chroma_stride = image->chroma_stride;
 
   size_t pixel_y_idx = y * luma_stride + x;
   size_t pixel_u_idx = (y >> 1) * chroma_stride + (x & ~0x1);
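In getP010Pixel the UV plane is interleaved, so the U sample for pixel (x, y) sits at row y/2, at the column x rounded down to even, with V in the next slot. A tiny check of that indexing (buffer contents are fabricated):

    // Sketch of P010-style interleaved UV addressing used above (values fabricated).
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
        const size_t width = 4, height = 4;
        const size_t chroma_stride = width;  // each UV row holds interleaved U,V pairs
        std::vector<uint16_t> uv(chroma_stride * height / 2);
        for (size_t i = 0; i < uv.size(); ++i) uv[i] = static_cast<uint16_t>(i);

        const size_t x = 3, y = 2;
        const size_t u_idx = (y >> 1) * chroma_stride + (x & ~static_cast<size_t>(1));  // even column
        const size_t v_idx = u_idx + 1;                                                 // V follows U
        std::printf("U=%u V=%u\n", static_cast<unsigned>(uv[u_idx]),
                    static_cast<unsigned>(uv[v_idx]));
        return 0;
    }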
diff --git a/libs/ultrahdr/include/ultrahdr/gainmapmath.h b/libs/ultrahdr/include/ultrahdr/gainmapmath.h
index 50b4d2f..9f1238f 100644
--- a/libs/ultrahdr/include/ultrahdr/gainmapmath.h
+++ b/libs/ultrahdr/include/ultrahdr/gainmapmath.h
@@ -172,7 +172,7 @@
   }
 
   float getGainFactor(float gain) {
-    uint32_t idx = static_cast<uint32_t>(gain * (kGainFactorNumEntries - 1));
+    uint32_t idx = static_cast<uint32_t>(gain * (kGainFactorNumEntries - 1) + 0.5);
     //TODO() : Remove once conversion modules have appropriate clamping in place
     idx = CLIP3(idx, 0, kGainFactorNumEntries - 1);
     return mGainTable[idx];
diff --git a/libs/ultrahdr/include/ultrahdr/jpegencoderhelper.h b/libs/ultrahdr/include/ultrahdr/jpegencoderhelper.h
index 2c6778e..9d06415 100644
--- a/libs/ultrahdr/include/ultrahdr/jpegencoderhelper.h
+++ b/libs/ultrahdr/include/ultrahdr/jpegencoderhelper.h
@@ -19,6 +19,7 @@
 
 // We must include cstdio before jpeglib.h. It is a requirement of libjpeg.
 #include <cstdio>
+#include <vector>
 
 extern "C" {
 #include <jerror.h>
@@ -26,10 +27,11 @@
 }
 
 #include <utils/Errors.h>
-#include <vector>
 
 namespace android::ultrahdr {
 
+#define ALIGNM(x, m) ((((x) + ((m)-1)) / (m)) * (m))
+
 /*
  * Encapsulates a converter from a raw image (YUV420 planar or grey-scale) to JPEG format.
  * This class is not thread-safe.
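ALIGNM, added above, rounds its first argument up to the nearest multiple of the second; a quick sanity check (nothing here beyond the macro itself):

    // Quick check of the ALIGNM round-up macro introduced above (illustrative).
    #include <cstdio>

    #define ALIGNM(x, m) ((((x) + ((m)-1)) / (m)) * (m))

    int main() {
        // Rounds up to the next multiple of m; already-aligned values are unchanged.
        std::printf("%d %d %d\n", ALIGNM(17, 16), ALIGNM(32, 16), ALIGNM(1, 16));  // 32 32 16
        return 0;
    }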
@@ -46,8 +48,9 @@
      * ICC segment which will be added to the compressed image.
      * Returns false if errors occur during compression.
      */
-    bool compressImage(const void* image, int width, int height, int quality,
-                       const void* iccBuffer, unsigned int iccSize, bool isSingleChannel = false);
+    bool compressImage(const uint8_t* yBuffer, const uint8_t* uvBuffer, int width, int height,
+                       int lumaStride, int chromaStride, int quality, const void* iccBuffer,
+                       unsigned int iccSize);
 
     /*
      * Returns the compressed JPEG buffer pointer. This method must be called only after calling
@@ -66,6 +69,7 @@
      * We must pass at least 16 scanlines according to libjpeg documentation.
      */
     static const int kCompressBatchSize = 16;
+
 private:
     // initDestination(), emptyOutputBuffer() and emptyOutputBuffer() are callback functions to be
     // passed into jpeg library.
@@ -75,15 +79,16 @@
     static void outputErrorMessage(j_common_ptr cinfo);
 
     // Returns false if errors occur.
-    bool encode(const void* inYuv, int width, int height, int jpegQuality,
-                const void* iccBuffer, unsigned int iccSize, bool isSingleChannel);
+    bool encode(const uint8_t* yBuffer, const uint8_t* uvBuffer, int width, int height,
+                int lumaStride, int chromaStride, int quality, const void* iccBuffer,
+                unsigned int iccSize);
     void setJpegDestination(jpeg_compress_struct* cinfo);
     void setJpegCompressStruct(int width, int height, int quality, jpeg_compress_struct* cinfo,
                                bool isSingleChannel);
     // Returns false if errors occur.
-    bool compress(jpeg_compress_struct* cinfo, const uint8_t* image, bool isSingleChannel);
-    bool compressYuv(jpeg_compress_struct* cinfo, const uint8_t* yuv);
-    bool compressSingleChannel(jpeg_compress_struct* cinfo, const uint8_t* image);
+    bool compressYuv(jpeg_compress_struct* cinfo, const uint8_t* yBuffer, const uint8_t* uvBuffer,
+                     int lumaStride, int chromaStride);
+    bool compressY(jpeg_compress_struct* cinfo, const uint8_t* yBuffer, int lumaStride);
 
     // The block size for encoded jpeg image buffer.
     static const int kBlockSize = 16384;
diff --git a/libs/ultrahdr/include/ultrahdr/jpegr.h b/libs/ultrahdr/include/ultrahdr/jpegr.h
index f80496a..850cb32 100644
--- a/libs/ultrahdr/include/ultrahdr/jpegr.h
+++ b/libs/ultrahdr/include/ultrahdr/jpegr.h
@@ -17,9 +17,13 @@
 #ifndef ANDROID_ULTRAHDR_JPEGR_H
 #define ANDROID_ULTRAHDR_JPEGR_H
 
-#include "jpegencoderhelper.h"
-#include "jpegrerrorcode.h"
-#include "ultrahdr.h"
+#include <cstdint>
+#include <vector>
+
+#include "ultrahdr/jpegdecoderhelper.h"
+#include "ultrahdr/jpegencoderhelper.h"
+#include "ultrahdr/jpegrerrorcode.h"
+#include "ultrahdr/ultrahdr.h"
 
 #ifndef FLT_MAX
 #define FLT_MAX 0x1.fffffep127f
@@ -27,6 +31,27 @@
 
 namespace android::ultrahdr {
 
+// The current JPEGR version that we encode to
+static const char* const kJpegrVersion = "1.0";
+
+// Map is quarter res / sixteenth size
+static const size_t kMapDimensionScaleFactor = 4;
+
+// Gain map width is (image_width / kMapDimensionScaleFactor). If we were to compress a 420
+// gain map in JPEG, we would need at least 2 samples; for grayscale, 1 sample is sufficient.
+// We use 2 here anyway.
+static const int kMinWidth = 2 * kMapDimensionScaleFactor;
+static const int kMinHeight = 2 * kMapDimensionScaleFactor;
+
+// The Minimum Coded Unit (MCU) for 420 sub-sampling is determined by the JPEG encoder
+// parameter JpegEncoderHelper::kCompressBatchSize.
+// The width and height of the image under compression are expected to be multiples of the
+// MCU size; if this criterion is not satisfied, padding is applied.
+static const size_t kJpegBlock = JpegEncoderHelper::kCompressBatchSize;
+
+/*
+ * Holds information of jpegr image
+ */
 struct jpegr_info_struct {
     size_t width;
     size_t height;
@@ -49,16 +74,19 @@
 
     // Values below are optional
     // Pointer to chroma data, if it's NULL, chroma plane is considered to be immediately
-    // following after the luma plane.
-    // Note: currently this feature is only supported for P010 image (HDR input).
+    // after the luma plane.
     void* chroma_data = nullptr;
-    // Strides of Y plane in number of pixels, using 0 to present uninitialized, must be
-    // larger than or equal to luma width.
-    // Note: currently this feature is only supported for P010 image (HDR input).
+    // Stride of Y plane in number of pixels. 0 indicates the member is uninitialized. If
+    // non-zero this value must be larger than or equal to luma width. If stride is
+    // uninitialized then it is assumed to be equal to luma width.
     int luma_stride = 0;
-    // Strides of UV plane in number of pixels, using 0 to present uninitialized, must be
-    // larger than or equal to chroma width.
-    // Note: currently this feature is only supported for P010 image (HDR input).
+    // Stride of UV plane in number of pixels.
+    // 1. If this handle points to P010 image then this value must be larger than
+    //    or equal to luma width.
+    // 2. If this handle points to 420 image then this value must be larger than
+    //    or equal to (luma width / 2).
+    // NOTE: if chroma_data is nullptr, chroma_stride is ignored. In that case, just as
+    // chroma_data is derived from the luma pointer, chroma_stride is derived from the luma
+    // stride.
     int chroma_stride = 0;
 };
 
@@ -102,10 +130,10 @@
      * Tonemap the HDR input to a SDR image, generate gain map from the HDR and SDR images,
      * compress SDR YUV to 8-bit JPEG and append the gain map to the end of the compressed
      * JPEG.
-     * @param uncompressed_p010_image uncompressed HDR image in P010 color format
+     * @param p010_image_ptr uncompressed HDR image in P010 color format
      * @param hdr_tf transfer function of the HDR image
      * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength}
-     *             represents the maximum available size of the desitination buffer, and it must be
+     *             represents the maximum available size of the destination buffer, and it must be
      *             set before calling this method. If the encoded JPEGR size exceeds
      *             {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}.
      * @param quality target quality of the JPEG encoding, must be in range of 0-100 where 100 is
@@ -113,11 +141,8 @@
      * @param exif pointer to the exif metadata.
      * @return NO_ERROR if encoding succeeds, error code if error occurs.
      */
-    status_t encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
-                         ultrahdr_transfer_function hdr_tf,
-                         jr_compressed_ptr dest,
-                         int quality,
-                         jr_exif_ptr exif);
+    status_t encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, ultrahdr_transfer_function hdr_tf,
+                         jr_compressed_ptr dest, int quality, jr_exif_ptr exif);
 
     /*
      * Encode API-1
@@ -126,8 +151,8 @@
      * Generate gain map from the HDR and SDR inputs, compress SDR YUV to 8-bit JPEG and append
      * the gain map to the end of the compressed JPEG. HDR and SDR inputs must be the same
      * resolution. SDR input is assumed to use the sRGB transfer function.
-     * @param uncompressed_p010_image uncompressed HDR image in P010 color format
-     * @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
+     * @param p010_image_ptr uncompressed HDR image in P010 color format
+     * @param yuv420_image_ptr uncompressed SDR image in YUV_420 color format
      * @param hdr_tf transfer function of the HDR image
      * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength}
      *             represents the maximum available size of the destination buffer, and it must be
@@ -138,11 +163,8 @@
      * @param exif pointer to the exif metadata.
      * @return NO_ERROR if encoding succeeds, error code if error occurs.
      */
-    status_t encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
-                         jr_uncompressed_ptr uncompressed_yuv_420_image,
-                         ultrahdr_transfer_function hdr_tf,
-                         jr_compressed_ptr dest,
-                         int quality,
+    status_t encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, jr_uncompressed_ptr yuv420_image_ptr,
+                         ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest, int quality,
                          jr_exif_ptr exif);
 
     /*
@@ -155,11 +177,11 @@
      * compressed JPEG. Adds an ICC profile if one isn't present in the input JPEG image. HDR and
      * SDR inputs must be the same resolution and color space. SDR image is assumed to use the sRGB
      * transfer function.
-     * @param uncompressed_p010_image uncompressed HDR image in P010 color format
-     * @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
-     *                                   Note: the SDR image must be the decoded version of the JPEG
-     *                                         input
-     * @param compressed_jpeg_image compressed 8-bit JPEG image
+     * @param p010_image_ptr uncompressed HDR image in P010 color format
+     * @param yuv420_image_ptr uncompressed SDR image in YUV_420 color format
+     * @param yuv420jpg_image_ptr SDR image compressed in jpeg format
+     *                            Note: the compressed SDR image must be the compressed
+     *                                  yuv420_image_ptr image in JPEG format.
      * @param hdr_tf transfer function of the HDR image
      * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength}
      *             represents the maximum available size of the destination buffer, and it must be
@@ -167,10 +189,8 @@
      *             {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}.
      * @return NO_ERROR if encoding succeeds, error code if error occurs.
      */
-    status_t encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
-                         jr_uncompressed_ptr uncompressed_yuv_420_image,
-                         jr_compressed_ptr compressed_jpeg_image,
-                         ultrahdr_transfer_function hdr_tf,
+    status_t encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, jr_uncompressed_ptr yuv420_image_ptr,
+                         jr_compressed_ptr yuv420jpg_image_ptr, ultrahdr_transfer_function hdr_tf,
                          jr_compressed_ptr dest);
 
     /*
@@ -183,8 +203,8 @@
      * and the decoded SDR result, append the gain map to the end of the compressed JPEG. Adds an
      * ICC profile if one isn't present in the input JPEG image. HDR and SDR inputs must be the same
      * resolution. JPEG image is assumed to use the sRGB transfer function.
-     * @param uncompressed_p010_image uncompressed HDR image in P010 color format
-     * @param compressed_jpeg_image compressed 8-bit JPEG image
+     * @param p010_image_ptr uncompressed HDR image in P010 color format
+     * @param yuv420jpg_image_ptr SDR image compressed in jpeg format
      * @param hdr_tf transfer function of the HDR image
      * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength}
      *             represents the maximum available size of the destination buffer, and it must be
@@ -192,10 +212,8 @@
      *             {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}.
      * @return NO_ERROR if encoding succeeds, error code if error occurs.
      */
-    status_t encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
-                         jr_compressed_ptr compressed_jpeg_image,
-                         ultrahdr_transfer_function hdr_tf,
-                         jr_compressed_ptr dest);
+    status_t encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, jr_compressed_ptr yuv420jpg_image_ptr,
+                         ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest);
 
     /*
      * Encode API-4
@@ -203,8 +221,8 @@
      *
      * Assemble the primary JPEG image, the gain map and the metadata to JPEG/R format. Adds an ICC
      * profile if one isn't present in the input JPEG image.
-     * @param compressed_jpeg_image compressed 8-bit JPEG image
-     * @param compressed_gainmap compressed 8-bit JPEG single channel image
+     * @param yuv420jpg_image_ptr SDR image compressed in jpeg format
+     * @param gainmapjpg_image_ptr gain map image compressed in jpeg format
      * @param metadata metadata to be written in XMP of the primary jpeg
      * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength}
      *             represents the maximum available size of the destination buffer, and it must be
@@ -212,9 +230,8 @@
      *             {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}.
      * @return NO_ERROR if encoding succeeds, error code if error occurs.
      */
-    status_t encodeJPEGR(jr_compressed_ptr compressed_jpeg_image,
-                         jr_compressed_ptr compressed_gainmap,
-                         ultrahdr_metadata_ptr metadata,
+    status_t encodeJPEGR(jr_compressed_ptr yuv420jpg_image_ptr,
+                         jr_compressed_ptr gainmapjpg_image_ptr, ultrahdr_metadata_ptr metadata,
                          jr_compressed_ptr dest);
 
     /*
@@ -227,8 +244,7 @@
      *
      * This method only supports single gain map metadata values for fields that allow multi-channel
      * metadata values.
-     *
-     * @param compressed_jpegr_image compressed JPEGR image.
+     * @param jpegr_image_ptr compressed JPEGR image.
      * @param dest destination of the uncompressed JPEGR image.
      * @param max_display_boost (optional) the maximum available boost supported by a display,
      *                          the value must be greater than or equal to 1.0.
@@ -248,57 +264,55 @@
                             ----------------------------------------------------------------------
                             |   JPEGR_OUTPUT_HDR_HLG   |            RGBA_1010102 HLG             |
                             ----------------------------------------------------------------------
-     * @param gain_map destination of the decoded gain map. The default value is NULL where
-                           the decoder will do nothing about it. If configured not NULL the decoder
-                           will write the decoded gain_map data into this structure. The format
-                           is defined in {@code jpegr_uncompressed_struct}.
+     * @param gainmap_image_ptr destination of the decoded gain map. The default value is NULL
+                                where the decoder will do nothing about it. If configured not NULL
+                                the decoder will write the decoded gain_map data into this
+                                structure. The format is defined in
+                                {@code jpegr_uncompressed_struct}.
      * @param metadata destination of the decoded metadata. The default value is NULL where the
                        decoder will do nothing about it. If configured not NULL the decoder will
                        write metadata into this structure. the format of metadata is defined in
                        {@code ultrahdr_metadata_struct}.
      * @return NO_ERROR if decoding succeeds, error code if error occurs.
      */
-    status_t decodeJPEGR(jr_compressed_ptr compressed_jpegr_image,
-                         jr_uncompressed_ptr dest,
-                         float max_display_boost = FLT_MAX,
-                         jr_exif_ptr exif = nullptr,
+    status_t decodeJPEGR(jr_compressed_ptr jpegr_image_ptr, jr_uncompressed_ptr dest,
+                         float max_display_boost = FLT_MAX, jr_exif_ptr exif = nullptr,
                          ultrahdr_output_format output_format = ULTRAHDR_OUTPUT_HDR_LINEAR,
-                         jr_uncompressed_ptr gain_map = nullptr,
+                         jr_uncompressed_ptr gainmap_image_ptr = nullptr,
                          ultrahdr_metadata_ptr metadata = nullptr);
 
     /*
-    * Gets Info from JPEGR file without decoding it.
-    *
-    * This method only supports single gain map metadata values for fields that allow multi-channel
-    * metadata values.
-    *
-    * The output is filled jpegr_info structure
-    * @param compressed_jpegr_image compressed JPEGR image
-    * @param jpegr_info pointer to output JPEGR info. Members of jpegr_info
-    *         are owned by the caller
-    * @return NO_ERROR if JPEGR parsing succeeds, error code otherwise
-    */
-    status_t getJPEGRInfo(jr_compressed_ptr compressed_jpegr_image,
-                          jr_info_ptr jpegr_info);
+     * Gets Info from JPEGR file without decoding it.
+     *
+     * This method only supports single gain map metadata values for fields that allow multi-channel
+     * metadata values.
+     *
+     * The output is a filled jpegr_info structure
+     * @param jpegr_image_ptr compressed JPEGR image
+     * @param jpeg_image_info_ptr pointer to jpegr info struct. Members of jpegr_info
+     *                            are owned by the caller
+     * @return NO_ERROR if JPEGR parsing succeeds, error code otherwise
+     */
+    status_t getJPEGRInfo(jr_compressed_ptr jpegr_image_ptr, jr_info_ptr jpeg_image_info_ptr);
+
 protected:
     /*
      * This method is called in the encoding pipeline. It will take the uncompressed 8-bit and
      * 10-bit yuv images as input, and calculate the uncompressed gain map. The input images
      * must be the same resolution. The SDR input is assumed to use the sRGB transfer function.
      *
-     * @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
-     * @param uncompressed_p010_image uncompressed HDR image in P010 color format
+     * @param yuv420_image_ptr uncompressed SDR image in YUV_420 color format
+     * @param p010_image_ptr uncompressed HDR image in P010 color format
      * @param hdr_tf transfer function of the HDR image
-     * @param dest gain map; caller responsible for memory of data
-     * @param metadata max_content_boost is filled in
+     * @param metadata output gain map metadata; all fields except "version" are filled in
+     * @param dest location at which the gain map image is stored (caller is responsible for
+                   the memory of its data).
      * @param sdr_is_601 if true, then use BT.601 decoding of YUV regardless of SDR image gamut
      * @return NO_ERROR if calculation succeeds, error code if error occurs.
      */
-    status_t generateGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
-                             jr_uncompressed_ptr uncompressed_p010_image,
-                             ultrahdr_transfer_function hdr_tf,
-                             ultrahdr_metadata_ptr metadata,
-                             jr_uncompressed_ptr dest,
+    status_t generateGainMap(jr_uncompressed_ptr yuv420_image_ptr,
+                             jr_uncompressed_ptr p010_image_ptr, ultrahdr_transfer_function hdr_tf,
+                             ultrahdr_metadata_ptr metadata, jr_uncompressed_ptr dest,
                              bool sdr_is_601 = false);
 
     /*
@@ -309,8 +323,8 @@
      * The SDR image is assumed to use the sRGB transfer function. The SDR image is also assumed to
      * be a decoded JPEG for the purpose of YUV interpration.
      *
-     * @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
-     * @param uncompressed_gain_map uncompressed gain map
+     * @param yuv420_image_ptr uncompressed SDR image in YUV_420 color format
+     * @param gainmap_image_ptr pointer to uncompressed gain map image struct.
      * @param metadata JPEG/R metadata extracted from XMP.
      * @param output_format flag for setting output color format. if set to
      *                      {@code JPEGR_OUTPUT_SDR}, decoder will only decode the primary image
@@ -319,35 +333,33 @@
      * @param dest reconstructed HDR image
      * @return NO_ERROR if calculation succeeds, error code if error occurs.
      */
-    status_t applyGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
-                          jr_uncompressed_ptr uncompressed_gain_map,
-                          ultrahdr_metadata_ptr metadata,
-                          ultrahdr_output_format output_format,
-                          float max_display_boost,
+    status_t applyGainMap(jr_uncompressed_ptr yuv420_image_ptr,
+                          jr_uncompressed_ptr gainmap_image_ptr, ultrahdr_metadata_ptr metadata,
+                          ultrahdr_output_format output_format, float max_display_boost,
                           jr_uncompressed_ptr dest);
 
 private:
     /*
      * This method is called in the encoding pipeline. It will encode the gain map.
      *
-     * @param uncompressed_gain_map uncompressed gain map
-     * @param resource to compress gain map
+     * @param gainmap_image_ptr pointer to uncompressed gain map image struct
+     * @param jpeg_enc_obj_ptr helper resource to compress gain map
      * @return NO_ERROR if encoding succeeds, error code if error occurs.
      */
-    status_t compressGainMap(jr_uncompressed_ptr uncompressed_gain_map,
-                             JpegEncoderHelper* jpeg_encoder);
+    status_t compressGainMap(jr_uncompressed_ptr gainmap_image_ptr,
+                             JpegEncoderHelper* jpeg_enc_obj_ptr);
 
     /*
-     * This methoud is called to separate primary image and gain map image from JPEGR
+     * This method is called to separate primary image and gain map image from JPEGR
      *
-     * @param compressed_jpegr_image compressed JPEGR image
-     * @param primary_image destination of primary image
-     * @param gain_map destination of compressed gain map
+     * @param jpegr_image_ptr pointer to compressed JPEGR image.
+     * @param primary_jpg_image_ptr destination of primary image
+     * @param gainmap_jpg_image_ptr destination of compressed gain map image
      * @return NO_ERROR if calculation succeeds, error code if error occurs.
-    */
-    status_t extractPrimaryImageAndGainMap(jr_compressed_ptr compressed_jpegr_image,
-                                           jr_compressed_ptr primary_image,
-                                           jr_compressed_ptr gain_map);
+     */
+    status_t extractPrimaryImageAndGainMap(jr_compressed_ptr jpegr_image_ptr,
+                                           jr_compressed_ptr primary_jpg_image_ptr,
+                                           jr_compressed_ptr gainmap_jpg_image_ptr);
 
     /*
      * This method is called in the encoding pipeline. It will take the standard 8-bit JPEG image,
@@ -358,8 +370,9 @@
      * API-3 this parameter is null, but the primary image in JPEG/R may still have EXIF as long as
      * the input JPEG has EXIF.
      *
-     * @param compressed_jpeg_image compressed 8-bit JPEG image
-     * @param compress_gain_map compressed recover map
+     *
+     * @param primary_jpg_image_ptr compressed 8-bit JPEG primary image
+     * @param gainmap_jpg_image_ptr compressed gain map image
      * @param (nullable) exif EXIF package
      * @param (nullable) icc ICC package
      * @param icc_size length in bytes of ICC package
@@ -367,22 +380,18 @@
      * @param dest compressed JPEGR image
      * @return NO_ERROR if calculation succeeds, error code if error occurs.
      */
-    status_t appendGainMap(jr_compressed_ptr compressed_jpeg_image,
-                           jr_compressed_ptr compressed_gain_map,
-                           jr_exif_ptr exif,
-                           void* icc, size_t icc_size,
-                           ultrahdr_metadata_ptr metadata,
-                           jr_compressed_ptr dest);
+    status_t appendGainMap(jr_compressed_ptr primary_jpg_image_ptr,
+                           jr_compressed_ptr gainmap_jpg_image_ptr, jr_exif_ptr exif, void* icc,
+                           size_t icc_size, ultrahdr_metadata_ptr metadata, jr_compressed_ptr dest);
 
     /*
      * This method will tone map a HDR image to an SDR image.
      *
-     * @param src (input) uncompressed P010 image
-     * @param dest (output) tone mapping result as a YUV_420 image
-     * @return NO_ERROR if calculation succeeds, error code if error occurs.
+     * @param src pointer to uncompressed HDR image struct. HDR image is expected to be
+     *            in p010 color format
+     * @param dest pointer to store tonemapped SDR image
      */
-    status_t toneMap(jr_uncompressed_ptr src,
-                     jr_uncompressed_ptr dest);
+    status_t toneMap(jr_uncompressed_ptr src, jr_uncompressed_ptr dest);
 
     /*
      * This method will convert a YUV420 image from one YUV encoding to another in-place (eg.
@@ -396,15 +405,15 @@
      * @param dest_encoding output YUV encoding
      * @return NO_ERROR if calculation succeeds, error code if error occurs.
      */
-    status_t convertYuv(jr_uncompressed_ptr image,
-                        ultrahdr_color_gamut src_encoding,
+    status_t convertYuv(jr_uncompressed_ptr image, ultrahdr_color_gamut src_encoding,
                         ultrahdr_color_gamut dest_encoding);
 
     /*
      * This method will check the validity of the input arguments.
      *
-     * @param uncompressed_p010_image uncompressed HDR image in P010 color format
-     * @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
+     * @param p010_image_ptr uncompressed HDR image in P010 color format
+     * @param yuv420_image_ptr pointer to uncompressed SDR image struct. The SDR image is expected
+     *                         to be in YUV420 color format
      * @param hdr_tf transfer function of the HDR image
      * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength}
      *             represents the maximum available size of the desitination buffer, and it must be
@@ -412,32 +421,30 @@
      *             {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}.
      * @return NO_ERROR if the input args are valid, error code is not valid.
      */
-     status_t areInputArgumentsValid(jr_uncompressed_ptr uncompressed_p010_image,
-                                     jr_uncompressed_ptr uncompressed_yuv_420_image,
-                                     ultrahdr_transfer_function hdr_tf,
-                                     jr_compressed_ptr dest);
+    status_t areInputArgumentsValid(jr_uncompressed_ptr p010_image_ptr,
+                                    jr_uncompressed_ptr yuv420_image_ptr,
+                                    ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest_ptr);
 
     /*
      * This method will check the validity of the input arguments.
      *
-     * @param uncompressed_p010_image uncompressed HDR image in P010 color format
-     * @param uncompressed_yuv_420_image uncompressed SDR image in YUV_420 color format
+     * @param p010_image_ptr uncompressed HDR image in P010 color format
+     * @param yuv420_image_ptr pointer to uncompressed SDR image struct. The SDR image is expected
+     *                         to be in YUV420 color format
      * @param hdr_tf transfer function of the HDR image
      * @param dest destination of the compressed JPEGR image. Please note that {@code maxLength}
-     *             represents the maximum available size of the desitination buffer, and it must be
+     *             represents the maximum available size of the destination buffer, and it must be
      *             set before calling this method. If the encoded JPEGR size exceeds
      *             {@code maxLength}, this method will return {@code ERROR_JPEGR_BUFFER_TOO_SMALL}.
      * @param quality target quality of the JPEG encoding, must be in range of 0-100 where 100 is
      *                the highest quality
      * @return NO_ERROR if the input args are valid, error code is not valid.
      */
-     status_t areInputArgumentsValid(jr_uncompressed_ptr uncompressed_p010_image,
-                                     jr_uncompressed_ptr uncompressed_yuv_420_image,
-                                     ultrahdr_transfer_function hdr_tf,
-                                     jr_compressed_ptr dest,
-                                     int quality);
+    status_t areInputArgumentsValid(jr_uncompressed_ptr p010_image_ptr,
+                                    jr_uncompressed_ptr yuv420_image_ptr,
+                                    ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest,
+                                    int quality);
 };
-
 } // namespace android::ultrahdr
 
 #endif // ANDROID_ULTRAHDR_JPEGR_H
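For illustration only (not part of the patch), a minimal decode-side caller sketch against the signatures declared above. The jpegr_info_struct type name, its width/height members, and the 8-bytes-per-pixel sizing assumed for ULTRAHDR_OUTPUT_HDR_LINEAR are assumptions, not taken from this change:

    // Hypothetical caller, assuming the jpegr.h declarations above; jpegr_info_struct
    // member names and the output pixel size are assumptions for illustration.
    #include <cfloat>
    #include <cstdint>
    #include <memory>

    #include <ultrahdr/jpegr.h>

    using namespace android::ultrahdr;

    status_t decodeToLinearHdr(void* jpegr_data, int jpegr_size) {
        JpegR jpegr;

        jpegr_compressed_struct jpegr_image{};
        jpegr_image.data = jpegr_data;
        jpegr_image.length = jpegr_size;
        jpegr_image.maxLength = jpegr_size;

        // Query dimensions without a full decode.
        jpegr_info_struct info{};
        if (status_t ret = jpegr.getJPEGRInfo(&jpegr_image, &info); ret != NO_ERROR) {
            return ret;
        }

        // Assumed: 8 bytes per pixel for the linear-float HDR output format.
        std::unique_ptr<uint8_t[]> pixels = std::make_unique<uint8_t[]>(
                static_cast<size_t>(info.width) * info.height * 8);
        jpegr_uncompressed_struct dest{};
        dest.data = pixels.get();

        return jpegr.decodeJPEGR(&jpegr_image, &dest, /* max_display_boost */ FLT_MAX,
                                 /* exif */ nullptr, ULTRAHDR_OUTPUT_HDR_LINEAR,
                                 /* gainmap_image_ptr */ nullptr, /* metadata */ nullptr);
    }
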
diff --git a/libs/ultrahdr/include/ultrahdr/ultrahdr.h b/libs/ultrahdr/include/ultrahdr/ultrahdr.h
index 17cc971..66f7088 100644
--- a/libs/ultrahdr/include/ultrahdr/ultrahdr.h
+++ b/libs/ultrahdr/include/ultrahdr/ultrahdr.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_ULTRAHDR_ULTRAHDR_H
 #define ANDROID_ULTRAHDR_ULTRAHDR_H
 
+#include <string>
+
 namespace android::ultrahdr {
 // Color gamuts for image data
 typedef enum {
diff --git a/libs/ultrahdr/jpegdecoderhelper.cpp b/libs/ultrahdr/jpegdecoderhelper.cpp
index fef5444..33bf9ef 100644
--- a/libs/ultrahdr/jpegdecoderhelper.cpp
+++ b/libs/ultrahdr/jpegdecoderhelper.cpp
@@ -227,10 +227,20 @@
     mHeight = cinfo.image_height;
 
     if (decodeToRGBA) {
-        if (cinfo.jpeg_color_space == JCS_GRAYSCALE) {
-            // We don't intend to support decoding grayscale to RGBA
-            status = false;
-            ALOGE("%s: decoding grayscale to RGBA is unsupported", __func__);
+        // The primary image is expected to use YUV 4:2:0 sub-sampling
+        if (cinfo.jpeg_color_space != JCS_YCbCr) {
+            status = false;
+            ALOGE("%s: decodeToRGBA unexpected jpeg color space", __func__);
+            goto CleanUp;
+        }
+        if (cinfo.comp_info[0].h_samp_factor != 2 ||
+                cinfo.comp_info[1].h_samp_factor != 1 ||
+                cinfo.comp_info[2].h_samp_factor != 1 ||
+                cinfo.comp_info[0].v_samp_factor != 2 ||
+                cinfo.comp_info[1].v_samp_factor != 1 ||
+                cinfo.comp_info[2].v_samp_factor != 1) {
+            status = false;
+            ALOGE("%s: decodeToRGBA unexpected primary image sub-sampling", __func__);
             goto CleanUp;
         }
         // 4 bytes per pixel
@@ -251,12 +261,16 @@
             mResultBuffer.resize(cinfo.image_width * cinfo.image_height * 3 / 2, 0);
         } else if (cinfo.jpeg_color_space == JCS_GRAYSCALE) {
             mResultBuffer.resize(cinfo.image_width * cinfo.image_height, 0);
+        } else {
+            status = false;
+            ALOGE("%s: decodeToYUV unexpected jpeg color space", __func__);
+            goto CleanUp;
         }
         cinfo.out_color_space = cinfo.jpeg_color_space;
         cinfo.raw_data_out = TRUE;
     }
 
-    cinfo.dct_method = JDCT_IFAST;
+    cinfo.dct_method = JDCT_ISLOW;
 
     jpeg_start_decompress(&cinfo);
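As an aside (not part of the patch), the YCbCr branch above sizes the decodeToYUV output as width * height * 3 / 2. A small sketch of slicing such a buffer into planes; the planar Y-then-U-then-V ordering is an assumption for illustration, not stated in this change:

    #include <cstddef>
    #include <cstdint>

    // Plane offsets for a tightly packed YUV 4:2:0 buffer of size width * height * 3 / 2.
    // Y/U/V ordering is assumed for illustration.
    struct Yuv420Planes {
        const uint8_t* y;
        const uint8_t* u;
        const uint8_t* v;
    };

    Yuv420Planes sliceYuv420(const uint8_t* buf, int width, int height) {
        const size_t ySize = static_cast<size_t>(width) * height;  // full-resolution luma
        const size_t cSize = ySize / 4;                            // quarter-resolution chroma
        return {buf, buf + ySize, buf + ySize + cSize};
    }
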
 
diff --git a/libs/ultrahdr/jpegencoderhelper.cpp b/libs/ultrahdr/jpegencoderhelper.cpp
index a03547b..13ae742 100644
--- a/libs/ultrahdr/jpegencoderhelper.cpp
+++ b/libs/ultrahdr/jpegencoderhelper.cpp
@@ -14,38 +14,35 @@
  * limitations under the License.
  */
 
+#include <cstring>
+#include <memory>
+#include <vector>
+
 #include <ultrahdr/jpegencoderhelper.h>
-
 #include <utils/Log.h>
 
-#include <errno.h>
-
 namespace android::ultrahdr {
 
-#define ALIGNM(x, m)  ((((x) + ((m) - 1)) / (m)) * (m))
-
 // The destination manager that can access |mResultBuffer| in JpegEncoderHelper.
 struct destination_mgr {
-public:
     struct jpeg_destination_mgr mgr;
     JpegEncoderHelper* encoder;
 };
 
-JpegEncoderHelper::JpegEncoderHelper() {
-}
+JpegEncoderHelper::JpegEncoderHelper() {}
 
-JpegEncoderHelper::~JpegEncoderHelper() {
-}
+JpegEncoderHelper::~JpegEncoderHelper() {}
 
-bool JpegEncoderHelper::compressImage(const void* image, int width, int height, int quality,
-                                   const void* iccBuffer, unsigned int iccSize,
-                                   bool isSingleChannel) {
+bool JpegEncoderHelper::compressImage(const uint8_t* yBuffer, const uint8_t* uvBuffer, int width,
+                                      int height, int lumaStride, int chromaStride, int quality,
+                                      const void* iccBuffer, unsigned int iccSize) {
     mResultBuffer.clear();
-    if (!encode(image, width, height, quality, iccBuffer, iccSize, isSingleChannel)) {
+    if (!encode(yBuffer, uvBuffer, width, height, lumaStride, chromaStride, quality, iccBuffer,
+                iccSize)) {
         return false;
     }
-    ALOGI("Compressed JPEG: %d[%dx%d] -> %zu bytes",
-        (width * height * 12) / 8, width, height, mResultBuffer.size());
+    ALOGI("Compressed JPEG: %d[%dx%d] -> %zu bytes", (width * height * 12) / 8, width, height,
+          mResultBuffer.size());
     return true;
 }
 
@@ -85,29 +82,28 @@
     char buffer[JMSG_LENGTH_MAX];
 
     /* Create the message */
-    (*cinfo->err->format_message) (cinfo, buffer);
+    (*cinfo->err->format_message)(cinfo, buffer);
     ALOGE("%s\n", buffer);
 }
 
-bool JpegEncoderHelper::encode(const void* image, int width, int height, int jpegQuality,
-                         const void* iccBuffer, unsigned int iccSize, bool isSingleChannel) {
+bool JpegEncoderHelper::encode(const uint8_t* yBuffer, const uint8_t* uvBuffer, int width,
+                               int height, int lumaStride, int chromaStride, int quality,
+                               const void* iccBuffer, unsigned int iccSize) {
     jpeg_compress_struct cinfo;
     jpeg_error_mgr jerr;
 
     cinfo.err = jpeg_std_error(&jerr);
-    // Override output_message() to print error log with ALOGE().
     cinfo.err->output_message = &outputErrorMessage;
     jpeg_create_compress(&cinfo);
     setJpegDestination(&cinfo);
-
-    setJpegCompressStruct(width, height, jpegQuality, &cinfo, isSingleChannel);
+    setJpegCompressStruct(width, height, quality, &cinfo, uvBuffer == nullptr);
     jpeg_start_compress(&cinfo, TRUE);
-
     if (iccBuffer != nullptr && iccSize > 0) {
         jpeg_write_marker(&cinfo, JPEG_APP0 + 2, static_cast<const JOCTET*>(iccBuffer), iccSize);
     }
-
-    bool status = compress(&cinfo, static_cast<const uint8_t*>(image), isSingleChannel);
+    bool status = cinfo.num_components == 1
+            ? compressY(&cinfo, yBuffer, lumaStride)
+            : compressYuv(&cinfo, yBuffer, uvBuffer, lumaStride, chromaStride);
     jpeg_finish_compress(&cinfo);
     jpeg_destroy_compress(&cinfo);
 
@@ -115,8 +111,9 @@
 }
 
 void JpegEncoderHelper::setJpegDestination(jpeg_compress_struct* cinfo) {
-    destination_mgr* dest = static_cast<struct destination_mgr *>((*cinfo->mem->alloc_small) (
-            (j_common_ptr) cinfo, JPOOL_PERMANENT, sizeof(destination_mgr)));
+    destination_mgr* dest = static_cast<struct destination_mgr*>(
+            (*cinfo->mem->alloc_small)((j_common_ptr)cinfo, JPOOL_PERMANENT,
+                                       sizeof(destination_mgr)));
     dest->encoder = this;
     dest->mgr.init_destination = &initDestination;
     dest->mgr.empty_output_buffer = &emptyOutputBuffer;
@@ -125,59 +122,40 @@
 }
 
 void JpegEncoderHelper::setJpegCompressStruct(int width, int height, int quality,
-                                        jpeg_compress_struct* cinfo, bool isSingleChannel) {
+                                              jpeg_compress_struct* cinfo, bool isSingleChannel) {
     cinfo->image_width = width;
     cinfo->image_height = height;
-    if (isSingleChannel) {
-        cinfo->input_components = 1;
-        cinfo->in_color_space = JCS_GRAYSCALE;
-    } else {
-        cinfo->input_components = 3;
-        cinfo->in_color_space = JCS_YCbCr;
-    }
+    cinfo->input_components = isSingleChannel ? 1 : 3;
+    cinfo->in_color_space = isSingleChannel ? JCS_GRAYSCALE : JCS_YCbCr;
     jpeg_set_defaults(cinfo);
-
     jpeg_set_quality(cinfo, quality, TRUE);
-    jpeg_set_colorspace(cinfo, isSingleChannel ? JCS_GRAYSCALE : JCS_YCbCr);
     cinfo->raw_data_in = TRUE;
-    cinfo->dct_method = JDCT_IFAST;
-
-    if (!isSingleChannel) {
-        // Configure sampling factors. The sampling factor is JPEG subsampling 420 because the
-        // source format is YUV420.
-        cinfo->comp_info[0].h_samp_factor = 2;
-        cinfo->comp_info[0].v_samp_factor = 2;
-        cinfo->comp_info[1].h_samp_factor = 1;
-        cinfo->comp_info[1].v_samp_factor = 1;
-        cinfo->comp_info[2].h_samp_factor = 1;
-        cinfo->comp_info[2].v_samp_factor = 1;
+    cinfo->dct_method = JDCT_ISLOW;
+    cinfo->comp_info[0].h_samp_factor = cinfo->in_color_space == JCS_GRAYSCALE ? 1 : 2;
+    cinfo->comp_info[0].v_samp_factor = cinfo->in_color_space == JCS_GRAYSCALE ? 1 : 2;
+    for (int i = 1; i < cinfo->num_components; i++) {
+        cinfo->comp_info[i].h_samp_factor = 1;
+        cinfo->comp_info[i].v_samp_factor = 1;
     }
 }
 
-bool JpegEncoderHelper::compress(
-        jpeg_compress_struct* cinfo, const uint8_t* image, bool isSingleChannel) {
-    if (isSingleChannel) {
-        return compressSingleChannel(cinfo, image);
-    }
-    return compressYuv(cinfo, image);
-}
-
-bool JpegEncoderHelper::compressYuv(jpeg_compress_struct* cinfo, const uint8_t* yuv) {
+bool JpegEncoderHelper::compressYuv(jpeg_compress_struct* cinfo, const uint8_t* yBuffer,
+                                    const uint8_t* uvBuffer, int lumaStride, int chromaStride) {
     JSAMPROW y[kCompressBatchSize];
     JSAMPROW cb[kCompressBatchSize / 2];
     JSAMPROW cr[kCompressBatchSize / 2];
-    JSAMPARRAY planes[3] {y, cb, cr};
+    JSAMPARRAY planes[3]{y, cb, cr};
 
-    size_t y_plane_size = cinfo->image_width * cinfo->image_height;
-    size_t uv_plane_size = y_plane_size / 4;
-    uint8_t* y_plane = const_cast<uint8_t*>(yuv);
-    uint8_t* u_plane = const_cast<uint8_t*>(yuv + y_plane_size);
-    uint8_t* v_plane = const_cast<uint8_t*>(yuv + y_plane_size + uv_plane_size);
+    size_t y_plane_size = lumaStride * cinfo->image_height;
+    size_t u_plane_size = chromaStride * cinfo->image_height / 2;
+    uint8_t* y_plane = const_cast<uint8_t*>(yBuffer);
+    uint8_t* u_plane = const_cast<uint8_t*>(uvBuffer);
+    uint8_t* v_plane = const_cast<uint8_t*>(u_plane + u_plane_size);
     std::unique_ptr<uint8_t[]> empty = std::make_unique<uint8_t[]>(cinfo->image_width);
     memset(empty.get(), 0, cinfo->image_width);
 
     const int aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize);
-    const bool is_width_aligned = (aligned_width == cinfo->image_width);
+    const bool need_padding = (lumaStride < aligned_width);
     std::unique_ptr<uint8_t[]> buffer_intrm = nullptr;
     uint8_t* y_plane_intrm = nullptr;
     uint8_t* u_plane_intrm = nullptr;
@@ -186,7 +164,7 @@
     JSAMPROW cb_intrm[kCompressBatchSize / 2];
     JSAMPROW cr_intrm[kCompressBatchSize / 2];
     JSAMPARRAY planes_intrm[3]{y_intrm, cb_intrm, cr_intrm};
-    if (!is_width_aligned) {
+    if (need_padding) {
         size_t mcu_row_size = aligned_width * kCompressBatchSize * 3 / 2;
         buffer_intrm = std::make_unique<uint8_t[]>(mcu_row_size);
         y_plane_intrm = buffer_intrm.get();
@@ -211,11 +189,11 @@
         for (int i = 0; i < kCompressBatchSize; ++i) {
             size_t scanline = cinfo->next_scanline + i;
             if (scanline < cinfo->image_height) {
-                y[i] = y_plane + scanline * cinfo->image_width;
+                y[i] = y_plane + scanline * lumaStride;
             } else {
                 y[i] = empty.get();
             }
-            if (!is_width_aligned) {
+            if (need_padding) {
                 memcpy(y_intrm[i], y[i], cinfo->image_width);
             }
         }
@@ -223,18 +201,18 @@
         for (int i = 0; i < kCompressBatchSize / 2; ++i) {
             size_t scanline = cinfo->next_scanline / 2 + i;
             if (scanline < cinfo->image_height / 2) {
-                int offset = scanline * (cinfo->image_width / 2);
+                int offset = scanline * chromaStride;
                 cb[i] = u_plane + offset;
                 cr[i] = v_plane + offset;
             } else {
                 cb[i] = cr[i] = empty.get();
             }
-            if (!is_width_aligned) {
+            if (need_padding) {
                 memcpy(cb_intrm[i], cb[i], cinfo->image_width / 2);
                 memcpy(cr_intrm[i], cr[i], cinfo->image_width / 2);
             }
         }
-        int processed = jpeg_write_raw_data(cinfo, is_width_aligned ? planes : planes_intrm,
+        int processed = jpeg_write_raw_data(cinfo, need_padding ? planes_intrm : planes,
                                             kCompressBatchSize);
         if (processed != kCompressBatchSize) {
             ALOGE("Number of processed lines does not equal input lines.");
@@ -244,22 +222,23 @@
     return true;
 }
 
-bool JpegEncoderHelper::compressSingleChannel(jpeg_compress_struct* cinfo, const uint8_t* image) {
+bool JpegEncoderHelper::compressY(jpeg_compress_struct* cinfo, const uint8_t* yBuffer,
+                                  int lumaStride) {
     JSAMPROW y[kCompressBatchSize];
-    JSAMPARRAY planes[1] {y};
+    JSAMPARRAY planes[1]{y};
 
-    uint8_t* y_plane = const_cast<uint8_t*>(image);
+    uint8_t* y_plane = const_cast<uint8_t*>(yBuffer);
     std::unique_ptr<uint8_t[]> empty = std::make_unique<uint8_t[]>(cinfo->image_width);
     memset(empty.get(), 0, cinfo->image_width);
 
     const int aligned_width = ALIGNM(cinfo->image_width, kCompressBatchSize);
-    bool is_width_aligned = (aligned_width == cinfo->image_width);
+    const bool need_padding = (lumaStride < aligned_width);
     std::unique_ptr<uint8_t[]> buffer_intrm = nullptr;
     uint8_t* y_plane_intrm = nullptr;
     uint8_t* u_plane_intrm = nullptr;
     JSAMPROW y_intrm[kCompressBatchSize];
     JSAMPARRAY planes_intrm[]{y_intrm};
-    if (!is_width_aligned) {
+    if (need_padding) {
         size_t mcu_row_size = aligned_width * kCompressBatchSize;
         buffer_intrm = std::make_unique<uint8_t[]>(mcu_row_size);
         y_plane_intrm = buffer_intrm.get();
@@ -273,15 +252,15 @@
         for (int i = 0; i < kCompressBatchSize; ++i) {
             size_t scanline = cinfo->next_scanline + i;
             if (scanline < cinfo->image_height) {
-                y[i] = y_plane + scanline * cinfo->image_width;
+                y[i] = y_plane + scanline * lumaStride;
             } else {
                 y[i] = empty.get();
             }
-            if (!is_width_aligned) {
+            if (need_padding) {
                 memcpy(y_intrm[i], y[i], cinfo->image_width);
             }
         }
-        int processed = jpeg_write_raw_data(cinfo, is_width_aligned ? planes : planes_intrm,
+        int processed = jpeg_write_raw_data(cinfo, need_padding ? planes_intrm : planes,
                                             kCompressBatchSize);
         if (processed != kCompressBatchSize / 2) {
             ALOGE("Number of processed lines does not equal input lines.");
@@ -291,4 +270,4 @@
     return true;
 }
 
-} // namespace ultrahdr
+} // namespace android::ultrahdr
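For illustration only (not part of the patch), a sketch of driving the new stride-aware compressImage() the way the jpegr.cpp call sites below do; the contiguous I420 layout (Y plane, then U, then V, with chroma stride equal to half the luma stride) is an assumption for this example:

    #include <cstddef>
    #include <cstdint>

    #include <ultrahdr/jpegencoderhelper.h>

    using namespace android::ultrahdr;

    // Compress a contiguous I420 buffer: Y (lumaStride x height) followed by the
    // U and V planes (each chromaStride x height / 2). Layout assumed as above.
    bool compressI420(const uint8_t* i420, int width, int height, int lumaStride, int quality) {
        const int chromaStride = lumaStride / 2;
        const uint8_t* yBuffer = i420;
        const uint8_t* uvBuffer = i420 + static_cast<size_t>(lumaStride) * height;  // U, then V

        JpegEncoderHelper encoder;
        if (!encoder.compressImage(yBuffer, uvBuffer, width, height, lumaStride, chromaStride,
                                   quality, /* iccBuffer */ nullptr, /* iccSize */ 0)) {
            return false;
        }
        // The bitstream is then available via encoder.getCompressedImagePtr() and
        // encoder.getCompressedImageSize().
        return true;
    }
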
diff --git a/libs/ultrahdr/jpegr.cpp b/libs/ultrahdr/jpegr.cpp
index 5a601bd..dc439d7 100644
--- a/libs/ultrahdr/jpegr.cpp
+++ b/libs/ultrahdr/jpegr.cpp
@@ -14,31 +14,26 @@
  * limitations under the License.
  */
 
-#include <ultrahdr/jpegr.h>
-#include <ultrahdr/jpegencoderhelper.h>
-#include <ultrahdr/jpegdecoderhelper.h>
-#include <ultrahdr/gainmapmath.h>
-#include <ultrahdr/jpegrutils.h>
-#include <ultrahdr/multipictureformat.h>
-#include <ultrahdr/icc.h>
-
-#include <image_io/jpeg/jpeg_marker.h>
-#include <image_io/jpeg/jpeg_info.h>
-#include <image_io/jpeg/jpeg_scanner.h>
-#include <image_io/jpeg/jpeg_info_builder.h>
-#include <image_io/base/data_segment_data_source.h>
-#include <utils/Log.h>
-
-#include <map>
-#include <memory>
-#include <sstream>
-#include <string>
 #include <cmath>
 #include <condition_variable>
 #include <deque>
+#include <memory>
 #include <mutex>
 #include <thread>
-#include <unistd.h>
+
+#include <ultrahdr/gainmapmath.h>
+#include <ultrahdr/icc.h>
+#include <ultrahdr/jpegr.h>
+#include <ultrahdr/jpegrutils.h>
+#include <ultrahdr/multipictureformat.h>
+
+#include <image_io/base/data_segment_data_source.h>
+#include <image_io/jpeg/jpeg_info.h>
+#include <image_io/jpeg/jpeg_info_builder.h>
+#include <image_io/jpeg/jpeg_marker.h>
+#include <image_io/jpeg/jpeg_scanner.h>
+
+#include <utils/Log.h>
 
 using namespace std;
 using namespace photos_editing_formats::image_io;
@@ -60,25 +55,6 @@
     }                           \
   }
 
-// The current JPEGR version that we encode to
-static const char* const kJpegrVersion = "1.0";
-
-// Map is quarter res / sixteenth size
-static const size_t kMapDimensionScaleFactor = 4;
-
-// Gain Map width is (image_width / kMapDimensionScaleFactor). If we were to
-// compress 420 GainMap in jpeg, then we need at least 2 samples. For Grayscale
-// 1 sample is sufficient. We are using 2 here anyways
-static const int kMinWidth = 2 * kMapDimensionScaleFactor;
-static const int kMinHeight = 2 * kMapDimensionScaleFactor;
-
-// JPEG block size.
-// JPEG encoding / decoding will require block based DCT transform 16 x 16 for luma,
-// and 8 x 8 for chroma.
-// Width must be 16 dividable for luma, and 8 dividable for chroma.
-// If this criteria is not facilitated, we will pad zeros based to each line on the
-// required block size.
-static const size_t kJpegBlock = JpegEncoderHelper::kCompressBatchSize;
 // JPEG compress quality (0 ~ 100) for gain map
 static const int kMapCompressQuality = 85;
 
@@ -96,184 +72,176 @@
   return cpuCoreCount;
 }
 
-status_t JpegR::areInputArgumentsValid(jr_uncompressed_ptr uncompressed_p010_image,
-                                       jr_uncompressed_ptr uncompressed_yuv_420_image,
+status_t JpegR::areInputArgumentsValid(jr_uncompressed_ptr p010_image_ptr,
+                                       jr_uncompressed_ptr yuv420_image_ptr,
                                        ultrahdr_transfer_function hdr_tf,
-                                       jr_compressed_ptr dest) {
-  if (uncompressed_p010_image == nullptr || uncompressed_p010_image->data == nullptr) {
-    ALOGE("received nullptr for uncompressed p010 image");
+                                       jr_compressed_ptr dest_ptr) {
+  if (p010_image_ptr == nullptr || p010_image_ptr->data == nullptr) {
+    ALOGE("Received nullptr for input p010 image");
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
-  if (uncompressed_p010_image->width % 2 != 0
-          || uncompressed_p010_image->height % 2 != 0) {
-    ALOGE("Image dimensions cannot be odd, image dimensions %dx%d",
-          uncompressed_p010_image->width, uncompressed_p010_image->height);
+  if (p010_image_ptr->width % 2 != 0 || p010_image_ptr->height % 2 != 0) {
+    ALOGE("Image dimensions cannot be odd, image dimensions %dx%d", p010_image_ptr->width,
+          p010_image_ptr->height);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
-
-  if (uncompressed_p010_image->width < kMinWidth
-          || uncompressed_p010_image->height < kMinHeight) {
-    ALOGE("Image dimensions cannot be less than %dx%d, image dimensions %dx%d",
-          kMinWidth, kMinHeight, uncompressed_p010_image->width, uncompressed_p010_image->height);
+  if (p010_image_ptr->width < kMinWidth || p010_image_ptr->height < kMinHeight) {
+    ALOGE("Image dimensions cannot be less than %dx%d, image dimensions %dx%d", kMinWidth,
+          kMinHeight, p010_image_ptr->width, p010_image_ptr->height);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
-
-  if (uncompressed_p010_image->width > kMaxWidth
-          || uncompressed_p010_image->height > kMaxHeight) {
-    ALOGE("Image dimensions cannot be larger than %dx%d, image dimensions %dx%d",
-          kMaxWidth, kMaxHeight, uncompressed_p010_image->width, uncompressed_p010_image->height);
+  if (p010_image_ptr->width > kMaxWidth || p010_image_ptr->height > kMaxHeight) {
+    ALOGE("Image dimensions cannot be larger than %dx%d, image dimensions %dx%d", kMaxWidth,
+          kMaxHeight, p010_image_ptr->width, p010_image_ptr->height);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
-
-  if (uncompressed_p010_image->colorGamut <= ULTRAHDR_COLORGAMUT_UNSPECIFIED
-          || uncompressed_p010_image->colorGamut > ULTRAHDR_COLORGAMUT_MAX) {
-    ALOGE("Unrecognized p010 color gamut %d", uncompressed_p010_image->colorGamut);
+  if (p010_image_ptr->colorGamut <= ULTRAHDR_COLORGAMUT_UNSPECIFIED ||
+      p010_image_ptr->colorGamut > ULTRAHDR_COLORGAMUT_MAX) {
+    ALOGE("Unrecognized p010 color gamut %d", p010_image_ptr->colorGamut);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
-
-  if (uncompressed_p010_image->luma_stride != 0
-          && uncompressed_p010_image->luma_stride < uncompressed_p010_image->width) {
-    ALOGE("Luma stride can not be smaller than width, stride=%d, width=%d",
-                uncompressed_p010_image->luma_stride, uncompressed_p010_image->width);
+  if (p010_image_ptr->luma_stride != 0 && p010_image_ptr->luma_stride < p010_image_ptr->width) {
+    ALOGE("Luma stride must not be smaller than width, stride=%d, width=%d",
+          p010_image_ptr->luma_stride, p010_image_ptr->width);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
-
-  if (uncompressed_p010_image->chroma_data != nullptr
-          && uncompressed_p010_image->chroma_stride < uncompressed_p010_image->width) {
-    ALOGE("Chroma stride can not be smaller than width, stride=%d, width=%d",
-          uncompressed_p010_image->chroma_stride,
-          uncompressed_p010_image->width);
+  if (p010_image_ptr->chroma_data != nullptr &&
+      p010_image_ptr->chroma_stride < p010_image_ptr->width) {
+    ALOGE("Chroma stride must not be smaller than width, stride=%d, width=%d",
+          p010_image_ptr->chroma_stride, p010_image_ptr->width);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
-
-  if (dest == nullptr || dest->data == nullptr) {
-    ALOGE("received nullptr for destination");
+  if (dest_ptr == nullptr || dest_ptr->data == nullptr) {
+    ALOGE("Received nullptr for destination");
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
-  if (hdr_tf <= ULTRAHDR_TF_UNSPECIFIED || hdr_tf > ULTRAHDR_TF_MAX
-          || hdr_tf == ULTRAHDR_TF_SRGB) {
+  if (hdr_tf <= ULTRAHDR_TF_UNSPECIFIED || hdr_tf > ULTRAHDR_TF_MAX || hdr_tf == ULTRAHDR_TF_SRGB) {
     ALOGE("Invalid hdr transfer function %d", hdr_tf);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
-
-  if (uncompressed_yuv_420_image == nullptr) {
+  if (yuv420_image_ptr == nullptr) {
     return NO_ERROR;
   }
-
-  if (uncompressed_yuv_420_image->data == nullptr) {
-    ALOGE("received nullptr for uncompressed 420 image");
+  if (yuv420_image_ptr->data == nullptr) {
+    ALOGE("Received nullptr for uncompressed 420 image");
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
-  if (uncompressed_yuv_420_image->luma_stride != 0) {
-    ALOGE("Stride is not supported for YUV420 image");
-    return ERROR_JPEGR_UNSUPPORTED_FEATURE;
-  }
-
-  if (uncompressed_yuv_420_image->chroma_data != nullptr) {
-    ALOGE("Pointer to chroma plane is not supported for YUV420 image, chroma data must"
-          "be immediately after the luma data.");
-    return ERROR_JPEGR_UNSUPPORTED_FEATURE;
-  }
-
-  if (uncompressed_p010_image->width != uncompressed_yuv_420_image->width
-      || uncompressed_p010_image->height != uncompressed_yuv_420_image->height) {
-    ALOGE("Image resolutions mismatch: P010: %dx%d, YUV420: %dx%d",
-              uncompressed_p010_image->width,
-              uncompressed_p010_image->height,
-              uncompressed_yuv_420_image->width,
-              uncompressed_yuv_420_image->height);
-    return ERROR_JPEGR_RESOLUTION_MISMATCH;
-  }
-
-  if (uncompressed_yuv_420_image->colorGamut <= ULTRAHDR_COLORGAMUT_UNSPECIFIED
-          || uncompressed_yuv_420_image->colorGamut > ULTRAHDR_COLORGAMUT_MAX) {
-    ALOGE("Unrecognized 420 color gamut %d", uncompressed_yuv_420_image->colorGamut);
+  if (yuv420_image_ptr->luma_stride != 0 &&
+      yuv420_image_ptr->luma_stride < yuv420_image_ptr->width) {
+    ALOGE("Luma stride must not be smaller than width, stride=%d, width=%d",
+          yuv420_image_ptr->luma_stride, yuv420_image_ptr->width);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
-
+  if (yuv420_image_ptr->chroma_data != nullptr &&
+      yuv420_image_ptr->chroma_stride < yuv420_image_ptr->width / 2) {
+    ALOGE("Chroma stride must not be smaller than (width / 2), stride=%d, width=%d",
+          yuv420_image_ptr->chroma_stride, yuv420_image_ptr->width);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
+  if (p010_image_ptr->width != yuv420_image_ptr->width ||
+      p010_image_ptr->height != yuv420_image_ptr->height) {
+    ALOGE("Image resolutions mismatch: P010: %dx%d, YUV420: %dx%d", p010_image_ptr->width,
+          p010_image_ptr->height, yuv420_image_ptr->width, yuv420_image_ptr->height);
+    return ERROR_JPEGR_RESOLUTION_MISMATCH;
+  }
+  if (yuv420_image_ptr->colorGamut <= ULTRAHDR_COLORGAMUT_UNSPECIFIED ||
+      yuv420_image_ptr->colorGamut > ULTRAHDR_COLORGAMUT_MAX) {
+    ALOGE("Unrecognized 420 color gamut %d", yuv420_image_ptr->colorGamut);
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
+  }
   return NO_ERROR;
 }
 
-status_t JpegR::areInputArgumentsValid(jr_uncompressed_ptr uncompressed_p010_image,
-                                       jr_uncompressed_ptr uncompressed_yuv_420_image,
+status_t JpegR::areInputArgumentsValid(jr_uncompressed_ptr p010_image_ptr,
+                                       jr_uncompressed_ptr yuv420_image_ptr,
                                        ultrahdr_transfer_function hdr_tf,
-                                       jr_compressed_ptr dest,
-                                       int quality) {
-  if (status_t ret = areInputArgumentsValid(
-          uncompressed_p010_image, uncompressed_yuv_420_image, hdr_tf, dest) != NO_ERROR) {
-    return ret;
-  }
-
+                                       jr_compressed_ptr dest_ptr, int quality) {
   if (quality < 0 || quality > 100) {
     ALOGE("quality factor is out side range [0-100], quality factor : %d", quality);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
-
-  return NO_ERROR;
+  return areInputArgumentsValid(p010_image_ptr, yuv420_image_ptr, hdr_tf, dest_ptr);
 }
 
 /* Encode API-0 */
-status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
-                            ultrahdr_transfer_function hdr_tf,
-                            jr_compressed_ptr dest,
-                            int quality,
-                            jr_exif_ptr exif) {
-  if (status_t ret = areInputArgumentsValid(
-          uncompressed_p010_image, /* uncompressed_yuv_420_image */ nullptr,
-          hdr_tf, dest, quality) != NO_ERROR) {
+status_t JpegR::encodeJPEGR(jr_uncompressed_ptr p010_image_ptr, ultrahdr_transfer_function hdr_tf,
+                            jr_compressed_ptr dest, int quality, jr_exif_ptr exif) {
+  // validate input arguments
+  if (auto ret = areInputArgumentsValid(p010_image_ptr, nullptr, hdr_tf, dest, quality);
+      ret != NO_ERROR) {
     return ret;
   }
-
   if (exif != nullptr && exif->data == nullptr) {
     ALOGE("received nullptr for exif metadata");
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
 
-  ultrahdr_metadata_struct metadata;
-  metadata.version = kJpegrVersion;
+  // clean up input structure for later usage
+  jpegr_uncompressed_struct p010_image = *p010_image_ptr;
+  if (p010_image.luma_stride == 0) p010_image.luma_stride = p010_image.width;
+  if (!p010_image.chroma_data) {
+    uint16_t* data = reinterpret_cast<uint16_t*>(p010_image.data);
+    p010_image.chroma_data = data + p010_image.luma_stride * p010_image.height;
+    p010_image.chroma_stride = p010_image.luma_stride;
+  }
 
-  jpegr_uncompressed_struct uncompressed_yuv_420_image;
-  unique_ptr<uint8_t[]> uncompressed_yuv_420_image_data = make_unique<uint8_t[]>(
-      uncompressed_p010_image->width * uncompressed_p010_image->height * 3 / 2);
-  uncompressed_yuv_420_image.data = uncompressed_yuv_420_image_data.get();
-  JPEGR_CHECK(toneMap(uncompressed_p010_image, &uncompressed_yuv_420_image));
+  const int yuv420_luma_stride = ALIGNM(p010_image.width, kJpegBlock);
+  unique_ptr<uint8_t[]> yuv420_image_data =
+          make_unique<uint8_t[]>(yuv420_luma_stride * p010_image.height * 3 / 2);
+  jpegr_uncompressed_struct yuv420_image = {.data = yuv420_image_data.get(),
+                                            .width = p010_image.width,
+                                            .height = p010_image.height,
+                                            .colorGamut = p010_image.colorGamut,
+                                            .luma_stride = yuv420_luma_stride,
+                                            .chroma_data = nullptr,
+                                            .chroma_stride = yuv420_luma_stride >> 1};
+  uint8_t* data = reinterpret_cast<uint8_t*>(yuv420_image.data);
+  yuv420_image.chroma_data = data + yuv420_image.luma_stride * yuv420_image.height;
 
-  jpegr_uncompressed_struct map;
-  JPEGR_CHECK(generateGainMap(
-      &uncompressed_yuv_420_image, uncompressed_p010_image, hdr_tf, &metadata, &map));
+  // tone map
+  JPEGR_CHECK(toneMap(&p010_image, &yuv420_image));
+
+  // gain map
+  ultrahdr_metadata_struct metadata = {.version = kJpegrVersion};
+  jpegr_uncompressed_struct gainmap_image;
+  JPEGR_CHECK(generateGainMap(&yuv420_image, &p010_image, hdr_tf, &metadata, &gainmap_image));
   std::unique_ptr<uint8_t[]> map_data;
-  map_data.reset(reinterpret_cast<uint8_t*>(map.data));
+  map_data.reset(reinterpret_cast<uint8_t*>(gainmap_image.data));
 
-  JpegEncoderHelper jpeg_encoder_gainmap;
-  JPEGR_CHECK(compressGainMap(&map, &jpeg_encoder_gainmap));
-  jpegr_compressed_struct compressed_map;
-  compressed_map.maxLength = jpeg_encoder_gainmap.getCompressedImageSize();
-  compressed_map.length = compressed_map.maxLength;
-  compressed_map.data = jpeg_encoder_gainmap.getCompressedImagePtr();
-  compressed_map.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+  // compress gain map
+  JpegEncoderHelper jpeg_enc_obj_gm;
+  JPEGR_CHECK(compressGainMap(&gainmap_image, &jpeg_enc_obj_gm));
+  jpegr_compressed_struct compressed_map = {.data = jpeg_enc_obj_gm.getCompressedImagePtr(),
+                                            .length = static_cast<int>(
+                                                    jpeg_enc_obj_gm.getCompressedImageSize()),
+                                            .maxLength = static_cast<int>(
+                                                    jpeg_enc_obj_gm.getCompressedImageSize()),
+                                            .colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED};
 
-  sp<DataStruct> icc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
-                                                  uncompressed_yuv_420_image.colorGamut);
+  sp<DataStruct> icc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB, yuv420_image.colorGamut);
 
-  // Convert to Bt601 YUV encoding for JPEG encode
-  JPEGR_CHECK(convertYuv(&uncompressed_yuv_420_image, uncompressed_yuv_420_image.colorGamut,
-                         ULTRAHDR_COLORGAMUT_P3));
+  // convert to Bt601 YUV encoding for JPEG encode
+  if (yuv420_image.colorGamut != ULTRAHDR_COLORGAMUT_P3) {
+    JPEGR_CHECK(convertYuv(&yuv420_image, yuv420_image.colorGamut, ULTRAHDR_COLORGAMUT_P3));
+  }
 
-  JpegEncoderHelper jpeg_encoder;
-  if (!jpeg_encoder.compressImage(uncompressed_yuv_420_image.data,
-                                  uncompressed_yuv_420_image.width,
-                                  uncompressed_yuv_420_image.height, quality,
-                                  icc->getData(), icc->getLength())) {
+  // compress 420 image
+  JpegEncoderHelper jpeg_enc_obj_yuv420;
+  if (!jpeg_enc_obj_yuv420.compressImage(reinterpret_cast<uint8_t*>(yuv420_image.data),
+                                         reinterpret_cast<uint8_t*>(yuv420_image.chroma_data),
+                                         yuv420_image.width, yuv420_image.height,
+                                         yuv420_image.luma_stride, yuv420_image.chroma_stride,
+                                         quality, icc->getData(), icc->getLength())) {
     return ERROR_JPEGR_ENCODE_ERROR;
   }
-  jpegr_compressed_struct jpeg;
-  jpeg.data = jpeg_encoder.getCompressedImagePtr();
-  jpeg.length = jpeg_encoder.getCompressedImageSize();
+  jpegr_compressed_struct jpeg = {.data = jpeg_enc_obj_yuv420.getCompressedImagePtr(),
+                                  .length = static_cast<int>(
+                                          jpeg_enc_obj_yuv420.getCompressedImageSize()),
+                                  .maxLength = static_cast<int>(
+                                          jpeg_enc_obj_yuv420.getCompressedImageSize()),
+                                  .colorGamut = yuv420_image.colorGamut};
 
-  // No ICC since JPEG encode already did it
+  // append gain map, no ICC since JPEG encode already did it
   JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, /* icc */ nullptr, /* icc size */ 0,
                             &metadata, dest));
 
@@ -281,226 +249,277 @@
 }
 
 /* Encode API-1 */
-status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
-                            jr_uncompressed_ptr uncompressed_yuv_420_image,
-                            ultrahdr_transfer_function hdr_tf,
-                            jr_compressed_ptr dest,
-                            int quality,
-                            jr_exif_ptr exif) {
-  if (uncompressed_yuv_420_image == nullptr) {
+status_t JpegR::encodeJPEGR(jr_uncompressed_ptr p010_image_ptr,
+                            jr_uncompressed_ptr yuv420_image_ptr, ultrahdr_transfer_function hdr_tf,
+                            jr_compressed_ptr dest, int quality, jr_exif_ptr exif) {
+  // validate input arguments
+  if (yuv420_image_ptr == nullptr) {
     ALOGE("received nullptr for uncompressed 420 image");
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
   if (exif != nullptr && exif->data == nullptr) {
     ALOGE("received nullptr for exif metadata");
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
-  if (status_t ret = areInputArgumentsValid(
-          uncompressed_p010_image, uncompressed_yuv_420_image, hdr_tf,
-          dest, quality) != NO_ERROR) {
+  if (auto ret = areInputArgumentsValid(p010_image_ptr, yuv420_image_ptr, hdr_tf, dest, quality);
+      ret != NO_ERROR) {
     return ret;
   }
 
-  ultrahdr_metadata_struct metadata;
-  metadata.version = kJpegrVersion;
+  // clean up input structure for later usage
+  jpegr_uncompressed_struct p010_image = *p010_image_ptr;
+  if (p010_image.luma_stride == 0) p010_image.luma_stride = p010_image.width;
+  if (!p010_image.chroma_data) {
+    uint16_t* data = reinterpret_cast<uint16_t*>(p010_image.data);
+    p010_image.chroma_data = data + p010_image.luma_stride * p010_image.height;
+    p010_image.chroma_stride = p010_image.luma_stride;
+  }
+  jpegr_uncompressed_struct yuv420_image = *yuv420_image_ptr;
+  if (yuv420_image.luma_stride == 0) yuv420_image.luma_stride = yuv420_image.width;
+  if (!yuv420_image.chroma_data) {
+    uint8_t* data = reinterpret_cast<uint8_t*>(yuv420_image.data);
+    yuv420_image.chroma_data = data + yuv420_image.luma_stride * yuv420_image.height;
+    yuv420_image.chroma_stride = yuv420_image.luma_stride >> 1;
+  }
 
-  jpegr_uncompressed_struct map;
-  JPEGR_CHECK(generateGainMap(
-      uncompressed_yuv_420_image, uncompressed_p010_image, hdr_tf, &metadata, &map));
+  // gain map
+  ultrahdr_metadata_struct metadata = {.version = kJpegrVersion};
+  jpegr_uncompressed_struct gainmap_image;
+  JPEGR_CHECK(generateGainMap(&yuv420_image, &p010_image, hdr_tf, &metadata, &gainmap_image));
   std::unique_ptr<uint8_t[]> map_data;
-  map_data.reset(reinterpret_cast<uint8_t*>(map.data));
+  map_data.reset(reinterpret_cast<uint8_t*>(gainmap_image.data));
 
-  JpegEncoderHelper jpeg_encoder_gainmap;
-  JPEGR_CHECK(compressGainMap(&map, &jpeg_encoder_gainmap));
-  jpegr_compressed_struct compressed_map;
-  compressed_map.maxLength = jpeg_encoder_gainmap.getCompressedImageSize();
-  compressed_map.length = compressed_map.maxLength;
-  compressed_map.data = jpeg_encoder_gainmap.getCompressedImagePtr();
-  compressed_map.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+  // compress gain map
+  JpegEncoderHelper jpeg_enc_obj_gm;
+  JPEGR_CHECK(compressGainMap(&gainmap_image, &jpeg_enc_obj_gm));
+  jpegr_compressed_struct compressed_map = {.data = jpeg_enc_obj_gm.getCompressedImagePtr(),
+                                            .length = static_cast<int>(
+                                                    jpeg_enc_obj_gm.getCompressedImageSize()),
+                                            .maxLength = static_cast<int>(
+                                                    jpeg_enc_obj_gm.getCompressedImageSize()),
+                                            .colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED};
 
-  sp<DataStruct> icc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
-                                                  uncompressed_yuv_420_image->colorGamut);
+  sp<DataStruct> icc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB, yuv420_image.colorGamut);
 
-  // Convert to Bt601 YUV encoding for JPEG encode; make a copy so as to no clobber client data
-  unique_ptr<uint8_t[]> yuv_420_bt601_data = make_unique<uint8_t[]>(
-      uncompressed_yuv_420_image->width * uncompressed_yuv_420_image->height * 3 / 2);
-  memcpy(yuv_420_bt601_data.get(), uncompressed_yuv_420_image->data,
-         uncompressed_yuv_420_image->width * uncompressed_yuv_420_image->height * 3 / 2);
+  jpegr_uncompressed_struct yuv420_bt601_image = yuv420_image;
+  unique_ptr<uint8_t[]> yuv_420_bt601_data;
+  // Convert to bt601 YUV encoding for JPEG encode
+  if (yuv420_image.colorGamut != ULTRAHDR_COLORGAMUT_P3) {
+    const int yuv_420_bt601_luma_stride = ALIGNM(yuv420_image.width, kJpegBlock);
+    yuv_420_bt601_data =
+            make_unique<uint8_t[]>(yuv_420_bt601_luma_stride * yuv420_image.height * 3 / 2);
+    yuv420_bt601_image.data = yuv_420_bt601_data.get();
+    yuv420_bt601_image.colorGamut = yuv420_image.colorGamut;
+    yuv420_bt601_image.luma_stride = yuv_420_bt601_luma_stride;
+    uint8_t* data = reinterpret_cast<uint8_t*>(yuv420_bt601_image.data);
+    yuv420_bt601_image.chroma_data = data + yuv_420_bt601_luma_stride * yuv420_image.height;
+    yuv420_bt601_image.chroma_stride = yuv_420_bt601_luma_stride >> 1;
 
-  jpegr_uncompressed_struct yuv_420_bt601_image = {
-    yuv_420_bt601_data.get(), uncompressed_yuv_420_image->width, uncompressed_yuv_420_image->height,
-    uncompressed_yuv_420_image->colorGamut };
-  JPEGR_CHECK(convertYuv(&yuv_420_bt601_image, yuv_420_bt601_image.colorGamut,
-                         ULTRAHDR_COLORGAMUT_P3));
+    {
+      // copy luma
+      uint8_t* y_dst = reinterpret_cast<uint8_t*>(yuv420_bt601_image.data);
+      uint8_t* y_src = reinterpret_cast<uint8_t*>(yuv420_image.data);
+      if (yuv420_bt601_image.luma_stride == yuv420_image.luma_stride) {
+        memcpy(y_dst, y_src, yuv420_bt601_image.luma_stride * yuv420_image.height);
+      } else {
+        for (size_t i = 0; i < yuv420_image.height; i++) {
+          memcpy(y_dst, y_src, yuv420_image.width);
+          if (yuv420_image.width != yuv420_bt601_image.luma_stride) {
+            memset(y_dst + yuv420_image.width, 0,
+                   yuv420_bt601_image.luma_stride - yuv420_image.width);
+          }
+          y_dst += yuv420_bt601_image.luma_stride;
+          y_src += yuv420_image.luma_stride;
+        }
+      }
+    }
 
-  JpegEncoderHelper jpeg_encoder;
-  if (!jpeg_encoder.compressImage(yuv_420_bt601_image.data,
-                                  yuv_420_bt601_image.width,
-                                  yuv_420_bt601_image.height, quality,
-                                  icc->getData(), icc->getLength())) {
+    if (yuv420_bt601_image.chroma_stride == yuv420_image.chroma_stride) {
+      // copy chroma (U and V planes)
+      uint8_t* ch_dst = reinterpret_cast<uint8_t*>(yuv420_bt601_image.chroma_data);
+      uint8_t* ch_src = reinterpret_cast<uint8_t*>(yuv420_image.chroma_data);
+      memcpy(ch_dst, ch_src, yuv420_bt601_image.chroma_stride * yuv420_image.height);
+    } else {
+      // copy cb & cr
+      uint8_t* cb_dst = reinterpret_cast<uint8_t*>(yuv420_bt601_image.chroma_data);
+      uint8_t* cb_src = reinterpret_cast<uint8_t*>(yuv420_image.chroma_data);
+      uint8_t* cr_dst = cb_dst + (yuv420_bt601_image.chroma_stride * yuv420_bt601_image.height / 2);
+      uint8_t* cr_src = cb_src + (yuv420_image.chroma_stride * yuv420_image.height / 2);
+      for (size_t i = 0; i < yuv420_image.height / 2; i++) {
+        memcpy(cb_dst, cb_src, yuv420_image.width / 2);
+        memcpy(cr_dst, cr_src, yuv420_image.width / 2);
+        if (yuv420_bt601_image.width / 2 != yuv420_bt601_image.chroma_stride) {
+          memset(cb_dst + yuv420_image.width / 2, 0,
+                 yuv420_bt601_image.chroma_stride - yuv420_image.width / 2);
+          memset(cr_dst + yuv420_image.width / 2, 0,
+                 yuv420_bt601_image.chroma_stride - yuv420_image.width / 2);
+        }
+        cb_dst += yuv420_bt601_image.chroma_stride;
+        cb_src += yuv420_image.chroma_stride;
+        cr_dst += yuv420_bt601_image.chroma_stride;
+        cr_src += yuv420_image.chroma_stride;
+      }
+    }
+    JPEGR_CHECK(convertYuv(&yuv420_bt601_image, yuv420_image.colorGamut, ULTRAHDR_COLORGAMUT_P3));
+  }
+
+  // compress 420 image
+  JpegEncoderHelper jpeg_enc_obj_yuv420;
+  if (!jpeg_enc_obj_yuv420.compressImage(reinterpret_cast<uint8_t*>(yuv420_bt601_image.data),
+                                         reinterpret_cast<uint8_t*>(yuv420_bt601_image.chroma_data),
+                                         yuv420_bt601_image.width, yuv420_bt601_image.height,
+                                         yuv420_bt601_image.luma_stride,
+                                         yuv420_bt601_image.chroma_stride, quality, icc->getData(),
+                                         icc->getLength())) {
     return ERROR_JPEGR_ENCODE_ERROR;
   }
-  jpegr_compressed_struct jpeg;
-  jpeg.data = jpeg_encoder.getCompressedImagePtr();
-  jpeg.length = jpeg_encoder.getCompressedImageSize();
 
-  // No ICC since jpeg encode already did it
+  jpegr_compressed_struct jpeg = {.data = jpeg_enc_obj_yuv420.getCompressedImagePtr(),
+                                  .length = static_cast<int>(
+                                          jpeg_enc_obj_yuv420.getCompressedImageSize()),
+                                  .maxLength = static_cast<int>(
+                                          jpeg_enc_obj_yuv420.getCompressedImageSize()),
+                                  .colorGamut = yuv420_image.colorGamut};
+
+  // append gain map, no ICC since JPEG encode already did it
   JPEGR_CHECK(appendGainMap(&jpeg, &compressed_map, exif, /* icc */ nullptr, /* icc size */ 0,
                             &metadata, dest));
-
   return NO_ERROR;
 }
 
 /* Encode API-2 */
-status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
-                            jr_uncompressed_ptr uncompressed_yuv_420_image,
-                            jr_compressed_ptr compressed_jpeg_image,
-                            ultrahdr_transfer_function hdr_tf,
-                            jr_compressed_ptr dest) {
-  if (uncompressed_yuv_420_image == nullptr) {
+status_t JpegR::encodeJPEGR(jr_uncompressed_ptr p010_image_ptr,
+                            jr_uncompressed_ptr yuv420_image_ptr,
+                            jr_compressed_ptr yuv420jpg_image_ptr,
+                            ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest) {
+  // validate input arguments
+  if (yuv420_image_ptr == nullptr) {
     ALOGE("received nullptr for uncompressed 420 image");
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
-  if (compressed_jpeg_image == nullptr || compressed_jpeg_image->data == nullptr) {
+  if (yuv420jpg_image_ptr == nullptr || yuv420jpg_image_ptr->data == nullptr) {
     ALOGE("received nullptr for compressed jpeg image");
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
-  if (status_t ret = areInputArgumentsValid(
-          uncompressed_p010_image, uncompressed_yuv_420_image, hdr_tf, dest) != NO_ERROR) {
+  if (auto ret = areInputArgumentsValid(p010_image_ptr, yuv420_image_ptr, hdr_tf, dest);
+      ret != NO_ERROR) {
     return ret;
   }
 
-  ultrahdr_metadata_struct metadata;
-  metadata.version = kJpegrVersion;
-
-  jpegr_uncompressed_struct map;
-  JPEGR_CHECK(generateGainMap(
-      uncompressed_yuv_420_image, uncompressed_p010_image, hdr_tf, &metadata, &map));
-  std::unique_ptr<uint8_t[]> map_data;
-  map_data.reset(reinterpret_cast<uint8_t*>(map.data));
-
-  JpegEncoderHelper jpeg_encoder_gainmap;
-  JPEGR_CHECK(compressGainMap(&map, &jpeg_encoder_gainmap));
-  jpegr_compressed_struct compressed_map;
-  compressed_map.maxLength = jpeg_encoder_gainmap.getCompressedImageSize();
-  compressed_map.length = compressed_map.maxLength;
-  compressed_map.data = jpeg_encoder_gainmap.getCompressedImagePtr();
-  compressed_map.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
-
-  // We just want to check if ICC is present, so don't do a full decode. Note,
-  // this doesn't verify that the ICC is valid.
-  JpegDecoderHelper decoder;
-  std::vector<uint8_t> icc;
-  decoder.getCompressedImageParameters(compressed_jpeg_image->data, compressed_jpeg_image->length,
-                                       /* pWidth */ nullptr, /* pHeight */ nullptr,
-                                       &icc, /* exifData */ nullptr);
-
-  // Add ICC if not already present.
-  if (icc.size() > 0) {
-      JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
-                                /* icc */ nullptr, /* icc size */ 0, &metadata, dest));
-  } else {
-      sp<DataStruct> newIcc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
-                                                         uncompressed_yuv_420_image->colorGamut);
-      JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
-                                newIcc->getData(), newIcc->getLength(), &metadata, dest));
+  // clean up input structure for later usage
+  jpegr_uncompressed_struct p010_image = *p010_image_ptr;
+  if (p010_image.luma_stride == 0) p010_image.luma_stride = p010_image.width;
+  if (!p010_image.chroma_data) {
+    uint16_t* data = reinterpret_cast<uint16_t*>(p010_image.data);
+    p010_image.chroma_data = data + p010_image.luma_stride * p010_image.height;
+    p010_image.chroma_stride = p010_image.luma_stride;
+  }
+  jpegr_uncompressed_struct yuv420_image = *yuv420_image_ptr;
+  if (yuv420_image.luma_stride == 0) yuv420_image.luma_stride = yuv420_image.width;
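+  // The 420 input is planar: when no chroma pointer is supplied, the U plane is assumed to start
+  // right after the Y plane, with a chroma stride of half the luma stride.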
+  if (!yuv420_image.chroma_data) {
+    uint8_t* data = reinterpret_cast<uint8_t*>(yuv420_image.data);
+    yuv420_image.chroma_data = data + yuv420_image.luma_stride * yuv420_image.height;
+    yuv420_image.chroma_stride = yuv420_image.luma_stride >> 1;
   }
 
-  return NO_ERROR;
+  // gain map
+  ultrahdr_metadata_struct metadata = {.version = kJpegrVersion};
+  jpegr_uncompressed_struct gainmap_image;
+  JPEGR_CHECK(generateGainMap(&yuv420_image, &p010_image, hdr_tf, &metadata, &gainmap_image));
+  std::unique_ptr<uint8_t[]> map_data;
+  map_data.reset(reinterpret_cast<uint8_t*>(gainmap_image.data));
+
+  // compress gain map
+  JpegEncoderHelper jpeg_enc_obj_gm;
+  JPEGR_CHECK(compressGainMap(&gainmap_image, &jpeg_enc_obj_gm));
+  jpegr_compressed_struct gainmapjpg_image = {.data = jpeg_enc_obj_gm.getCompressedImagePtr(),
+                                              .length = static_cast<int>(
+                                                      jpeg_enc_obj_gm.getCompressedImageSize()),
+                                              .maxLength = static_cast<int>(
+                                                      jpeg_enc_obj_gm.getCompressedImageSize()),
+                                              .colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED};
+
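+  // hand the compressed primary image, gain map and metadata to the API-4 entry point below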
+  return encodeJPEGR(yuv420jpg_image_ptr, &gainmapjpg_image, &metadata, dest);
 }
 
 /* Encode API-3 */
-status_t JpegR::encodeJPEGR(jr_uncompressed_ptr uncompressed_p010_image,
-                            jr_compressed_ptr compressed_jpeg_image,
-                            ultrahdr_transfer_function hdr_tf,
-                            jr_compressed_ptr dest) {
-  if (compressed_jpeg_image == nullptr || compressed_jpeg_image->data == nullptr) {
+status_t JpegR::encodeJPEGR(jr_uncompressed_ptr p010_image_ptr,
+                            jr_compressed_ptr yuv420jpg_image_ptr,
+                            ultrahdr_transfer_function hdr_tf, jr_compressed_ptr dest) {
+  // validate input arguments
+  if (yuv420jpg_image_ptr == nullptr || yuv420jpg_image_ptr->data == nullptr) {
     ALOGE("received nullptr for compressed jpeg image");
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
-  if (status_t ret = areInputArgumentsValid(
-          uncompressed_p010_image, /* uncompressed_yuv_420_image */ nullptr,
-          hdr_tf, dest) != NO_ERROR) {
+  if (auto ret = areInputArgumentsValid(p010_image_ptr, nullptr, hdr_tf, dest); ret != NO_ERROR) {
     return ret;
   }
 
-  // Note: output is Bt.601 YUV encoded regardless of gamut, due to jpeg decode.
-  JpegDecoderHelper jpeg_decoder;
-  if (!jpeg_decoder.decompressImage(compressed_jpeg_image->data, compressed_jpeg_image->length)) {
+  // make a local copy and fill in any missing stride and chroma-pointer fields for later use
+  jpegr_uncompressed_struct p010_image = *p010_image_ptr;
+  if (p010_image.luma_stride == 0) p010_image.luma_stride = p010_image.width;
+  if (!p010_image.chroma_data) {
+    uint16_t* data = reinterpret_cast<uint16_t*>(p010_image.data);
+    p010_image.chroma_data = data + p010_image.luma_stride * p010_image.height;
+    p010_image.chroma_stride = p010_image.luma_stride;
+  }
+
+  // decode the input jpeg; decoded output uses Bt.601 YUV coefficients regardless of gamut.
+  JpegDecoderHelper jpeg_dec_obj_yuv420;
+  if (!jpeg_dec_obj_yuv420.decompressImage(yuv420jpg_image_ptr->data,
+                                           yuv420jpg_image_ptr->length)) {
     return ERROR_JPEGR_DECODE_ERROR;
   }
-  jpegr_uncompressed_struct uncompressed_yuv_420_image;
-  uncompressed_yuv_420_image.data = jpeg_decoder.getDecompressedImagePtr();
-  uncompressed_yuv_420_image.width = jpeg_decoder.getDecompressedImageWidth();
-  uncompressed_yuv_420_image.height = jpeg_decoder.getDecompressedImageHeight();
-  uncompressed_yuv_420_image.colorGamut = compressed_jpeg_image->colorGamut;
+  jpegr_uncompressed_struct yuv420_image{};
+  yuv420_image.data = jpeg_dec_obj_yuv420.getDecompressedImagePtr();
+  yuv420_image.width = jpeg_dec_obj_yuv420.getDecompressedImageWidth();
+  yuv420_image.height = jpeg_dec_obj_yuv420.getDecompressedImageHeight();
+  yuv420_image.colorGamut = yuv420jpg_image_ptr->colorGamut;
+  if (yuv420_image.luma_stride == 0) yuv420_image.luma_stride = yuv420_image.width;
+  if (!yuv420_image.chroma_data) {
+    uint8_t* data = reinterpret_cast<uint8_t*>(yuv420_image.data);
+    yuv420_image.chroma_data = data + yuv420_image.luma_stride * yuv420_image.height;
+    yuv420_image.chroma_stride = yuv420_image.luma_stride >> 1;
+  }
 
-  if (uncompressed_p010_image->width != uncompressed_yuv_420_image.width
-   || uncompressed_p010_image->height != uncompressed_yuv_420_image.height) {
+  if (p010_image_ptr->width != yuv420_image.width ||
+      p010_image_ptr->height != yuv420_image.height) {
     return ERROR_JPEGR_RESOLUTION_MISMATCH;
   }
 
-  ultrahdr_metadata_struct metadata;
-  metadata.version = kJpegrVersion;
-
-  jpegr_uncompressed_struct map;
-  // Indicate that the SDR image is Bt.601 YUV encoded.
-  JPEGR_CHECK(generateGainMap(
-      &uncompressed_yuv_420_image, uncompressed_p010_image, hdr_tf, &metadata, &map,
-      true /* sdr_is_601 */ ));
+  // gain map
+  ultrahdr_metadata_struct metadata = {.version = kJpegrVersion};
+  jpegr_uncompressed_struct gainmap_image;
+  JPEGR_CHECK(generateGainMap(&yuv420_image, &p010_image, hdr_tf, &metadata, &gainmap_image,
+                              true /* sdr_is_601 */));
   std::unique_ptr<uint8_t[]> map_data;
-  map_data.reset(reinterpret_cast<uint8_t*>(map.data));
+  map_data.reset(reinterpret_cast<uint8_t*>(gainmap_image.data));
 
-  JpegEncoderHelper jpeg_encoder_gainmap;
-  JPEGR_CHECK(compressGainMap(&map, &jpeg_encoder_gainmap));
-  jpegr_compressed_struct compressed_map;
-  compressed_map.maxLength = jpeg_encoder_gainmap.getCompressedImageSize();
-  compressed_map.length = compressed_map.maxLength;
-  compressed_map.data = jpeg_encoder_gainmap.getCompressedImagePtr();
-  compressed_map.colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
+  // compress gain map
+  JpegEncoderHelper jpeg_enc_obj_gm;
+  JPEGR_CHECK(compressGainMap(&gainmap_image, &jpeg_enc_obj_gm));
+  jpegr_compressed_struct gainmapjpg_image = {.data = jpeg_enc_obj_gm.getCompressedImagePtr(),
+                                              .length = static_cast<int>(
+                                                      jpeg_enc_obj_gm.getCompressedImageSize()),
+                                              .maxLength = static_cast<int>(
+                                                      jpeg_enc_obj_gm.getCompressedImageSize()),
+                                              .colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED};
 
-  // We just want to check if ICC is present, so don't do a full decode. Note,
-  // this doesn't verify that the ICC is valid.
-  JpegDecoderHelper decoder;
-  std::vector<uint8_t> icc;
-  decoder.getCompressedImageParameters(compressed_jpeg_image->data, compressed_jpeg_image->length,
-                                       /* pWidth */ nullptr, /* pHeight */ nullptr,
-                                       &icc, /* exifData */ nullptr);
-
-  // Add ICC if not already present.
-  if (icc.size() > 0) {
-      JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
-                                /* icc */ nullptr, /* icc size */ 0, &metadata, dest));
-  } else {
-      sp<DataStruct> newIcc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
-                                                         uncompressed_yuv_420_image.colorGamut);
-      JPEGR_CHECK(appendGainMap(compressed_jpeg_image, &compressed_map, /* exif */ nullptr,
-                                newIcc->getData(), newIcc->getLength(), &metadata, dest));
-  }
-
-  return NO_ERROR;
+  return encodeJPEGR(yuv420jpg_image_ptr, &gainmapjpg_image, &metadata, dest);
 }
 
 /* Encode API-4 */
-status_t JpegR::encodeJPEGR(jr_compressed_ptr compressed_jpeg_image,
-                            jr_compressed_ptr compressed_gainmap,
-                            ultrahdr_metadata_ptr metadata,
+status_t JpegR::encodeJPEGR(jr_compressed_ptr yuv420jpg_image_ptr,
+                            jr_compressed_ptr gainmapjpg_image_ptr, ultrahdr_metadata_ptr metadata,
                             jr_compressed_ptr dest) {
-  if (compressed_jpeg_image == nullptr || compressed_jpeg_image->data == nullptr) {
+  if (yuv420jpg_image_ptr == nullptr || yuv420jpg_image_ptr->data == nullptr) {
     ALOGE("received nullptr for compressed jpeg image");
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
-  if (compressed_gainmap == nullptr || compressed_gainmap->data == nullptr) {
+  if (gainmapjpg_image_ptr == nullptr || gainmapjpg_image_ptr->data == nullptr) {
     ALOGE("received nullptr for compressed gain map");
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
   if (dest == nullptr || dest->data == nullptr) {
     ALOGE("received nullptr for destination");
     return ERROR_JPEGR_INVALID_NULL_PTR;
@@ -510,46 +529,46 @@
   // this doesn't verify that the ICC is valid.
   JpegDecoderHelper decoder;
   std::vector<uint8_t> icc;
-  decoder.getCompressedImageParameters(compressed_jpeg_image->data, compressed_jpeg_image->length,
-                                       /* pWidth */ nullptr, /* pHeight */ nullptr,
-                                       &icc, /* exifData */ nullptr);
+  decoder.getCompressedImageParameters(yuv420jpg_image_ptr->data, yuv420jpg_image_ptr->length,
+                                       /* pWidth */ nullptr, /* pHeight */ nullptr, &icc,
+                                       /* exifData */ nullptr);
 
   // Add ICC if not already present.
   if (icc.size() > 0) {
-      JPEGR_CHECK(appendGainMap(compressed_jpeg_image, compressed_gainmap, /* exif */ nullptr,
-                                /* icc */ nullptr, /* icc size */ 0, metadata, dest));
+    JPEGR_CHECK(appendGainMap(yuv420jpg_image_ptr, gainmapjpg_image_ptr, /* exif */ nullptr,
+                              /* icc */ nullptr, /* icc size */ 0, metadata, dest));
   } else {
-      sp<DataStruct> newIcc = IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB,
-                                                         compressed_jpeg_image->colorGamut);
-      JPEGR_CHECK(appendGainMap(compressed_jpeg_image, compressed_gainmap, /* exif */ nullptr,
-                                newIcc->getData(), newIcc->getLength(), metadata, dest));
+    sp<DataStruct> newIcc =
+            IccHelper::writeIccProfile(ULTRAHDR_TF_SRGB, yuv420jpg_image_ptr->colorGamut);
+    JPEGR_CHECK(appendGainMap(yuv420jpg_image_ptr, gainmapjpg_image_ptr, /* exif */ nullptr,
+                              newIcc->getData(), newIcc->getLength(), metadata, dest));
   }
 
   return NO_ERROR;
 }
 
-status_t JpegR::getJPEGRInfo(jr_compressed_ptr compressed_jpegr_image, jr_info_ptr jpegr_info) {
-  if (compressed_jpegr_image == nullptr || compressed_jpegr_image->data == nullptr) {
+status_t JpegR::getJPEGRInfo(jr_compressed_ptr jpegr_image_ptr, jr_info_ptr jpeg_image_info_ptr) {
+  if (jpegr_image_ptr == nullptr || jpegr_image_ptr->data == nullptr) {
     ALOGE("received nullptr for compressed jpegr image");
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
-  if (jpegr_info == nullptr) {
+  if (jpeg_image_info_ptr == nullptr) {
     ALOGE("received nullptr for compressed jpegr info struct");
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
 
   jpegr_compressed_struct primary_image, gainmap_image;
-  status_t status =
-      extractPrimaryImageAndGainMap(compressed_jpegr_image, &primary_image, &gainmap_image);
+  status_t status = extractPrimaryImageAndGainMap(jpegr_image_ptr, &primary_image, &gainmap_image);
   if (status != NO_ERROR && status != ERROR_JPEGR_GAIN_MAP_IMAGE_NOT_FOUND) {
     return status;
   }
 
-  JpegDecoderHelper jpeg_decoder;
-  if (!jpeg_decoder.getCompressedImageParameters(primary_image.data, primary_image.length,
-                                                 &jpegr_info->width, &jpegr_info->height,
-                                                 jpegr_info->iccData, jpegr_info->exifData)) {
+  JpegDecoderHelper jpeg_dec_obj_hdr;
+  if (!jpeg_dec_obj_hdr.getCompressedImageParameters(primary_image.data, primary_image.length,
+                                                     &jpeg_image_info_ptr->width,
+                                                     &jpeg_image_info_ptr->height,
+                                                     jpeg_image_info_ptr->iccData,
+                                                     jpeg_image_info_ptr->exifData)) {
     return ERROR_JPEGR_DECODE_ERROR;
   }
 
@@ -557,41 +576,34 @@
 }
 
 /* Decode API */
-status_t JpegR::decodeJPEGR(jr_compressed_ptr compressed_jpegr_image,
-                            jr_uncompressed_ptr dest,
-                            float max_display_boost,
-                            jr_exif_ptr exif,
+status_t JpegR::decodeJPEGR(jr_compressed_ptr jpegr_image_ptr, jr_uncompressed_ptr dest,
+                            float max_display_boost, jr_exif_ptr exif,
                             ultrahdr_output_format output_format,
-                            jr_uncompressed_ptr gain_map,
-                            ultrahdr_metadata_ptr metadata) {
-  if (compressed_jpegr_image == nullptr || compressed_jpegr_image->data == nullptr) {
+                            jr_uncompressed_ptr gainmap_image_ptr, ultrahdr_metadata_ptr metadata) {
+  if (jpegr_image_ptr == nullptr || jpegr_image_ptr->data == nullptr) {
     ALOGE("received nullptr for compressed jpegr image");
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
   if (dest == nullptr || dest->data == nullptr) {
     ALOGE("received nullptr for dest image");
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
   if (max_display_boost < 1.0f) {
     ALOGE("received bad value for max_display_boost %f", max_display_boost);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
-
   if (exif != nullptr && exif->data == nullptr) {
     ALOGE("received nullptr address for exif data");
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
-
   if (output_format <= ULTRAHDR_OUTPUT_UNSPECIFIED || output_format > ULTRAHDR_OUTPUT_MAX) {
     ALOGE("received bad value for output format %d", output_format);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
 
-  jpegr_compressed_struct primary_image, gainmap_image;
+  jpegr_compressed_struct primary_jpeg_image, gainmap_jpeg_image;
   status_t status =
-      extractPrimaryImageAndGainMap(compressed_jpegr_image, &primary_image, &gainmap_image);
+          extractPrimaryImageAndGainMap(jpegr_image_ptr, &primary_jpeg_image, &gainmap_jpeg_image);
   if (status != NO_ERROR) {
     if (output_format != ULTRAHDR_OUTPUT_SDR || status != ERROR_JPEGR_GAIN_MAP_IMAGE_NOT_FOUND) {
       ALOGE("received invalid compressed jpegr image");
@@ -599,22 +611,22 @@
     }
   }
 
-  JpegDecoderHelper jpeg_decoder;
-  if (!jpeg_decoder.decompressImage(primary_image.data, primary_image.length,
-                                    (output_format == ULTRAHDR_OUTPUT_SDR))) {
+  JpegDecoderHelper jpeg_dec_obj_yuv420;
+  if (!jpeg_dec_obj_yuv420.decompressImage(primary_jpeg_image.data, primary_jpeg_image.length,
+                                           (output_format == ULTRAHDR_OUTPUT_SDR))) {
     return ERROR_JPEGR_DECODE_ERROR;
   }
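+  // SDR output decodes the primary image straight to RGBA8888 (4 bytes per pixel); HDR outputs
+  // keep the decoded YUV420 planes (3/2 bytes per pixel) so the gain map can be applied.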
 
   if (output_format == ULTRAHDR_OUTPUT_SDR) {
-    if ((jpeg_decoder.getDecompressedImageWidth() *
-         jpeg_decoder.getDecompressedImageHeight() * 4) >
-        jpeg_decoder.getDecompressedImageSize()) {
+    if ((jpeg_dec_obj_yuv420.getDecompressedImageWidth() *
+         jpeg_dec_obj_yuv420.getDecompressedImageHeight() * 4) >
+        jpeg_dec_obj_yuv420.getDecompressedImageSize()) {
       return ERROR_JPEGR_CALCULATION_ERROR;
     }
   } else {
-    if ((jpeg_decoder.getDecompressedImageWidth() *
-         jpeg_decoder.getDecompressedImageHeight() * 3 / 2) >
-        jpeg_decoder.getDecompressedImageSize()) {
+    if ((jpeg_dec_obj_yuv420.getDecompressedImageWidth() *
+         jpeg_dec_obj_yuv420.getDecompressedImageHeight() * 3 / 2) >
+        jpeg_dec_obj_yuv420.getDecompressedImageSize()) {
       return ERROR_JPEGR_CALCULATION_ERROR;
     }
   }
@@ -623,46 +635,46 @@
     if (exif->data == nullptr) {
       return ERROR_JPEGR_INVALID_NULL_PTR;
     }
-    if (exif->length < jpeg_decoder.getEXIFSize()) {
+    if (exif->length < jpeg_dec_obj_yuv420.getEXIFSize()) {
       return ERROR_JPEGR_BUFFER_TOO_SMALL;
     }
-    memcpy(exif->data, jpeg_decoder.getEXIFPtr(), jpeg_decoder.getEXIFSize());
-    exif->length = jpeg_decoder.getEXIFSize();
+    memcpy(exif->data, jpeg_dec_obj_yuv420.getEXIFPtr(), jpeg_dec_obj_yuv420.getEXIFSize());
+    exif->length = jpeg_dec_obj_yuv420.getEXIFSize();
   }
 
   if (output_format == ULTRAHDR_OUTPUT_SDR) {
-    dest->width = jpeg_decoder.getDecompressedImageWidth();
-    dest->height = jpeg_decoder.getDecompressedImageHeight();
-    memcpy(dest->data, jpeg_decoder.getDecompressedImagePtr(), dest->width * dest->height * 4);
+    dest->width = jpeg_dec_obj_yuv420.getDecompressedImageWidth();
+    dest->height = jpeg_dec_obj_yuv420.getDecompressedImageHeight();
+    memcpy(dest->data, jpeg_dec_obj_yuv420.getDecompressedImagePtr(),
+           dest->width * dest->height * 4);
     return NO_ERROR;
   }
 
-  JpegDecoderHelper gain_map_decoder;
-  if (!gain_map_decoder.decompressImage(gainmap_image.data, gainmap_image.length)) {
+  JpegDecoderHelper jpeg_dec_obj_gm;
+  if (!jpeg_dec_obj_gm.decompressImage(gainmap_jpeg_image.data, gainmap_jpeg_image.length)) {
     return ERROR_JPEGR_DECODE_ERROR;
   }
-  if ((gain_map_decoder.getDecompressedImageWidth() *
-       gain_map_decoder.getDecompressedImageHeight()) >
-      gain_map_decoder.getDecompressedImageSize()) {
+  if ((jpeg_dec_obj_gm.getDecompressedImageWidth() * jpeg_dec_obj_gm.getDecompressedImageHeight()) >
+      jpeg_dec_obj_gm.getDecompressedImageSize()) {
     return ERROR_JPEGR_CALCULATION_ERROR;
   }
 
-  jpegr_uncompressed_struct map;
-  map.data = gain_map_decoder.getDecompressedImagePtr();
-  map.width = gain_map_decoder.getDecompressedImageWidth();
-  map.height = gain_map_decoder.getDecompressedImageHeight();
+  jpegr_uncompressed_struct gainmap_image;
+  gainmap_image.data = jpeg_dec_obj_gm.getDecompressedImagePtr();
+  gainmap_image.width = jpeg_dec_obj_gm.getDecompressedImageWidth();
+  gainmap_image.height = jpeg_dec_obj_gm.getDecompressedImageHeight();
 
-  if (gain_map != nullptr) {
-    gain_map->width = map.width;
-    gain_map->height = map.height;
-    int size = gain_map->width * gain_map->height;
-    gain_map->data = malloc(size);
-    memcpy(gain_map->data, map.data, size);
+  if (gainmap_image_ptr != nullptr) {
+    gainmap_image_ptr->width = gainmap_image.width;
+    gainmap_image_ptr->height = gainmap_image.height;
+    int size = gainmap_image_ptr->width * gainmap_image_ptr->height;
+    gainmap_image_ptr->data = malloc(size);
+    memcpy(gainmap_image_ptr->data, gainmap_image.data, size);
   }
 
   ultrahdr_metadata_struct uhdr_metadata;
-  if (!getMetadataFromXMP(static_cast<uint8_t*>(gain_map_decoder.getXMPPtr()),
-                          gain_map_decoder.getXMPSize(), &uhdr_metadata)) {
+  if (!getMetadataFromXMP(static_cast<uint8_t*>(jpeg_dec_obj_gm.getXMPPtr()),
+                          jpeg_dec_obj_gm.getXMPSize(), &uhdr_metadata)) {
     return ERROR_JPEGR_INVALID_METADATA;
   }
 
@@ -677,32 +689,33 @@
     metadata->hdrCapacityMax = uhdr_metadata.hdrCapacityMax;
   }
 
-  jpegr_uncompressed_struct uncompressed_yuv_420_image;
-  uncompressed_yuv_420_image.data = jpeg_decoder.getDecompressedImagePtr();
-  uncompressed_yuv_420_image.width = jpeg_decoder.getDecompressedImageWidth();
-  uncompressed_yuv_420_image.height = jpeg_decoder.getDecompressedImageHeight();
-  uncompressed_yuv_420_image.colorGamut = IccHelper::readIccColorGamut(
-      jpeg_decoder.getICCPtr(), jpeg_decoder.getICCSize());
+  jpegr_uncompressed_struct yuv420_image;
+  yuv420_image.data = jpeg_dec_obj_yuv420.getDecompressedImagePtr();
+  yuv420_image.width = jpeg_dec_obj_yuv420.getDecompressedImageWidth();
+  yuv420_image.height = jpeg_dec_obj_yuv420.getDecompressedImageHeight();
+  yuv420_image.colorGamut = IccHelper::readIccColorGamut(jpeg_dec_obj_yuv420.getICCPtr(),
+                                                         jpeg_dec_obj_yuv420.getICCSize());
+  yuv420_image.luma_stride = yuv420_image.width;
+  uint8_t* data = reinterpret_cast<uint8_t*>(yuv420_image.data);
+  yuv420_image.chroma_data = data + yuv420_image.luma_stride * yuv420_image.height;
+  yuv420_image.chroma_stride = yuv420_image.width >> 1;
 
-  JPEGR_CHECK(applyGainMap(&uncompressed_yuv_420_image, &map, &uhdr_metadata, output_format,
+  JPEGR_CHECK(applyGainMap(&yuv420_image, &gainmap_image, &uhdr_metadata, output_format,
                            max_display_boost, dest));
   return NO_ERROR;
 }
 
-status_t JpegR::compressGainMap(jr_uncompressed_ptr uncompressed_gain_map,
-                                JpegEncoderHelper* jpeg_encoder) {
-  if (uncompressed_gain_map == nullptr || jpeg_encoder == nullptr) {
+status_t JpegR::compressGainMap(jr_uncompressed_ptr gainmap_image_ptr,
+                                JpegEncoderHelper* jpeg_enc_obj_ptr) {
+  if (gainmap_image_ptr == nullptr || jpeg_enc_obj_ptr == nullptr) {
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
 
   // Don't need to convert YUV to Bt601 since single channel
-  if (!jpeg_encoder->compressImage(uncompressed_gain_map->data,
-                                   uncompressed_gain_map->width,
-                                   uncompressed_gain_map->height,
-                                   kMapCompressQuality,
-                                   nullptr,
-                                   0,
-                                   true /* isSingleChannel */)) {
+  if (!jpeg_enc_obj_ptr->compressImage(reinterpret_cast<uint8_t*>(gainmap_image_ptr->data), nullptr,
+                                       gainmap_image_ptr->width, gainmap_image_ptr->height,
+                                       gainmap_image_ptr->luma_stride, 0, kMapCompressQuality,
+                                       nullptr, 0)) {
     return ERROR_JPEGR_ENCODE_ERROR;
   }
 
@@ -714,13 +727,13 @@
               "align job size to kMapDimensionScaleFactor");
 
 class JobQueue {
- public:
+public:
   bool dequeueJob(size_t& rowStart, size_t& rowEnd);
   void enqueueJob(size_t rowStart, size_t rowEnd);
   void markQueueForEnd();
   void reset();
 
- private:
+private:
   bool mQueuedAllJobs = false;
   std::deque<std::tuple<size_t, size_t>> mJobs;
   std::mutex mMutex;
@@ -767,41 +780,39 @@
   mQueuedAllJobs = false;
 }
 
-status_t JpegR::generateGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
-                                jr_uncompressed_ptr uncompressed_p010_image,
-                                ultrahdr_transfer_function hdr_tf,
-                                ultrahdr_metadata_ptr metadata,
-                                jr_uncompressed_ptr dest,
-                                bool sdr_is_601) {
-  if (uncompressed_yuv_420_image == nullptr
-   || uncompressed_p010_image == nullptr
-   || metadata == nullptr
-   || dest == nullptr) {
+status_t JpegR::generateGainMap(jr_uncompressed_ptr yuv420_image_ptr,
+                                jr_uncompressed_ptr p010_image_ptr,
+                                ultrahdr_transfer_function hdr_tf, ultrahdr_metadata_ptr metadata,
+                                jr_uncompressed_ptr dest, bool sdr_is_601) {
+  if (yuv420_image_ptr == nullptr || p010_image_ptr == nullptr || metadata == nullptr ||
+      dest == nullptr || yuv420_image_ptr->data == nullptr ||
+      yuv420_image_ptr->chroma_data == nullptr || p010_image_ptr->data == nullptr ||
+      p010_image_ptr->chroma_data == nullptr) {
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
-  if (uncompressed_yuv_420_image->width != uncompressed_p010_image->width
-   || uncompressed_yuv_420_image->height != uncompressed_p010_image->height) {
+  if (yuv420_image_ptr->width != p010_image_ptr->width ||
+      yuv420_image_ptr->height != p010_image_ptr->height) {
     return ERROR_JPEGR_RESOLUTION_MISMATCH;
   }
-
-  if (uncompressed_yuv_420_image->colorGamut == ULTRAHDR_COLORGAMUT_UNSPECIFIED
-   || uncompressed_p010_image->colorGamut == ULTRAHDR_COLORGAMUT_UNSPECIFIED) {
+  if (yuv420_image_ptr->colorGamut == ULTRAHDR_COLORGAMUT_UNSPECIFIED ||
+      p010_image_ptr->colorGamut == ULTRAHDR_COLORGAMUT_UNSPECIFIED) {
     return ERROR_JPEGR_INVALID_COLORGAMUT;
   }
 
-  size_t image_width = uncompressed_yuv_420_image->width;
-  size_t image_height = uncompressed_yuv_420_image->height;
-  size_t map_width = image_width / kMapDimensionScaleFactor;
-  size_t map_height = image_height / kMapDimensionScaleFactor;
-  size_t map_stride = static_cast<size_t>(
-          floor((map_width + kJpegBlock - 1) / kJpegBlock)) * kJpegBlock;
-  size_t map_height_aligned = ((map_height + 1) >> 1) << 1;
+  size_t image_width = yuv420_image_ptr->width;
+  size_t image_height = yuv420_image_ptr->height;
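+  // The gain map dimensions are the ceiling of image dimensions / kMapDimensionScaleFactor, so
+  // images whose sides are not multiples of the scale factor are still fully covered.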
+  size_t map_width = static_cast<size_t>(
+          floor((image_width + kMapDimensionScaleFactor - 1) / kMapDimensionScaleFactor));
+  size_t map_height = static_cast<size_t>(
+          floor((image_height + kMapDimensionScaleFactor - 1) / kMapDimensionScaleFactor));
 
-  dest->width = map_stride;
-  dest->height = map_height_aligned;
+  dest->data = new uint8_t[map_width * map_height];
+  dest->width = map_width;
+  dest->height = map_height;
   dest->colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED;
-  dest->data = new uint8_t[map_stride * map_height_aligned];
+  dest->luma_stride = map_width;
+  dest->chroma_data = nullptr;
+  dest->chroma_stride = 0;
   std::unique_ptr<uint8_t[]> map_data;
   map_data.reset(reinterpret_cast<uint8_t*>(dest->data));
 
@@ -843,12 +854,12 @@
   float log2MinBoost = log2(metadata->minContentBoost);
   float log2MaxBoost = log2(metadata->maxContentBoost);
 
-  ColorTransformFn hdrGamutConversionFn = getHdrConversionFn(
-      uncompressed_yuv_420_image->colorGamut, uncompressed_p010_image->colorGamut);
+  ColorTransformFn hdrGamutConversionFn =
+          getHdrConversionFn(yuv420_image_ptr->colorGamut, p010_image_ptr->colorGamut);
 
   ColorCalculationFn luminanceFn = nullptr;
   ColorTransformFn sdrYuvToRgbFn = nullptr;
-  switch (uncompressed_yuv_420_image->colorGamut) {
+  switch (yuv420_image_ptr->colorGamut) {
     case ULTRAHDR_COLORGAMUT_BT709:
       luminanceFn = srgbLuminance;
       sdrYuvToRgbFn = srgbYuvToRgb;
@@ -870,7 +881,7 @@
   }
 
   ColorTransformFn hdrYuvToRgbFn = nullptr;
-  switch (uncompressed_p010_image->colorGamut) {
+  switch (p010_image_ptr->colorGamut) {
     case ULTRAHDR_COLORGAMUT_BT709:
       hdrYuvToRgbFn = srgbYuvToRgb;
       break;
@@ -890,18 +901,15 @@
   size_t rowStep = threads == 1 ? image_height : kJobSzInRows;
   JobQueue jobQueue;
 
-  std::function<void()> generateMap = [uncompressed_yuv_420_image, uncompressed_p010_image,
-                                       metadata, dest, hdrInvOetf, hdrGamutConversionFn,
-                                       luminanceFn, sdrYuvToRgbFn, hdrYuvToRgbFn, hdr_white_nits,
-                                       log2MinBoost, log2MaxBoost, &jobQueue]() -> void {
+  std::function<void()> generateMap = [yuv420_image_ptr, p010_image_ptr, metadata, dest, hdrInvOetf,
+                                       hdrGamutConversionFn, luminanceFn, sdrYuvToRgbFn,
+                                       hdrYuvToRgbFn, hdr_white_nits, log2MinBoost, log2MaxBoost,
+                                       &jobQueue]() -> void {
     size_t rowStart, rowEnd;
-    size_t dest_map_width = uncompressed_yuv_420_image->width / kMapDimensionScaleFactor;
-    size_t dest_map_stride = dest->width;
     while (jobQueue.dequeueJob(rowStart, rowEnd)) {
       for (size_t y = rowStart; y < rowEnd; ++y) {
-        for (size_t x = 0; x < dest_map_width; ++x) {
-          Color sdr_yuv_gamma =
-              sampleYuv420(uncompressed_yuv_420_image, kMapDimensionScaleFactor, x, y);
+        for (size_t x = 0; x < dest->width; ++x) {
+          Color sdr_yuv_gamma = sampleYuv420(yuv420_image_ptr, kMapDimensionScaleFactor, x, y);
           Color sdr_rgb_gamma = sdrYuvToRgbFn(sdr_yuv_gamma);
           // We are assuming the SDR input is always sRGB transfer.
 #if USE_SRGB_INVOETF_LUT
@@ -911,15 +919,15 @@
 #endif
           float sdr_y_nits = luminanceFn(sdr_rgb) * kSdrWhiteNits;
 
-          Color hdr_yuv_gamma = sampleP010(uncompressed_p010_image, kMapDimensionScaleFactor, x, y);
+          Color hdr_yuv_gamma = sampleP010(p010_image_ptr, kMapDimensionScaleFactor, x, y);
           Color hdr_rgb_gamma = hdrYuvToRgbFn(hdr_yuv_gamma);
           Color hdr_rgb = hdrInvOetf(hdr_rgb_gamma);
           hdr_rgb = hdrGamutConversionFn(hdr_rgb);
           float hdr_y_nits = luminanceFn(hdr_rgb) * hdr_white_nits;
 
-          size_t pixel_idx = x + y * dest_map_stride;
+          size_t pixel_idx = x + y * dest->width;
           reinterpret_cast<uint8_t*>(dest->data)[pixel_idx] =
-              encodeGain(sdr_y_nits, hdr_y_nits, metadata, log2MinBoost, log2MaxBoost);
+                  encodeGain(sdr_y_nits, hdr_y_nits, metadata, log2MinBoost, log2MaxBoost);
         }
       }
     }
@@ -945,71 +953,66 @@
   return NO_ERROR;
 }
 
-status_t JpegR::applyGainMap(jr_uncompressed_ptr uncompressed_yuv_420_image,
-                             jr_uncompressed_ptr uncompressed_gain_map,
-                             ultrahdr_metadata_ptr metadata,
-                             ultrahdr_output_format output_format,
-                             float max_display_boost,
+status_t JpegR::applyGainMap(jr_uncompressed_ptr yuv420_image_ptr,
+                             jr_uncompressed_ptr gainmap_image_ptr, ultrahdr_metadata_ptr metadata,
+                             ultrahdr_output_format output_format, float max_display_boost,
                              jr_uncompressed_ptr dest) {
-  if (uncompressed_yuv_420_image == nullptr
-   || uncompressed_gain_map == nullptr
-   || metadata == nullptr
-   || dest == nullptr) {
+  if (yuv420_image_ptr == nullptr || gainmap_image_ptr == nullptr || metadata == nullptr ||
+      dest == nullptr || yuv420_image_ptr->data == nullptr ||
+      yuv420_image_ptr->chroma_data == nullptr || gainmap_image_ptr->data == nullptr) {
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
-  if (metadata->version.compare("1.0")) {
-      ALOGE("Unsupported metadata version: %s", metadata->version.c_str());
-      return ERROR_JPEGR_UNSUPPORTED_METADATA;
+  if (metadata->version.compare(kJpegrVersion)) {
+    ALOGE("Unsupported metadata version: %s", metadata->version.c_str());
+    return ERROR_JPEGR_UNSUPPORTED_METADATA;
   }
   if (metadata->gamma != 1.0f) {
-      ALOGE("Unsupported metadata gamma: %f", metadata->gamma);
-      return ERROR_JPEGR_UNSUPPORTED_METADATA;
+    ALOGE("Unsupported metadata gamma: %f", metadata->gamma);
+    return ERROR_JPEGR_UNSUPPORTED_METADATA;
   }
   if (metadata->offsetSdr != 0.0f || metadata->offsetHdr != 0.0f) {
-      ALOGE("Unsupported metadata offset sdr, hdr: %f, %f", metadata->offsetSdr,
-            metadata->offsetHdr);
-      return ERROR_JPEGR_UNSUPPORTED_METADATA;
+    ALOGE("Unsupported metadata offset sdr, hdr: %f, %f", metadata->offsetSdr, metadata->offsetHdr);
+    return ERROR_JPEGR_UNSUPPORTED_METADATA;
   }
-  if (metadata->hdrCapacityMin != metadata->minContentBoost
-   || metadata->hdrCapacityMax != metadata->maxContentBoost) {
-      ALOGE("Unsupported metadata hdr capacity min, max: %f, %f", metadata->hdrCapacityMin,
-            metadata->hdrCapacityMax);
-      return ERROR_JPEGR_UNSUPPORTED_METADATA;
+  if (metadata->hdrCapacityMin != metadata->minContentBoost ||
+      metadata->hdrCapacityMax != metadata->maxContentBoost) {
+    ALOGE("Unsupported metadata hdr capacity min, max: %f, %f", metadata->hdrCapacityMin,
+          metadata->hdrCapacityMax);
+    return ERROR_JPEGR_UNSUPPORTED_METADATA;
   }
 
   // TODO: remove once map scaling factor is computed based on actual map dims
-  size_t image_width = uncompressed_yuv_420_image->width;
-  size_t image_height = uncompressed_yuv_420_image->height;
-  size_t map_width = image_width / kMapDimensionScaleFactor;
-  size_t map_height = image_height / kMapDimensionScaleFactor;
-  map_width = static_cast<size_t>(
-          floor((map_width + kJpegBlock - 1) / kJpegBlock)) * kJpegBlock;
-  map_height = ((map_height + 1) >> 1) << 1;
-  if (map_width != uncompressed_gain_map->width
-   || map_height != uncompressed_gain_map->height) {
-    ALOGE("gain map dimensions and primary image dimensions are not to scale");
+  size_t image_width = yuv420_image_ptr->width;
+  size_t image_height = yuv420_image_ptr->height;
+  size_t map_width = static_cast<size_t>(
+          floor((image_width + kMapDimensionScaleFactor - 1) / kMapDimensionScaleFactor));
+  size_t map_height = static_cast<size_t>(
+          floor((image_height + kMapDimensionScaleFactor - 1) / kMapDimensionScaleFactor));
+  if (map_width != gainmap_image_ptr->width || map_height != gainmap_image_ptr->height) {
+    ALOGE("gain map dimensions and primary image dimensions are not to scale, computed gain map "
+          "resolution is %dx%d, received gain map resolution is %dx%d",
+          (int)map_width, (int)map_height, gainmap_image_ptr->width, gainmap_image_ptr->height);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
 
-  dest->width = uncompressed_yuv_420_image->width;
-  dest->height = uncompressed_yuv_420_image->height;
+  dest->width = yuv420_image_ptr->width;
+  dest->height = yuv420_image_ptr->height;
   ShepardsIDW idwTable(kMapDimensionScaleFactor);
   float display_boost = std::min(max_display_boost, metadata->maxContentBoost);
   GainLUT gainLUT(metadata, display_boost);
 
   JobQueue jobQueue;
-  std::function<void()> applyRecMap = [uncompressed_yuv_420_image, uncompressed_gain_map,
-                                       metadata, dest, &jobQueue, &idwTable, output_format,
-                                       &gainLUT, display_boost]() -> void {
-    size_t width = uncompressed_yuv_420_image->width;
-    size_t height = uncompressed_yuv_420_image->height;
+  std::function<void()> applyRecMap = [yuv420_image_ptr, gainmap_image_ptr, metadata, dest,
+                                       &jobQueue, &idwTable, output_format, &gainLUT,
+                                       display_boost]() -> void {
+    size_t width = yuv420_image_ptr->width;
+    size_t height = yuv420_image_ptr->height;
 
     size_t rowStart, rowEnd;
     while (jobQueue.dequeueJob(rowStart, rowEnd)) {
       for (size_t y = rowStart; y < rowEnd; ++y) {
         for (size_t x = 0; x < width; ++x) {
-          Color yuv_gamma_sdr = getYuv420Pixel(uncompressed_yuv_420_image, x, y);
+          Color yuv_gamma_sdr = getYuv420Pixel(yuv420_image_ptr, x, y);
           // Assuming the sdr image is a decoded JPEG, we should always use Rec.601 YUV coefficients
           Color rgb_gamma_sdr = p3YuvToRgb(yuv_gamma_sdr);
           // We are assuming the SDR base image is always sRGB transfer.
@@ -1025,9 +1028,9 @@
           // Currently map_scale_factor is of type size_t, but it could be changed to a float
           // later.
           if (map_scale_factor != floorf(map_scale_factor)) {
-            gain = sampleMap(uncompressed_gain_map, map_scale_factor, x, y);
+            gain = sampleMap(gainmap_image_ptr, map_scale_factor, x, y);
           } else {
-            gain = sampleMap(uncompressed_gain_map, map_scale_factor, x, y, idwTable);
+            gain = sampleMap(gainmap_image_ptr, map_scale_factor, x, y, idwTable);
           }
 
 #if USE_APPLY_GAIN_LUT
@@ -1039,14 +1042,12 @@
           size_t pixel_idx = x + y * width;
 
           switch (output_format) {
-            case ULTRAHDR_OUTPUT_HDR_LINEAR:
-            {
+            case ULTRAHDR_OUTPUT_HDR_LINEAR: {
               uint64_t rgba_f16 = colorToRgbaF16(rgb_hdr);
               reinterpret_cast<uint64_t*>(dest->data)[pixel_idx] = rgba_f16;
               break;
             }
-            case ULTRAHDR_OUTPUT_HDR_HLG:
-            {
+            case ULTRAHDR_OUTPUT_HDR_HLG: {
 #if USE_HLG_OETF_LUT
               ColorTransformFn hdrOetf = hlgOetfLUT;
 #else
@@ -1057,8 +1058,7 @@
               reinterpret_cast<uint32_t*>(dest->data)[pixel_idx] = rgba_1010102;
               break;
             }
-            case ULTRAHDR_OUTPUT_HDR_PQ:
-            {
+            case ULTRAHDR_OUTPUT_HDR_PQ: {
 #if USE_PQ_OETF_LUT
               ColorTransformFn hdrOetf = pqOetfLUT;
 #else
@@ -1069,8 +1069,8 @@
               reinterpret_cast<uint32_t*>(dest->data)[pixel_idx] = rgba_1010102;
               break;
             }
-            default:
-            {}
+            default: {
+            }
               // Should be impossible to hit after input validation.
           }
         }
@@ -1083,9 +1083,9 @@
   for (int th = 0; th < threads - 1; th++) {
     workers.push_back(std::thread(applyRecMap));
   }
-  const int rowStep = threads == 1 ? uncompressed_yuv_420_image->height : kJobSzInRows;
-  for (int rowStart = 0; rowStart < uncompressed_yuv_420_image->height;) {
-    int rowEnd = std::min(rowStart + rowStep, uncompressed_yuv_420_image->height);
+  const int rowStep = threads == 1 ? yuv420_image_ptr->height : kJobSzInRows;
+  for (int rowStart = 0; rowStart < yuv420_image_ptr->height;) {
+    int rowEnd = std::min(rowStart + rowStep, yuv420_image_ptr->height);
     jobQueue.enqueueJob(rowStart, rowEnd);
     rowStart = rowEnd;
   }
@@ -1095,18 +1095,18 @@
   return NO_ERROR;
 }
 
-status_t JpegR::extractPrimaryImageAndGainMap(jr_compressed_ptr compressed_jpegr_image,
-                                              jr_compressed_ptr primary_image,
-                                              jr_compressed_ptr gain_map) {
-  if (compressed_jpegr_image == nullptr) {
+status_t JpegR::extractPrimaryImageAndGainMap(jr_compressed_ptr jpegr_image_ptr,
+                                              jr_compressed_ptr primary_jpg_image_ptr,
+                                              jr_compressed_ptr gainmap_jpg_image_ptr) {
+  if (jpegr_image_ptr == nullptr) {
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
 
   MessageHandler msg_handler;
   std::shared_ptr<DataSegment> seg =
-                  DataSegment::Create(DataRange(0, compressed_jpegr_image->length),
-                                      static_cast<const uint8_t*>(compressed_jpegr_image->data),
-                                      DataSegment::BufferDispositionPolicy::kDontDelete);
+          DataSegment::Create(DataRange(0, jpegr_image_ptr->length),
+                              static_cast<const uint8_t*>(jpegr_image_ptr->data),
+                              DataSegment::BufferDispositionPolicy::kDontDelete);
   DataSegmentDataSource data_source(seg);
   JpegInfoBuilder jpeg_info_builder;
   jpeg_info_builder.SetImageLimit(2);
@@ -1125,20 +1125,20 @@
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
 
-  if (primary_image != nullptr) {
-    primary_image->data = static_cast<uint8_t*>(compressed_jpegr_image->data) +
-                                               image_ranges[0].GetBegin();
-    primary_image->length = image_ranges[0].GetLength();
+  if (primary_jpg_image_ptr != nullptr) {
+    primary_jpg_image_ptr->data =
+            static_cast<uint8_t*>(jpegr_image_ptr->data) + image_ranges[0].GetBegin();
+    primary_jpg_image_ptr->length = image_ranges[0].GetLength();
   }
 
   if (image_ranges.size() == 1) {
     return ERROR_JPEGR_GAIN_MAP_IMAGE_NOT_FOUND;
   }
 
-  if (gain_map != nullptr) {
-    gain_map->data = static_cast<uint8_t*>(compressed_jpegr_image->data) +
-                                              image_ranges[1].GetBegin();
-    gain_map->length = image_ranges[1].GetLength();
+  if (gainmap_jpg_image_ptr != nullptr) {
+    gainmap_jpg_image_ptr->data =
+            static_cast<uint8_t*>(jpegr_image_ptr->data) + image_ranges[1].GetBegin();
+    gainmap_jpg_image_ptr->length = image_ranges[1].GetLength();
   }
 
   // TODO: choose primary image and gain map image carefully
@@ -1183,58 +1183,48 @@
 // Exif 2.2 spec for EXIF marker
 // Adobe XMP spec part 3 for XMP marker
 // ICC v4.3 spec for ICC
-status_t JpegR::appendGainMap(jr_compressed_ptr compressed_jpeg_image,
-                              jr_compressed_ptr compressed_gain_map,
-                              jr_exif_ptr exif,
-                              void* icc, size_t icc_size,
-                              ultrahdr_metadata_ptr metadata,
+status_t JpegR::appendGainMap(jr_compressed_ptr primary_jpg_image_ptr,
+                              jr_compressed_ptr gainmap_jpg_image_ptr, jr_exif_ptr exif, void* icc,
+                              size_t icc_size, ultrahdr_metadata_ptr metadata,
                               jr_compressed_ptr dest) {
-  if (compressed_jpeg_image == nullptr
-   || compressed_gain_map == nullptr
-   || metadata == nullptr
-   || dest == nullptr) {
+  if (primary_jpg_image_ptr == nullptr || gainmap_jpg_image_ptr == nullptr || metadata == nullptr ||
+      dest == nullptr) {
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
   if (metadata->version.compare("1.0")) {
     ALOGE("received bad value for version: %s", metadata->version.c_str());
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
   if (metadata->maxContentBoost < metadata->minContentBoost) {
     ALOGE("received bad value for content boost min %f, max %f", metadata->minContentBoost,
-           metadata->maxContentBoost);
+          metadata->maxContentBoost);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
-
   if (metadata->hdrCapacityMax < metadata->hdrCapacityMin || metadata->hdrCapacityMin < 1.0f) {
     ALOGE("received bad value for hdr capacity min %f, max %f", metadata->hdrCapacityMin,
-           metadata->hdrCapacityMax);
+          metadata->hdrCapacityMax);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
-
   if (metadata->offsetSdr < 0.0f || metadata->offsetHdr < 0.0f) {
-    ALOGE("received bad value for offset sdr %f, hdr %f", metadata->offsetSdr,
-           metadata->offsetHdr);
+    ALOGE("received bad value for offset sdr %f, hdr %f", metadata->offsetSdr, metadata->offsetHdr);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
-
   if (metadata->gamma <= 0.0f) {
     ALOGE("received bad value for gamma %f", metadata->gamma);
     return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
 
   const string nameSpace = "http://ns.adobe.com/xap/1.0/";
-  const int nameSpaceLength = nameSpace.size() + 1;  // need to count the null terminator
+  const int nameSpaceLength = nameSpace.size() + 1; // need to count the null terminator
 
   // calculate secondary image length first, because the length will be written into the primary
   // image xmp
   const string xmp_secondary = generateXmpForSecondaryImage(*metadata);
   const int xmp_secondary_length = 2 /* 2 bytes representing the length of the package */
-                                 + nameSpaceLength /* 29 bytes length of name space including \0 */
-                                 + xmp_secondary.size(); /* length of xmp packet */
+          + nameSpaceLength          /* 29 bytes length of name space including \0 */
+          + xmp_secondary.size();    /* length of xmp packet */
   const int secondary_image_size = 2 /* 2 bytes length of APP1 sign */
-                                 + xmp_secondary_length
-                                 + compressed_gain_map->length;
+          + xmp_secondary_length + gainmap_jpg_image_ptr->length;
   // primary image
   const string xmp_primary = generateXmpForPrimaryImage(secondary_image_size, *metadata);
   // same as primary
@@ -1273,41 +1263,39 @@
 
   // Write ICC
   if (icc != nullptr && icc_size > 0) {
-      const int length = icc_size + 2;
-      const uint8_t lengthH = ((length >> 8) & 0xff);
-      const uint8_t lengthL = (length & 0xff);
-      JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos));
-      JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP2, 1, pos));
-      JPEGR_CHECK(Write(dest, &lengthH, 1, pos));
-      JPEGR_CHECK(Write(dest, &lengthL, 1, pos));
-      JPEGR_CHECK(Write(dest, icc, icc_size, pos));
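+    // The APP2 segment length field counts its own two bytes but not the 0xFFE2 marker, hence
+    // icc_size + 2.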
+    const int length = icc_size + 2;
+    const uint8_t lengthH = ((length >> 8) & 0xff);
+    const uint8_t lengthL = (length & 0xff);
+    JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos));
+    JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP2, 1, pos));
+    JPEGR_CHECK(Write(dest, &lengthH, 1, pos));
+    JPEGR_CHECK(Write(dest, &lengthL, 1, pos));
+    JPEGR_CHECK(Write(dest, icc, icc_size, pos));
   }
 
   // Prepare and write MPF
   {
-      const int length = 2 + calculateMpfSize();
-      const uint8_t lengthH = ((length >> 8) & 0xff);
-      const uint8_t lengthL = (length & 0xff);
-      int primary_image_size = pos + length + compressed_jpeg_image->length;
-      // between APP2 + package size + signature
-      // ff e2 00 58 4d 50 46 00
-      // 2 + 2 + 4 = 8 (bytes)
-      // and ff d8 sign of the secondary image
-      int secondary_image_offset = primary_image_size - pos - 8;
-      sp<DataStruct> mpf = generateMpf(primary_image_size,
-                                       0, /* primary_image_offset */
-                                       secondary_image_size,
-                                       secondary_image_offset);
-      JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos));
-      JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP2, 1, pos));
-      JPEGR_CHECK(Write(dest, &lengthH, 1, pos));
-      JPEGR_CHECK(Write(dest, &lengthL, 1, pos));
-      JPEGR_CHECK(Write(dest, (void*)mpf->getData(), mpf->getLength(), pos));
+    const int length = 2 + calculateMpfSize();
+    const uint8_t lengthH = ((length >> 8) & 0xff);
+    const uint8_t lengthL = (length & 0xff);
+    int primary_image_size = pos + length + primary_jpg_image_ptr->length;
+    // between APP2 + package size + signature
+    // ff e2 00 58 4d 50 46 00
+    // 2 + 2 + 4 = 8 (bytes)
+    // and ff d8 sign of the secondary image
+    int secondary_image_offset = primary_image_size - pos - 8;
+    sp<DataStruct> mpf = generateMpf(primary_image_size, /* primary_image_offset */ 0,
+                                     secondary_image_size, secondary_image_offset);
+    JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kStart, 1, pos));
+    JPEGR_CHECK(Write(dest, &photos_editing_formats::image_io::JpegMarker::kAPP2, 1, pos));
+    JPEGR_CHECK(Write(dest, &lengthH, 1, pos));
+    JPEGR_CHECK(Write(dest, &lengthL, 1, pos));
+    JPEGR_CHECK(Write(dest, (void*)mpf->getData(), mpf->getLength(), pos));
   }
 
   // Write primary image
-  JPEGR_CHECK(Write(dest,
-      (uint8_t*)compressed_jpeg_image->data + 2, compressed_jpeg_image->length - 2, pos));
+  JPEGR_CHECK(Write(dest, (uint8_t*)primary_jpg_image_ptr->data + 2,
+                    primary_jpg_image_ptr->length - 2, pos));
   // Finish primary image
 
   // Begin secondary image (gain map)
@@ -1329,8 +1317,8 @@
   }
 
   // Write secondary image
-  JPEGR_CHECK(Write(dest,
-        (uint8_t*)compressed_gain_map->data + 2, compressed_gain_map->length - 2, pos));
+  JPEGR_CHECK(Write(dest, (uint8_t*)gainmap_jpg_image_ptr->data + 2,
+                    gainmap_jpg_image_ptr->length - 2, pos));
 
   // Set back length
   dest->length = pos;
@@ -1343,62 +1331,52 @@
   if (src == nullptr || dest == nullptr) {
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
-  uint16_t* src_luma_data = reinterpret_cast<uint16_t*>(src->data);
-  size_t src_luma_stride = src->luma_stride == 0 ? src->width : src->luma_stride;
-
-  uint16_t* src_chroma_data;
-  size_t src_chroma_stride;
-  if (src->chroma_data == nullptr) {
-     src_chroma_stride = src_luma_stride;
-     src_chroma_data = &reinterpret_cast<uint16_t*>(src->data)[src_luma_stride * src->height];
-  } else {
-     src_chroma_stride = src->chroma_stride;
-     src_chroma_data = reinterpret_cast<uint16_t*>(src->chroma_data);
+  if (src->width != dest->width || src->height != dest->height) {
+    return ERROR_JPEGR_INVALID_INPUT_TYPE;
   }
-  dest->width = src->width;
-  dest->height = src->height;
-
-  size_t dest_luma_pixel_count = dest->width * dest->height;
-
+  uint16_t* src_y_data = reinterpret_cast<uint16_t*>(src->data);
+  uint8_t* dst_y_data = reinterpret_cast<uint8_t*>(dest->data);
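+  // P010 carries 10-bit samples in the upper bits of each 16-bit word: >> 6 recovers the 10-bit
+  // value and the subsequent >> 2 reduces it to 8 bits.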
   for (size_t y = 0; y < src->height; ++y) {
+    uint16_t* src_y_row = src_y_data + y * src->luma_stride;
+    uint8_t* dst_y_row = dst_y_data + y * dest->luma_stride;
     for (size_t x = 0; x < src->width; ++x) {
-      size_t src_y_idx = y * src_luma_stride + x;
-      size_t src_u_idx = (y >> 1) * src_chroma_stride + (x & ~0x1);
-      size_t src_v_idx = src_u_idx + 1;
-
-      uint16_t y_uint = src_luma_data[src_y_idx] >> 6;
-      uint16_t u_uint = src_chroma_data[src_u_idx] >> 6;
-      uint16_t v_uint = src_chroma_data[src_v_idx] >> 6;
-
-      size_t dest_y_idx = x + y * dest->width;
-      size_t dest_uv_idx = x / 2 + (y / 2) * (dest->width / 2);
-
-      uint8_t* y = &reinterpret_cast<uint8_t*>(dest->data)[dest_y_idx];
-      uint8_t* u = &reinterpret_cast<uint8_t*>(dest->data)[dest_luma_pixel_count + dest_uv_idx];
-      uint8_t* v = &reinterpret_cast<uint8_t*>(
-              dest->data)[dest_luma_pixel_count * 5 / 4 + dest_uv_idx];
-
-      *y = static_cast<uint8_t>((y_uint >> 2) & 0xff);
-      *u = static_cast<uint8_t>((u_uint >> 2) & 0xff);
-      *v = static_cast<uint8_t>((v_uint >> 2) & 0xff);
+      uint16_t y_uint = src_y_row[x] >> 6;
+      dst_y_row[x] = static_cast<uint8_t>((y_uint >> 2) & 0xff);
+    }
+    if (dest->width != dest->luma_stride) {
+      memset(dst_y_row + dest->width, 0, dest->luma_stride - dest->width);
     }
   }
-
+  uint16_t* src_uv_data = reinterpret_cast<uint16_t*>(src->chroma_data);
+  uint8_t* dst_u_data = reinterpret_cast<uint8_t*>(dest->chroma_data);
+  size_t dst_v_offset = (dest->chroma_stride * dest->height / 2);
+  uint8_t* dst_v_data = dst_u_data + dst_v_offset;
+  for (size_t y = 0; y < src->height / 2; ++y) {
+    uint16_t* src_uv_row = src_uv_data + y * src->chroma_stride;
+    uint8_t* dst_u_row = dst_u_data + y * dest->chroma_stride;
+    uint8_t* dst_v_row = dst_v_data + y * dest->chroma_stride;
+    for (size_t x = 0; x < src->width / 2; ++x) {
+      uint16_t u_uint = src_uv_row[x << 1] >> 6;
+      uint16_t v_uint = src_uv_row[(x << 1) + 1] >> 6;
+      dst_u_row[x] = static_cast<uint8_t>((u_uint >> 2) & 0xff);
+      dst_v_row[x] = static_cast<uint8_t>((v_uint >> 2) & 0xff);
+    }
+    if (dest->width / 2 != dest->chroma_stride) {
+      memset(dst_u_row + dest->width / 2, 0, dest->chroma_stride - dest->width / 2);
+      memset(dst_v_row + dest->width / 2, 0, dest->chroma_stride - dest->width / 2);
+    }
+  }
   dest->colorGamut = src->colorGamut;
-
   return NO_ERROR;
 }
 
-status_t JpegR::convertYuv(jr_uncompressed_ptr image,
-                           ultrahdr_color_gamut src_encoding,
+status_t JpegR::convertYuv(jr_uncompressed_ptr image, ultrahdr_color_gamut src_encoding,
                            ultrahdr_color_gamut dest_encoding) {
   if (image == nullptr) {
     return ERROR_JPEGR_INVALID_NULL_PTR;
   }
-
-  if (src_encoding == ULTRAHDR_COLORGAMUT_UNSPECIFIED
-   || dest_encoding == ULTRAHDR_COLORGAMUT_UNSPECIFIED) {
+  if (src_encoding == ULTRAHDR_COLORGAMUT_UNSPECIFIED ||
+      dest_encoding == ULTRAHDR_COLORGAMUT_UNSPECIFIED) {
     return ERROR_JPEGR_INVALID_COLORGAMUT;
   }
 
diff --git a/libs/ultrahdr/tests/gainmapmath_test.cpp b/libs/ultrahdr/tests/gainmapmath_test.cpp
index af90365..7c2d076 100644
--- a/libs/ultrahdr/tests/gainmapmath_test.cpp
+++ b/libs/ultrahdr/tests/gainmapmath_test.cpp
@@ -120,7 +120,7 @@
       0xB0, 0xB1,
       0xB2, 0xB3,
     };
-    return { pixels, 4, 4, ULTRAHDR_COLORGAMUT_BT709 };
+    return { pixels, 4, 4, ULTRAHDR_COLORGAMUT_BT709, pixels + 16, 4, 2 };
   }
 
   Color (*Yuv420Colors())[4] {
@@ -153,7 +153,7 @@
       0xA0 << 6, 0xB0 << 6, 0xA1 << 6, 0xB1 << 6,
       0xA2 << 6, 0xB2 << 6, 0xA3 << 6, 0xB3 << 6,
     };
-    return { pixels, 4, 4, ULTRAHDR_COLORGAMUT_BT709 };
+    return { pixels, 4, 4, ULTRAHDR_COLORGAMUT_BT709, pixels + 16, 4, 4 };
   }
 
   Color (*P010Colors())[4] {
@@ -636,6 +636,9 @@
     memcpy(out_buf.get(), input.data, out_buf_size);
     jpegr_uncompressed_struct output = Yuv420Image();
     output.data = out_buf.get();
+    output.chroma_data = out_buf.get() + input.width * input.height;
+    output.luma_stride = input.width;
+    output.chroma_stride = input.width / 2;
 
     transformYuv420(&output, 1, 1, transform);
 
diff --git a/libs/ultrahdr/tests/jpegencoderhelper_test.cpp b/libs/ultrahdr/tests/jpegencoderhelper_test.cpp
index f0e1fa4..33cb9f6 100644
--- a/libs/ultrahdr/tests/jpegencoderhelper_test.cpp
+++ b/libs/ultrahdr/tests/jpegencoderhelper_test.cpp
@@ -42,6 +42,7 @@
     };
     JpegEncoderHelperTest();
     ~JpegEncoderHelperTest();
+
 protected:
     virtual void SetUp();
     virtual void TearDown();
@@ -103,24 +104,32 @@
 
 TEST_F(JpegEncoderHelperTest, encodeAlignedImage) {
     JpegEncoderHelper encoder;
-    EXPECT_TRUE(encoder.compressImage(mAlignedImage.buffer.get(), mAlignedImage.width,
-                                      mAlignedImage.height, JPEG_QUALITY, NULL, 0));
+    EXPECT_TRUE(encoder.compressImage(mAlignedImage.buffer.get(),
+                                      mAlignedImage.buffer.get() +
+                                              mAlignedImage.width * mAlignedImage.height,
+                                      mAlignedImage.width, mAlignedImage.height,
+                                      mAlignedImage.width, mAlignedImage.width / 2, JPEG_QUALITY,
+                                      NULL, 0));
     ASSERT_GT(encoder.getCompressedImageSize(), static_cast<uint32_t>(0));
 }
 
 TEST_F(JpegEncoderHelperTest, encodeUnalignedImage) {
     JpegEncoderHelper encoder;
-    EXPECT_TRUE(encoder.compressImage(mUnalignedImage.buffer.get(), mUnalignedImage.width,
-                                      mUnalignedImage.height, JPEG_QUALITY, NULL, 0));
+    EXPECT_TRUE(encoder.compressImage(mUnalignedImage.buffer.get(),
+                                      mUnalignedImage.buffer.get() +
+                                              mUnalignedImage.width * mUnalignedImage.height,
+                                      mUnalignedImage.width, mUnalignedImage.height,
+                                      mUnalignedImage.width, mUnalignedImage.width / 2,
+                                      JPEG_QUALITY, NULL, 0));
     ASSERT_GT(encoder.getCompressedImageSize(), static_cast<uint32_t>(0));
 }
 
 TEST_F(JpegEncoderHelperTest, encodeSingleChannelImage) {
     JpegEncoderHelper encoder;
-    EXPECT_TRUE(encoder.compressImage(mSingleChannelImage.buffer.get(), mSingleChannelImage.width,
-                                         mSingleChannelImage.height, JPEG_QUALITY, NULL, 0, true));
+    EXPECT_TRUE(encoder.compressImage(mSingleChannelImage.buffer.get(), nullptr,
+                                      mSingleChannelImage.width, mSingleChannelImage.height,
+                                      mSingleChannelImage.width, 0, JPEG_QUALITY, NULL, 0));
     ASSERT_GT(encoder.getCompressedImageSize(), static_cast<uint32_t>(0));
 }
 
-}  // namespace android::ultrahdr
-
+} // namespace android::ultrahdr
diff --git a/libs/ultrahdr/tests/jpegr_test.cpp b/libs/ultrahdr/tests/jpegr_test.cpp
index 7837bcf..a750867 100644
--- a/libs/ultrahdr/tests/jpegr_test.cpp
+++ b/libs/ultrahdr/tests/jpegr_test.cpp
@@ -728,23 +728,23 @@
 
     rawImg420->width = kWidth;
     rawImg420->height = kHeight;
-    rawImg420->luma_stride = kWidth;
+    rawImg420->luma_stride = kWidth - 2;
     ASSERT_NE(uHdrLib.encodeJPEGR(rawImgP010, rawImg420,
                                   ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
                                   jpgImg.getImageHandle(), kQuality, nullptr),
               OK)
-            << "fail, API allows luma stride for 420";
+            << "fail, API allows bad luma stride for 420";
 
     rawImg420->width = kWidth;
     rawImg420->height = kHeight;
     rawImg420->luma_stride = 0;
     rawImg420->chroma_data = rawImgP010->data;
-    rawImg420->chroma_stride = kWidth;
+    rawImg420->chroma_stride = kWidth / 2 - 2;
     ASSERT_NE(uHdrLib.encodeJPEGR(rawImgP010, rawImg420,
                                   ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
                                   jpgImg.getImageHandle(), kQuality, nullptr),
               OK)
-            << "fail, API allows bad chroma pointer for 420";
+            << "fail, API allows bad chroma stride for 420";
   }
 }
 
@@ -1021,23 +1021,23 @@
 
     rawImg420->width = kWidth;
     rawImg420->height = kHeight;
-    rawImg420->luma_stride = kWidth;
+    rawImg420->luma_stride = kWidth - 2;
     ASSERT_NE(uHdrLib.encodeJPEGR(rawImgP010, rawImg420, jpgImg.getImageHandle(),
                                   ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
                                   jpgImg.getImageHandle()),
               OK)
-            << "fail, API allows luma stride for 420";
+            << "fail, API allows bad luma stride for 420";
 
     rawImg420->width = kWidth;
     rawImg420->height = kHeight;
     rawImg420->luma_stride = 0;
     rawImg420->chroma_data = rawImgP010->data;
-    rawImg420->chroma_stride = kWidth;
+    rawImg420->chroma_stride = kWidth / 2 - 2;
     ASSERT_NE(uHdrLib.encodeJPEGR(rawImgP010, rawImg420, jpgImg.getImageHandle(),
                                   ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
                                   jpgImg.getImageHandle()),
               OK)
-            << "fail, API allows bad chroma pointer for 420";
+            << "fail, API allows bad chroma stride for 420";
   }
 }
 
@@ -1375,11 +1375,21 @@
   EXPECT_FLOAT_EQ(metadata_expected.hdrCapacityMax, metadata_read.hdrCapacityMax);
 }
 
+class JpegRAPIEncodeAndDecodeTest
+      : public ::testing::TestWithParam<std::tuple<ultrahdr_color_gamut, ultrahdr_color_gamut>> {
+public:
+  JpegRAPIEncodeAndDecodeTest()
+        : mP010ColorGamut(std::get<0>(GetParam())), mYuv420ColorGamut(std::get<1>(GetParam())) {}
+
+  const ultrahdr_color_gamut mP010ColorGamut;
+  const ultrahdr_color_gamut mYuv420ColorGamut;
+};
+
 /* Test Encode API-0 and Decode */
-TEST(JpegRTest, EncodeAPI0AndDecodeTest) {
+TEST_P(JpegRAPIEncodeAndDecodeTest, EncodeAPI0AndDecodeTest) {
   // reference encode
   UhdrUnCompressedStructWrapper rawImg(kImageWidth, kImageHeight, YCbCr_p010);
-  ASSERT_TRUE(rawImg.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
+  ASSERT_TRUE(rawImg.setImageColorGamut(mP010ColorGamut));
   ASSERT_TRUE(rawImg.allocateMemory());
   ASSERT_TRUE(rawImg.loadRawResource(kYCbCrP010FileName));
   UhdrCompressedStructWrapper jpgImg(kImageWidth, kImageHeight);
@@ -1392,8 +1402,8 @@
   // encode with luma stride set
   {
     UhdrUnCompressedStructWrapper rawImg2(kImageWidth, kImageHeight, YCbCr_p010);
-    ASSERT_TRUE(rawImg2.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
-    ASSERT_TRUE(rawImg2.setImageStride(kImageWidth + 128, 0));
+    ASSERT_TRUE(rawImg2.setImageColorGamut(mP010ColorGamut));
+    ASSERT_TRUE(rawImg2.setImageStride(kImageWidth + 18, 0));
     ASSERT_TRUE(rawImg2.allocateMemory());
     ASSERT_TRUE(rawImg2.loadRawResource(kYCbCrP010FileName));
     UhdrCompressedStructWrapper jpgImg2(kImageWidth, kImageHeight);
@@ -1410,8 +1420,8 @@
   // encode with luma and chroma stride set
   {
     UhdrUnCompressedStructWrapper rawImg2(kImageWidth, kImageHeight, YCbCr_p010);
-    ASSERT_TRUE(rawImg2.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
-    ASSERT_TRUE(rawImg2.setImageStride(kImageWidth + 128, kImageWidth + 256));
+    ASSERT_TRUE(rawImg2.setImageColorGamut(mP010ColorGamut));
+    ASSERT_TRUE(rawImg2.setImageStride(kImageWidth + 18, kImageWidth + 28));
     ASSERT_TRUE(rawImg2.setChromaMode(false));
     ASSERT_TRUE(rawImg2.allocateMemory());
     ASSERT_TRUE(rawImg2.loadRawResource(kYCbCrP010FileName));
@@ -1429,8 +1439,8 @@
   // encode with chroma stride set
   {
     UhdrUnCompressedStructWrapper rawImg2(kImageWidth, kImageHeight, YCbCr_p010);
-    ASSERT_TRUE(rawImg2.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
-    ASSERT_TRUE(rawImg2.setImageStride(0, kImageWidth + 64));
+    ASSERT_TRUE(rawImg2.setImageColorGamut(mP010ColorGamut));
+    ASSERT_TRUE(rawImg2.setImageStride(0, kImageWidth + 34));
     ASSERT_TRUE(rawImg2.setChromaMode(false));
     ASSERT_TRUE(rawImg2.allocateMemory());
     ASSERT_TRUE(rawImg2.loadRawResource(kYCbCrP010FileName));
@@ -1445,6 +1455,24 @@
     ASSERT_EQ(jpg1->length, jpg2->length);
     ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
   }
+  // encode with luma and chroma stride set but no chroma ptr
+  {
+    UhdrUnCompressedStructWrapper rawImg2(kImageWidth, kImageHeight, YCbCr_p010);
+    ASSERT_TRUE(rawImg2.setImageColorGamut(mP010ColorGamut));
+    ASSERT_TRUE(rawImg2.setImageStride(kImageWidth, kImageWidth + 38));
+    ASSERT_TRUE(rawImg2.allocateMemory());
+    ASSERT_TRUE(rawImg2.loadRawResource(kYCbCrP010FileName));
+    UhdrCompressedStructWrapper jpgImg2(kImageWidth, kImageHeight);
+    ASSERT_TRUE(jpgImg2.allocateMemory());
+    ASSERT_EQ(uHdrLib.encodeJPEGR(rawImg2.getImageHandle(),
+                                  ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+                                  jpgImg2.getImageHandle(), kQuality, nullptr),
+              OK);
+    auto jpg1 = jpgImg.getImageHandle();
+    auto jpg2 = jpgImg2.getImageHandle();
+    ASSERT_EQ(jpg1->length, jpg2->length);
+    ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
+  }
 
   auto jpg1 = jpgImg.getImageHandle();
 #ifdef DUMP_OUTPUT
@@ -1457,13 +1485,13 @@
 }
 
 /* Test Encode API-1 and Decode */
-TEST(JpegRTest, EncodeAPI1AndDecodeTest) {
+TEST_P(JpegRAPIEncodeAndDecodeTest, EncodeAPI1AndDecodeTest) {
   UhdrUnCompressedStructWrapper rawImgP010(kImageWidth, kImageHeight, YCbCr_p010);
-  ASSERT_TRUE(rawImgP010.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
+  ASSERT_TRUE(rawImgP010.setImageColorGamut(mP010ColorGamut));
   ASSERT_TRUE(rawImgP010.allocateMemory());
   ASSERT_TRUE(rawImgP010.loadRawResource(kYCbCrP010FileName));
   UhdrUnCompressedStructWrapper rawImg420(kImageWidth, kImageHeight, YCbCr_420);
-  ASSERT_TRUE(rawImg420.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT709));
+  ASSERT_TRUE(rawImg420.setImageColorGamut(mYuv420ColorGamut));
   ASSERT_TRUE(rawImg420.allocateMemory());
   ASSERT_TRUE(rawImg420.loadRawResource(kYCbCr420FileName));
   UhdrCompressedStructWrapper jpgImg(kImageWidth, kImageHeight);
@@ -1473,10 +1501,10 @@
                                 ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
                                 jpgImg.getImageHandle(), kQuality, nullptr),
             OK);
-  // encode with luma stride set
+  // encode with luma stride set p010
   {
     UhdrUnCompressedStructWrapper rawImg2P010(kImageWidth, kImageHeight, YCbCr_p010);
-    ASSERT_TRUE(rawImg2P010.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
+    ASSERT_TRUE(rawImg2P010.setImageColorGamut(mP010ColorGamut));
     ASSERT_TRUE(rawImg2P010.setImageStride(kImageWidth + 128, 0));
     ASSERT_TRUE(rawImg2P010.allocateMemory());
     ASSERT_TRUE(rawImg2P010.loadRawResource(kYCbCrP010FileName));
@@ -1491,10 +1519,10 @@
     ASSERT_EQ(jpg1->length, jpg2->length);
     ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
   }
-  // encode with luma and chroma stride set
+  // encode with luma and chroma stride set p010
   {
     UhdrUnCompressedStructWrapper rawImg2P010(kImageWidth, kImageHeight, YCbCr_p010);
-    ASSERT_TRUE(rawImg2P010.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
+    ASSERT_TRUE(rawImg2P010.setImageColorGamut(mP010ColorGamut));
     ASSERT_TRUE(rawImg2P010.setImageStride(kImageWidth + 128, kImageWidth + 256));
     ASSERT_TRUE(rawImg2P010.setChromaMode(false));
     ASSERT_TRUE(rawImg2P010.allocateMemory());
@@ -1510,10 +1538,10 @@
     ASSERT_EQ(jpg1->length, jpg2->length);
     ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
   }
-  // encode with chroma stride set
+  // encode with chroma stride set p010
   {
     UhdrUnCompressedStructWrapper rawImg2P010(kImageWidth, kImageHeight, YCbCr_p010);
-    ASSERT_TRUE(rawImg2P010.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
+    ASSERT_TRUE(rawImg2P010.setImageColorGamut(mP010ColorGamut));
     ASSERT_TRUE(rawImg2P010.setImageStride(0, kImageWidth + 64));
     ASSERT_TRUE(rawImg2P010.setChromaMode(false));
     ASSERT_TRUE(rawImg2P010.allocateMemory());
@@ -1529,6 +1557,98 @@
     ASSERT_EQ(jpg1->length, jpg2->length);
     ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
   }
+  // encode with luma and chroma stride set but no chroma ptr p010
+  {
+    UhdrUnCompressedStructWrapper rawImg2P010(kImageWidth, kImageHeight, YCbCr_p010);
+    ASSERT_TRUE(rawImg2P010.setImageColorGamut(mP010ColorGamut));
+    ASSERT_TRUE(rawImg2P010.setImageStride(kImageWidth + 64, kImageWidth + 256));
+    ASSERT_TRUE(rawImg2P010.allocateMemory());
+    ASSERT_TRUE(rawImg2P010.loadRawResource(kYCbCrP010FileName));
+    UhdrCompressedStructWrapper jpgImg2(kImageWidth, kImageHeight);
+    ASSERT_TRUE(jpgImg2.allocateMemory());
+    ASSERT_EQ(uHdrLib.encodeJPEGR(rawImg2P010.getImageHandle(), rawImg420.getImageHandle(),
+                                  ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+                                  jpgImg2.getImageHandle(), kQuality, nullptr),
+              OK);
+    auto jpg1 = jpgImg.getImageHandle();
+    auto jpg2 = jpgImg2.getImageHandle();
+    ASSERT_EQ(jpg1->length, jpg2->length);
+    ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
+  }
+  // encode with luma stride set 420
+  {
+    UhdrUnCompressedStructWrapper rawImg2420(kImageWidth, kImageHeight, YCbCr_420);
+    ASSERT_TRUE(rawImg2420.setImageColorGamut(mYuv420ColorGamut));
+    ASSERT_TRUE(rawImg2420.setImageStride(kImageWidth + 14, 0));
+    ASSERT_TRUE(rawImg2420.allocateMemory());
+    ASSERT_TRUE(rawImg2420.loadRawResource(kYCbCr420FileName));
+    UhdrCompressedStructWrapper jpgImg2(kImageWidth, kImageHeight);
+    ASSERT_TRUE(jpgImg2.allocateMemory());
+    ASSERT_EQ(uHdrLib.encodeJPEGR(rawImgP010.getImageHandle(), rawImg2420.getImageHandle(),
+                                  ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+                                  jpgImg2.getImageHandle(), kQuality, nullptr),
+              OK);
+    auto jpg1 = jpgImg.getImageHandle();
+    auto jpg2 = jpgImg2.getImageHandle();
+    ASSERT_EQ(jpg1->length, jpg2->length);
+    ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
+  }
+  // encode with luma and chroma stride set 420
+  {
+    UhdrUnCompressedStructWrapper rawImg2420(kImageWidth, kImageHeight, YCbCr_420);
+    ASSERT_TRUE(rawImg2420.setImageColorGamut(mYuv420ColorGamut));
+    ASSERT_TRUE(rawImg2420.setImageStride(kImageWidth + 46, kImageWidth / 2 + 34));
+    ASSERT_TRUE(rawImg2420.setChromaMode(false));
+    ASSERT_TRUE(rawImg2420.allocateMemory());
+    ASSERT_TRUE(rawImg2420.loadRawResource(kYCbCr420FileName));
+    UhdrCompressedStructWrapper jpgImg2(kImageWidth, kImageHeight);
+    ASSERT_TRUE(jpgImg2.allocateMemory());
+    ASSERT_EQ(uHdrLib.encodeJPEGR(rawImgP010.getImageHandle(), rawImg2420.getImageHandle(),
+                                  ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+                                  jpgImg2.getImageHandle(), kQuality, nullptr),
+              OK);
+    auto jpg1 = jpgImg.getImageHandle();
+    auto jpg2 = jpgImg2.getImageHandle();
+    ASSERT_EQ(jpg1->length, jpg2->length);
+    ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
+  }
+  // encode with chroma stride set 420
+  {
+    UhdrUnCompressedStructWrapper rawImg2420(kImageWidth, kImageHeight, YCbCr_420);
+    ASSERT_TRUE(rawImg2420.setImageColorGamut(mYuv420ColorGamut));
+    ASSERT_TRUE(rawImg2420.setImageStride(0, kImageWidth / 2 + 38));
+    ASSERT_TRUE(rawImg2420.setChromaMode(false));
+    ASSERT_TRUE(rawImg2420.allocateMemory());
+    ASSERT_TRUE(rawImg2420.loadRawResource(kYCbCr420FileName));
+    UhdrCompressedStructWrapper jpgImg2(kImageWidth, kImageHeight);
+    ASSERT_TRUE(jpgImg2.allocateMemory());
+    ASSERT_EQ(uHdrLib.encodeJPEGR(rawImgP010.getImageHandle(), rawImg2420.getImageHandle(),
+                                  ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+                                  jpgImg2.getImageHandle(), kQuality, nullptr),
+              OK);
+    auto jpg1 = jpgImg.getImageHandle();
+    auto jpg2 = jpgImg2.getImageHandle();
+    ASSERT_EQ(jpg1->length, jpg2->length);
+    ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
+  }
+  // encode with luma and chroma stride set but no chroma ptr 420
+  {
+    UhdrUnCompressedStructWrapper rawImg2420(kImageWidth, kImageHeight, YCbCr_420);
+    ASSERT_TRUE(rawImg2420.setImageColorGamut(mYuv420ColorGamut));
+    ASSERT_TRUE(rawImg2420.setImageStride(kImageWidth + 26, kImageWidth / 2 + 44));
+    ASSERT_TRUE(rawImg2420.allocateMemory());
+    ASSERT_TRUE(rawImg2420.loadRawResource(kYCbCr420FileName));
+    UhdrCompressedStructWrapper jpgImg2(kImageWidth, kImageHeight);
+    ASSERT_TRUE(jpgImg2.allocateMemory());
+    ASSERT_EQ(uHdrLib.encodeJPEGR(rawImgP010.getImageHandle(), rawImg2420.getImageHandle(),
+                                  ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+                                  jpgImg2.getImageHandle(), kQuality, nullptr),
+              OK);
+    auto jpg1 = jpgImg.getImageHandle();
+    auto jpg2 = jpgImg2.getImageHandle();
+    ASSERT_EQ(jpg1->length, jpg2->length);
+    ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
+  }
 
   auto jpg1 = jpgImg.getImageHandle();
 
@@ -1542,13 +1662,13 @@
 }
 
 /* Test Encode API-2 and Decode */
-TEST(JpegRTest, EncodeAPI2AndDecodeTest) {
+TEST_P(JpegRAPIEncodeAndDecodeTest, EncodeAPI2AndDecodeTest) {
   UhdrUnCompressedStructWrapper rawImgP010(kImageWidth, kImageHeight, YCbCr_p010);
-  ASSERT_TRUE(rawImgP010.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
+  ASSERT_TRUE(rawImgP010.setImageColorGamut(mP010ColorGamut));
   ASSERT_TRUE(rawImgP010.allocateMemory());
   ASSERT_TRUE(rawImgP010.loadRawResource(kYCbCrP010FileName));
   UhdrUnCompressedStructWrapper rawImg420(kImageWidth, kImageHeight, YCbCr_420);
-  ASSERT_TRUE(rawImg420.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT709));
+  ASSERT_TRUE(rawImg420.setImageColorGamut(mYuv420ColorGamut));
   ASSERT_TRUE(rawImg420.allocateMemory());
   ASSERT_TRUE(rawImg420.loadRawResource(kYCbCr420FileName));
   UhdrCompressedStructWrapper jpgImg(kImageWidth, kImageHeight);
@@ -1565,7 +1685,7 @@
   // encode with luma stride set
   {
     UhdrUnCompressedStructWrapper rawImg2P010(kImageWidth, kImageHeight, YCbCr_p010);
-    ASSERT_TRUE(rawImg2P010.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
+    ASSERT_TRUE(rawImg2P010.setImageColorGamut(mP010ColorGamut));
     ASSERT_TRUE(rawImg2P010.setImageStride(kImageWidth + 128, 0));
     ASSERT_TRUE(rawImg2P010.allocateMemory());
     ASSERT_TRUE(rawImg2P010.loadRawResource(kYCbCrP010FileName));
@@ -1583,7 +1703,7 @@
   // encode with luma and chroma stride set
   {
     UhdrUnCompressedStructWrapper rawImg2P010(kImageWidth, kImageHeight, YCbCr_p010);
-    ASSERT_TRUE(rawImg2P010.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
+    ASSERT_TRUE(rawImg2P010.setImageColorGamut(mP010ColorGamut));
     ASSERT_TRUE(rawImg2P010.setImageStride(kImageWidth + 128, kImageWidth + 256));
     ASSERT_TRUE(rawImg2P010.setChromaMode(false));
     ASSERT_TRUE(rawImg2P010.allocateMemory());
@@ -1602,7 +1722,7 @@
   // encode with chroma stride set
   {
     UhdrUnCompressedStructWrapper rawImg2P010(kImageWidth, kImageHeight, YCbCr_p010);
-    ASSERT_TRUE(rawImg2P010.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
+    ASSERT_TRUE(rawImg2P010.setImageColorGamut(mP010ColorGamut));
     ASSERT_TRUE(rawImg2P010.setImageStride(0, kImageWidth + 64));
     ASSERT_TRUE(rawImg2P010.setChromaMode(false));
     ASSERT_TRUE(rawImg2P010.allocateMemory());
@@ -1618,6 +1738,62 @@
     ASSERT_EQ(jpg1->length, jpg2->length);
     ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
   }
+  // encode with luma stride set 420
+  {
+    UhdrUnCompressedStructWrapper rawImg2420(kImageWidth, kImageHeight, YCbCr_420);
+    ASSERT_TRUE(rawImg2420.setImageColorGamut(mYuv420ColorGamut));
+    ASSERT_TRUE(rawImg2420.setImageStride(kImageWidth + 128, 0));
+    ASSERT_TRUE(rawImg2420.allocateMemory());
+    ASSERT_TRUE(rawImg2420.loadRawResource(kYCbCr420FileName));
+    UhdrCompressedStructWrapper jpgImg2(kImageWidth, kImageHeight);
+    ASSERT_TRUE(jpgImg2.allocateMemory());
+    ASSERT_EQ(uHdrLib.encodeJPEGR(rawImgP010.getImageHandle(), rawImg2420.getImageHandle(), sdr,
+                                  ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+                                  jpgImg2.getImageHandle()),
+              OK);
+    auto jpg1 = jpgImg.getImageHandle();
+    auto jpg2 = jpgImg2.getImageHandle();
+    ASSERT_EQ(jpg1->length, jpg2->length);
+    ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
+  }
+  // encode with luma and chroma stride set 420
+  {
+    UhdrUnCompressedStructWrapper rawImg2420(kImageWidth, kImageHeight, YCbCr_420);
+    ASSERT_TRUE(rawImg2420.setImageColorGamut(mYuv420ColorGamut));
+    ASSERT_TRUE(rawImg2420.setImageStride(kImageWidth + 128, kImageWidth + 256));
+    ASSERT_TRUE(rawImg2420.setChromaMode(false));
+    ASSERT_TRUE(rawImg2420.allocateMemory());
+    ASSERT_TRUE(rawImg2420.loadRawResource(kYCbCr420FileName));
+    UhdrCompressedStructWrapper jpgImg2(kImageWidth, kImageHeight);
+    ASSERT_TRUE(jpgImg2.allocateMemory());
+    ASSERT_EQ(uHdrLib.encodeJPEGR(rawImgP010.getImageHandle(), rawImg2420.getImageHandle(), sdr,
+                                  ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+                                  jpgImg2.getImageHandle()),
+              OK);
+    auto jpg1 = jpgImg.getImageHandle();
+    auto jpg2 = jpgImg2.getImageHandle();
+    ASSERT_EQ(jpg1->length, jpg2->length);
+    ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
+  }
+  // encode with chroma stride set 420
+  {
+    UhdrUnCompressedStructWrapper rawImg2420(kImageWidth, kImageHeight, YCbCr_420);
+    ASSERT_TRUE(rawImg2420.setImageColorGamut(mYuv420ColorGamut));
+    ASSERT_TRUE(rawImg2420.setImageStride(0, kImageWidth + 64));
+    ASSERT_TRUE(rawImg2420.setChromaMode(false));
+    ASSERT_TRUE(rawImg2420.allocateMemory());
+    ASSERT_TRUE(rawImg2420.loadRawResource(kYCbCr420FileName));
+    UhdrCompressedStructWrapper jpgImg2(kImageWidth, kImageHeight);
+    ASSERT_TRUE(jpgImg2.allocateMemory());
+    ASSERT_EQ(uHdrLib.encodeJPEGR(rawImgP010.getImageHandle(), rawImg2420.getImageHandle(), sdr,
+                                  ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+                                  jpgImg2.getImageHandle()),
+              OK);
+    auto jpg1 = jpgImg.getImageHandle();
+    auto jpg2 = jpgImg2.getImageHandle();
+    ASSERT_EQ(jpg1->length, jpg2->length);
+    ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
+  }
 
   auto jpg1 = jpgImg.getImageHandle();
 
@@ -1631,9 +1807,9 @@
 }
 
 /* Test Encode API-3 and Decode */
-TEST(JpegRTest, EncodeAPI3AndDecodeTest) {
+TEST_P(JpegRAPIEncodeAndDecodeTest, EncodeAPI3AndDecodeTest) {
   UhdrUnCompressedStructWrapper rawImgP010(kImageWidth, kImageHeight, YCbCr_p010);
-  ASSERT_TRUE(rawImgP010.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
+  ASSERT_TRUE(rawImgP010.setImageColorGamut(mP010ColorGamut));
   ASSERT_TRUE(rawImgP010.allocateMemory());
   ASSERT_TRUE(rawImgP010.loadRawResource(kYCbCrP010FileName));
   UhdrCompressedStructWrapper jpgImg(kImageWidth, kImageHeight);
@@ -1650,7 +1826,7 @@
   // encode with luma stride set
   {
     UhdrUnCompressedStructWrapper rawImg2P010(kImageWidth, kImageHeight, YCbCr_p010);
-    ASSERT_TRUE(rawImg2P010.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
+    ASSERT_TRUE(rawImg2P010.setImageColorGamut(mP010ColorGamut));
     ASSERT_TRUE(rawImg2P010.setImageStride(kImageWidth + 128, 0));
     ASSERT_TRUE(rawImg2P010.allocateMemory());
     ASSERT_TRUE(rawImg2P010.loadRawResource(kYCbCrP010FileName));
@@ -1668,7 +1844,7 @@
   // encode with luma and chroma stride set
   {
     UhdrUnCompressedStructWrapper rawImg2P010(kImageWidth, kImageHeight, YCbCr_p010);
-    ASSERT_TRUE(rawImg2P010.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
+    ASSERT_TRUE(rawImg2P010.setImageColorGamut(mP010ColorGamut));
     ASSERT_TRUE(rawImg2P010.setImageStride(kImageWidth + 128, kImageWidth + 256));
     ASSERT_TRUE(rawImg2P010.setChromaMode(false));
     ASSERT_TRUE(rawImg2P010.allocateMemory());
@@ -1687,7 +1863,7 @@
   // encode with chroma stride set
   {
     UhdrUnCompressedStructWrapper rawImg2P010(kImageWidth, kImageHeight, YCbCr_p010);
-    ASSERT_TRUE(rawImg2P010.setImageColorGamut(ultrahdr_color_gamut::ULTRAHDR_COLORGAMUT_BT2100));
+    ASSERT_TRUE(rawImg2P010.setImageColorGamut(mP010ColorGamut));
     ASSERT_TRUE(rawImg2P010.setImageStride(0, kImageWidth + 64));
     ASSERT_TRUE(rawImg2P010.setChromaMode(false));
     ASSERT_TRUE(rawImg2P010.allocateMemory());
@@ -1703,6 +1879,24 @@
     ASSERT_EQ(jpg1->length, jpg2->length);
     ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
   }
+  // encode with luma and chroma stride set and no chroma ptr
+  {
+    UhdrUnCompressedStructWrapper rawImg2P010(kImageWidth, kImageHeight, YCbCr_p010);
+    ASSERT_TRUE(rawImg2P010.setImageColorGamut(mP010ColorGamut));
+    ASSERT_TRUE(rawImg2P010.setImageStride(kImageWidth + 32, kImageWidth + 256));
+    ASSERT_TRUE(rawImg2P010.allocateMemory());
+    ASSERT_TRUE(rawImg2P010.loadRawResource(kYCbCrP010FileName));
+    UhdrCompressedStructWrapper jpgImg2(kImageWidth, kImageHeight);
+    ASSERT_TRUE(jpgImg2.allocateMemory());
+    ASSERT_EQ(uHdrLib.encodeJPEGR(rawImg2P010.getImageHandle(), sdr,
+                                  ultrahdr_transfer_function::ULTRAHDR_TF_HLG,
+                                  jpgImg2.getImageHandle()),
+              OK);
+    auto jpg1 = jpgImg.getImageHandle();
+    auto jpg2 = jpgImg2.getImageHandle();
+    ASSERT_EQ(jpg1->length, jpg2->length);
+    ASSERT_EQ(0, memcmp(jpg1->data, jpg2->data, jpg1->length));
+  }
 
   auto jpg1 = jpgImg.getImageHandle();
 
@@ -1715,6 +1909,13 @@
   ASSERT_NO_FATAL_FAILURE(decodeJpegRImg(jpg1, "decode_api3_output.rgb"));
 }
 
+INSTANTIATE_TEST_SUITE_P(
+        JpegRAPIParameterizedTests, JpegRAPIEncodeAndDecodeTest,
+        ::testing::Combine(::testing::Values(ULTRAHDR_COLORGAMUT_BT709, ULTRAHDR_COLORGAMUT_P3,
+                                             ULTRAHDR_COLORGAMUT_BT2100),
+                           ::testing::Values(ULTRAHDR_COLORGAMUT_BT709, ULTRAHDR_COLORGAMUT_P3,
+                                             ULTRAHDR_COLORGAMUT_BT2100)));
+
 // ============================================================================
 // Profiling
 // ============================================================================
@@ -1796,6 +1997,25 @@
                                    .width = 0,
                                    .height = 0,
                                    .colorGamut = ULTRAHDR_COLORGAMUT_UNSPECIFIED};
+  {
+    auto rawImg = rawImgP010.getImageHandle();
+    if (rawImg->luma_stride == 0) rawImg->luma_stride = rawImg->width;
+    if (!rawImg->chroma_data) {
+      uint16_t* data = reinterpret_cast<uint16_t*>(rawImg->data);
+      rawImg->chroma_data = data + rawImg->luma_stride * rawImg->height;
+      rawImg->chroma_stride = rawImg->luma_stride;
+    }
+  }
+  {
+    auto rawImg = rawImg420.getImageHandle();
+    if (rawImg->luma_stride == 0) rawImg->luma_stride = rawImg->width;
+    if (!rawImg->chroma_data) {
+      uint8_t* data = reinterpret_cast<uint8_t*>(rawImg->data);
+      rawImg->chroma_data = data + rawImg->luma_stride * rawImg->height;
+      rawImg->chroma_stride = rawImg->luma_stride / 2;
+    }
+  }
+
   JpegRBenchmark benchmark;
   ASSERT_NO_FATAL_FAILURE(benchmark.BenchmarkGenerateGainMap(rawImg420.getImageHandle(),
                                                              rawImgP010.getImageHandle(), &metadata,
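
The benchmark setup above fills in default strides and chroma pointers when the caller leaves them unset. Below is a standalone sketch of that fallback rule, with illustrative names only: the luma stride falls back to the width, and the chroma plane is assumed to start right after the luma plane.

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct PlaneLayout {
    size_t chromaOffsetBytes;  // offset of the chroma plane from the buffer start
    size_t chromaStride;       // in samples, matching the convention used above
};

// P010: 16-bit samples, interleaved UV plane at half vertical resolution.
PlaneLayout p010Layout(size_t width, size_t height, size_t lumaStride) {
    if (lumaStride == 0) lumaStride = width;
    return {lumaStride * height * sizeof(uint16_t), lumaStride};
}

// Planar 8-bit YUV420: chroma stride is half the luma stride.
PlaneLayout yuv420Layout(size_t width, size_t height, size_t lumaStride) {
    if (lumaStride == 0) lumaStride = width;
    return {lumaStride * height, lumaStride / 2};
}

int main() {
    PlaneLayout p010 = p010Layout(1280, 720, 0);
    PlaneLayout yuv = yuv420Layout(1280, 720, 0);
    std::printf("p010 chroma offset=%zu stride=%zu\n", p010.chromaOffsetBytes, p010.chromaStride);
    std::printf("420  chroma offset=%zu stride=%zu\n", yuv.chromaOffsetBytes, yuv.chromaStride);
    return 0;
}
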
diff --git a/opengl/libs/EGL/Loader.cpp b/opengl/libs/EGL/Loader.cpp
index 8d0eb59..04e2fff 100644
--- a/opengl/libs/EGL/Loader.cpp
+++ b/opengl/libs/EGL/Loader.cpp
@@ -41,24 +41,44 @@
 /*
  * EGL userspace drivers must be provided either:
  * - as a single library:
- *      /vendor/lib/egl/libGLES.so
+ *      /vendor/${LIB}/egl/libGLES.so
  *
  * - as separate libraries:
- *      /vendor/lib/egl/libEGL.so
- *      /vendor/lib/egl/libGLESv1_CM.so
- *      /vendor/lib/egl/libGLESv2.so
+ *      /vendor/${LIB}/egl/libEGL.so
+ *      /vendor/${LIB}/egl/libGLESv1_CM.so
+ *      /vendor/${LIB}/egl/libGLESv2.so
  *
  * For backward compatibility and to facilitate the transition to
  * this new naming scheme, the loader will additionally look for:
  *
- *      /{vendor|system}/lib/egl/lib{GLES | [EGL|GLESv1_CM|GLESv2]}_*.so
+ *      /vendor/${LIB}/egl/lib{GLES | [EGL|GLESv1_CM|GLESv2]}_${SUFFIX}.so
  *
  */
 
-Loader& Loader::getInstance() {
-    static Loader loader;
-    return loader;
-}
+#ifndef SYSTEM_LIB_PATH
+#if defined(__LP64__)
+#define SYSTEM_LIB_PATH "/system/lib64"
+#else
+#define SYSTEM_LIB_PATH "/system/lib"
+#endif
+#endif
+
+static const char* PERSIST_DRIVER_SUFFIX_PROPERTY = "persist.graphics.egl";
+static const char* RO_DRIVER_SUFFIX_PROPERTY = "ro.hardware.egl";
+static const char* RO_BOARD_PLATFORM_PROPERTY = "ro.board.platform";
+
+static const char* HAL_SUBNAME_KEY_PROPERTIES[3] = {
+        PERSIST_DRIVER_SUFFIX_PROPERTY,
+        RO_DRIVER_SUFFIX_PROPERTY,
+        RO_BOARD_PLATFORM_PROPERTY,
+};
+
+static const char* const VENDOR_LIB_EGL_DIR =
+#if defined(__LP64__)
+        "/vendor/lib64/egl";
+#else
+        "/vendor/lib/egl";
+#endif
 
 static void* do_dlopen(const char* path, int mode) {
     ATRACE_CALL();
@@ -80,6 +100,17 @@
     return android_unload_sphal_library(dso);
 }
 
+static void* load_wrapper(const char* path) {
+    void* so = do_dlopen(path, RTLD_NOW | RTLD_LOCAL);
+    ALOGE_IF(!so, "dlopen(\"%s\") failed: %s", path, dlerror());
+    return so;
+}
+
+Loader& Loader::getInstance() {
+    static Loader loader;
+    return loader;
+}
+
 Loader::driver_t::driver_t(void* gles)
 {
     dso[0] = gles;
@@ -123,30 +154,6 @@
 Loader::~Loader() {
 }
 
-static void* load_wrapper(const char* path) {
-    void* so = do_dlopen(path, RTLD_NOW | RTLD_LOCAL);
-    ALOGE_IF(!so, "dlopen(\"%s\") failed: %s", path, dlerror());
-    return so;
-}
-
-#ifndef EGL_WRAPPER_DIR
-#if defined(__LP64__)
-#define EGL_WRAPPER_DIR "/system/lib64"
-#else
-#define EGL_WRAPPER_DIR "/system/lib"
-#endif
-#endif
-
-static const char* PERSIST_DRIVER_SUFFIX_PROPERTY = "persist.graphics.egl";
-static const char* RO_DRIVER_SUFFIX_PROPERTY = "ro.hardware.egl";
-static const char* RO_BOARD_PLATFORM_PROPERTY = "ro.board.platform";
-
-static const char* HAL_SUBNAME_KEY_PROPERTIES[3] = {
-        PERSIST_DRIVER_SUFFIX_PROPERTY,
-        RO_DRIVER_SUFFIX_PROPERTY,
-        RO_BOARD_PLATFORM_PROPERTY,
-};
-
 // Check whether the loaded system drivers should be unloaded in order to
 // load ANGLE or the updatable graphics drivers.
 // If ANGLE namespace is set, it means the application is identified to run on top of ANGLE.
@@ -169,6 +176,11 @@
         }
     }
 
+    // Return true if native GLES drivers should be used and ANGLE is already loaded.
+    if (android::GraphicsEnv::getInstance().shouldUseNativeDriver() && cnx->angleLoaded) {
+        return true;
+    }
+
     // Return true if updated driver namespace is set.
     ns = android::GraphicsEnv::getInstance().getDriverNamespace();
     if (ns) {
@@ -240,16 +252,28 @@
     if (!hnd) {
         // Secondly, try to load from driver apk.
         hnd = attempt_to_load_updated_driver(cnx);
+
+        // If the updated driver apk is set but fails to load, abort here.
+        LOG_ALWAYS_FATAL_IF(android::GraphicsEnv::getInstance().getDriverNamespace(),
+                            "couldn't find an OpenGL ES implementation from %s",
+                            android::GraphicsEnv::getInstance().getDriverPath().c_str());
     }
 
+    // Attempt to load native GLES drivers specified by ro.hardware.egl if native is selected.
+    // If native is selected but the driver fails to load, abort.
+    if (!hnd && android::GraphicsEnv::getInstance().shouldUseNativeDriver()) {
+        auto driverSuffix = base::GetProperty(RO_DRIVER_SUFFIX_PROPERTY, "");
+        LOG_ALWAYS_FATAL_IF(driverSuffix.empty(),
+                            "Native GLES driver is selected but not specified in %s",
+                            RO_DRIVER_SUFFIX_PROPERTY);
+        hnd = attempt_to_load_system_driver(cnx, driverSuffix.c_str(), true);
+        LOG_ALWAYS_FATAL_IF(!hnd, "Native GLES driver is selected but failed to load. %s=%s",
+                            RO_DRIVER_SUFFIX_PROPERTY, driverSuffix.c_str());
+    }
+
+    // Finally, try to load the default driver.
     bool failToLoadFromDriverSuffixProperty = false;
     if (!hnd) {
-        // If updated driver apk is set but fail to load, abort here.
-        if (android::GraphicsEnv::getInstance().getDriverNamespace()) {
-            LOG_ALWAYS_FATAL("couldn't find an OpenGL ES implementation from %s",
-                             android::GraphicsEnv::getInstance().getDriverPath().c_str());
-        }
-        // Finally, try to load system driver.
         // Start by searching for the library name appended by the system
         // properties of the GLES userspace driver in both locations.
         // i.e.:
@@ -306,13 +330,13 @@
                         HAL_SUBNAME_KEY_PROPERTIES[2]);
 
     if (!cnx->libEgl) {
-        cnx->libEgl = load_wrapper(EGL_WRAPPER_DIR "/libEGL.so");
+        cnx->libEgl = load_wrapper(SYSTEM_LIB_PATH "/libEGL.so");
     }
     if (!cnx->libGles1) {
-        cnx->libGles1 = load_wrapper(EGL_WRAPPER_DIR "/libGLESv1_CM.so");
+        cnx->libGles1 = load_wrapper(SYSTEM_LIB_PATH "/libGLESv1_CM.so");
     }
     if (!cnx->libGles2) {
-        cnx->libGles2 = load_wrapper(EGL_WRAPPER_DIR "/libGLESv2.so");
+        cnx->libGles2 = load_wrapper(SYSTEM_LIB_PATH "/libGLESv2.so");
     }
 
     if (!cnx->libEgl || !cnx->libGles2 || !cnx->libGles1) {
@@ -415,31 +439,19 @@
     class MatchFile {
     public:
         static std::string find(const char* libraryName, const bool exact) {
-            const char* const searchPaths[] = {
-#if defined(__LP64__)
-                    "/vendor/lib64/egl",
-                    "/system/lib64/egl"
-#else
-                    "/vendor/lib/egl",
-                    "/system/lib/egl"
-#endif
-            };
-
-            for (auto dir : searchPaths) {
-                std::string absolutePath;
-                if (find(absolutePath, libraryName, dir, exact)) {
-                    return absolutePath;
-                }
+            std::string absolutePath;
+            if (findLibPath(absolutePath, libraryName, exact)) {
+                return absolutePath;
             }
 
             // Driver not found. gah.
             return std::string();
         }
     private:
-        static bool find(std::string& result,
-                const std::string& pattern, const char* const search, bool exact) {
+        static bool findLibPath(std::string& result, const std::string& pattern, bool exact) {
+            const std::string vendorLibEglDirString = std::string(VENDOR_LIB_EGL_DIR);
             if (exact) {
-                std::string absolutePath = std::string(search) + "/" + pattern + ".so";
+                std::string absolutePath = vendorLibEglDirString + "/" + pattern + ".so";
                 if (!access(absolutePath.c_str(), R_OK)) {
                     result = absolutePath;
                     return true;
@@ -447,7 +459,7 @@
                 return false;
             }
 
-            DIR* d = opendir(search);
+            DIR* d = opendir(VENDOR_LIB_EGL_DIR);
             if (d != nullptr) {
                 struct dirent* e;
                 while ((e = readdir(d)) != nullptr) {
@@ -460,7 +472,7 @@
                     }
                     if (strstr(e->d_name, pattern.c_str()) == e->d_name) {
                         if (!strcmp(e->d_name + strlen(e->d_name) - 3, ".so")) {
-                            result = std::string(search) + "/" + e->d_name;
+                            result = vendorLibEglDirString + "/" + e->d_name;
                             closedir(d);
                             return true;
                         }
diff --git a/services/inputflinger/tests/InputReader_test.cpp b/services/inputflinger/tests/InputReader_test.cpp
index 477beaf..1b19870 100644
--- a/services/inputflinger/tests/InputReader_test.cpp
+++ b/services/inputflinger/tests/InputReader_test.cpp
@@ -1343,19 +1343,8 @@
         mFakePolicy = sp<FakeInputReaderPolicy>::make();
         mFakePointerController = std::make_shared<FakePointerController>();
         mFakePolicy->setPointerController(mFakePointerController);
-        mTestListener = std::make_unique<TestInputListener>(/*eventHappenedTimeout=*/2000ms,
-                                                            /*eventDidNotHappenTimeout=*/30ms);
 
-        mReader = std::make_unique<InputReader>(std::make_shared<EventHub>(), mFakePolicy,
-                                                *mTestListener);
-        ASSERT_EQ(mReader->start(), OK);
-
-        // Since this test is run on a real device, all the input devices connected
-        // to the test device will show up in mReader. We wait for those input devices to
-        // show up before beginning the tests.
-        ASSERT_NO_FATAL_FAILURE(mFakePolicy->assertInputDevicesChanged());
-        ASSERT_NO_FATAL_FAILURE(mTestListener->assertNotifyInputDevicesChangedWasCalled());
-        ASSERT_NO_FATAL_FAILURE(mTestListener->assertNotifyConfigurationChangedWasCalled());
+        setupInputReader();
     }
 
     void TearDown() override {
@@ -1376,6 +1365,22 @@
                                       });
         return it != inputDevices.end() ? std::make_optional(*it) : std::nullopt;
     }
+
+    void setupInputReader() {
+        mTestListener = std::make_unique<TestInputListener>(/*eventHappenedTimeout=*/2000ms,
+                                                            /*eventDidNotHappenTimeout=*/30ms);
+
+        mReader = std::make_unique<InputReader>(std::make_shared<EventHub>(), mFakePolicy,
+                                                *mTestListener);
+        ASSERT_EQ(mReader->start(), OK);
+
+        // Since this test is run on a real device, all the input devices connected
+        // to the test device will show up in mReader. We wait for those input devices to
+        // show up before beginning the tests.
+        ASSERT_NO_FATAL_FAILURE(mFakePolicy->assertInputDevicesChanged());
+        ASSERT_NO_FATAL_FAILURE(mTestListener->assertNotifyInputDevicesChangedWasCalled());
+        ASSERT_NO_FATAL_FAILURE(mTestListener->assertNotifyConfigurationChangedWasCalled());
+    }
 };
 
 TEST_F(InputReaderIntegrationTest, TestInvalidDevice) {
@@ -1509,7 +1514,7 @@
 
 // --- TouchIntegrationTest ---
 
-class TouchIntegrationTest : public InputReaderIntegrationTest {
+class BaseTouchIntegrationTest : public InputReaderIntegrationTest {
 protected:
     const std::string UNIQUE_ID = "local:0";
 
@@ -1554,7 +1559,55 @@
     InputDeviceInfo mDeviceInfo;
 };
 
-TEST_F(TouchIntegrationTest, MultiTouchDeviceSource) {
+enum class TouchIntegrationTestDisplays { DISPLAY_INTERNAL, DISPLAY_INPUT_PORT, DISPLAY_UNIQUE_ID };
+
+class TouchIntegrationTest : public BaseTouchIntegrationTest,
+                             public testing::WithParamInterface<TouchIntegrationTestDisplays> {
+protected:
+    static constexpr std::optional<uint8_t> DISPLAY_PORT = 0;
+    const std::string INPUT_PORT = "uinput_touch/input0";
+
+    void SetUp() override {
+#if !defined(__ANDROID__)
+        GTEST_SKIP();
+#endif
+        if (GetParam() == TouchIntegrationTestDisplays::DISPLAY_INTERNAL) {
+            BaseTouchIntegrationTest::SetUp();
+            return;
+        }
+
+        // Set up the policy with an input-port or UniqueId association to the display.
+        bool isInputPortAssociation =
+                GetParam() == TouchIntegrationTestDisplays::DISPLAY_INPUT_PORT;
+
+        mFakePolicy = sp<FakeInputReaderPolicy>::make();
+        if (isInputPortAssociation) {
+            mFakePolicy->addInputPortAssociation(INPUT_PORT, DISPLAY_PORT.value());
+        } else {
+            mFakePolicy->addInputUniqueIdAssociation(INPUT_PORT, UNIQUE_ID);
+        }
+        mFakePointerController = std::make_shared<FakePointerController>();
+        mFakePolicy->setPointerController(mFakePointerController);
+
+        InputReaderIntegrationTest::setupInputReader();
+
+        mDevice = createUinputDevice<UinputTouchScreen>(Rect(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT),
+                                                        INPUT_PORT);
+        ASSERT_NO_FATAL_FAILURE(mFakePolicy->assertInputDevicesChanged());
+
+        // Add a display linked to a physical port or UniqueId.
+        setDisplayInfoAndReconfigure(DISPLAY_ID, DISPLAY_WIDTH, DISPLAY_HEIGHT, ui::ROTATION_0,
+                                     UNIQUE_ID, isInputPortAssociation ? DISPLAY_PORT : NO_PORT,
+                                     ViewportType::INTERNAL);
+        ASSERT_NO_FATAL_FAILURE(mFakePolicy->assertInputDevicesChanged());
+        ASSERT_NO_FATAL_FAILURE(mTestListener->assertNotifyConfigurationChangedWasCalled());
+        const auto info = findDeviceByName(mDevice->getName());
+        ASSERT_TRUE(info);
+        mDeviceInfo = *info;
+    }
+};
+
+TEST_P(TouchIntegrationTest, MultiTouchDeviceSource) {
     // The UinputTouchScreen is an MT device that supports MT_TOOL_TYPE and also supports stylus
     // buttons. It should show up as a touchscreen, stylus, and keyboard (for reporting button
     // presses).
@@ -1562,7 +1615,7 @@
               mDeviceInfo.getSources());
 }
 
-TEST_F(TouchIntegrationTest, InputEvent_ProcessSingleTouch) {
+TEST_P(TouchIntegrationTest, InputEvent_ProcessSingleTouch) {
     NotifyMotionArgs args;
     const Point centerPoint = mDevice->getCenterPoint();
 
@@ -1586,7 +1639,7 @@
     ASSERT_EQ(AMOTION_EVENT_ACTION_UP, args.action);
 }
 
-TEST_F(TouchIntegrationTest, InputEvent_ProcessMultiTouch) {
+TEST_P(TouchIntegrationTest, InputEvent_ProcessMultiTouch) {
     NotifyMotionArgs args;
     const Point centerPoint = mDevice->getCenterPoint();
 
@@ -1642,7 +1695,7 @@
  * palms, and wants to cancel Pointer 1, then it is safe to simply drop POINTER_1_UP event without
  * losing information about non-palm pointers.
  */
-TEST_F(TouchIntegrationTest, MultiTouch_PointerMoveAndSecondPointerUp) {
+TEST_P(TouchIntegrationTest, MultiTouch_PointerMoveAndSecondPointerUp) {
     NotifyMotionArgs args;
     const Point centerPoint = mDevice->getCenterPoint();
 
@@ -1685,7 +1738,7 @@
  * In this scenario, the movement of the second pointer just prior to liftoff is ignored, and never
  * gets sent to the listener.
  */
-TEST_F(TouchIntegrationTest, MultiTouch_PointerMoveAndSecondPointerMoveAndUp) {
+TEST_P(TouchIntegrationTest, MultiTouch_PointerMoveAndSecondPointerMoveAndUp) {
     NotifyMotionArgs args;
     const Point centerPoint = mDevice->getCenterPoint();
 
@@ -1725,7 +1778,7 @@
     assertReceivedMotion(AMOTION_EVENT_ACTION_MOVE, {centerPoint + Point(5, 5)});
 }
 
-TEST_F(TouchIntegrationTest, InputEvent_ProcessPalm) {
+TEST_P(TouchIntegrationTest, InputEvent_ProcessPalm) {
     NotifyMotionArgs args;
     const Point centerPoint = mDevice->getCenterPoint();
 
@@ -1776,7 +1829,7 @@
     ASSERT_EQ(AMOTION_EVENT_ACTION_UP, args.action);
 }
 
-TEST_F(TouchIntegrationTest, NotifiesPolicyWhenStylusGestureStarted) {
+TEST_P(TouchIntegrationTest, NotifiesPolicyWhenStylusGestureStarted) {
     const Point centerPoint = mDevice->getCenterPoint();
 
     // Send down with the pen tool selected. The policy should be notified of the stylus presence.
@@ -1828,19 +1881,69 @@
     ASSERT_NO_FATAL_FAILURE(mFakePolicy->assertStylusGestureNotified(mDeviceInfo.getId()));
 }
 
+TEST_P(TouchIntegrationTest, ExternalStylusConnectedDuringTouchGesture) {
+    const Point centerPoint = mDevice->getCenterPoint();
+
+    // Down
+    mDevice->sendSlot(FIRST_SLOT);
+    mDevice->sendTrackingId(FIRST_TRACKING_ID);
+    mDevice->sendDown(centerPoint);
+    mDevice->sendSync();
+    ASSERT_NO_FATAL_FAILURE(mTestListener->assertNotifyMotionWasCalled(
+            WithMotionAction(AMOTION_EVENT_ACTION_DOWN)));
+
+    // Move
+    mDevice->sendMove(centerPoint + Point(1, 1));
+    mDevice->sendSync();
+    ASSERT_NO_FATAL_FAILURE(mTestListener->assertNotifyMotionWasCalled(
+            WithMotionAction(AMOTION_EVENT_ACTION_MOVE)));
+
+    // Connecting an external stylus mid-gesture should not interrupt the ongoing gesture stream.
+    auto externalStylus = createUinputDevice<UinputExternalStylus>();
+    ASSERT_NO_FATAL_FAILURE(mFakePolicy->assertInputDevicesChanged());
+    ASSERT_NO_FATAL_FAILURE(mTestListener->assertNotifyConfigurationChangedWasCalled());
+    const auto stylusInfo = findDeviceByName(externalStylus->getName());
+    ASSERT_TRUE(stylusInfo);
+
+    // Move
+    mDevice->sendMove(centerPoint + Point(2, 2));
+    mDevice->sendSync();
+    ASSERT_NO_FATAL_FAILURE(mTestListener->assertNotifyMotionWasCalled(
+            WithMotionAction(AMOTION_EVENT_ACTION_MOVE)));
+
+    // Disconnecting an external stylus mid-gesture should not interrupt the ongoing gesture stream.
+    externalStylus.reset();
+    ASSERT_NO_FATAL_FAILURE(mFakePolicy->assertInputDevicesChanged());
+    ASSERT_NO_FATAL_FAILURE(mTestListener->assertNotifyConfigurationChangedWasCalled());
+    ASSERT_NO_FATAL_FAILURE(mTestListener->assertNotifyMotionWasNotCalled());
+
+    // Up
+    mDevice->sendUp();
+    mDevice->sendSync();
+    ASSERT_NO_FATAL_FAILURE(
+            mTestListener->assertNotifyMotionWasCalled(WithMotionAction(AMOTION_EVENT_ACTION_UP)));
+
+    ASSERT_NO_FATAL_FAILURE(mTestListener->assertNotifyMotionWasNotCalled());
+}
+
+INSTANTIATE_TEST_SUITE_P(TouchIntegrationTestDisplayVariants, TouchIntegrationTest,
+                         testing::Values(TouchIntegrationTestDisplays::DISPLAY_INTERNAL,
+                                         TouchIntegrationTestDisplays::DISPLAY_INPUT_PORT,
+                                         TouchIntegrationTestDisplays::DISPLAY_UNIQUE_ID));
+
 // --- StylusButtonIntegrationTest ---
 
 // Verify the behavior of button presses reported by various kinds of styluses, including buttons
 // reported by the touchscreen's device, by a fused external stylus, and by an un-fused external
 // stylus.
 template <typename UinputStylusDevice>
-class StylusButtonIntegrationTest : public TouchIntegrationTest {
+class StylusButtonIntegrationTest : public BaseTouchIntegrationTest {
 protected:
     void SetUp() override {
 #if !defined(__ANDROID__)
         GTEST_SKIP();
 #endif
-        TouchIntegrationTest::SetUp();
+        BaseTouchIntegrationTest::SetUp();
         mTouchscreen = mDevice.get();
         mTouchscreenInfo = mDeviceInfo;
 
@@ -1878,8 +1981,8 @@
     std::unique_ptr<UinputStylusDevice> mStylusDeviceLifecycleTracker{};
 
     // Hide the base class's device to expose it with a different name for readability.
-    using TouchIntegrationTest::mDevice;
-    using TouchIntegrationTest::mDeviceInfo;
+    using BaseTouchIntegrationTest::mDevice;
+    using BaseTouchIntegrationTest::mDeviceInfo;
 };
 
 using StylusButtonIntegrationTestTypes =
@@ -2131,7 +2234,7 @@
 // Verify the behavior of an external stylus. An external stylus can report pressure or button
 // data independently of the touchscreen, which is then sent as a MotionEvent as part of an
 // ongoing stylus gesture that is being emitted by the touchscreen.
-using ExternalStylusIntegrationTest = TouchIntegrationTest;
+using ExternalStylusIntegrationTest = BaseTouchIntegrationTest;
 
 TEST_F(ExternalStylusIntegrationTest, FusedExternalStylusPressureReported) {
     const Point centerPoint = mDevice->getCenterPoint();
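
TouchIntegrationTest is now value-parameterized over the display association mode. A minimal, self-contained gtest sketch of that pattern follows; the names below are illustrative, not taken from the test file.

#include <gtest/gtest.h>

enum class DisplayAssociation { INTERNAL, INPUT_PORT, UNIQUE_ID };

class AssociationTest : public testing::TestWithParam<DisplayAssociation> {};

TEST_P(AssociationTest, RunsOncePerVariant) {
    // GetParam() tells the body which association to set up, mirroring how
    // TouchIntegrationTest::SetUp() branches on the parameter above.
    EXPECT_NE(static_cast<int>(GetParam()), -1);
}

INSTANTIATE_TEST_SUITE_P(AllVariants, AssociationTest,
                         testing::Values(DisplayAssociation::INTERNAL,
                                         DisplayAssociation::INPUT_PORT,
                                         DisplayAssociation::UNIQUE_ID));
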
diff --git a/services/inputflinger/tests/TestInputListener.cpp b/services/inputflinger/tests/TestInputListener.cpp
index fc917dd..41e250f 100644
--- a/services/inputflinger/tests/TestInputListener.cpp
+++ b/services/inputflinger/tests/TestInputListener.cpp
@@ -57,6 +57,18 @@
                                            "Expected notifyDeviceReset() to have been called."));
 }
 
+void TestInputListener::clearNotifyDeviceResetCalls() {
+    std::scoped_lock<std::mutex> lock(mLock);
+    std::get<std::vector<NotifyDeviceResetArgs>>(mQueues).clear();
+}
+
+void TestInputListener::assertNotifyDeviceResetWasCalled(
+        const ::testing::Matcher<NotifyDeviceResetArgs>& matcher) {
+    NotifyDeviceResetArgs outEventArgs;
+    ASSERT_NO_FATAL_FAILURE(assertNotifyDeviceResetWasCalled(&outEventArgs));
+    ASSERT_THAT(outEventArgs, matcher);
+}
+
 void TestInputListener::assertNotifyDeviceResetWasNotCalled() {
     ASSERT_NO_FATAL_FAILURE(
             assertNotCalled<NotifyDeviceResetArgs>("notifyDeviceReset() should not be called."));
diff --git a/services/inputflinger/tests/TestInputListener.h b/services/inputflinger/tests/TestInputListener.h
index deb6048..3c5e014 100644
--- a/services/inputflinger/tests/TestInputListener.h
+++ b/services/inputflinger/tests/TestInputListener.h
@@ -43,6 +43,10 @@
 
     void assertNotifyConfigurationChangedWasNotCalled();
 
+    void clearNotifyDeviceResetCalls();
+
+    void assertNotifyDeviceResetWasCalled(const ::testing::Matcher<NotifyDeviceResetArgs>& matcher);
+
     void assertNotifyDeviceResetWasCalled(NotifyDeviceResetArgs* outEventArgs = nullptr);
 
     void assertNotifyDeviceResetWasNotCalled();
diff --git a/services/inputflinger/tests/TestInputListenerMatchers.h b/services/inputflinger/tests/TestInputListenerMatchers.h
index 020ea86..183383f 100644
--- a/services/inputflinger/tests/TestInputListenerMatchers.h
+++ b/services/inputflinger/tests/TestInputListenerMatchers.h
@@ -139,6 +139,10 @@
         return mDeviceId == args.deviceId;
     }
 
+    bool MatchAndExplain(const NotifyDeviceResetArgs& args, std::ostream*) const {
+        return mDeviceId == args.deviceId;
+    }
+
     bool MatchAndExplain(const InputEvent& event, std::ostream*) const {
         return mDeviceId == event.getDeviceId();
     }
diff --git a/services/inputflinger/tests/UinputDevice.cpp b/services/inputflinger/tests/UinputDevice.cpp
index 97a2614..7ceaccf 100644
--- a/services/inputflinger/tests/UinputDevice.cpp
+++ b/services/inputflinger/tests/UinputDevice.cpp
@@ -159,10 +159,11 @@
 
 // --- UinputTouchScreen ---
 
-UinputTouchScreen::UinputTouchScreen(const Rect& size)
+UinputTouchScreen::UinputTouchScreen(const Rect& size, const std::string& physicalPort)
       : UinputKeyboard(DEVICE_NAME, PRODUCT_ID,
                        {BTN_TOUCH, BTN_TOOL_PEN, BTN_STYLUS, BTN_STYLUS2, BTN_STYLUS3}),
-        mSize(size) {}
+        mSize(size),
+        mPhysicalPort(physicalPort) {}
 
 void UinputTouchScreen::configureDevice(int fd, uinput_user_dev* device) {
     UinputKeyboard::configureDevice(fd, device);
@@ -177,6 +178,9 @@
     ioctl(fd, UI_SET_ABSBIT, ABS_MT_TRACKING_ID);
     ioctl(fd, UI_SET_ABSBIT, ABS_MT_TOOL_TYPE);
     ioctl(fd, UI_SET_PROPBIT, INPUT_PROP_DIRECT);
+    if (!mPhysicalPort.empty()) {
+        ioctl(fd, UI_SET_PHYS, mPhysicalPort.c_str());
+    }
 
     device->absmin[ABS_MT_SLOT] = RAW_SLOT_MIN;
     device->absmax[ABS_MT_SLOT] = RAW_SLOT_MAX;
diff --git a/services/inputflinger/tests/UinputDevice.h b/services/inputflinger/tests/UinputDevice.h
index 51e331d..5b07465 100644
--- a/services/inputflinger/tests/UinputDevice.h
+++ b/services/inputflinger/tests/UinputDevice.h
@@ -197,11 +197,12 @@
     const Point getCenterPoint();
 
 protected:
-    explicit UinputTouchScreen(const Rect& size);
+    explicit UinputTouchScreen(const Rect& size, const std::string& physicalPort = "");
 
 private:
     void configureDevice(int fd, uinput_user_dev* device) override;
     const Rect mSize;
+    const std::string mPhysicalPort;
 };
 
 } // namespace android
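
UinputTouchScreen can now advertise a physical port via UI_SET_PHYS, which is what lets the input-port association variant match the test display. Here is a minimal standalone sketch of registering a uinput device with a phys path; it assumes permission to open /dev/uinput and omits error handling and event emission.

#include <fcntl.h>
#include <linux/uinput.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <cstring>

int main() {
    int fd = open("/dev/uinput", O_WRONLY | O_NONBLOCK);
    if (fd < 0) return 1;

    ioctl(fd, UI_SET_EVBIT, EV_KEY);
    ioctl(fd, UI_SET_KEYBIT, BTN_TOUCH);
    // Same kind of phys string the test associates with a display port.
    ioctl(fd, UI_SET_PHYS, "uinput_touch/input0");

    uinput_user_dev dev = {};
    std::strncpy(dev.name, "example-touchscreen", UINPUT_MAX_NAME_SIZE);
    dev.id.bustype = BUS_USB;
    write(fd, &dev, sizeof(dev));
    ioctl(fd, UI_DEV_CREATE);

    // ... emit events here ...

    ioctl(fd, UI_DEV_DESTROY);
    close(fd);
    return 0;
}
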
diff --git a/services/inputflinger/tests/fuzzers/Android.bp b/services/inputflinger/tests/fuzzers/Android.bp
index d7980f5..9313a89 100644
--- a/services/inputflinger/tests/fuzzers/Android.bp
+++ b/services/inputflinger/tests/fuzzers/Android.bp
@@ -56,6 +56,15 @@
     ],
     fuzz_config: {
         cc: ["android-framework-input@google.com"],
+        componentid: 155276,
+        hotlists: [
+            "4593311",
+        ],
+        description: "The fuzzer targets the APIs of libinputflinger library",
+        vector: "local_no_privileges_required",
+        service_privilege: "privileged",
+        users: "multi_user",
+        fuzzed_code_usage: "shipped",
     },
 }
 
diff --git a/services/powermanager/Android.bp b/services/powermanager/Android.bp
index 2523f3b..8b16890 100644
--- a/services/powermanager/Android.bp
+++ b/services/powermanager/Android.bp
@@ -9,7 +9,7 @@
 
 cc_library_shared {
     name: "libpowermanager",
-
+    defaults: ["android.hardware.power-ndk_export_shared"],
     srcs: [
         "BatterySaverPolicyConfig.cpp",
         "CoolingDevice.cpp",
@@ -41,7 +41,6 @@
         "android.hardware.power@1.1",
         "android.hardware.power@1.2",
         "android.hardware.power@1.3",
-        "android.hardware.power-V4-ndk",
     ],
 
     export_shared_lib_headers: [
@@ -49,7 +48,6 @@
         "android.hardware.power@1.1",
         "android.hardware.power@1.2",
         "android.hardware.power@1.3",
-        "android.hardware.power-V4-ndk",
     ],
 
     cflags: [
diff --git a/services/powermanager/benchmarks/Android.bp b/services/powermanager/benchmarks/Android.bp
index 03fc38d..2b5ddb1 100644
--- a/services/powermanager/benchmarks/Android.bp
+++ b/services/powermanager/benchmarks/Android.bp
@@ -23,6 +23,7 @@
 
 cc_benchmark {
     name: "libpowermanager_benchmarks",
+    defaults: ["android.hardware.power-ndk_shared"],
     srcs: [
         "main.cpp",
         "PowerHalAidlBenchmarks.cpp",
@@ -41,7 +42,6 @@
         "android.hardware.power@1.1",
         "android.hardware.power@1.2",
         "android.hardware.power@1.3",
-        "android.hardware.power-V4-ndk",
     ],
     static_libs: [
         "libtestUtil",
diff --git a/services/powermanager/tests/Android.bp b/services/powermanager/tests/Android.bp
index 08fcdc8..6fc96c0 100644
--- a/services/powermanager/tests/Android.bp
+++ b/services/powermanager/tests/Android.bp
@@ -23,6 +23,9 @@
 
 cc_test {
     name: "libpowermanager_test",
+    defaults: [
+        "android.hardware.power-ndk_shared",
+    ],
     test_suites: ["device-tests"],
     srcs: [
         "IThermalManagerTest.cpp",
@@ -52,7 +55,6 @@
         "android.hardware.power@1.1",
         "android.hardware.power@1.2",
         "android.hardware.power@1.3",
-        "android.hardware.power-V4-ndk",
     ],
     static_libs: [
         "libgmock",
diff --git a/services/sensorservice/SensorService.cpp b/services/sensorservice/SensorService.cpp
index 398d602..cfafc69 100644
--- a/services/sensorservice/SensorService.cpp
+++ b/services/sensorservice/SensorService.cpp
@@ -63,8 +63,10 @@
 #include <sys/types.h>
 #include <unistd.h>
 
+#include <condition_variable>
 #include <ctime>
 #include <future>
+#include <mutex>
 #include <string>
 
 #include <private/android_filesystem_config.h>
@@ -196,6 +198,16 @@
     if (mRuntimeSensorCallbacks.find(deviceId) == mRuntimeSensorCallbacks.end()) {
         mRuntimeSensorCallbacks.emplace(deviceId, callback);
     }
+
+    if (mRuntimeSensorHandler == nullptr) {
+        mRuntimeSensorEventBuffer =
+                new sensors_event_t[SensorEventQueue::MAX_RECEIVE_BUFFER_EVENT_COUNT];
+        mRuntimeSensorHandler = new RuntimeSensorHandler(this);
+        // Use PRIORITY_URGENT_DISPLAY as the injected sensor events should be dispatched as soon as
+        // possible, and also for consistency within the SensorService.
+        mRuntimeSensorHandler->run("RuntimeSensorHandler", PRIORITY_URGENT_DISPLAY);
+    }
+
     return handle;
 }
 
@@ -232,8 +244,9 @@
 }
 
 status_t SensorService::sendRuntimeSensorEvent(const sensors_event_t& event) {
-    Mutex::Autolock _l(mLock);
+    std::unique_lock<std::mutex> lock(mRuntimeSensorThreadMutex);
     mRuntimeSensorEventQueue.push(event);
+    mRuntimeSensorsCv.notify_all();
     return OK;
 }
 
@@ -458,6 +471,7 @@
             const size_t minBufferSize = SensorEventQueue::MAX_RECEIVE_BUFFER_EVENT_COUNT;
             mSensorEventBuffer = new sensors_event_t[minBufferSize];
             mSensorEventScratch = new sensors_event_t[minBufferSize];
+            mRuntimeSensorEventBuffer = nullptr;
             mMapFlushEventsToConnections = new wp<const SensorEventConnection> [minBufferSize];
             mCurrentOperatingMode = NORMAL;
 
@@ -1089,7 +1103,6 @@
         recordLastValueLocked(mSensorEventBuffer, count);
 
         // handle virtual sensors
-        bool bufferNeedsSorting = false;
         if (count && vcount) {
             sensors_event_t const * const event = mSensorEventBuffer;
             if (!mActiveVirtualSensors.empty()) {
@@ -1125,37 +1138,11 @@
                     // record the last synthesized values
                     recordLastValueLocked(&mSensorEventBuffer[count], k);
                     count += k;
-                    bufferNeedsSorting = true;
+                    sortEventBuffer(mSensorEventBuffer, count);
                 }
             }
         }
 
-        // handle runtime sensors
-        {
-            size_t k = 0;
-            while (!mRuntimeSensorEventQueue.empty()) {
-                if (count + k >= minBufferSize) {
-                    ALOGE("buffer too small to hold all events: count=%zd, k=%zu, size=%zu",
-                          count, k, minBufferSize);
-                    break;
-                }
-                mSensorEventBuffer[count + k] = mRuntimeSensorEventQueue.front();
-                mRuntimeSensorEventQueue.pop();
-                k++;
-            }
-            if (k) {
-                // record the last synthesized values
-                recordLastValueLocked(&mSensorEventBuffer[count], k);
-                count += k;
-                bufferNeedsSorting = true;
-            }
-        }
-
-        if (bufferNeedsSorting) {
-            // sort the buffer by time-stamps
-            sortEventBuffer(mSensorEventBuffer, count);
-        }
-
         // handle backward compatibility for RotationVector sensor
         if (halVersion < SENSORS_DEVICE_API_VERSION_1_0) {
             for (int i = 0; i < count; i++) {
@@ -1234,7 +1221,7 @@
         bool needsWakeLock = false;
         for (const sp<SensorEventConnection>& connection : activeConnections) {
             connection->sendEvents(mSensorEventBuffer, count, mSensorEventScratch,
-                    mMapFlushEventsToConnections);
+                                   mMapFlushEventsToConnections);
             needsWakeLock |= connection->needsWakeLock();
             // If the connection has one-shot sensors, it may be cleaned up after first trigger.
             // Early check for one-shot sensors.
@@ -1253,6 +1240,46 @@
     return false;
 }
 
+void SensorService::processRuntimeSensorEvents() {
+    size_t count = 0;
+    const size_t maxBufferSize = SensorEventQueue::MAX_RECEIVE_BUFFER_EVENT_COUNT;
+
+    {
+        std::unique_lock<std::mutex> lock(mRuntimeSensorThreadMutex);
+
+        if (mRuntimeSensorEventQueue.empty()) {
+            mRuntimeSensorsCv.wait(lock, [this] { return !mRuntimeSensorEventQueue.empty(); });
+        }
+
+        // Pop the events from the queue into the buffer until it's empty or the buffer is full.
+        while (!mRuntimeSensorEventQueue.empty()) {
+            if (count >= maxBufferSize) {
+                ALOGE("buffer too small to hold all events: count=%zd, size=%zu", count,
+                      maxBufferSize);
+                break;
+            }
+            mRuntimeSensorEventBuffer[count] = mRuntimeSensorEventQueue.front();
+            mRuntimeSensorEventQueue.pop();
+            count++;
+        }
+    }
+
+    if (count) {
+        ConnectionSafeAutolock connLock = mConnectionHolder.lock(mLock);
+
+        recordLastValueLocked(mRuntimeSensorEventBuffer, count);
+        sortEventBuffer(mRuntimeSensorEventBuffer, count);
+
+        for (const sp<SensorEventConnection>& connection : connLock.getActiveConnections()) {
+            connection->sendEvents(mRuntimeSensorEventBuffer, count, /* scratch= */ nullptr,
+                                   /* mapFlushEventsToConnections= */ nullptr);
+            if (connection->hasOneShotSensors()) {
+                cleanupAutoDisabledSensorLocked(connection, mRuntimeSensorEventBuffer, count);
+            }
+        }
+    }
+}
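
The new processRuntimeSensorEvents() above is a classic condition-variable drain loop: block until the queue is non-empty, move a bounded batch out while holding the lock, then fan the batch out with the lock released. Below is a minimal standalone sketch of that pattern using only standard-library types rather than the SensorService ones; Event, kMaxBatch and deliver are illustrative names.

// Standalone sketch of a bounded drain loop driven by a condition variable.
#include <condition_variable>
#include <cstddef>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

struct Event { int id; };

std::mutex gMutex;
std::condition_variable gCv;
std::queue<Event> gQueue;

constexpr size_t kMaxBatch = 16;

void deliver(const std::vector<Event>& batch) {
    std::printf("delivering %zu events\n", batch.size());
}

void drainLoop(size_t totalExpected) {
    size_t processed = 0;
    while (processed < totalExpected) {
        std::vector<Event> batch;
        {
            std::unique_lock<std::mutex> lock(gMutex);
            // Sleep until a producer queues at least one event.
            gCv.wait(lock, [] { return !gQueue.empty(); });
            // Drain into a bounded local buffer, analogous to the copy into
            // the runtime sensor event buffer above.
            while (!gQueue.empty() && batch.size() < kMaxBatch) {
                batch.push_back(gQueue.front());
                gQueue.pop();
            }
        }
        // Deliver outside the queue lock so producers are never blocked on consumers.
        deliver(batch);
        processed += batch.size();
    }
}

int main() {
    std::thread worker(drainLoop, size_t{3});
    for (int i = 0; i < 3; ++i) {
        {
            std::lock_guard<std::mutex> lock(gMutex);
            gQueue.push({i});
        }
        gCv.notify_one();
    }
    worker.join();
}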
+
 sp<Looper> SensorService::getLooper() const {
     return mLooper;
 }
@@ -1300,6 +1327,14 @@
     return false;
 }
 
+bool SensorService::RuntimeSensorHandler::threadLoop() {
+    ALOGD("new thread RuntimeSensorHandler");
+    do {
+        mService->processRuntimeSensorEvents();
+    } while (!Thread::exitPending());
+    return false;
+}
+
 void SensorService::recordLastValueLocked(
         const sensors_event_t* buffer, size_t count) {
     for (size_t i = 0; i < count; i++) {
diff --git a/services/sensorservice/SensorService.h b/services/sensorservice/SensorService.h
index 545f6c2..bf43101 100644
--- a/services/sensorservice/SensorService.h
+++ b/services/sensorservice/SensorService.h
@@ -42,6 +42,8 @@
 
 #include <stdint.h>
 #include <sys/types.h>
+#include <condition_variable>
+#include <mutex>
 #include <queue>
 #include <unordered_map>
 #include <unordered_set>
@@ -208,6 +210,7 @@
     class SensorEventAckReceiver;
     class SensorRecord;
     class SensorRegistrationInfo;
+    class RuntimeSensorHandler;
 
     // Promoting a SensorEventConnection or SensorDirectConnection from wp to sp must be done with
     // mLock held, but destroying that sp must be done unlocked to avoid a race condition that
@@ -264,6 +267,14 @@
         SortedVector< wp<SensorDirectConnection> > mDirectConnections;
     };
 
+    class RuntimeSensorHandler : public Thread {
+        sp<SensorService> const mService;
+    public:
+        virtual bool threadLoop();
+        explicit RuntimeSensorHandler(const sp<SensorService>& service) : mService(service) {
+        }
+    };
+
     // If accessing a sensor we need to make sure the UID has access to it. If
     // the app UID is idle then it cannot access sensors and gets no trigger
     // events, no on-change events, flush event behavior does not change, and
@@ -368,6 +379,8 @@
     // Thread interface
     virtual bool threadLoop();
 
+    void processRuntimeSensorEvents();
+
     // ISensorServer interface
     virtual Vector<Sensor> getSensorList(const String16& opPackageName);
     virtual Vector<Sensor> getDynamicSensorList(const String16& opPackageName);
@@ -512,6 +525,10 @@
     uint32_t mSocketBufferSize;
     sp<Looper> mLooper;
     sp<SensorEventAckReceiver> mAckReceiver;
+    sp<RuntimeSensorHandler> mRuntimeSensorHandler;
+    // Mutex and CV used to notify the mRuntimeSensorHandler thread that there are new events.
+    std::mutex mRutimeSensorThreadMutex;
+    std::condition_variable mRuntimeSensorsCv;
 
     // protected by mLock
     mutable Mutex mLock;
@@ -519,7 +536,7 @@
     std::unordered_set<int> mActiveVirtualSensors;
     SensorConnectionHolder mConnectionHolder;
     bool mWakeLockAcquired;
-    sensors_event_t *mSensorEventBuffer, *mSensorEventScratch;
+    sensors_event_t *mSensorEventBuffer, *mSensorEventScratch, *mRuntimeSensorEventBuffer;
     // WARNING: these SensorEventConnection instances must not be promoted to sp, except via
     // modification to add support for them in ConnectionSafeAutolock
     wp<const SensorEventConnection> * mMapFlushEventsToConnections;
diff --git a/services/sensorservice/tests/sensorservicetest.cpp b/services/sensorservice/tests/sensorservicetest.cpp
index e939d51..1baf397 100644
--- a/services/sensorservice/tests/sensorservicetest.cpp
+++ b/services/sensorservice/tests/sensorservicetest.cpp
@@ -87,7 +87,7 @@
     int ret = mgr.createDirectChannel(
             kMemSize, ASENSOR_DIRECT_CHANNEL_TYPE_SHARED_MEMORY, resourceHandle);
 
-    // Should print -22 (BAD_VALUE) and the device runtime shouldn't restart
+    // Should not succeed (ret != OK) and the device runtime shouldn't restart
     printf("createInvalidDirectChannel=%d\n", ret);
 
     // Secondary test: correct channel creation & destruction (should print 0)
diff --git a/services/surfaceflinger/Android.bp b/services/surfaceflinger/Android.bp
index 326645e..0101c17 100644
--- a/services/surfaceflinger/Android.bp
+++ b/services/surfaceflinger/Android.bp
@@ -26,6 +26,7 @@
     name: "libsurfaceflinger_defaults",
     defaults: [
         "android.hardware.graphics.composer3-ndk_shared",
+        "android.hardware.power-ndk_shared",
         "librenderengine_deps",
         "libtimestats_deps",
         "surfaceflinger_defaults",
@@ -48,7 +49,6 @@
         "android.hardware.graphics.composer@2.2",
         "android.hardware.graphics.composer@2.3",
         "android.hardware.graphics.composer@2.4",
-        "android.hardware.power-V4-ndk",
         "libbase",
         "libbinder",
         "libbinder_ndk",
diff --git a/services/surfaceflinger/ClientCache.h b/services/surfaceflinger/ClientCache.h
index b56b252..fefc040 100644
--- a/services/surfaceflinger/ClientCache.h
+++ b/services/surfaceflinger/ClientCache.h
@@ -29,7 +29,9 @@
 #include <set>
 #include <unordered_map>
 
-#define BUFFER_CACHE_MAX_SIZE 64
+// 4096 is based on 64 buffers * 64 layers. Once this limit is reached, the least recently used
+// buffer is uncached before the new buffer is cached.
+#define BUFFER_CACHE_MAX_SIZE 4096
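
For reference, the eviction behavior described in the new comment amounts to a standard LRU cache: once capacity is reached, the least recently used entry is dropped before the new one is inserted. The following self-contained sketch shows that policy; it is not the ClientCache implementation, just the idea.

// Minimal LRU cache: most recently used entries live at the front of the list.
#include <cstdint>
#include <cstdio>
#include <list>
#include <unordered_map>

class LruCache {
public:
    explicit LruCache(size_t capacity) : mCapacity(capacity) {}

    void put(uint64_t id, int value) {
        auto it = mIndex.find(id);
        if (it != mIndex.end()) {
            mOrder.erase(it->second);
            mIndex.erase(it);
        } else if (mIndex.size() == mCapacity) {
            // Evict the least recently used entry (back of the list).
            mIndex.erase(mOrder.back().first);
            mOrder.pop_back();
        }
        mOrder.emplace_front(id, value);
        mIndex[id] = mOrder.begin();
    }

    int* get(uint64_t id) {
        auto it = mIndex.find(id);
        if (it == mIndex.end()) return nullptr;
        // Mark as most recently used by splicing it to the front.
        mOrder.splice(mOrder.begin(), mOrder, it->second);
        return &it->second->second;
    }

private:
    size_t mCapacity;
    std::list<std::pair<uint64_t, int>> mOrder;  // MRU at front, LRU at back
    std::unordered_map<uint64_t, std::list<std::pair<uint64_t, int>>::iterator> mIndex;
};

int main() {
    LruCache cache(2);
    cache.put(1, 10);
    cache.put(2, 20);
    cache.get(1);      // touch 1 so that 2 becomes least recently used
    cache.put(3, 30);  // evicts 2
    std::printf("has 2: %d\n", cache.get(2) != nullptr);  // prints 0
}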
 
 namespace android {
 
diff --git a/services/surfaceflinger/CompositionEngine/Android.bp b/services/surfaceflinger/CompositionEngine/Android.bp
index 3426495..06c5e4c 100644
--- a/services/surfaceflinger/CompositionEngine/Android.bp
+++ b/services/surfaceflinger/CompositionEngine/Android.bp
@@ -11,6 +11,7 @@
     name: "libcompositionengine_defaults",
     defaults: [
         "android.hardware.graphics.composer3-ndk_shared",
+        "android.hardware.power-ndk_shared",
         "librenderengine_deps",
         "libtimestats_deps",
         "surfaceflinger_defaults",
@@ -27,7 +28,6 @@
         "android.hardware.graphics.composer@2.4",
         "android.hardware.power@1.0",
         "android.hardware.power@1.3",
-        "android.hardware.power-V4-ndk",
         "libbase",
         "libcutils",
         "libgui",
diff --git a/services/surfaceflinger/CompositionEngine/src/ClientCompositionRequestCache.cpp b/services/surfaceflinger/CompositionEngine/src/ClientCompositionRequestCache.cpp
index 7e020ee..752257b 100644
--- a/services/surfaceflinger/CompositionEngine/src/ClientCompositionRequestCache.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/ClientCompositionRequestCache.cpp
@@ -45,8 +45,7 @@
             lhs.useTextureFiltering == rhs.useTextureFiltering &&
             lhs.textureTransform == rhs.textureTransform &&
             lhs.usePremultipliedAlpha == rhs.usePremultipliedAlpha &&
-            lhs.isOpaque == rhs.isOpaque && lhs.isY410BT2020 == rhs.isY410BT2020 &&
-            lhs.maxLuminanceNits == rhs.maxLuminanceNits;
+            lhs.isOpaque == rhs.isOpaque && lhs.maxLuminanceNits == rhs.maxLuminanceNits;
 }
 
 inline bool equalIgnoringBuffer(const renderengine::LayerSettings& lhs,
diff --git a/services/surfaceflinger/CompositionEngine/src/DisplayColorProfile.cpp b/services/surfaceflinger/CompositionEngine/src/DisplayColorProfile.cpp
index 8f67f36..97725ea 100644
--- a/services/surfaceflinger/CompositionEngine/src/DisplayColorProfile.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/DisplayColorProfile.cpp
@@ -220,17 +220,6 @@
     minLuminance = minLuminance <= 0.0 ? sDefaultMinLumiance : minLuminance;
     maxLuminance = maxLuminance <= 0.0 ? sDefaultMaxLumiance : maxLuminance;
     maxAverageLuminance = maxAverageLuminance <= 0.0 ? sDefaultMaxLumiance : maxAverageLuminance;
-    if (args.hasWideColorGamut) {
-        // insert HDR10/HLG as we will force client composition for HDR10/HLG
-        // layers
-        if (!hasHDR10Support()) {
-            types.push_back(ui::Hdr::HDR10);
-        }
-
-        if (!hasHLGSupport()) {
-            types.push_back(ui::Hdr::HLG);
-        }
-    }
 
     mHdrCapabilities = HdrCapabilities(types, maxLuminance, maxAverageLuminance, minLuminance);
 }
diff --git a/services/surfaceflinger/CompositionEngine/src/Output.cpp b/services/surfaceflinger/CompositionEngine/src/Output.cpp
index d4230f5..0b11e74 100644
--- a/services/surfaceflinger/CompositionEngine/src/Output.cpp
+++ b/services/surfaceflinger/CompositionEngine/src/Output.cpp
@@ -895,13 +895,19 @@
 compositionengine::OutputLayer* Output::findLayerRequestingBackgroundComposition() const {
     compositionengine::OutputLayer* layerRequestingBgComposition = nullptr;
     for (auto* layer : getOutputLayersOrderedByZ()) {
-        auto* compState = layer->getLayerFE().getCompositionState();
+        const auto* compState = layer->getLayerFE().getCompositionState();
 
         // If any layer has a sideband stream, we will disable blurs. In that case, we don't
         // want to force client composition because of the blur.
         if (compState->sidebandStream != nullptr) {
             return nullptr;
         }
+
+        // If RenderEngine cannot render protected content, we cannot blur.
+        if (compState->hasProtectedContent &&
+            !getCompositionEngine().getRenderEngine().supportsProtectedContent()) {
+            return nullptr;
+        }
         if (compState->isOpaque) {
             continue;
         }
diff --git a/services/surfaceflinger/CompositionEngine/tests/DisplayColorProfileTest.cpp b/services/surfaceflinger/CompositionEngine/tests/DisplayColorProfileTest.cpp
index b3ff2ec..03a97dc 100644
--- a/services/surfaceflinger/CompositionEngine/tests/DisplayColorProfileTest.cpp
+++ b/services/surfaceflinger/CompositionEngine/tests/DisplayColorProfileTest.cpp
@@ -282,39 +282,6 @@
     }
 }
 
-TEST_F(DisplayColorProfileTest, ctorSignalsHdrSupportForAnyWideColorGamutDevice) {
-    {
-        // If the output does not profile wide color gamut, then no HDR modes
-        // will be profileed in the generated HDR capabilities.
-        auto profile = ProfileFactory().setHasWideColorGamut(false).build();
-
-        EXPECT_THAT(profile.getHdrCapabilities().getSupportedHdrTypes(), IsEmpty());
-    }
-
-    {
-        // If the HWC does not show profile for certain HDR modes, then the
-        // generated HDR capabilities will indicate profile anyway.
-        auto profile = ProfileFactory().setHasWideColorGamut(true).build();
-
-        EXPECT_THAT(profile.getHdrCapabilities().getSupportedHdrTypes(), SizeIs(2));
-        EXPECT_THAT(profile.getHdrCapabilities().getSupportedHdrTypes(), Contains(Hdr::HDR10));
-        EXPECT_THAT(profile.getHdrCapabilities().getSupportedHdrTypes(), Contains(Hdr::HLG));
-    }
-
-    {
-        // If the HWC profiles the HDR modes, then the generated capabilities
-        // still has one entry for each HDR type.
-        auto profile = ProfileFactory()
-                               .setHasWideColorGamut(true)
-                               .addHdrTypes({Hdr::HLG, Hdr::HDR10})
-                               .build();
-
-        EXPECT_THAT(profile.getHdrCapabilities().getSupportedHdrTypes(), SizeIs(2));
-        EXPECT_THAT(profile.getHdrCapabilities().getSupportedHdrTypes(), Contains(Hdr::HDR10));
-        EXPECT_THAT(profile.getHdrCapabilities().getSupportedHdrTypes(), Contains(Hdr::HLG));
-    }
-}
-
 /* ------------------------------------------------------------------------
  * DisplayColorProfile::hasRenderIntent
  */
diff --git a/services/surfaceflinger/DisplayDevice.cpp b/services/surfaceflinger/DisplayDevice.cpp
index 32bd890..70ccaf8 100644
--- a/services/surfaceflinger/DisplayDevice.cpp
+++ b/services/surfaceflinger/DisplayDevice.cpp
@@ -38,7 +38,6 @@
 #include <log/log.h>
 #include <system/window.h>
 
-#include "Display/DisplaySnapshot.h"
 #include "DisplayDevice.h"
 #include "FrontEnd/DisplayInfo.h"
 #include "HdrSdrRatioOverlay.h"
@@ -214,10 +213,7 @@
     ATRACE_INT(mRenderFrameRateFPSTrace.c_str(), renderFps.getIntValue());
 
     mRefreshRateSelector->setActiveMode(modeId, renderFps);
-
-    if (mRefreshRateOverlay) {
-        mRefreshRateOverlay->changeRefreshRate(displayFps, renderFps);
-    }
+    updateRefreshRateOverlayRate(displayFps, renderFps);
 }
 
 status_t DisplayDevice::initiateModeChange(const ActiveModeInfo& info,
@@ -231,10 +227,18 @@
         return BAD_VALUE;
     }
     mUpcomingActiveMode = info;
-    ATRACE_INT(mActiveModeFPSHwcTrace.c_str(), info.modeOpt->modePtr->getFps().getIntValue());
-    return mHwComposer.setActiveModeWithConstraints(getPhysicalId(),
-                                                    info.modeOpt->modePtr->getHwcId(), constraints,
-                                                    outTimeline);
+    mIsModeSetPending = true;
+
+    const auto& pendingMode = *info.modeOpt->modePtr;
+    ATRACE_INT(mActiveModeFPSHwcTrace.c_str(), pendingMode.getFps().getIntValue());
+
+    return mHwComposer.setActiveModeWithConstraints(getPhysicalId(), pendingMode.getHwcId(),
+                                                    constraints, outTimeline);
+}
+
+void DisplayDevice::finalizeModeChange(DisplayModeId modeId, Fps displayFps, Fps renderFps) {
+    setActiveMode(modeId, displayFps, renderFps);
+    mIsModeSetPending = false;
 }
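
The initiateModeChange/finalizeModeChange pair above is effectively a small pending-state handshake: initiating latches a "mode set pending" flag, and the commit path clears it only once the new mode has actually been applied. A toy sketch of that flow, with an illustrative ModeState class standing in for DisplayDevice:

#include <cstdio>

class ModeState {
public:
    void initiateModeChange(int modeId) {
        mUpcomingMode = modeId;
        mModeSetPending = true;  // commit must wait for finalize before composing
    }

    void finalizeModeChange() {
        mActiveMode = mUpcomingMode;
        mModeSetPending = false;
    }

    bool isModeSetPending() const { return mModeSetPending; }
    int activeMode() const { return mActiveMode; }

private:
    int mActiveMode = 0;
    int mUpcomingMode = 0;
    bool mModeSetPending = false;
};

int main() {
    ModeState state;
    state.initiateModeChange(3);
    std::printf("pending=%d active=%d\n", state.isModeSetPending(), state.activeMode());
    state.finalizeModeChange();
    std::printf("pending=%d active=%d\n", state.isModeSetPending(), state.activeMode());
}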
 
 nsecs_t DisplayDevice::getVsyncPeriodFromHWC() const {
@@ -466,7 +470,7 @@
     mRefreshRateOverlay = std::make_unique<RefreshRateOverlay>(fpsRange, features);
     mRefreshRateOverlay->setLayerStack(getLayerStack());
     mRefreshRateOverlay->setViewport(getSize());
-    updateRefreshRateOverlayRate(getActiveMode().modePtr->getFps(), getActiveMode().fps);
+    updateRefreshRateOverlayRate(getActiveMode().modePtr->getFps(), getActiveMode().fps, setByHwc);
 }
 
 void DisplayDevice::updateRefreshRateOverlayRate(Fps displayFps, Fps renderFps, bool setByHwc) {
diff --git a/services/surfaceflinger/DisplayDevice.h b/services/surfaceflinger/DisplayDevice.h
index e92125a..a3fa701 100644
--- a/services/surfaceflinger/DisplayDevice.h
+++ b/services/surfaceflinger/DisplayDevice.h
@@ -218,6 +218,8 @@
         return mUpcomingActiveMode;
     }
 
+    bool isModeSetPending() const REQUIRES(kMainThreadContext) { return mIsModeSetPending; }
+
     scheduler::FrameRateMode getActiveMode() const REQUIRES(kMainThreadContext) {
         return mRefreshRateSelector->getActiveMode();
     }
@@ -229,6 +231,9 @@
                                 hal::VsyncPeriodChangeTimeline* outTimeline)
             REQUIRES(kMainThreadContext);
 
+    void finalizeModeChange(DisplayModeId, Fps displayFps, Fps renderFps)
+            REQUIRES(kMainThreadContext);
+
     scheduler::RefreshRateSelector& refreshRateSelector() const { return *mRefreshRateSelector; }
 
     // Extends the lifetime of the RefreshRateSelector, so it can outlive this DisplayDevice.
@@ -313,7 +318,9 @@
     ActiveModeInfo mDesiredActiveMode GUARDED_BY(mActiveModeLock);
     TracedOrdinal<bool> mDesiredActiveModeChanged GUARDED_BY(mActiveModeLock) =
             {ftl::Concat("DesiredActiveModeChanged-", getId().value).c_str(), false};
+
     ActiveModeInfo mUpcomingActiveMode GUARDED_BY(kMainThreadContext);
+    bool mIsModeSetPending GUARDED_BY(kMainThreadContext) = false;
 };
 
 struct DisplayDeviceState {
diff --git a/services/surfaceflinger/DisplayHardware/HWC2.cpp b/services/surfaceflinger/DisplayHardware/HWC2.cpp
index aaf2523..0c2b77d 100644
--- a/services/surfaceflinger/DisplayHardware/HWC2.cpp
+++ b/services/surfaceflinger/DisplayHardware/HWC2.cpp
@@ -311,6 +311,14 @@
 }
 
 Error Display::supportsDoze(bool* outSupport) const {
+    {
+        std::scoped_lock lock(mDisplayCapabilitiesMutex);
+        if (!mDisplayCapabilities) {
+            // The display has not turned on since boot, so DOZE support is unknown.
+            ALOGW("%s: haven't queried capabilities yet!", __func__);
+            return Error::NO_RESOURCES;
+        }
+    }
     *outSupport = hasCapability(DisplayCapability::DOZE);
     return Error::NONE;
 }
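
The guarded early return above makes "capabilities not yet queried" a distinct outcome instead of silently defaulting to "no doze". A hedged standalone sketch of that tri-state query, with invented names (DisplayCaps, onFirstPowerOn) in place of the HWC2 types:

#include <cstdio>
#include <optional>
#include <set>

enum class Error { NONE, NO_RESOURCES };
enum class Capability { DOZE };

class DisplayCaps {
public:
    Error supportsDoze(bool* outSupport) const {
        if (!mCapabilities) {
            // Never powered on: support is unknown rather than "false".
            return Error::NO_RESOURCES;
        }
        *outSupport = mCapabilities->count(Capability::DOZE) > 0;
        return Error::NONE;
    }

    // In the real service the capability list arrives from the HAL on first power-on.
    void onFirstPowerOn() { mCapabilities = std::set<Capability>{Capability::DOZE}; }

private:
    std::optional<std::set<Capability>> mCapabilities;  // unset until first populated
};

int main() {
    DisplayCaps display;
    bool doze = false;
    std::printf("before power-on: %d\n", static_cast<int>(display.supportsDoze(&doze)));
    display.onFirstPowerOn();
    std::printf("after power-on: %d doze=%d\n",
                static_cast<int>(display.supportsDoze(&doze)), doze);
}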
diff --git a/services/surfaceflinger/DisplayHardware/HWComposer.cpp b/services/surfaceflinger/DisplayHardware/HWComposer.cpp
index 3177b33..a9bb928 100644
--- a/services/surfaceflinger/DisplayHardware/HWComposer.cpp
+++ b/services/surfaceflinger/DisplayHardware/HWComposer.cpp
@@ -616,19 +616,29 @@
             ALOGV("setPowerMode: Calling HWC %s", to_string(mode).c_str());
             {
                 bool supportsDoze = false;
-                auto error = hwcDisplay->supportsDoze(&supportsDoze);
-                if (error != hal::Error::NONE) {
-                    LOG_HWC_ERROR("supportsDoze", error, displayId);
-                }
+                const auto queryDozeError = hwcDisplay->supportsDoze(&supportsDoze);
 
-                if (!supportsDoze) {
+                // queryDozeError might be NO_RESOURCES, in the case of a display that has never
+                // been turned on. In that case, attempt to set to DOZE anyway.
+                if (!supportsDoze && queryDozeError == hal::Error::NONE) {
                     mode = hal::PowerMode::ON;
                 }
 
-                error = hwcDisplay->setPowerMode(mode);
+                auto error = hwcDisplay->setPowerMode(mode);
                 if (error != hal::Error::NONE) {
                     LOG_HWC_ERROR(("setPowerMode(" + to_string(mode) + ")").c_str(), error,
                                   displayId);
+                    // If the display has never been turned on, its doze support
+                    // is unknown and it may truly not support doze. Try
+                    // switching it to ON instead.
+                    if (queryDozeError == hal::Error::NO_RESOURCES) {
+                        ALOGD("%s: failed to set %s to %s. Trying again with ON", __func__,
+                              to_string(displayId).c_str(), to_string(mode).c_str());
+                        error = hwcDisplay->setPowerMode(hal::PowerMode::ON);
+                        if (error != hal::Error::NONE) {
+                            LOG_HWC_ERROR("setPowerMode(ON)", error, displayId);
+                        }
+                    }
                 }
             }
             break;
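
Putting the two halves together, the caller-side logic is: only downgrade to ON up front when doze support was definitively reported absent, and if support was unknown and the DOZE attempt fails, retry with ON. A compact sketch under those assumptions; querySupportsDoze and setPowerModeHal are stand-ins for the HAL calls, not real APIs.

#include <cstdio>

enum class Error { NONE, NO_RESOURCES, UNSUPPORTED };
enum class PowerMode { ON, DOZE };

// Stand-ins for the HWC calls; a real display would forward to the HAL.
Error querySupportsDoze(bool* outSupport) { return Error::NO_RESOURCES; }
Error setPowerModeHal(PowerMode mode) {
    return mode == PowerMode::DOZE ? Error::UNSUPPORTED : Error::NONE;
}

Error requestDoze() {
    bool supportsDoze = false;
    const Error queryError = querySupportsDoze(&supportsDoze);

    PowerMode mode = PowerMode::DOZE;
    // Only downgrade up front when support was definitively reported as absent.
    if (queryError == Error::NONE && !supportsDoze) {
        mode = PowerMode::ON;
    }

    Error error = setPowerModeHal(mode);
    if (error != Error::NONE && queryError == Error::NO_RESOURCES) {
        // Support was unknown and the attempt failed: retry with ON.
        std::printf("DOZE rejected, retrying with ON\n");
        error = setPowerModeHal(PowerMode::ON);
    }
    return error;
}

int main() {
    std::printf("result: %d\n", static_cast<int>(requestDoze()));
}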
diff --git a/services/surfaceflinger/FrontEnd/LayerSnapshot.cpp b/services/surfaceflinger/FrontEnd/LayerSnapshot.cpp
index d389a79..d42bce6 100644
--- a/services/surfaceflinger/FrontEnd/LayerSnapshot.cpp
+++ b/services/surfaceflinger/FrontEnd/LayerSnapshot.cpp
@@ -19,6 +19,7 @@
 #define LOG_TAG "SurfaceFlinger"
 
 #include "LayerSnapshot.h"
+#include "Layer.h"
 
 namespace android::surfaceflinger::frontend {
 
@@ -345,10 +346,9 @@
     clientChanges = requested.what;
     changes = requested.changes;
     contentDirty = requested.what & layer_state_t::CONTENT_DIRTY;
-    // TODO(b/238781169) scope down the changes to only buffer updates.
-    hasReadyFrame = requested.hasReadyFrame();
+    hasReadyFrame = requested.autoRefresh;
     sidebandStreamHasFrame = requested.hasSidebandStreamFrame();
-    updateSurfaceDamage(requested, hasReadyFrame, forceFullDamage, surfaceDamage);
+    updateSurfaceDamage(requested, requested.hasReadyFrame(), forceFullDamage, surfaceDamage);
 
     if (forceUpdate || requested.what & layer_state_t::eTransparentRegionChanged) {
         transparentRegionHint = requested.transparentRegion;
@@ -364,7 +364,7 @@
         geomBufferUsesDisplayInverseTransform = requested.transformToDisplayInverse;
     }
     if (forceUpdate || requested.what & layer_state_t::eDataspaceChanged) {
-        dataspace = requested.dataspace;
+        dataspace = Layer::translateDataspace(requested.dataspace);
     }
     if (forceUpdate || requested.what & layer_state_t::eExtendedRangeBrightnessChanged) {
         currentHdrSdrRatio = requested.currentHdrSdrRatio;
@@ -469,19 +469,9 @@
     if (forceUpdate ||
         requested.what &
                 (layer_state_t::eBufferChanged | layer_state_t::eDataspaceChanged |
-                 layer_state_t::eApiChanged)) {
-        isHdrY410 = requested.dataspace == ui::Dataspace::BT2020_ITU_PQ &&
-                requested.api == NATIVE_WINDOW_API_MEDIA &&
-                requested.bufferData->getPixelFormat() == HAL_PIXEL_FORMAT_RGBA_1010102;
-    }
-
-    if (forceUpdate ||
-        requested.what &
-                (layer_state_t::eBufferChanged | layer_state_t::eDataspaceChanged |
                  layer_state_t::eApiChanged | layer_state_t::eShadowRadiusChanged |
                  layer_state_t::eBlurRegionsChanged | layer_state_t::eStretchChanged)) {
-        forceClientComposition = isHdrY410 || shadowSettings.length > 0 ||
-                requested.blurRegions.size() > 0 || stretchEffect.hasEffect();
+        forceClientComposition = shadowSettings.length > 0 || stretchEffect.hasEffect();
     }
 
     if (forceUpdate ||
diff --git a/services/surfaceflinger/FrontEnd/LayerSnapshot.h b/services/surfaceflinger/FrontEnd/LayerSnapshot.h
index 1afcef9..92d23e2 100644
--- a/services/surfaceflinger/FrontEnd/LayerSnapshot.h
+++ b/services/surfaceflinger/FrontEnd/LayerSnapshot.h
@@ -72,14 +72,13 @@
     Rect transformedBoundsWithoutTransparentRegion;
     renderengine::ShadowSettings shadowSettings;
     bool premultipliedAlpha;
-    bool isHdrY410;
     ui::Transform parentTransform;
     Rect bufferSize;
     Rect croppedBufferSize;
     std::shared_ptr<renderengine::ExternalTexture> externalTexture;
     gui::LayerMetadata layerMetadata;
     gui::LayerMetadata relativeLayerMetadata;
-    bool hasReadyFrame;
     bool hasReadyFrame; // used after composition to check whether another frame is ready
     ui::Transform localTransformInverse;
     gui::WindowInfo inputInfo;
     ui::Transform localTransform;
diff --git a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp
index 23cfe928..159d0f0 100644
--- a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp
+++ b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.cpp
@@ -854,8 +854,9 @@
     }
 
     if (forceUpdate || snapshot.clientChanges & layer_state_t::eCornerRadiusChanged ||
-        snapshot.changes.any(RequestedLayerState::Changes::Geometry)) {
-        updateRoundedCorner(snapshot, requested, parentSnapshot);
+        snapshot.changes.any(RequestedLayerState::Changes::Geometry |
+                             RequestedLayerState::Changes::BufferUsageFlags)) {
+        updateRoundedCorner(snapshot, requested, parentSnapshot, args);
     }
 
     if (forceUpdate || snapshot.clientChanges & layer_state_t::eShadowRadiusChanged ||
@@ -870,8 +871,8 @@
     }
 
     // computed snapshot properties
-    snapshot.forceClientComposition = snapshot.isHdrY410 || snapshot.shadowSettings.length > 0 ||
-            requested.blurRegions.size() > 0 || snapshot.stretchEffect.hasEffect();
+    snapshot.forceClientComposition =
+            snapshot.shadowSettings.length > 0 || snapshot.stretchEffect.hasEffect();
     snapshot.contentOpaque = snapshot.isContentOpaque();
     snapshot.isOpaque = snapshot.contentOpaque && !snapshot.roundedCorner.hasRoundedCorners() &&
             snapshot.color.a == 1.f;
@@ -886,7 +887,12 @@
 
 void LayerSnapshotBuilder::updateRoundedCorner(LayerSnapshot& snapshot,
                                                const RequestedLayerState& requested,
-                                               const LayerSnapshot& parentSnapshot) {
+                                               const LayerSnapshot& parentSnapshot,
+                                               const Args& args) {
+    if (args.skipRoundCornersWhenProtected && requested.isProtected()) {
+        snapshot.roundedCorner = RoundedCornerState();
+        return;
+    }
     snapshot.roundedCorner = RoundedCornerState();
     RoundedCornerState parentRoundedCorner;
     if (parentSnapshot.roundedCorner.hasRoundedCorners()) {
diff --git a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h
index d361605..3d64b36 100644
--- a/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h
+++ b/services/surfaceflinger/FrontEnd/LayerSnapshotBuilder.h
@@ -54,6 +54,7 @@
         std::unordered_set<uint32_t> excludeLayerIds;
         const std::unordered_map<std::string, bool>& supportedLayerGenericMetadata;
         const std::unordered_map<std::string, uint32_t>& genericLayerMetadataKeyMap;
+        bool skipRoundCornersWhenProtected = false;
     };
     LayerSnapshotBuilder();
 
@@ -103,7 +104,7 @@
                                     bool parentIsRelative, const Args& args);
     static void resetRelativeState(LayerSnapshot& snapshot);
     static void updateRoundedCorner(LayerSnapshot& snapshot, const RequestedLayerState& layerState,
-                                    const LayerSnapshot& parentSnapshot);
+                                    const LayerSnapshot& parentSnapshot, const Args& args);
     void updateLayerBounds(LayerSnapshot& snapshot, const RequestedLayerState& layerState,
                            const LayerSnapshot& parentSnapshot, uint32_t displayRotationFlags);
     static void updateShadows(LayerSnapshot& snapshot, const RequestedLayerState& requested,
diff --git a/services/surfaceflinger/FrontEnd/RequestedLayerState.cpp b/services/surfaceflinger/FrontEnd/RequestedLayerState.cpp
index a4777d1..d979c46 100644
--- a/services/surfaceflinger/FrontEnd/RequestedLayerState.cpp
+++ b/services/surfaceflinger/FrontEnd/RequestedLayerState.cpp
@@ -147,6 +147,8 @@
     const ui::Size oldBufferSize = hadBuffer
             ? ui::Size(externalTexture->getWidth(), externalTexture->getHeight())
             : ui::Size();
+    const uint64_t oldUsageFlags = hadBuffer ? externalTexture->getUsage() : 0;
+
     const bool hadSideStream = sidebandStream != nullptr;
     const layer_state_t& clientState = resolvedComposerState.state;
     const bool hadBlur = hasBlur();
@@ -177,6 +179,10 @@
                 changes |= RequestedLayerState::Changes::BufferSize;
                 changes |= RequestedLayerState::Changes::Geometry;
             }
+            const uint64_t usageFlags = hasBuffer ? externalTexture->getUsage() : 0;
+            if (oldUsageFlags != usageFlags) {
+                changes |= RequestedLayerState::Changes::BufferUsageFlags;
+            }
         }
 
         if (hasBuffer != hadBuffer) {
@@ -570,6 +576,10 @@
     return true;
 }
 
+bool RequestedLayerState::isProtected() const {
+    return externalTexture && externalTexture->getUsage() & GRALLOC_USAGE_PROTECTED;
+}
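
The check above is a plain bitmask test on the buffer usage flags. A tiny self-contained illustration follows; the flag value is a placeholder, not the gralloc constant.

#include <cstdint>
#include <cstdio>

constexpr uint64_t kUsageProtected = 1u << 14;  // placeholder for GRALLOC_USAGE_PROTECTED

struct BufferInfo {
    uint64_t usage = 0;
};

bool isProtected(const BufferInfo* buffer) {
    // No buffer attached means there is nothing protected to display.
    return buffer != nullptr && (buffer->usage & kUsageProtected) != 0;
}

int main() {
    BufferInfo plain{};
    BufferInfo drm{kUsageProtected | 0x3};
    std::printf("plain=%d drm=%d\n", isProtected(&plain), isProtected(&drm));
}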
+
 void RequestedLayerState::clearChanges() {
     what = 0;
     changes.clear();
diff --git a/services/surfaceflinger/FrontEnd/RequestedLayerState.h b/services/surfaceflinger/FrontEnd/RequestedLayerState.h
index 1c19d6d..0309302 100644
--- a/services/surfaceflinger/FrontEnd/RequestedLayerState.h
+++ b/services/surfaceflinger/FrontEnd/RequestedLayerState.h
@@ -56,6 +56,7 @@
         Animation = 1u << 17,
         BufferSize = 1u << 18,
         GameMode = 1u << 19,
+        BufferUsageFlags = 1u << 20,
     };
     static Rect reduce(const Rect& win, const Region& exclude);
     RequestedLayerState(const LayerCreationArgs&);
@@ -85,6 +86,7 @@
     bool willReleaseBufferOnLatch() const;
     bool backpressureEnabled() const;
     bool isSimpleBufferUpdate(const layer_state_t&) const;
+    bool isProtected() const;
 
     // Layer serial number.  This gives layers an explicit ordering, so we
     // have a stable sort order when their layer stack and Z-order are
diff --git a/services/surfaceflinger/FrontEnd/TransactionHandler.cpp b/services/surfaceflinger/FrontEnd/TransactionHandler.cpp
index 0d3c6eb..ca7c3c2 100644
--- a/services/surfaceflinger/FrontEnd/TransactionHandler.cpp
+++ b/services/surfaceflinger/FrontEnd/TransactionHandler.cpp
@@ -188,21 +188,36 @@
 }
 
 void TransactionHandler::onTransactionQueueStalled(uint64_t transactionId,
-                                                   sp<ITransactionCompletedListener>& listener,
-                                                   const std::string& reason) {
-    if (std::find(mStalledTransactions.begin(), mStalledTransactions.end(), transactionId) !=
-        mStalledTransactions.end()) {
-        return;
-    }
-
-    mStalledTransactions.push_back(transactionId);
-    listener->onTransactionQueueStalled(String8(reason.c_str()));
+                                                   StalledTransactionInfo stalledTransactionInfo) {
+    std::lock_guard lock{mStalledMutex};
+    mStalledTransactions.emplace(transactionId, std::move(stalledTransactionInfo));
 }
 
-void TransactionHandler::removeFromStalledTransactions(uint64_t id) {
-    auto it = std::find(mStalledTransactions.begin(), mStalledTransactions.end(), id);
-    if (it != mStalledTransactions.end()) {
-        mStalledTransactions.erase(it);
+void TransactionHandler::removeFromStalledTransactions(uint64_t transactionId) {
+    std::lock_guard lock{mStalledMutex};
+    mStalledTransactions.erase(transactionId);
+}
+
+std::optional<TransactionHandler::StalledTransactionInfo>
+TransactionHandler::getStalledTransactionInfo(pid_t pid) {
+    std::lock_guard lock{mStalledMutex};
+    for (auto [_, stalledTransactionInfo] : mStalledTransactions) {
+        if (pid == stalledTransactionInfo.pid) {
+            return stalledTransactionInfo;
+        }
+    }
+    return std::nullopt;
+}
+
+void TransactionHandler::onLayerDestroyed(uint32_t layerId) {
+    std::lock_guard lock{mStalledMutex};
+    for (auto it = mStalledTransactions.begin(); it != mStalledTransactions.end();) {
+        if (it->second.layerId == layerId) {
+            it = mStalledTransactions.erase(it);
+        } else {
+            it++;
+        }
     }
 }
+
 } // namespace android::surfaceflinger::frontend
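
The reworked bookkeeping boils down to a mutex-guarded map keyed by transaction id, a linear scan for the per-pid lookup, and the erase-returns-next-iterator idiom for cleanup when a layer goes away. Below is a standalone sketch with simplified types; StallTracker and StalledInfo are illustrative, not the SurfaceFlinger structs.

#include <cstdint>
#include <cstdio>
#include <iterator>
#include <mutex>
#include <optional>
#include <string>
#include <unordered_map>

struct StalledInfo {
    int pid;
    uint32_t layerId;
    std::string layerName;
};

class StallTracker {
public:
    void onStalled(uint64_t transactionId, StalledInfo info) {
        std::lock_guard<std::mutex> lock(mMutex);
        mStalled.emplace(transactionId, std::move(info));
    }

    std::optional<StalledInfo> findByPid(int pid) {
        std::lock_guard<std::mutex> lock(mMutex);
        for (const auto& [id, info] : mStalled) {
            if (info.pid == pid) return info;
        }
        return std::nullopt;
    }

    void onLayerDestroyed(uint32_t layerId) {
        std::lock_guard<std::mutex> lock(mMutex);
        // erase() returns the next valid iterator, so removal is safe mid-loop.
        for (auto it = mStalled.begin(); it != mStalled.end();) {
            it = (it->second.layerId == layerId) ? mStalled.erase(it) : std::next(it);
        }
    }

private:
    std::mutex mMutex;
    std::unordered_map<uint64_t, StalledInfo> mStalled;
};

int main() {
    StallTracker tracker;
    tracker.onStalled(1, {100, 7, "status_bar"});
    tracker.onLayerDestroyed(7);
    std::printf("found: %d\n", tracker.findByPid(100).has_value());  // prints 0
}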
diff --git a/services/surfaceflinger/FrontEnd/TransactionHandler.h b/services/surfaceflinger/FrontEnd/TransactionHandler.h
index 04183bc..00f6bce 100644
--- a/services/surfaceflinger/FrontEnd/TransactionHandler.h
+++ b/services/surfaceflinger/FrontEnd/TransactionHandler.h
@@ -18,6 +18,7 @@
 
 #include <semaphore.h>
 #include <cstdint>
+#include <optional>
 #include <vector>
 
 #include <LocklessQueue.h>
@@ -63,9 +64,18 @@
     std::vector<TransactionState> flushTransactions();
     void addTransactionReadyFilter(TransactionFilter&&);
     void queueTransaction(TransactionState&&);
-    void onTransactionQueueStalled(uint64_t transactionId, sp<ITransactionCompletedListener>&,
-                                   const std::string& reason);
+
+    struct StalledTransactionInfo {
+        pid_t pid;
+        uint32_t layerId;
+        std::string layerName;
+        uint64_t bufferId;
+        uint64_t frameNumber;
+    };
+    void onTransactionQueueStalled(uint64_t transactionId, StalledTransactionInfo);
     void removeFromStalledTransactions(uint64_t transactionId);
+    std::optional<StalledTransactionInfo> getStalledTransactionInfo(pid_t pid);
+    void onLayerDestroyed(uint32_t layerId);
 
 private:
     // For unit tests
@@ -81,7 +91,10 @@
     LocklessQueue<TransactionState> mLocklessTransactionQueue;
     std::atomic<size_t> mPendingTransactionCount = 0;
     ftl::SmallVector<TransactionFilter, 2> mTransactionReadyFilters;
-    std::vector<uint64_t> mStalledTransactions;
+
+    std::mutex mStalledMutex;
+    std::unordered_map<uint64_t /* transactionId */, StalledTransactionInfo> mStalledTransactions
+            GUARDED_BY(mStalledMutex);
 };
 } // namespace surfaceflinger::frontend
 } // namespace android
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index 3a41f15..59a8825 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -594,8 +594,8 @@
     snapshot->localTransformInverse = snapshot->localTransform.inverse();
     snapshot->blendMode = static_cast<Hwc2::IComposerClient::BlendMode>(blendMode);
     snapshot->alpha = alpha;
-    snapshot->backgroundBlurRadius = drawingState.backgroundBlurRadius;
-    snapshot->blurRegions = drawingState.blurRegions;
+    snapshot->backgroundBlurRadius = getBackgroundBlurRadius();
+    snapshot->blurRegions = getBlurRegions();
     snapshot->stretchEffect = getStretchEffect();
 }
 
@@ -659,13 +659,12 @@
     // Force client composition for special cases known only to the front-end.
     // Rounded corners no longer force client composition, since we may use a
     // hole punch so that the layer will appear to have rounded corners.
-    if (isHdrY410() || drawShadows() || drawingState.blurRegions.size() > 0 ||
-        snapshot->stretchEffect.hasEffect()) {
+    if (drawShadows() || snapshot->stretchEffect.hasEffect()) {
         snapshot->forceClientComposition = true;
     }
     // If there are no visible region changes, we still need to update blur parameters.
-    snapshot->blurRegions = drawingState.blurRegions;
-    snapshot->backgroundBlurRadius = drawingState.backgroundBlurRadius;
+    snapshot->blurRegions = getBlurRegions();
+    snapshot->backgroundBlurRadius = getBackgroundBlurRadius();
 
     // Layer framerate is used in caching decisions.
     // Retrieve it from the scheduler which maintains an instance of LayerHistory, and store it in
@@ -836,12 +835,12 @@
         mFlinger->mUpdateInputInfo = true;
     }
 
-    commitTransaction(mDrawingState);
+    commitTransaction();
 
     return flags;
 }
 
-void Layer::commitTransaction(State&) {
+void Layer::commitTransaction() {
     // Set the present state for all bufferlessSurfaceFramesTX to Presented. The
     // bufferSurfaceFrameTX will be presented in latchBuffer.
     for (auto& [token, surfaceFrame] : mDrawingState.bufferlessSurfaceFramesTX) {
@@ -2111,6 +2110,13 @@
 }
 
 RoundedCornerState Layer::getRoundedCornerState() const {
+    // Today's DPUs cannot do rounded corners. If RenderEngine cannot render
+    // protected content, remove rounded corners from protected content so it
+    // can be rendered by the DPU.
+    if (isProtected() && !mFlinger->getRenderEngine().supportsProtectedContent()) {
+        return {};
+    }
+
     // Get parent settings
     RoundedCornerState parentSettings;
     const auto& parent = mDrawingParent.promote();
@@ -3881,13 +3887,6 @@
     return true;
 }
 
-bool Layer::isHdrY410() const {
-    // pixel format is HDR Y410 masquerading as RGBA_1010102
-    return (mBufferInfo.mDataspace == ui::Dataspace::BT2020_ITU_PQ &&
-            mBufferInfo.mApi == NATIVE_WINDOW_API_MEDIA &&
-            mBufferInfo.mPixelFormat == HAL_PIXEL_FORMAT_RGBA_1010102);
-}
-
 sp<LayerFE> Layer::getCompositionEngineLayerFE() const {
     // There's no need to get a CE Layer if the layer isn't going to draw anything.
     return hasSomethingToDraw() ? mLegacyLayerFE : nullptr;
@@ -4289,7 +4288,6 @@
     snapshot->contentOpaque = isOpaque(mDrawingState);
     snapshot->layerOpaqueFlagSet =
             (mDrawingState.flags & layer_state_t::eLayerOpaque) == layer_state_t::eLayerOpaque;
-    snapshot->isHdrY410 = isHdrY410();
     sp<Layer> p = mDrawingParent.promote();
     if (p != nullptr) {
         snapshot->parentTransform = p->getTransform();
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index 5d77657..d1912e4 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -427,12 +427,10 @@
     bool needsFilteringForScreenshots(const DisplayDevice*, const ui::Transform&) const;
 
     // from graphics API
-    ui::Dataspace translateDataspace(ui::Dataspace dataspace);
+    static ui::Dataspace translateDataspace(ui::Dataspace dataspace);
     void updateCloneBufferInfo();
     uint64_t mPreviousFrameNumber = 0;
 
-    bool isHdrY410() const;
-
     /*
      * called after composition.
      * returns true if the layer latched a new buffer this frame.
@@ -912,6 +910,7 @@
     void setTransformHint(std::optional<ui::Transform::RotationFlags> transformHint) {
         mTransformHint = transformHint;
     }
+    void commitTransaction();
     // Keeps track of the previously presented layer stacks. This is used to get
     // the release fences from the correct displays when we release the last buffer
     // from the layer.
@@ -932,7 +931,6 @@
     void preparePerFrameCompositionState();
     void preparePerFrameBufferCompositionState();
     void preparePerFrameEffectsCompositionState();
-    virtual void commitTransaction(State& stateToCommit);
     void gatherBufferInfo();
     void onSurfaceFrameCreated(const std::shared_ptr<frametimeline::SurfaceFrame>&);
 
diff --git a/services/surfaceflinger/LayerFE.cpp b/services/surfaceflinger/LayerFE.cpp
index f855f27..5ae52ab 100644
--- a/services/surfaceflinger/LayerFE.cpp
+++ b/services/surfaceflinger/LayerFE.cpp
@@ -16,7 +16,7 @@
 
 // #define LOG_NDEBUG 0
 #undef LOG_TAG
-#define LOG_TAG "LayerFE"
+#define LOG_TAG "SurfaceFlinger"
 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
 
 #include <gui/GLConsumer.h>
@@ -225,7 +225,6 @@
     layerSettings.source.buffer.fence = mSnapshot->acquireFence;
     layerSettings.source.buffer.textureName = mSnapshot->textureName;
     layerSettings.source.buffer.usePremultipliedAlpha = mSnapshot->premultipliedAlpha;
-    layerSettings.source.buffer.isY410BT2020 = mSnapshot->isHdrY410;
     bool hasSmpte2086 = mSnapshot->hdrMetadata.validTypes & HdrMetadata::SMPTE2086;
     bool hasCta861_3 = mSnapshot->hdrMetadata.validTypes & HdrMetadata::CTA861_3;
     float maxLuminance = 0.f;
diff --git a/services/surfaceflinger/OWNERS b/services/surfaceflinger/OWNERS
index 4734097..3270e4c 100644
--- a/services/surfaceflinger/OWNERS
+++ b/services/surfaceflinger/OWNERS
@@ -1,3 +1,5 @@
+# Bug component: 1075131
+
 adyabr@google.com
 alecmouri@google.com
 chaviw@google.com
diff --git a/services/surfaceflinger/Scheduler/Scheduler.cpp b/services/surfaceflinger/Scheduler/Scheduler.cpp
index d6d7725..5a19ec5 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.cpp
+++ b/services/surfaceflinger/Scheduler/Scheduler.cpp
@@ -186,7 +186,17 @@
     FrameTargeter& pacesetterTargeter = *pacesetterOpt->get().targeterPtr;
     pacesetterTargeter.beginFrame(beginFrameArgs, *pacesetterOpt->get().schedulePtr);
 
-    if (!compositor.commit(pacesetterTargeter.target())) return;
+    FrameTargets targets;
+    targets.try_emplace(pacesetterId, &pacesetterTargeter.target());
+
+    for (const auto& [id, display] : mDisplays) {
+        if (id == pacesetterId) continue;
+
+        const FrameTargeter& targeter = *display.targeterPtr;
+        targets.try_emplace(id, &targeter.target());
+    }
+
+    if (!compositor.commit(pacesetterId, targets)) return;
 
     // TODO(b/256196556): Choose the frontrunner display.
     FrameTargeters targeters;
diff --git a/services/surfaceflinger/Scheduler/include/scheduler/interface/ICompositor.h b/services/surfaceflinger/Scheduler/include/scheduler/interface/ICompositor.h
index 6fe813a..12ee36e 100644
--- a/services/surfaceflinger/Scheduler/include/scheduler/interface/ICompositor.h
+++ b/services/surfaceflinger/Scheduler/include/scheduler/interface/ICompositor.h
@@ -29,6 +29,7 @@
 class FrameTarget;
 class FrameTargeter;
 
+using FrameTargets = ui::PhysicalDisplayMap<PhysicalDisplayId, const scheduler::FrameTarget*>;
 using FrameTargeters = ui::PhysicalDisplayMap<PhysicalDisplayId, scheduler::FrameTargeter*>;
 
 } // namespace scheduler
@@ -39,7 +40,7 @@
 
     // Commits transactions for layers and displays. Returns whether any state has been invalidated,
     // i.e. whether a frame should be composited for each display.
-    virtual bool commit(const scheduler::FrameTarget&) = 0;
+    virtual bool commit(PhysicalDisplayId pacesetterId, const scheduler::FrameTargets&) = 0;
 
     // Composites a frame for each display. CompositionEngine performs GPU and/or HAL composition
     // via RenderEngine and the Composer HAL, respectively.
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 9f24dd6..06adfec 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -644,14 +644,6 @@
     return getPhysicalDisplayTokenLocked(displayId);
 }
 
-status_t SurfaceFlinger::getColorManagement(bool* outGetColorManagement) const {
-    if (!outGetColorManagement) {
-        return BAD_VALUE;
-    }
-    *outGetColorManagement = useColorManagement;
-    return NO_ERROR;
-}
-
 HWComposer& SurfaceFlinger::getHwComposer() const {
     return mCompositionEngine->getHwComposer();
 }
@@ -812,7 +804,6 @@
     auto builder = renderengine::RenderEngineCreationArgs::Builder()
                            .setPixelFormat(static_cast<int32_t>(defaultCompositionPixelFormat))
                            .setImageCacheSize(maxFrameBufferAcquiredBuffers)
-                           .setUseColorManagerment(useColorManagement)
                            .setEnableProtectedContext(enable_protected_contents(false))
                            .setPrecacheToneMapperShaderOnly(false)
                            .setSupportsBackgroundBlur(mSupportsBlur)
@@ -1195,9 +1186,9 @@
 }
 
 void SurfaceFlinger::setDesiredActiveMode(display::DisplayModeRequest&& request, bool force) {
-    ATRACE_CALL();
-
     const auto displayId = request.mode.modePtr->getPhysicalDisplayId();
+    ATRACE_NAME(ftl::Concat(__func__, ' ', displayId.value).c_str());
+
     const auto display = getDisplayDeviceLocked(displayId);
     if (!display) {
         ALOGW("%s: display is no longer valid", __func__);
@@ -1225,17 +1216,24 @@
             // As we called to set period, we will call to onRefreshRateChangeCompleted once
             // VsyncController model is locked.
             mScheduler->modulateVsync(displayId, &VsyncModulator::onRefreshRateChangeInitiated);
-            updatePhaseConfiguration(mode.fps);
+
+            if (displayId == mActiveDisplayId) {
+                updatePhaseConfiguration(mode.fps);
+            }
+
             mScheduler->setModeChangePending(true);
             break;
         case DisplayDevice::DesiredActiveModeAction::InitiateRenderRateSwitch:
             mScheduler->setRenderRate(displayId, mode.fps);
-            updatePhaseConfiguration(mode.fps);
-            mRefreshRateStats->setRefreshRate(mode.fps);
-            if (display->getPhysicalId() == mActiveDisplayId && emitEvent) {
-                mScheduler->onPrimaryDisplayModeChanged(mAppConnectionHandle, mode);
+
+            if (displayId == mActiveDisplayId) {
+                updatePhaseConfiguration(mode.fps);
+                mRefreshRateStats->setRefreshRate(mode.fps);
             }
 
+            if (emitEvent) {
+                dispatchDisplayModeChangeEvent(displayId, mode);
+            }
             break;
         case DisplayDevice::DesiredActiveModeAction::None:
             break;
@@ -1291,24 +1289,20 @@
     return future.get();
 }
 
-void SurfaceFlinger::updateInternalStateWithChangedMode() {
-    ATRACE_CALL();
+void SurfaceFlinger::finalizeDisplayModeChange(DisplayDevice& display) {
+    const auto displayId = display.getPhysicalId();
+    ATRACE_NAME(ftl::Concat(__func__, ' ', displayId.value).c_str());
 
-    const auto display = getDefaultDisplayDeviceLocked();
-    if (!display) {
-        return;
-    }
-
-    const auto upcomingModeInfo = display->getUpcomingActiveMode();
+    const auto upcomingModeInfo = display.getUpcomingActiveMode();
     if (!upcomingModeInfo.modeOpt) {
         // There is no pending mode change. This can happen if the active
         // display changed and the mode change happened on a different display.
         return;
     }
 
-    if (display->getActiveMode().modePtr->getResolution() !=
+    if (display.getActiveMode().modePtr->getResolution() !=
         upcomingModeInfo.modeOpt->modePtr->getResolution()) {
-        auto& state = mCurrentState.displays.editValueFor(display->getDisplayToken());
+        auto& state = mCurrentState.displays.editValueFor(display.getDisplayToken());
         // We need to generate new sequenceId in order to recreate the display (and this
         // way the framebuffer).
         state.sequenceId = DisplayDeviceState{}.sequenceId;
@@ -1319,27 +1313,24 @@
         return;
     }
 
-    mPhysicalDisplays.get(display->getPhysicalId())
-            .transform(&PhysicalDisplay::snapshotRef)
-            .transform(ftl::unit_fn([&](const display::DisplaySnapshot& snapshot) {
-                FTL_FAKE_GUARD(kMainThreadContext,
-                               display->setActiveMode(upcomingModeInfo.modeOpt->modePtr->getId(),
-                                                      upcomingModeInfo.modeOpt->modePtr->getFps(),
-                                                      upcomingModeInfo.modeOpt->fps));
-            }));
+    const auto& activeMode = *upcomingModeInfo.modeOpt;
+    display.finalizeModeChange(activeMode.modePtr->getId(), activeMode.modePtr->getFps(),
+                               activeMode.fps);
 
-    const Fps refreshRate = upcomingModeInfo.modeOpt->fps;
-    mRefreshRateStats->setRefreshRate(refreshRate);
-    updatePhaseConfiguration(refreshRate);
+    if (displayId == mActiveDisplayId) {
+        mRefreshRateStats->setRefreshRate(activeMode.fps);
+        updatePhaseConfiguration(activeMode.fps);
+    }
 
     if (upcomingModeInfo.event != scheduler::DisplayModeEvent::None) {
-        mScheduler->onPrimaryDisplayModeChanged(mAppConnectionHandle, *upcomingModeInfo.modeOpt);
+        dispatchDisplayModeChangeEvent(displayId, activeMode);
     }
 }
 
 void SurfaceFlinger::clearDesiredActiveModeState(const sp<DisplayDevice>& display) {
     display->clearDesiredActiveModeState();
     if (display->getPhysicalId() == mActiveDisplayId) {
+        // TODO(b/255635711): Check for pending mode changes on other displays.
         mScheduler->setModeChangePending(false);
     }
 }
@@ -1353,21 +1344,18 @@
     clearDesiredActiveModeState(display);
     mScheduler->resyncToHardwareVsync(displayId, true /* allowToEnable */, displayFps);
     mScheduler->setRenderRate(displayId, renderFps);
-    updatePhaseConfiguration(renderFps);
+
+    if (displayId == mActiveDisplayId) {
+        updatePhaseConfiguration(renderFps);
+    }
 }
 
-void SurfaceFlinger::setActiveModeInHwcIfNeeded() {
+void SurfaceFlinger::initiateDisplayModeChanges() {
     ATRACE_CALL();
 
     std::optional<PhysicalDisplayId> displayToUpdateImmediately;
 
     for (const auto& [id, physical] : mPhysicalDisplays) {
-        const auto& snapshot = physical.snapshot();
-
-        if (snapshot.connectionType() != ui::DisplayConnectionType::Internal) {
-            continue;
-        }
-
         const auto display = getDisplayDeviceLocked(id);
         if (!display) continue;
 
@@ -1378,14 +1366,14 @@
             continue;
         }
 
-        if (id != mActiveDisplayId) {
-            // Display is no longer the active display, so abort the mode change.
+        if (!display->isPoweredOn()) {
+            // Display is no longer powered on, so abort the mode change.
             clearDesiredActiveModeState(display);
             continue;
         }
 
         const auto desiredModeId = desiredActiveMode->modeOpt->modePtr->getId();
-        const auto displayModePtrOpt = snapshot.displayModes().get(desiredModeId);
+        const auto displayModePtrOpt = physical.snapshot().displayModes().get(desiredModeId);
 
         if (!displayModePtrOpt) {
             ALOGW("Desired display mode is no longer supported. Mode ID = %d",
@@ -1435,19 +1423,18 @@
 
         if (outTimeline.refreshRequired) {
             scheduleComposite(FrameHint::kNone);
-            mSetActiveModePending = true;
         } else {
-            // Updating the internal state should be done outside the loop,
-            // because it can recreate a DisplayDevice and modify mDisplays
-            // which will invalidate the iterator.
+            // TODO(b/255635711): Remove `displayToUpdateImmediately` and call `finalizeDisplayModeChange`
+            // for all displays. This was only needed when the loop iterated over `mDisplays` rather
+            // than `mPhysicalDisplays`.
             displayToUpdateImmediately = display->getPhysicalId();
         }
     }
 
     if (displayToUpdateImmediately) {
-        updateInternalStateWithChangedMode();
-
         const auto display = getDisplayDeviceLocked(*displayToUpdateImmediately);
+        finalizeDisplayModeChange(*display);
+
         const auto desiredActiveMode = display->getDesiredActiveMode();
         if (desiredActiveMode && display->getActiveMode() == desiredActiveMode->modeOpt) {
             desiredActiveModeChangeDone(display);
@@ -2315,7 +2302,9 @@
                      .forceFullDamage = mForceFullDamage,
                      .supportedLayerGenericMetadata =
                              getHwComposer().getSupportedLayerGenericMetadata(),
-                     .genericLayerMetadataKeyMap = getGenericLayerMetadataKeyMap()};
+                     .genericLayerMetadataKeyMap = getGenericLayerMetadataKeyMap(),
+                     .skipRoundCornersWhenProtected =
+                             !getRenderEngine().supportsProtectedContent()};
         mLayerSnapshotBuilder.update(args);
     }
 
@@ -2324,7 +2313,7 @@
         mUpdateInputInfo = true;
     }
     if (mLayerLifecycleManager.getGlobalChanges().any(Changes::VisibleRegion | Changes::Hierarchy |
-                                                      Changes::Visibility)) {
+                                                      Changes::Visibility | Changes::Geometry)) {
         mVisibleRegionsDirty = true;
     }
     if (mLayerLifecycleManager.getGlobalChanges().any(Changes::Hierarchy | Changes::FrameRate)) {
@@ -2339,6 +2328,7 @@
     if (!mLegacyFrontEndEnabled) {
         ATRACE_NAME("DisplayCallbackAndStatsUpdates");
         applyTransactions(update.transactions, vsyncId);
+        traverseLegacyLayers([&](Layer* layer) { layer->commitTransaction(); });
         const nsecs_t latchTime = systemTime();
         bool unused = false;
 
@@ -2363,20 +2353,22 @@
                 mLayersWithBuffersRemoved.emplace(it->second);
             }
             it->second->latchBufferImpl(unused, latchTime, bgColorOnly);
-            mLayersWithQueuedFrames.emplace(it->second);
-        }
-
-        for (auto& snapshot : mLayerSnapshotBuilder.getSnapshots()) {
-            updateLayerHistory(*snapshot);
-            if (!snapshot->hasReadyFrame) continue;
             newDataLatched = true;
-            if (!snapshot->isVisible) break;
 
-            Region visibleReg;
-            visibleReg.set(snapshot->transformedBoundsWithoutTransparentRegion);
-            invalidateLayerStack(snapshot->outputFilter, visibleReg);
+            mLayersWithQueuedFrames.emplace(it->second);
+            mLayersIdsWithQueuedFrames.emplace(it->second->sequence);
         }
 
+        mLayerSnapshotBuilder.forEachVisibleSnapshot([&](const frontend::LayerSnapshot& snapshot) {
+            updateLayerHistory(snapshot);
+            if (mLayersIdsWithQueuedFrames.find(snapshot.path.id) ==
+                mLayersIdsWithQueuedFrames.end())
+                return;
+            Region visibleReg;
+            visibleReg.set(snapshot.transformedBoundsWithoutTransparentRegion);
+            invalidateLayerStack(snapshot.outputFilter, visibleReg);
+        });
+
         for (auto& destroyedLayer : mLayerLifecycleManager.getDestroyedLayers()) {
             mLegacyLayers.erase(destroyedLayer->id);
         }
@@ -2398,7 +2390,10 @@
     return mustComposite;
 }
 
-bool SurfaceFlinger::commit(const scheduler::FrameTarget& pacesetterFrameTarget) {
+bool SurfaceFlinger::commit(PhysicalDisplayId pacesetterId,
+                            const scheduler::FrameTargets& frameTargets) {
+    const scheduler::FrameTarget& pacesetterFrameTarget = *frameTargets.get(pacesetterId)->get();
+
     const VsyncId vsyncId = pacesetterFrameTarget.vsyncId();
     ATRACE_NAME(ftl::Concat(__func__, ' ', ftl::to_underlying(vsyncId)).c_str());
 
@@ -2411,20 +2406,35 @@
         mTracingEnabledChanged = false;
     }
 
-    // If we are in the middle of a mode change and the fence hasn't
-    // fired yet just wait for the next commit.
-    if (mSetActiveModePending) {
-        if (pacesetterFrameTarget.isFramePending()) {
-            mScheduler->scheduleFrame();
-            return false;
-        }
+    // If a mode set is pending and the fence hasn't fired yet, wait for the next commit.
+    if (std::any_of(frameTargets.begin(), frameTargets.end(),
+                    [this](const auto& pair) FTL_FAKE_GUARD(mStateLock)
+                            FTL_FAKE_GUARD(kMainThreadContext) {
+                                if (!pair.second->isFramePending()) return false;
 
-        // We received the present fence from the HWC, so we assume it successfully updated
-        // the mode, hence we update SF.
-        mSetActiveModePending = false;
-        {
-            Mutex::Autolock lock(mStateLock);
-            updateInternalStateWithChangedMode();
+                                if (const auto display = getDisplayDeviceLocked(pair.first)) {
+                                    return display->isModeSetPending();
+                                }
+
+                                return false;
+                            })) {
+        mScheduler->scheduleFrame();
+        return false;
+    }
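
The gate above is a std::any_of over the per-display frame targets: the commit is deferred whenever some display still has a mode set in flight behind an unsignaled frame. A simplified sketch of that predicate using plain standard containers and illustrative names:

#include <algorithm>
#include <cstdio>
#include <map>

struct FrameTargetState {
    bool framePending;
    bool modeSetPending;
};

bool shouldDeferCommit(const std::map<int, FrameTargetState>& targets) {
    return std::any_of(targets.begin(), targets.end(), [](const auto& pair) {
        // Only this combination blocks the commit; otherwise the display is ready.
        return pair.second.framePending && pair.second.modeSetPending;
    });
}

int main() {
    std::map<int, FrameTargetState> targets{{0, {true, true}}, {1, {false, false}}};
    std::printf("defer=%d\n", shouldDeferCommit(targets));  // prints 1
}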
+
+    {
+        Mutex::Autolock lock(mStateLock);
+
+        for (const auto [id, target] : frameTargets) {
+            // TODO(b/241285876): This is `nullptr` when the DisplayDevice is about to be removed in
+            // this commit, since the PhysicalDisplay has already been removed. Rather than checking
+            // for `nullptr` below, change Scheduler::onFrameSignal to filter out the FrameTarget of
+            // the removed display.
+            const auto display = getDisplayDeviceLocked(id);
+
+            if (display && display->isModeSetPending()) {
+                finalizeDisplayModeChange(*display);
+            }
         }
     }
 
@@ -2515,7 +2525,7 @@
                                                         ? &mLayerHierarchyBuilder.getHierarchy()
                                                         : nullptr,
                                                 updateAttachedChoreographer);
-        setActiveModeInHwcIfNeeded();
+        initiateDisplayModeChanges();
     }
 
     updateCursorAsync();
@@ -2603,9 +2613,7 @@
             refreshArgs.layersWithQueuedFrames.push_back(layerFE);
     }
 
-    refreshArgs.outputColorSetting = useColorManagement
-            ? mDisplayColorSetting
-            : compositionengine::OutputColorSetting::kUnmanaged;
+    refreshArgs.outputColorSetting = mDisplayColorSetting;
     refreshArgs.forceOutputColorMode = mForceColorMode;
 
     refreshArgs.updatingOutputGeometryThisFrame = mVisibleRegionsDirty;
@@ -2727,6 +2735,7 @@
     mScheduler->modulateVsync({}, &VsyncModulator::onDisplayRefresh, hasGpuUseOrReuse);
 
     mLayersWithQueuedFrames.clear();
+    mLayersIdsWithQueuedFrames.clear();
     if (mLayerTracingEnabled && mLayerTracing.flagIsSet(LayerTracing::TRACE_COMPOSITION)) {
         // This will block and should only be used for debugging.
         addToLayerTracing(mVisibleRegionsDirty, pacesetterTarget.frameBeginTime(), vsyncId);
@@ -3322,6 +3331,16 @@
     mScheduler->onHotplugReceived(mSfConnectionHandle, displayId, connected);
 }
 
+void SurfaceFlinger::dispatchDisplayModeChangeEvent(PhysicalDisplayId displayId,
+                                                    const scheduler::FrameRateMode& mode) {
+    // TODO(b/255635821): Merge code paths and move to Scheduler.
+    const auto onDisplayModeChanged = displayId == mActiveDisplayId
+            ? &scheduler::Scheduler::onPrimaryDisplayModeChanged
+            : &scheduler::Scheduler::onNonPrimaryDisplayModeChanged;
+
+    ((*mScheduler).*onDisplayModeChanged)(mAppConnectionHandle, mode);
+}
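
dispatchDisplayModeChangeEvent selects a pointer-to-member-function and then invokes it through the scheduler instance. A minimal sketch of that dispatch style; Scheduler here is a stand-in class, not the real one.

#include <cstdio>

class Scheduler {
public:
    void onPrimaryDisplayModeChanged(int fps) { std::printf("primary -> %d fps\n", fps); }
    void onNonPrimaryDisplayModeChanged(int fps) { std::printf("non-primary -> %d fps\n", fps); }
};

void dispatchModeChange(Scheduler& scheduler, bool isActiveDisplay, int fps) {
    // Both candidates share a signature, so one member-function pointer covers them.
    auto handler = isActiveDisplay ? &Scheduler::onPrimaryDisplayModeChanged
                                   : &Scheduler::onNonPrimaryDisplayModeChanged;
    (scheduler.*handler)(fps);
}

int main() {
    Scheduler scheduler;
    dispatchModeChange(scheduler, true, 120);
    dispatchModeChange(scheduler, false, 60);
}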
+
 sp<DisplayDevice> SurfaceFlinger::setupNewDisplayDeviceInternal(
         const wp<IBinder>& displayToken,
         std::shared_ptr<compositionengine::Display> compositionDisplay,
@@ -3365,18 +3384,16 @@
 
         creationArgs.isPrimary = physical->id == getPrimaryDisplayIdLocked();
 
-        if (useColorManagement) {
-            mPhysicalDisplays.get(physical->id)
-                    .transform(&PhysicalDisplay::snapshotRef)
-                    .transform(ftl::unit_fn([&](const display::DisplaySnapshot& snapshot) {
-                        for (const auto mode : snapshot.colorModes()) {
-                            creationArgs.hasWideColorGamut |= ui::isWideColorMode(mode);
-                            creationArgs.hwcColorModes
-                                    .emplace(mode,
-                                             getHwComposer().getRenderIntents(physical->id, mode));
-                        }
-                    }));
-        }
+        mPhysicalDisplays.get(physical->id)
+                .transform(&PhysicalDisplay::snapshotRef)
+                .transform(ftl::unit_fn([&](const display::DisplaySnapshot& snapshot) {
+                    for (const auto mode : snapshot.colorModes()) {
+                        creationArgs.hasWideColorGamut |= ui::isWideColorMode(mode);
+                        creationArgs.hwcColorModes
+                                .emplace(mode,
+                                         getHwComposer().getRenderIntents(physical->id, mode));
+                    }
+                }));
     }
 
     if (const auto id = HalDisplayId::tryCast(compositionDisplay->getId())) {
@@ -3420,14 +3437,8 @@
                                                     RenderIntent::COLORIMETRIC});
 
     if (const auto& physical = state.physical) {
-        mPhysicalDisplays.get(physical->id)
-                .transform(&PhysicalDisplay::snapshotRef)
-                .transform(ftl::unit_fn([&](const display::DisplaySnapshot& snapshot) {
-                    FTL_FAKE_GUARD(kMainThreadContext,
-                                   display->setActiveMode(physical->activeMode->getId(),
-                                                          physical->activeMode->getFps(),
-                                                          physical->activeMode->getFps()));
-                }));
+        const auto& mode = *physical->activeMode;
+        display->setActiveMode(mode.getId(), mode.getFps(), mode.getFps());
     }
 
     display->setLayerFilter(makeLayerFilterForDisplay(display->getId(), state.layerStack));
@@ -3946,12 +3957,8 @@
 
         if (!display) continue;
 
-        const bool isInternalDisplay = mPhysicalDisplays.get(displayId)
-                                               .transform(&PhysicalDisplay::isInternal)
-                                               .value_or(false);
-
-        if (isInternalDisplay && displayId != mActiveDisplayId) {
-            ALOGV("%s(%s): Inactive display", __func__, to_string(displayId).c_str());
+        if (!display->isPoweredOn()) {
+            ALOGV("%s(%s): Display is powered off", __func__, to_string(displayId).c_str());
             continue;
         }
 
@@ -3959,7 +3966,7 @@
             setDesiredActiveMode(std::move(request));
         } else {
             ALOGV("%s: Mode %d is disallowed for display %s", __func__, modePtr->getId().value(),
-                  to_string(display->getId()).c_str());
+                  to_string(displayId).c_str());
         }
     }
 }
@@ -4443,9 +4450,13 @@
                     (flushState.queueProcessTime - transaction.postTime) >
                             std::chrono::nanoseconds(4s).count()) {
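+                    // Capture the owning pid, layer, and buffer of the stalled transaction so it can later be queried via getStalledTransactionInfo.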
                     mTransactionHandler
-                            .onTransactionQueueStalled(transaction.id, listener,
-                                                       "Buffer processing hung up due to stuck "
-                                                       "fence. Indicates GPU hang");
+                            .onTransactionQueueStalled(transaction.id,
+                                                       {.pid = layer->getOwnerPid(),
+                                                        .layerId = static_cast<uint32_t>(
+                                                                layer->getSequence()),
+                                                        .layerName = layer->getDebugName(),
+                                                        .bufferId = s.bufferData->getId(),
+                                                        .frameNumber = s.bufferData->frameNumber});
                 }
                 ATRACE_FORMAT("fence unsignaled %s", layer->getDebugName());
                 return TraverseBuffersReturnValues::STOP_TRAVERSAL;
@@ -4538,9 +4549,12 @@
                     (flushState.queueProcessTime - transaction.postTime) >
                             std::chrono::nanoseconds(4s).count()) {
                     mTransactionHandler
-                            .onTransactionQueueStalled(transaction.id, listener,
-                                                       "Buffer processing hung up due to stuck "
-                                                       "fence. Indicates GPU hang");
+                            .onTransactionQueueStalled(transaction.id,
+                                                       {.pid = layer->ownerPid.val(),
+                                                        .layerId = layer->id,
+                                                        .layerName = layer->name,
+                                                        .bufferId = s.bufferData->getId(),
+                                                        .frameNumber = s.bufferData->frameNumber});
                 }
                 ATRACE_FORMAT("fence unsignaled %s", layer->name.c_str());
                 return TraverseBuffersReturnValues::STOP_TRAVERSAL;
@@ -5368,6 +5382,9 @@
     if (what & layer_state_t::eSidebandStreamChanged) {
         if (layer->setSidebandStream(s.sidebandStream)) flags |= eTraversalNeeded;
     }
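+    // Apply dataspace changes from the transaction and request a traversal when the value actually changes.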
+    if (what & layer_state_t::eDataspaceChanged) {
+        if (layer->setDataspace(s.dataspace)) flags |= eTraversalNeeded;
+    }
     if (what & layer_state_t::eBufferChanged) {
         std::optional<ui::Transform::RotationFlags> transformHint = std::nullopt;
         frontend::LayerSnapshot* snapshot = mLayerSnapshotBuilder.getSnapshot(layer->sequence);
@@ -5567,6 +5584,8 @@
         mDestroyedHandles.emplace_back(layerId);
     }
 
+    mTransactionHandler.onLayerDestroyed(layerId);
+
     Mutex::Autolock lock(mStateLock);
     markLayerPendingRemovalLocked(layer);
     layer->onHandleDestroyed();
@@ -5694,18 +5713,22 @@
         // Turn off the display
 
         if (displayId == mActiveDisplayId) {
-            if (setSchedFifo(false) != NO_ERROR) {
-                ALOGW("Failed to set SCHED_OTHER after powering off active display: %s",
-                      strerror(errno));
-            }
-            if (setSchedAttr(false) != NO_ERROR) {
-                ALOGW("Failed set uclamp.min after powering off active display: %s",
-                      strerror(errno));
-            }
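+            // If another powered-on internal display can take over as the active display, hand over to it instead of dropping SF's realtime priority and hardware vsync.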
+            if (const auto display = getActivatableDisplay()) {
+                onActiveDisplayChangedLocked(activeDisplay.get(), *display);
+            } else {
+                if (setSchedFifo(false) != NO_ERROR) {
+                    ALOGW("Failed to set SCHED_OTHER after powering off active display: %s",
+                          strerror(errno));
+                }
+                if (setSchedAttr(false) != NO_ERROR) {
+                    ALOGW("Failed to set uclamp.min after powering off active display: %s",
+                          strerror(errno));
+                }
 
-            if (*currentModeOpt != hal::PowerMode::DOZE_SUSPEND) {
-                mScheduler->disableHardwareVsync(displayId, true);
-                mScheduler->enableSyntheticVsync();
+                if (*currentModeOpt != hal::PowerMode::DOZE_SUSPEND) {
+                    mScheduler->disableHardwareVsync(displayId, true);
+                    mScheduler->enableSyntheticVsync();
+                }
             }
         }
 
@@ -6071,7 +6094,6 @@
 
 void SurfaceFlinger::dumpWideColorInfo(std::string& result) const {
     StringAppendF(&result, "Device supports wide color: %d\n", mSupportsWideColor);
-    StringAppendF(&result, "Device uses color management: %d\n", useColorManagement);
     StringAppendF(&result, "DisplayColorSetting: %s\n",
                   decodeDisplayColorSetting(mDisplayColorSetting).c_str());
 
@@ -6127,6 +6149,13 @@
         displayProto->set_id(display->getId().value);
         displayProto->set_name(display->getDisplayName());
         displayProto->set_layer_stack(display->getLayerStack().id);
+
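+        // Virtual displays have no display modes, so DPI is only reported for physical displays.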
+        if (!display->isVirtual()) {
+            const auto dpi = display->refreshRateSelector().getActiveMode().modePtr->getDpi();
+            displayProto->set_dpi_x(dpi.x);
+            displayProto->set_dpi_y(dpi.y);
+        }
+
         LayerProtoHelper::writeSizeToProto(display->getWidth(), display->getHeight(),
                                            [&]() { return displayProto->mutable_size(); });
         LayerProtoHelper::writeToProto(display->getLayerStackSpaceRect(), [&]() {
@@ -6739,8 +6768,6 @@
                 DisplayColorSetting setting = static_cast<DisplayColorSetting>(data.readInt32());
                 switch (setting) {
                     case DisplayColorSetting::kManaged:
-                        reply->writeBool(useColorManagement);
-                        break;
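+                        // Color management is always enabled; fall through so kManaged also reports true.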
                     case DisplayColorSetting::kUnmanaged:
                         reply->writeBool(true);
                         break;
@@ -6773,7 +6800,8 @@
             }
             // Is device color managed?
             case 1030: {
-                reply->writeBool(useColorManagement);
+                // ColorDisplayManager still calls this.
+                reply->writeBool(true);
                 return NO_ERROR;
             }
             // Override default composition data space
@@ -7311,16 +7339,27 @@
 
 } // namespace
 
-status_t SurfaceFlinger::captureDisplay(const DisplayCaptureArgs& args,
-                                        const sp<IScreenCaptureListener>& captureListener) {
+static void invokeScreenCaptureError(const status_t status,
+                                     const sp<IScreenCaptureListener>& captureListener) {
+    ScreenCaptureResults captureResults;
+    captureResults.fenceResult = base::unexpected(status);
+    captureListener->onScreenCaptureCompleted(captureResults);
+}
+
+void SurfaceFlinger::captureDisplay(const DisplayCaptureArgs& args,
+                                    const sp<IScreenCaptureListener>& captureListener) {
     ATRACE_CALL();
 
     status_t validate = validateScreenshotPermissions(args);
     if (validate != OK) {
-        return validate;
+        invokeScreenCaptureError(validate, captureListener);
+        return;
     }
 
-    if (!args.displayToken) return BAD_VALUE;
+    if (!args.displayToken) {
+        invokeScreenCaptureError(BAD_VALUE, captureListener);
+        return;
+    }
 
     wp<const DisplayDevice> displayWeak;
     ui::LayerStack layerStack;
@@ -7329,7 +7368,10 @@
     {
         Mutex::Autolock lock(mStateLock);
         sp<DisplayDevice> display = getDisplayDeviceLocked(args.displayToken);
-        if (!display) return NAME_NOT_FOUND;
+        if (!display) {
+            invokeScreenCaptureError(NAME_NOT_FOUND, captureListener);
+            return;
+        }
         displayWeak = display;
         layerStack = display->getLayerStack();
 
@@ -7344,7 +7386,8 @@
                 excludeLayerIds.emplace(excludeLayer);
             } else {
                 ALOGW("Invalid layer handle passed as excludeLayer to captureDisplay");
-                return NAME_NOT_FOUND;
+                invokeScreenCaptureError(NAME_NOT_FOUND, captureListener);
+                return;
             }
         }
     }
@@ -7367,14 +7410,12 @@
         getLayerSnapshots = RenderArea::fromTraverseLayersLambda(traverseLayers);
     }
 
-    auto future = captureScreenCommon(std::move(renderAreaFuture), getLayerSnapshots, reqSize,
-                                      args.pixelFormat, args.allowProtected, args.grayscale,
-                                      captureListener);
-    return fenceStatus(future.get());
+    captureScreenCommon(std::move(renderAreaFuture), getLayerSnapshots, reqSize, args.pixelFormat,
+                        args.allowProtected, args.grayscale, captureListener);
 }
 
-status_t SurfaceFlinger::captureDisplay(DisplayId displayId,
-                                        const sp<IScreenCaptureListener>& captureListener) {
+void SurfaceFlinger::captureDisplay(DisplayId displayId,
+                                    const sp<IScreenCaptureListener>& captureListener) {
     ui::LayerStack layerStack;
     wp<const DisplayDevice> displayWeak;
     ui::Size size;
@@ -7383,7 +7424,8 @@
 
         const auto display = getDisplayDeviceLocked(displayId);
         if (!display) {
-            return NAME_NOT_FOUND;
+            invokeScreenCaptureError(NAME_NOT_FOUND, captureListener);
+            return;
         }
 
         displayWeak = display;
@@ -7411,25 +7453,25 @@
 
     if (captureListener == nullptr) {
         ALOGE("capture screen must provide a capture listener callback");
-        return BAD_VALUE;
+        invokeScreenCaptureError(BAD_VALUE, captureListener);
+        return;
     }
 
     constexpr bool kAllowProtected = false;
     constexpr bool kGrayscale = false;
 
-    auto future = captureScreenCommon(std::move(renderAreaFuture), getLayerSnapshots, size,
-                                      ui::PixelFormat::RGBA_8888, kAllowProtected, kGrayscale,
-                                      captureListener);
-    return fenceStatus(future.get());
+    captureScreenCommon(std::move(renderAreaFuture), getLayerSnapshots, size,
+                        ui::PixelFormat::RGBA_8888, kAllowProtected, kGrayscale, captureListener);
 }
 
-status_t SurfaceFlinger::captureLayers(const LayerCaptureArgs& args,
-                                       const sp<IScreenCaptureListener>& captureListener) {
+void SurfaceFlinger::captureLayers(const LayerCaptureArgs& args,
+                                   const sp<IScreenCaptureListener>& captureListener) {
     ATRACE_CALL();
 
     status_t validate = validateScreenshotPermissions(args);
     if (validate != OK) {
-        return validate;
+        invokeScreenCaptureError(validate, captureListener);
+        return;
     }
 
     ui::Size reqSize;
@@ -7447,13 +7489,15 @@
         parent = LayerHandle::getLayer(args.layerHandle);
         if (parent == nullptr) {
             ALOGE("captureLayers called with an invalid or removed parent");
-            return NAME_NOT_FOUND;
+            invokeScreenCaptureError(NAME_NOT_FOUND, captureListener);
+            return;
         }
 
         if (!canCaptureBlackoutContent &&
             parent->getDrawingState().flags & layer_state_t::eLayerSecure) {
             ALOGW("Attempting to capture secure layer: PERMISSION_DENIED");
-            return PERMISSION_DENIED;
+            invokeScreenCaptureError(PERMISSION_DENIED, captureListener);
+            return;
         }
 
         Rect parentSourceBounds = parent->getCroppedBufferSize(parent->getDrawingState());
@@ -7470,7 +7514,8 @@
         if (crop.isEmpty() || args.frameScaleX <= 0.0f || args.frameScaleY <= 0.0f) {
             // Error out if the layer has no source bounds (i.e. they are boundless) and a source
             // crop was not specified, or an invalid frame scale was provided.
-            return BAD_VALUE;
+            invokeScreenCaptureError(BAD_VALUE, captureListener);
+            return;
         }
         reqSize = ui::Size(crop.width() * args.frameScaleX, crop.height() * args.frameScaleY);
 
@@ -7480,7 +7525,8 @@
                 excludeLayerIds.emplace(excludeLayer);
             } else {
                 ALOGW("Invalid layer handle passed as excludeLayer to captureLayers");
-                return NAME_NOT_FOUND;
+                invokeScreenCaptureError(NAME_NOT_FOUND, captureListener);
+                return;
             }
         }
     } // mStateLock
@@ -7488,7 +7534,8 @@
     // really small crop or frameScale
     if (reqSize.width <= 0 || reqSize.height <= 0) {
         ALOGW("Failed to captureLayes: crop or scale too small");
-        return BAD_VALUE;
+        invokeScreenCaptureError(BAD_VALUE, captureListener);
+        return;
     }
 
     bool childrenOnly = args.childrenOnly;
@@ -7552,26 +7599,27 @@
 
     if (captureListener == nullptr) {
         ALOGE("capture screen must provide a capture listener callback");
-        return BAD_VALUE;
+        invokeScreenCaptureError(BAD_VALUE, captureListener);
+        return;
     }
 
-    auto future = captureScreenCommon(std::move(renderAreaFuture), getLayerSnapshots, reqSize,
-                                      args.pixelFormat, args.allowProtected, args.grayscale,
-                                      captureListener);
-    return fenceStatus(future.get());
+    captureScreenCommon(std::move(renderAreaFuture), getLayerSnapshots, reqSize, args.pixelFormat,
+                        args.allowProtected, args.grayscale, captureListener);
 }
 
-ftl::SharedFuture<FenceResult> SurfaceFlinger::captureScreenCommon(
-        RenderAreaFuture renderAreaFuture, GetLayerSnapshotsFunction getLayerSnapshots,
-        ui::Size bufferSize, ui::PixelFormat reqPixelFormat, bool allowProtected, bool grayscale,
-        const sp<IScreenCaptureListener>& captureListener) {
+void SurfaceFlinger::captureScreenCommon(RenderAreaFuture renderAreaFuture,
+                                         GetLayerSnapshotsFunction getLayerSnapshots,
+                                         ui::Size bufferSize, ui::PixelFormat reqPixelFormat,
+                                         bool allowProtected, bool grayscale,
+                                         const sp<IScreenCaptureListener>& captureListener) {
     ATRACE_CALL();
 
     if (exceedsMaxRenderTargetSize(bufferSize.getWidth(), bufferSize.getHeight())) {
         ALOGE("Attempted to capture screen with size (%" PRId32 ", %" PRId32
               ") that exceeds render target size limit.",
               bufferSize.getWidth(), bufferSize.getHeight());
-        return ftl::yield<FenceResult>(base::unexpected(BAD_VALUE)).share();
+        invokeScreenCaptureError(BAD_VALUE, captureListener);
+        return;
     }
 
     // Loop over all visible layers to see whether there's any protected layer. A protected layer is
@@ -7611,14 +7659,16 @@
         // Otherwise an irreponsible process may cause an SF crash by allocating
         // too much.
         ALOGE("%s: Buffer failed to allocate: %d", __func__, bufferStatus);
-        return ftl::yield<FenceResult>(base::unexpected(bufferStatus)).share();
+        invokeScreenCaptureError(bufferStatus, captureListener);
+        return;
     }
     const std::shared_ptr<renderengine::ExternalTexture> texture = std::make_shared<
             renderengine::impl::ExternalTexture>(buffer, getRenderEngine(),
                                                  renderengine::impl::ExternalTexture::Usage::
                                                          WRITEABLE);
-    return captureScreenCommon(std::move(renderAreaFuture), getLayerSnapshots, texture,
-                               false /* regionSampling */, grayscale, captureListener);
+    auto fence = captureScreenCommon(std::move(renderAreaFuture), getLayerSnapshots, texture,
+                                     false /* regionSampling */, grayscale, captureListener);
+    fence.get();
 }
 
 ftl::SharedFuture<FenceResult> SurfaceFlinger::captureScreenCommon(
@@ -7922,6 +7972,7 @@
         const sp<DisplayDevice>& display,
         const scheduler::RefreshRateSelector::PolicyVariant& policy) {
     const auto displayId = display->getPhysicalId();
+    ATRACE_NAME(ftl::Concat(__func__, ' ', displayId.value).c_str());
 
     Mutex::Autolock lock(mStateLock);
 
@@ -7942,13 +7993,11 @@
             break;
     }
 
-    const bool isInternalDisplay = mPhysicalDisplays.get(displayId)
-                                           .transform(&PhysicalDisplay::isInternal)
-                                           .value_or(false);
-
-    if (isInternalDisplay && displayId != mActiveDisplayId) {
-        // The policy will be be applied when the display becomes active.
-        ALOGV("%s(%s): Inactive display", __func__, to_string(displayId).c_str());
+    // TODO(b/255635711): Apply the policy once the display is powered on, which is currently only
+    // done for the internal display that becomes active on fold/unfold. For now, assume that DM
+    // always powers on the secondary (internal or external) display before setting its policy.
+    if (!display->isPoweredOn()) {
+        ALOGV("%s(%s): Display is powered off", __func__, to_string(displayId).c_str());
         return NO_ERROR;
     }
 
@@ -8161,19 +8210,22 @@
     bool setByHwc = getHwComposer().hasCapability(Capability::REFRESH_RATE_CHANGED_CALLBACK_DEBUG);
     for (const auto& [id, display] : mPhysicalDisplays) {
         if (display.snapshot().connectionType() == ui::DisplayConnectionType::Internal) {
-            if (setByHwc) {
-                const auto status =
-                        getHwComposer().setRefreshRateChangedCallbackDebugEnabled(id, enable);
-                if (status != NO_ERROR) {
-                    ALOGE("Error updating the refresh rate changed callback debug enabled");
-                    return;
-                }
-            }
-
             if (const auto device = getDisplayDeviceLocked(id)) {
-                device->enableRefreshRateOverlay(enable, setByHwc, mRefreshRateOverlaySpinner,
-                                                 mRefreshRateOverlayRenderRate,
-                                                 mRefreshRateOverlayShowInMiddle);
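+                // Enable the overlay first, then try to enable the HWC refresh-rate-changed debug callback; on failure, fall back to the SF-driven overlay.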
+                const auto enableOverlay = [&](const bool setByHwc) FTL_FAKE_GUARD(
+                                                   kMainThreadContext) {
+                    device->enableRefreshRateOverlay(enable, setByHwc, mRefreshRateOverlaySpinner,
+                                                     mRefreshRateOverlayRenderRate,
+                                                     mRefreshRateOverlayShowInMiddle);
+                };
+                enableOverlay(setByHwc);
+                if (setByHwc) {
+                    const auto status =
+                            getHwComposer().setRefreshRateChangedCallbackDebugEnabled(id, enable);
+                    if (status != NO_ERROR) {
+                        ALOGE("Error updating the refresh rate changed callback debug enabled");
+                        enableOverlay(/*setByHwc*/ false);
+                    }
+                }
             }
         }
     }
@@ -8293,6 +8345,20 @@
     getRenderEngine().onActiveDisplaySizeChanged(activeDisplay.getSize());
 }
 
+sp<DisplayDevice> SurfaceFlinger::getActivatableDisplay() const {
+    if (mPhysicalDisplays.size() == 1) return nullptr;
+
+    // TODO(b/255635821): Choose the pacesetter display, considering both internal and external
+    // displays. For now, pick the other internal display, assuming a dual-display foldable.
+    return findDisplay([this](const DisplayDevice& display) REQUIRES(mStateLock) {
+        const auto idOpt = PhysicalDisplayId::tryCast(display.getId());
+        return idOpt && *idOpt != mActiveDisplayId && display.isPoweredOn() &&
+                mPhysicalDisplays.get(*idOpt)
+                        .transform(&PhysicalDisplay::isInternal)
+                        .value_or(false);
+    });
+}
+
 void SurfaceFlinger::onActiveDisplayChangedLocked(const DisplayDevice* inactiveDisplayPtr,
                                                   const DisplayDevice& activeDisplay) {
     ATRACE_CALL();
@@ -8311,7 +8377,9 @@
 
     resetPhaseConfiguration(activeDisplay.getActiveMode().fps);
 
+    // TODO(b/255635711): Check for pending mode changes on other displays.
     mScheduler->setModeChangePending(false);
+
     mScheduler->setPacesetterDisplay(mActiveDisplayId);
 
     onActiveDisplaySizeChanged(activeDisplay);
@@ -8339,6 +8407,12 @@
     return NO_ERROR;
 }
 
+status_t SurfaceFlinger::getStalledTransactionInfo(
+        int pid, std::optional<TransactionHandler::StalledTransactionInfo>& result) {
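+    // result is left empty when the TransactionHandler has no stalled transaction for this pid.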
+    result = mTransactionHandler.getStalledTransactionInfo(pid);
+    return NO_ERROR;
+}
+
 std::shared_ptr<renderengine::ExternalTexture> SurfaceFlinger::getExternalTextureFromBufferData(
         BufferData& bufferData, const char* layerName, uint64_t transactionId) {
     if (bufferData.buffer &&
@@ -8614,7 +8688,9 @@
                      .excludeLayerIds = std::move(excludeLayerIds),
                      .supportedLayerGenericMetadata =
                              getHwComposer().getSupportedLayerGenericMetadata(),
-                     .genericLayerMetadataKeyMap = getGenericLayerMetadataKeyMap()};
+                     .genericLayerMetadataKeyMap = getGenericLayerMetadataKeyMap(),
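+                     // Rounded corners need GPU composition, so skip them for protected buffers when RenderEngine cannot sample protected content.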
+                     .skipRoundCornersWhenProtected =
+                             !getRenderEngine().supportsProtectedContent()};
         mLayerSnapshotBuilder.update(args);
 
         auto getLayerSnapshotsFn =
@@ -8649,7 +8725,9 @@
                      .excludeLayerIds = std::move(excludeLayerIds),
                      .supportedLayerGenericMetadata =
                              getHwComposer().getSupportedLayerGenericMetadata(),
-                     .genericLayerMetadataKeyMap = getGenericLayerMetadataKeyMap()};
+                     .genericLayerMetadataKeyMap = getGenericLayerMetadataKeyMap(),
+                     .skipRoundCornersWhenProtected =
+                             !getRenderEngine().supportsProtectedContent()};
         mLayerSnapshotBuilder.update(args);
 
         auto getLayerSnapshotsFn =
@@ -8842,33 +8920,35 @@
         outInfo->secure = info.secure;
         outInfo->installOrientation = static_cast<gui::Rotation>(info.installOrientation);
 
-        gui::DeviceProductInfo dinfo;
-        std::optional<DeviceProductInfo> dpi = info.deviceProductInfo;
-        dinfo.name = std::move(dpi->name);
-        dinfo.manufacturerPnpId =
-                std::vector<uint8_t>(dpi->manufacturerPnpId.begin(), dpi->manufacturerPnpId.end());
-        dinfo.productId = dpi->productId;
-        dinfo.relativeAddress =
-                std::vector<uint8_t>(dpi->relativeAddress.begin(), dpi->relativeAddress.end());
-        if (const auto* model =
-                    std::get_if<DeviceProductInfo::ModelYear>(&dpi->manufactureOrModelDate)) {
-            gui::DeviceProductInfo::ModelYear modelYear;
-            modelYear.year = model->year;
-            dinfo.manufactureOrModelDate.set<Tag::modelYear>(modelYear);
-        } else if (const auto* manufacture = std::get_if<DeviceProductInfo::ManufactureYear>(
-                           &dpi->manufactureOrModelDate)) {
-            gui::DeviceProductInfo::ManufactureYear date;
-            date.modelYear.year = manufacture->year;
-            dinfo.manufactureOrModelDate.set<Tag::manufactureYear>(date);
-        } else if (const auto* manufacture = std::get_if<DeviceProductInfo::ManufactureWeekAndYear>(
-                           &dpi->manufactureOrModelDate)) {
-            gui::DeviceProductInfo::ManufactureWeekAndYear date;
-            date.manufactureYear.modelYear.year = manufacture->year;
-            date.week = manufacture->week;
-            dinfo.manufactureOrModelDate.set<Tag::manufactureWeekAndYear>(date);
-        }
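+        // Guard against displays that report no product info; the old code dereferenced the optional unconditionally.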
+        if (const std::optional<DeviceProductInfo> dpi = info.deviceProductInfo) {
+            gui::DeviceProductInfo dinfo;
+            dinfo.name = std::move(dpi->name);
+            dinfo.manufacturerPnpId = std::vector<uint8_t>(dpi->manufacturerPnpId.begin(),
+                                                           dpi->manufacturerPnpId.end());
+            dinfo.productId = dpi->productId;
+            dinfo.relativeAddress =
+                    std::vector<uint8_t>(dpi->relativeAddress.begin(), dpi->relativeAddress.end());
+            if (const auto* model =
+                        std::get_if<DeviceProductInfo::ModelYear>(&dpi->manufactureOrModelDate)) {
+                gui::DeviceProductInfo::ModelYear modelYear;
+                modelYear.year = model->year;
+                dinfo.manufactureOrModelDate.set<Tag::modelYear>(modelYear);
+            } else if (const auto* manufacture = std::get_if<DeviceProductInfo::ManufactureYear>(
+                               &dpi->manufactureOrModelDate)) {
+                gui::DeviceProductInfo::ManufactureYear date;
+                date.modelYear.year = manufacture->year;
+                dinfo.manufactureOrModelDate.set<Tag::manufactureYear>(date);
+            } else if (const auto* manufacture =
+                               std::get_if<DeviceProductInfo::ManufactureWeekAndYear>(
+                                       &dpi->manufactureOrModelDate)) {
+                gui::DeviceProductInfo::ManufactureWeekAndYear date;
+                date.manufactureYear.modelYear.year = manufacture->year;
+                date.week = manufacture->week;
+                dinfo.manufactureOrModelDate.set<Tag::manufactureWeekAndYear>(date);
+            }
 
-        outInfo->deviceProductInfo = dinfo;
+            outInfo->deviceProductInfo = dinfo;
+        }
     }
     return binderStatusFromStatusT(status);
 }
@@ -9055,28 +9135,28 @@
 
 binder::Status SurfaceComposerAIDL::captureDisplay(
         const DisplayCaptureArgs& args, const sp<IScreenCaptureListener>& captureListener) {
-    status_t status = mFlinger->captureDisplay(args, captureListener);
-    return binderStatusFromStatusT(status);
+    mFlinger->captureDisplay(args, captureListener);
+    return binderStatusFromStatusT(NO_ERROR);
 }
 
 binder::Status SurfaceComposerAIDL::captureDisplayById(
         int64_t displayId, const sp<IScreenCaptureListener>& captureListener) {
-    status_t status;
     IPCThreadState* ipc = IPCThreadState::self();
     const int uid = ipc->getCallingUid();
     if (uid == AID_ROOT || uid == AID_GRAPHICS || uid == AID_SYSTEM || uid == AID_SHELL) {
         std::optional<DisplayId> id = DisplayId::fromValue(static_cast<uint64_t>(displayId));
-        status = mFlinger->captureDisplay(*id, captureListener);
+        mFlinger->captureDisplay(*id, captureListener);
     } else {
-        status = PERMISSION_DENIED;
+        invokeScreenCaptureError(PERMISSION_DENIED, captureListener);
     }
-    return binderStatusFromStatusT(status);
+    return binderStatusFromStatusT(NO_ERROR);
 }
 
 binder::Status SurfaceComposerAIDL::captureLayers(
         const LayerCaptureArgs& args, const sp<IScreenCaptureListener>& captureListener) {
-    status_t status = mFlinger->captureLayers(args, captureListener);
-    return binderStatusFromStatusT(status);
+    mFlinger->captureLayers(args, captureListener);
+    return binderStatusFromStatusT(NO_ERROR);
 }
 
 binder::Status SurfaceComposerAIDL::overrideHdrTypes(const sp<IBinder>& display,
@@ -9123,11 +9203,6 @@
     return binderStatusFromStatusT(status);
 }
 
-binder::Status SurfaceComposerAIDL::getColorManagement(bool* outGetColorManagement) {
-    status_t status = mFlinger->getColorManagement(outGetColorManagement);
-    return binderStatusFromStatusT(status);
-}
-
 binder::Status SurfaceComposerAIDL::getCompositionPreference(gui::CompositionPreference* outPref) {
     ui::Dataspace dataspace;
     ui::PixelFormat pixelFormat;
@@ -9442,6 +9517,28 @@
     return binderStatusFromStatusT(status);
 }
 
+binder::Status SurfaceComposerAIDL::getStalledTransactionInfo(
+        int pid, std::optional<gui::StalledTransactionInfo>* outInfo) {
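+    // Gated on ACCESS_SURFACE_FLINGER; converts the TransactionHandler record into the parcelable gui::StalledTransactionInfo.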
+    const int callingPid = IPCThreadState::self()->getCallingPid();
+    const int callingUid = IPCThreadState::self()->getCallingUid();
+    if (!checkPermission(sAccessSurfaceFlinger, callingPid, callingUid)) {
+        return binderStatusFromStatusT(PERMISSION_DENIED);
+    }
+
+    std::optional<TransactionHandler::StalledTransactionInfo> stalledTransactionInfo;
+    status_t status = mFlinger->getStalledTransactionInfo(pid, stalledTransactionInfo);
+    if (stalledTransactionInfo) {
+        gui::StalledTransactionInfo result;
+        result.layerName = String16{stalledTransactionInfo->layerName.c_str()};
+        result.bufferId = stalledTransactionInfo->bufferId;
+        result.frameNumber = stalledTransactionInfo->frameNumber;
+        outInfo->emplace(std::move(result));
+    } else {
+        outInfo->reset();
+    }
+    return binderStatusFromStatusT(status);
+}
+
 status_t SurfaceComposerAIDL::checkAccessPermission(bool usePermissionCache) {
     if (!mFlinger->callingThreadHasUnscopedSurfaceFlingerAccess(usePermissionCache)) {
         IPCThreadState* ipc = IPCThreadState::self();
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index b07910d..5d2115c 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -236,9 +236,6 @@
     static uint32_t maxGraphicsWidth;
     static uint32_t maxGraphicsHeight;
 
-    // Indicate if device wants color management on its display.
-    static const constexpr bool useColorManagement = true;
-
     static bool useContextPriority;
 
     // The data space and pixel format that SurfaceFlinger expects hardware composer
@@ -535,9 +532,9 @@
             EventRegistrationFlags eventRegistration = {},
             const sp<IBinder>& layerHandle = nullptr);
 
-    status_t captureDisplay(const DisplayCaptureArgs&, const sp<IScreenCaptureListener>&);
-    status_t captureDisplay(DisplayId, const sp<IScreenCaptureListener>&);
-    status_t captureLayers(const LayerCaptureArgs&, const sp<IScreenCaptureListener>&);
+    void captureDisplay(const DisplayCaptureArgs&, const sp<IScreenCaptureListener>&);
+    void captureDisplay(DisplayId, const sp<IScreenCaptureListener>&);
+    void captureLayers(const LayerCaptureArgs&, const sp<IScreenCaptureListener>&);
 
     status_t getDisplayStats(const sp<IBinder>& displayToken, DisplayStatInfo* stats);
     status_t getDisplayState(const sp<IBinder>& displayToken, ui::DisplayState*)
@@ -567,7 +564,6 @@
                               const std::vector<ui::Hdr>& hdrTypes);
     status_t onPullAtom(const int32_t atomId, std::vector<uint8_t>* pulledData, bool* success);
     status_t getLayerDebugInfo(std::vector<gui::LayerDebugInfo>* outLayers);
-    status_t getColorManagement(bool* outGetColorManagement) const;
     status_t getCompositionPreference(ui::Dataspace* outDataspace, ui::PixelFormat* outPixelFormat,
                                       ui::Dataspace* outWideColorGamutDataspace,
                                       ui::PixelFormat* outWideColorGamutPixelFormat) const;
@@ -622,6 +618,9 @@
     status_t removeWindowInfosListener(
             const sp<gui::IWindowInfosListener>& windowInfosListener) const;
 
+    status_t getStalledTransactionInfo(
+            int pid, std::optional<TransactionHandler::StalledTransactionInfo>& result);
+
     // Implements IBinder::DeathRecipient.
     void binderDied(const wp<IBinder>& who) override;
 
@@ -638,7 +637,8 @@
 
     // ICompositor overrides:
     void configure() override REQUIRES(kMainThreadContext);
-    bool commit(const scheduler::FrameTarget&) override REQUIRES(kMainThreadContext);
+    bool commit(PhysicalDisplayId pacesetterId, const scheduler::FrameTargets&) override
+            REQUIRES(kMainThreadContext);
     CompositeResultsPerDisplay composite(PhysicalDisplayId pacesetterId,
                                          const scheduler::FrameTargeters&) override
             REQUIRES(kMainThreadContext);
@@ -684,11 +684,10 @@
             REQUIRES(mStateLock);
 
     status_t setActiveModeFromBackdoor(const sp<display::DisplayToken>&, DisplayModeId);
-    // Sets the active mode and a new refresh rate in SF.
-    void updateInternalStateWithChangedMode() REQUIRES(mStateLock, kMainThreadContext);
-    // Calls to setActiveMode on the main thread if there is a pending mode change
-    // that needs to be applied.
-    void setActiveModeInHwcIfNeeded() REQUIRES(mStateLock, kMainThreadContext);
+
+    void initiateDisplayModeChanges() REQUIRES(mStateLock, kMainThreadContext);
+    void finalizeDisplayModeChange(DisplayDevice&) REQUIRES(mStateLock, kMainThreadContext);
+
     void clearDesiredActiveModeState(const sp<DisplayDevice>&) REQUIRES(mStateLock);
     // Called when active mode is no longer is progress
     void desiredActiveModeChangeDone(const sp<DisplayDevice>&) REQUIRES(mStateLock);
@@ -836,10 +835,9 @@
     // Boot animation, on/off animations and screen capture
     void startBootAnim();
 
-    ftl::SharedFuture<FenceResult> captureScreenCommon(RenderAreaFuture, GetLayerSnapshotsFunction,
-                                                       ui::Size bufferSize, ui::PixelFormat,
-                                                       bool allowProtected, bool grayscale,
-                                                       const sp<IScreenCaptureListener>&);
+    void captureScreenCommon(RenderAreaFuture, GetLayerSnapshotsFunction, ui::Size bufferSize,
+                             ui::PixelFormat, bool allowProtected, bool grayscale,
+                             const sp<IScreenCaptureListener>&);
     ftl::SharedFuture<FenceResult> captureScreenCommon(
             RenderAreaFuture, GetLayerSnapshotsFunction,
             const std::shared_ptr<renderengine::ExternalTexture>&, bool regionSampling,
@@ -942,7 +940,8 @@
     template <typename Predicate>
     sp<DisplayDevice> findDisplay(Predicate p) const REQUIRES(mStateLock) {
         const auto it = std::find_if(mDisplays.begin(), mDisplays.end(),
-                                     [&](const auto& pair) { return p(*pair.second); });
+                                     [&](const auto& pair)
+                                             REQUIRES(mStateLock) { return p(*pair.second); });
 
         return it == mDisplays.end() ? nullptr : it->second;
     }
@@ -1011,7 +1010,9 @@
                                const DisplayDeviceState& drawingState)
             REQUIRES(mStateLock, kMainThreadContext);
 
-    void dispatchDisplayHotplugEvent(PhysicalDisplayId displayId, bool connected);
+    void dispatchDisplayHotplugEvent(PhysicalDisplayId, bool connected);
+    void dispatchDisplayModeChangeEvent(PhysicalDisplayId, const scheduler::FrameRateMode&)
+            REQUIRES(mStateLock);
 
     /*
      * VSYNC
@@ -1053,6 +1054,9 @@
     VirtualDisplayId acquireVirtualDisplay(ui::Size, ui::PixelFormat) REQUIRES(mStateLock);
     void releaseVirtualDisplay(VirtualDisplayId);
 
+    // Returns a display other than `mActiveDisplayId` that can be activated, if any.
+    sp<DisplayDevice> getActivatableDisplay() const REQUIRES(mStateLock, kMainThreadContext);
+
     void onActiveDisplayChangedLocked(const DisplayDevice* inactiveDisplayPtr,
                                       const DisplayDevice& activeDisplay)
             REQUIRES(mStateLock, kMainThreadContext);
@@ -1206,6 +1210,8 @@
     // latched.
     std::unordered_set<sp<Layer>, SpHash<Layer>> mLayersWithQueuedFrames;
     std::unordered_set<sp<Layer>, SpHash<Layer>> mLayersWithBuffersRemoved;
+    std::unordered_set<uint32_t> mLayersIdsWithQueuedFrames;
+
     // Tracks layers that need to update a display's dirty region.
     std::vector<sp<Layer>> mLayersPendingRefresh;
     // Sorted list of layers that were composed during previous frame. This is used to
@@ -1330,9 +1336,6 @@
     std::unique_ptr<scheduler::RefreshRateStats> mRefreshRateStats;
     scheduler::PresentLatencyTracker mPresentLatencyTracker GUARDED_BY(kMainThreadContext);
 
-    // below flags are set by main thread only
-    bool mSetActiveModePending = false;
-
     bool mLumaSampling = true;
     sp<RegionSamplingThread> mRegionSamplingThread;
     sp<FpsReporter> mFpsReporter;
@@ -1508,7 +1511,6 @@
                                     const std::vector<int32_t>& hdrTypes) override;
     binder::Status onPullAtom(int32_t atomId, gui::PullAtomData* outPullData) override;
     binder::Status getLayerDebugInfo(std::vector<gui::LayerDebugInfo>* outLayers) override;
-    binder::Status getColorManagement(bool* outGetColorManagement) override;
     binder::Status getCompositionPreference(gui::CompositionPreference* outPref) override;
     binder::Status getDisplayedContentSamplingAttributes(
             const sp<IBinder>& display, gui::ContentSamplingAttributes* outAttrs) override;
@@ -1560,6 +1562,8 @@
                                           gui::WindowInfosListenerInfo* outInfo) override;
     binder::Status removeWindowInfosListener(
             const sp<gui::IWindowInfosListener>& windowInfosListener) override;
+    binder::Status getStalledTransactionInfo(int pid,
+                                             std::optional<gui::StalledTransactionInfo>* outInfo);
 
 private:
     static const constexpr bool kUsePermissionCache = true;
diff --git a/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h b/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h
index 28ac664..ca1af6e 100644
--- a/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h
+++ b/services/surfaceflinger/fuzzer/surfaceflinger_fuzzers_utils.h
@@ -286,7 +286,7 @@
 private:
     // ICompositor overrides:
     void configure() override {}
-    bool commit(const scheduler::FrameTarget&) override { return false; }
+    bool commit(PhysicalDisplayId, const scheduler::FrameTargets&) override { return false; }
     CompositeResultsPerDisplay composite(PhysicalDisplayId,
                                          const scheduler::FrameTargeters&) override {
         return {};
diff --git a/services/surfaceflinger/layerproto/display.proto b/services/surfaceflinger/layerproto/display.proto
index c8cd926..64de775 100644
--- a/services/surfaceflinger/layerproto/display.proto
+++ b/services/surfaceflinger/layerproto/display.proto
@@ -35,4 +35,8 @@
     TransformProto transform = 6;
 
     bool is_virtual = 7;
+
+    double dpi_x = 8;
+
+    double dpi_y = 9;
 }
diff --git a/services/surfaceflinger/tests/LayerRenderTypeTransaction_test.cpp b/services/surfaceflinger/tests/LayerRenderTypeTransaction_test.cpp
index b8068f7..2b1834d 100644
--- a/services/surfaceflinger/tests/LayerRenderTypeTransaction_test.cpp
+++ b/services/surfaceflinger/tests/LayerRenderTypeTransaction_test.cpp
@@ -1479,15 +1479,11 @@
     matrix[2][2] = 0.11;
 
     // degamma before applying the matrix
-    if (mColorManagementUsed) {
-        ColorTransformHelper::DegammaColor(expected);
-    }
+    ColorTransformHelper::DegammaColor(expected);
 
     ColorTransformHelper::applyMatrix(expected, matrix);
 
-    if (mColorManagementUsed) {
-        ColorTransformHelper::GammaColor(expected);
-    }
+    ColorTransformHelper::GammaColor(expected);
 
     const Color expectedColor = {uint8_t(expected.r * 255), uint8_t(expected.g * 255),
                                  uint8_t(expected.b * 255), 255};
@@ -1537,15 +1533,11 @@
     matrix[2][2] = 0.11;
 
     // degamma before applying the matrix
-    if (mColorManagementUsed) {
-        ColorTransformHelper::DegammaColor(expected);
-    }
+    ColorTransformHelper::DegammaColor(expected);
 
     ColorTransformHelper::applyMatrix(expected, matrix);
 
-    if (mColorManagementUsed) {
-        ColorTransformHelper::GammaColor(expected);
-    }
+    ColorTransformHelper::GammaColor(expected);
 
     const Color expectedColor = {uint8_t(expected.r * 255), uint8_t(expected.g * 255),
                                  uint8_t(expected.b * 255), 255};
@@ -1608,16 +1600,12 @@
     matrixParent[2][2] = 0.10;
 
     // degamma before applying the matrix
-    if (mColorManagementUsed) {
-        ColorTransformHelper::DegammaColor(expected);
-    }
+    ColorTransformHelper::DegammaColor(expected);
 
     ColorTransformHelper::applyMatrix(expected, matrixChild);
     ColorTransformHelper::applyMatrix(expected, matrixParent);
 
-    if (mColorManagementUsed) {
-        ColorTransformHelper::GammaColor(expected);
-    }
+    ColorTransformHelper::GammaColor(expected);
 
     const Color expectedColor = {uint8_t(expected.r * 255), uint8_t(expected.g * 255),
                                  uint8_t(expected.b * 255), 255};
diff --git a/services/surfaceflinger/tests/LayerTransactionTest.h b/services/surfaceflinger/tests/LayerTransactionTest.h
index badd5be..2bdb8a4 100644
--- a/services/surfaceflinger/tests/LayerTransactionTest.h
+++ b/services/surfaceflinger/tests/LayerTransactionTest.h
@@ -47,9 +47,6 @@
         ASSERT_NO_FATAL_FAILURE(SetUpDisplay());
 
         sp<gui::ISurfaceComposer> sf(ComposerServiceAIDL::getComposerService());
-        binder::Status status = sf->getColorManagement(&mColorManagementUsed);
-        ASSERT_NO_FATAL_FAILURE(gui::aidl_utils::statusTFromBinderStatus(status));
-
         mCaptureArgs.displayToken = mDisplay;
     }
 
@@ -282,7 +279,6 @@
     const int32_t mLayerZBase = std::numeric_limits<int32_t>::max() - 256;
 
     sp<SurfaceControl> mBlackBgSurface;
-    bool mColorManagementUsed;
 
     DisplayCaptureArgs mCaptureArgs;
     ScreenCaptureResults mCaptureResults;
diff --git a/services/surfaceflinger/tests/unittests/Android.bp b/services/surfaceflinger/tests/unittests/Android.bp
index fa5fa95..3dd33b9 100644
--- a/services/surfaceflinger/tests/unittests/Android.bp
+++ b/services/surfaceflinger/tests/unittests/Android.bp
@@ -148,6 +148,7 @@
     defaults: [
         "android.hardware.graphics.common-ndk_static",
         "android.hardware.graphics.composer3-ndk_static",
+        "android.hardware.power-ndk_static",
         "librenderengine_deps",
     ],
     static_libs: [
@@ -161,7 +162,6 @@
         "android.hardware.power@1.1",
         "android.hardware.power@1.2",
         "android.hardware.power@1.3",
-        "android.hardware.power-V4-ndk",
         "libaidlcommonsupport",
         "libcompositionengine_mocks",
         "libcompositionengine",
diff --git a/services/surfaceflinger/tests/unittests/CompositionTest.cpp b/services/surfaceflinger/tests/unittests/CompositionTest.cpp
index e8a9cfe..14fa492 100644
--- a/services/surfaceflinger/tests/unittests/CompositionTest.cpp
+++ b/services/surfaceflinger/tests/unittests/CompositionTest.cpp
@@ -600,7 +600,6 @@
                     EXPECT_THAT(layer.source.buffer.buffer, Not(IsNull()));
                     EXPECT_THAT(layer.source.buffer.fence, Not(IsNull()));
                     EXPECT_EQ(DEFAULT_TEXTURE_ID, layer.source.buffer.textureName);
-                    EXPECT_EQ(false, layer.source.buffer.isY410BT2020);
                     EXPECT_EQ(true, layer.source.buffer.usePremultipliedAlpha);
                     EXPECT_EQ(false, layer.source.buffer.isOpaque);
                     EXPECT_EQ(0.0, layer.geometry.roundedCornersRadius.x);
diff --git a/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp b/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp
index 1c9aee7..d30d5b8 100644
--- a/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp
+++ b/services/surfaceflinger/tests/unittests/FrameRateSelectionPriorityTest.cpp
@@ -99,8 +99,7 @@
 }
 
 void RefreshRateSelectionTest::commitTransaction(Layer* layer) {
-    auto c = layer->getDrawingState();
-    layer->commitTransaction(c);
+    layer->commitTransaction();
 }
 
 namespace {
diff --git a/services/surfaceflinger/tests/unittests/LayerHierarchyTest.h b/services/surfaceflinger/tests/unittests/LayerHierarchyTest.h
index e475b84..f64ba2a 100644
--- a/services/surfaceflinger/tests/unittests/LayerHierarchyTest.h
+++ b/services/surfaceflinger/tests/unittests/LayerHierarchyTest.h
@@ -17,6 +17,8 @@
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
 
+#include <gui/fake/BufferData.h>
+
 #include "Client.h" // temporarily needed for LayerCreationArgs
 #include "FrontEnd/LayerCreationArgs.h"
 #include "FrontEnd/LayerHierarchy.h"
@@ -333,6 +335,43 @@
         mLifecycleManager.applyTransactions(transactions);
     }
 
+    void setRoundedCorners(uint32_t id, float radius) {
+        std::vector<TransactionState> transactions;
+        transactions.emplace_back();
+        transactions.back().states.push_back({});
+
+        transactions.back().states.front().state.what = layer_state_t::eCornerRadiusChanged;
+        transactions.back().states.front().layerId = id;
+        transactions.back().states.front().state.cornerRadius = radius;
+        mLifecycleManager.applyTransactions(transactions);
+    }
+
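+    // Attaches the texture to the layer and synthesizes matching fake BufferData so buffer-dependent snapshot paths are exercised.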
+    void setBuffer(uint32_t id, std::shared_ptr<renderengine::ExternalTexture> texture) {
+        std::vector<TransactionState> transactions;
+        transactions.emplace_back();
+        transactions.back().states.push_back({});
+
+        transactions.back().states.front().state.what = layer_state_t::eBufferChanged;
+        transactions.back().states.front().layerId = id;
+        transactions.back().states.front().externalTexture = texture;
+        transactions.back().states.front().state.bufferData =
+                std::make_shared<fake::BufferData>(texture->getId(), texture->getWidth(),
+                                                   texture->getHeight(), texture->getPixelFormat(),
+                                                   texture->getUsage());
+        mLifecycleManager.applyTransactions(transactions);
+    }
+
+    void setDataspace(uint32_t id, ui::Dataspace dataspace) {
+        std::vector<TransactionState> transactions;
+        transactions.emplace_back();
+        transactions.back().states.push_back({});
+
+        transactions.back().states.front().state.what = layer_state_t::eDataspaceChanged;
+        transactions.back().states.front().layerId = id;
+        transactions.back().states.front().state.dataspace = dataspace;
+        mLifecycleManager.applyTransactions(transactions);
+    }
+
     LayerLifecycleManager mLifecycleManager;
 };
 
diff --git a/services/surfaceflinger/tests/unittests/LayerSnapshotTest.cpp b/services/surfaceflinger/tests/unittests/LayerSnapshotTest.cpp
index a581d5b..84c3775 100644
--- a/services/surfaceflinger/tests/unittests/LayerSnapshotTest.cpp
+++ b/services/surfaceflinger/tests/unittests/LayerSnapshotTest.cpp
@@ -17,11 +17,14 @@
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
 
+#include <renderengine/mock/FakeExternalTexture.h>
+
 #include "FrontEnd/LayerHierarchy.h"
 #include "FrontEnd/LayerLifecycleManager.h"
 #include "FrontEnd/LayerSnapshotBuilder.h"
 #include "Layer.h"
 #include "LayerHierarchyTest.h"
+#include "ui/GraphicTypes.h"
 
 #define UPDATE_AND_VERIFY(BUILDER, ...)                                    \
     ({                                                                     \
@@ -68,12 +71,17 @@
         setColor(id);
     }
 
-    void updateAndVerify(LayerSnapshotBuilder& actualBuilder, bool hasDisplayChanges,
-                         const std::vector<uint32_t> expectedVisibleLayerIdsInZOrder) {
+    void update(LayerSnapshotBuilder& actualBuilder, LayerSnapshotBuilder::Args& args) {
         if (mLifecycleManager.getGlobalChanges().test(RequestedLayerState::Changes::Hierarchy)) {
             mHierarchyBuilder.update(mLifecycleManager.getLayers(),
                                      mLifecycleManager.getDestroyedLayers());
         }
+        args.root = mHierarchyBuilder.getHierarchy();
+        actualBuilder.update(args);
+    }
+
+    void updateAndVerify(LayerSnapshotBuilder& actualBuilder, bool hasDisplayChanges,
+                         const std::vector<uint32_t> expectedVisibleLayerIdsInZOrder) {
         LayerSnapshotBuilder::Args args{.root = mHierarchyBuilder.getHierarchy(),
                                         .layerLifecycleManager = mLifecycleManager,
                                         .includeMetadata = false,
@@ -83,7 +91,7 @@
                                         .supportsBlur = true,
                                         .supportedLayerGenericMetadata = {},
                                         .genericLayerMetadataKeyMap = {}};
-        actualBuilder.update(args);
+        update(actualBuilder, args);
 
         // rebuild layer snapshots from scratch and verify that it matches the updated state.
         LayerSnapshotBuilder expectedBuilder(args);
@@ -596,4 +604,56 @@
               scheduler::LayerInfo::FrameRateCompatibility::Default);
 }
 
+TEST_F(LayerSnapshotTest, translateDataspace) {
+    setDataspace(1, ui::Dataspace::UNKNOWN);
+    UPDATE_AND_VERIFY(mSnapshotBuilder, STARTING_ZORDER);
+    EXPECT_EQ(getSnapshot({.id = 1})->dataspace, ui::Dataspace::V0_SRGB);
+}
+
+TEST_F(LayerSnapshotTest, skipRoundCornersWhenProtected) {
+    setRoundedCorners(1, 42.f);
+    setRoundedCorners(2, 42.f);
+    setCrop(1, Rect{1000, 1000});
+    setCrop(2, Rect{1000, 1000});
+
+    UPDATE_AND_VERIFY(mSnapshotBuilder, STARTING_ZORDER);
+    EXPECT_TRUE(getSnapshot({.id = 1})->roundedCorner.hasRoundedCorners());
+    EXPECT_EQ(getSnapshot({.id = 1})->roundedCorner.radius.x, 42.f);
+    EXPECT_TRUE(getSnapshot({.id = 2})->roundedCorner.hasRoundedCorners());
+
+    // add a buffer with the protected bit, check rounded corners are not set when
+    // skipRoundCornersWhenProtected == true
+    setBuffer(1,
+              std::make_shared<
+                      renderengine::mock::FakeExternalTexture>(1U /*width*/, 1U /*height*/,
+                                                               1ULL /* bufferId */,
+                                                               HAL_PIXEL_FORMAT_RGBA_8888,
+                                                               GRALLOC_USAGE_PROTECTED /*usage*/));
+
+    LayerSnapshotBuilder::Args args{.root = mHierarchyBuilder.getHierarchy(),
+                                    .layerLifecycleManager = mLifecycleManager,
+                                    .includeMetadata = false,
+                                    .displays = mFrontEndDisplayInfos,
+                                    .displayChanges = false,
+                                    .globalShadowSettings = globalShadowSettings,
+                                    .supportsBlur = true,
+                                    .supportedLayerGenericMetadata = {},
+                                    .genericLayerMetadataKeyMap = {},
+                                    .skipRoundCornersWhenProtected = true};
+    update(mSnapshotBuilder, args);
+    EXPECT_FALSE(getSnapshot({.id = 1})->roundedCorner.hasRoundedCorners());
+    // layer 2 doesn't have a buffer and should be unaffected
+    EXPECT_TRUE(getSnapshot({.id = 2})->roundedCorner.hasRoundedCorners());
+
+    // remove protected bit, check rounded corners are set
+    setBuffer(1,
+              std::make_shared<renderengine::mock::FakeExternalTexture>(1U /*width*/, 1U /*height*/,
+                                                                        2ULL /* bufferId */,
+                                                                        HAL_PIXEL_FORMAT_RGBA_8888,
+                                                                        0 /*usage*/));
+    update(mSnapshotBuilder, args);
+    EXPECT_TRUE(getSnapshot({.id = 1})->roundedCorner.hasRoundedCorners());
+    EXPECT_EQ(getSnapshot({.id = 1})->roundedCorner.radius.x, 42.f);
+}
+
 } // namespace android::surfaceflinger::frontend
diff --git a/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp b/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp
index 1dcf222..9aa089f 100644
--- a/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp
+++ b/services/surfaceflinger/tests/unittests/MessageQueueTest.cpp
@@ -35,7 +35,7 @@
 
 struct NoOpCompositor final : ICompositor {
     void configure() override {}
-    bool commit(const scheduler::FrameTarget&) override { return false; }
+    bool commit(PhysicalDisplayId, const scheduler::FrameTargets&) override { return false; }
     CompositeResultsPerDisplay composite(PhysicalDisplayId,
                                          const scheduler::FrameTargeters&) override {
         return {};
diff --git a/services/surfaceflinger/tests/unittests/SetFrameRateTest.cpp b/services/surfaceflinger/tests/unittests/SetFrameRateTest.cpp
index 44ab569..a1e4e25 100644
--- a/services/surfaceflinger/tests/unittests/SetFrameRateTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SetFrameRateTest.cpp
@@ -85,8 +85,7 @@
 
 void SetFrameRateTest::commitTransaction() {
     for (auto layer : mLayers) {
-        auto c = layer->getDrawingState();
-        layer->commitTransaction(c);
+        layer->commitTransaction();
     }
 }
 
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp
index 703bdda..24eb318 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_DisplayModeSwitching.cpp
@@ -67,10 +67,36 @@
                 .WillByDefault(Return(true));
     }
 
+    static constexpr HWDisplayId kInnerDisplayHwcId = PrimaryDisplayVariant::HWC_DISPLAY_ID;
+    static constexpr HWDisplayId kOuterDisplayHwcId = kInnerDisplayHwcId + 1;
+
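+    // Injects a second internal (outer) display that starts powered off with active mode 120 Hz, and returns {inner, outer} for convenience.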
+    auto injectOuterDisplay() {
+        constexpr PhysicalDisplayId kOuterDisplayId = PhysicalDisplayId::fromPort(254u);
+
+        constexpr bool kIsPrimary = false;
+        TestableSurfaceFlinger::FakeHwcDisplayInjector(kOuterDisplayId, hal::DisplayType::PHYSICAL,
+                                                       kIsPrimary)
+                .setHwcDisplayId(kOuterDisplayHwcId)
+                .setPowerMode(hal::PowerMode::OFF)
+                .inject(&mFlinger, mComposer);
+
+        mOuterDisplay = mFakeDisplayInjector.injectInternalDisplay(
+                [&](FakeDisplayDeviceInjector& injector) {
+                    injector.setPowerMode(hal::PowerMode::OFF);
+                    injector.setDisplayModes(mock::cloneForDisplay(kOuterDisplayId, kModes),
+                                             kModeId120);
+                },
+                {.displayId = kOuterDisplayId,
+                 .hwcDisplayId = kOuterDisplayHwcId,
+                 .isPrimary = kIsPrimary});
+
+        return std::forward_as_tuple(mDisplay, mOuterDisplay);
+    }
+
 protected:
     void setupScheduler(std::shared_ptr<scheduler::RefreshRateSelector>);
 
-    sp<DisplayDevice> mDisplay;
+    sp<DisplayDevice> mDisplay, mOuterDisplay;
     mock::EventThread* mAppEventThread;
 
     static constexpr DisplayModeId kModeId60{0};
@@ -328,32 +354,16 @@
     return true;
 }
 
-TEST_F(DisplayModeSwitchingTest, multiDisplay) {
-    constexpr HWDisplayId kInnerDisplayHwcId = PrimaryDisplayVariant::HWC_DISPLAY_ID;
-    constexpr HWDisplayId kOuterDisplayHwcId = kInnerDisplayHwcId + 1;
+TEST_F(DisplayModeSwitchingTest, innerXorOuterDisplay) {
+    const auto [innerDisplay, outerDisplay] = injectOuterDisplay();
 
-    constexpr PhysicalDisplayId kOuterDisplayId = PhysicalDisplayId::fromPort(254u);
-
-    constexpr bool kIsPrimary = false;
-    TestableSurfaceFlinger::FakeHwcDisplayInjector(kOuterDisplayId, hal::DisplayType::PHYSICAL,
-                                                   kIsPrimary)
-            .setHwcDisplayId(kOuterDisplayHwcId)
-            .inject(&mFlinger, mComposer);
-
-    const auto outerDisplay = mFakeDisplayInjector.injectInternalDisplay(
-            [&](FakeDisplayDeviceInjector& injector) {
-                injector.setDisplayModes(mock::cloneForDisplay(kOuterDisplayId, kModes),
-                                         kModeId120);
-            },
-            {.displayId = kOuterDisplayId,
-             .hwcDisplayId = kOuterDisplayHwcId,
-             .isPrimary = kIsPrimary});
-
-    const auto& innerDisplay = mDisplay;
+    EXPECT_TRUE(innerDisplay->isPoweredOn());
+    EXPECT_FALSE(outerDisplay->isPoweredOn());
 
     EXPECT_THAT(innerDisplay, ModeSettledTo(kModeId60));
     EXPECT_THAT(outerDisplay, ModeSettledTo(kModeId120));
 
+    // Only the inner display is powered on.
     mFlinger.onActiveDisplayChanged(nullptr, *innerDisplay);
 
     EXPECT_THAT(innerDisplay, ModeSettledTo(kModeId60));
@@ -388,6 +398,10 @@
     EXPECT_THAT(innerDisplay, ModeSettledTo(kModeId90));
     EXPECT_THAT(outerDisplay, ModeSettledTo(kModeId120));
 
+    innerDisplay->setPowerMode(hal::PowerMode::OFF);
+    outerDisplay->setPowerMode(hal::PowerMode::ON);
+
+    // Only the outer display is powered on.
     mFlinger.onActiveDisplayChanged(innerDisplay.get(), *outerDisplay);
 
     EXPECT_THAT(innerDisplay, ModeSettledTo(kModeId90));
@@ -409,5 +423,107 @@
     EXPECT_THAT(outerDisplay, ModeSettledTo(kModeId60));
 }
 
+TEST_F(DisplayModeSwitchingTest, innerAndOuterDisplay) {
+    const auto [innerDisplay, outerDisplay] = injectOuterDisplay();
+
+    EXPECT_TRUE(innerDisplay->isPoweredOn());
+    EXPECT_FALSE(outerDisplay->isPoweredOn());
+
+    EXPECT_THAT(innerDisplay, ModeSettledTo(kModeId60));
+    EXPECT_THAT(outerDisplay, ModeSettledTo(kModeId120));
+
+    outerDisplay->setPowerMode(hal::PowerMode::ON);
+
+    // Both displays are powered on.
+    mFlinger.onActiveDisplayChanged(nullptr, *innerDisplay);
+
+    EXPECT_THAT(innerDisplay, ModeSettledTo(kModeId60));
+    EXPECT_THAT(outerDisplay, ModeSettledTo(kModeId120));
+
+    EXPECT_EQ(NO_ERROR,
+              mFlinger.setDesiredDisplayModeSpecs(innerDisplay->getDisplayToken().promote(),
+                                                  mock::createDisplayModeSpecs(kModeId90.value(),
+                                                                               false, 0.f, 120.f)));
+
+    EXPECT_EQ(NO_ERROR,
+              mFlinger.setDesiredDisplayModeSpecs(outerDisplay->getDisplayToken().promote(),
+                                                  mock::createDisplayModeSpecs(kModeId60.value(),
+                                                                               false, 0.f, 120.f)));
+
+    EXPECT_THAT(innerDisplay, ModeSwitchingTo(&mFlinger, kModeId90));
+    EXPECT_THAT(outerDisplay, ModeSwitchingTo(&mFlinger, kModeId60));
+
+    const VsyncPeriodChangeTimeline timeline{.refreshRequired = true};
+    EXPECT_CALL(*mComposer,
+                setActiveConfigWithConstraints(kInnerDisplayHwcId,
+                                               hal::HWConfigId(kModeId90.value()), _, _))
+            .WillOnce(DoAll(SetArgPointee<3>(timeline), Return(Error::NONE)));
+
+    EXPECT_CALL(*mComposer,
+                setActiveConfigWithConstraints(kOuterDisplayHwcId,
+                                               hal::HWConfigId(kModeId60.value()), _, _))
+            .WillOnce(DoAll(SetArgPointee<3>(timeline), Return(Error::NONE)));
+
+    mFlinger.commit();
+
+    EXPECT_THAT(innerDisplay, ModeSwitchingTo(&mFlinger, kModeId90));
+    EXPECT_THAT(outerDisplay, ModeSwitchingTo(&mFlinger, kModeId60));
+
+    mFlinger.commit();
+
+    EXPECT_THAT(innerDisplay, ModeSettledTo(kModeId90));
+    EXPECT_THAT(outerDisplay, ModeSettledTo(kModeId60));
+}
+
+TEST_F(DisplayModeSwitchingTest, powerOffDuringModeSet) {
+    const auto [innerDisplay, outerDisplay] = injectOuterDisplay();
+
+    EXPECT_TRUE(innerDisplay->isPoweredOn());
+    EXPECT_FALSE(outerDisplay->isPoweredOn());
+
+    EXPECT_THAT(innerDisplay, ModeSettledTo(kModeId60));
+    EXPECT_THAT(outerDisplay, ModeSettledTo(kModeId120));
+
+    outerDisplay->setPowerMode(hal::PowerMode::ON);
+
+    // Both displays are powered on.
+    mFlinger.onActiveDisplayChanged(nullptr, *innerDisplay);
+
+    EXPECT_THAT(innerDisplay, ModeSettledTo(kModeId60));
+    EXPECT_THAT(outerDisplay, ModeSettledTo(kModeId120));
+
+    EXPECT_EQ(NO_ERROR,
+              mFlinger.setDesiredDisplayModeSpecs(innerDisplay->getDisplayToken().promote(),
+                                                  mock::createDisplayModeSpecs(kModeId90.value(),
+                                                                               false, 0.f, 120.f)));
+
+    EXPECT_EQ(NO_ERROR,
+              mFlinger.setDesiredDisplayModeSpecs(outerDisplay->getDisplayToken().promote(),
+                                                  mock::createDisplayModeSpecs(kModeId60.value(),
+                                                                               false, 0.f, 120.f)));
+
+    EXPECT_THAT(innerDisplay, ModeSwitchingTo(&mFlinger, kModeId90));
+    EXPECT_THAT(outerDisplay, ModeSwitchingTo(&mFlinger, kModeId60));
+
+    // Power off the outer display before the mode has been set.
+    outerDisplay->setPowerMode(hal::PowerMode::OFF);
+
+    const VsyncPeriodChangeTimeline timeline{.refreshRequired = true};
+    EXPECT_CALL(*mComposer,
+                setActiveConfigWithConstraints(kInnerDisplayHwcId,
+                                               hal::HWConfigId(kModeId90.value()), _, _))
+            .WillOnce(DoAll(SetArgPointee<3>(timeline), Return(Error::NONE)));
+
+    mFlinger.commit();
+
+    EXPECT_THAT(innerDisplay, ModeSwitchingTo(&mFlinger, kModeId90));
+    EXPECT_THAT(outerDisplay, ModeSettledTo(kModeId120));
+
+    mFlinger.commit();
+
+    EXPECT_THAT(innerDisplay, ModeSettledTo(kModeId90));
+    EXPECT_THAT(outerDisplay, ModeSettledTo(kModeId120));
+}
+
 } // namespace
 } // namespace android
diff --git a/services/surfaceflinger/tests/unittests/SurfaceFlinger_FoldableTest.cpp b/services/surfaceflinger/tests/unittests/SurfaceFlinger_FoldableTest.cpp
index bd2344c..ed8d909 100644
--- a/services/surfaceflinger/tests/unittests/SurfaceFlinger_FoldableTest.cpp
+++ b/services/surfaceflinger/tests/unittests/SurfaceFlinger_FoldableTest.cpp
@@ -55,13 +55,17 @@
     sp<DisplayDevice> mInnerDisplay, mOuterDisplay;
 };
 
-TEST_F(FoldableTest, foldUnfold) {
+TEST_F(FoldableTest, promotesPacesetterOnBoot) {
     // When the device boots, the inner display should be the pacesetter.
     ASSERT_EQ(mFlinger.scheduler()->pacesetterDisplayId(), kInnerDisplayId);
 
     // ...and should still be after powering on.
     mFlinger.setPowerModeInternal(mInnerDisplay, PowerMode::ON);
     ASSERT_EQ(mFlinger.scheduler()->pacesetterDisplayId(), kInnerDisplayId);
+}
+
+TEST_F(FoldableTest, promotesPacesetterOnFoldUnfold) {
+    mFlinger.setPowerModeInternal(mInnerDisplay, PowerMode::ON);
 
     // The outer display should become the pacesetter after folding.
     mFlinger.setPowerModeInternal(mInnerDisplay, PowerMode::OFF);
@@ -72,6 +76,10 @@
     mFlinger.setPowerModeInternal(mOuterDisplay, PowerMode::OFF);
     mFlinger.setPowerModeInternal(mInnerDisplay, PowerMode::ON);
     ASSERT_EQ(mFlinger.scheduler()->pacesetterDisplayId(), kInnerDisplayId);
+}
+
+TEST_F(FoldableTest, promotesPacesetterOnConcurrentPowerOn) {
+    mFlinger.setPowerModeInternal(mInnerDisplay, PowerMode::ON);
 
     // The inner display should stay the pacesetter if both are powered on.
     // TODO(b/255635821): The pacesetter should depend on the displays' refresh rates.
@@ -81,6 +89,28 @@
     // The outer display should become the pacesetter if designated.
     mFlinger.scheduler()->setPacesetterDisplay(kOuterDisplayId);
     ASSERT_EQ(mFlinger.scheduler()->pacesetterDisplayId(), kOuterDisplayId);
+
+    // The inner display should become the pacesetter if designated.
+    mFlinger.scheduler()->setPacesetterDisplay(kInnerDisplayId);
+    ASSERT_EQ(mFlinger.scheduler()->pacesetterDisplayId(), kInnerDisplayId);
+}
+
+TEST_F(FoldableTest, promotesPacesetterOnConcurrentPowerOff) {
+    mFlinger.setPowerModeInternal(mInnerDisplay, PowerMode::ON);
+    mFlinger.setPowerModeInternal(mOuterDisplay, PowerMode::ON);
+
+    // The outer display should become the pacesetter if the inner display powers off.
+    mFlinger.setPowerModeInternal(mInnerDisplay, PowerMode::OFF);
+    ASSERT_EQ(mFlinger.scheduler()->pacesetterDisplayId(), kOuterDisplayId);
+
+    // The outer display should stay the pacesetter if both are powered on.
+    // TODO(b/255635821): The pacesetter should depend on the displays' refresh rates.
+    mFlinger.setPowerModeInternal(mInnerDisplay, PowerMode::ON);
+    ASSERT_EQ(mFlinger.scheduler()->pacesetterDisplayId(), kOuterDisplayId);
+
+    // The inner display should become the pacesetter if the outer display powers off.
+    mFlinger.setPowerModeInternal(mOuterDisplay, PowerMode::OFF);
+    ASSERT_EQ(mFlinger.scheduler()->pacesetterDisplayId(), kInnerDisplayId);
 }
 
 TEST_F(FoldableTest, doesNotRequestHardwareVsyncIfPoweredOff) {
diff --git a/services/surfaceflinger/tests/unittests/TestableScheduler.h b/services/surfaceflinger/tests/unittests/TestableScheduler.h
index f3c9d0d..151b178 100644
--- a/services/surfaceflinger/tests/unittests/TestableScheduler.h
+++ b/services/surfaceflinger/tests/unittests/TestableScheduler.h
@@ -180,7 +180,7 @@
 private:
     // ICompositor overrides:
     void configure() override {}
-    bool commit(const scheduler::FrameTarget&) override { return false; }
+    bool commit(PhysicalDisplayId, const scheduler::FrameTargets&) override { return false; }
     CompositeResultsPerDisplay composite(PhysicalDisplayId,
                                          const scheduler::FrameTargeters&) override {
         return {};
diff --git a/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h b/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
index 9b3a893..e59d44d 100644
--- a/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
+++ b/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
@@ -386,10 +386,19 @@
                                   .sfWorkDuration = 10ms},
                                  *mScheduler->getVsyncSchedule());
 
-        mFlinger->commit(frameTargeter.target());
+        scheduler::FrameTargets targets;
+        scheduler::FrameTargeters targeters;
+
+        for (const auto& [id, display] :
+             FTL_FAKE_GUARD(mFlinger->mStateLock, mFlinger->mPhysicalDisplays)) {
+            targets.try_emplace(id, &frameTargeter.target());
+            targeters.try_emplace(id, &frameTargeter);
+        }
+
+        mFlinger->commit(displayId, targets);
 
         if (composite) {
-            mFlinger->composite(displayId, ftl::init::map(displayId, &frameTargeter));
+            mFlinger->composite(displayId, targeters);
         }
     }
 
diff --git a/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp b/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp
index 644b8c7..1f2a1ed 100644
--- a/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TransactionApplicationTest.cpp
@@ -306,6 +306,47 @@
     ~FakeExternalTexture() = default;
 };
 
+TEST_F(TransactionApplicationTest, ApplyTokensUseDifferentQueues) {
+    auto applyToken1 = sp<BBinder>::make();
+    auto applyToken2 = sp<BBinder>::make();
+
+    // Transaction 1 has a buffer with an unsignaled fence, so it should not be ready to be applied.
+    TransactionState transaction1;
+    transaction1.applyToken = applyToken1;
+    transaction1.id = 42069;
+    transaction1.states.emplace_back();
+    transaction1.states[0].state.what |= layer_state_t::eBufferChanged;
+    transaction1.states[0].state.bufferData =
+            std::make_shared<fake::BufferData>(/* bufferId */ 1, /* width */ 1, /* height */ 1,
+                                               /* pixelFormat */ 0, /* outUsage */ 0);
+    transaction1.states[0].externalTexture =
+            std::make_shared<FakeExternalTexture>(*transaction1.states[0].state.bufferData);
+    transaction1.states[0].state.surface =
+            sp<Layer>::make(LayerCreationArgs(mFlinger.flinger(), nullptr, "TestLayer", 0, {}))
+                    ->getHandle();
+    auto fence = sp<mock::MockFence>::make();
+    EXPECT_CALL(*fence, getStatus()).WillRepeatedly(Return(Fence::Status::Unsignaled));
+    transaction1.states[0].state.bufferData->acquireFence = std::move(fence);
+    transaction1.states[0].state.bufferData->flags = BufferData::BufferDataChange::fenceChanged;
+    transaction1.isAutoTimestamp = true;
+
+    // Transaction 2 should be ready to be applied.
+    TransactionState transaction2;
+    transaction2.applyToken = applyToken2;
+    transaction2.id = 2;
+    transaction2.isAutoTimestamp = true;
+
+    mFlinger.setTransactionStateInternal(transaction1);
+    mFlinger.setTransactionStateInternal(transaction2);
+    mFlinger.flushTransactionQueues();
+    auto transactionQueues = mFlinger.getPendingTransactionQueue();
+
+    // Transaction 1 is still in its queue.
+    EXPECT_EQ(transactionQueues[applyToken1].size(), 1u);
+    // Transaction 2 has been dequeued.
+    EXPECT_EQ(transactionQueues[applyToken2].size(), 0u);
+}
+
 class LatchUnsignaledTest : public TransactionApplicationTest {
 public:
     void TearDown() override {
diff --git a/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp b/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp
index 764d19b..00b5bf0 100644
--- a/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TransactionFrameTracerTest.cpp
@@ -61,10 +61,7 @@
         return sp<Layer>::make(args);
     }
 
-    void commitTransaction(Layer* layer) {
-        auto c = layer->getDrawingState();
-        layer->commitTransaction(c);
-    }
+    void commitTransaction(Layer* layer) { layer->commitTransaction(); }
 
     TestableSurfaceFlinger mFlinger;
     renderengine::mock::RenderEngine* mRenderEngine = new renderengine::mock::RenderEngine();
diff --git a/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp b/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp
index e2c6491..caa265f 100644
--- a/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp
+++ b/services/surfaceflinger/tests/unittests/TransactionSurfaceFrameTest.cpp
@@ -60,10 +60,7 @@
         return sp<Layer>::make(args);
     }
 
-    void commitTransaction(Layer* layer) {
-        auto c = layer->getDrawingState();
-        layer->commitTransaction(c);
-    }
+    void commitTransaction(Layer* layer) { layer->commitTransaction(); }
 
     TestableSurfaceFlinger mFlinger;
     renderengine::mock::RenderEngine* mRenderEngine = new renderengine::mock::RenderEngine();
diff --git a/services/surfaceflinger/tests/unittests/mock/DisplayHardware/MockIPowerHintSession.h b/services/surfaceflinger/tests/unittests/mock/DisplayHardware/MockIPowerHintSession.h
index 2b9520f..364618d 100644
--- a/services/surfaceflinger/tests/unittests/mock/DisplayHardware/MockIPowerHintSession.h
+++ b/services/surfaceflinger/tests/unittests/mock/DisplayHardware/MockIPowerHintSession.h
@@ -23,6 +23,7 @@
 
 using aidl::android::hardware::power::IPowerHintSession;
 using aidl::android::hardware::power::SessionHint;
+using aidl::android::hardware::power::SessionMode;
 using android::binder::Status;
 
 using namespace aidl::android::hardware::power;
@@ -45,6 +46,7 @@
                 (override));
     MOCK_METHOD(ndk::ScopedAStatus, sendHint, (SessionHint), (override));
     MOCK_METHOD(ndk::ScopedAStatus, setThreads, (const ::std::vector<int32_t>&), (override));
+    MOCK_METHOD(ndk::ScopedAStatus, setMode, (SessionMode, bool), (override));
 };
 
 } // namespace android::Hwc2::mock
diff --git a/services/vibratorservice/test/VibratorCallbackSchedulerTest.cpp b/services/vibratorservice/test/VibratorCallbackSchedulerTest.cpp
index 4c0910a..106ab9e 100644
--- a/services/vibratorservice/test/VibratorCallbackSchedulerTest.cpp
+++ b/services/vibratorservice/test/VibratorCallbackSchedulerTest.cpp
@@ -38,6 +38,9 @@
 
 // -------------------------------------------------------------------------------------------------
 
+// Delay allowed for the scheduler to process callbacks during this test.
+static const auto TEST_TIMEOUT = 50ms;
+
 class VibratorCallbackSchedulerTest : public Test {
 public:
     void SetUp() override {
@@ -67,46 +70,51 @@
         return std::vector<int32_t>(mExpiredCallbacks);
     }
 
-    bool waitForCallbacks(uint32_t callbackCount, milliseconds timeout) {
-        time_point<steady_clock> expiration = steady_clock::now() + timeout;
+    int32_t waitForCallbacks(int32_t callbackCount, milliseconds timeout) {
+        time_point<steady_clock> expiration = steady_clock::now() + timeout + TEST_TIMEOUT;
+        int32_t expiredCallbackCount = 0;
         while (steady_clock::now() < expiration) {
             std::lock_guard<std::mutex> lock(mMutex);
-            if (callbackCount <= mExpiredCallbacks.size()) {
-                return true;
+            expiredCallbackCount = mExpiredCallbacks.size();
+            if (callbackCount <= expiredCallbackCount) {
+                return expiredCallbackCount;
             }
             mCondition.wait_until(mMutex, expiration);
         }
-        return false;
+        return expiredCallbackCount;
     }
 };
 
 // -------------------------------------------------------------------------------------------------
 
 TEST_F(VibratorCallbackSchedulerTest, TestScheduleRunsOnlyAfterDelay) {
-    mScheduler->schedule(createCallback(1), 15ms);
+    time_point<steady_clock> startTime = steady_clock::now();
+    mScheduler->schedule(createCallback(1), 50ms);
 
-    // Not triggered before delay.
-    ASSERT_FALSE(waitForCallbacks(1, 10ms));
-    ASSERT_TRUE(getExpiredCallbacks().empty());
+    ASSERT_EQ(1, waitForCallbacks(1, 50ms));
+    time_point<steady_clock> callbackTime = steady_clock::now();
 
-    ASSERT_TRUE(waitForCallbacks(1, 10ms));
+    // Callback happened at least 50ms after the beginning of the test.
+    ASSERT_TRUE(startTime + 50ms <= callbackTime);
     ASSERT_THAT(getExpiredCallbacks(), ElementsAre(1));
 }
 
 TEST_F(VibratorCallbackSchedulerTest, TestScheduleMultipleCallbacksRunsInDelayOrder) {
-    mScheduler->schedule(createCallback(1), 10ms);
-    mScheduler->schedule(createCallback(2), 5ms);
-    mScheduler->schedule(createCallback(3), 1ms);
+    // Schedule the callbacks with delays long enough that all 3 are queued together and run in delay order.
+    mScheduler->schedule(createCallback(1), 50ms);
+    mScheduler->schedule(createCallback(2), 40ms);
+    mScheduler->schedule(createCallback(3), 10ms);
 
-    ASSERT_TRUE(waitForCallbacks(3, 15ms));
+    ASSERT_EQ(3, waitForCallbacks(3, 50ms));
     ASSERT_THAT(getExpiredCallbacks(), ElementsAre(3, 2, 1));
 }
 
 TEST_F(VibratorCallbackSchedulerTest, TestDestructorDropsPendingCallbacksAndKillsThread) {
-    mScheduler->schedule(createCallback(1), 5ms);
+    // Schedule the callback with a delay long enough that the scheduler is destroyed while it is still pending.
+    mScheduler->schedule(createCallback(1), 50ms);
     mScheduler.reset(nullptr);
 
-    // Should time out waiting for callback to run.
-    ASSERT_FALSE(waitForCallbacks(1, 10ms));
+    // Should time out waiting for the callback to run.
+    ASSERT_EQ(0, waitForCallbacks(1, 50ms));
     ASSERT_TRUE(getExpiredCallbacks().empty());
 }
diff --git a/vulkan/include/vulkan/vk_android_native_buffer.h b/vulkan/include/vulkan/vk_android_native_buffer.h
index 40cf9fb..e78f470 100644
--- a/vulkan/include/vulkan/vk_android_native_buffer.h
+++ b/vulkan/include/vulkan/vk_android_native_buffer.h
@@ -55,7 +55,12 @@
  * This version of the extension is largely designed to clean up the mix of
  * GrallocUsage and GrallocUsage2
  */
-#define VK_ANDROID_NATIVE_BUFFER_SPEC_VERSION 9
+/*
+ * NOTE ON VK_ANDROID_NATIVE_BUFFER_SPEC_VERSION 10
+ *
+ * This version of the extension fixes a bug introduced in version 9
+ */
+#define VK_ANDROID_NATIVE_BUFFER_SPEC_VERSION 10
 #define VK_ANDROID_NATIVE_BUFFER_EXTENSION_NAME "VK_ANDROID_native_buffer"
 
 #define VK_ANDROID_NATIVE_BUFFER_ENUM(type, id) \
@@ -69,6 +74,8 @@
     VK_ANDROID_NATIVE_BUFFER_ENUM(VkStructureType, 2)
 #define VK_STRUCTURE_TYPE_GRALLOC_USAGE_INFO_ANDROID \
     VK_ANDROID_NATIVE_BUFFER_ENUM(VkStructureType, 3)
+#define VK_STRUCTURE_TYPE_GRALLOC_USAGE_INFO_2_ANDROID \
+    VK_ANDROID_NATIVE_BUFFER_ENUM(VkStructureType, 4)
 
 /* clang-format off */
 typedef enum VkSwapchainImageUsageFlagBitsANDROID {
@@ -152,6 +159,23 @@
     VkImageUsageFlags                 imageUsage;
 } VkGrallocUsageInfoANDROID;
 
+/*
+ * struct VkGrallocUsageInfo2ANDROID
+ *
+ * sType: VK_STRUCTURE_TYPE_GRALLOC_USAGE_INFO_2_ANDROID
+ * pNext: NULL or a pointer to a structure extending this structure
+ * format: value specifying the format the image will be created with
+ * imageUsage: bitmask of VkImageUsageFlagBits describing intended usage
+ * swapchainImageUsage: bitmask of VkSwapchainImageUsageFlagsANDROID describing intended swapchain usage
+ */
+typedef struct {
+    VkStructureType                   sType;
+    const void*                       pNext;
+    VkFormat                          format;
+    VkImageUsageFlags                 imageUsage;
+    VkSwapchainImageUsageFlagsANDROID swapchainImageUsage;
+} VkGrallocUsageInfo2ANDROID;
+
 /* DEPRECATED in SPEC_VERSION 6 */
 typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainGrallocUsageANDROID)(
     VkDevice                          device,
@@ -168,12 +192,18 @@
     uint64_t*                         grallocConsumerUsage,
     uint64_t*                         grallocProducerUsage);
 
-/* ADDED in SPEC_VERSION 9 */
+/* DEPRECATED in SPEC_VERSION 10 */
 typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainGrallocUsage3ANDROID)(
     VkDevice                          device,
     const VkGrallocUsageInfoANDROID*  grallocUsageInfo,
     uint64_t*                         grallocUsage);
 
+/* ADDED in SPEC_VERSION 10 */
+typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainGrallocUsage4ANDROID)(
+    VkDevice                          device,
+    const VkGrallocUsageInfo2ANDROID* grallocUsageInfo,
+    uint64_t*                         grallocUsage);
+
 typedef VkResult (VKAPI_PTR *PFN_vkAcquireImageANDROID)(
     VkDevice                          device,
     VkImage                           image,
@@ -208,13 +238,20 @@
     uint64_t*                         grallocProducerUsage
 );
 
-/* ADDED in SPEC_VERSION 9 */
+/* DEPRECATED in SPEC_VERSION 10 */
 VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainGrallocUsage3ANDROID(
     VkDevice                          device,
     const VkGrallocUsageInfoANDROID*  grallocUsageInfo,
     uint64_t*                         grallocUsage
 );
 
+/* ADDED in SPEC_VERSION 10 */
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainGrallocUsage4ANDROID(
+    VkDevice                          device,
+    const VkGrallocUsageInfo2ANDROID* grallocUsageInfo,
+    uint64_t*                         grallocUsage
+);
+
 VKAPI_ATTR VkResult VKAPI_CALL vkAcquireImageANDROID(
     VkDevice                          device,
     VkImage                           image,
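A minimal sketch of how a driver might back the new vkGetSwapchainGrallocUsage4ANDROID entry point declared above; the helper ToGrallocUsageBits and the error handling are illustrative assumptions, not part of this change:

    VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainGrallocUsage4ANDROID(
        VkDevice /*device*/,
        const VkGrallocUsageInfo2ANDROID* grallocUsageInfo,
        uint64_t* grallocUsage) {
        if (grallocUsageInfo->sType != VK_STRUCTURE_TYPE_GRALLOC_USAGE_INFO_2_ANDROID)
            return VK_ERROR_INITIALIZATION_FAILED;
        // Translate format, image usage, and swapchain usage into gralloc usage
        // bits; ToGrallocUsageBits is a hypothetical driver-specific helper.
        *grallocUsage = ToGrallocUsageBits(grallocUsageInfo->format,
                                           grallocUsageInfo->imageUsage,
                                           grallocUsageInfo->swapchainImageUsage);
        return VK_SUCCESS;
    }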
diff --git a/vulkan/libvulkan/driver.cpp b/vulkan/libvulkan/driver.cpp
index 273cdd5..bdba27e 100644
--- a/vulkan/libvulkan/driver.cpp
+++ b/vulkan/libvulkan/driver.cpp
@@ -763,6 +763,17 @@
             continue;
         }
 
+        // Ignore duplicate extensions (see: b/288929054)
+        bool duplicate_entry = false;
+        for (uint32_t j = 0; j < filter.name_count; j++) {
+            if (strcmp(name, filter.names[j]) == 0) {
+                duplicate_entry = true;
+                break;
+            }
+        }
+        if (duplicate_entry)
+            continue;
+
         filter.names[filter.name_count++] = name;
         if (ext_bit != ProcHook::EXTENSION_UNKNOWN) {
             if (ext_bit == ProcHook::ANDROID_native_buffer)
@@ -1422,13 +1433,15 @@
     if ((wrapper.GetHalExtensions()[ProcHook::ANDROID_native_buffer]) &&
         !data->driver.GetSwapchainGrallocUsageANDROID &&
         !data->driver.GetSwapchainGrallocUsage2ANDROID &&
-        !data->driver.GetSwapchainGrallocUsage3ANDROID) {
+        !data->driver.GetSwapchainGrallocUsage3ANDROID &&
+        !data->driver.GetSwapchainGrallocUsage4ANDROID) {
         ALOGE(
             "Driver's implementation of ANDROID_native_buffer is broken;"
             " must expose at least one of "
             "vkGetSwapchainGrallocUsageANDROID or "
             "vkGetSwapchainGrallocUsage2ANDROID or "
-            "vkGetSwapchainGrallocUsage3ANDROID");
+            "vkGetSwapchainGrallocUsage3ANDROID or "
+            "vkGetSwapchainGrallocUsage4ANDROID");
 
         data->driver.DestroyDevice(dev, pAllocator);
         FreeDeviceData(data, data_allocator);
diff --git a/vulkan/libvulkan/driver_gen.cpp b/vulkan/libvulkan/driver_gen.cpp
index 798af5c..8f09008 100644
--- a/vulkan/libvulkan/driver_gen.cpp
+++ b/vulkan/libvulkan/driver_gen.cpp
@@ -512,6 +512,13 @@
         nullptr,
     },
     {
+        "vkGetSwapchainGrallocUsage4ANDROID",
+        ProcHook::DEVICE,
+        ProcHook::ANDROID_native_buffer,
+        nullptr,
+        nullptr,
+    },
+    {
         "vkGetSwapchainGrallocUsageANDROID",
         ProcHook::DEVICE,
         ProcHook::ANDROID_native_buffer,
@@ -692,6 +699,7 @@
     INIT_PROC_EXT(ANDROID_native_buffer, false, dev, GetSwapchainGrallocUsageANDROID);
     INIT_PROC_EXT(ANDROID_native_buffer, false, dev, GetSwapchainGrallocUsage2ANDROID);
     INIT_PROC_EXT(ANDROID_native_buffer, false, dev, GetSwapchainGrallocUsage3ANDROID);
+    INIT_PROC_EXT(ANDROID_native_buffer, false, dev, GetSwapchainGrallocUsage4ANDROID);
     INIT_PROC_EXT(ANDROID_native_buffer, true, dev, AcquireImageANDROID);
     INIT_PROC_EXT(ANDROID_native_buffer, true, dev, QueueSignalReleaseImageANDROID);
     // clang-format on
diff --git a/vulkan/libvulkan/driver_gen.h b/vulkan/libvulkan/driver_gen.h
index 31ba04b..4527214 100644
--- a/vulkan/libvulkan/driver_gen.h
+++ b/vulkan/libvulkan/driver_gen.h
@@ -128,6 +128,7 @@
     PFN_vkGetSwapchainGrallocUsageANDROID GetSwapchainGrallocUsageANDROID;
     PFN_vkGetSwapchainGrallocUsage2ANDROID GetSwapchainGrallocUsage2ANDROID;
     PFN_vkGetSwapchainGrallocUsage3ANDROID GetSwapchainGrallocUsage3ANDROID;
+    PFN_vkGetSwapchainGrallocUsage4ANDROID GetSwapchainGrallocUsage4ANDROID;
     PFN_vkAcquireImageANDROID AcquireImageANDROID;
     PFN_vkQueueSignalReleaseImageANDROID QueueSignalReleaseImageANDROID;
     // clang-format on
diff --git a/vulkan/libvulkan/layers_extensions.cpp b/vulkan/libvulkan/layers_extensions.cpp
index a14fed2..d059f8f 100644
--- a/vulkan/libvulkan/layers_extensions.cpp
+++ b/vulkan/libvulkan/layers_extensions.cpp
@@ -23,6 +23,7 @@
 #include <dlfcn.h>
 #include <string.h>
 #include <sys/prctl.h>
+#include <unistd.h>
 
 #include <mutex>
 #include <string>
@@ -362,6 +363,7 @@
 void ForEachFileInZip(const std::string& zipname,
                       const std::string& dir_in_zip,
                       Functor functor) {
+    static const size_t kPageSize = getpagesize();
     int32_t err;
     ZipArchiveHandle zip = nullptr;
     if ((err = OpenArchive(zipname.c_str(), &zip)) != 0) {
@@ -389,7 +391,7 @@
         // the APK. Loading still may fail for other reasons, but this at least
         // lets us avoid failed-to-load log messages in the typical case of
         // compressed and/or unaligned libraries.
-        if (entry.method != kCompressStored || entry.offset % PAGE_SIZE != 0)
+        if (entry.method != kCompressStored || entry.offset % kPageSize != 0)
             continue;
         functor(filename);
     }
diff --git a/vulkan/libvulkan/swapchain.cpp b/vulkan/libvulkan/swapchain.cpp
index 07b9569..bffbe9d 100644
--- a/vulkan/libvulkan/swapchain.cpp
+++ b/vulkan/libvulkan/swapchain.cpp
@@ -1576,7 +1576,47 @@
     void* usage_info_pNext = nullptr;
     VkImageCompressionControlEXT image_compression = {};
     uint64_t native_usage = 0;
-    if (dispatch.GetSwapchainGrallocUsage3ANDROID) {
+    if (dispatch.GetSwapchainGrallocUsage4ANDROID) {
+        ATRACE_BEGIN("GetSwapchainGrallocUsage4ANDROID");
+        VkGrallocUsageInfo2ANDROID gralloc_usage_info = {};
+        gralloc_usage_info.sType =
+            VK_STRUCTURE_TYPE_GRALLOC_USAGE_INFO_2_ANDROID;
+        gralloc_usage_info.format = create_info->imageFormat;
+        gralloc_usage_info.imageUsage = create_info->imageUsage;
+        gralloc_usage_info.swapchainImageUsage = swapchain_image_usage;
+
+        // Look through the pNext chain for an image compression control struct.
+        // If one is found and the appropriate extensions are enabled,
+        // append it to the gralloc usage info's pNext chain.
+        const VkSwapchainCreateInfoKHR* create_infos = create_info;
+        while (create_infos->pNext) {
+            create_infos = reinterpret_cast<const VkSwapchainCreateInfoKHR*>(
+                create_infos->pNext);
+            switch (create_infos->sType) {
+                case VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_CONTROL_EXT: {
+                    const VkImageCompressionControlEXT* compression_infos =
+                        reinterpret_cast<const VkImageCompressionControlEXT*>(
+                            create_infos);
+                    image_compression = *compression_infos;
+                    image_compression.pNext = nullptr;
+                    usage_info_pNext = &image_compression;
+                } break;
+
+                default:
+                    // Ignore all other info structs
+                    break;
+            }
+        }
+        gralloc_usage_info.pNext = usage_info_pNext;
+
+        result = dispatch.GetSwapchainGrallocUsage4ANDROID(
+            device, &gralloc_usage_info, &native_usage);
+        ATRACE_END();
+        if (result != VK_SUCCESS) {
+            ALOGE("vkGetSwapchainGrallocUsage4ANDROID failed: %d", result);
+            return VK_ERROR_SURFACE_LOST_KHR;
+        }
+    } else if (dispatch.GetSwapchainGrallocUsage3ANDROID) {
         ATRACE_BEGIN("GetSwapchainGrallocUsage3ANDROID");
         VkGrallocUsageInfoANDROID gralloc_usage_info = {};
         gralloc_usage_info.sType = VK_STRUCTURE_TYPE_GRALLOC_USAGE_INFO_ANDROID;
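On the application side, the pNext walk above only finds compression info if the app chains VkImageCompressionControlEXT onto its swapchain create info; a hedged usage sketch, with field values chosen purely for illustration:

    VkImageCompressionControlEXT compression = {};
    compression.sType = VK_STRUCTURE_TYPE_IMAGE_COMPRESSION_CONTROL_EXT;
    compression.flags = VK_IMAGE_COMPRESSION_FIXED_RATE_DEFAULT_EXT;

    VkSwapchainCreateInfoKHR create_info = {};
    create_info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    create_info.pNext = &compression;
    // ...remaining swapchain fields as usual; the loader then copies the
    // compression struct onto VkGrallocUsageInfo2ANDROID::pNext before
    // querying the driver via vkGetSwapchainGrallocUsage4ANDROID.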
diff --git a/vulkan/nulldrv/null_driver.cpp b/vulkan/nulldrv/null_driver.cpp
index f998b1a..2e87f17 100644
--- a/vulkan/nulldrv/null_driver.cpp
+++ b/vulkan/nulldrv/null_driver.cpp
@@ -959,6 +959,17 @@
     return VK_SUCCESS;
 }
 
+VkResult GetSwapchainGrallocUsage4ANDROID(
+    VkDevice,
+    const VkGrallocUsageInfo2ANDROID* grallocUsageInfo,
+    uint64_t* grallocUsage) {
+    // The null driver never reads or writes the gralloc buffer
+    ALOGV("TODO: vk%s - grallocUsageInfo->format:%i", __FUNCTION__,
+          grallocUsageInfo->format);
+    *grallocUsage = 0;
+    return VK_SUCCESS;
+}
+
 VkResult AcquireImageANDROID(VkDevice,
                              VkImage,
                              int fence,
diff --git a/vulkan/nulldrv/null_driver_gen.cpp b/vulkan/nulldrv/null_driver_gen.cpp
index 0cb7bd3..935535f 100644
--- a/vulkan/nulldrv/null_driver_gen.cpp
+++ b/vulkan/nulldrv/null_driver_gen.cpp
@@ -262,6 +262,7 @@
     {"vkGetSemaphoreCounterValue", reinterpret_cast<PFN_vkVoidFunction>(static_cast<PFN_vkGetSemaphoreCounterValue>(GetSemaphoreCounterValue))},
     {"vkGetSwapchainGrallocUsage2ANDROID", reinterpret_cast<PFN_vkVoidFunction>(static_cast<PFN_vkGetSwapchainGrallocUsage2ANDROID>(GetSwapchainGrallocUsage2ANDROID))},
     {"vkGetSwapchainGrallocUsage3ANDROID", reinterpret_cast<PFN_vkVoidFunction>(static_cast<PFN_vkGetSwapchainGrallocUsage3ANDROID>(GetSwapchainGrallocUsage3ANDROID))},
+    {"vkGetSwapchainGrallocUsage4ANDROID", reinterpret_cast<PFN_vkVoidFunction>(static_cast<PFN_vkGetSwapchainGrallocUsage4ANDROID>(GetSwapchainGrallocUsage4ANDROID))},
     {"vkGetSwapchainGrallocUsageANDROID", reinterpret_cast<PFN_vkVoidFunction>(static_cast<PFN_vkGetSwapchainGrallocUsageANDROID>(GetSwapchainGrallocUsageANDROID))},
     {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(static_cast<PFN_vkInvalidateMappedMemoryRanges>(InvalidateMappedMemoryRanges))},
     {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(static_cast<PFN_vkMapMemory>(MapMemory))},
diff --git a/vulkan/nulldrv/null_driver_gen.h b/vulkan/nulldrv/null_driver_gen.h
index 5c7fea0..fb3bd05 100644
--- a/vulkan/nulldrv/null_driver_gen.h
+++ b/vulkan/nulldrv/null_driver_gen.h
@@ -210,6 +210,7 @@
 VKAPI_ATTR VkResult GetSwapchainGrallocUsageANDROID(VkDevice device, VkFormat format, VkImageUsageFlags imageUsage, int* grallocUsage);
 VKAPI_ATTR VkResult GetSwapchainGrallocUsage2ANDROID(VkDevice device, VkFormat format, VkImageUsageFlags imageUsage, VkSwapchainImageUsageFlagsANDROID swapchainImageUsage, uint64_t* grallocConsumerUsage, uint64_t* grallocProducerUsage);
 VKAPI_ATTR VkResult GetSwapchainGrallocUsage3ANDROID(VkDevice device, const VkGrallocUsageInfoANDROID* grallocUsageInfo, uint64_t* grallocUsage);
+VKAPI_ATTR VkResult GetSwapchainGrallocUsage4ANDROID(VkDevice device, const VkGrallocUsageInfo2ANDROID* grallocUsageInfo, uint64_t* grallocUsage);
 VKAPI_ATTR VkResult AcquireImageANDROID(VkDevice device, VkImage image, int nativeFenceFd, VkSemaphore semaphore, VkFence fence);
 VKAPI_ATTR VkResult QueueSignalReleaseImageANDROID(VkQueue queue, uint32_t waitSemaphoreCount, const VkSemaphore* pWaitSemaphores, VkImage image, int* pNativeFenceFd);
 VKAPI_ATTR VkResult CreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);
diff --git a/vulkan/scripts/generator_common.py b/vulkan/scripts/generator_common.py
index c25c6cb..866c1b7 100644
--- a/vulkan/scripts/generator_common.py
+++ b/vulkan/scripts/generator_common.py
@@ -70,6 +70,7 @@
     'vkGetSwapchainGrallocUsageANDROID',
     'vkGetSwapchainGrallocUsage2ANDROID',
     'vkGetSwapchainGrallocUsage3ANDROID',
+    'vkGetSwapchainGrallocUsage4ANDROID',
 ]
 
 # Dict for mapping dispatch table to a type.