Merge "Log Binder Proxy Limit reached only once" into pi-dev
diff --git a/cmds/atrace/atrace.cpp b/cmds/atrace/atrace.cpp
index f65f4f8..fe9dd56 100644
--- a/cmds/atrace/atrace.cpp
+++ b/cmds/atrace/atrace.cpp
@@ -58,6 +58,7 @@
 #define MAX_SYS_FILES 10
 
 const char* k_traceTagsProperty = "debug.atrace.tags.enableflags";
+const char* k_userInitiatedTraceProperty = "debug.atrace.user_initiated";
 
 const char* k_traceAppsNumberProperty = "debug.atrace.app_number";
 const char* k_traceAppsPropertyTemplate = "debug.atrace.app_%d";
@@ -125,6 +126,7 @@
         { OPT,      "events/sched/sched_waking/enable" },
         { OPT,      "events/sched/sched_blocked_reason/enable" },
         { OPT,      "events/sched/sched_cpu_hotplug/enable" },
+        { OPT,      "events/sched/sched_pi_setprio/enable" },
         { OPT,      "events/cgroup/enable" },
     } },
     { "irq",        "IRQ Events",   0, {
@@ -185,7 +187,10 @@
         { REQ,      "events/cpufreq_interactive/enable" },
     } },
     { "sync",       "Synchronization",  0, {
-        { REQ,      "events/sync/enable" },
+        // before Linux kernel 4.9
+        { OPT,      "events/sync/enable" },
+        // starting in Linux kernel 4.9
+        { OPT,      "events/fence/enable" },
     } },
     { "workq",      "Kernel Workqueues", 0, {
         { REQ,      "events/workqueue/enable" },
@@ -443,6 +448,16 @@
     return setKernelOptionEnable(k_tracingOverwriteEnablePath, enable);
 }
 
+// Set the user initiated trace property
+static bool setUserInitiatedTraceProperty(bool enable)
+{
+    if (!android::base::SetProperty(k_userInitiatedTraceProperty, enable ? "1" : "")) {
+        fprintf(stderr, "error setting user initiated trace system property\n");
+        return false;
+    }
+    return true;
+}
+
 // Enable or disable kernel tracing.
 static bool setTracingEnabled(bool enable)
 {
@@ -836,6 +851,8 @@
 {
     bool ok = true;
 
+    ok &= setUserInitiatedTraceProperty(true);
+
     // Set up the tracing options.
     ok &= setCategoriesEnableFromFile(g_categoriesFile);
     ok &= setTraceOverwriteEnable(g_traceOverwrite);
@@ -883,6 +900,7 @@
     setTraceBufferSizeKB(1);
     setPrintTgidEnableIfPresent(false);
     setKernelTraceFuncs(NULL);
+    setUserInitiatedTraceProperty(false);
 }
 
 // Enable tracing in the kernel.
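
The new debug.atrace.user_initiated property marks a trace as user-requested for the duration of the capture. A minimal sketch, assuming some other system component wants to read it back (the consumer below is hypothetical; only the property name comes from this change):

    #include <android-base/properties.h>

    // Hypothetical consumer: was the currently running trace started by a user?
    static bool isUserInitiatedTrace() {
        // atrace sets the property to "1" while tracing and clears it on cleanup,
        // so the default of false covers both "unset" and "cleared".
        return android::base::GetBoolProperty("debug.atrace.user_initiated", false);
    }
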
diff --git a/cmds/atrace/atrace.rc b/cmds/atrace/atrace.rc
index 8421568..d3d0711 100644
--- a/cmds/atrace/atrace.rc
+++ b/cmds/atrace/atrace.rc
@@ -1,6 +1,6 @@
 ## Permissions to allow system-wide tracing to the kernel trace buffer.
 ##
-on post-fs
+on late-init
 
 # Allow writing to the kernel trace log.
     chmod 0222 /sys/kernel/debug/tracing/trace_marker
@@ -29,6 +29,8 @@
     chmod 0666 /sys/kernel/tracing/events/sched/sched_blocked_reason/enable
     chmod 0666 /sys/kernel/debug/tracing/events/sched/sched_cpu_hotplug/enable
     chmod 0666 /sys/kernel/tracing/events/sched/sched_cpu_hotplug/enable
+    chmod 0666 /sys/kernel/debug/tracing/events/sched/sched_pi_setprio/enable
+    chmod 0666 /sys/kernel/tracing/events/sched/sched_pi_setprio/enable
     chmod 0666 /sys/kernel/debug/tracing/events/cgroup/enable
     chmod 0666 /sys/kernel/tracing/events/cgroup/enable
     chmod 0666 /sys/kernel/debug/tracing/events/power/cpu_frequency/enable
@@ -81,6 +83,11 @@
     chmod 0666 /sys/kernel/tracing/events/i2c/smbus_reply/enable
     chmod 0666 /sys/kernel/debug/tracing/events/lowmemorykiller/enable
     chmod 0666 /sys/kernel/tracing/events/lowmemorykiller/enable
+    chmod 0666 /sys/kernel/debug/tracing/events/sync/enable
+    chmod 0666 /sys/kernel/tracing/events/sync/enable
+    chmod 0666 /sys/kernel/debug/tracing/events/fence/enable
+    chmod 0666 /sys/kernel/tracing/events/fence/enable
+
 
     # disk
     chmod 0666 /sys/kernel/tracing/events/f2fs/f2fs_sync_file_enter/enable
diff --git a/cmds/atrace/atrace_userdebug.rc b/cmds/atrace/atrace_userdebug.rc
index 93a29ba..f4e5b98 100644
--- a/cmds/atrace/atrace_userdebug.rc
+++ b/cmds/atrace/atrace_userdebug.rc
@@ -5,8 +5,6 @@
 # Access control to these files is now entirely in selinux policy.
 
 on post-fs
-    chmod 0666 /sys/kernel/tracing/events/sync/enable
-    chmod 0666 /sys/kernel/debug/tracing/events/sync/enable
     chmod 0666 /sys/kernel/tracing/events/workqueue/enable
     chmod 0666 /sys/kernel/debug/tracing/events/workqueue/enable
     chmod 0666 /sys/kernel/tracing/events/regulator/enable
diff --git a/cmds/dumpstate/dumpstate.cpp b/cmds/dumpstate/dumpstate.cpp
index 3e7d2a0..9d897f5 100644
--- a/cmds/dumpstate/dumpstate.cpp
+++ b/cmds/dumpstate/dumpstate.cpp
@@ -1504,6 +1504,7 @@
 // This method collects dumpsys for telephony debugging only
 static void DumpstateTelephonyOnly() {
     DurationReporter duration_reporter("DUMPSTATE");
+    const CommandOptions DUMPSYS_COMPONENTS_OPTIONS = CommandOptions::WithTimeout(60).Build();
 
     DumpstateRadioCommon();
 
@@ -1529,6 +1530,13 @@
     RunDumpsys("TELEPHONY SERVICES", {"activity", "service", "TelephonyDebugService"});
 
     printf("========================================================\n");
+    printf("== Running Application Services (non-platform)\n");
+    printf("========================================================\n");
+
+    RunDumpsys("APP SERVICES NON-PLATFORM", {"activity", "service", "all-non-platform"},
+            DUMPSYS_COMPONENTS_OPTIONS);
+
+    printf("========================================================\n");
     printf("== dumpstate: done (id %d)\n", ds.id_);
     printf("========================================================\n");
 }
diff --git a/cmds/installd/dexopt.cpp b/cmds/installd/dexopt.cpp
index 2777662..624d7d6 100644
--- a/cmds/installd/dexopt.cpp
+++ b/cmds/installd/dexopt.cpp
@@ -740,6 +740,7 @@
 static void run_profman(const std::vector<unique_fd>& profile_fds,
                         const unique_fd& reference_profile_fd,
                         const std::vector<unique_fd>* apk_fds,
+                        const std::vector<std::string>* dex_locations,
                         bool copy_and_update) {
     const char* profman_bin = is_debug_runtime() ? "/system/bin/profmand" : "/system/bin/profman";
 
@@ -762,6 +763,14 @@
         }
     }
 
+    std::vector<std::string> dex_location_args;
+    if (dex_locations != nullptr) {
+        for (size_t k = 0; k < dex_locations->size(); k++) {
+            dex_location_args.push_back("--dex-location=" + (*dex_locations)[k]);
+        }
+    }
+
     // program name, reference profile fd, the final NULL and the profile fds
-    const char* argv[3 + profile_args.size() + apk_args.size() + (copy_and_update ? 1 : 0)];
+    const char* argv[3 + profile_args.size() + apk_args.size() + dex_location_args.size() +
+            (copy_and_update ? 1 : 0)];
     int i = 0;
@@ -773,9 +781,13 @@
     for (size_t k = 0; k < apk_args.size(); k++) {
         argv[i++] = apk_args[k].c_str();
     }
+    for (size_t k = 0; k < dex_location_args.size(); k++) {
+        argv[i++] = dex_location_args[k].c_str();
+    }
     if (copy_and_update) {
         argv[i++] = "--copy-and-update-profile-key";
     }
+
     // Do not add after dex2oat_flags, they should override others for debugging.
     argv[i] = NULL;
 
@@ -787,20 +799,26 @@
 [[ noreturn ]]
 static void run_profman_merge(const std::vector<unique_fd>& profiles_fd,
                               const unique_fd& reference_profile_fd,
-                              const std::vector<unique_fd>* apk_fds = nullptr) {
-    run_profman(profiles_fd, reference_profile_fd, apk_fds, /*copy_and_update*/false);
+                              const std::vector<unique_fd>* apk_fds = nullptr,
+                              const std::vector<std::string>* dex_locations = nullptr) {
+    run_profman(profiles_fd, reference_profile_fd, apk_fds, dex_locations,
+            /*copy_and_update*/false);
 }
 
 [[ noreturn ]]
 static void run_profman_copy_and_update(unique_fd&& profile_fd,
                                         unique_fd&& reference_profile_fd,
-                                        unique_fd&& apk_fd) {
+                                        unique_fd&& apk_fd,
+                                        const std::string& dex_location) {
     std::vector<unique_fd> profiles_fd;
     profiles_fd.push_back(std::move(profile_fd));
     std::vector<unique_fd> apk_fds;
     apk_fds.push_back(std::move(apk_fd));
+    std::vector<std::string> dex_locations;
+    dex_locations.push_back(dex_location);
 
-    run_profman(profiles_fd, reference_profile_fd, &apk_fds, /*copy_and_update*/true);
+    run_profman(profiles_fd, reference_profile_fd, &apk_fds, &dex_locations,
+            /*copy_and_update*/true);
 }
 
 // Decides if profile guided compilation is needed or not based on existing profiles.
@@ -2598,7 +2616,8 @@
     }
 }
 
-bool open_classpath_files(const std::string& classpath, std::vector<unique_fd>* apk_fds) {
+bool open_classpath_files(const std::string& classpath, std::vector<unique_fd>* apk_fds,
+        std::vector<std::string>* dex_locations) {
     std::vector<std::string> classpaths_elems = base::Split(classpath, ":");
     for (const std::string& elem : classpaths_elems) {
         unique_fd fd(TEMP_FAILURE_RETRY(open(elem.c_str(), O_RDONLY)));
@@ -2607,6 +2626,7 @@
             return false;
         } else {
             apk_fds->push_back(std::move(fd));
+            dex_locations->push_back(elem);
         }
     }
     return true;
@@ -2636,7 +2656,8 @@
     // Open the class paths elements. These will be used to filter out profile data that does
     // not belong to the classpath during merge.
     std::vector<unique_fd> apk_fds;
-    if (!open_classpath_files(classpath, &apk_fds)) {
+    std::vector<std::string> dex_locations;
+    if (!open_classpath_files(classpath, &apk_fds, &dex_locations)) {
         return false;
     }
 
@@ -2644,7 +2665,7 @@
     if (pid == 0) {
         /* child -- drop privileges before continuing */
         drop_capabilities(app_shared_gid);
-        run_profman_merge(profiles_fd, snapshot_fd, &apk_fds);
+        run_profman_merge(profiles_fd, snapshot_fd, &apk_fds, &dex_locations);
     }
 
     /* parent */
@@ -2694,7 +2715,8 @@
     // Open the classpath elements. These will be used to filter out profile data that does
     // not belong to the classpath during merge.
     std::vector<unique_fd> apk_fds;
-    if (!open_classpath_files(classpath, &apk_fds)) {
+    std::vector<std::string> dex_locations;
+    if (!open_classpath_files(classpath, &apk_fds, &dex_locations)) {
         return false;
     }
 
@@ -2721,7 +2743,9 @@
             /* child -- drop privileges before continuing */
             drop_capabilities(AID_SYSTEM);
 
-            run_profman_merge(profiles_fd, snapshot_fd, &apk_fds);
+            // The introduction of new access flags into boot jars causes them to
+            // fail dex file verification.
+            run_profman_merge(profiles_fd, snapshot_fd, &apk_fds, &dex_locations);
         }
 
         /* parent */
@@ -2784,7 +2808,8 @@
         // The copy and update takes ownership over the fds.
         run_profman_copy_and_update(std::move(dex_metadata_fd),
                                     std::move(ref_profile_fd),
-                                    std::move(apk_fd));
+                                    std::move(apk_fd),
+                                    code_path);
     }
 
     /* parent */
diff --git a/data/etc/aosp_excluded_hardware.xml b/data/etc/aosp_excluded_hardware.xml
new file mode 100644
index 0000000..013f278
--- /dev/null
+++ b/data/etc/aosp_excluded_hardware.xml
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<permissions>
+    <!-- This is used to exclude this feature from AOSP targets, as AOSP configurations
+    may or may not have a valid location provider. -->
+    <unavailable-feature name="android.hardware.location.network" />
+</permissions>
diff --git a/data/etc/go_handheld_core_hardware.xml b/data/etc/go_handheld_core_hardware.xml
index ffebc9f..8b5a461 100644
--- a/data/etc/go_handheld_core_hardware.xml
+++ b/data/etc/go_handheld_core_hardware.xml
@@ -42,6 +42,7 @@
     <feature name="android.software.print" />
     <feature name="android.software.companion_device_setup" />
     <feature name="android.software.autofill" />
+    <feature name="android.software.cant_save_state" />
 
     <!-- Feature to specify if the device supports adding device admins. -->
     <feature name="android.software.device_admin" />
diff --git a/data/etc/handheld_core_hardware.xml b/data/etc/handheld_core_hardware.xml
index c76e611..060a334 100644
--- a/data/etc/handheld_core_hardware.xml
+++ b/data/etc/handheld_core_hardware.xml
@@ -50,6 +50,7 @@
     <feature name="android.software.print" />
     <feature name="android.software.companion_device_setup" />
     <feature name="android.software.autofill" />
+    <feature name="android.software.cant_save_state" />
 
     <!-- Feature to specify if the device supports adding device admins. -->
     <feature name="android.software.device_admin" />
diff --git a/data/etc/tablet_core_hardware.xml b/data/etc/tablet_core_hardware.xml
index 9b88648..6db2627 100644
--- a/data/etc/tablet_core_hardware.xml
+++ b/data/etc/tablet_core_hardware.xml
@@ -50,6 +50,7 @@
     <feature name="android.software.print" />
     <feature name="android.software.companion_device_setup" />
     <feature name="android.software.autofill" />
+    <feature name="android.software.cant_save_state" />
 
     <!-- Feature to specify if the device supports adding device admins. -->
     <feature name="android.software.device_admin" />
diff --git a/headers/media_plugin/media/openmax/OMX_Audio.h b/headers/media_plugin/media/openmax/OMX_Audio.h
index 9c0296b..f8a36bd 100644
--- a/headers/media_plugin/media/openmax/OMX_Audio.h
+++ b/headers/media_plugin/media/openmax/OMX_Audio.h
@@ -263,6 +263,7 @@
   OMX_AUDIO_AACObjectLD = 23,       /**< AAC Low Delay object (Error Resilient) */
   OMX_AUDIO_AACObjectHE_PS = 29,    /**< AAC High Efficiency with Parametric Stereo coding (HE-AAC v2, object type PS) */
   OMX_AUDIO_AACObjectELD = 39,      /** AAC Enhanced Low Delay. NOTE: Pending Khronos standardization **/
+  OMX_AUDIO_AACObjectXHE = 42,      /** extended High Efficiency AAC. NOTE: Pending Khronos standardization */
   OMX_AUDIO_AACObjectKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
   OMX_AUDIO_AACObjectVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
   OMX_AUDIO_AACObjectMax = 0x7FFFFFFF
diff --git a/headers/media_plugin/media/openmax/OMX_AudioExt.h b/headers/media_plugin/media/openmax/OMX_AudioExt.h
index 05c2232..8409553 100644
--- a/headers/media_plugin/media/openmax/OMX_AudioExt.h
+++ b/headers/media_plugin/media/openmax/OMX_AudioExt.h
@@ -82,6 +82,7 @@
                                    limit the audio signal. Use 0 to let encoder decide */
 } OMX_AUDIO_PARAM_ANDROID_OPUSTYPE;
 
+/** deprecated. use OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE */
 typedef struct OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE {
     OMX_U32 nSize;            /**< size of the structure in bytes */
     OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
@@ -94,6 +95,19 @@
     OMX_S32 nPCMLimiterEnable;     /**< Signal level limiting, 0 for disable, 1 for enable, -1 if unspecified */
 } OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE;
 
+typedef struct OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE {
+    OMX_U32 nSize;            /**< size of the structure in bytes */
+    OMX_VERSIONTYPE nVersion; /**< OMX specification version information */
+    OMX_S32 nMaxOutputChannels;    /**< Maximum channel count to be output, -1 if unspecified, 0 if downmixing disabled */
+    OMX_S32 nDrcCut;               /**< The DRC attenuation factor, between 0 and 127, -1 if unspecified */
+    OMX_S32 nDrcBoost;             /**< The DRC amplification factor, between 0 and 127, -1 if unspecified */
+    OMX_S32 nHeavyCompression;     /**< 0 for light compression, 1 for heavy compression, -1 if unspecified */
+    OMX_S32 nTargetReferenceLevel; /**< Target reference level, between 0 and 127, -1 if unspecified */
+    OMX_S32 nEncodedTargetLevel;   /**< Target reference level assumed at the encoder, between 0 and 127, -1 if unspecified */
+    OMX_S32 nPCMLimiterEnable;     /**< Signal level limiting, 0 for disable, 1 for enable, -1 if unspecified */
+    OMX_S32 nDrcEffectType;        /**< MPEG-D DRC effect type, between -1 and 6, -2 if unspecified */
+} OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE;
+
 typedef struct OMX_AUDIO_PARAM_ANDROID_PROFILETYPE {
    OMX_U32 nSize;
    OMX_VERSIONTYPE nVersion;
diff --git a/headers/media_plugin/media/openmax/OMX_IndexExt.h b/headers/media_plugin/media/openmax/OMX_IndexExt.h
index c2bf97e..716d959 100644
--- a/headers/media_plugin/media/openmax/OMX_IndexExt.h
+++ b/headers/media_plugin/media/openmax/OMX_IndexExt.h
@@ -63,6 +63,7 @@
     OMX_IndexParamAudioAndroidAacPresentation,      /**< reference: OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE */
     OMX_IndexParamAudioAndroidEac3,                 /**< reference: OMX_AUDIO_PARAM_ANDROID_EAC3TYPE */
     OMX_IndexParamAudioProfileQuerySupported,       /**< reference: OMX_AUDIO_PARAM_ANDROID_PROFILETYPE */
+    OMX_IndexParamAudioAndroidAacDrcPresentation,   /**< reference: OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE */
     OMX_IndexExtAudioEndUnused,
 
     /* Image parameters and configurations */
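
The new index pairs with the OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE struct added above; every field has an "unspecified" sentinel, so a client only overrides the DRC parameters it actually cares about. A minimal sketch of filling the struct (node handle, nVersion setup, and error handling are assumed/omitted):

    OMX_AUDIO_PARAM_ANDROID_AACDRCPRESENTATIONTYPE drc = {};
    drc.nSize = sizeof(drc);                 // nVersion would also be filled in
    drc.nMaxOutputChannels    = -1;          // unspecified: keep codec default
    drc.nDrcCut               = -1;
    drc.nDrcBoost             = -1;
    drc.nHeavyCompression     = -1;
    drc.nTargetReferenceLevel = 64;          // example target level
    drc.nEncodedTargetLevel   = -1;
    drc.nPCMLimiterEnable     = 1;           // enable signal level limiting
    drc.nDrcEffectType        = -2;          // unspecified MPEG-D DRC effect
    // OMX_SetParameter(hComponent,
    //         (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAacDrcPresentation, &drc);
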
diff --git a/include/android/hardware_buffer_jni.h b/include/android/hardware_buffer_jni.h
index 6020870..7c4be24 100644
--- a/include/android/hardware_buffer_jni.h
+++ b/include/android/hardware_buffer_jni.h
@@ -31,9 +31,11 @@
 
 /**
  * Return the AHardwareBuffer associated with a Java HardwareBuffer object,
- * for interacting with it through native code.  This acquires a reference
- * on the AHardwareBuffer that is returned; be sure to use
- * AHardwareBuffer_release() when done with it so that it doesn't leak.
+ * for interacting with it through native code. This method does not acquire any
+ * additional reference to the AHardwareBuffer that is returned. To keep the
+ * AHardwareBuffer alive after the Java HardwareBuffer object has been garbage
+ * collected, be sure to use AHardwareBuffer_acquire() to acquire an additional
+ * reference.
  */
 AHardwareBuffer* AHardwareBuffer_fromHardwareBuffer(JNIEnv* env,
         jobject hardwareBufferObj);
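
With the reference-counting behavior reversed, native code that holds on to the returned pointer must now take its own reference. A minimal sketch of the intended calling pattern (JNI plumbing omitted):

    AHardwareBuffer* buffer = AHardwareBuffer_fromHardwareBuffer(env, hardwareBufferObj);
    if (buffer != nullptr) {
        AHardwareBuffer_acquire(buffer);   // keep it alive past the Java object
        // ... use the buffer from native code ...
        AHardwareBuffer_release(buffer);   // balance the acquire when done
    }
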
diff --git a/libs/binder/BpBinder.cpp b/libs/binder/BpBinder.cpp
index a58168f..449a9e9 100644
--- a/libs/binder/BpBinder.cpp
+++ b/libs/binder/BpBinder.cpp
@@ -145,7 +145,7 @@
     ALOGV("Creating BpBinder %p handle %d\n", this, mHandle);
 
     extendObjectLifetime(OBJECT_LIFETIME_WEAK);
-    IPCThreadState::self()->incWeakHandle(handle);
+    IPCThreadState::self()->incWeakHandle(handle, this);
 }
 
 bool BpBinder::isDescriptorCached() const {
@@ -409,7 +409,7 @@
 {
     ALOGV("onFirstRef BpBinder %p handle %d\n", this, mHandle);
     IPCThreadState* ipc = IPCThreadState::self();
-    if (ipc) ipc->incStrongHandle(mHandle);
+    if (ipc) ipc->incStrongHandle(mHandle, this);
 }
 
 void BpBinder::onLastStrongRef(const void* /*id*/)
diff --git a/libs/binder/IPCThreadState.cpp b/libs/binder/IPCThreadState.cpp
index ba9bf61..fd552b4 100644
--- a/libs/binder/IPCThreadState.cpp
+++ b/libs/binder/IPCThreadState.cpp
@@ -409,6 +409,15 @@
     if (mProcess->mDriverFD <= 0)
         return;
     talkWithDriver(false);
+    // The flush could have caused post-write refcount decrements to be
+    // executed, which in turn could result in BC_RELEASE/BC_DECREFS being
+    // queued in mOut. So flush again, if we need to.
+    if (mOut.dataSize() > 0) {
+        talkWithDriver(false);
+    }
+    if (mOut.dataSize() > 0) {
+        ALOGW("mOut.dataSize() > 0 after flushCommands()");
+    }
 }
 
 void IPCThreadState::blockUntilThreadAvailable()
@@ -501,6 +510,21 @@
     }
 }
 
+void IPCThreadState::processPostWriteDerefs()
+{
+    for (size_t i = 0; i < mPostWriteWeakDerefs.size(); i++) {
+        RefBase::weakref_type* refs = mPostWriteWeakDerefs[i];
+        refs->decWeak(mProcess.get());
+    }
+    mPostWriteWeakDerefs.clear();
+
+    for (size_t i = 0; i < mPostWriteStrongDerefs.size(); i++) {
+        RefBase* obj = mPostWriteStrongDerefs[i];
+        obj->decStrong(mProcess.get());
+    }
+    mPostWriteStrongDerefs.clear();
+}
+
 void IPCThreadState::joinThreadPool(bool isMain)
 {
     LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());
@@ -627,11 +651,14 @@
     return err;
 }
 
-void IPCThreadState::incStrongHandle(int32_t handle)
+void IPCThreadState::incStrongHandle(int32_t handle, BpBinder *proxy)
 {
     LOG_REMOTEREFS("IPCThreadState::incStrongHandle(%d)\n", handle);
     mOut.writeInt32(BC_ACQUIRE);
     mOut.writeInt32(handle);
+    // Create a temp reference until the driver has handled this command.
+    proxy->incStrong(mProcess.get());
+    mPostWriteStrongDerefs.push(proxy);
 }
 
 void IPCThreadState::decStrongHandle(int32_t handle)
@@ -641,11 +668,14 @@
     mOut.writeInt32(handle);
 }
 
-void IPCThreadState::incWeakHandle(int32_t handle)
+void IPCThreadState::incWeakHandle(int32_t handle, BpBinder *proxy)
 {
     LOG_REMOTEREFS("IPCThreadState::incWeakHandle(%d)\n", handle);
     mOut.writeInt32(BC_INCREFS);
     mOut.writeInt32(handle);
+    // Create a temp reference until the driver has handled this command.
+    proxy->getWeakRefs()->incWeak(mProcess.get());
+    mPostWriteWeakDerefs.push(proxy->getWeakRefs());
 }
 
 void IPCThreadState::decWeakHandle(int32_t handle)
@@ -897,8 +927,10 @@
         if (bwr.write_consumed > 0) {
             if (bwr.write_consumed < mOut.dataSize())
                 mOut.remove(0, bwr.write_consumed);
-            else
+            else {
                 mOut.setDataSize(0);
+                processPostWriteDerefs();
+            }
         }
         if (bwr.read_consumed > 0) {
             mIn.setDataSize(bwr.read_consumed);
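
The pieces above fit together as follows: each queued BC_ACQUIRE/BC_INCREFS pins the proxy with a temporary reference, and that reference is only dropped once talkWithDriver() has seen the whole write buffer consumed, so the BpBinder cannot be destroyed before the kernel has processed the command. A condensed sketch of that lifecycle (names from this change, control flow simplified):

    // incStrongHandle(): queue the command and pin the proxy.
    mOut.writeInt32(BC_ACQUIRE);
    mOut.writeInt32(handle);
    proxy->incStrong(mProcess.get());        // temporary reference
    mPostWriteStrongDerefs.push(proxy);

    // talkWithDriver(): once everything written has been consumed...
    mOut.setDataSize(0);
    processPostWriteDerefs();                // ...drop the temporary references
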
diff --git a/libs/binder/IServiceManager.cpp b/libs/binder/IServiceManager.cpp
index 711143c..be3bbf7 100644
--- a/libs/binder/IServiceManager.cpp
+++ b/libs/binder/IServiceManager.cpp
@@ -24,6 +24,7 @@
 #include <binder/IPermissionController.h>
 #endif
 #include <binder/Parcel.h>
+#include <cutils/properties.h>
 #include <utils/String8.h>
 #include <utils/SystemClock.h>
 #include <utils/CallStack.h>
@@ -142,20 +143,35 @@
 
     virtual sp<IBinder> getService(const String16& name) const
     {
-        unsigned n;
-        for (n = 0; n < 5; n++){
-            if (n > 0) {
-                if (!strcmp(ProcessState::self()->getDriverName().c_str(), "/dev/vndbinder")) {
-                    ALOGI("Waiting for vendor service %s...", String8(name).string());
-                    CallStack stack(LOG_TAG);
-                } else {
-                    ALOGI("Waiting for service %s...", String8(name).string());
-                }
-                sleep(1);
+        sp<IBinder> svc = checkService(name);
+        if (svc != NULL) return svc;
+
+        const bool isVendorService =
+            strcmp(ProcessState::self()->getDriverName().c_str(), "/dev/vndbinder") == 0;
+        const long timeout = uptimeMillis() + 5000;
+        if (!gSystemBootCompleted) {
+            char bootCompleted[PROPERTY_VALUE_MAX];
+            property_get("sys.boot_completed", bootCompleted, "0");
+            gSystemBootCompleted = strcmp(bootCompleted, "1") == 0;
+        }
+        // retry interval in milliseconds.
+        const long sleepTime = gSystemBootCompleted ? 1000 : 100;
+
+        int n = 0;
+        while (uptimeMillis() < timeout) {
+            n++;
+            if (isVendorService) {
+                ALOGI("Waiting for vendor service %s...", String8(name).string());
+                CallStack stack(LOG_TAG);
+            } else if (n % 10 == 0) {
+                ALOGI("Waiting for service %s...", String8(name).string());
             }
+            usleep(1000*sleepTime);
+
             sp<IBinder> svc = checkService(name);
             if (svc != NULL) return svc;
         }
+        ALOGW("Service %s didn't start. Returning NULL", String8(name).string());
         return NULL;
     }
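
For callers the contract is unchanged apart from the bounded wait: getService() now polls checkService() until a roughly five second deadline, sleeping 100 ms per attempt before sys.boot_completed and 1 s afterwards, then logs a warning and returns NULL. A minimal caller-side sketch (the service name is only an example):

    sp<IBinder> binder =
            defaultServiceManager()->getService(String16("media.player"));
    if (binder == nullptr) {
        // The service did not come up within ~5 seconds; handle the failure.
    }
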
 
diff --git a/libs/binder/Static.cpp b/libs/binder/Static.cpp
index 9899b65..c5c3fd5 100644
--- a/libs/binder/Static.cpp
+++ b/libs/binder/Static.cpp
@@ -97,5 +97,6 @@
 #ifndef __ANDROID_VNDK__
 sp<IPermissionController> gPermissionController;
 #endif
+bool gSystemBootCompleted = false;
 
 }   // namespace android
diff --git a/libs/binder/include/binder/IPCThreadState.h b/libs/binder/include/binder/IPCThreadState.h
index 245607e..c1d9a9a 100644
--- a/libs/binder/include/binder/IPCThreadState.h
+++ b/libs/binder/include/binder/IPCThreadState.h
@@ -64,9 +64,9 @@
                                          uint32_t code, const Parcel& data,
                                          Parcel* reply, uint32_t flags);
 
-            void                incStrongHandle(int32_t handle);
+            void                incStrongHandle(int32_t handle, BpBinder *proxy);
             void                decStrongHandle(int32_t handle);
-            void                incWeakHandle(int32_t handle);
+            void                incWeakHandle(int32_t handle, BpBinder *proxy);
             void                decWeakHandle(int32_t handle);
             status_t            attemptIncStrongHandle(int32_t handle);
     static  void                expungeHandle(int32_t handle, IBinder* binder);
@@ -106,6 +106,7 @@
             status_t            getAndExecuteCommand();
             status_t            executeCommand(int32_t command);
             void                processPendingDerefs();
+            void                processPostWriteDerefs();
 
             void                clearCaller();
 
@@ -118,7 +119,8 @@
     const   sp<ProcessState>    mProcess;
             Vector<BBinder*>    mPendingStrongDerefs;
             Vector<RefBase::weakref_type*> mPendingWeakDerefs;
-
+            Vector<RefBase*>    mPostWriteStrongDerefs;
+            Vector<RefBase::weakref_type*> mPostWriteWeakDerefs;
             Parcel              mIn;
             Parcel              mOut;
             status_t            mLastError;
diff --git a/libs/binder/include/private/binder/Static.h b/libs/binder/include/private/binder/Static.h
index f04bcae..6ca7592 100644
--- a/libs/binder/include/private/binder/Static.h
+++ b/libs/binder/include/private/binder/Static.h
@@ -41,5 +41,6 @@
 #ifndef __ANDROID_VNDK__
 extern sp<IPermissionController> gPermissionController;
 #endif
+extern bool gSystemBootCompleted;
 
 }   // namespace android
diff --git a/libs/dumputils/dump_utils.cpp b/libs/dumputils/dump_utils.cpp
index 8f6b1bd..8b2f842 100644
--- a/libs/dumputils/dump_utils.cpp
+++ b/libs/dumputils/dump_utils.cpp
@@ -42,6 +42,7 @@
 /* list of hal interface to dump containing process during native dumps */
 static const char* hal_interfaces_to_dump[] {
         "android.hardware.audio@2.0::IDevicesFactory",
+        "android.hardware.audio@4.0::IDevicesFactory",
         "android.hardware.bluetooth@1.0::IBluetoothHci",
         "android.hardware.camera.provider@2.4::ICameraProvider",
         "android.hardware.drm@1.0::IDrmFactory",
diff --git a/libs/gui/Android.bp b/libs/gui/Android.bp
index 2768ad8..73f2147 100644
--- a/libs/gui/Android.bp
+++ b/libs/gui/Android.bp
@@ -142,10 +142,26 @@
         "android.hardware.configstore-utils",
     ],
 
+    // bufferhub is not used when building libgui for vendors
+    target: {
+        vendor: {
+            cflags: ["-DNO_BUFFERHUB"],
+            exclude_srcs: [
+                "BufferHubConsumer.cpp",
+                "BufferHubProducer.cpp",
+            ],
+            exclude_shared_libs: [
+                "libbufferhubqueue",
+                "libpdx_default_transport",
+            ],
+        },
+    },
+
     header_libs: [
         "libdvr_headers",
         "libnativebase_headers",
         "libgui_headers",
+        "libpdx_headers",
     ],
 
     export_shared_lib_headers: [
diff --git a/libs/gui/BufferHubProducer.cpp b/libs/gui/BufferHubProducer.cpp
index 061710a..ae5cca2 100644
--- a/libs/gui/BufferHubProducer.cpp
+++ b/libs/gui/BufferHubProducer.cpp
@@ -136,7 +136,7 @@
                                           uint32_t height, PixelFormat format, uint64_t usage,
                                           uint64_t* /*outBufferAge*/,
                                           FrameEventHistoryDelta* /* out_timestamps */) {
-    ALOGW("dequeueBuffer: w=%u, h=%u, format=%d, usage=%" PRIu64, width, height, format, usage);
+    ALOGV("dequeueBuffer: w=%u, h=%u, format=%d, usage=%" PRIu64, width, height, format, usage);
 
     status_t ret;
     std::unique_lock<std::mutex> lock(mutex_);
@@ -208,7 +208,7 @@
 
     buffers_[slot].mBufferState.freeQueued();
     buffers_[slot].mBufferState.dequeue();
-    ALOGW("dequeueBuffer: slot=%zu", slot);
+    ALOGV("dequeueBuffer: slot=%zu", slot);
 
     // TODO(jwcai) Handle fence properly. |BufferHub| has full fence support, we
     // just need to expose that through |BufferHubQueue| once we need fence.
diff --git a/libs/gui/BufferItemConsumer.cpp b/libs/gui/BufferItemConsumer.cpp
index 34e6d80..89bc0c4 100644
--- a/libs/gui/BufferItemConsumer.cpp
+++ b/libs/gui/BufferItemConsumer.cpp
@@ -92,10 +92,13 @@
     Mutex::Autolock _l(mMutex);
 
     err = addReleaseFenceLocked(item.mSlot, item.mGraphicBuffer, releaseFence);
+    if (err != OK) {
+        BI_LOGE("Failed to addReleaseFenceLocked");
+    }
 
     err = releaseBufferLocked(item.mSlot, item.mGraphicBuffer, EGL_NO_DISPLAY,
             EGL_NO_SYNC_KHR);
-    if (err != OK) {
+    if (err != OK && err != IGraphicBufferConsumer::STALE_BUFFER_SLOT) {
         BI_LOGE("Failed to release buffer: %s (%d)",
                 strerror(-err), err);
     }
diff --git a/libs/gui/BufferQueue.cpp b/libs/gui/BufferQueue.cpp
index 2917f45..a8da134 100644
--- a/libs/gui/BufferQueue.cpp
+++ b/libs/gui/BufferQueue.cpp
@@ -18,8 +18,11 @@
 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
 //#define LOG_NDEBUG 0
 
+#ifndef NO_BUFFERHUB
 #include <gui/BufferHubConsumer.h>
 #include <gui/BufferHubProducer.h>
+#endif
+
 #include <gui/BufferQueue.h>
 #include <gui/BufferQueueConsumer.h>
 #include <gui/BufferQueueCore.h>
@@ -103,6 +106,7 @@
     *outConsumer = consumer;
 }
 
+#ifndef NO_BUFFERHUB
 void BufferQueue::createBufferHubQueue(sp<IGraphicBufferProducer>* outProducer,
                                        sp<IGraphicBufferConsumer>* outConsumer) {
     LOG_ALWAYS_FATAL_IF(outProducer == NULL, "BufferQueue: outProducer must not be NULL");
@@ -128,5 +132,6 @@
     *outProducer = producer;
     *outConsumer = consumer;
 }
+#endif
 
 }; // namespace android
diff --git a/libs/gui/IGraphicBufferProducer.cpp b/libs/gui/IGraphicBufferProducer.cpp
index 777a3e5..0749fde 100644
--- a/libs/gui/IGraphicBufferProducer.cpp
+++ b/libs/gui/IGraphicBufferProducer.cpp
@@ -27,7 +27,9 @@
 #include <binder/Parcel.h>
 #include <binder/IInterface.h>
 
+#ifndef NO_BUFFERHUB
 #include <gui/BufferHubProducer.h>
+#endif
 #include <gui/BufferQueueDefs.h>
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/IProducerListener.h>
@@ -706,6 +708,7 @@
         }
         case USE_BUFFER_HUB: {
             ALOGE("createFromParcel: BufferHub not implemented.");
+#ifndef NO_BUFFERHUB
             dvr::ProducerQueueParcelable producerParcelable;
             res = producerParcelable.readFromParcel(parcel);
             if (res != NO_ERROR) {
@@ -713,6 +716,9 @@
                 return nullptr;
             }
             return BufferHubProducer::Create(std::move(producerParcelable));
+#else
+            return nullptr;
+#endif
         }
         default: {
             ALOGE("createFromParcel: Unexpected magic: 0x%x.", outMagic);
diff --git a/libs/gui/include/gui/BufferQueue.h b/libs/gui/include/gui/BufferQueue.h
index f175573..da95274 100644
--- a/libs/gui/include/gui/BufferQueue.h
+++ b/libs/gui/include/gui/BufferQueue.h
@@ -79,9 +79,11 @@
             sp<IGraphicBufferConsumer>* outConsumer,
             bool consumerIsSurfaceFlinger = false);
 
+#ifndef NO_BUFFERHUB
     // Creates an IGraphicBufferProducer and IGraphicBufferConsumer pair backed by BufferHub.
     static void createBufferHubQueue(sp<IGraphicBufferProducer>* outProducer,
                                      sp<IGraphicBufferConsumer>* outConsumer);
+#endif
 
     BufferQueue() = delete; // Create through createBufferQueue
 };
diff --git a/libs/ui/Android.bp b/libs/ui/Android.bp
index 1a9fb8b..ff9d19e 100644
--- a/libs/ui/Android.bp
+++ b/libs/ui/Android.bp
@@ -84,7 +84,6 @@
         "libhidlbase",
         "libhidltransport",
         "libhwbinder",
-        "libpdx_default_transport",
         "libsync",
         "libutils",
         "libutilscallstack",
@@ -106,6 +105,7 @@
         "libnativebase_headers",
         "libhardware_headers",
         "libui_headers",
+        "libpdx_headers",
     ],
 
     export_static_lib_headers: [
diff --git a/libs/ui/HdrCapabilities.cpp b/libs/ui/HdrCapabilities.cpp
index 50f9bf1..a36911d 100644
--- a/libs/ui/HdrCapabilities.cpp
+++ b/libs/ui/HdrCapabilities.cpp
@@ -27,7 +27,6 @@
 HdrCapabilities::HdrCapabilities(HdrCapabilities&& other) = default;
 HdrCapabilities& HdrCapabilities::operator=(HdrCapabilities&& other) = default;
 
-
 size_t HdrCapabilities::getFlattenedSize() const {
     return  sizeof(mMaxLuminance) +
             sizeof(mMaxAverageLuminance) +
diff --git a/libs/vr/libbufferhub/Android.bp b/libs/vr/libbufferhub/Android.bp
index b38ecc7..7b5ad44 100644
--- a/libs/vr/libbufferhub/Android.bp
+++ b/libs/vr/libbufferhub/Android.bp
@@ -56,15 +56,6 @@
     export_header_lib_headers: [
         "libnativebase_headers",
     ],
-    vendor_available: false,
-    vndk: {
-        enabled: true,
-    },
-    target: {
-        vendor: {
-            exclude_srcs: ["detached_buffer.cpp"],
-        },
-    },
 }
 
 cc_test {
diff --git a/libs/vr/libbufferhubqueue/Android.bp b/libs/vr/libbufferhubqueue/Android.bp
index eeec9ec..9f72c05 100644
--- a/libs/vr/libbufferhubqueue/Android.bp
+++ b/libs/vr/libbufferhubqueue/Android.bp
@@ -59,10 +59,6 @@
     static_libs: staticLibraries,
     shared_libs: sharedLibraries,
     header_libs: headerLibraries,
-    vendor_available: false,
-    vndk: {
-        enabled: true,
-    },
 }
 
 subdirs = ["benchmarks", "tests"]
diff --git a/libs/vr/libdvr/Android.bp b/libs/vr/libdvr/Android.bp
index d0e34ee..16906f5 100644
--- a/libs/vr/libdvr/Android.bp
+++ b/libs/vr/libdvr/Android.bp
@@ -16,10 +16,7 @@
 cc_library_headers {
     name: "libdvr_headers",
     export_include_dirs: ["include"],
-    vendor_available: false,
-    vndk: {
-        enabled: true,
-    },
+    vendor_available: true,
 }
 
 cflags = [
diff --git a/libs/vr/libpdx/Android.bp b/libs/vr/libpdx/Android.bp
index 9b84d65..1a9d727 100644
--- a/libs/vr/libpdx/Android.bp
+++ b/libs/vr/libpdx/Android.bp
@@ -1,3 +1,9 @@
+cc_library_headers {
+    name: "libpdx_headers",
+    export_include_dirs: ["private"],
+    vendor_available: true,
+}
+
 cc_library_static {
     name: "libpdx",
     clang: true,
@@ -8,8 +14,8 @@
         "-DLOG_TAG=\"libpdx\"",
         "-DTRACE=0",
     ],
-    export_include_dirs: ["private"],
-    local_include_dirs: ["private"],
+    header_libs: ["libpdx_headers"],
+    export_header_lib_headers: ["libpdx_headers"],
     srcs: [
         "client.cpp",
         "service.cpp",
@@ -22,10 +28,6 @@
         "libutils",
         "liblog",
     ],
-    vendor_available: false,
-    vndk: {
-        enabled: true,
-    },
 }
 
 cc_test {
diff --git a/libs/vr/libpdx_default_transport/Android.bp b/libs/vr/libpdx_default_transport/Android.bp
index 475eb50..74b8c8b 100644
--- a/libs/vr/libpdx_default_transport/Android.bp
+++ b/libs/vr/libpdx_default_transport/Android.bp
@@ -12,10 +12,6 @@
     name: "pdx_default_transport_lib_defaults",
     export_include_dirs: ["private"],
     whole_static_libs: ["libpdx"],
-    vendor_available: false,
-    vndk: {
-        enabled: true,
-    },
 }
 
 cc_defaults {
@@ -37,10 +33,6 @@
         "pdx_default_transport_lib_defaults",
         "pdx_use_transport_uds",
     ],
-    vendor_available: false,
-    vndk: {
-        enabled: true,
-    },
     shared_libs: [
         "libbase",
         "libbinder",
diff --git a/libs/vr/libpdx_uds/Android.bp b/libs/vr/libpdx_uds/Android.bp
index 79cfdf6..d640950 100644
--- a/libs/vr/libpdx_uds/Android.bp
+++ b/libs/vr/libpdx_uds/Android.bp
@@ -30,10 +30,6 @@
     whole_static_libs: [
         "libselinux",
     ],
-    vendor_available: false,
-    vndk: {
-        enabled: true,
-    },
 }
 
 cc_test {
diff --git a/opengl/libs/EGL/eglApi.cpp b/opengl/libs/EGL/eglApi.cpp
index b022a20..c65bddf 100644
--- a/opengl/libs/EGL/eglApi.cpp
+++ b/opengl/libs/EGL/eglApi.cpp
@@ -475,6 +475,12 @@
     return HAL_DATASPACE_UNKNOWN;
 }
 
+// Get the colorspace value that should be reported from queries. When the colorspace
+// is unknown (no attribute passed), default to reporting LINEAR.
+static EGLint getReportedColorSpace(EGLint colorspace) {
+    return colorspace == EGL_UNKNOWN ? EGL_GL_COLORSPACE_LINEAR_KHR : colorspace;
+}
+
 // Returns a list of color spaces understood by the vendor EGL driver.
 static std::vector<EGLint> getDriverColorSpaces(egl_display_ptr dp,
                                                 android_pixel_format format) {
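
The colorspace now starts out as EGL_UNKNOWN when the application passes no EGL_GL_COLORSPACE_KHR attribute, and only surface queries report it as LINEAR through getReportedColorSpace(). A small sketch of what an application would observe, assuming the wrapper answers the query from the value stored on egl_surface_t as the comment above describes:

    // Surface created without an EGL_GL_COLORSPACE_KHR attribute:
    EGLSurface surface = eglCreateWindowSurface(dpy, config, window, nullptr);

    EGLint colorspace = 0;
    eglQuerySurface(dpy, surface, EGL_GL_COLORSPACE_KHR, &colorspace);
    // colorspace == EGL_GL_COLORSPACE_LINEAR_KHR, even though no explicit
    // colorspace attribute was forwarded to the vendor driver.
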
@@ -569,7 +575,8 @@
     // supports wide color.
     const bool colorSpaceIsNarrow =
         *colorSpace == EGL_GL_COLORSPACE_SRGB_KHR ||
-        *colorSpace == EGL_GL_COLORSPACE_LINEAR_KHR;
+        *colorSpace == EGL_GL_COLORSPACE_LINEAR_KHR ||
+        *colorSpace == EGL_UNKNOWN;
     if (window && !colorSpaceIsNarrow) {
         bool windowSupportsWideColor = true;
         // Ordinarily we'd put a call to native_window_get_wide_color_support
@@ -718,7 +725,7 @@
         getNativePixelFormat(iDpy, cnx, config, &format);
 
         // now select correct colorspace and dataspace based on user's attribute list
-        EGLint colorSpace = EGL_GL_COLORSPACE_LINEAR_KHR;
+        EGLint colorSpace = EGL_UNKNOWN;
         std::vector<EGLint> strippedAttribList;
         if (!processAttributes(dp, window, format, attrib_list, &colorSpace,
                                &strippedAttribList)) {
@@ -757,7 +764,8 @@
                 iDpy, config, window, attrib_list);
         if (surface != EGL_NO_SURFACE) {
             egl_surface_t* s =
-                    new egl_surface_t(dp.get(), config, window, surface, colorSpace, cnx);
+                    new egl_surface_t(dp.get(), config, window, surface,
+                                      getReportedColorSpace(colorSpace), cnx);
             return s;
         }
 
@@ -782,7 +790,7 @@
         getNativePixelFormat(iDpy, cnx, config, &format);
 
         // now select a corresponding sRGB format if needed
-        EGLint colorSpace = EGL_GL_COLORSPACE_LINEAR_KHR;
+        EGLint colorSpace = EGL_UNKNOWN;
         std::vector<EGLint> strippedAttribList;
         if (!processAttributes(dp, nullptr, format, attrib_list, &colorSpace,
                                &strippedAttribList)) {
@@ -794,7 +802,9 @@
         EGLSurface surface = cnx->egl.eglCreatePixmapSurface(
                 dp->disp.dpy, config, pixmap, attrib_list);
         if (surface != EGL_NO_SURFACE) {
-            egl_surface_t* s = new egl_surface_t(dp.get(), config, NULL, surface, colorSpace, cnx);
+            egl_surface_t* s =
+                    new egl_surface_t(dp.get(), config, NULL, surface,
+                                      getReportedColorSpace(colorSpace), cnx);
             return s;
         }
     }
@@ -814,7 +824,7 @@
         getNativePixelFormat(iDpy, cnx, config, &format);
 
         // Select correct colorspace based on user's attribute list
-        EGLint colorSpace = EGL_GL_COLORSPACE_LINEAR_KHR;
+        EGLint colorSpace = EGL_UNKNOWN;
         std::vector<EGLint> strippedAttribList;
         if (!processAttributes(dp, nullptr, format, attrib_list, &colorSpace,
                                &strippedAttribList)) {
@@ -826,7 +836,9 @@
         EGLSurface surface = cnx->egl.eglCreatePbufferSurface(
                 dp->disp.dpy, config, attrib_list);
         if (surface != EGL_NO_SURFACE) {
-            egl_surface_t* s = new egl_surface_t(dp.get(), config, NULL, surface, colorSpace, cnx);
+            egl_surface_t* s =
+                    new egl_surface_t(dp.get(), config, NULL, surface,
+                                      getReportedColorSpace(colorSpace), cnx);
             return s;
         }
     }
@@ -1733,13 +1745,31 @@
     ContextRef _c(dp.get(), ctx);
     egl_context_t * const c = _c.get();
 
+    // Temporary hack: eglImageCreateKHR should accept EGL_GL_COLORSPACE_LINEAR_KHR,
+    // EGL_GL_COLORSPACE_SRGB_KHR and EGL_GL_COLORSPACE_DEFAULT_EXT if
+    // EGL_EXT_image_gl_colorspace is supported, but some drivers don't like
+    // the DEFAULT value and generate an error.
+    std::vector<EGLint> strippedAttribList;
+    for (const EGLint *attr = attrib_list; attr && attr[0] != EGL_NONE; attr += 2) {
+        if (attr[0] == EGL_GL_COLORSPACE_KHR &&
+            dp->haveExtension("EGL_EXT_image_gl_colorspace")) {
+            if (attr[1] != EGL_GL_COLORSPACE_LINEAR_KHR &&
+                attr[1] != EGL_GL_COLORSPACE_SRGB_KHR) {
+                continue;
+            }
+        }
+        strippedAttribList.push_back(attr[0]);
+        strippedAttribList.push_back(attr[1]);
+    }
+    strippedAttribList.push_back(EGL_NONE);
+
     EGLImageKHR result = EGL_NO_IMAGE_KHR;
     egl_connection_t* const cnx = &gEGLImpl;
     if (cnx->dso && cnx->egl.eglCreateImageKHR) {
         result = cnx->egl.eglCreateImageKHR(
                 dp->disp.dpy,
                 c ? c->context : EGL_NO_CONTEXT,
-                target, buffer, attrib_list);
+                target, buffer, strippedAttribList.data());
     }
     return result;
 }
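
The filtering loop above means only LINEAR and SRGB colorspace attributes survive; anything else, notably EGL_GL_COLORSPACE_DEFAULT_EXT, is dropped before the list reaches the vendor driver. A small illustration with a hypothetical attribute list:

    // Attribute list passed in by the caller:
    const EGLint attrs[] = {
        EGL_IMAGE_PRESERVED_KHR, EGL_TRUE,
        EGL_GL_COLORSPACE_KHR,   EGL_GL_COLORSPACE_DEFAULT_EXT,
        EGL_NONE,
    };
    // With EGL_EXT_image_gl_colorspace supported, the stripped list forwarded
    // to the driver's eglCreateImageKHR is effectively:
    //     { EGL_IMAGE_PRESERVED_KHR, EGL_TRUE, EGL_NONE }
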
diff --git a/opengl/libs/EGL/getProcAddress.cpp b/opengl/libs/EGL/getProcAddress.cpp
index 0183621..fedc789 100644
--- a/opengl/libs/EGL/getProcAddress.cpp
+++ b/opengl/libs/EGL/getProcAddress.cpp
@@ -80,46 +80,44 @@
 
 #elif defined(__i386__)
 
-    #define API_ENTRY(_api) __attribute__((noinline,optimize("omit-frame-pointer"))) _api
+    #define API_ENTRY(_api) __attribute__((naked)) _api
 
     #define CALL_GL_EXTENSION_API(_api)                         \
-         register void** fn;                                    \
          __asm__ volatile(                                      \
-            "mov %%gs:0, %[fn]\n"                               \
-            "mov %P[tls](%[fn]), %[fn]\n"                       \
-            "test %[fn], %[fn]\n"                               \
-            "cmovne %P[api](%[fn]), %[fn]\n"                    \
-            "test %[fn], %[fn]\n"                               \
+            "mov %%gs:0, %%eax\n"                               \
+            "mov %P[tls](%%eax), %%eax\n"                       \
+            "test %%eax, %%eax\n"                               \
+            "cmovne %P[api](%%eax), %%eax\n"                    \
+            "test %%eax, %%eax\n"                               \
             "je 1f\n"                                           \
-            "jmp *%[fn]\n"                                      \
-            "1:\n"                                              \
-            : [fn] "=r" (fn)                                    \
+            "jmp *%%eax\n"                                      \
+            "1: ret\n"                                          \
+            :                                                   \
             : [tls] "i" (TLS_SLOT_OPENGL_API*sizeof(void*)),    \
               [api] "i" (__builtin_offsetof(gl_hooks_t,         \
                                       ext.extensions[_api]))    \
-            : "cc"                                              \
+            : "eax", "cc"                                       \
             );
 
 #elif defined(__x86_64__)
 
-    #define API_ENTRY(_api) __attribute__((noinline,optimize("omit-frame-pointer"))) _api
+    #define API_ENTRY(_api) __attribute__((naked)) _api
 
     #define CALL_GL_EXTENSION_API(_api)                         \
-         register void** fn;                                    \
          __asm__ volatile(                                      \
-            "mov %%fs:0, %[fn]\n"                               \
-            "mov %P[tls](%[fn]), %[fn]\n"                       \
-            "test %[fn], %[fn]\n"                               \
-            "cmovne %P[api](%[fn]), %[fn]\n"                    \
-            "test %[fn], %[fn]\n"                               \
+            "mov %%fs:0, %%rax\n"                               \
+            "mov %P[tls](%%rax), %%rax\n"                       \
+            "test %%rax, %%rax\n"                               \
+            "cmovne %P[api](%%rax), %%rax\n"                    \
+            "test %%rax, %%rax\n"                               \
             "je 1f\n"                                           \
-            "jmp *%[fn]\n"                                      \
-            "1:\n"                                              \
-            : [fn] "=r" (fn)                                    \
+            "jmp *%%rax\n"                                      \
+            "1: ret\n"                                          \
+            :                                                   \
             : [tls] "i" (TLS_SLOT_OPENGL_API*sizeof(void*)),    \
               [api] "i" (__builtin_offsetof(gl_hooks_t,         \
                                       ext.extensions[_api]))    \
-            : "cc"                                              \
+            : "rax", "cc"                                       \
             );
 
 #elif defined(__mips64)
diff --git a/services/sensorservice/RecentEventLogger.cpp b/services/sensorservice/RecentEventLogger.cpp
index 62e9ce0..1025a88 100644
--- a/services/sensorservice/RecentEventLogger.cpp
+++ b/services/sensorservice/RecentEventLogger.cpp
@@ -100,7 +100,8 @@
 size_t RecentEventLogger::logSizeBySensorType(int sensorType) {
     return (sensorType == SENSOR_TYPE_STEP_COUNTER ||
             sensorType == SENSOR_TYPE_SIGNIFICANT_MOTION ||
-            sensorType == SENSOR_TYPE_ACCELEROMETER) ? LOG_SIZE_LARGE : LOG_SIZE;
+            sensorType == SENSOR_TYPE_ACCELEROMETER ||
+            sensorType == SENSOR_TYPE_LIGHT) ? LOG_SIZE_LARGE : LOG_SIZE;
 }
 
 RecentEventLogger::SensorEventLog::SensorEventLog(const sensors_event_t& e) : mEvent(e) {
diff --git a/services/surfaceflinger/Android.bp b/services/surfaceflinger/Android.bp
index 5b1e631..c2bb6ad 100644
--- a/services/surfaceflinger/Android.bp
+++ b/services/surfaceflinger/Android.bp
@@ -22,6 +22,7 @@
         "android.frameworks.vr.composer@1.0",
         "android.hardware.configstore-utils",
         "android.hardware.configstore@1.0",
+        "android.hardware.configstore@1.1",
         "android.hardware.graphics.allocator@2.0",
         "android.hardware.graphics.composer@2.1",
         "android.hardware.graphics.composer@2.2",
@@ -45,6 +46,7 @@
         "libpdx_default_transport",
         "libprotobuf-cpp-lite",
         "libsync",
+        "libtimestats_proto",
         "libui",
         "libutils",
         "libvulkan",
@@ -124,6 +126,7 @@
         "SurfaceFlinger.cpp",
         "SurfaceInterceptor.cpp",
         "SurfaceTracing.cpp",
+        "TimeStats/TimeStats.cpp",
         "Transform.cpp",
     ],
 }
@@ -172,6 +175,7 @@
         "liblayers_proto",
         "liblog",
         "libsurfaceflinger",
+        "libtimestats_proto",
         "libutils",
     ],
     static_libs: [
@@ -213,5 +217,6 @@
 
 subdirs = [
     "layerproto",
+    "TimeStats/timestatsproto",
     "tests",
 ]
diff --git a/services/surfaceflinger/BufferLayer.cpp b/services/surfaceflinger/BufferLayer.cpp
index 7fd9d01..17ed9aa 100644
--- a/services/surfaceflinger/BufferLayer.cpp
+++ b/services/surfaceflinger/BufferLayer.cpp
@@ -316,6 +316,9 @@
     nsecs_t desiredPresentTime = mConsumer->getTimestamp();
     mFrameTracker.setDesiredPresentTime(desiredPresentTime);
 
+    const std::string layerName(getName().c_str());
+    mTimeStats.setDesiredTime(layerName, mCurrentFrameNumber, desiredPresentTime);
+
     std::shared_ptr<FenceTime> frameReadyFence = mConsumer->getCurrentFenceTime();
     if (frameReadyFence->isValid()) {
         mFrameTracker.setFrameReadyFence(std::move(frameReadyFence));
@@ -326,12 +329,15 @@
     }
 
     if (presentFence->isValid()) {
+        mTimeStats.setPresentFence(layerName, mCurrentFrameNumber, presentFence);
         mFrameTracker.setActualPresentFence(std::shared_ptr<FenceTime>(presentFence));
     } else {
         // The HWC doesn't support present fences, so use the refresh
         // timestamp instead.
-        mFrameTracker.setActualPresentTime(
-                mFlinger->getHwComposer().getRefreshTimestamp(HWC_DISPLAY_PRIMARY));
+        const nsecs_t actualPresentTime =
+                mFlinger->getHwComposer().getRefreshTimestamp(HWC_DISPLAY_PRIMARY);
+        mTimeStats.setPresentTime(layerName, mCurrentFrameNumber, actualPresentTime);
+        mFrameTracker.setActualPresentTime(actualPresentTime);
     }
 
     mFrameTracker.advanceFrame();
@@ -441,6 +447,7 @@
         // and return early
         if (queuedBuffer) {
             Mutex::Autolock lock(mQueueItemLock);
+            mTimeStats.removeTimeRecord(getName().c_str(), mQueueItems[0].mFrameNumber);
             mQueueItems.removeAt(0);
             android_atomic_dec(&mQueuedFrames);
         }
@@ -454,6 +461,7 @@
             Mutex::Autolock lock(mQueueItemLock);
             mQueueItems.clear();
             android_atomic_and(0, &mQueuedFrames);
+            mTimeStats.clearLayerRecord(getName().c_str());
         }
 
         // Once we have hit this state, the shadow queue may no longer
@@ -474,10 +482,15 @@
         // Remove any stale buffers that have been dropped during
         // updateTexImage
         while (mQueueItems[0].mFrameNumber != currentFrameNumber) {
+            mTimeStats.removeTimeRecord(getName().c_str(), mQueueItems[0].mFrameNumber);
             mQueueItems.removeAt(0);
             android_atomic_dec(&mQueuedFrames);
         }
 
+        const std::string layerName(getName().c_str());
+        mTimeStats.setAcquireFence(layerName, currentFrameNumber, mQueueItems[0].mFenceTime);
+        mTimeStats.setLatchTime(layerName, currentFrameNumber, latchTime);
+
         mQueueItems.removeAt(0);
     }
 
@@ -515,11 +528,9 @@
         recomputeVisibleRegions = true;
     }
 
-    // Dataspace::V0_SRGB and Dataspace::V0_SRGB_LINEAR are not legacy
-    // data space, however since framework doesn't distinguish them out of
-    // legacy SRGB, we have to treat them as the same for now.
-    // UNKNOWN is treated as legacy SRGB when the connected api is EGL.
     ui::Dataspace dataSpace = mConsumer->getCurrentDataSpace();
+    // treat modern dataspaces as legacy dataspaces whenever possible, until
+    // we can trust the buffer producers
     switch (dataSpace) {
         case ui::Dataspace::V0_SRGB:
             dataSpace = ui::Dataspace::SRGB;
@@ -527,15 +538,22 @@
         case ui::Dataspace::V0_SRGB_LINEAR:
             dataSpace = ui::Dataspace::SRGB_LINEAR;
             break;
-        case ui::Dataspace::UNKNOWN:
-            if (mConsumer->getCurrentApi() == NATIVE_WINDOW_API_EGL) {
-                dataSpace = ui::Dataspace::SRGB;
-            }
+        case ui::Dataspace::V0_JFIF:
+            dataSpace = ui::Dataspace::JFIF;
+            break;
+        case ui::Dataspace::V0_BT601_625:
+            dataSpace = ui::Dataspace::BT601_625;
+            break;
+        case ui::Dataspace::V0_BT601_525:
+            dataSpace = ui::Dataspace::BT601_525;
+            break;
+        case ui::Dataspace::V0_BT709:
+            dataSpace = ui::Dataspace::BT709;
             break;
         default:
             break;
     }
-    setDataSpace(dataSpace);
+    mCurrentDataSpace = dataSpace;
 
     Rect crop(mConsumer->getCurrentCrop());
     const uint32_t transform(mConsumer->getCurrentTransform());
@@ -642,10 +660,10 @@
         setCompositionType(hwcId, HWC2::Composition::Device);
     }
 
-    ALOGV("setPerFrameData: dataspace = %d", mDrawingState.dataSpace);
-    error = hwcLayer->setDataspace(mDrawingState.dataSpace);
+    ALOGV("setPerFrameData: dataspace = %d", mCurrentDataSpace);
+    error = hwcLayer->setDataspace(mCurrentDataSpace);
     if (error != HWC2::Error::None) {
-        ALOGE("[%s] Failed to set dataspace %d: %s (%d)", mName.string(), mDrawingState.dataSpace,
+        ALOGE("[%s] Failed to set dataspace %d: %s (%d)", mName.string(), mCurrentDataSpace,
               to_string(error).c_str(), static_cast<int32_t>(error));
     }
 
@@ -848,9 +866,9 @@
     auto& engine(mFlinger->getRenderEngine());
     engine.setupLayerBlending(mPremultipliedAlpha, isOpaque(s), false /* disableTexture */,
                               getColor());
-    engine.setSourceDataSpace(mCurrentState.dataSpace);
+    engine.setSourceDataSpace(mCurrentDataSpace);
 
-    if (mCurrentState.dataSpace == ui::Dataspace::BT2020_ITU_PQ &&
+    if (mCurrentDataSpace == ui::Dataspace::BT2020_ITU_PQ &&
         mConsumer->getCurrentApi() == NATIVE_WINDOW_API_MEDIA &&
         getBE().compositionInfo.mBuffer->getPixelFormat() == HAL_PIXEL_FORMAT_RGBA_1010102) {
         engine.setSourceY410BT2020(true);
diff --git a/services/surfaceflinger/ColorLayer.cpp b/services/surfaceflinger/ColorLayer.cpp
index c87b669..512564c 100644
--- a/services/surfaceflinger/ColorLayer.cpp
+++ b/services/surfaceflinger/ColorLayer.cpp
@@ -77,9 +77,9 @@
 
     setCompositionType(hwcId, HWC2::Composition::SolidColor);
 
-    error = hwcLayer->setDataspace(mDrawingState.dataSpace);
+    error = hwcLayer->setDataspace(mCurrentDataSpace);
     if (error != HWC2::Error::None) {
-        ALOGE("[%s] Failed to set dataspace %d: %s (%d)", mName.string(), mDrawingState.dataSpace,
+        ALOGE("[%s] Failed to set dataspace %d: %s (%d)", mName.string(), mCurrentDataSpace,
               to_string(error).c_str(), static_cast<int32_t>(error));
     }
 
diff --git a/services/surfaceflinger/DisplayDevice.cpp b/services/surfaceflinger/DisplayDevice.cpp
index 7c6302e..e81df8a 100644
--- a/services/surfaceflinger/DisplayDevice.cpp
+++ b/services/surfaceflinger/DisplayDevice.cpp
@@ -18,6 +18,9 @@
 #undef LOG_TAG
 #define LOG_TAG "DisplayDevice"
 
+#include <array>
+#include <unordered_set>
+
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
@@ -55,6 +58,7 @@
 using namespace android::hardware::configstore;
 using namespace android::hardware::configstore::V1_0;
 using android::ui::ColorMode;
+using android::ui::Dataspace;
 using android::ui::Hdr;
 using android::ui::RenderIntent;
 
@@ -65,6 +69,147 @@
 
 uint32_t DisplayDevice::sPrimaryDisplayOrientation = 0;
 
+namespace {
+
+// ordered list of known SDR color modes
+const std::array<ColorMode, 2> sSdrColorModes = {
+        ColorMode::DISPLAY_P3,
+        ColorMode::SRGB,
+};
+
+// ordered list of known HDR color modes
+const std::array<ColorMode, 2> sHdrColorModes = {
+        ColorMode::BT2100_PQ,
+        ColorMode::BT2100_HLG,
+};
+
+// ordered list of known SDR render intents
+const std::array<RenderIntent, 2> sSdrRenderIntents = {
+        RenderIntent::ENHANCE,
+        RenderIntent::COLORIMETRIC,
+};
+
+// ordered list of known HDR render intents
+const std::array<RenderIntent, 2> sHdrRenderIntents = {
+        RenderIntent::TONE_MAP_ENHANCE,
+        RenderIntent::TONE_MAP_COLORIMETRIC,
+};
+
+// map known color mode to dataspace
+Dataspace colorModeToDataspace(ColorMode mode) {
+    switch (mode) {
+        case ColorMode::SRGB:
+            return Dataspace::SRGB;
+        case ColorMode::DISPLAY_P3:
+            return Dataspace::DISPLAY_P3;
+        case ColorMode::BT2100_HLG:
+            return Dataspace::BT2020_HLG;
+        case ColorMode::BT2100_PQ:
+            return Dataspace::BT2020_PQ;
+        default:
+            return Dataspace::UNKNOWN;
+    }
+}
+
+// Return a list of candidate color modes.
+std::vector<ColorMode> getColorModeCandidates(ColorMode mode) {
+    std::vector<ColorMode> candidates;
+
+    // add mode itself
+    candidates.push_back(mode);
+
+    // check if mode is HDR
+    bool isHdr = false;
+    for (auto hdrMode : sHdrColorModes) {
+        if (hdrMode == mode) {
+            isHdr = true;
+            break;
+        }
+    }
+
+    // add other HDR candidates when mode is HDR
+    if (isHdr) {
+        for (auto hdrMode : sHdrColorModes) {
+            if (hdrMode != mode) {
+                candidates.push_back(hdrMode);
+            }
+        }
+    }
+
+    // add other SDR candidates
+    for (auto sdrMode : sSdrColorModes) {
+        if (sdrMode != mode) {
+            candidates.push_back(sdrMode);
+        }
+    }
+
+    return candidates;
+}
+
+// Return a list of candidate render intents.
+std::vector<RenderIntent> getRenderIntentCandidates(RenderIntent intent) {
+    std::vector<RenderIntent> candidates;
+
+    // add intent itself
+    candidates.push_back(intent);
+
+    // check if intent is HDR
+    bool isHdr = false;
+    for (auto hdrIntent : sHdrRenderIntents) {
+        if (hdrIntent == intent) {
+            isHdr = true;
+            break;
+        }
+    }
+
+    // add other HDR candidates when intent is HDR
+    if (isHdr) {
+        for (auto hdrIntent : sHdrRenderIntents) {
+            if (hdrIntent != intent) {
+                candidates.push_back(hdrIntent);
+            }
+        }
+    }
+
+    // add COLORIMETRIC
+    if (intent != RenderIntent::COLORIMETRIC) {
+        candidates.push_back(RenderIntent::COLORIMETRIC);
+    }
+
+    return candidates;
+}
+
+// Return the best color mode supported by HWC.
+ColorMode getHwcColorMode(
+        const std::unordered_map<ColorMode, std::vector<RenderIntent>>& hwcColorModes,
+        ColorMode mode) {
+    std::vector<ColorMode> candidates = getColorModeCandidates(mode);
+    for (auto candidate : candidates) {
+        auto iter = hwcColorModes.find(candidate);
+        if (iter != hwcColorModes.end()) {
+            return candidate;
+        }
+    }
+
+    return ColorMode::NATIVE;
+}
+
+// Return the best render intent supported by HWC.
+RenderIntent getHwcRenderIntent(const std::vector<RenderIntent>& hwcIntents, RenderIntent intent) {
+    std::vector<RenderIntent> candidates = getRenderIntentCandidates(intent);
+    for (auto candidate : candidates) {
+        for (auto hwcIntent : hwcIntents) {
+            if (candidate == hwcIntent) {
+                return candidate;
+            }
+        }
+    }
+
+    return RenderIntent::COLORIMETRIC;
+}
+
+} // anonymous namespace
+
 // clang-format off
 DisplayDevice::DisplayDevice(
         const sp<SurfaceFlinger>& flinger,
@@ -80,6 +225,7 @@
         bool hasWideColorGamut,
         const HdrCapabilities& hdrCapabilities,
         const int32_t supportedPerFrameMetadata,
+        const std::unordered_map<ColorMode, std::vector<RenderIntent>>& hwcColorModes,
         int initialPowerMode)
     : lastCompositionHadVisibleLayers(false),
       mFlinger(flinger),
@@ -99,7 +245,6 @@
       mFrame(Rect::INVALID_RECT),
       mPowerMode(initialPowerMode),
       mActiveConfig(0),
-      mActiveColorMode(ColorMode::NATIVE),
       mColorTransform(HAL_COLOR_TRANSFORM_IDENTITY),
       mHasWideColorGamut(hasWideColorGamut),
       mHasHdr10(false),
@@ -108,7 +253,10 @@
       mSupportedPerFrameMetadata(supportedPerFrameMetadata)
 {
     // clang-format on
-    for (Hdr hdrType : hdrCapabilities.getSupportedHdrTypes()) {
+    populateColorModes(hwcColorModes);
+
+    std::vector<Hdr> types = hdrCapabilities.getSupportedHdrTypes();
+    for (Hdr hdrType : types) {
         switch (hdrType) {
             case Hdr::HDR10:
                 mHasHdr10 = true;
@@ -124,6 +272,26 @@
         }
     }
 
+    float minLuminance = hdrCapabilities.getDesiredMinLuminance();
+    float maxLuminance = hdrCapabilities.getDesiredMaxLuminance();
+    float maxAverageLuminance = hdrCapabilities.getDesiredMaxAverageLuminance();
+
+    minLuminance = minLuminance <= 0.0 ? sDefaultMinLumiance : minLuminance;
+    maxLuminance = maxLuminance <= 0.0 ? sDefaultMaxLumiance : maxLuminance;
+    maxAverageLuminance = maxAverageLuminance <= 0.0 ? sDefaultMaxLumiance : maxAverageLuminance;
+    if (this->hasWideColorGamut()) {
+        // insert HDR10/HLG as we will force client composition for HDR10/HLG
+        // layers
+        if (!hasHDR10Support()) {
+            types.push_back(Hdr::HDR10);
+        }
+
+        if (!hasHLGSupport()) {
+            types.push_back(Hdr::HLG);
+        }
+    }
+    mHdrCapabilities = HdrCapabilities(types, maxLuminance, maxAverageLuminance, minLuminance);
+
     // initialize the display orientation transform.
     setProjection(DisplayState::eOrientationDefault, mViewport, mFrame);
 }
@@ -437,6 +605,15 @@
     TL.set(-src_x, -src_y);
     TP.set(dst_x, dst_y);
 
+    // Take the primary display rotation into account for mGlobalTransform, in
+    // case the panel is not installed aligned with the device orientation.
+    if (mType == DisplayType::DISPLAY_PRIMARY) {
+        int primaryDisplayOrientation = mFlinger->getPrimaryDisplayOrientation();
+        DisplayDevice::orientationToTransfrom(
+                (orientation + primaryDisplayOrientation) % (DisplayState::eOrientation270 + 1),
+                w, h, &R);
+    }
+
     // The viewport and frame are both in the logical orientation.
     // Apply the logical translation, scale to physical size, apply the
     // physical translation and finally rotate to the physical orientation.
@@ -508,6 +685,100 @@
     result.append(surfaceDump);
 }
 
+// Map dataspace/intent to the best matched dataspace/colorMode/renderIntent
+// supported by HWC.
+void DisplayDevice::addColorMode(
+        const std::unordered_map<ColorMode, std::vector<RenderIntent>>& hwcColorModes,
+        const ColorMode mode, const RenderIntent intent) {
+    // find the best color mode
+    const ColorMode hwcColorMode = getHwcColorMode(hwcColorModes, mode);
+
+    // find the best render intent
+    auto iter = hwcColorModes.find(hwcColorMode);
+    const auto& hwcIntents =
+            iter != hwcColorModes.end() ? iter->second : std::vector<RenderIntent>();
+    const RenderIntent hwcIntent = getHwcRenderIntent(hwcIntents, intent);
+
+    const Dataspace dataspace = colorModeToDataspace(mode);
+    const Dataspace hwcDataspace = colorModeToDataspace(hwcColorMode);
+
+    ALOGV("DisplayDevice %d/%d: map (%s, %s) to (%s, %s, %s)", mType, mHwcDisplayId,
+          dataspaceDetails(static_cast<android_dataspace_t>(dataspace)).c_str(),
+          decodeRenderIntent(intent).c_str(),
+          dataspaceDetails(static_cast<android_dataspace_t>(hwcDataspace)).c_str(),
+          decodeColorMode(hwcColorMode).c_str(), decodeRenderIntent(hwcIntent).c_str());
+
+    mColorModes[getColorModeKey(dataspace, intent)] = {hwcDataspace, hwcColorMode, hwcIntent};
+}
+
+void DisplayDevice::populateColorModes(
+        const std::unordered_map<ColorMode, std::vector<RenderIntent>>& hwcColorModes) {
+    if (!hasWideColorGamut()) {
+        return;
+    }
+
+    // collect all known SDR render intents
+    std::unordered_set<RenderIntent> sdrRenderIntents(sSdrRenderIntents.begin(),
+                                                      sSdrRenderIntents.end());
+    auto iter = hwcColorModes.find(ColorMode::SRGB);
+    if (iter != hwcColorModes.end()) {
+        for (auto intent : iter->second) {
+            sdrRenderIntents.insert(intent);
+        }
+    }
+
+    // add known SDR combinations
+    for (auto intent : sdrRenderIntents) {
+        for (auto mode : sSdrColorModes) {
+            addColorMode(hwcColorModes, mode, intent);
+        }
+    }
+
+    // add known HDR combinations
+    for (auto intent : sHdrRenderIntents) {
+        for (auto mode : sHdrColorModes) {
+            addColorMode(hwcColorModes, mode, intent);
+        }
+    }
+}
+
+bool DisplayDevice::hasRenderIntent(RenderIntent intent) const {
+    // assume a render intent is supported when SRGB supports it; we should
+    // get rid of that assumption.
+    auto iter = mColorModes.find(getColorModeKey(Dataspace::SRGB, intent));
+    return iter != mColorModes.end() && iter->second.renderIntent == intent;
+}
+
+bool DisplayDevice::hasModernHdrSupport(Dataspace dataspace) const {
+    if ((dataspace == Dataspace::BT2020_PQ && hasHDR10Support()) ||
+        (dataspace == Dataspace::BT2020_HLG && hasHLGSupport())) {
+        auto iter =
+                mColorModes.find(getColorModeKey(dataspace, RenderIntent::TONE_MAP_COLORIMETRIC));
+        return iter != mColorModes.end() && iter->second.dataspace == dataspace;
+    }
+
+    return false;
+}
+
+void DisplayDevice::getBestColorMode(Dataspace dataspace, RenderIntent intent,
+                                     Dataspace* outDataspace, ColorMode* outMode,
+                                     RenderIntent* outIntent) const {
+    auto iter = mColorModes.find(getColorModeKey(dataspace, intent));
+    if (iter != mColorModes.end()) {
+        *outDataspace = iter->second.dataspace;
+        *outMode = iter->second.colorMode;
+        *outIntent = iter->second.renderIntent;
+    } else {
+        ALOGE("map unknown (%s)/(%s) to default color mode",
+              dataspaceDetails(static_cast<android_dataspace_t>(dataspace)).c_str(),
+              decodeRenderIntent(intent).c_str());
+
+        *outDataspace = Dataspace::UNKNOWN;
+        *outMode = ColorMode::NATIVE;
+        *outIntent = RenderIntent::COLORIMETRIC;
+    }
+}
+
 std::atomic<int32_t> DisplayDeviceState::nextDisplayId(1);
 
 DisplayDeviceState::DisplayDeviceState(DisplayDevice::DisplayType type, bool isSecure)
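The candidate logic above degrades gracefully: an HDR request first falls back to the other known HDR modes, then to the known SDR modes, and finally to ColorMode::NATIVE / RenderIntent::COLORIMETRIC. A minimal standalone sketch of that ordering, using a simplified Mode enum in place of ui::ColorMode (illustrative only, not the SurfaceFlinger types):

    #include <algorithm>
    #include <array>
    #include <vector>

    enum class Mode { DisplayP3, Srgb, Bt2100Pq, Bt2100Hlg, Native };

    // Illustrative mirrors of sSdrColorModes / sHdrColorModes above.
    constexpr std::array<Mode, 2> kSdrModes = {Mode::DisplayP3, Mode::Srgb};
    constexpr std::array<Mode, 2> kHdrModes = {Mode::Bt2100Pq, Mode::Bt2100Hlg};

    // Same ordering as getColorModeCandidates(): the mode itself, then its HDR
    // peers (only when the request is HDR), then the SDR modes.
    std::vector<Mode> candidates(Mode mode) {
        std::vector<Mode> out{mode};
        const bool isHdr =
                std::find(kHdrModes.begin(), kHdrModes.end(), mode) != kHdrModes.end();
        if (isHdr) {
            for (Mode m : kHdrModes) { if (m != mode) out.push_back(m); }
        }
        for (Mode m : kSdrModes) { if (m != mode) out.push_back(m); }
        return out;
    }

candidates(Mode::Bt2100Hlg), for example, yields {BT2100_HLG, BT2100_PQ, DISPLAY_P3, SRGB}; when HWC offers none of those, getHwcColorMode() falls back to ColorMode::NATIVE.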
diff --git a/services/surfaceflinger/DisplayDevice.h b/services/surfaceflinger/DisplayDevice.h
index df5d945..b3859b6 100644
--- a/services/surfaceflinger/DisplayDevice.h
+++ b/services/surfaceflinger/DisplayDevice.h
@@ -20,6 +20,7 @@
 #include "Transform.h"
 
 #include <stdlib.h>
+#include <unordered_map>
 
 #include <math/mat4.h>
 
@@ -27,6 +28,7 @@
 #include <gui/ISurfaceComposer.h>
 #include <hardware/hwcomposer_defs.h>
 #include <ui/GraphicTypes.h>
+#include <ui/HdrCapabilities.h>
 #include <ui/Region.h>
 #include <utils/RefBase.h>
 #include <utils/Mutex.h>
@@ -53,6 +55,9 @@
 class DisplayDevice : public LightRefBase<DisplayDevice>
 {
 public:
+    constexpr static float sDefaultMinLumiance = 0.0;
+    constexpr static float sDefaultMaxLumiance = 500.0;
+
     // region in layer-stack space
     mutable Region dirtyRegion;
     // region in screen space
@@ -86,6 +91,7 @@
             bool hasWideColorGamut,
             const HdrCapabilities& hdrCapabilities,
             const int32_t supportedPerFrameMetadata,
+            const std::unordered_map<ui::ColorMode, std::vector<ui::RenderIntent>>& hwcColorModes,
             int initialPowerMode);
     // clang-format on
 
@@ -137,11 +143,32 @@
     // machine happy without actually queueing a buffer if nothing has changed
     status_t beginFrame(bool mustRecompose) const;
     status_t prepareFrame(HWComposer& hwc);
+
     bool hasWideColorGamut() const { return mHasWideColorGamut; }
+    // Whether the h/w composer has native support for a specific HDR type.
     bool hasHDR10Support() const { return mHasHdr10; }
     bool hasHLGSupport() const { return mHasHLG; }
     bool hasDolbyVisionSupport() const { return mHasDolbyVision; }
 
+    // Return true if the corresponding color mode for the HDR dataspace is
+    // supported.
+    bool hasModernHdrSupport(ui::Dataspace dataspace) const;
+
+    // The returned HdrCapabilities is the combination of HDR capabilities from
+    // hardware composer and RenderEngine. When the DisplayDevice supports wide
+    // color gamut, RenderEngine is able to simulate HDR support in Display P3
+    // color space for both PQ and HLG HDR contents. The minimum and maximum
+    // luminance will be set to sDefaultMinLumiance and sDefaultMaxLumiance
+    // respectively if hardware composer doesn't return meaningful values.
+    const HdrCapabilities& getHdrCapabilities() const { return mHdrCapabilities; }
+
+    // Return true if intent is supported by the display.
+    bool hasRenderIntent(ui::RenderIntent intent) const;
+
+    void getBestColorMode(ui::Dataspace dataspace, ui::RenderIntent intent,
+                          ui::Dataspace* outDataspace, ui::ColorMode* outMode,
+                          ui::RenderIntent* outIntent) const;
+
     void swapBuffers(HWComposer& hwc) const;
 
     // called after h/w composer has completed its set() call
@@ -247,10 +274,10 @@
     // Current active config
     int mActiveConfig;
     // current active color mode
-    ui::ColorMode mActiveColorMode;
+    ui::ColorMode mActiveColorMode = ui::ColorMode::NATIVE;
     // Current active render intent.
-    ui::RenderIntent mActiveRenderIntent;
-    ui::Dataspace mCompositionDataSpace;
+    ui::RenderIntent mActiveRenderIntent = ui::RenderIntent::COLORIMETRIC;
+    ui::Dataspace mCompositionDataSpace = ui::Dataspace::UNKNOWN;
     // Current color transform
     android_color_transform_t mColorTransform;
 
@@ -261,8 +288,28 @@
     bool mHasHdr10;
     bool mHasHLG;
     bool mHasDolbyVision;
-
+    HdrCapabilities mHdrCapabilities;
     const int32_t mSupportedPerFrameMetadata;
+
+    // Mappings from desired Dataspace/RenderIntent to the supported
+    // Dataspace/ColorMode/RenderIntent.
+    using ColorModeKey = uint64_t;
+    struct ColorModeValue {
+        ui::Dataspace dataspace;
+        ui::ColorMode colorMode;
+        ui::RenderIntent renderIntent;
+    };
+
+    static ColorModeKey getColorModeKey(ui::Dataspace dataspace, ui::RenderIntent intent) {
+        return (static_cast<uint64_t>(dataspace) << 32) | static_cast<uint32_t>(intent);
+    }
+    void populateColorModes(
+            const std::unordered_map<ui::ColorMode, std::vector<ui::RenderIntent>>& hwcColorModes);
+    void addColorMode(
+            const std::unordered_map<ui::ColorMode, std::vector<ui::RenderIntent>>& hwcColorModes,
+            const ui::ColorMode mode, const ui::RenderIntent intent);
+
+    std::unordered_map<ColorModeKey, ColorModeValue> mColorModes;
 };
 
 struct DisplayDeviceState {
@@ -305,10 +352,6 @@
     bool isSecure() const override { return mDevice->isSecure(); }
     bool needsFiltering() const override { return mDevice->needsFiltering(); }
     Rect getSourceCrop() const override { return mSourceCrop; }
-    bool getWideColorSupport() const override { return mDevice->hasWideColorGamut(); }
-    ui::Dataspace getDataSpace() const override {
-        return mDevice->getCompositionDataSpace();
-    }
 
 private:
     const sp<const DisplayDevice> mDevice;
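ColorModeKey above packs the 32-bit Dataspace value into the upper half of a 64-bit key and the RenderIntent into the lower half, so every (dataspace, intent) pair gets a unique mColorModes key. A small sketch of the packing with illustrative unpack helpers (the inverse functions are not part of the patch; they only make the layout explicit):

    #include <cstdint>

    using Key = uint64_t;

    // Same packing as DisplayDevice::getColorModeKey().
    constexpr Key makeKey(uint32_t dataspace, uint32_t intent) {
        return (static_cast<uint64_t>(dataspace) << 32) | intent;
    }

    // Hypothetical inverses, for illustration only.
    constexpr uint32_t keyDataspace(Key key) { return static_cast<uint32_t>(key >> 32); }
    constexpr uint32_t keyIntent(Key key) { return static_cast<uint32_t>(key); }

    static_assert(keyDataspace(makeKey(0x1000u, 3u)) == 0x1000u, "upper 32 bits hold the dataspace");
    static_assert(keyIntent(makeKey(0x1000u, 3u)) == 3u, "lower 32 bits hold the intent");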
diff --git a/services/surfaceflinger/DisplayHardware/HWC2.cpp b/services/surfaceflinger/DisplayHardware/HWC2.cpp
index 3947318..61758b6 100644
--- a/services/surfaceflinger/DisplayHardware/HWC2.cpp
+++ b/services/surfaceflinger/DisplayHardware/HWC2.cpp
@@ -30,8 +30,9 @@
 
 #include <android/configuration.h>
 
-#include <algorithm>
 #include <inttypes.h>
+#include <algorithm>
+#include <iterator>
 #include <set>
 
 using android::Fence;
@@ -338,6 +339,31 @@
     return Error::None;
 }
 
+Error Display::getActiveConfigIndex(int* outIndex) const {
+    ALOGV("[%" PRIu64 "] getActiveConfigIndex", mId);
+    hwc2_config_t configId = 0;
+    auto intError = mComposer.getActiveConfig(mId, &configId);
+    auto error = static_cast<Error>(intError);
+
+    if (error != Error::None) {
+        ALOGE("Unable to get active config for mId:[%" PRIu64 "]", mId);
+        *outIndex = -1;
+        return error;
+    }
+
+    auto pos = mConfigs.find(configId);
+    if (pos != mConfigs.end()) {
+        *outIndex = std::distance(mConfigs.begin(), pos);
+    } else {
+        ALOGE("[%" PRIu64 "] getActiveConfig returned unknown config %u", mId, configId);
+        // Return no error, but the caller needs to check for a negative index
+        // to detect this case
+        *outIndex = -1;
+    }
+
+    return Error::None;
+}
+
 Error Display::getChangedCompositionTypes(
         std::unordered_map<Layer*, Composition>* outTypes)
 {
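getActiveConfigIndex() above translates the active hwc2_config_t into a positional index by walking mConfigs with std::distance; the index is therefore the config's position in the map's iteration order, the same order getConfigs() uses when it builds its vector. A hedged sketch of that translation over a plain map (ConfigId and the map contents are illustrative stand-ins):

    #include <cstdint>
    #include <iterator>
    #include <unordered_map>

    using ConfigId = uint32_t;

    // Same idea as Display::getActiveConfigIndex(): -1 signals "unknown config"
    // and callers must check for it, as HWComposer::getActiveConfigIndex() does.
    int indexOf(const std::unordered_map<ConfigId, int>& configs, ConfigId id) {
        auto pos = configs.find(id);
        if (pos == configs.end()) {
            return -1;
        }
        return static_cast<int>(std::distance(configs.begin(), pos));
    }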
diff --git a/services/surfaceflinger/DisplayHardware/HWC2.h b/services/surfaceflinger/DisplayHardware/HWC2.h
index 3ac06ec..29d7a47 100644
--- a/services/surfaceflinger/DisplayHardware/HWC2.h
+++ b/services/surfaceflinger/DisplayHardware/HWC2.h
@@ -36,7 +36,6 @@
 #include <unordered_map>
 #include <unordered_set>
 #include <vector>
-#include <map>
 
 namespace android {
     class Fence;
@@ -207,6 +206,7 @@
     [[clang::warn_unused_result]] Error destroyLayer(Layer* layer);
     [[clang::warn_unused_result]] Error getActiveConfig(
             std::shared_ptr<const Config>* outConfig) const;
+    [[clang::warn_unused_result]] Error getActiveConfigIndex(int* outIndex) const;
     [[clang::warn_unused_result]] Error getChangedCompositionTypes(
             std::unordered_map<Layer*, Composition>* outTypes);
     [[clang::warn_unused_result]] Error getColorModes(
@@ -288,9 +288,7 @@
     bool mIsConnected;
     DisplayType mType;
     std::unordered_map<hwc2_layer_t, std::unique_ptr<Layer>> mLayers;
-    // The ordering in this map matters, for getConfigs(), when it is
-    // converted to a vector
-    std::map<hwc2_config_t, std::shared_ptr<const Config>> mConfigs;
+    std::unordered_map<hwc2_config_t, std::shared_ptr<const Config>> mConfigs;
 };
 
 // Convenience C++ class to access hwc2_device_t Layer functions directly.
diff --git a/services/surfaceflinger/DisplayHardware/HWComposer.cpp b/services/surfaceflinger/DisplayHardware/HWComposer.cpp
index f111f2b..f5f7a82 100644
--- a/services/surfaceflinger/DisplayHardware/HWComposer.cpp
+++ b/services/surfaceflinger/DisplayHardware/HWComposer.cpp
@@ -318,6 +318,28 @@
     return config;
 }
 
+int HWComposer::getActiveConfigIndex(int32_t displayId) const {
+    if (!isValidDisplay(displayId)) {
+        ALOGV("getActiveConfigIndex: Attempted to access invalid display %d", displayId);
+        return -1;
+    }
+    int index;
+    auto error = mDisplayData[displayId].hwcDisplay->getActiveConfigIndex(&index);
+    if (error == HWC2::Error::BadConfig) {
+        ALOGE("getActiveConfigIndex: No config active, returning -1");
+        return -1;
+    } else if (error != HWC2::Error::None) {
+        ALOGE("getActiveConfigIndex failed for display %d: %s (%d)", displayId,
+              to_string(error).c_str(), static_cast<int32_t>(error));
+        return -1;
+    } else if (index < 0) {
+        ALOGE("getActiveConfigIndex returned an unknown config for display %d", displayId);
+        return -1;
+    }
+
+    return index;
+}
+
 std::vector<ui::ColorMode> HWComposer::getColorModes(int32_t displayId) const {
     RETURN_IF_INVALID_DISPLAY(displayId, {});
 
@@ -596,7 +618,10 @@
             ALOGV("setPowerMode: Calling HWC %s", to_string(mode).c_str());
             {
                 auto error = hwcDisplay->setPowerMode(mode);
-                LOG_HWC_ERROR(("setPowerMode(" + to_string(mode) + ")").c_str(), error, displayId);
+                if (error != HWC2::Error::None) {
+                    LOG_HWC_ERROR(("setPowerMode(" + to_string(mode) + ")").c_str(),
+                                  error, displayId);
+                }
             }
             break;
         case HWC2::PowerMode::Doze:
@@ -605,14 +630,19 @@
             {
                 bool supportsDoze = false;
                 auto error = hwcDisplay->supportsDoze(&supportsDoze);
-                LOG_HWC_ERROR("supportsDoze", error, displayId);
+                if (error != HWC2::Error::None) {
+                    LOG_HWC_ERROR("supportsDoze", error, displayId);
+                }
 
                 if (!supportsDoze) {
                     mode = HWC2::PowerMode::On;
                 }
 
                 error = hwcDisplay->setPowerMode(mode);
-                LOG_HWC_ERROR(("setPowerMode(" + to_string(mode) + ")").c_str(), error, displayId);
+                if (error != HWC2::Error::None) {
+                    LOG_HWC_ERROR(("setPowerMode(" + to_string(mode) + ")").c_str(),
+                                  error, displayId);
+                }
             }
             break;
         default:
diff --git a/services/surfaceflinger/DisplayHardware/HWComposer.h b/services/surfaceflinger/DisplayHardware/HWComposer.h
index d7f3b08..f968948 100644
--- a/services/surfaceflinger/DisplayHardware/HWComposer.h
+++ b/services/surfaceflinger/DisplayHardware/HWComposer.h
@@ -164,6 +164,7 @@
 
     std::shared_ptr<const HWC2::Display::Config>
             getActiveConfig(int32_t displayId) const;
+    int getActiveConfigIndex(int32_t displayId) const;
 
     std::vector<ui::ColorMode> getColorModes(int32_t displayId) const;
 
diff --git a/services/surfaceflinger/EventControlThread.h b/services/surfaceflinger/EventControlThread.h
index 9be4e7c..cafae53 100644
--- a/services/surfaceflinger/EventControlThread.h
+++ b/services/surfaceflinger/EventControlThread.h
@@ -26,8 +26,6 @@
 
 namespace android {
 
-class SurfaceFlinger;
-
 class EventControlThread {
 public:
     virtual ~EventControlThread();
diff --git a/services/surfaceflinger/EventThread.cpp b/services/surfaceflinger/EventThread.cpp
index bb9c070..bc271c8 100644
--- a/services/surfaceflinger/EventThread.cpp
+++ b/services/surfaceflinger/EventThread.cpp
@@ -26,14 +26,12 @@
 #include <cutils/sched_policy.h>
 
 #include <gui/DisplayEventReceiver.h>
-#include <gui/IDisplayEventConnection.h>
 
 #include <utils/Errors.h>
 #include <utils/String8.h>
 #include <utils/Trace.h>
 
 #include "EventThread.h"
-#include "SurfaceFlinger.h"
 
 using namespace std::chrono_literals;
 
@@ -47,9 +45,11 @@
 
 namespace impl {
 
-EventThread::EventThread(VSyncSource* src, SurfaceFlinger& flinger, bool interceptVSyncs,
-                         const char* threadName)
-      : mVSyncSource(src), mFlinger(flinger), mInterceptVSyncs(interceptVSyncs) {
+EventThread::EventThread(VSyncSource* src, ResyncWithRateLimitCallback resyncWithRateLimitCallback,
+                         InterceptVSyncsCallback interceptVSyncsCallback, const char* threadName)
+      : mVSyncSource(src),
+        mResyncWithRateLimitCallback(resyncWithRateLimitCallback),
+        mInterceptVSyncsCallback(interceptVSyncsCallback) {
     for (auto& event : mVSyncEvent) {
         event.header.type = DisplayEventReceiver::DISPLAY_EVENT_VSYNC;
         event.header.id = 0;
@@ -118,7 +118,9 @@
 void EventThread::requestNextVsync(const sp<EventThread::Connection>& connection) {
     std::lock_guard<std::mutex> lock(mMutex);
 
-    mFlinger.resyncWithRateLimit();
+    if (mResyncWithRateLimitCallback) {
+        mResyncWithRateLimitCallback();
+    }
 
     if (connection->count < 0) {
         connection->count = 0;
@@ -216,8 +218,8 @@
             timestamp = mVSyncEvent[i].header.timestamp;
             if (timestamp) {
                 // we have a vsync event to dispatch
-                if (mInterceptVSyncs) {
-                    mFlinger.mInterceptor->saveVSyncEvent(timestamp);
+                if (mInterceptVSyncsCallback) {
+                    mInterceptVSyncsCallback(timestamp);
                 }
                 *event = mVSyncEvent[i];
                 mVSyncEvent[i].header.timestamp = 0;
diff --git a/services/surfaceflinger/EventThread.h b/services/surfaceflinger/EventThread.h
index 97f0a35..9c13ed2 100644
--- a/services/surfaceflinger/EventThread.h
+++ b/services/surfaceflinger/EventThread.h
@@ -37,6 +37,7 @@
 namespace android {
 // ---------------------------------------------------------------------------
 
+class EventThreadTest;
 class SurfaceFlinger;
 class String8;
 
@@ -82,7 +83,9 @@
     class Connection : public BnDisplayEventConnection {
     public:
         explicit Connection(EventThread* eventThread);
-        status_t postEvent(const DisplayEventReceiver::Event& event);
+        virtual ~Connection();
+
+        virtual status_t postEvent(const DisplayEventReceiver::Event& event);
 
         // count >= 1 : continuous event. count is the vsync rate
         // count == 0 : one-shot event that has not fired
@@ -90,7 +93,6 @@
         int32_t count;
 
     private:
-        virtual ~Connection();
         virtual void onFirstRef();
         status_t stealReceiveChannel(gui::BitTube* outChannel) override;
         status_t setVsyncRate(uint32_t count) override;
@@ -100,8 +102,11 @@
     };
 
 public:
-    EventThread(VSyncSource* src, SurfaceFlinger& flinger, bool interceptVSyncs,
-                const char* threadName);
+    using ResyncWithRateLimitCallback = std::function<void()>;
+    using InterceptVSyncsCallback = std::function<void(nsecs_t)>;
+
+    EventThread(VSyncSource* src, ResyncWithRateLimitCallback resyncWithRateLimitCallback,
+                InterceptVSyncsCallback interceptVSyncsCallback, const char* threadName);
     ~EventThread();
 
     sp<BnDisplayEventConnection> createEventConnection() const override;
@@ -124,6 +129,8 @@
     void setPhaseOffset(nsecs_t phaseOffset) override;
 
 private:
+    friend EventThreadTest;
+
     void threadMain();
     Vector<sp<EventThread::Connection>> waitForEventLocked(std::unique_lock<std::mutex>* lock,
                                                            DisplayEventReceiver::Event* event)
@@ -137,8 +144,9 @@
     void onVSyncEvent(nsecs_t timestamp) override;
 
     // constants
-    VSyncSource* mVSyncSource GUARDED_BY(mMutex) = nullptr;
-    SurfaceFlinger& mFlinger;
+    VSyncSource* const mVSyncSource GUARDED_BY(mMutex) = nullptr;
+    const ResyncWithRateLimitCallback mResyncWithRateLimitCallback;
+    const InterceptVSyncsCallback mInterceptVSyncsCallback;
 
     std::thread mThread;
     mutable std::mutex mMutex;
@@ -155,8 +163,6 @@
 
     // for debugging
     bool mDebugVsyncEnabled GUARDED_BY(mMutex) = false;
-
-    const bool mInterceptVSyncs = false;
 };
 
 // ---------------------------------------------------------------------------
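Replacing the SurfaceFlinger reference with ResyncWithRateLimitCallback and InterceptVSyncsCallback means the owner injects behaviour at construction time, which is what lets EventThreadTest drive the thread without a real SurfaceFlinger. A hedged sketch of how an owner might wire those callbacks (FakeOwner and wireCallbacks are placeholders, not the actual SurfaceFlinger code):

    #include <cstdint>
    #include <functional>

    using nsecs_t = int64_t;
    using ResyncWithRateLimitCallback = std::function<void()>;
    using InterceptVSyncsCallback = std::function<void(nsecs_t)>;

    struct FakeOwner {  // placeholder for the class that owns the EventThread
        void resyncWithRateLimit() {}
        void saveVSyncEvent(nsecs_t /*timestamp*/) {}
    };

    // The owner hands out lambdas instead of a reference to itself, so the
    // event thread no longer needs to know the owner's type at all.
    void wireCallbacks(FakeOwner& owner, ResyncWithRateLimitCallback& resync,
                       InterceptVSyncsCallback& intercept) {
        resync = [&owner] { owner.resyncWithRateLimit(); };
        intercept = [&owner](nsecs_t timestamp) { owner.saveVSyncEvent(timestamp); };
    }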
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index bbc974d..b94af77 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -123,7 +123,6 @@
     mCurrentState.layerStack = 0;
     mCurrentState.sequence = 0;
     mCurrentState.requested = mCurrentState.active;
-    mCurrentState.dataSpace = ui::Dataspace::UNKNOWN;
     mCurrentState.appId = 0;
     mCurrentState.type = 0;
 
@@ -1328,19 +1327,6 @@
     return true;
 }
 
-bool Layer::setDataSpace(ui::Dataspace dataSpace) {
-    if (mCurrentState.dataSpace == dataSpace) return false;
-    mCurrentState.sequence++;
-    mCurrentState.dataSpace = dataSpace;
-    mCurrentState.modified = true;
-    setTransactionFlags(eTransactionNeeded);
-    return true;
-}
-
-ui::Dataspace Layer::getDataSpace() const {
-    return mCurrentState.dataSpace;
-}
-
 uint32_t Layer::getLayerStack() const {
     auto p = mDrawingParent.promote();
     if (p == nullptr) {
@@ -1433,7 +1419,7 @@
     info.mColor = ds.color;
     info.mFlags = ds.flags;
     info.mPixelFormat = getPixelFormat();
-    info.mDataSpace = static_cast<android_dataspace>(getDataSpace());
+    info.mDataSpace = static_cast<android_dataspace>(mCurrentDataSpace);
     info.mMatrix[0][0] = ds.active.transform[0][0];
     info.mMatrix[0][1] = ds.active.transform[0][1];
     info.mMatrix[1][0] = ds.active.transform[1][0];
@@ -1532,10 +1518,16 @@
 void Layer::onDisconnect() {
     Mutex::Autolock lock(mFrameEventHistoryMutex);
     mFrameEventHistory.onDisconnect();
+    mTimeStats.onDisconnect(getName().c_str());
 }
 
 void Layer::addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps,
                                      FrameEventHistoryDelta* outDelta) {
+    if (newTimestamps) {
+        mTimeStats.setPostTime(getName().c_str(), newTimestamps->frameNumber,
+                               newTimestamps->postedTime);
+    }
+
     Mutex::Autolock lock(mFrameEventHistoryMutex);
     if (newTimestamps) {
         // If there are any unsignaled fences in the acquire timeline at this
@@ -1644,9 +1636,10 @@
     return true;
 }
 
-bool Layer::isLegacySrgbDataSpace() const {
-    return mDrawingState.dataSpace == ui::Dataspace::SRGB ||
-        mDrawingState.dataSpace == ui::Dataspace::SRGB_LINEAR;
+bool Layer::isLegacyDataSpace() const {
+    // return true when no higher bits are set
+    return !(mCurrentDataSpace & (ui::Dataspace::STANDARD_MASK |
+                ui::Dataspace::TRANSFER_MASK | ui::Dataspace::RANGE_MASK));
 }
 
 void Layer::setParent(const sp<Layer>& layer) {
@@ -1953,7 +1946,10 @@
 
     layerInfo->set_is_opaque(isOpaque(state));
     layerInfo->set_invalidate(contentDirty);
-    layerInfo->set_dataspace(dataspaceDetails(static_cast<android_dataspace>(getDataSpace())));
+
+    // XXX (b/79210409) mCurrentDataSpace is not protected
+    layerInfo->set_dataspace(dataspaceDetails(static_cast<android_dataspace>(mCurrentDataSpace)));
+
     layerInfo->set_pixel_format(decodePixelFormat(getPixelFormat()));
     LayerProtoHelper::writeToProto(getColor(), layerInfo->mutable_color());
     LayerProtoHelper::writeToProto(state.color, layerInfo->mutable_requested_color());
@@ -1972,6 +1968,7 @@
         layerInfo->set_z_order_relative_of(zOrderRelativeOf->sequence);
     }
 
+    // XXX getBE().compositionInfo.mBuffer is not protected
     auto buffer = getBE().compositionInfo.mBuffer;
     if (buffer != nullptr) {
         LayerProtoHelper::writeToProto(buffer, layerInfo->mutable_active_buffer());
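isLegacyDataSpace() above replaces the old explicit SRGB check with a bit test: a dataspace counts as legacy only when none of the STANDARD, TRANSFER, or RANGE fields are populated. A small sketch of the same test with the masks written out as plain integers (the field positions are assumptions based on the Android dataspace layout, not taken from this patch):

    #include <cstdint>

    // Assumed field layout: standard in bits 16-21, transfer in bits 22-26,
    // range in bits 27-29.
    constexpr uint32_t kStandardMask = 63u << 16;
    constexpr uint32_t kTransferMask = 31u << 22;
    constexpr uint32_t kRangeMask = 7u << 27;

    // Same test as Layer::isLegacyDataSpace(): "legacy" means none of the
    // standard/transfer/range field bits are set, i.e. only the deprecated
    // low-order dataspace values are in use.
    constexpr bool isLegacy(uint32_t dataspace) {
        return (dataspace & (kStandardMask | kTransferMask | kRangeMask)) == 0;
    }

    static_assert(isLegacy(0x201), "deprecated SRGB-style value has no field bits set");
    static_assert(!isLegacy((1u << 16) | (2u << 22) | (1u << 27)), "field-encoded values do");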
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index be3967b..7342c8b 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -41,6 +41,7 @@
 #include "LayerVector.h"
 #include "MonitoredProducer.h"
 #include "SurfaceFlinger.h"
+#include "TimeStats/TimeStats.h"
 #include "Transform.h"
 
 #include <layerproto/LayerProtoHeader.h>
@@ -208,7 +209,6 @@
         // dependent.
         Region activeTransparentRegion;
         Region requestedTransparentRegion;
-        ui::Dataspace dataSpace;
 
         int32_t appId;
         int32_t type;
@@ -286,8 +286,6 @@
     bool setTransparentRegionHint(const Region& transparent);
     bool setFlags(uint8_t flags, uint8_t mask);
     bool setLayerStack(uint32_t layerStack);
-    bool setDataSpace(ui::Dataspace dataSpace);
-    ui::Dataspace getDataSpace() const;
     uint32_t getLayerStack() const;
     void deferTransactionUntil(const sp<IBinder>& barrierHandle, uint64_t frameNumber);
     void deferTransactionUntil(const sp<Layer>& barrierLayer, uint64_t frameNumber);
@@ -298,12 +296,14 @@
     bool reparent(const sp<IBinder>& newParentHandle);
     bool detachChildren();
 
+    ui::Dataspace getDataSpace() const { return mCurrentDataSpace; }
+
     // Before color management was introduced, contents on Android had to be
     // desaturated in order to match how they appeared visually.
     // With color management, these contents will appear desaturated, thus
     // need to be saturated so that they match what they are designed for
-    // visually. When returns true, legacy SRGB data space is passed to HWC.
-    bool isLegacySrgbDataSpace() const;
+    // visually.
+    bool isLegacyDataSpace() const;
 
     // If we have received a new buffer this frame, we will pass its surface
     // damage down to hardware composer. Otherwise, we must send a region with
@@ -737,10 +737,13 @@
     FenceTimeline mAcquireTimeline;
     FenceTimeline mReleaseTimeline;
 
+    TimeStats& mTimeStats = TimeStats::getInstance();
+
     // main thread
     int mActiveBufferSlot;
     sp<GraphicBuffer> mActiveBuffer;
     sp<NativeHandle> mSidebandStream;
+    ui::Dataspace mCurrentDataSpace = ui::Dataspace::UNKNOWN;
     Rect mCurrentCrop;
     uint32_t mCurrentTransform;
     // We encode unset as -1.
diff --git a/services/surfaceflinger/RenderArea.cpp b/services/surfaceflinger/RenderArea.cpp
index 46ec8e6..1a8edf3 100644
--- a/services/surfaceflinger/RenderArea.cpp
+++ b/services/surfaceflinger/RenderArea.cpp
@@ -1,5 +1,7 @@
 #include "RenderArea.h"
 
+#include <gui/LayerState.h>
+
 namespace android {
 
 float RenderArea::getCaptureFillValue(CaptureFill captureFill) {
@@ -15,7 +17,7 @@
  * Checks that the requested width and height are valid and updates them to the render area
  * dimensions if they are set to 0
  */
-status_t RenderArea::updateDimensions() {
+status_t RenderArea::updateDimensions(int displayRotation) {
     // get screen geometry
 
     uint32_t width = getWidth();
@@ -25,6 +27,10 @@
         std::swap(width, height);
     }
 
+    if (displayRotation & DisplayState::eOrientationSwapMask) {
+        std::swap(width, height);
+    }
+
     if ((mReqWidth > width) || (mReqHeight > height)) {
         ALOGE("size mismatch (%d, %d) > (%d, %d)", mReqWidth, mReqHeight, width, height);
         return BAD_VALUE;
@@ -40,4 +46,4 @@
     return NO_ERROR;
 }
 
-} // namespace android
\ No newline at end of file
+} // namespace android
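updateDimensions() now swaps the requested width and height a second time when the display rotation itself flips the axes, in addition to the existing swap for the render area's rotation flags. A tiny sketch of that adjustment (kOrientationSwapMask is assumed to behave like DisplayState::eOrientationSwapMask, i.e. set for 90- and 270-degree rotations):

    #include <cstdint>
    #include <utility>

    constexpr int kOrientationSwapMask = 0x01;  // assumed: set for 90/270 degree rotations

    // Mirrors the new check in RenderArea::updateDimensions(): when the display
    // rotation swaps the axes, the bounds we validate against must swap too.
    void applyDisplayRotation(int displayRotation, uint32_t& width, uint32_t& height) {
        if (displayRotation & kOrientationSwapMask) {
            std::swap(width, height);
        }
    }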
diff --git a/services/surfaceflinger/RenderArea.h b/services/surfaceflinger/RenderArea.h
index 3630677..96e4b5f 100644
--- a/services/surfaceflinger/RenderArea.h
+++ b/services/surfaceflinger/RenderArea.h
@@ -30,15 +30,13 @@
     virtual bool isSecure() const = 0;
     virtual bool needsFiltering() const = 0;
     virtual Rect getSourceCrop() const = 0;
-    virtual bool getWideColorSupport() const = 0;
-    virtual ui::Dataspace getDataSpace() const = 0;
 
     virtual void render(std::function<void()> drawLayers) { drawLayers(); }
 
     int getReqHeight() const { return mReqHeight; };
     int getReqWidth() const { return mReqWidth; };
     Transform::orientation_flags getRotationFlags() const { return mRotationFlags; };
-    status_t updateDimensions();
+    status_t updateDimensions(int displayRotation);
 
     CaptureFill getCaptureFill() const { return mCaptureFill; };
 
diff --git a/services/surfaceflinger/RenderEngine/Description.cpp b/services/surfaceflinger/RenderEngine/Description.cpp
index 323bdb2..09414fd 100644
--- a/services/surfaceflinger/RenderEngine/Description.cpp
+++ b/services/surfaceflinger/RenderEngine/Description.cpp
@@ -51,10 +51,40 @@
     mProjectionMatrix = mtx;
 }
 
+void Description::setSaturationMatrix(const mat4& mtx) {
+    mSaturationMatrix = mtx;
+}
+
 void Description::setColorMatrix(const mat4& mtx) {
-    const mat4 identity;
     mColorMatrix = mtx;
-    mColorMatrixEnabled = (mtx != identity);
+}
+
+void Description::setInputTransformMatrix(const mat3& matrix) {
+    mInputTransformMatrix = matrix;
+}
+
+void Description::setOutputTransformMatrix(const mat4& matrix) {
+    mOutputTransformMatrix = matrix;
+}
+
+bool Description::hasInputTransformMatrix() const {
+    const mat3 identity;
+    return mInputTransformMatrix != identity;
+}
+
+bool Description::hasOutputTransformMatrix() const {
+    const mat4 identity;
+    return mOutputTransformMatrix != identity;
+}
+
+bool Description::hasColorMatrix() const {
+    const mat4 identity;
+    return mColorMatrix != identity;
+}
+
+bool Description::hasSaturationMatrix() const {
+    const mat4 identity;
+    return mSaturationMatrix != identity;
 }
 
 const mat4& Description::getColorMatrix() const {
@@ -73,4 +103,8 @@
     mOutputTransferFunction = transferFunction;
 }
 
+void Description::setDisplayMaxLuminance(const float maxLuminance) {
+    mDisplayMaxLuminance = maxLuminance;
+}
+
 } /* namespace android */
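Each new has*Matrix() helper reports whether the stored matrix differs from a default-constructed matrix, which for mat3/mat4 is the identity; an untouched Description therefore requests no extra shader work. A tiny sketch of the same pattern with a flat array standing in for mat4:

    #include <array>

    using Mat4 = std::array<float, 16>;  // column-major 4x4, stand-in for mat4

    constexpr Mat4 kIdentity = {1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1};

    // Same pattern as Description::hasColorMatrix() and friends: "has a matrix"
    // means "is not the identity matrix".
    bool hasMatrix(const Mat4& m) {
        return m != kIdentity;
    }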
diff --git a/services/surfaceflinger/RenderEngine/Description.h b/services/surfaceflinger/RenderEngine/Description.h
index 5854ba4..06eaf35 100644
--- a/services/surfaceflinger/RenderEngine/Description.h
+++ b/services/surfaceflinger/RenderEngine/Description.h
@@ -42,7 +42,14 @@
     void disableTexture();
     void setColor(const half4& color);
     void setProjectionMatrix(const mat4& mtx);
+    void setSaturationMatrix(const mat4& mtx);
     void setColorMatrix(const mat4& mtx);
+    void setInputTransformMatrix(const mat3& matrix);
+    void setOutputTransformMatrix(const mat4& matrix);
+    bool hasInputTransformMatrix() const;
+    bool hasOutputTransformMatrix() const;
+    bool hasColorMatrix() const;
+    bool hasSaturationMatrix() const;
     const mat4& getColorMatrix() const;
 
     void setY410BT2020(bool enable);
@@ -55,6 +62,7 @@
     };
     void setInputTransferFunction(TransferFunction transferFunction);
     void setOutputTransferFunction(TransferFunction transferFunction);
+    void setDisplayMaxLuminance(const float maxLuminance);
 
 private:
     friend class Program;
@@ -71,11 +79,6 @@
 
     // color used when texturing is disabled or when setting alpha.
     half4 mColor;
-    // projection matrix
-    mat4 mProjectionMatrix;
-
-    bool mColorMatrixEnabled = false;
-    mat4 mColorMatrix;
 
     // true if the sampled pixel values are in Y410/BT2020 rather than RGBA
     bool mY410BT2020 = false;
@@ -83,6 +86,15 @@
     // transfer functions for the input/output
     TransferFunction mInputTransferFunction = TransferFunction::LINEAR;
     TransferFunction mOutputTransferFunction = TransferFunction::LINEAR;
+
+    float mDisplayMaxLuminance;
+
+    // projection matrix
+    mat4 mProjectionMatrix;
+    mat4 mColorMatrix;
+    mat4 mSaturationMatrix;
+    mat3 mInputTransformMatrix;
+    mat4 mOutputTransformMatrix;
 };
 
 } /* namespace android */
diff --git a/services/surfaceflinger/RenderEngine/GLES20RenderEngine.cpp b/services/surfaceflinger/RenderEngine/GLES20RenderEngine.cpp
index 6e0fa32..90404fa 100644
--- a/services/surfaceflinger/RenderEngine/GLES20RenderEngine.cpp
+++ b/services/surfaceflinger/RenderEngine/GLES20RenderEngine.cpp
@@ -131,15 +131,25 @@
     // mColorBlindnessCorrection = M;
 
     if (mPlatformHasWideColor) {
-        // Compute sRGB to DisplayP3 color transform
-        // NOTE: For now, we are limiting wide-color support to
-        // Display-P3 only.
-        mSrgbToDisplayP3 = mat4(
-                ColorSpaceConnector(ColorSpace::sRGB(), ColorSpace::DisplayP3()).getTransform());
+        ColorSpace srgb(ColorSpace::sRGB());
+        ColorSpace displayP3(ColorSpace::DisplayP3());
+        ColorSpace bt2020(ColorSpace::BT2020());
 
-        // Compute BT2020 to DisplayP3 color transform
-        mBt2020ToDisplayP3 = mat4(
-                ColorSpaceConnector(ColorSpace::BT2020(), ColorSpace::DisplayP3()).getTransform());
+        // Compute sRGB to Display P3 transform matrix.
+        // NOTE: For now, we are limiting output wide color space support to
+        // Display-P3 only.
+        mSrgbToDisplayP3 = mat4(ColorSpaceConnector(srgb, displayP3).getTransform());
+
+        // Compute Display P3 to sRGB transform matrix.
+        mDisplayP3ToSrgb = mat4(ColorSpaceConnector(displayP3, srgb).getTransform());
+
+        // no chromatic adaptation needed since all color spaces use D65 for their white points.
+        mSrgbToXyz = srgb.getRGBtoXYZ();
+        mDisplayP3ToXyz = displayP3.getRGBtoXYZ();
+        mBt2020ToXyz = bt2020.getRGBtoXYZ();
+        mXyzToSrgb = mat4(srgb.getXYZtoRGB());
+        mXyzToDisplayP3 = mat4(displayP3.getXYZtoRGB());
+        mXyzToBt2020 = mat4(bt2020.getXYZtoRGB());
     }
 }
 
@@ -224,6 +234,10 @@
     mOutputDataSpace = dataspace;
 }
 
+void GLES20RenderEngine::setDisplayMaxLuminance(const float maxLuminance) {
+    mState.setDisplayMaxLuminance(maxLuminance);
+}
+
 void GLES20RenderEngine::setupLayerTexturing(const Texture& texture) {
     GLuint target = texture.getTextureTarget();
     glBindTexture(target, texture.getTextureName());
@@ -246,10 +260,12 @@
     mState.setTexture(texture);
 }
 
-mat4 GLES20RenderEngine::setupColorTransform(const mat4& colorTransform) {
-    mat4 oldTransform = mState.getColorMatrix();
+void GLES20RenderEngine::setupColorTransform(const mat4& colorTransform) {
     mState.setColorMatrix(colorTransform);
-    return oldTransform;
+}
+
+void GLES20RenderEngine::setSaturationMatrix(const mat4& saturationMatrix) {
+    mState.setSaturationMatrix(saturationMatrix);
 }
 
 void GLES20RenderEngine::disableTexturing() {
@@ -303,44 +319,100 @@
     glVertexAttribPointer(Program::position, mesh.getVertexSize(), GL_FLOAT, GL_FALSE,
                           mesh.getByteStride(), mesh.getPositions());
 
-    // TODO(b/73825729) Refactor this code block to handle BT2020 color space properly.
-    // DISPLAY_P3 is the only supported wide color output
-    if (mPlatformHasWideColor && mOutputDataSpace == Dataspace::DISPLAY_P3) {
+    // By default, DISPLAY_P3 is the only supported wide color output. However,
+    // when HDR content is present, the hardware composer may be able to handle
+    // the BT2020 data space; in that case, the output data space is set to
+    // BT2020_HLG or BT2020_PQ respectively. In the GPU fallback path we need
+    // to respect this and convert non-HDR content to HDR format.
+    if (mPlatformHasWideColor) {
         Description wideColorState = mState;
-        switch (mDataSpace) {
-            case Dataspace::DISPLAY_P3:
-                // input matches output
-                break;
-            case Dataspace::BT2020_PQ:
-            case Dataspace::BT2020_ITU_PQ:
-                wideColorState.setColorMatrix(mState.getColorMatrix() * mBt2020ToDisplayP3);
-                wideColorState.setInputTransferFunction(Description::TransferFunction::ST2084);
-                wideColorState.setOutputTransferFunction(Description::TransferFunction::SRGB);
-                break;
-            case Dataspace::BT2020_HLG:
-            case Dataspace::BT2020_ITU_HLG:
-                wideColorState.setColorMatrix(mState.getColorMatrix() * mBt2020ToDisplayP3);
-                wideColorState.setInputTransferFunction(Description::TransferFunction::HLG);
-                wideColorState.setOutputTransferFunction(Description::TransferFunction::SRGB);
-                break;
-            default:
-                // treat all other dataspaces as sRGB
-                wideColorState.setColorMatrix(mState.getColorMatrix() * mSrgbToDisplayP3);
-                switch (static_cast<Dataspace>(mDataSpace & Dataspace::TRANSFER_MASK)) {
-                    case Dataspace::TRANSFER_LINEAR:
-                        wideColorState.setInputTransferFunction(
-                                Description::TransferFunction::LINEAR);
-                        break;
-                    default:
-                        // treat all other transfer functions as sRGB
-                        wideColorState.setInputTransferFunction(
-                                Description::TransferFunction::SRGB);
-                        break;
-                }
-                wideColorState.setOutputTransferFunction(Description::TransferFunction::SRGB);
-                ALOGV("drawMesh: gamut transform applied");
-                break;
+        Dataspace inputStandard = static_cast<Dataspace>(mDataSpace & Dataspace::STANDARD_MASK);
+        Dataspace inputTransfer = static_cast<Dataspace>(mDataSpace & Dataspace::TRANSFER_MASK);
+        Dataspace outputStandard = static_cast<Dataspace>(mOutputDataSpace &
+                                                          Dataspace::STANDARD_MASK);
+        Dataspace outputTransfer = static_cast<Dataspace>(mOutputDataSpace &
+                                                          Dataspace::TRANSFER_MASK);
+        bool needsXYZConversion = needsXYZTransformMatrix();
+
+        if (needsXYZConversion) {
+            // The supported input color spaces are standard RGB, Display P3 and BT2020.
+            switch (inputStandard) {
+                case Dataspace::STANDARD_DCI_P3:
+                    wideColorState.setInputTransformMatrix(mDisplayP3ToXyz);
+                    break;
+                case Dataspace::STANDARD_BT2020:
+                    wideColorState.setInputTransformMatrix(mBt2020ToXyz);
+                    break;
+                default:
+                    wideColorState.setInputTransformMatrix(mSrgbToXyz);
+                    break;
+            }
+
+            // The supported output color spaces are BT2020, Display P3 and standard RGB.
+            switch (outputStandard) {
+                case Dataspace::STANDARD_BT2020:
+                    wideColorState.setOutputTransformMatrix(mXyzToBt2020);
+                    break;
+                case Dataspace::STANDARD_DCI_P3:
+                    wideColorState.setOutputTransformMatrix(mXyzToDisplayP3);
+                    break;
+                default:
+                    wideColorState.setOutputTransformMatrix(mXyzToSrgb);
+                    break;
+            }
+        } else if (inputStandard != outputStandard) {
+            // At this point, the input and output data spaces could both be
+            // HDR data spaces; if they match each other, we do nothing here.
+            // In addition to the case above, the input data space could be
+            // - scRGB linear
+            // - scRGB non-linear
+            // - sRGB
+            // - Display P3
+            // The output data spaces could be
+            // - sRGB
+            // - Display P3
+            if (outputStandard == Dataspace::STANDARD_BT709) {
+                wideColorState.setOutputTransformMatrix(mDisplayP3ToSrgb);
+            } else if (outputStandard == Dataspace::STANDARD_DCI_P3) {
+                wideColorState.setOutputTransformMatrix(mSrgbToDisplayP3);
+            }
         }
+
+        // we need to convert the RGB value to linear space and convert it back when:
+        // - there is a color matrix that is not an identity matrix, or
+        // - there is a saturation matrix that is not an identity matrix, or
+        // - there is an output transform matrix that is not an identity matrix, or
+        // - the input transfer function doesn't match the output transfer function.
+        if (wideColorState.hasColorMatrix() || wideColorState.hasSaturationMatrix() ||
+            wideColorState.hasOutputTransformMatrix() || inputTransfer != outputTransfer) {
+            switch (inputTransfer) {
+                case Dataspace::TRANSFER_ST2084:
+                    wideColorState.setInputTransferFunction(Description::TransferFunction::ST2084);
+                    break;
+                case Dataspace::TRANSFER_HLG:
+                    wideColorState.setInputTransferFunction(Description::TransferFunction::HLG);
+                    break;
+                case Dataspace::TRANSFER_LINEAR:
+                    wideColorState.setInputTransferFunction(Description::TransferFunction::LINEAR);
+                    break;
+                default:
+                    wideColorState.setInputTransferFunction(Description::TransferFunction::SRGB);
+                    break;
+            }
+
+            switch (outputTransfer) {
+                case Dataspace::TRANSFER_ST2084:
+                    wideColorState.setOutputTransferFunction(Description::TransferFunction::ST2084);
+                    break;
+                case Dataspace::TRANSFER_HLG:
+                    wideColorState.setOutputTransferFunction(Description::TransferFunction::HLG);
+                    break;
+                default:
+                    wideColorState.setOutputTransferFunction(Description::TransferFunction::SRGB);
+                    break;
+            }
+        }
+
         ProgramCache::getInstance().useProgram(wideColorState);
 
         glDrawArrays(mesh.getPrimitive(), 0, mesh.getVertexCount());
@@ -369,6 +441,33 @@
                         dataspaceDetails(static_cast<android_dataspace>(mOutputDataSpace)).c_str());
 }
 
+bool GLES20RenderEngine::isHdrDataSpace(const Dataspace dataSpace) const {
+    const Dataspace standard = static_cast<Dataspace>(dataSpace & Dataspace::STANDARD_MASK);
+    const Dataspace transfer = static_cast<Dataspace>(dataSpace & Dataspace::TRANSFER_MASK);
+    return standard == Dataspace::STANDARD_BT2020 &&
+        (transfer == Dataspace::TRANSFER_ST2084 || transfer == Dataspace::TRANSFER_HLG);
+}
+
+// For convenience, we want to convert the input color space to the XYZ color space first,
+// and then convert from XYZ to the output color space when
+// - SDR and HDR contents are mixed, in which case either SDR content is converted to HDR
+//   or HDR content is tone-mapped to SDR; or,
+// - HDR PQ and HLG contents are presented at the same time, in which case we want to
+//   convert HLG content to PQ content.
+// In either case, we need to operate on the Y value in the XYZ color space. Thus, when
+// either the input or the output data space is an HDR data space, and the input transfer
+// function doesn't match the output transfer function, we enable an intermediate transform
+// to the XYZ color space.
+bool GLES20RenderEngine::needsXYZTransformMatrix() const {
+    const bool isInputHdrDataSpace = isHdrDataSpace(mDataSpace);
+    const bool isOutputHdrDataSpace = isHdrDataSpace(mOutputDataSpace);
+    const Dataspace inputTransfer = static_cast<Dataspace>(mDataSpace & Dataspace::TRANSFER_MASK);
+    const Dataspace outputTransfer = static_cast<Dataspace>(mOutputDataSpace &
+                                                            Dataspace::TRANSFER_MASK);
+
+    return (isInputHdrDataSpace || isOutputHdrDataSpace) && inputTransfer != outputTransfer;
+}
+
 // ---------------------------------------------------------------------------
 } // namespace impl
 } // namespace RE
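needsXYZTransformMatrix() routes through XYZ only when HDR is involved on either side and the transfer functions differ, which is exactly when tone mapping (or its inverse) has to operate on absolute luminance. A standalone sketch of the same predicate over simplified standard/transfer fields (the enums are illustrative stand-ins for the Dataspace bit fields):

    enum class Standard { BT709, DCI_P3, BT2020 };
    enum class Transfer { SRGB, LINEAR, ST2084, HLG };

    struct DataspaceFields { Standard standard; Transfer transfer; };

    // A dataspace is HDR when it is BT2020 with a PQ (ST2084) or HLG transfer,
    // matching GLES20RenderEngine::isHdrDataSpace().
    constexpr bool isHdr(DataspaceFields ds) {
        return ds.standard == Standard::BT2020 &&
               (ds.transfer == Transfer::ST2084 || ds.transfer == Transfer::HLG);
    }

    // Go through XYZ only when HDR is involved and the transfer functions differ,
    // matching GLES20RenderEngine::needsXYZTransformMatrix().
    constexpr bool needsXyz(DataspaceFields in, DataspaceFields out) {
        return (isHdr(in) || isHdr(out)) && in.transfer != out.transfer;
    }

    static_assert(needsXyz({Standard::BT2020, Transfer::HLG},
                           {Standard::BT2020, Transfer::ST2084}), "HLG -> PQ goes via XYZ");
    static_assert(!needsXyz({Standard::BT709, Transfer::SRGB},
                            {Standard::DCI_P3, Transfer::SRGB}), "SDR -> SDR stays in RGB");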
diff --git a/services/surfaceflinger/RenderEngine/GLES20RenderEngine.h b/services/surfaceflinger/RenderEngine/GLES20RenderEngine.h
index 99da19d..de5761b 100644
--- a/services/surfaceflinger/RenderEngine/GLES20RenderEngine.h
+++ b/services/surfaceflinger/RenderEngine/GLES20RenderEngine.h
@@ -75,6 +75,20 @@
     void setSourceY410BT2020(bool enable) override;
     void setSourceDataSpace(ui::Dataspace source) override;
     void setOutputDataSpace(ui::Dataspace dataspace) override;
+    void setDisplayMaxLuminance(const float maxLuminance) override;
+
+    virtual void setupLayerTexturing(const Texture& texture);
+    virtual void setupLayerBlackedOut();
+    virtual void setupFillWithColor(float r, float g, float b, float a);
+    virtual void setupColorTransform(const mat4& colorTransform);
+    virtual void setSaturationMatrix(const mat4& saturationMatrix);
+    virtual void disableTexturing();
+    virtual void disableBlending();
+
+    virtual void drawMesh(const Mesh& mesh);
+
+    virtual size_t getMaxTextureSize() const;
+    virtual size_t getMaxViewportDims() const;
 
     // Current dataspace of layer being rendered
     ui::Dataspace mDataSpace = ui::Dataspace::UNKNOWN;
@@ -85,19 +99,19 @@
     // Currently only supporting sRGB, BT2020 and DisplayP3 color spaces
     const bool mPlatformHasWideColor = false;
     mat4 mSrgbToDisplayP3;
-    mat4 mBt2020ToDisplayP3;
+    mat4 mDisplayP3ToSrgb;
+    mat3 mSrgbToXyz;
+    mat3 mBt2020ToXyz;
+    mat3 mDisplayP3ToXyz;
+    mat4 mXyzToSrgb;
+    mat4 mXyzToDisplayP3;
+    mat4 mXyzToBt2020;
 
-    virtual void setupLayerTexturing(const Texture& texture);
-    virtual void setupLayerBlackedOut();
-    virtual void setupFillWithColor(float r, float g, float b, float a);
-    virtual mat4 setupColorTransform(const mat4& colorTransform);
-    virtual void disableTexturing();
-    virtual void disableBlending();
-
-    virtual void drawMesh(const Mesh& mesh);
-
-    virtual size_t getMaxTextureSize() const;
-    virtual size_t getMaxViewportDims() const;
+private:
+    // A data space is considered an HDR data space if it uses the BT2020 color
+    // space with a PQ or HLG transfer function.
+    bool isHdrDataSpace(const ui::Dataspace dataSpace) const;
+    bool needsXYZTransformMatrix() const;
 };
 
 // ---------------------------------------------------------------------------
diff --git a/services/surfaceflinger/RenderEngine/Program.cpp b/services/surfaceflinger/RenderEngine/Program.cpp
index 225bcf0..95adaca 100644
--- a/services/surfaceflinger/RenderEngine/Program.cpp
+++ b/services/surfaceflinger/RenderEngine/Program.cpp
@@ -58,12 +58,13 @@
         mVertexShader = vertexId;
         mFragmentShader = fragmentId;
         mInitialized = true;
-
-        mColorMatrixLoc = glGetUniformLocation(programId, "colorMatrix");
         mProjectionMatrixLoc = glGetUniformLocation(programId, "projection");
         mTextureMatrixLoc = glGetUniformLocation(programId, "texture");
         mSamplerLoc = glGetUniformLocation(programId, "sampler");
         mColorLoc = glGetUniformLocation(programId, "color");
+        mDisplayMaxLuminanceLoc = glGetUniformLocation(programId, "displayMaxLuminance");
+        mInputTransformMatrixLoc = glGetUniformLocation(programId, "inputTransformMatrix");
+        mOutputTransformMatrixLoc = glGetUniformLocation(programId, "outputTransformMatrix");
 
         // set-up the default values for our uniforms
         glUseProgram(programId);
@@ -133,8 +134,28 @@
         const float color[4] = {desc.mColor.r, desc.mColor.g, desc.mColor.b, desc.mColor.a};
         glUniform4fv(mColorLoc, 1, color);
     }
-    if (mColorMatrixLoc >= 0) {
-        glUniformMatrix4fv(mColorMatrixLoc, 1, GL_FALSE, desc.mColorMatrix.asArray());
+    if (mInputTransformMatrixLoc >= 0) {
+        // If the input transform matrix is not the identity matrix, merge the
+        // saturation matrix with the input transform matrix so that the
+        // saturation matrix is applied at the correct stage.
+        mat4 inputTransformMatrix = mat4(desc.mInputTransformMatrix) * desc.mSaturationMatrix;
+        glUniformMatrix4fv(mInputTransformMatrixLoc, 1, GL_FALSE, inputTransformMatrix.asArray());
+    }
+    if (mOutputTransformMatrixLoc >= 0) {
+        // The output transform matrix and color matrix can be combined as one matrix
+        // that is applied right before applying OETF.
+        mat4 outputTransformMatrix = desc.mColorMatrix * desc.mOutputTransformMatrix;
+        // If there is no input transform matrix, merge the saturation matrix
+        // with the output transform matrix to avoid an extra matrix
+        // multiplication in the shader.
+        if (mInputTransformMatrixLoc < 0) {
+            outputTransformMatrix *= desc.mSaturationMatrix;
+        }
+        glUniformMatrix4fv(mOutputTransformMatrixLoc, 1, GL_FALSE,
+                           outputTransformMatrix.asArray());
+    }
+    if (mDisplayMaxLuminanceLoc >= 0) {
+        glUniform1f(mDisplayMaxLuminanceLoc, desc.mDisplayMaxLuminance);
     }
     // these uniforms are always present
     glUniformMatrix4fv(mProjectionMatrixLoc, 1, GL_FALSE, desc.mProjectionMatrix.asArray());
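setUniforms() above folds the saturation matrix into whichever transform uniform exists, so the shader never pays for a separate saturation multiply: it rides with the input transform when there is one, otherwise with colorMatrix * outputTransformMatrix. A hedged sketch of that folding, using small 2x2 matrices only to keep the (non-commutative) multiplication order visible:

    #include <array>

    using Mat2 = std::array<float, 4>;  // row-major 2x2: {a, b, c, d}

    Mat2 mul(const Mat2& m, const Mat2& n) {
        return {m[0] * n[0] + m[1] * n[2], m[0] * n[1] + m[1] * n[3],
                m[2] * n[0] + m[3] * n[2], m[2] * n[1] + m[3] * n[3]};
    }

    struct Uniforms { Mat2 inputTransform, outputTransform; };

    // Mirrors the combination in Program::setUniforms(): saturation is merged
    // into the input transform when one exists, otherwise into
    // (colorMatrix * outputTransformMatrix), so it is applied exactly once.
    Uniforms combine(const Mat2& input, const Mat2& color, const Mat2& output,
                     const Mat2& saturation, bool hasInputTransform) {
        Uniforms u;
        u.inputTransform = hasInputTransform ? mul(input, saturation) : input;
        u.outputTransform = hasInputTransform ? mul(color, output)
                                              : mul(mul(color, output), saturation);
        return u;
    }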
diff --git a/services/surfaceflinger/RenderEngine/Program.h b/services/surfaceflinger/RenderEngine/Program.h
index 6e57fdd..ae796c5 100644
--- a/services/surfaceflinger/RenderEngine/Program.h
+++ b/services/surfaceflinger/RenderEngine/Program.h
@@ -69,9 +69,6 @@
     /* location of the projection matrix uniform */
     GLint mProjectionMatrixLoc;
 
-    /* location of the color matrix uniform */
-    GLint mColorMatrixLoc;
-
     /* location of the texture matrix uniform */
     GLint mTextureMatrixLoc;
 
@@ -80,6 +77,13 @@
 
     /* location of the color uniform */
     GLint mColorLoc;
+
+    /* location of display luminance uniform */
+    GLint mDisplayMaxLuminanceLoc;
+
+    /* location of transform matrix */
+    GLint mInputTransformMatrixLoc;
+    GLint mOutputTransformMatrixLoc;
 };
 
 } /* namespace android */
diff --git a/services/surfaceflinger/RenderEngine/ProgramCache.cpp b/services/surfaceflinger/RenderEngine/ProgramCache.cpp
index d1887ee..2808a1a 100644
--- a/services/surfaceflinger/RenderEngine/ProgramCache.cpp
+++ b/services/surfaceflinger/RenderEngine/ProgramCache.cpp
@@ -125,13 +125,18 @@
                  description.mPremultipliedAlpha ? Key::BLEND_PREMULT : Key::BLEND_NORMAL)
             .set(Key::OPACITY_MASK,
                  description.mOpaque ? Key::OPACITY_OPAQUE : Key::OPACITY_TRANSLUCENT)
-            .set(Key::COLOR_MATRIX_MASK,
-                 description.mColorMatrixEnabled ? Key::COLOR_MATRIX_ON : Key::COLOR_MATRIX_OFF);
+            .set(Key::Key::INPUT_TRANSFORM_MATRIX_MASK,
+                 description.hasInputTransformMatrix() ?
+                     Key::INPUT_TRANSFORM_MATRIX_ON : Key::INPUT_TRANSFORM_MATRIX_OFF)
+            .set(Key::Key::OUTPUT_TRANSFORM_MATRIX_MASK,
+                 description.hasOutputTransformMatrix() || description.hasColorMatrix() ||
+                 (!description.hasInputTransformMatrix() && description.hasSaturationMatrix()) ?
+                     Key::OUTPUT_TRANSFORM_MATRIX_ON : Key::OUTPUT_TRANSFORM_MATRIX_OFF);
 
     needs.set(Key::Y410_BT2020_MASK,
               description.mY410BT2020 ? Key::Y410_BT2020_ON : Key::Y410_BT2020_OFF);
 
-    if (needs.hasColorMatrix()) {
+    if (needs.hasTransformMatrix() || (needs.getInputTF() != needs.getOutputTF())) {
         switch (description.mInputTransferFunction) {
             case Description::TransferFunction::LINEAR:
             default:
@@ -168,6 +173,293 @@
     return needs;
 }
 
+// Generate EOTF that converts signal values to relative display light,
+// both normalized to [0, 1].
+void ProgramCache::generateEOTF(Formatter& fs, const Key& needs) {
+    switch (needs.getInputTF()) {
+        case Key::INPUT_TF_SRGB:
+            fs << R"__SHADER__(
+                float EOTF_sRGB(float srgb) {
+                    return srgb <= 0.04045 ? srgb / 12.92 : pow((srgb + 0.055) / 1.055, 2.4);
+                }
+
+                vec3 EOTF_sRGB(const vec3 srgb) {
+                    return vec3(EOTF_sRGB(srgb.r), EOTF_sRGB(srgb.g), EOTF_sRGB(srgb.b));
+                }
+
+                vec3 EOTF(const vec3 srgb) {
+                    return sign(srgb.rgb) * EOTF_sRGB(abs(srgb.rgb));
+                }
+            )__SHADER__";
+            break;
+        case Key::INPUT_TF_ST2084:
+            fs << R"__SHADER__(
+                vec3 EOTF(const highp vec3 color) {
+                    const highp float m1 = (2610.0 / 4096.0) / 4.0;
+                    const highp float m2 = (2523.0 / 4096.0) * 128.0;
+                    const highp float c1 = (3424.0 / 4096.0);
+                    const highp float c2 = (2413.0 / 4096.0) * 32.0;
+                    const highp float c3 = (2392.0 / 4096.0) * 32.0;
+
+                    highp vec3 tmp = pow(color, 1.0 / vec3(m2));
+                    tmp = max(tmp - c1, 0.0) / (c2 - c3 * tmp);
+                    return pow(tmp, 1.0 / vec3(m1));
+                }
+            )__SHADER__";
+            break;
+        case Key::INPUT_TF_HLG:
+            fs << R"__SHADER__(
+                highp float EOTF_channel(const highp float channel) {
+                    const highp float a = 0.17883277;
+                    const highp float b = 0.28466892;
+                    const highp float c = 0.55991073;
+                    return channel <= 0.5 ? channel * channel / 3.0 :
+                            (exp((channel - c) / a) + b) / 12.0;
+                }
+
+                vec3 EOTF(const highp vec3 color) {
+                    return vec3(EOTF_channel(color.r), EOTF_channel(color.g),
+                            EOTF_channel(color.b));
+                }
+            )__SHADER__";
+            break;
+        default:
+            fs << R"__SHADER__(
+                vec3 EOTF(const vec3 linear) {
+                    return linear;
+                }
+            )__SHADER__";
+            break;
+    }
+}
+
+void ProgramCache::generateToneMappingProcess(Formatter& fs, const Key& needs) {
+    // Convert relative light to absolute light.
+    switch (needs.getInputTF()) {
+        case Key::INPUT_TF_ST2084:
+            fs << R"__SHADER__(
+                highp vec3 ScaleLuminance(highp vec3 color) {
+                    return color * 10000.0;
+                }
+            )__SHADER__";
+            break;
+        case Key::INPUT_TF_HLG:
+            fs << R"__SHADER__(
+                highp vec3 ScaleLuminance(highp vec3 color) {
+                    // The formula is:
+                    // alpha * pow(Y, gamma - 1.0) * color + beta;
+                    // where alpha is 1000.0, gamma is 1.2, beta is 0.0.
+                    return color * 1000.0 * pow(color.y, 0.2);
+                }
+            )__SHADER__";
+            break;
+        default:
+            fs << R"__SHADER__(
+                highp vec3 ScaleLuminance(highp vec3 color) {
+                    return color * displayMaxLuminance;
+                }
+            )__SHADER__";
+            break;
+    }
+
+    // Tone map absolute light to display luminance range.
+    switch (needs.getInputTF()) {
+        case Key::INPUT_TF_ST2084:
+        case Key::INPUT_TF_HLG:
+            switch (needs.getOutputTF()) {
+                case Key::OUTPUT_TF_HLG:
+                    // Right now when mixed PQ and HLG contents are presented,
+                    // HLG content will always be converted to PQ. However, for
+                    // completeness, we simply clamp the value to [0.0, 1000.0].
+                    fs << R"__SHADER__(
+                        highp vec3 ToneMap(highp vec3 color) {
+                            return clamp(color, 0.0, 1000.0);
+                        }
+                    )__SHADER__";
+                    break;
+                case Key::OUTPUT_TF_ST2084:
+                    fs << R"__SHADER__(
+                        highp vec3 ToneMap(highp vec3 color) {
+                            return color;
+                        }
+                    )__SHADER__";
+                    break;
+                default:
+                    fs << R"__SHADER__(
+                        highp vec3 ToneMap(highp vec3 color) {
+                            const float maxMasteringLumi = 1000.0;
+                            const float maxContentLumi = 1000.0;
+                            const float maxInLumi = min(maxMasteringLumi, maxContentLumi);
+                            float maxOutLumi = displayMaxLuminance;
+
+                            float nits = color.y;
+
+                            // clamp to max input luminance
+                            nits = clamp(nits, 0.0, maxInLumi);
+
+                            // scale [0.0, maxInLumi] to [0.0, maxOutLumi]
+                            if (maxInLumi <= maxOutLumi) {
+                                nits *= maxOutLumi / maxInLumi;
+                            } else {
+                                // three control points
+                                const float x0 = 10.0;
+                                const float y0 = 17.0;
+                                float x1 = maxOutLumi * 0.75;
+                                float y1 = x1;
+                                float x2 = x1 + (maxInLumi - x1) / 2.0;
+                                float y2 = y1 + (maxOutLumi - y1) * 0.75;
+
+                                // horizontal distances between the last three control points
+                                float h12 = x2 - x1;
+                                float h23 = maxInLumi - x2;
+                                // tangents at the last three control points
+                                float m1 = (y2 - y1) / h12;
+                                float m3 = (maxOutLumi - y2) / h23;
+                                float m2 = (m1 + m3) / 2.0;
+
+                                if (nits < x0) {
+                                    // scale [0.0, x0] to [0.0, y0] linearly
+                                    float slope = y0 / x0;
+                                    nits *= slope;
+                                } else if (nits < x1) {
+                                    // scale [x0, x1] to [y0, y1] linearly
+                                    float slope = (y1 - y0) / (x1 - x0);
+                                    nits = y0 + (nits - x0) * slope;
+                                } else if (nits < x2) {
+                                    // scale [x1, x2] to [y1, y2] using Hermite interp
+                                    float t = (nits - x1) / h12;
+                                    nits = (y1 * (1.0 + 2.0 * t) + h12 * m1 * t) * (1.0 - t) * (1.0 - t) +
+                                            (y2 * (3.0 - 2.0 * t) + h12 * m2 * (t - 1.0)) * t * t;
+                                } else {
+                                    // scale [x2, maxInLumi] to [y2, maxOutLumi] using Hermite interp
+                                    float t = (nits - x2) / h23;
+                                    nits = (y2 * (1.0 + 2.0 * t) + h23 * m2 * t) * (1.0 - t) * (1.0 - t) +
+                                            (maxOutLumi * (3.0 - 2.0 * t) + h23 * m3 * (t - 1.0)) * t * t;
+                                }
+                            }
+
+                            return color * (nits / max(1e-6, color.y));
+                        }
+                    )__SHADER__";
+                    break;
+            }
+            break;
+        default:
+            // TODO(73825729) We need to revert the tone mapping in
+            // hardware composer properly.
+            fs << R"__SHADER__(
+                highp vec3 ToneMap(highp vec3 color) {
+                    return color;
+                }
+            )__SHADER__";
+            break;
+    }
+
+    // Convert absolute light to relative light.
+    switch (needs.getOutputTF()) {
+        case Key::OUTPUT_TF_ST2084:
+            fs << R"__SHADER__(
+                highp vec3 NormalizeLuminance(highp vec3 color) {
+                    return color / 10000.0;
+                }
+            )__SHADER__";
+            break;
+        case Key::OUTPUT_TF_HLG:
+            fs << R"__SHADER__(
+                highp vec3 NormalizeLuminance(highp vec3 color) {
+                    return color / 1000.0 * pow(color.y / 1000.0, -0.2 / 1.2);
+                }
+            )__SHADER__";
+            break;
+        default:
+            fs << R"__SHADER__(
+                highp vec3 NormalizeLuminance(highp vec3 color) {
+                    return color / displayMaxLuminance;
+                }
+            )__SHADER__";
+            break;
+    }
+}
+
+// Generate OOTF that modifies the relative scene light to relative display light.
+void ProgramCache::generateOOTF(Formatter& fs, const ProgramCache::Key& needs) {
+    if (!needs.needsToneMapping()) {
+        fs << R"__SHADER__(
+            highp vec3 OOTF(const highp vec3 color) {
+                return color;
+            }
+        )__SHADER__";
+    } else {
+        generateToneMappingProcess(fs, needs);
+        fs << R"__SHADER__(
+            highp vec3 OOTF(const highp vec3 color) {
+                return NormalizeLuminance(ToneMap(ScaleLuminance(color)));
+            }
+        )__SHADER__";
+    }
+}
+
+// Generate OETF that converts relative display light to signal values,
+// both normalized to [0, 1].
+void ProgramCache::generateOETF(Formatter& fs, const Key& needs) {
+    switch (needs.getOutputTF()) {
+        case Key::OUTPUT_TF_SRGB:
+            fs << R"__SHADER__(
+                float OETF_sRGB(const float linear) {
+                    return linear <= 0.0031308 ?
+                            linear * 12.92 : (pow(linear, 1.0 / 2.4) * 1.055) - 0.055;
+                }
+
+                vec3 OETF_sRGB(const vec3 linear) {
+                    return vec3(OETF_sRGB(linear.r), OETF_sRGB(linear.g), OETF_sRGB(linear.b));
+                }
+
+                vec3 OETF(const vec3 linear) {
+                    return sign(linear.rgb) * OETF_sRGB(abs(linear.rgb));
+                }
+            )__SHADER__";
+            break;
+        case Key::OUTPUT_TF_ST2084:
+            fs << R"__SHADER__(
+                vec3 OETF(const vec3 linear) {
+                    const highp float m1 = (2610.0 / 4096.0) / 4.0;
+                    const highp float m2 = (2523.0 / 4096.0) * 128.0;
+                    const highp float c1 = (3424.0 / 4096.0);
+                    const highp float c2 = (2413.0 / 4096.0) * 32.0;
+                    const highp float c3 = (2392.0 / 4096.0) * 32.0;
+
+                    highp vec3 tmp = pow(linear, vec3(m1));
+                    tmp = (c1 + c2 * tmp) / (1.0 + c3 * tmp);
+                    return pow(tmp, vec3(m2));
+                }
+            )__SHADER__";
+            break;
+        case Key::OUTPUT_TF_HLG:
+            fs << R"__SHADER__(
+                highp float OETF_channel(const highp float channel) {
+                    const highp float a = 0.17883277;
+                    const highp float b = 0.28466892;
+                    const highp float c = 0.55991073;
+                    return channel <= 1.0 / 12.0 ? sqrt(3.0 * channel) :
+                            a * log(12.0 * channel - b) + c;
+                }
+
+                vec3 OETF(const highp vec3 color) {
+                    return vec3(OETF_channel(color.r), OETF_channel(color.g),
+                            OETF_channel(color.b));
+                }
+            )__SHADER__";
+            break;
+        default:
+            fs << R"__SHADER__(
+                vec3 OETF(const vec3 linear) {
+                    return linear;
+                }
+            )__SHADER__";
+            break;
+    }
+}
+
 String8 ProgramCache::generateVertexShader(const Key& needs) {
     Formatter vs;
     if (needs.isTexturing()) {
@@ -220,224 +512,47 @@
             )__SHADER__";
     }
 
-    if (needs.hasColorMatrix()) {
-        fs << "uniform mat4 colorMatrix;";
-
-        // Generate EOTF that converts signal values to relative display light,
-        // both normalized to [0, 1].
-        switch (needs.getInputTF()) {
-            case Key::INPUT_TF_LINEAR:
-            default:
-                fs << R"__SHADER__(
-                    vec3 EOTF(const vec3 linear) {
-                        return linear;
-                    }
-                )__SHADER__";
-                break;
-            case Key::INPUT_TF_SRGB:
-                fs << R"__SHADER__(
-                    float EOTF_sRGB(float srgb) {
-                        return srgb <= 0.04045 ? srgb / 12.92 : pow((srgb + 0.055) / 1.055, 2.4);
-                    }
-
-                    vec3 EOTF_sRGB(const vec3 srgb) {
-                        return vec3(EOTF_sRGB(srgb.r), EOTF_sRGB(srgb.g), EOTF_sRGB(srgb.b));
-                    }
-
-                    vec3 EOTF(const vec3 srgb) {
-                        return sign(srgb.rgb) * EOTF_sRGB(abs(srgb.rgb));
-                    }
-                )__SHADER__";
-                break;
-            case Key::INPUT_TF_ST2084:
-                fs << R"__SHADER__(
-                    vec3 EOTF(const highp vec3 color) {
-                        const highp float m1 = (2610.0 / 4096.0) / 4.0;
-                        const highp float m2 = (2523.0 / 4096.0) * 128.0;
-                        const highp float c1 = (3424.0 / 4096.0);
-                        const highp float c2 = (2413.0 / 4096.0) * 32.0;
-                        const highp float c3 = (2392.0 / 4096.0) * 32.0;
-
-                        highp vec3 tmp = pow(color, 1.0 / vec3(m2));
-                        tmp = max(tmp - c1, 0.0) / (c2 - c3 * tmp);
-                        return pow(tmp, 1.0 / vec3(m1));
-                    }
-                    )__SHADER__";
-                break;
-          case Key::INPUT_TF_HLG:
-              fs << R"__SHADER__(
-                  highp float EOTF_channel(const highp float channel) {
-                      const highp float a = 0.17883277;
-                      const highp float b = 0.28466892;
-                      const highp float c = 0.55991073;
-                      return channel <= 0.5 ? channel * channel / 3.0 :
-                              (exp((channel - c) / a) + b) / 12.0;
-                  }
-
-                  vec3 EOTF(const highp vec3 color) {
-                      return vec3(EOTF_channel(color.r), EOTF_channel(color.g),
-                              EOTF_channel(color.b));
-                  }
-                  )__SHADER__";
-              break;
+    if (needs.hasTransformMatrix() || (needs.getInputTF() != needs.getOutputTF())) {
+        // Currently, display maximum luminance is needed when doing tone mapping.
+        if (needs.needsToneMapping()) {
+            fs << "uniform float displayMaxLuminance;";
         }
 
-        fs << R"__SHADER__(
-            highp float CalculateY(const highp vec3 color) {
-                // BT2020 standard uses the unadjusted KR = 0.2627,
-                // KB = 0.0593 luminance interpretation for RGB conversion.
-                return color.r * 0.262700 + color.g * 0.677998 +
-                        color.b * 0.059302;
-            }
-        )__SHADER__";
-
-        // Generate OOTF that modifies the relative display light.
-        switch(needs.getInputTF()) {
-            case Key::INPUT_TF_ST2084:
-                fs << R"__SHADER__(
-                    highp vec3 OOTF(const highp vec3 color) {
-                        const float maxLumi = 10000.0;
-                        const float maxMasteringLumi = 1000.0;
-                        const float maxContentLumi = 1000.0;
-                        const float maxInLumi = min(maxMasteringLumi, maxContentLumi);
-                        const float maxOutLumi = 500.0;
-
-                        // Calculate Y value in XYZ color space.
-                        float colorY = CalculateY(color);
-
-                        // convert to nits first
-                        float nits = colorY * maxLumi;
-
-                        // clamp to max input luminance
-                        nits = clamp(nits, 0.0, maxInLumi);
-
-                        // scale [0.0, maxInLumi] to [0.0, maxOutLumi]
-                        if (maxInLumi <= maxOutLumi) {
-                            nits *= maxOutLumi / maxInLumi;
-                        } else {
-                            // three control points
-                            const float x0 = 10.0;
-                            const float y0 = 17.0;
-                            const float x1 = maxOutLumi * 0.75;
-                            const float y1 = x1;
-                            const float x2 = x1 + (maxInLumi - x1) / 2.0;
-                            const float y2 = y1 + (maxOutLumi - y1) * 0.75;
-
-                            // horizontal distances between the last three control points
-                            const float h12 = x2 - x1;
-                            const float h23 = maxInLumi - x2;
-                            // tangents at the last three control points
-                            const float m1 = (y2 - y1) / h12;
-                            const float m3 = (maxOutLumi - y2) / h23;
-                            const float m2 = (m1 + m3) / 2.0;
-
-                            if (nits < x0) {
-                                // scale [0.0, x0] to [0.0, y0] linearly
-                                const float slope = y0 / x0;
-                                nits *= slope;
-                            } else if (nits < x1) {
-                                // scale [x0, x1] to [y0, y1] linearly
-                                const float slope = (y1 - y0) / (x1 - x0);
-                                nits = y0 + (nits - x0) * slope;
-                            } else if (nits < x2) {
-                                // scale [x1, x2] to [y1, y2] using Hermite interp
-                                float t = (nits - x1) / h12;
-                                nits = (y1 * (1.0 + 2.0 * t) + h12 * m1 * t) * (1.0 - t) * (1.0 - t) +
-                                       (y2 * (3.0 - 2.0 * t) + h12 * m2 * (t - 1.0)) * t * t;
-                            } else {
-                                // scale [x2, maxInLumi] to [y2, maxOutLumi] using Hermite interp
-                                float t = (nits - x2) / h23;
-                                nits = (y2 * (1.0 + 2.0 * t) + h23 * m2 * t) * (1.0 - t) * (1.0 - t) +
-                                       (maxOutLumi * (3.0 - 2.0 * t) + h23 * m3 * (t - 1.0)) * t * t;
-                            }
-                        }
-
-                        // convert back to [0.0, 1.0]
-                        float targetY = nits / maxOutLumi;
-                        return color * (targetY / max(1e-6, colorY));
-                    }
-                )__SHADER__";
-                break;
-            case Key::INPUT_TF_HLG:
-                fs << R"__SHADER__(
-                    highp vec3 OOTF(const highp vec3 color) {
-                        const float maxOutLumi = 500.0;
-                        const float gamma = 1.2 + 0.42 * log(maxOutLumi / 1000.0) / log(10.0);
-                        // The formula is:
-                        // alpha * pow(Y, gamma - 1.0) * color + beta;
-                        // where alpha is 1.0, beta is 0.0 as recommended in
-                        // Rec. ITU-R BT.2100-1 TABLE 5.
-                        return pow(CalculateY(color), gamma - 1.0) * color;
-                    }
-                )__SHADER__";
-                break;
-            default:
-                fs << R"__SHADER__(
-                    highp vec3 OOTF(const highp vec3 color) {
-                        return color;
-                    }
-                )__SHADER__";
+        if (needs.hasInputTransformMatrix()) {
+            fs << "uniform mat4 inputTransformMatrix;";
+            fs << R"__SHADER__(
+                highp vec3 InputTransform(const highp vec3 color) {
+                    return vec3(inputTransformMatrix * vec4(color, 1.0));
+                }
+            )__SHADER__";
+        } else {
+            fs << R"__SHADER__(
+                highp vec3 InputTransform(const highp vec3 color) {
+                    return color;
+                }
+            )__SHADER__";
         }
 
-        // Generate OETF that converts relative display light to signal values,
-        // both normalized to [0, 1]
-        switch (needs.getOutputTF()) {
-            case Key::OUTPUT_TF_LINEAR:
-            default:
-                fs << R"__SHADER__(
-                    vec3 OETF(const vec3 linear) {
-                        return linear;
-                    }
-                )__SHADER__";
-                break;
-            case Key::OUTPUT_TF_SRGB:
-                fs << R"__SHADER__(
-                    float OETF_sRGB(const float linear) {
-                        return linear <= 0.0031308 ?
-                                linear * 12.92 : (pow(linear, 1.0 / 2.4) * 1.055) - 0.055;
-                    }
-
-                    vec3 OETF_sRGB(const vec3 linear) {
-                        return vec3(OETF_sRGB(linear.r), OETF_sRGB(linear.g), OETF_sRGB(linear.b));
-                    }
-
-                    vec3 OETF(const vec3 linear) {
-                        return sign(linear.rgb) * OETF_sRGB(abs(linear.rgb));
-                    }
-                )__SHADER__";
-                break;
-            case Key::OUTPUT_TF_ST2084:
-                fs << R"__SHADER__(
-                    vec3 OETF(const vec3 linear) {
-                        const float m1 = (2610.0 / 4096.0) / 4.0;
-                        const float m2 = (2523.0 / 4096.0) * 128.0;
-                        const float c1 = (3424.0 / 4096.0);
-                        const float c2 = (2413.0 / 4096.0) * 32.0;
-                        const float c3 = (2392.0 / 4096.0) * 32.0;
-
-                        vec3 tmp = pow(linear, vec3(m1));
-                        tmp = (c1 + c2 * tmp) / (1.0 + c3 * tmp);
-                        return pow(tmp, vec3(m2));
-                    }
-                )__SHADER__";
-                break;
-            case Key::OUTPUT_TF_HLG:
-                fs << R"__SHADER__(
-                    highp float OETF_channel(const highp float channel) {
-                        const highp float a = 0.17883277;
-                        const highp float b = 0.28466892;
-                        const highp float c = 0.55991073;
-                        return channel <= 1.0 / 12.0 ? sqrt(3.0 * channel) :
-                                a * log(12.0 * channel - b) + c;
-                    }
-
-                    vec3 OETF(const highp vec3 color) {
-                        return vec3(OETF_channel(color.r), OETF_channel(color.g),
-                                OETF_channel(color.b));
-                    }
-                )__SHADER__";
-                break;
+        // the transformation from a wider colorspace to a narrower one can
+        // result in >1.0 or <0.0 pixel values
+        if (needs.hasOutputTransformMatrix()) {
+            fs << "uniform mat4 outputTransformMatrix;";
+            fs << R"__SHADER__(
+                highp vec3 OutputTransform(const highp vec3 color) {
+                    return clamp(vec3(outputTransformMatrix * vec4(color, 1.0)), 0.0, 1.0);
+                }
+            )__SHADER__";
+        } else {
+            fs << R"__SHADER__(
+                highp vec3 OutputTransform(const highp vec3 color) {
+                    return clamp(color, 0.0, 1.0);
+                }
+            )__SHADER__";
         }
+
+        generateEOTF(fs, needs);
+        generateOOTF(fs, needs);
+        generateOETF(fs, needs);
     }
 
     fs << "void main(void) {" << indent;
@@ -463,18 +578,13 @@
         }
     }
 
-    if (needs.hasColorMatrix()) {
+    if (needs.hasTransformMatrix() || (needs.getInputTF() != needs.getOutputTF())) {
         if (!needs.isOpaque() && needs.isPremultiplied()) {
             // un-premultiply if needed before linearization
             // avoid divide by 0 by adding 0.5/256 to the alpha channel
             fs << "gl_FragColor.rgb = gl_FragColor.rgb / (gl_FragColor.a + 0.0019);";
         }
-        fs << "vec4 transformed = colorMatrix * vec4(OOTF(EOTF(gl_FragColor.rgb)), 1);";
-        // the transformation from a wider colorspace to a narrower one can
-        // result in >1.0 or <0.0 pixel values
-        fs << "transformed.rgb = clamp(transformed.rgb, 0.0, 1.0);";
-        // We assume the last row is always {0,0,0,1} and we skip the division by w
-        fs << "gl_FragColor.rgb = OETF(transformed.rgb);";
+        fs << "gl_FragColor.rgb = OETF(OutputTransform(OOTF(InputTransform(EOTF(gl_FragColor.rgb)))));";
         if (!needs.isOpaque() && needs.isPremultiplied()) {
             // and re-premultiply if needed after gamma correction
             fs << "gl_FragColor.rgb = gl_FragColor.rgb * (gl_FragColor.a + 0.0019);";
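The ST 2084 (PQ) EOTF and OETF emitted above are exact inverses of each other; the constants come straight from the shader strings. A small host-side sanity check, written as illustrative standalone C++ rather than SurfaceFlinger code:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    // PQ EOTF: signal in [0, 1] -> relative display light in [0, 1].
    static double pqEotf(double signal) {
        const double m1 = (2610.0 / 4096.0) / 4.0;
        const double m2 = (2523.0 / 4096.0) * 128.0;
        const double c1 = 3424.0 / 4096.0;
        const double c2 = (2413.0 / 4096.0) * 32.0;
        const double c3 = (2392.0 / 4096.0) * 32.0;
        double tmp = std::pow(signal, 1.0 / m2);
        tmp = std::max(tmp - c1, 0.0) / (c2 - c3 * tmp);
        return std::pow(tmp, 1.0 / m1);
    }

    // PQ OETF: relative display light in [0, 1] -> signal in [0, 1].
    static double pqOetf(double linear) {
        const double m1 = (2610.0 / 4096.0) / 4.0;
        const double m2 = (2523.0 / 4096.0) * 128.0;
        const double c1 = 3424.0 / 4096.0;
        const double c2 = (2413.0 / 4096.0) * 32.0;
        const double c3 = (2392.0 / 4096.0) * 32.0;
        double tmp = std::pow(linear, m1);
        return std::pow((c1 + c2 * tmp) / (1.0 + c3 * tmp), m2);
    }

    int main() {
        // Multiplying by 10000.0, as ScaleLuminance does for PQ input, gives absolute nits.
        double light = pqEotf(0.58);
        std::printf("relative light %.6f, nits %.1f, round trip %.6f\n",
                    light, light * 10000.0, pqOetf(light));
        return 0;
    }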
diff --git a/services/surfaceflinger/RenderEngine/ProgramCache.h b/services/surfaceflinger/RenderEngine/ProgramCache.h
index f67e132..864bc3f 100644
--- a/services/surfaceflinger/RenderEngine/ProgramCache.h
+++ b/services/surfaceflinger/RenderEngine/ProgramCache.h
@@ -28,6 +28,7 @@
 namespace android {
 
 class Description;
+class Formatter;
 class Program;
 class String8;
 
@@ -71,26 +72,31 @@
             TEXTURE_EXT = 1 << TEXTURE_SHIFT,
             TEXTURE_2D = 2 << TEXTURE_SHIFT,
 
-            COLOR_MATRIX_SHIFT = 5,
-            COLOR_MATRIX_MASK = 1 << COLOR_MATRIX_SHIFT,
-            COLOR_MATRIX_OFF = 0 << COLOR_MATRIX_SHIFT,
-            COLOR_MATRIX_ON = 1 << COLOR_MATRIX_SHIFT,
+            INPUT_TRANSFORM_MATRIX_SHIFT = 5,
+            INPUT_TRANSFORM_MATRIX_MASK = 1 << INPUT_TRANSFORM_MATRIX_SHIFT,
+            INPUT_TRANSFORM_MATRIX_OFF = 0 << INPUT_TRANSFORM_MATRIX_SHIFT,
+            INPUT_TRANSFORM_MATRIX_ON = 1 << INPUT_TRANSFORM_MATRIX_SHIFT,
 
-            INPUT_TF_SHIFT = 6,
+            OUTPUT_TRANSFORM_MATRIX_SHIFT = 6,
+            OUTPUT_TRANSFORM_MATRIX_MASK = 1 << OUTPUT_TRANSFORM_MATRIX_SHIFT,
+            OUTPUT_TRANSFORM_MATRIX_OFF = 0 << OUTPUT_TRANSFORM_MATRIX_SHIFT,
+            OUTPUT_TRANSFORM_MATRIX_ON = 1 << OUTPUT_TRANSFORM_MATRIX_SHIFT,
+
+            INPUT_TF_SHIFT = 7,
             INPUT_TF_MASK = 3 << INPUT_TF_SHIFT,
             INPUT_TF_LINEAR = 0 << INPUT_TF_SHIFT,
             INPUT_TF_SRGB = 1 << INPUT_TF_SHIFT,
             INPUT_TF_ST2084 = 2 << INPUT_TF_SHIFT,
             INPUT_TF_HLG = 3 << INPUT_TF_SHIFT,
 
-            OUTPUT_TF_SHIFT = 8,
+            OUTPUT_TF_SHIFT = 9,
             OUTPUT_TF_MASK = 3 << OUTPUT_TF_SHIFT,
             OUTPUT_TF_LINEAR = 0 << OUTPUT_TF_SHIFT,
             OUTPUT_TF_SRGB = 1 << OUTPUT_TF_SHIFT,
             OUTPUT_TF_ST2084 = 2 << OUTPUT_TF_SHIFT,
             OUTPUT_TF_HLG = 3 << OUTPUT_TF_SHIFT,
 
-            Y410_BT2020_SHIFT = 10,
+            Y410_BT2020_SHIFT = 11,
             Y410_BT2020_MASK = 1 << Y410_BT2020_SHIFT,
             Y410_BT2020_OFF = 0 << Y410_BT2020_SHIFT,
             Y410_BT2020_ON = 1 << Y410_BT2020_SHIFT,
@@ -109,9 +115,40 @@
         inline bool isPremultiplied() const { return (mKey & BLEND_MASK) == BLEND_PREMULT; }
         inline bool isOpaque() const { return (mKey & OPACITY_MASK) == OPACITY_OPAQUE; }
         inline bool hasAlpha() const { return (mKey & ALPHA_MASK) == ALPHA_LT_ONE; }
-        inline bool hasColorMatrix() const { return (mKey & COLOR_MATRIX_MASK) == COLOR_MATRIX_ON; }
+        inline bool hasInputTransformMatrix() const {
+            return (mKey & INPUT_TRANSFORM_MATRIX_MASK) == INPUT_TRANSFORM_MATRIX_ON;
+        }
+        inline bool hasOutputTransformMatrix() const {
+            return (mKey & OUTPUT_TRANSFORM_MATRIX_MASK) == OUTPUT_TRANSFORM_MATRIX_ON;
+        }
+        inline bool hasTransformMatrix() const {
+            return hasInputTransformMatrix() || hasOutputTransformMatrix();
+        }
         inline int getInputTF() const { return (mKey & INPUT_TF_MASK); }
         inline int getOutputTF() const { return (mKey & OUTPUT_TF_MASK); }
+
+        // When HDR and non-HDR contents are mixed, or different types of HDR contents are
+        // mixed, we run a tone mapping pass that maps the input content to the output
+        // content. Currently, the following conversions are handled:
+        // * SDR -> HLG
+        // * SDR -> PQ
+        // * HLG -> PQ
+        inline bool needsToneMapping() const {
+            int inputTF = getInputTF();
+            int outputTF = getOutputTF();
+
+            // Return false when converting from SDR to SDR.
+            if (inputTF == Key::INPUT_TF_SRGB && outputTF == Key::OUTPUT_TF_LINEAR) {
+                return false;
+            }
+            if (inputTF == Key::INPUT_TF_LINEAR && outputTF == Key::OUTPUT_TF_SRGB) {
+                return false;
+            }
+
+            inputTF >>= Key::INPUT_TF_SHIFT;
+            outputTF >>= Key::OUTPUT_TF_SHIFT;
+            return inputTF != outputTF;
+        }
         inline bool isY410BT2020() const { return (mKey & Y410_BT2020_MASK) == Y410_BT2020_ON; }
 
         // this is the definition of a friend function -- not a method of class Needs
@@ -132,6 +169,14 @@
     void primeCache();
     // compute a cache Key from a Description
     static Key computeKey(const Description& description);
+    // Generate EOTF based on the Key.
+    static void generateEOTF(Formatter& fs, const Key& needs);
+    // Generate the tone mapping helpers needed by the OOTF.
+    static void generateToneMappingProcess(Formatter& fs, const Key& needs);
+    // Generate OOTF based on the Key.
+    static void generateOOTF(Formatter& fs, const Key& needs);
+    // Generate OETF based on the Key.
+    static void generateOETF(Formatter& fs, const Key& needs);
     // generates a program from the Key
     static Program* generateProgram(const Key& needs);
     // generates the vertex shader from the Key
diff --git a/services/surfaceflinger/RenderEngine/RenderEngine.h b/services/surfaceflinger/RenderEngine/RenderEngine.h
index f78b230..a14acaa 100644
--- a/services/surfaceflinger/RenderEngine/RenderEngine.h
+++ b/services/surfaceflinger/RenderEngine/RenderEngine.h
@@ -112,15 +112,17 @@
     virtual void setupLayerBlackedOut() = 0;
     virtual void setupFillWithColor(float r, float g, float b, float a) = 0;
 
-    virtual mat4 setupColorTransform(const mat4& /* colorTransform */) = 0;
+    virtual void setupColorTransform(const mat4& /* colorTransform */) = 0;
+    virtual void setSaturationMatrix(const mat4& /* saturationMatrix */) = 0;
 
     virtual void disableTexturing() = 0;
     virtual void disableBlending() = 0;
 
-    // wide color support
+    // HDR and wide color gamut support
     virtual void setSourceY410BT2020(bool enable) = 0;
     virtual void setSourceDataSpace(ui::Dataspace source) = 0;
     virtual void setOutputDataSpace(ui::Dataspace dataspace) = 0;
+    virtual void setDisplayMaxLuminance(const float maxLuminance) = 0;
 
     // drawing
     virtual void drawMesh(const Mesh& mesh) = 0;
@@ -223,7 +225,8 @@
 
     void checkErrors() const override;
 
-    mat4 setupColorTransform(const mat4& /* colorTransform */) override { return mat4(); }
+    void setupColorTransform(const mat4& /* colorTransform */) override {}
+    void setSaturationMatrix(const mat4& /* saturationMatrix */) override {}
 
     // internal to RenderEngine
     EGLDisplay getEGLDisplay() const;
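The new setSaturationMatrix() hook receives the Rec.709-luma saturation matrix that SurfaceFlinger previously built inline in computeSaturationMatrix() (removed in the SurfaceFlinger.cpp hunk below). A standalone sketch of that construction, using an illustrative column-major array type rather than the real mat4:

    #include <array>
    #include <cstdio>

    using Mat4 = std::array<std::array<float, 4>, 4>;  // [column][row]

    static Mat4 saturationMatrix(float saturation) {
        // Rec.709 luma coefficients, scaled by (1 - saturation), as in the removed code.
        const float lr = 0.213f * (1.0f - saturation);
        const float lg = 0.715f * (1.0f - saturation);
        const float lb = 0.072f * (1.0f - saturation);
        return {{
            {lr + saturation, lr, lr, 0.0f},
            {lg, lg + saturation, lg, 0.0f},
            {lb, lb, lb + saturation, 0.0f},
            {0.0f, 0.0f, 0.0f, 1.0f},
        }};
    }

    int main() {
        // saturation == 0 collapses every color to its Rec.709 luma (grayscale);
        // saturation == 1 yields the identity matrix.
        Mat4 gray = saturationMatrix(0.0f);
        std::printf("red column with saturation 0: %.3f %.3f %.3f\n",
                    gray[0][0], gray[0][1], gray[0][2]);
        return 0;
    }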
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index e12d7ca..f5fa478 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -88,6 +88,8 @@
 #include <cutils/compiler.h>
 
 #include <android/hardware/configstore/1.0/ISurfaceFlingerConfigs.h>
+#include <android/hardware/configstore/1.1/ISurfaceFlingerConfigs.h>
+#include <android/hardware/configstore/1.1/types.h>
 #include <configstore/Utils.h>
 
 #include <layerproto/LayerProtoParser.h>
@@ -158,29 +160,18 @@
     return std::string(value) == "true";
 }
 
-DisplayColorSetting toDisplayColorSetting(int value) {
-    switch(value) {
-        case 0:
-            return DisplayColorSetting::MANAGED;
-        case 1:
-            return DisplayColorSetting::UNMANAGED;
-        case 2:
-            return DisplayColorSetting::ENHANCED;
-        default:
-            return DisplayColorSetting::MANAGED;
-    }
-}
-
 std::string decodeDisplayColorSetting(DisplayColorSetting displayColorSetting) {
     switch(displayColorSetting) {
         case DisplayColorSetting::MANAGED:
-            return std::string("Natural Mode");
+            return std::string("Managed");
         case DisplayColorSetting::UNMANAGED:
-            return std::string("Saturated Mode");
+            return std::string("Unmanaged");
         case DisplayColorSetting::ENHANCED:
-            return std::string("Auto Color Mode");
+            return std::string("Enhanced");
+        default:
+            return std::string("Unknown ") +
+                std::to_string(static_cast<int>(displayColorSetting));
     }
-    return std::string("Unknown Display Color Setting");
 }
 
 NativeWindowSurface::~NativeWindowSurface() = default;
@@ -244,7 +235,6 @@
         mPrimaryDispSync("PrimaryDispSync"),
         mPrimaryHWVsyncEnabled(false),
         mHWVsyncAvailable(false),
-        mHasColorMatrix(false),
         mHasPoweredOff(false),
         mNumLayers(0),
         mVrFlingerRequestsDisplay(false),
@@ -283,6 +273,26 @@
     hasWideColorDisplay =
             getBool<ISurfaceFlingerConfigs, &ISurfaceFlingerConfigs::hasWideColorDisplay>(false);
 
+    V1_1::DisplayOrientation primaryDisplayOrientation =
+        getDisplayOrientation< V1_1::ISurfaceFlingerConfigs, &V1_1::ISurfaceFlingerConfigs::primaryDisplayOrientation>(
+            V1_1::DisplayOrientation::ORIENTATION_0);
+
+    switch (primaryDisplayOrientation) {
+        case V1_1::DisplayOrientation::ORIENTATION_90:
+            mPrimaryDisplayOrientation = DisplayState::eOrientation90;
+            break;
+        case V1_1::DisplayOrientation::ORIENTATION_180:
+            mPrimaryDisplayOrientation = DisplayState::eOrientation180;
+            break;
+        case V1_1::DisplayOrientation::ORIENTATION_270:
+            mPrimaryDisplayOrientation = DisplayState::eOrientation270;
+            break;
+        default:
+            mPrimaryDisplayOrientation = DisplayState::eOrientationDefault;
+            break;
+    }
+    ALOGV("Primary Display Orientation is set to %2d.", mPrimaryDisplayOrientation);
+
     mPrimaryDispSync.init(SurfaceFlinger::hasSyncFramework, SurfaceFlinger::dispSyncPresentTimeOffset);
 
     // debugging stuff...
@@ -317,8 +327,7 @@
     mLayerTripleBufferingDisabled = atoi(value);
     ALOGI_IF(mLayerTripleBufferingDisabled, "Disabling Triple Buffering");
 
-    // TODO (b/74616334): Reduce the default value once we isolate the leak
-    const size_t defaultListSize = 4 * MAX_LAYERS;
+    const size_t defaultListSize = MAX_LAYERS;
     auto listSize = property_get_int32("debug.sf.max_igbp_list_size", int32_t(defaultListSize));
     mMaxGraphicBufferProducerListSize = (listSize > 0) ? size_t(listSize) : defaultListSize;
 
@@ -637,14 +646,21 @@
     mEventThreadSource =
             std::make_unique<DispSyncSource>(&mPrimaryDispSync, SurfaceFlinger::vsyncPhaseOffsetNs,
                                              true, "app");
-    mEventThread = std::make_unique<impl::EventThread>(mEventThreadSource.get(), *this, false,
+    mEventThread = std::make_unique<impl::EventThread>(mEventThreadSource.get(),
+                                                       [this]() { resyncWithRateLimit(); },
+                                                       impl::EventThread::InterceptVSyncsCallback(),
                                                        "appEventThread");
     mSfEventThreadSource =
             std::make_unique<DispSyncSource>(&mPrimaryDispSync,
                                              SurfaceFlinger::sfVsyncPhaseOffsetNs, true, "sf");
 
-    mSFEventThread = std::make_unique<impl::EventThread>(mSfEventThreadSource.get(), *this, true,
-                                                         "sfEventThread");
+    mSFEventThread =
+            std::make_unique<impl::EventThread>(mSfEventThreadSource.get(),
+                                                [this]() { resyncWithRateLimit(); },
+                                                [this](nsecs_t timestamp) {
+                                                    mInterceptor->saveVSyncEvent(timestamp);
+                                                },
+                                                "sfEventThread");
     mEventQueue->setEventThread(mSFEventThread.get());
     mVsyncModulator.setEventThread(mSFEventThread.get());
 
@@ -723,16 +739,17 @@
 }
 
 void SurfaceFlinger::readPersistentProperties() {
+    Mutex::Autolock _l(mStateLock);
+
     char value[PROPERTY_VALUE_MAX];
 
     property_get("persist.sys.sf.color_saturation", value, "1.0");
     mGlobalSaturationFactor = atof(value);
+    updateColorMatrixLocked();
     ALOGV("Saturation is set to %.2f", mGlobalSaturationFactor);
 
     property_get("persist.sys.sf.native_mode", value, "0");
-    mDisplayColorSetting = toDisplayColorSetting(atoi(value));
-    ALOGV("Display Color Setting is set to %s.",
-          decodeDisplayColorSetting(mDisplayColorSetting).c_str());
+    mDisplayColorSetting = static_cast<DisplayColorSetting>(atoi(value));
 }
 
 void SurfaceFlinger::startBootAnim() {
@@ -887,6 +904,11 @@
         // All non-virtual displays are currently considered secure.
         info.secure = true;
 
+        if (type == DisplayDevice::DISPLAY_PRIMARY &&
+            mPrimaryDisplayOrientation & DisplayState::eOrientationSwapMask) {
+            std::swap(info.w, info.h);
+        }
+
         configs->push_back(info);
     }
 
@@ -1017,27 +1039,13 @@
 }
 
 void SurfaceFlinger::setActiveColorModeInternal(const sp<DisplayDevice>& hw,
-                                                ColorMode mode, Dataspace dataSpace) {
+                                                ColorMode mode, Dataspace dataSpace,
+                                                RenderIntent renderIntent) {
     int32_t type = hw->getDisplayType();
     ColorMode currentMode = hw->getActiveColorMode();
     Dataspace currentDataSpace = hw->getCompositionDataSpace();
     RenderIntent currentRenderIntent = hw->getActiveRenderIntent();
 
-    // Natural Mode means it's color managed and the color must be right,
-    // thus we pick RenderIntent::COLORIMETRIC as render intent.
-    // Native Mode means the display is not color managed, and whichever
-    // render intent is picked doesn't matter, thus return
-    // RenderIntent::COLORIMETRIC as default here.
-    RenderIntent renderIntent = RenderIntent::COLORIMETRIC;
-
-    // In Auto Color Mode, we want to strech to panel color space, right now
-    // only the built-in display supports it.
-    if (mDisplayColorSetting == DisplayColorSetting::ENHANCED &&
-        mBuiltinDisplaySupportsEnhance &&
-        hw->getDisplayType() == DisplayDevice::DISPLAY_PRIMARY) {
-        renderIntent = RenderIntent::ENHANCE;
-    }
-
     if (mode == currentMode && dataSpace == currentDataSpace &&
         renderIntent == currentRenderIntent) {
         return;
@@ -1087,7 +1095,8 @@
                 ALOGW("Attempt to set active color mode %s %d for virtual display",
                       decodeColorMode(mMode).c_str(), mMode);
             } else {
-                mFlinger.setActiveColorModeInternal(hw, mMode, Dataspace::UNKNOWN);
+                mFlinger.setActiveColorModeInternal(hw, mMode, Dataspace::UNKNOWN,
+                                                    RenderIntent::COLORIMETRIC);
             }
             return true;
         }
@@ -1119,31 +1128,14 @@
         return BAD_VALUE;
     }
 
-    HdrCapabilities capabilities;
-    int status = getBE().mHwc->getHdrCapabilities(
-        displayDevice->getHwcDisplayId(), &capabilities);
-    if (status == NO_ERROR) {
-        if (displayDevice->hasWideColorGamut()) {
-            std::vector<Hdr> types = capabilities.getSupportedHdrTypes();
-            // insert HDR10/HLG as we will force client composition for HDR10/HLG
-            // layers
-            if (!displayDevice->hasHDR10Support()) {
-                types.push_back(Hdr::HDR10);
-            }
-            if (!displayDevice->hasHLGSupport()) {
-                types.push_back(Hdr::HLG);
-            }
-
-            *outCapabilities = HdrCapabilities(types,
-                    capabilities.getDesiredMaxLuminance(),
-                    capabilities.getDesiredMaxAverageLuminance(),
-                    capabilities.getDesiredMinLuminance());
-        } else {
-            *outCapabilities = std::move(capabilities);
-        }
-    } else {
-        return BAD_VALUE;
-    }
+    // At this point the DisplayDevice should already be set up,
+    // meaning the luminance information has already been queried from
+    // the hardware composer and stored properly.
+    const HdrCapabilities& capabilities = displayDevice->getHdrCapabilities();
+    *outCapabilities = HdrCapabilities(capabilities.getSupportedHdrTypes(),
+                                       capabilities.getDesiredMaxLuminance(),
+                                       capabilities.getDesiredMaxAverageLuminance(),
+                                       capabilities.getDesiredMinLuminance());
 
     return NO_ERROR;
 }
@@ -1160,9 +1152,11 @@
             ALOGV("VSync Injections enabled");
             if (mVSyncInjector.get() == nullptr) {
                 mVSyncInjector = std::make_unique<InjectVSyncSource>();
-                mInjectorEventThread =
-                        std::make_unique<impl::EventThread>(mVSyncInjector.get(), *this, false,
-                                                            "injEventThread");
+                mInjectorEventThread = std::make_unique<
+                        impl::EventThread>(mVSyncInjector.get(),
+                                           [this]() { resyncWithRateLimit(); },
+                                           impl::EventThread::InterceptVSyncsCallback(),
+                                           "injEventThread");
             }
             mEventQueue->setEventThread(mInjectorEventThread.get());
         } else {
@@ -1486,9 +1480,12 @@
                     (mPreviousPresentFence->getSignalTime() ==
                             Fence::SIGNAL_TIME_PENDING);
             ATRACE_INT("FrameMissed", static_cast<int>(frameMissed));
-            if (mPropagateBackpressure && frameMissed) {
-                signalLayerUpdate();
-                break;
+            if (frameMissed) {
+                mTimeStats.incrementMissedFrames();
+                if (mPropagateBackpressure) {
+                    signalLayerUpdate();
+                    break;
+                }
             }
 
             // Now that we're going to make it to the handleMessageTransaction()
@@ -1788,6 +1785,11 @@
         mAnimFrameTracker.advanceFrame();
     }
 
+    mTimeStats.incrementTotalFrames();
+    if (mHadClientComposition) {
+        mTimeStats.incrementClientCompositionFrames();
+    }
+
     if (getBE().mHwc->isConnected(HWC_DISPLAY_PRIMARY) &&
             hw->getPowerMode() == HWC_POWER_MODE_OFF) {
         return;
@@ -1876,54 +1878,36 @@
     }
 }
 
-mat4 SurfaceFlinger::computeSaturationMatrix() const {
-    if (mGlobalSaturationFactor == 1.0f) {
-        return mat4();
-    }
-
-    // Rec.709 luma coefficients
-    float3 luminance{0.213f, 0.715f, 0.072f};
-    luminance *= 1.0f - mGlobalSaturationFactor;
-    return mat4(
-        vec4{luminance.r + mGlobalSaturationFactor, luminance.r, luminance.r, 0.0f},
-        vec4{luminance.g, luminance.g + mGlobalSaturationFactor, luminance.g, 0.0f},
-        vec4{luminance.b, luminance.b, luminance.b + mGlobalSaturationFactor, 0.0f},
-        vec4{0.0f, 0.0f, 0.0f, 1.0f}
-    );
-}
-
-// Returns a dataspace that fits all visible layers.  The returned dataspace
+// Returns a data space that fits all visible layers.  The returned data space
 // can only be one of
-//
-//  - Dataspace::V0_SRGB
+//  - Dataspace::SRGB (use legacy dataspace and let HWC saturate when colors are enhanced)
 //  - Dataspace::DISPLAY_P3
-//  - Dataspace::V0_SCRGB_LINEAR
-// TODO(b/73825729) Add BT2020 data space.
-ui::Dataspace SurfaceFlinger::getBestDataspace(
-        const sp<const DisplayDevice>& displayDevice) const {
-    Dataspace bestDataspace = Dataspace::V0_SRGB;
+// The returned HDR data space is one of
+//  - Dataspace::UNKNOWN
+//  - Dataspace::BT2020_HLG
+//  - Dataspace::BT2020_PQ
+Dataspace SurfaceFlinger::getBestDataspace(
+    const sp<const DisplayDevice>& displayDevice, Dataspace* outHdrDataSpace) const {
+    Dataspace bestDataSpace = Dataspace::SRGB;
+    *outHdrDataSpace = Dataspace::UNKNOWN;
+
     for (const auto& layer : displayDevice->getVisibleLayersSortedByZ()) {
         switch (layer->getDataSpace()) {
             case Dataspace::V0_SCRGB:
             case Dataspace::V0_SCRGB_LINEAR:
-                // return immediately
-                return Dataspace::V0_SCRGB_LINEAR;
             case Dataspace::DISPLAY_P3:
-                bestDataspace = Dataspace::DISPLAY_P3;
+                bestDataSpace = Dataspace::DISPLAY_P3;
                 break;
-            // Historically, HDR dataspaces are ignored by SurfaceFlinger. But
-            // since SurfaceFlinger simulates HDR support now, it should honor
-            // them unless there is also native support.
             case Dataspace::BT2020_PQ:
             case Dataspace::BT2020_ITU_PQ:
-                if (!displayDevice->hasHDR10Support()) {
-                    return Dataspace::V0_SCRGB_LINEAR;
-                }
+                *outHdrDataSpace = Dataspace::BT2020_PQ;
                 break;
             case Dataspace::BT2020_HLG:
             case Dataspace::BT2020_ITU_HLG:
-                if (!displayDevice->hasHLGSupport()) {
-                    return Dataspace::V0_SCRGB_LINEAR;
+                // When there's mixed PQ content and HLG content, we set the HDR
+                // data space to be BT2020_PQ and convert HLG to PQ.
+                if (*outHdrDataSpace == Dataspace::UNKNOWN) {
+                    *outHdrDataSpace = Dataspace::BT2020_HLG;
                 }
                 break;
             default:
@@ -1931,30 +1915,45 @@
         }
     }
 
-    return bestDataspace;
+    return bestDataSpace;
 }
 
 // Pick the ColorMode / Dataspace for the display device.
-// TODO(b/73825729) Add BT2020 color mode.
 void SurfaceFlinger::pickColorMode(const sp<DisplayDevice>& displayDevice,
-        ColorMode* outMode, Dataspace* outDataSpace) const {
+                                   ColorMode* outMode, Dataspace* outDataSpace,
+                                   RenderIntent* outRenderIntent) const {
     if (mDisplayColorSetting == DisplayColorSetting::UNMANAGED) {
         *outMode = ColorMode::NATIVE;
         *outDataSpace = Dataspace::UNKNOWN;
+        *outRenderIntent = RenderIntent::COLORIMETRIC;
         return;
     }
 
-    switch (getBestDataspace(displayDevice)) {
-        case Dataspace::DISPLAY_P3:
-        case Dataspace::V0_SCRGB_LINEAR:
-            *outMode = ColorMode::DISPLAY_P3;
-            *outDataSpace = Dataspace::DISPLAY_P3;
+    Dataspace hdrDataSpace;
+    Dataspace bestDataSpace = getBestDataspace(displayDevice, &hdrDataSpace);
+
+    RenderIntent intent;
+    switch (mDisplayColorSetting) {
+        case DisplayColorSetting::MANAGED:
+        case DisplayColorSetting::UNMANAGED:
+            intent = RenderIntent::COLORIMETRIC;
             break;
-        default:
-            *outMode = ColorMode::SRGB;
-            *outDataSpace = Dataspace::V0_SRGB;
+        case DisplayColorSetting::ENHANCED:
+            intent = RenderIntent::ENHANCE;
+            break;
+        default: // vendor display color setting
+            intent = static_cast<RenderIntent>(mDisplayColorSetting);
             break;
     }
+
+    // respect hdrDataSpace only when there is modern HDR support
+    if (hdrDataSpace != Dataspace::UNKNOWN && displayDevice->hasModernHdrSupport(hdrDataSpace)) {
+        bestDataSpace = hdrDataSpace;
+        intent = mDisplayColorSetting == DisplayColorSetting::ENHANCED ?
+            RenderIntent::TONE_MAP_ENHANCE : RenderIntent::TONE_MAP_COLORIMETRIC;
+    }
+
+    displayDevice->getBestColorMode(bestDataSpace, intent, outDataSpace, outMode, outRenderIntent);
 }
 
 void SurfaceFlinger::setUpHWComposer() {
@@ -2017,9 +2016,6 @@
         }
     }
 
-
-    mat4 colorMatrix = mColorMatrix * computeSaturationMatrix() * mDaltonizer();
-
     // Set the per-frame data
     for (size_t displayId = 0; displayId < mDisplays.size(); ++displayId) {
         auto& displayDevice = mDisplays[displayId];
@@ -2028,9 +2024,9 @@
         if (hwcId < 0) {
             continue;
         }
-        if (colorMatrix != mPreviousColorMatrix) {
-            displayDevice->setColorTransform(colorMatrix);
-            status_t result = getBE().mHwc->setColorTransform(hwcId, colorMatrix);
+        if (mDrawingState.colorMatrixChanged) {
+            displayDevice->setColorTransform(mDrawingState.colorMatrix);
+            status_t result = getBE().mHwc->setColorTransform(hwcId, mDrawingState.colorMatrix);
             ALOGE_IF(result != NO_ERROR, "Failed to set color transform on "
                     "display %zd: %d", displayId, result);
         }
@@ -2058,12 +2054,13 @@
         if (hasWideColorDisplay) {
             ColorMode colorMode;
             Dataspace dataSpace;
-            pickColorMode(displayDevice, &colorMode, &dataSpace);
-            setActiveColorModeInternal(displayDevice, colorMode, dataSpace);
+            RenderIntent renderIntent;
+            pickColorMode(displayDevice, &colorMode, &dataSpace, &renderIntent);
+            setActiveColorModeInternal(displayDevice, colorMode, dataSpace, renderIntent);
         }
     }
 
-    mPreviousColorMatrix = colorMatrix;
+    mDrawingState.colorMatrixChanged = false;
 
     for (size_t displayId = 0; displayId < mDisplays.size(); ++displayId) {
         auto& displayDevice = mDisplays[displayId];
@@ -2268,6 +2265,8 @@
         const wp<IBinder>& display, int hwcId, const DisplayDeviceState& state,
         const sp<DisplaySurface>& dispSurface, const sp<IGraphicBufferProducer>& producer) {
     bool hasWideColorGamut = false;
+    std::unordered_map<ColorMode, std::vector<RenderIntent>> hwcColorModes;
+
     if (hasWideColorDisplay) {
         std::vector<ColorMode> modes = getHwComposer().getColorModes(hwcId);
         for (ColorMode colorMode : modes) {
@@ -2277,21 +2276,13 @@
                 case ColorMode::DCI_P3:
                     hasWideColorGamut = true;
                     break;
-                // TODO(lpy) Handle BT2020, BT2100_PQ and BT2100_HLG properly.
                 default:
                     break;
             }
 
             std::vector<RenderIntent> renderIntents = getHwComposer().getRenderIntents(hwcId,
                                                                                        colorMode);
-            if (state.type == DisplayDevice::DISPLAY_PRIMARY) {
-                for (auto intent : renderIntents) {
-                    if (intent == RenderIntent::ENHANCE) {
-                        mBuiltinDisplaySupportsEnhance = true;
-                        break;
-                    }
-                }
-            }
+            hwcColorModes.emplace(colorMode, renderIntents);
         }
     }
 
@@ -2331,7 +2322,7 @@
                               dispSurface, std::move(renderSurface), displayWidth, displayHeight,
                               hasWideColorGamut, hdrCapabilities,
                               getHwComposer().getSupportedPerFrameMetadata(hwcId),
-                              initialPowerMode);
+                              hwcColorModes, initialPowerMode);
 
     if (maxFrameBufferAcquiredBuffers >= 3) {
         nativeWindowSurface->preallocateBuffers();
@@ -2341,9 +2332,13 @@
     Dataspace defaultDataSpace = Dataspace::UNKNOWN;
     if (hasWideColorGamut) {
         defaultColorMode = ColorMode::SRGB;
-        defaultDataSpace = Dataspace::V0_SRGB;
+        defaultDataSpace = Dataspace::SRGB;
     }
-    setActiveColorModeInternal(hw, defaultColorMode, defaultDataSpace);
+    setActiveColorModeInternal(hw, defaultColorMode, defaultDataSpace,
+                               RenderIntent::COLORIMETRIC);
+    if (state.type < DisplayDevice::DISPLAY_VIRTUAL) {
+        hw->setActiveConfig(getHwComposer().getActiveConfigIndex(state.type));
+    }
     hw->setLayerStack(state.layerStack);
     hw->setProjection(state.orientation, state.viewport, state.frame);
     hw->setDisplayName(state.displayName);
@@ -2652,6 +2647,9 @@
     mAnimCompositionPending = mAnimTransactionPending;
 
     mDrawingState = mCurrentState;
+    // clear the "changed" flags in current state
+    mCurrentState.colorMatrixChanged = false;
+
     mDrawingState.traverseInZOrder([](Layer* layer) {
         layer->commitChildList();
     });
@@ -2895,19 +2893,11 @@
     const DisplayRenderArea renderArea(displayDevice);
     const auto hwcId = displayDevice->getHwcDisplayId();
     const bool hasClientComposition = getBE().mHwc->hasClientComposition(hwcId);
-    const bool hasDeviceComposition = getBE().mHwc->hasDeviceComposition(hwcId);
-    const bool skipClientColorTransform = getBE().mHwc->hasCapability(
-        HWC2::Capability::SkipClientColorTransform);
     ATRACE_INT("hasClientComposition", hasClientComposition);
 
-    mat4 oldColorMatrix;
-    mat4 legacySrgbSaturationMatrix = mLegacySrgbSaturationMatrix;
-    const bool applyColorMatrix = !hasDeviceComposition && !skipClientColorTransform;
-    if (applyColorMatrix) {
-        mat4 colorMatrix = mColorMatrix * computeSaturationMatrix() * mDaltonizer();
-        oldColorMatrix = getRenderEngine().setupColorTransform(colorMatrix);
-        legacySrgbSaturationMatrix = colorMatrix * legacySrgbSaturationMatrix;
-    }
+    bool applyColorMatrix = false;
+    bool needsLegacyColorMatrix = false;
+    bool legacyColorMatrixApplied = false;
 
     if (hasClientComposition) {
         ALOGV("hasClientComposition");
@@ -2917,6 +2907,22 @@
             outputDataspace = displayDevice->getCompositionDataSpace();
         }
         getBE().mRenderEngine->setOutputDataSpace(outputDataspace);
+        getBE().mRenderEngine->setDisplayMaxLuminance(
+                displayDevice->getHdrCapabilities().getDesiredMaxLuminance());
+
+        const bool hasDeviceComposition = getBE().mHwc->hasDeviceComposition(hwcId);
+        const bool skipClientColorTransform = getBE().mHwc->hasCapability(
+            HWC2::Capability::SkipClientColorTransform);
+
+        applyColorMatrix = !hasDeviceComposition && !skipClientColorTransform;
+        if (applyColorMatrix) {
+            getRenderEngine().setupColorTransform(mDrawingState.colorMatrix);
+        }
+
+        needsLegacyColorMatrix =
+            (displayDevice->getActiveRenderIntent() >= RenderIntent::ENHANCE &&
+             outputDataspace != Dataspace::UNKNOWN &&
+             outputDataspace != Dataspace::SRGB);
 
         if (!displayDevice->makeCurrent()) {
             ALOGW("DisplayDevice::makeCurrent failed. Aborting surface composition for display %s",
@@ -2979,69 +2985,58 @@
 
     ALOGV("Rendering client layers");
     const Transform& displayTransform = displayDevice->getTransform();
-    if (hwcId >= 0) {
-        // we're using h/w composer
-        bool firstLayer = true;
-        for (auto& layer : displayDevice->getVisibleLayersSortedByZ()) {
-            const Region clip(bounds.intersect(
-                    displayTransform.transform(layer->visibleRegion)));
-            ALOGV("Layer: %s", layer->getName().string());
-            ALOGV("  Composition type: %s",
-                    to_string(layer->getCompositionType(hwcId)).c_str());
-            if (!clip.isEmpty()) {
-                switch (layer->getCompositionType(hwcId)) {
-                    case HWC2::Composition::Cursor:
-                    case HWC2::Composition::Device:
-                    case HWC2::Composition::Sideband:
-                    case HWC2::Composition::SolidColor: {
-                        const Layer::State& state(layer->getDrawingState());
-                        if (layer->getClearClientTarget(hwcId) && !firstLayer &&
-                                layer->isOpaque(state) && (state.color.a == 1.0f)
-                                && hasClientComposition) {
-                            // never clear the very first layer since we're
-                            // guaranteed the FB is already cleared
-                            layer->clearWithOpenGL(renderArea);
-                        }
-                        break;
+    bool firstLayer = true;
+    for (auto& layer : displayDevice->getVisibleLayersSortedByZ()) {
+        const Region clip(bounds.intersect(
+                displayTransform.transform(layer->visibleRegion)));
+        ALOGV("Layer: %s", layer->getName().string());
+        ALOGV("  Composition type: %s",
+                to_string(layer->getCompositionType(hwcId)).c_str());
+        if (!clip.isEmpty()) {
+            switch (layer->getCompositionType(hwcId)) {
+                case HWC2::Composition::Cursor:
+                case HWC2::Composition::Device:
+                case HWC2::Composition::Sideband:
+                case HWC2::Composition::SolidColor: {
+                    const Layer::State& state(layer->getDrawingState());
+                    if (layer->getClearClientTarget(hwcId) && !firstLayer &&
+                            layer->isOpaque(state) && (state.color.a == 1.0f)
+                            && hasClientComposition) {
+                        // never clear the very first layer since we're
+                        // guaranteed the FB is already cleared
+                        layer->clearWithOpenGL(renderArea);
                     }
-                    case HWC2::Composition::Client: {
-                        // Only apply saturation matrix layer that is legacy SRGB dataspace
-                        // when auto color mode is on.
-                        bool restore = false;
-                        mat4 savedMatrix;
-                        if (mDisplayColorSetting == DisplayColorSetting::ENHANCED &&
-                            layer->isLegacySrgbDataSpace()) {
-                            savedMatrix =
-                                getRenderEngine().setupColorTransform(legacySrgbSaturationMatrix);
-                            restore = true;
-                        }
-                        layer->draw(renderArea, clip);
-                        if (restore) {
-                            getRenderEngine().setupColorTransform(savedMatrix);
-                        }
-                        break;
-                    }
-                    default:
-                        break;
+                    break;
                 }
-            } else {
-                ALOGV("  Skipping for empty clip");
+                case HWC2::Composition::Client: {
+                    // switch color matrices lazily
+                    if (layer->isLegacyDataSpace() && needsLegacyColorMatrix) {
+                        if (!legacyColorMatrixApplied) {
+                            getRenderEngine().setSaturationMatrix(mLegacySrgbSaturationMatrix);
+                            legacyColorMatrixApplied = true;
+                        }
+                    } else if (legacyColorMatrixApplied) {
+                        getRenderEngine().setSaturationMatrix(mat4());
+                        legacyColorMatrixApplied = false;
+                    }
+
+                    layer->draw(renderArea, clip);
+                    break;
+                }
+                default:
+                    break;
             }
-            firstLayer = false;
+        } else {
+            ALOGV("  Skipping for empty clip");
         }
-    } else {
-        // we're not using h/w composer
-        for (auto& layer : displayDevice->getVisibleLayersSortedByZ()) {
-            const Region clip(bounds.intersect(
-                    displayTransform.transform(layer->visibleRegion)));
-            if (!clip.isEmpty()) {
-                layer->draw(renderArea, clip);
-            }
-        }
+        firstLayer = false;
     }
 
     if (applyColorMatrix) {
-        getRenderEngine().setupColorTransform(oldColorMatrix);
+        getRenderEngine().setupColorTransform(mat4());
+    }
+    if (needsLegacyColorMatrix && legacyColorMatrixApplied) {
+        getRenderEngine().setSaturationMatrix(mat4());
     }
 
     // disable scissor at the end of the frame
@@ -3079,12 +3074,14 @@
             parent->addChild(lbc);
         }
 
-        mGraphicBufferProducerList.insert(IInterface::asBinder(gbc).get());
-        // TODO (b/74616334): Change this back to a fatal assert once the leak is fixed
-        ALOGE_IF(mGraphicBufferProducerList.size() > mMaxGraphicBufferProducerListSize,
-                 "Suspected IGBP leak: %zu IGBPs (%zu max), %zu Layers",
-                 mGraphicBufferProducerList.size(), mMaxGraphicBufferProducerListSize,
-                 mNumLayers);
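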
+        if (gbc != nullptr) {
+            mGraphicBufferProducerList.insert(IInterface::asBinder(gbc).get());
+            LOG_ALWAYS_FATAL_IF(mGraphicBufferProducerList.size() >
+                                        mMaxGraphicBufferProducerListSize,
+                                "Suspected IGBP leak: %zu IGBPs (%zu max), %zu Layers",
+                                mGraphicBufferProducerList.size(),
+                                mMaxGraphicBufferProducerListSize, mNumLayers);
+        }
         mLayersAdded = true;
         mNumLayers++;
     }
@@ -3839,12 +3836,6 @@
         size_t index = 0;
         size_t numArgs = args.size();
 
-        if (asProto) {
-            LayersProto layersProto = dumpProtoInfo(LayerVector::StateSet::Current);
-            result.append(layersProto.SerializeAsString().c_str(), layersProto.ByteSize());
-            dumpAll = false;
-        }
-
         if (numArgs) {
             if ((index < numArgs) &&
                     (args[index] == String16("--list"))) {
@@ -3921,10 +3912,21 @@
                 mLayerStats.dump(result);
                 dumpAll = false;
             }
+
+            if ((index < numArgs) && (args[index] == String16("--timestats"))) {
+                index++;
+                mTimeStats.parseArgs(asProto, args, index, result);
+                dumpAll = false;
+            }
         }
 
         if (dumpAll) {
-            dumpAllLocked(args, index, result);
+            if (asProto) {
+                LayersProto layersProto = dumpProtoInfo(LayerVector::StateSet::Current);
+                result.append(layersProto.SerializeAsString().c_str(), layersProto.ByteSize());
+            } else {
+                dumpAllLocked(args, index, result);
+            }
         }
 
         if (locked) {
@@ -4092,7 +4094,8 @@
 
 void SurfaceFlinger::dumpWideColorInfo(String8& result) const {
     result.appendFormat("hasWideColorDisplay: %d\n", hasWideColorDisplay);
-    result.appendFormat("DisplayColorSetting: %d\n", mDisplayColorSetting);
+    result.appendFormat("DisplayColorSetting: %s\n",
+            decodeDisplayColorSetting(mDisplayColorSetting).c_str());
 
     // TODO: print out if wide-color mode is active or not
 
@@ -4364,6 +4367,30 @@
     return true;
 }
 
+void SurfaceFlinger::updateColorMatrixLocked() {
+    mat4 colorMatrix;
+    if (mGlobalSaturationFactor != 1.0f) {
+        // Rec.709 luma coefficients
+        float3 luminance{0.213f, 0.715f, 0.072f};
+        luminance *= 1.0f - mGlobalSaturationFactor;
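+        // Standard saturation matrix: each channel is mixed with its Rec.709
+        // luma, so a factor of 1 is identity, 0 is grayscale, and values above
+        // 1 boost saturation.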
+        mat4 saturationMatrix = mat4(
+            vec4{luminance.r + mGlobalSaturationFactor, luminance.r, luminance.r, 0.0f},
+            vec4{luminance.g, luminance.g + mGlobalSaturationFactor, luminance.g, 0.0f},
+            vec4{luminance.b, luminance.b, luminance.b + mGlobalSaturationFactor, 0.0f},
+            vec4{0.0f, 0.0f, 0.0f, 1.0f}
+        );
+        colorMatrix = mClientColorMatrix * saturationMatrix * mDaltonizer();
+    } else {
+        colorMatrix = mClientColorMatrix * mDaltonizer();
+    }
+
+    if (mCurrentState.colorMatrix != colorMatrix) {
+        mCurrentState.colorMatrix = colorMatrix;
+        mCurrentState.colorMatrixChanged = true;
+        setTransactionFlags(eTransactionNeeded);
+    }
+}
+
 status_t SurfaceFlinger::CheckTransactCodeCredentials(uint32_t code) {
     switch (code) {
         case CREATE_CONNECTION:
@@ -4498,6 +4525,7 @@
                 return NO_ERROR;
             }
             case 1014: {
+                Mutex::Autolock _l(mStateLock);
                 // daltonize
                 n = data.readInt32();
                 switch (n % 10) {
@@ -4519,33 +4547,33 @@
                 } else {
                     mDaltonizer.setMode(ColorBlindnessMode::Simulation);
                 }
-                invalidateHwcGeometry();
-                repaintEverything();
+
+                updateColorMatrixLocked();
                 return NO_ERROR;
             }
             case 1015: {
+                Mutex::Autolock _l(mStateLock);
                 // apply a color matrix
                 n = data.readInt32();
                 if (n) {
                     // color matrix is sent as a column-major mat4 matrix
                     for (size_t i = 0 ; i < 4; i++) {
                         for (size_t j = 0; j < 4; j++) {
-                            mColorMatrix[i][j] = data.readFloat();
+                            mClientColorMatrix[i][j] = data.readFloat();
                         }
                     }
                 } else {
-                    mColorMatrix = mat4();
+                    mClientColorMatrix = mat4();
                 }
 
                 // Check that supplied matrix's last row is {0,0,0,1} so we can avoid
                 // the division by w in the fragment shader
-                float4 lastRow(transpose(mColorMatrix)[3]);
+                float4 lastRow(transpose(mClientColorMatrix)[3]);
                 if (any(greaterThan(abs(lastRow - float4{0, 0, 0, 1}), float4{1e-4f}))) {
                     ALOGE("The color transform's last row must be (0, 0, 0, 1)");
                 }
 
-                invalidateHwcGeometry();
-                repaintEverything();
+                updateColorMatrixLocked();
                 return NO_ERROR;
             }
             // This is an experimental interface
@@ -4588,22 +4616,14 @@
                 return NO_ERROR;
             }
             case 1022: { // Set saturation boost
+                Mutex::Autolock _l(mStateLock);
                 mGlobalSaturationFactor = std::max(0.0f, std::min(data.readFloat(), 2.0f));
 
-                invalidateHwcGeometry();
-                repaintEverything();
+                updateColorMatrixLocked();
                 return NO_ERROR;
             }
             case 1023: { // Set native mode
-                int32_t value = data.readInt32();
-                if (value > 2) {
-                    return BAD_VALUE;
-                }
-                if (value == 2 && !mBuiltinDisplaySupportsEnhance) {
-                    return BAD_VALUE;
-                }
-
-                mDisplayColorSetting = toDisplayColorSetting(value);
+                mDisplayColorSetting = static_cast<DisplayColorSetting>(data.readInt32());
                 invalidateHwcGeometry();
                 repaintEverything();
                 return NO_ERROR;
@@ -4632,20 +4652,27 @@
             }
             // Is a DisplayColorSetting supported?
             case 1027: {
-                int32_t value = data.readInt32();
-                switch (value) {
-                    case 0:
-                        reply->writeBool(hasWideColorDisplay);
-                        return NO_ERROR;
-                    case 1:
-                        reply->writeBool(true);
-                        return NO_ERROR;
-                    case 2:
-                        reply->writeBool(mBuiltinDisplaySupportsEnhance);
-                        return NO_ERROR;
-                    default:
-                        return BAD_VALUE;
+                sp<const DisplayDevice> hw(getDefaultDisplayDevice());
+                if (!hw) {
+                    return NAME_NOT_FOUND;
                 }
+
+                DisplayColorSetting setting = static_cast<DisplayColorSetting>(data.readInt32());
+                switch (setting) {
+                    case DisplayColorSetting::MANAGED:
+                        reply->writeBool(hasWideColorDisplay);
+                        break;
+                    case DisplayColorSetting::UNMANAGED:
+                        reply->writeBool(true);
+                        break;
+                    case DisplayColorSetting::ENHANCED:
+                        reply->writeBool(hw->hasRenderIntent(RenderIntent::ENHANCE));
+                        break;
+                    default: // vendor display color setting
+                        reply->writeBool(hw->hasRenderIntent(static_cast<RenderIntent>(setting)));
+                        break;
+                }
+                return NO_ERROR;
             }
         }
     }
@@ -4719,9 +4746,6 @@
                 return mCrop;
             }
         }
-        bool getWideColorSupport() const override { return false; }
-        Dataspace getDataSpace() const override { return Dataspace::UNKNOWN; }
-
         class ReparentForDrawing {
         public:
             const sp<Layer>& oldParent;
@@ -4812,7 +4836,7 @@
                                              bool useIdentityTransform) {
     ATRACE_CALL();
 
-    renderArea.updateDimensions();
+    renderArea.updateDimensions(mPrimaryDisplayOrientation);
 
     const uint32_t usage = GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN |
             GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE;
@@ -4896,13 +4920,35 @@
     const auto reqHeight = renderArea.getReqHeight();
     Rect sourceCrop = renderArea.getSourceCrop();
 
-    const bool filtering = static_cast<int32_t>(reqWidth) != raWidth ||
-            static_cast<int32_t>(reqHeight) != raHeight;
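+    // When the primary panel is mounted rotated by 90/270 degrees, the render
+    // area's width/height are swapped relative to the requested capture size.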
+    bool filtering = false;
+    if (mPrimaryDisplayOrientation & DisplayState::eOrientationSwapMask) {
+        filtering = static_cast<int32_t>(reqWidth) != raHeight ||
+                static_cast<int32_t>(reqHeight) != raWidth;
+    } else {
+        filtering = static_cast<int32_t>(reqWidth) != raWidth ||
+                static_cast<int32_t>(reqHeight) != raHeight;
+    }
 
     // if a default or invalid sourceCrop is passed in, set reasonable values
     if (sourceCrop.width() == 0 || sourceCrop.height() == 0 || !sourceCrop.isValid()) {
         sourceCrop.setLeftTop(Point(0, 0));
         sourceCrop.setRightBottom(Point(raWidth, raHeight));
+    } else if (mPrimaryDisplayOrientation != DisplayState::eOrientationDefault) {
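+        // Rotate the caller-supplied crop into the panel's native orientation
+        // before it is clamped against the render area below.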
+        Transform tr;
+        uint32_t flags = 0x00;
+        switch (mPrimaryDisplayOrientation) {
+            case DisplayState::eOrientation90:
+                flags = Transform::ROT_90;
+                break;
+            case DisplayState::eOrientation180:
+                flags = Transform::ROT_180;
+                break;
+            case DisplayState::eOrientation270:
+                flags = Transform::ROT_270;
+                break;
+        }
+        tr.set(flags, raWidth, raHeight);
+        sourceCrop = tr.transform(sourceCrop);
     }
 
     // ensure that sourceCrop is inside screen
@@ -4919,18 +4965,47 @@
         ALOGE("Invalid crop rect: b = %d (> %d)", sourceCrop.bottom, raHeight);
     }
 
-    Dataspace outputDataspace = Dataspace::UNKNOWN;
-    if (renderArea.getWideColorSupport()) {
-        outputDataspace = renderArea.getDataSpace();
-    }
-    getBE().mRenderEngine->setOutputDataSpace(outputDataspace);
+    // assume ColorMode::SRGB / RenderIntent::COLORIMETRIC
+    engine.setOutputDataSpace(Dataspace::SRGB);
+    engine.setDisplayMaxLuminance(DisplayDevice::sDefaultMaxLumiance);
 
     // make sure to clear all GL error flags
     engine.checkErrors();
 
+    Transform::orientation_flags rotation = renderArea.getRotationFlags();
+    if (mPrimaryDisplayOrientation != DisplayState::eOrientationDefault) {
+        // convert the hw orientation into its flag representation;
+        // the inverse transform is needed here
+        uint8_t hw_rot_90  = 0x00;
+        uint8_t hw_flip_hv = 0x00;
+        switch (mPrimaryDisplayOrientation) {
+            case DisplayState::eOrientation90:
+                hw_rot_90 = Transform::ROT_90;
+                hw_flip_hv = Transform::ROT_180;
+                break;
+            case DisplayState::eOrientation180:
+                hw_flip_hv = Transform::ROT_180;
+                break;
+            case DisplayState::eOrientation270:
+                hw_rot_90  = Transform::ROT_90;
+                break;
+        }
+
+        // combine the transform flags:
+        // 1) flip H and V if both have the ROT_90 flag
+        // 2) XOR the flags
+        uint8_t rotation_rot_90  = rotation & Transform::ROT_90;
+        uint8_t rotation_flip_hv = rotation & Transform::ROT_180;
+        if (rotation_rot_90 & hw_rot_90) {
+            rotation_flip_hv = (~rotation_flip_hv) & Transform::ROT_180;
+        }
+        rotation = static_cast<Transform::orientation_flags>
+                   ((rotation_rot_90 ^ hw_rot_90) | (rotation_flip_hv ^ hw_flip_hv));
+    }
+
     // set-up our viewport
     engine.setViewportAndProjection(reqWidth, reqHeight, sourceCrop, raHeight, yswap,
-                                    renderArea.getRotationFlags());
+                                    rotation);
     engine.disableTexturing();
 
     const float alpha = RenderArea::getCaptureFillValue(renderArea.getCaptureFill());
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index 33706da..062cb76 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -63,6 +63,7 @@
 #include "SurfaceInterceptor.h"
 #include "SurfaceTracing.h"
 #include "StartPropertySetThread.h"
+#include "TimeStats/TimeStats.h"
 #include "VSyncModulator.h"
 
 #include "DisplayHardware/HWC2.h"
@@ -340,6 +341,8 @@
     bool authenticateSurfaceTextureLocked(
         const sp<IGraphicBufferProducer>& bufferProducer) const;
 
+    int getPrimaryDisplayOrientation() const { return mPrimaryDisplayOrientation; }
+
 private:
     friend class Client;
     friend class DisplayEventConnection;
@@ -372,6 +375,10 @@
             // always uses the Drawing StateSet.
             layersSortedByZ = other.layersSortedByZ;
             displays = other.displays;
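+            // Only propagate the color matrix when it actually changed, so the
+            // drawing state otherwise keeps the last applied matrix.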
+            colorMatrixChanged = other.colorMatrixChanged;
+            if (colorMatrixChanged) {
+                colorMatrix = other.colorMatrix;
+            }
             return *this;
         }
 
@@ -379,6 +386,9 @@
         LayerVector layersSortedByZ;
         DefaultKeyedVector< wp<IBinder>, DisplayDeviceState> displays;
 
+        bool colorMatrixChanged = true;
+        mat4 colorMatrix;
+
         void traverseInZOrder(const LayerVector::Visitor& visitor) const;
         void traverseInReverseZOrder(const LayerVector::Visitor& visitor) const;
     };
@@ -473,7 +483,8 @@
     // Called on the main thread in response to setActiveColorMode()
     void setActiveColorModeInternal(const sp<DisplayDevice>& hw,
                                     ui::ColorMode colorMode,
-                                    ui::Dataspace dataSpace);
+                                    ui::Dataspace dataSpace,
+                                    ui::RenderIntent renderIntent);
 
     // Returns whether the transaction actually modified any state
     bool handleMessageTransaction();
@@ -645,14 +656,16 @@
             nsecs_t compositeToPresentLatency);
     void rebuildLayerStacks();
 
-    // Given a dataSpace, returns the appropriate color_mode to use
-    // to display that dataSpace.
-    ui::Dataspace getBestDataspace(const sp<const DisplayDevice>& displayDevice) const;
+    ui::Dataspace getBestDataspace(const sp<const DisplayDevice>& displayDevice,
+                                   ui::Dataspace* outHdrDataSpace) const;
+
+    // Returns the appropriate ColorMode, Dataspace and RenderIntent for the
+    // DisplayDevice, restricted to combinations the display actually supports.
     void pickColorMode(const sp<DisplayDevice>& displayDevice,
                        ui::ColorMode* outMode,
-                       ui::Dataspace* outDataSpace) const;
-
-    mat4 computeSaturationMatrix() const;
+                       ui::Dataspace* outDataSpace,
+                       ui::RenderIntent* outRenderIntent) const;
 
     void setUpHWComposer();
     void doComposition();
@@ -740,6 +753,8 @@
     // Check to see if we should handoff to vr flinger.
     void updateVrFlinger();
 
+    void updateColorMatrixLocked();
+
     /* ------------------------------------------------------------------------
      * Attributes
      */
@@ -753,6 +768,11 @@
     bool mAnimTransactionPending;
     SortedVector< sp<Layer> > mLayersPendingRemoval;
 
+    // global color transform states
+    Daltonizer mDaltonizer;
+    float mGlobalSaturationFactor = 1.0f;
+    mat4 mClientColorMatrix;
+
     // Can't be unordered_set because wp<> isn't hashable
     std::set<wp<IBinder>> mGraphicBufferProducerList;
     size_t mMaxGraphicBufferProducerListSize = MAX_LAYERS;
@@ -815,6 +835,7 @@
             std::make_unique<impl::SurfaceInterceptor>(this);
     SurfaceTracing mTracing;
     LayerStats mLayerStats;
+    TimeStats& mTimeStats = TimeStats::getInstance();
     bool mUseHwcVirtualDisplays = false;
 
     // Restrict layers to use two buffers in their bufferqueues.
@@ -824,6 +845,7 @@
     mutable std::unique_ptr<MessageQueue> mEventQueue{std::make_unique<impl::MessageQueue>()};
     FrameTracker mAnimFrameTracker;
     DispSync mPrimaryDispSync;
+    int mPrimaryDisplayOrientation = DisplayState::eOrientationDefault;
 
     // protected by mDestroyedLayerLock;
     mutable Mutex mDestroyedLayerLock;
@@ -842,12 +864,6 @@
 
     bool mInjectVSyncs;
 
-    Daltonizer mDaltonizer;
-
-    mat4 mPreviousColorMatrix;
-    mat4 mColorMatrix;
-    bool mHasColorMatrix;
-
     // Static screen stats
     bool mHasPoweredOff;
 
@@ -865,9 +881,6 @@
     DisplayColorSetting mDisplayColorSetting = DisplayColorSetting::MANAGED;
     // Applied on sRGB layers when the render intent is non-colorimetric.
     mat4 mLegacySrgbSaturationMatrix;
-    // Applied globally.
-    float mGlobalSaturationFactor = 1.0f;
-    bool mBuiltinDisplaySupportsEnhance = false;
 
     using CreateBufferQueueFunction =
             std::function<void(sp<IGraphicBufferProducer>* /* outProducer */,
diff --git a/services/surfaceflinger/TimeStats/TimeStats.cpp b/services/surfaceflinger/TimeStats/TimeStats.cpp
new file mode 100644
index 0000000..d4f1e29
--- /dev/null
+++ b/services/surfaceflinger/TimeStats/TimeStats.cpp
@@ -0,0 +1,486 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#undef LOG_TAG
+#define LOG_TAG "TimeStats"
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include "TimeStats.h"
+
+#include <android-base/stringprintf.h>
+
+#include <log/log.h>
+
+#include <utils/String8.h>
+#include <utils/Trace.h>
+
+#include <algorithm>
+#include <regex>
+
+namespace android {
+
+TimeStats& TimeStats::getInstance() {
+    static std::unique_ptr<TimeStats> sInstance;
+    static std::once_flag sOnceFlag;
+
+    std::call_once(sOnceFlag, [] { sInstance.reset(new TimeStats); });
+    return *sInstance.get();
+}
+
+void TimeStats::parseArgs(bool asProto, const Vector<String16>& args, size_t& index,
+                          String8& result) {
+    ATRACE_CALL();
+
+    if (args.size() > index + 10) {
+        ALOGD("Invalid args count");
+        return;
+    }
+
+    std::unordered_map<std::string, int32_t> argsMap;
+    while (index < args.size()) {
+        argsMap[std::string(String8(args[index]).c_str())] = index;
+        ++index;
+    }
+
+    if (argsMap.count("-disable")) {
+        disable();
+    }
+
+    if (argsMap.count("-dump")) {
+        std::optional<uint32_t> maxLayers = std::nullopt;
+        auto iter = argsMap.find("-maxlayers");
+        if (iter != argsMap.end() && iter->second + 1 < static_cast<int32_t>(args.size())) {
+            int64_t value = strtol(String8(args[iter->second + 1]).c_str(), nullptr, 10);
+            value = std::clamp(value, int64_t(0), int64_t(UINT32_MAX));
+            maxLayers = static_cast<uint32_t>(value);
+        }
+
+        dump(asProto, maxLayers, result);
+    }
+
+    if (argsMap.count("-clear")) {
+        clear();
+    }
+
+    if (argsMap.count("-enable")) {
+        enable();
+    }
+}
+
+void TimeStats::incrementTotalFrames() {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    timeStats.totalFrames++;
+}
+
+void TimeStats::incrementMissedFrames() {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    timeStats.missedFrames++;
+}
+
+void TimeStats::incrementClientCompositionFrames() {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    timeStats.clientCompositionFrames++;
+}
+
+bool TimeStats::recordReadyLocked(const std::string& layerName, TimeRecord* timeRecord) {
+    if (!timeRecord->ready) {
+        ALOGV("[%s]-[%" PRIu64 "]-presentFence is still not received", layerName.c_str(),
+              timeRecord->frameNumber);
+        return false;
+    }
+
+    if (timeRecord->acquireFence != nullptr) {
+        if (timeRecord->acquireFence->getSignalTime() == Fence::SIGNAL_TIME_PENDING) {
+            return false;
+        }
+        if (timeRecord->acquireFence->getSignalTime() != Fence::SIGNAL_TIME_INVALID) {
+            timeRecord->acquireTime = timeRecord->acquireFence->getSignalTime();
+            timeRecord->acquireFence = nullptr;
+        } else {
+            ALOGV("[%s]-[%" PRIu64 "]-acquireFence signal time is invalid", layerName.c_str(),
+                  timeRecord->frameNumber);
+        }
+    }
+
+    if (timeRecord->presentFence != nullptr) {
+        if (timeRecord->presentFence->getSignalTime() == Fence::SIGNAL_TIME_PENDING) {
+            return false;
+        }
+        if (timeRecord->presentFence->getSignalTime() != Fence::SIGNAL_TIME_INVALID) {
+            timeRecord->presentTime = timeRecord->presentFence->getSignalTime();
+            timeRecord->presentFence = nullptr;
+        } else {
+            ALOGV("[%s]-[%" PRIu64 "]-presentFence signal time invalid", layerName.c_str(),
+                  timeRecord->frameNumber);
+        }
+    }
+
+    return true;
+}
+
+static int32_t msBetween(nsecs_t start, nsecs_t end) {
+    int64_t delta = (end - start) / 1000000;
+    delta = std::clamp(delta, int64_t(INT32_MIN), int64_t(INT32_MAX));
+    return static_cast<int32_t>(delta);
+}
+
+static std::string getPackageName(const std::string& layerName) {
+    // This regular expression captures the following for instance:
+    // StatusBar in StatusBar#0
+    // com.appname in com.appname/com.appname.activity#0
+    // com.appname in SurfaceView - com.appname/com.appname.activity#0
+    const std::regex re("(?:SurfaceView[-\\s\\t]+)?([^/]+).*#\\d+");
+    std::smatch match;
+    if (std::regex_match(layerName.begin(), layerName.end(), match, re)) {
+        // There must be a match for group 1; otherwise the whole string does
+        // not match and regex_match above returns false.
+        return match[1];
+    }
+    return "";
+}
+
+void TimeStats::flushAvailableRecordsToStatsLocked(const std::string& layerName) {
+    ATRACE_CALL();
+
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    TimeRecord& prevTimeRecord = layerRecord.prevTimeRecord;
+    std::deque<TimeRecord>& timeRecords = layerRecord.timeRecords;
+    while (!timeRecords.empty()) {
+        if (!recordReadyLocked(layerName, &timeRecords[0])) break;
+        ALOGV("[%s]-[%" PRIu64 "]-presentFenceTime[%" PRId64 "]", layerName.c_str(),
+              timeRecords[0].frameNumber, timeRecords[0].presentTime);
+
+        if (prevTimeRecord.ready) {
+            if (!timeStats.stats.count(layerName)) {
+                timeStats.stats[layerName].layerName = layerName;
+                timeStats.stats[layerName].packageName = getPackageName(layerName);
+                timeStats.stats[layerName].statsStart = static_cast<int64_t>(std::time(0));
+            }
+            TimeStatsHelper::TimeStatsLayer& timeStatsLayer = timeStats.stats[layerName];
+            timeStatsLayer.totalFrames++;
+
+            const int32_t postToPresentMs =
+                    msBetween(timeRecords[0].postTime, timeRecords[0].presentTime);
+            ALOGV("[%s]-[%" PRIu64 "]-post2present[%d]", layerName.c_str(),
+                  timeRecords[0].frameNumber, postToPresentMs);
+            timeStatsLayer.deltas["post2present"].insert(postToPresentMs);
+
+            const int32_t acquireToPresentMs =
+                    msBetween(timeRecords[0].acquireTime, timeRecords[0].presentTime);
+            ALOGV("[%s]-[%" PRIu64 "]-acquire2present[%d]", layerName.c_str(),
+                  timeRecords[0].frameNumber, acquireToPresentMs);
+            timeStatsLayer.deltas["acquire2present"].insert(acquireToPresentMs);
+
+            const int32_t latchToPresentMs =
+                    msBetween(timeRecords[0].latchTime, timeRecords[0].presentTime);
+            ALOGV("[%s]-[%" PRIu64 "]-latch2present[%d]", layerName.c_str(),
+                  timeRecords[0].frameNumber, latchToPresentMs);
+            timeStatsLayer.deltas["latch2present"].insert(latchToPresentMs);
+
+            const int32_t desiredToPresentMs =
+                    msBetween(timeRecords[0].desiredTime, timeRecords[0].presentTime);
+            ALOGV("[%s]-[%" PRIu64 "]-desired2present[%d]", layerName.c_str(),
+                  timeRecords[0].frameNumber, desiredToPresentMs);
+            timeStatsLayer.deltas["desired2present"].insert(desiredToPresentMs);
+
+            const int32_t presentToPresentMs =
+                    msBetween(prevTimeRecord.presentTime, timeRecords[0].presentTime);
+            ALOGV("[%s]-[%" PRIu64 "]-present2present[%d]", layerName.c_str(),
+                  timeRecords[0].frameNumber, presentToPresentMs);
+            timeStatsLayer.deltas["present2present"].insert(presentToPresentMs);
+
+            timeStats.stats[layerName].statsEnd = static_cast<int64_t>(std::time(0));
+        }
+        prevTimeRecord = timeRecords[0];
+        timeRecords.pop_front();
+        layerRecord.waitData--;
+    }
+}
+
+static bool layerNameIsValid(const std::string& layerName) {
+    // This regular expression captures the following layer names for instance:
+    // 1) StatusBar#0
+    // 2) NavigationBar#1
+    // 3) com.*#0
+    // 4) SurfaceView - com.*#0
+    // Using [-\\s\\t]+ as the separator between SurfaceView and com.* makes the
+    // match a bit more robust against slight naming changes.
+    // A layer name typically consists only of . / $ _ 0-9 a-z A-Z.
+    std::regex re("(((SurfaceView[-\\s\\t]+)?com\\.[./$\\w]+)|((Status|Navigation)Bar))#\\d+");
+    return std::regex_match(layerName.begin(), layerName.end(), re);
+}
+
+void TimeStats::setPostTime(const std::string& layerName, uint64_t frameNumber, nsecs_t postTime) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-PostTime[%" PRId64 "]", layerName.c_str(), frameNumber, postTime);
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName) && !layerNameIsValid(layerName)) {
+        return;
+    }
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    if (layerRecord.timeRecords.size() == MAX_NUM_TIME_RECORDS) {
+        ALOGV("[%s]-timeRecords is already at its maximum size[%zu]", layerName.c_str(),
+              MAX_NUM_TIME_RECORDS);
+        // TODO(zzyiwei): if this happens, a present fence is likely missing or
+        // waitData is out of position. Work out a reasonable way to recover
+        // from this state.
+        return;
+    }
+    // For most media content, the acquireFence is invalid because the buffer is
+    // already ready at queueBuffer time. In that case, acquireTime defaults to
+    // postTime.
+    TimeRecord timeRecord = {
+            .frameNumber = frameNumber,
+            .postTime = postTime,
+            .acquireTime = postTime,
+    };
+    layerRecord.timeRecords.push_back(timeRecord);
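+    // Point waitData at the new record if it was invalid or ran past the end
+    // of the deque.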
+    if (layerRecord.waitData < 0 ||
+        layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
+        layerRecord.waitData = layerRecord.timeRecords.size() - 1;
+}
+
+void TimeStats::setLatchTime(const std::string& layerName, uint64_t frameNumber,
+                             nsecs_t latchTime) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-LatchTime[%" PRId64 "]", layerName.c_str(), frameNumber, latchTime);
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
+    if (timeRecord.frameNumber == frameNumber) {
+        timeRecord.latchTime = latchTime;
+    }
+}
+
+void TimeStats::setDesiredTime(const std::string& layerName, uint64_t frameNumber,
+                               nsecs_t desiredTime) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-DesiredTime[%" PRId64 "]", layerName.c_str(), frameNumber,
+          desiredTime);
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
+    if (timeRecord.frameNumber == frameNumber) {
+        timeRecord.desiredTime = desiredTime;
+    }
+}
+
+void TimeStats::setAcquireTime(const std::string& layerName, uint64_t frameNumber,
+                               nsecs_t acquireTime) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-AcquireTime[%" PRId64 "]", layerName.c_str(), frameNumber,
+          acquireTime);
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
+    if (timeRecord.frameNumber == frameNumber) {
+        timeRecord.acquireTime = acquireTime;
+    }
+}
+
+void TimeStats::setAcquireFence(const std::string& layerName, uint64_t frameNumber,
+                                const std::shared_ptr<FenceTime>& acquireFence) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-AcquireFenceTime[%" PRId64 "]", layerName.c_str(), frameNumber,
+          acquireFence->getSignalTime());
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
+    if (timeRecord.frameNumber == frameNumber) {
+        timeRecord.acquireFence = acquireFence;
+    }
+}
+
+void TimeStats::setPresentTime(const std::string& layerName, uint64_t frameNumber,
+                               nsecs_t presentTime) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-PresentTime[%" PRId64 "]", layerName.c_str(), frameNumber,
+          presentTime);
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
+    if (timeRecord.frameNumber == frameNumber) {
+        timeRecord.presentTime = presentTime;
+        timeRecord.ready = true;
+        layerRecord.waitData++;
+    }
+
+    flushAvailableRecordsToStatsLocked(layerName);
+}
+
+void TimeStats::setPresentFence(const std::string& layerName, uint64_t frameNumber,
+                                const std::shared_ptr<FenceTime>& presentFence) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-PresentFenceTime[%" PRId64 "]", layerName.c_str(), frameNumber,
+          presentFence->getSignalTime());
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
+    if (timeRecord.frameNumber == frameNumber) {
+        timeRecord.presentFence = presentFence;
+        timeRecord.ready = true;
+        layerRecord.waitData++;
+    }
+
+    flushAvailableRecordsToStatsLocked(layerName);
+}
+
+void TimeStats::onDisconnect(const std::string& layerName) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-onDisconnect", layerName.c_str());
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    flushAvailableRecordsToStatsLocked(layerName);
+    timeStatsTracker.erase(layerName);
+}
+
+void TimeStats::clearLayerRecord(const std::string& layerName) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-clearLayerRecord", layerName.c_str());
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    layerRecord.timeRecords.clear();
+    layerRecord.prevTimeRecord.ready = false;
+    layerRecord.waitData = -1;
+}
+
+void TimeStats::removeTimeRecord(const std::string& layerName, uint64_t frameNumber) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-removeTimeRecord", layerName.c_str(), frameNumber);
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    size_t removeAt = 0;
+    for (const TimeRecord& record : layerRecord.timeRecords) {
+        if (record.frameNumber == frameNumber) break;
+        removeAt++;
+    }
+    if (removeAt == layerRecord.timeRecords.size()) return;
+    layerRecord.timeRecords.erase(layerRecord.timeRecords.begin() + removeAt);
+    if (layerRecord.waitData > static_cast<int32_t>(removeAt)) {
+        --layerRecord.waitData;
+    }
+}
+
+void TimeStats::enable() {
+    if (mEnabled.load()) return;
+
+    ATRACE_CALL();
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    ALOGD("Enabled");
+    mEnabled.store(true);
+    timeStats.statsStart = static_cast<int64_t>(std::time(0));
+}
+
+void TimeStats::disable() {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    ALOGD("Disabled");
+    mEnabled.store(false);
+    timeStats.statsEnd = static_cast<int64_t>(std::time(0));
+}
+
+void TimeStats::clear() {
+    ATRACE_CALL();
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    ALOGD("Cleared");
+    timeStats.stats.clear();
+    timeStats.statsStart = (mEnabled.load() ? static_cast<int64_t>(std::time(0)) : 0);
+    timeStats.statsEnd = 0;
+    timeStats.totalFrames = 0;
+    timeStats.missedFrames = 0;
+    timeStats.clientCompositionFrames = 0;
+}
+
+bool TimeStats::isEnabled() {
+    return mEnabled.load();
+}
+
+void TimeStats::dump(bool asProto, std::optional<uint32_t> maxLayers, String8& result) {
+    ATRACE_CALL();
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (timeStats.statsStart == 0) {
+        return;
+    }
+
+    timeStats.statsEnd = static_cast<int64_t>(std::time(0));
+
+    if (asProto) {
+        ALOGD("Dumping TimeStats as proto");
+        SFTimeStatsGlobalProto timeStatsProto = timeStats.toProto(maxLayers);
+        result.append(timeStatsProto.SerializeAsString().c_str(), timeStatsProto.ByteSize());
+    } else {
+        ALOGD("Dumping TimeStats as text");
+        result.append(timeStats.toString(maxLayers).c_str());
+        result.append("\n");
+    }
+}
+
+} // namespace android
diff --git a/services/surfaceflinger/TimeStats/TimeStats.h b/services/surfaceflinger/TimeStats/TimeStats.h
new file mode 100644
index 0000000..8318210
--- /dev/null
+++ b/services/surfaceflinger/TimeStats/TimeStats.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <timestatsproto/TimeStatsHelper.h>
+#include <timestatsproto/TimeStatsProtoHeader.h>
+
+#include <ui/FenceTime.h>
+
+#include <utils/String16.h>
+#include <utils/String8.h>
+#include <utils/Vector.h>
+
+#include <deque>
+#include <mutex>
+#include <optional>
+#include <unordered_map>
+
+using namespace android::surfaceflinger;
+
+namespace android {
+class String8;
+
+class TimeStats {
+    // TODO(zzyiwei): Bound the timeStatsTracker with weighted LRU
+    // static const size_t MAX_NUM_LAYER_RECORDS = 200;
+    static const size_t MAX_NUM_TIME_RECORDS = 64;
+
+    struct TimeRecord {
+        bool ready = false;
+        uint64_t frameNumber = 0;
+        nsecs_t postTime = 0;
+        nsecs_t latchTime = 0;
+        nsecs_t acquireTime = 0;
+        nsecs_t desiredTime = 0;
+        nsecs_t presentTime = 0;
+        std::shared_ptr<FenceTime> acquireFence;
+        std::shared_ptr<FenceTime> presentFence;
+    };
+
+    struct LayerRecord {
+        // Index into timeRecords of the first frame whose timestamps have not
+        // been fully received yet. This does not wait for fences to signal,
+        // only for the fences/timestamps themselves to arrive.
+        int32_t waitData = -1;
+        TimeRecord prevTimeRecord;
+        std::deque<TimeRecord> timeRecords;
+    };
+
+public:
+    static TimeStats& getInstance();
+    void parseArgs(bool asProto, const Vector<String16>& args, size_t& index, String8& result);
+    void incrementTotalFrames();
+    void incrementMissedFrames();
+    void incrementClientCompositionFrames();
+
+    void setPostTime(const std::string& layerName, uint64_t frameNumber, nsecs_t postTime);
+    void setLatchTime(const std::string& layerName, uint64_t frameNumber, nsecs_t latchTime);
+    void setDesiredTime(const std::string& layerName, uint64_t frameNumber, nsecs_t desiredTime);
+    void setAcquireTime(const std::string& layerName, uint64_t frameNumber, nsecs_t acquireTime);
+    void setAcquireFence(const std::string& layerName, uint64_t frameNumber,
+                         const std::shared_ptr<FenceTime>& acquireFence);
+    void setPresentTime(const std::string& layerName, uint64_t frameNumber, nsecs_t presentTime);
+    void setPresentFence(const std::string& layerName, uint64_t frameNumber,
+                         const std::shared_ptr<FenceTime>& presentFence);
+    void onDisconnect(const std::string& layerName);
+    void clearLayerRecord(const std::string& layerName);
+    void removeTimeRecord(const std::string& layerName, uint64_t frameNumber);
+
+private:
+    TimeStats() = default;
+
+    bool recordReadyLocked(const std::string& layerName, TimeRecord* timeRecord);
+    void flushAvailableRecordsToStatsLocked(const std::string& layerName);
+
+    void enable();
+    void disable();
+    void clear();
+    bool isEnabled();
+    void dump(bool asProto, std::optional<uint32_t> maxLayers, String8& result);
+
+    std::atomic<bool> mEnabled = false;
+    std::mutex mMutex;
+    TimeStatsHelper::TimeStatsGlobal timeStats;
+    std::unordered_map<std::string, LayerRecord> timeStatsTracker;
+};
+
+} // namespace android
diff --git a/services/surfaceflinger/TimeStats/timestatsproto/Android.bp b/services/surfaceflinger/TimeStats/timestatsproto/Android.bp
new file mode 100644
index 0000000..bef6b7c
--- /dev/null
+++ b/services/surfaceflinger/TimeStats/timestatsproto/Android.bp
@@ -0,0 +1,33 @@
+cc_library_shared {
+    name: "libtimestats_proto",
+    export_include_dirs: ["include"],
+
+    srcs: [
+        "TimeStatsHelper.cpp",
+        "timestats.proto",
+    ],
+
+    shared_libs: [
+        "libbase",
+        "libprotobuf-cpp-lite",
+    ],
+
+    proto: {
+        export_proto_headers: true,
+    },
+
+    cppflags: [
+        "-std=c++1z",
+        "-Werror",
+        "-Wno-c++98-compat-pedantic",
+        "-Wno-disabled-macro-expansion",
+        "-Wno-float-conversion",
+        "-Wno-float-equal",
+        "-Wno-format",
+        "-Wno-old-style-cast",
+        "-Wno-padded",
+        "-Wno-sign-conversion",
+        "-Wno-undef",
+        "-Wno-unused-parameter",
+    ],
+}
diff --git a/services/surfaceflinger/TimeStats/timestatsproto/TimeStatsHelper.cpp b/services/surfaceflinger/TimeStats/timestatsproto/TimeStatsHelper.cpp
new file mode 100644
index 0000000..21f3ef3
--- /dev/null
+++ b/services/surfaceflinger/TimeStats/timestatsproto/TimeStatsHelper.cpp
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <android-base/stringprintf.h>
+#include <timestatsproto/TimeStatsHelper.h>
+
+#include <array>
+
+#define HISTOGRAM_SIZE 85
+
+using android::base::StringAppendF;
+using android::base::StringPrintf;
+
+namespace android {
+namespace surfaceflinger {
+
+// Time buckets for the histogram: each computed delta is counted under the
+// first bucket in this array that is not less than the delta.
+static const std::array<int32_t, HISTOGRAM_SIZE> histogramConfig =
+        {0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,  12,  13,  14,  15,  16,
+         17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,  30,  31,  32,  33,
+         34,  36,  38,  40,  42,  44,  46,  48,  50,  54,  58,  62,  66,  70,  74,  78,  82,
+         86,  90,  94,  98,  102, 106, 110, 114, 118, 122, 126, 130, 134, 138, 142, 146, 150,
+         200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000};
+
+void TimeStatsHelper::Histogram::insert(int32_t delta) {
+    if (delta < 0) return;
+    // std::lower_bound won't work on out of range values
+    if (delta > histogramConfig[HISTOGRAM_SIZE - 1]) {
+        hist[histogramConfig[HISTOGRAM_SIZE - 1]]++;
+        return;
+    }
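+    // In range: count the delta under the first bucket that is >= the delta.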
+    auto iter = std::lower_bound(histogramConfig.begin(), histogramConfig.end(), delta);
+    hist[*iter]++;
+}
+
+float TimeStatsHelper::Histogram::averageTime() const {
+    int64_t ret = 0;
+    int64_t count = 0;
+    for (auto& ele : hist) {
+        count += ele.second;
+        ret += ele.first * ele.second;
+    }
+    return static_cast<float>(ret) / count;
+}
+
+std::string TimeStatsHelper::Histogram::toString() const {
+    std::string result;
+    for (int32_t i = 0; i < HISTOGRAM_SIZE; ++i) {
+        int32_t bucket = histogramConfig[i];
+        int32_t count = (hist.count(bucket) == 0) ? 0 : hist.at(bucket);
+        StringAppendF(&result, "%dms=%d ", bucket, count);
+    }
+    result.back() = '\n';
+    return result;
+}
+
+std::string TimeStatsHelper::TimeStatsLayer::toString() const {
+    std::string result = "";
+    StringAppendF(&result, "layerName = %s\n", layerName.c_str());
+    StringAppendF(&result, "packageName = %s\n", packageName.c_str());
+    StringAppendF(&result, "statsStart = %lld\n", static_cast<long long int>(statsStart));
+    StringAppendF(&result, "statsEnd = %lld\n", static_cast<long long int>(statsEnd));
+    StringAppendF(&result, "totalFrames= %d\n", totalFrames);
+    auto iter = deltas.find("present2present");
+    if (iter != deltas.end()) {
+        StringAppendF(&result, "averageFPS = %.3f\n", 1000.0 / iter->second.averageTime());
+    }
+    for (auto& ele : deltas) {
+        StringAppendF(&result, "%s histogram is as below:\n", ele.first.c_str());
+        StringAppendF(&result, "%s", ele.second.toString().c_str());
+    }
+
+    return result;
+}
+
+std::string TimeStatsHelper::TimeStatsGlobal::toString(std::optional<uint32_t> maxLayers) const {
+    std::string result = "SurfaceFlinger TimeStats:\n";
+    StringAppendF(&result, "statsStart = %lld\n", static_cast<long long int>(statsStart));
+    StringAppendF(&result, "statsEnd = %lld\n", static_cast<long long int>(statsEnd));
+    StringAppendF(&result, "totalFrames= %d\n", totalFrames);
+    StringAppendF(&result, "missedFrames= %d\n", missedFrames);
+    StringAppendF(&result, "clientCompositionFrames= %d\n", clientCompositionFrames);
+    StringAppendF(&result, "TimeStats for each layer is as below:\n");
+    const auto dumpStats = generateDumpStats(maxLayers);
+    for (auto& ele : dumpStats) {
+        StringAppendF(&result, "%s", ele->toString().c_str());
+    }
+
+    return result;
+}
+
+SFTimeStatsLayerProto TimeStatsHelper::TimeStatsLayer::toProto() const {
+    SFTimeStatsLayerProto layerProto;
+    layerProto.set_layer_name(layerName);
+    layerProto.set_package_name(packageName);
+    layerProto.set_stats_start(statsStart);
+    layerProto.set_stats_end(statsEnd);
+    layerProto.set_total_frames(totalFrames);
+    for (auto& ele : deltas) {
+        SFTimeStatsDeltaProto* deltaProto = layerProto.add_deltas();
+        deltaProto->set_delta_name(ele.first);
+        for (auto& histEle : ele.second.hist) {
+            SFTimeStatsHistogramBucketProto* histProto = deltaProto->add_histograms();
+            histProto->set_render_millis(histEle.first);
+            histProto->set_frame_count(histEle.second);
+        }
+    }
+    return layerProto;
+}
+
+SFTimeStatsGlobalProto TimeStatsHelper::TimeStatsGlobal::toProto(
+        std::optional<uint32_t> maxLayers) const {
+    SFTimeStatsGlobalProto globalProto;
+    globalProto.set_stats_start(statsStart);
+    globalProto.set_stats_end(statsEnd);
+    globalProto.set_total_frames(totalFrames);
+    globalProto.set_missed_frames(missedFrames);
+    globalProto.set_client_composition_frames(clientCompositionFrames);
+    const auto dumpStats = generateDumpStats(maxLayers);
+    for (auto& ele : dumpStats) {
+        SFTimeStatsLayerProto* layerProto = globalProto.add_stats();
+        layerProto->CopyFrom(ele->toProto());
+    }
+    return globalProto;
+}
+
+std::vector<TimeStatsHelper::TimeStatsLayer const*>
+TimeStatsHelper::TimeStatsGlobal::generateDumpStats(std::optional<uint32_t> maxLayers) const {
+    std::vector<TimeStatsLayer const*> dumpStats;
+    for (auto& ele : stats) {
+        dumpStats.push_back(&ele.second);
+    }
+
+    std::sort(dumpStats.begin(), dumpStats.end(),
+              [](TimeStatsHelper::TimeStatsLayer const* l,
+                 TimeStatsHelper::TimeStatsLayer const* r) {
+                  return l->totalFrames > r->totalFrames;
+              });
+
+    if (maxLayers && (*maxLayers < dumpStats.size())) {
+        dumpStats.resize(*maxLayers);
+    }
+    return dumpStats;
+}
+
+} // namespace surfaceflinger
+} // namespace android
diff --git a/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsHelper.h b/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsHelper.h
new file mode 100644
index 0000000..1798555
--- /dev/null
+++ b/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsHelper.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <timestatsproto/TimeStatsProtoHeader.h>
+
+#include <optional>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+namespace android {
+namespace surfaceflinger {
+
+class TimeStatsHelper {
+public:
+    class Histogram {
+    public:
+        // Key is the delta time between timestamps
+        // Value is the number of appearances of that delta
+        std::unordered_map<int32_t, int32_t> hist;
+
+        void insert(int32_t delta);
+        float averageTime() const;
+        std::string toString() const;
+    };
+
+    class TimeStatsLayer {
+    public:
+        std::string layerName;
+        std::string packageName;
+        int64_t statsStart = 0;
+        int64_t statsEnd = 0;
+        int32_t totalFrames = 0;
+        std::unordered_map<std::string, Histogram> deltas;
+
+        std::string toString() const;
+        SFTimeStatsLayerProto toProto() const;
+    };
+
+    class TimeStatsGlobal {
+    public:
+        int64_t statsStart = 0;
+        int64_t statsEnd = 0;
+        int32_t totalFrames = 0;
+        int32_t missedFrames = 0;
+        int32_t clientCompositionFrames = 0;
+        std::unordered_map<std::string, TimeStatsLayer> stats;
+
+        std::string toString(std::optional<uint32_t> maxLayers) const;
+        SFTimeStatsGlobalProto toProto(std::optional<uint32_t> maxLayers) const;
+
+    private:
+        std::vector<TimeStatsLayer const*> generateDumpStats(
+                std::optional<uint32_t> maxLayers) const;
+    };
+};
+
+} // namespace surfaceflinger
+} // namespace android
diff --git a/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsProtoHeader.h b/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsProtoHeader.h
new file mode 100644
index 0000000..fe0d150
--- /dev/null
+++ b/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsProtoHeader.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #pragma is used here to disable the warnings emitted from the protobuf
+// headers. Adding #pragma GCC system_header before including timestats.pb.h
+// suppresses the protobuf warnings while allowing the rest of the files to
+// continue using the current warning flags.
+// This file should be included instead of including timestats.pb.h directly.
+#pragma GCC system_header
+#include <timestats.pb.h>
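
As a tiny illustration (not part of this change; the consumer file is hypothetical), code that uses the generated messages would include this wrapper rather than the generated header, so protobuf's warnings never leak into the consumer's own warning flags:

    // Hypothetical consumer .cpp
    #include <timestatsproto/TimeStatsProtoHeader.h>  // instead of <timestats.pb.h>

    android::surfaceflinger::SFTimeStatsGlobalProto proto;
    proto.set_total_frames(0);
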
diff --git a/services/surfaceflinger/TimeStats/timestatsproto/timestats.proto b/services/surfaceflinger/TimeStats/timestatsproto/timestats.proto
new file mode 100644
index 0000000..f29fbd1
--- /dev/null
+++ b/services/surfaceflinger/TimeStats/timestatsproto/timestats.proto
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto2";
+
+package android.surfaceflinger;
+
+option optimize_for = LITE_RUNTIME;
+
+message SFTimeStatsGlobalProto {
+  // The start & end timestamps in UTC as
+  // milliseconds since January 1, 1970
+  optional int64 stats_start = 1;
+  optional int64 stats_end = 2;
+  // Total frames
+  optional int32 total_frames = 3;
+  // Total missed frames of SurfaceFlinger.
+  optional int32 missed_frames = 4;
+  // Total frames fallback to client composition.
+  optional int32 client_composition_frames = 5;
+
+  repeated SFTimeStatsLayerProto stats = 6;
+}
+
+message SFTimeStatsLayerProto {
+  // The layer name
+  optional string layer_name = 1;
+  // The package name
+  optional string package_name = 2;
+  // The start & end timestamps in UTC as
+  // milliseconds since January 1, 1970
+  optional int64 stats_start = 3;
+  optional int64 stats_end = 4;
+  // Distinct frame count.
+  optional int32 total_frames = 5;
+
+  repeated SFTimeStatsDeltaProto deltas = 6;
+}
+
+message SFTimeStatsDeltaProto {
+  // Name of the time interval
+  optional string delta_name = 1;
+  // Histogram of the delta time
+  repeated SFTimeStatsHistogramBucketProto histograms = 2;
+}
+
+message SFTimeStatsHistogramBucketProto {
+  // Lower bound of render time in milliseconds.
+  optional int32 render_millis = 1;
+  // Number of frames in the bucket.
+  optional int32 frame_count = 2;
+}
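
These messages map directly onto the TimeStatsHelper classes declared earlier: each entry in TimeStatsLayer::deltas becomes one SFTimeStatsDeltaProto, and each (delta, count) pair in a Histogram becomes one SFTimeStatsHistogramBucketProto. As a minimal sketch (not part of this change, the function name is made up, and it assumes the standard protoc-generated lite C++ API), the conversion could look like this:

    // (within namespace android::surfaceflinger)
    SFTimeStatsLayerProto layerProtoSketch(const TimeStatsHelper::TimeStatsLayer& layer) {
        SFTimeStatsLayerProto proto;
        proto.set_layer_name(layer.layerName);
        proto.set_package_name(layer.packageName);
        proto.set_stats_start(layer.statsStart);
        proto.set_stats_end(layer.statsEnd);
        proto.set_total_frames(layer.totalFrames);
        for (const auto& delta : layer.deltas) {
            SFTimeStatsDeltaProto* deltaProto = proto.add_deltas();
            deltaProto->set_delta_name(delta.first);
            // Each (delta in ms -> frame count) entry becomes one histogram bucket.
            for (const auto& bucket : delta.second.hist) {
                SFTimeStatsHistogramBucketProto* bucketProto = deltaProto->add_histograms();
                bucketProto->set_render_millis(bucket.first);
                bucketProto->set_frame_count(bucket.second);
            }
        }
        return proto;
    }
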
diff --git a/services/surfaceflinger/tests/Android.bp b/services/surfaceflinger/tests/Android.bp
index 7523399..322e8a0 100644
--- a/services/surfaceflinger/tests/Android.bp
+++ b/services/surfaceflinger/tests/Android.bp
@@ -36,6 +36,7 @@
         "liblayers_proto",
         "liblog",
         "libprotobuf-cpp-full",
+        "libtimestats_proto",
         "libui",
         "libutils",
     ]
diff --git a/services/surfaceflinger/tests/fakehwc/Android.bp b/services/surfaceflinger/tests/fakehwc/Android.bp
index 00bc621..520df2d 100644
--- a/services/surfaceflinger/tests/fakehwc/Android.bp
+++ b/services/surfaceflinger/tests/fakehwc/Android.bp
@@ -25,6 +25,7 @@
         "liblog",
         "libnativewindow",
         "libsync",
+        "libtimestats_proto",
         "libui",
         "libutils",
     ],
diff --git a/services/surfaceflinger/tests/unittests/Android.bp b/services/surfaceflinger/tests/unittests/Android.bp
index bcabe0d..39761dd 100644
--- a/services/surfaceflinger/tests/unittests/Android.bp
+++ b/services/surfaceflinger/tests/unittests/Android.bp
@@ -20,6 +20,8 @@
     srcs: [
         ":libsurfaceflinger_sources",
         "DisplayTransactionTest.cpp",
+        "EventControlThreadTest.cpp",
+        "EventThreadTest.cpp",
         "mock/DisplayHardware/MockComposer.cpp",
         "mock/DisplayHardware/MockDisplaySurface.cpp",
         "mock/gui/MockGraphicBufferConsumer.cpp",
diff --git a/services/surfaceflinger/tests/unittests/AsyncCallRecorder.h b/services/surfaceflinger/tests/unittests/AsyncCallRecorder.h
new file mode 100644
index 0000000..2245ee1
--- /dev/null
+++ b/services/surfaceflinger/tests/unittests/AsyncCallRecorder.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <chrono>
+#include <condition_variable>
+#include <deque>
+#include <mutex>
+#include <optional>
+#include <thread>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include <android-base/thread_annotations.h>
+
+namespace android {
+
+// This class helps record calls made by another thread when they are made
+// asynchronously, with no other way for the tests to verify that the calls have
+// been made.
+//
+// A normal Google Mock recorder, while thread safe, does not allow you to wait
+// for asynchronous calls to be made.
+//
+// Usage:
+//
+// In the test, use a Google Mock expectation to invoke an instance of the
+// recorder:
+//
+//     AsyncCallRecorder<void (*)(int)> recorder;
+//
+//     EXPECT_CALL(someMock, someFunction(_)).
+//             .WillRepeatedly(Invoke(recorder.getInvocable()));
+//
+// Then you can invoke the functionality being tested:
+//
+//     threadUnderTest.doSomethingAsync()
+//
+// And afterwards make a number of assertions using the recorder:
+//
+//     // Wait for one call (with reasonable default timeout), and get the args
+//     // as a std::tuple inside a std::optional.
+//     auto args = recorder.waitForCall();
+//     // The returned std::optional will have a value if the recorder function
+//     // was called.
+//     ASSERT_TRUE(args.has_value());
+//     // The arguments can be checked if needed using standard tuple
+//     // operations.
+//     EXPECT_EQ(123, std::get<0>(args.value()));
+//
+// Alternatively maybe you want to assert that a call was not made.
+//
+//     EXPECT_FALSE(recorder.waitForUnexpectedCall().has_value());
+//
+// However, this check uses a very short timeout so as not to block the test
+// unnecessarily, so it is possible for the check to return false and for the
+// recorder to observe a call made shortly afterwards.
+template <typename Func>
+class AsyncCallRecorder;
+
+template <typename... Args>
+class AsyncCallRecorder<void (*)(Args...)> {
+public:
+    // For the tests, we expect the wait for an expected change to be signaled
+    // to be much shorter than this.
+    static constexpr std::chrono::milliseconds DEFAULT_CALL_EXPECTED_TIMEOUT{10};
+
+    // The wait here is tricky. We don't expect a change, but we don't want to
+    // wait forever (or for longer than the typical test function runtime). As
+    // even the simplest Google Test can take 1ms (1000us) to run, we wait for
+    // half that time.
+    static constexpr std::chrono::microseconds UNEXPECTED_CALL_TIMEOUT{500};
+
+    using ArgTuple = std::tuple<std::remove_cv_t<std::remove_reference_t<Args>>...>;
+
+    void recordCall(Args... args) {
+        std::lock_guard<std::mutex> lock(mMutex);
+        mCalls.emplace_back(std::make_tuple(args...));
+        mCondition.notify_all();
+    }
+
+    // Returns a functor which can be used with the Google Mock Invoke()
+    // function, or as a std::function to record calls.
+    auto getInvocable() {
+        return [this](Args... args) { recordCall(args...); };
+    }
+
+    // Returns a set of arguments as a std::optional<std::tuple<...>> for the
+    // oldest call, waiting for the given timeout if necessary if there are no
+    // arguments in the FIFO.
+    std::optional<ArgTuple> waitForCall(
+            std::chrono::microseconds timeout = DEFAULT_CALL_EXPECTED_TIMEOUT)
+            NO_THREAD_SAFETY_ANALYSIS {
+        std::unique_lock<std::mutex> lock(mMutex);
+
+        // Wait if necessary for us to have a record from a call.
+        mCondition.wait_for(lock, timeout,
+                            [this]() NO_THREAD_SAFETY_ANALYSIS { return !mCalls.empty(); });
+
+        // Return the arguments from the oldest call, if one was made
+        bool called = !mCalls.empty();
+        std::optional<ArgTuple> result;
+        if (called) {
+            result.emplace(std::move(mCalls.front()));
+            mCalls.pop_front();
+        }
+        return result;
+    }
+
+    // Waits using a small default timeout for when a call is not expected to be
+    // made. The returned std::optional<std::tuple<...>> should only have a value
+    // if a set of arguments was unexpectedly received because a call was
+    // actually made.
+    //
+    // Note this function uses a small timeout to not block test execution, and
+    // it is possible the code under test could make the call AFTER the timeout
+    // expires.
+    std::optional<ArgTuple> waitForUnexpectedCall() { return waitForCall(UNEXPECTED_CALL_TIMEOUT); }
+
+private:
+    std::mutex mMutex;
+    std::condition_variable mCondition;
+    std::deque<ArgTuple> mCalls GUARDED_BY(mMutex);
+};
+
+// Like AsyncCallRecorder, but for when the function being invoked
+// asynchronously is expected to return a value.
+//
+// This helper allows a single constant return value to be set to be returned by
+// all calls that were made.
+template <typename Func>
+class AsyncCallRecorderWithCannedReturn;
+
+template <typename Ret, typename... Args>
+class AsyncCallRecorderWithCannedReturn<Ret (*)(Args...)>
+      : public AsyncCallRecorder<void (*)(Args...)> {
+public:
+    explicit AsyncCallRecorderWithCannedReturn(Ret returnvalue) : mReturnValue(returnvalue) {}
+
+    auto getInvocable() {
+        return [this](Args... args) {
+            this->recordCall(args...);
+            return mReturnValue;
+        };
+    }
+
+private:
+    const Ret mReturnValue;
+};
+
+} // namespace android
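
The usage comments above only cover the void-returning recorder. As a hypothetical test fragment (not part of this change; the mock method name and values are made up for illustration), AsyncCallRecorderWithCannedReturn is wired up the same way when the mocked function must also return something:

    // The recorder returns 'true' to every recorded call.
    AsyncCallRecorderWithCannedReturn<bool (*)(int)> recorder(true);

    EXPECT_CALL(someMock, someFunctionReturningBool(_))
            .WillRepeatedly(Invoke(recorder.getInvocable()));

    // ... exercise the code under test, which calls the mock asynchronously ...

    auto args = recorder.waitForCall();
    ASSERT_TRUE(args.has_value());
    EXPECT_EQ(42, std::get<0>(args.value()));
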
diff --git a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
index 2b8a22c..9b308bf 100644
--- a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
+++ b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
@@ -45,11 +45,12 @@
 using testing::Return;
 using testing::SetArgPointee;
 
-using android::hardware::graphics::common::V1_0::Hdr;
-using android::hardware::graphics::common::V1_1::ColorMode;
+using android::Hwc2::ColorMode;
 using android::Hwc2::Error;
+using android::Hwc2::Hdr;
 using android::Hwc2::IComposer;
 using android::Hwc2::IComposerClient;
+using android::Hwc2::PerFrameMetadataKey;
 using android::Hwc2::RenderIntent;
 
 using FakeDisplayDeviceInjector = TestableSurfaceFlinger::FakeDisplayDeviceInjector;
@@ -61,6 +62,8 @@
 constexpr int32_t DEFAULT_DPI = 320;
 constexpr int DEFAULT_VIRTUAL_DISPLAY_SURFACE_FORMAT = HAL_PIXEL_FORMAT_RGB_565;
 
+constexpr int HWC_POWER_MODE_LEET = 1337; // An out of range power mode value
+
 /* ------------------------------------------------------------------------
  * Boolean avoidance
  *
@@ -303,11 +306,7 @@
     static constexpr HWC2::DisplayType HWC_DISPLAY_TYPE = hwcDisplayType;
 
     // The HWC active configuration id
-    // TODO(b/69807179): SurfaceFlinger does not correctly get the active
-    // config. Once it does, change this to non-zero so that it is properly
-    // covered.
-    // static constexpr int HWC_ACTIVE_CONFIG_ID = 2001;
-    static constexpr int HWC_ACTIVE_CONFIG_ID = 0;
+    static constexpr int HWC_ACTIVE_CONFIG_ID = 2001;
 
     static void injectPendingHotplugEvent(DisplayTransactionTest* test,
                                           HWC2::Connection connection) {
@@ -359,13 +358,11 @@
     // Called by tests to set up HWC call expectations
     static void setupHwcGetActiveConfigCallExpectations(DisplayTransactionTest* test) {
         EXPECT_CALL(*test->mComposer, getActiveConfig(HWC_DISPLAY_ID, _))
-                .WillOnce(DoAll(SetArgPointee<1>(HWC_ACTIVE_CONFIG_ID), Return(Error::NONE)));
+                .WillRepeatedly(DoAll(SetArgPointee<1>(HWC_ACTIVE_CONFIG_ID), Return(Error::NONE)));
     }
 };
 
 struct NonHwcDisplayVariant {
-    static constexpr int HWC_ACTIVE_CONFIG_ID = 0;
-
     static void injectHwcDisplay(DisplayTransactionTest*) {}
 
     static void setupHwcGetActiveConfigCallExpectations(DisplayTransactionTest* test) {
@@ -386,6 +383,11 @@
                                  DisplayVariant<type, type, width, height, critical, Async::FALSE,
                                                 Secure::TRUE, GRALLOC_USAGE_PHYSICAL_DISPLAY>> {};
 
+// An invalid display
+using InvalidDisplayVariant =
+        DisplayVariant<DisplayDevice::DISPLAY_ID_INVALID, DisplayDevice::DISPLAY_ID_INVALID, 0, 0,
+                       Critical::FALSE, Async::FALSE, Secure::FALSE, 0>;
+
 // A primary display is a physical display that is critical
 using PrimaryDisplayVariant =
         PhysicalDisplayVariant<1001, DisplayDevice::DISPLAY_PRIMARY, 3840, 2160, Critical::TRUE>;
@@ -489,33 +491,6 @@
     }
 };
 
-// For this variant, SurfaceFlinger should configure itself with wide color
-// display support, and the display should respond with an non-empty list of
-// supported color modes.
-template <typename Display>
-struct WideColorP3EnhanceSupportedVariant {
-    static constexpr bool WIDE_COLOR_SUPPORTED = true;
-
-    static void injectConfigChange(DisplayTransactionTest* test) {
-        test->mFlinger.mutableHasWideColorDisplay() = true;
-        test->mFlinger.mutableDisplayColorSetting() = DisplayColorSetting::ENHANCED;
-    }
-
-    static void setupComposerCallExpectations(DisplayTransactionTest* test) {
-        EXPECT_CALL(*test->mComposer, getColorModes(Display::HWC_DISPLAY_ID, _))
-                .WillOnce(DoAll(SetArgPointee<1>(std::vector<ColorMode>({ColorMode::DISPLAY_P3})),
-                                Return(Error::NONE)));
-        EXPECT_CALL(*test->mComposer,
-                    getRenderIntents(Display::HWC_DISPLAY_ID, ColorMode::DISPLAY_P3, _))
-                .WillOnce(
-                        DoAll(SetArgPointee<2>(std::vector<RenderIntent>({RenderIntent::ENHANCE})),
-                              Return(Error::NONE)));
-        EXPECT_CALL(*test->mComposer,
-                    setColorMode(Display::HWC_DISPLAY_ID, ColorMode::SRGB, RenderIntent::ENHANCE))
-                .WillOnce(Return(Error::NONE));
-    }
-};
-
 // For this variant, SurfaceFlinger should configure itself with wide display
 // support, but the display should respond with an empty list of supported color
 // modes. Wide-color support for the display should not be configured.
@@ -530,10 +505,7 @@
     static void setupComposerCallExpectations(DisplayTransactionTest* test) {
         EXPECT_CALL(*test->mComposer, getColorModes(Display::HWC_DISPLAY_ID, _))
                 .WillOnce(DoAll(SetArgPointee<1>(std::vector<ColorMode>()), Return(Error::NONE)));
-        EXPECT_CALL(*test->mComposer,
-                    setColorMode(Display::HWC_DISPLAY_ID, ColorMode::NATIVE,
-                                 RenderIntent::COLORIMETRIC))
-                .WillOnce(Return(Error::NONE));
+        EXPECT_CALL(*test->mComposer, setColorMode(_, _, _)).Times(0);
     }
 };
 
@@ -603,49 +575,118 @@
     }
 };
 
+struct NonHwcPerFrameMetadataSupportVariant {
+    static constexpr int PER_FRAME_METADATA_KEYS = 0;
+    static void setupComposerCallExpectations(DisplayTransactionTest* test) {
+        EXPECT_CALL(*test->mComposer, getPerFrameMetadataKeys(_, _)).Times(0);
+    }
+};
+
+template <typename Display>
+struct NoPerFrameMetadataSupportVariant {
+    static constexpr int PER_FRAME_METADATA_KEYS = 0;
+    static void setupComposerCallExpectations(DisplayTransactionTest* test) {
+        EXPECT_CALL(*test->mComposer, getPerFrameMetadataKeys(Display::HWC_DISPLAY_ID, _))
+                .WillOnce(DoAll(SetArgPointee<1>(std::vector<PerFrameMetadataKey>()),
+                                Return(Error::NONE)));
+    }
+};
+
+template <typename Display>
+struct Smpte2086PerFrameMetadataSupportVariant {
+    static constexpr int PER_FRAME_METADATA_KEYS = HdrMetadata::Type::SMPTE2086;
+    static void setupComposerCallExpectations(DisplayTransactionTest* test) {
+        EXPECT_CALL(*test->mComposer, getPerFrameMetadataKeys(Display::HWC_DISPLAY_ID, _))
+                .WillOnce(DoAll(SetArgPointee<1>(std::vector<PerFrameMetadataKey>({
+                                        PerFrameMetadataKey::DISPLAY_RED_PRIMARY_X,
+                                        PerFrameMetadataKey::DISPLAY_RED_PRIMARY_Y,
+                                        PerFrameMetadataKey::DISPLAY_GREEN_PRIMARY_X,
+                                        PerFrameMetadataKey::DISPLAY_GREEN_PRIMARY_Y,
+                                        PerFrameMetadataKey::DISPLAY_BLUE_PRIMARY_X,
+                                        PerFrameMetadataKey::DISPLAY_BLUE_PRIMARY_Y,
+                                        PerFrameMetadataKey::WHITE_POINT_X,
+                                        PerFrameMetadataKey::WHITE_POINT_Y,
+                                        PerFrameMetadataKey::MAX_LUMINANCE,
+                                        PerFrameMetadataKey::MIN_LUMINANCE,
+                                })),
+                                Return(Error::NONE)));
+    }
+};
+
+template <typename Display>
+struct Cta861_3_PerFrameMetadataSupportVariant {
+    static constexpr int PER_FRAME_METADATA_KEYS = HdrMetadata::Type::CTA861_3;
+    static void setupComposerCallExpectations(DisplayTransactionTest* test) {
+        EXPECT_CALL(*test->mComposer, getPerFrameMetadataKeys(Display::HWC_DISPLAY_ID, _))
+                .WillOnce(DoAll(SetArgPointee<1>(std::vector<PerFrameMetadataKey>({
+                                        PerFrameMetadataKey::MAX_CONTENT_LIGHT_LEVEL,
+                                        PerFrameMetadataKey::MAX_FRAME_AVERAGE_LIGHT_LEVEL,
+                                })),
+                                Return(Error::NONE)));
+    }
+};
+
 /* ------------------------------------------------------------------------
  * Typical display configurations to test
  */
 
-template <typename DisplayPolicy, typename WideColorSupportPolicy, typename HdrSupportPolicy>
+template <typename DisplayPolicy, typename WideColorSupportPolicy, typename HdrSupportPolicy,
+          typename PerFrameMetadataSupportPolicy>
 struct Case {
     using Display = DisplayPolicy;
     using WideColorSupport = WideColorSupportPolicy;
     using HdrSupport = HdrSupportPolicy;
+    using PerFrameMetadataSupport = PerFrameMetadataSupportPolicy;
 };
 
 using SimplePrimaryDisplayCase =
         Case<PrimaryDisplayVariant, WideColorNotSupportedVariant<PrimaryDisplayVariant>,
-             HdrNotSupportedVariant<PrimaryDisplayVariant>>;
+             HdrNotSupportedVariant<PrimaryDisplayVariant>,
+             NoPerFrameMetadataSupportVariant<PrimaryDisplayVariant>>;
 using SimpleExternalDisplayCase =
         Case<ExternalDisplayVariant, WideColorNotSupportedVariant<ExternalDisplayVariant>,
-             HdrNotSupportedVariant<ExternalDisplayVariant>>;
+             HdrNotSupportedVariant<ExternalDisplayVariant>,
+             NoPerFrameMetadataSupportVariant<ExternalDisplayVariant>>;
 using SimpleTertiaryDisplayCase =
         Case<TertiaryDisplayVariant, WideColorNotSupportedVariant<TertiaryDisplayVariant>,
-             HdrNotSupportedVariant<TertiaryDisplayVariant>>;
+             HdrNotSupportedVariant<TertiaryDisplayVariant>,
+             NoPerFrameMetadataSupportVariant<TertiaryDisplayVariant>>;
 using NonHwcVirtualDisplayCase =
         Case<NonHwcVirtualDisplayVariant<1024, 768, Secure::FALSE>,
-             WideColorSupportNotConfiguredVariant, NonHwcDisplayHdrSupportVariant>;
+             WideColorSupportNotConfiguredVariant, NonHwcDisplayHdrSupportVariant,
+             NonHwcPerFrameMetadataSupportVariant>;
 using SimpleHwcVirtualDisplayVariant = HwcVirtualDisplayVariant<1024, 768, Secure::TRUE>;
 using HwcVirtualDisplayCase =
         Case<SimpleHwcVirtualDisplayVariant, WideColorSupportNotConfiguredVariant,
-             HdrNotSupportedVariant<SimpleHwcVirtualDisplayVariant>>;
+             HdrNotSupportedVariant<SimpleHwcVirtualDisplayVariant>,
+             NoPerFrameMetadataSupportVariant<SimpleHwcVirtualDisplayVariant>>;
 using WideColorP3ColorimetricDisplayCase =
         Case<PrimaryDisplayVariant, WideColorP3ColorimetricSupportedVariant<PrimaryDisplayVariant>,
-             HdrNotSupportedVariant<PrimaryDisplayVariant>>;
-using WideColorP3EnhanceDisplayCase =
-        Case<PrimaryDisplayVariant, WideColorP3EnhanceSupportedVariant<PrimaryDisplayVariant>,
-             HdrNotSupportedVariant<PrimaryDisplayVariant>>;
+             HdrNotSupportedVariant<PrimaryDisplayVariant>,
+             NoPerFrameMetadataSupportVariant<PrimaryDisplayVariant>>;
 using Hdr10DisplayCase =
         Case<PrimaryDisplayVariant, WideColorNotSupportedVariant<PrimaryDisplayVariant>,
-             Hdr10SupportedVariant<PrimaryDisplayVariant>>;
+             Hdr10SupportedVariant<PrimaryDisplayVariant>,
+             NoPerFrameMetadataSupportVariant<PrimaryDisplayVariant>>;
 using HdrHlgDisplayCase =
         Case<PrimaryDisplayVariant, WideColorNotSupportedVariant<PrimaryDisplayVariant>,
-             HdrHlgSupportedVariant<PrimaryDisplayVariant>>;
+             HdrHlgSupportedVariant<PrimaryDisplayVariant>,
+             NoPerFrameMetadataSupportVariant<PrimaryDisplayVariant>>;
 using HdrDolbyVisionDisplayCase =
         Case<PrimaryDisplayVariant, WideColorNotSupportedVariant<PrimaryDisplayVariant>,
-             HdrDolbyVisionSupportedVariant<PrimaryDisplayVariant>>;
-
+             HdrDolbyVisionSupportedVariant<PrimaryDisplayVariant>,
+             NoPerFrameMetadataSupportVariant<PrimaryDisplayVariant>>;
+using HdrSmpte2086DisplayCase =
+        Case<PrimaryDisplayVariant, WideColorNotSupportedVariant<PrimaryDisplayVariant>,
+             HdrNotSupportedVariant<PrimaryDisplayVariant>,
+             Smpte2086PerFrameMetadataSupportVariant<PrimaryDisplayVariant>>;
+using HdrCta861_3_DisplayCase =
+        Case<PrimaryDisplayVariant, WideColorNotSupportedVariant<PrimaryDisplayVariant>,
+             HdrNotSupportedVariant<PrimaryDisplayVariant>,
+             Cta861_3_PerFrameMetadataSupportVariant<PrimaryDisplayVariant>>;
+using InvalidDisplayCase = Case<InvalidDisplayVariant, WideColorSupportNotConfiguredVariant,
+                                NonHwcDisplayHdrSupportVariant,
+                                NoPerFrameMetadataSupportVariant<InvalidDisplayVariant>>;
 /* ------------------------------------------------------------------------
  *
  * SurfaceFlinger::onHotplugReceived
@@ -983,12 +1024,10 @@
 
     // Various native window calls will be made.
     Case::Display::setupNativeWindowSurfaceCreationCallExpectations(this);
-
-    // TODO(b/69807179): SurfaceFlinger does not correctly get the active config.
-    // Case::Display::setupHwcGetActiveConfigCallExpectations(this)
-
+    Case::Display::setupHwcGetActiveConfigCallExpectations(this);
     Case::WideColorSupport::setupComposerCallExpectations(this);
     Case::HdrSupport::setupComposerCallExpectations(this);
+    Case::PerFrameMetadataSupport::setupComposerCallExpectations(this);
 
     // --------------------------------------------------------------------
     // Invocation
@@ -1009,7 +1048,12 @@
     EXPECT_EQ(Case::HdrSupport::HDR10_SUPPORTED, device->hasHDR10Support());
     EXPECT_EQ(Case::HdrSupport::HDR_HLG_SUPPORTED, device->hasHLGSupport());
     EXPECT_EQ(Case::HdrSupport::HDR_DOLBY_VISION_SUPPORTED, device->hasDolbyVisionSupport());
-    EXPECT_EQ(Case::Display::HWC_ACTIVE_CONFIG_ID, device->getActiveConfig());
+    // Note: This is not Case::Display::HWC_ACTIVE_CONFIG_ID as the ids are
+    // remapped, and the test only ever sets up one config. If there were an error
+    // looking up the remapped index, device->getActiveConfig() would be -1 instead.
+    EXPECT_EQ(0, device->getActiveConfig());
+    EXPECT_EQ(Case::PerFrameMetadataSupport::PER_FRAME_METADATA_KEYS,
+              device->getSupportedPerFrameMetadata());
 }
 
 TEST_F(SetupNewDisplayDeviceInternalTest, createSimplePrimaryDisplay) {
@@ -1036,10 +1080,6 @@
     setupNewDisplayDeviceInternalTest<WideColorP3ColorimetricDisplayCase>();
 }
 
-TEST_F(SetupNewDisplayDeviceInternalTest, createWideColorP3EnhanceDisplay) {
-    setupNewDisplayDeviceInternalTest<WideColorP3EnhanceDisplayCase>();
-}
-
 TEST_F(SetupNewDisplayDeviceInternalTest, createHdr10Display) {
     setupNewDisplayDeviceInternalTest<Hdr10DisplayCase>();
 }
@@ -1052,6 +1092,14 @@
     setupNewDisplayDeviceInternalTest<HdrDolbyVisionDisplayCase>();
 }
 
+TEST_F(SetupNewDisplayDeviceInternalTest, createHdrSmpte2086DisplayCase) {
+    setupNewDisplayDeviceInternalTest<HdrSmpte2086DisplayCase>();
+}
+
+TEST_F(SetupNewDisplayDeviceInternalTest, createHdrCta816_3_DisplayCase) {
+    setupNewDisplayDeviceInternalTest<HdrCta861_3_DisplayCase>();
+}
+
 /* ------------------------------------------------------------------------
  * SurfaceFlinger::handleTransactionLocked(eDisplayTransactionNeeded)
  */
@@ -1109,6 +1157,7 @@
 
     Case::WideColorSupport::setupComposerCallExpectations(this);
     Case::HdrSupport::setupComposerCallExpectations(this);
+    Case::PerFrameMetadataSupport::setupComposerCallExpectations(this);
 
     EXPECT_CALL(*mSurfaceInterceptor, saveDisplayCreation(_)).Times(1);
     EXPECT_CALL(*mEventThread, onHotplugReceived(Case::Display::TYPE, true)).Times(1);
@@ -1448,6 +1497,7 @@
     Case::Display::setupHwcVirtualDisplayCreationCallExpectations(this);
     Case::WideColorSupport::setupComposerCallExpectations(this);
     Case::HdrSupport::setupComposerCallExpectations(this);
+    Case::PerFrameMetadataSupport::setupComposerCallExpectations(this);
 
     // --------------------------------------------------------------------
     // Invocation
@@ -1560,7 +1610,7 @@
     // --------------------------------------------------------------------
     // Postconditions
 
-    EXPECT_EQ(newLayerStack, getDisplayDevice(display.token())->getLayerStack());
+    EXPECT_EQ(newLayerStack, display.mutableDisplayDevice()->getLayerStack());
 }
 
 TEST_F(HandleTransactionLockedTest, processesDisplayTransformChanges) {
@@ -1588,7 +1638,7 @@
     // --------------------------------------------------------------------
     // Postconditions
 
-    EXPECT_EQ(newTransform, getDisplayDevice(display.token())->getOrientation());
+    EXPECT_EQ(newTransform, display.mutableDisplayDevice()->getOrientation());
 }
 
 TEST_F(HandleTransactionLockedTest, processesDisplayViewportChanges) {
@@ -1616,7 +1666,7 @@
     // --------------------------------------------------------------------
     // Postconditions
 
-    EXPECT_EQ(newViewport, getDisplayDevice(display.token())->getViewport());
+    EXPECT_EQ(newViewport, display.mutableDisplayDevice()->getViewport());
 }
 
 TEST_F(HandleTransactionLockedTest, processesDisplayFrameChanges) {
@@ -1644,7 +1694,7 @@
     // --------------------------------------------------------------------
     // Postconditions
 
-    EXPECT_EQ(newFrame, getDisplayDevice(display.token())->getFrame());
+    EXPECT_EQ(newFrame, display.mutableDisplayDevice()->getFrame());
 }
 
 TEST_F(HandleTransactionLockedTest, processesDisplayWidthChanges) {
@@ -1729,5 +1779,1020 @@
     mFlinger.handleTransactionLocked(eDisplayTransactionNeeded);
 }
 
+/* ------------------------------------------------------------------------
+ * SurfaceFlinger::setDisplayStateLocked
+ */
+
+TEST_F(DisplayTransactionTest, setDisplayStateLockedDoesNothingWithUnknownDisplay) {
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // We have an unknown display token not associated with a known display
+    sp<BBinder> displayToken = new BBinder();
+
+    // The requested display state references the unknown display.
+    DisplayState state;
+    state.what = DisplayState::eLayerStackChanged;
+    state.token = displayToken;
+    state.layerStack = 456;
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    uint32_t flags = mFlinger.setDisplayStateLocked(state);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    // The returned flags are empty
+    EXPECT_EQ(0u, flags);
+
+    // The display token still doesn't match anything known.
+    EXPECT_FALSE(hasCurrentDisplayState(displayToken));
+}
+
+TEST_F(DisplayTransactionTest, setDisplayStateLockedDoesNothingWithInvalidDisplay) {
+    using Case = InvalidDisplayCase;
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // An invalid display is set up
+    auto display = Case::Display::makeFakeExistingDisplayInjector(this);
+    display.inject();
+
+    // The invalid display has some state
+    display.mutableCurrentDisplayState().layerStack = 654u;
+
+    // The requested display state tries to change the display state.
+    DisplayState state;
+    state.what = DisplayState::eLayerStackChanged;
+    state.token = display.token();
+    state.layerStack = 456;
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    uint32_t flags = mFlinger.setDisplayStateLocked(state);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    // The returned flags are empty
+    EXPECT_EQ(0u, flags);
+
+    // The current display layer stack value is unchanged.
+    EXPECT_EQ(654u, getCurrentDisplayState(display.token()).layerStack);
+}
+
+TEST_F(DisplayTransactionTest, setDisplayStateLockedDoesNothingWhenNoChanges) {
+    using Case = SimplePrimaryDisplayCase;
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // A display is already set up
+    auto display = Case::Display::makeFakeExistingDisplayInjector(this);
+    display.inject();
+
+    // No changes are made to the display
+    DisplayState state;
+    state.what = 0;
+    state.token = display.token();
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    uint32_t flags = mFlinger.setDisplayStateLocked(state);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    // The returned flags are empty
+    EXPECT_EQ(0u, flags);
+}
+
+TEST_F(DisplayTransactionTest, setDisplayStateLockedDoesNothingIfSurfaceDidNotChange) {
+    using Case = SimplePrimaryDisplayCase;
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // A display is already set up
+    auto display = Case::Display::makeFakeExistingDisplayInjector(this);
+    display.inject();
+
+    // There is a surface that can be set.
+    sp<mock::GraphicBufferProducer> surface = new mock::GraphicBufferProducer();
+
+    // The current display state has the surface set
+    display.mutableCurrentDisplayState().surface = surface;
+
+    // The incoming request sets the same surface
+    DisplayState state;
+    state.what = DisplayState::eSurfaceChanged;
+    state.token = display.token();
+    state.surface = surface;
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    uint32_t flags = mFlinger.setDisplayStateLocked(state);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    // The returned flags are empty
+    EXPECT_EQ(0u, flags);
+
+    // The current display state is unchanged.
+    EXPECT_EQ(surface.get(), display.getCurrentDisplayState().surface.get());
+}
+
+TEST_F(DisplayTransactionTest, setDisplayStateLockedRequestsUpdateIfSurfaceChanged) {
+    using Case = SimplePrimaryDisplayCase;
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // A display is already set up
+    auto display = Case::Display::makeFakeExistingDisplayInjector(this);
+    display.inject();
+
+    // There is a surface that can be set.
+    sp<mock::GraphicBufferProducer> surface = new mock::GraphicBufferProducer();
+
+    // The current display state does not have a surface
+    display.mutableCurrentDisplayState().surface = nullptr;
+
+    // The incoming request sets a surface
+    DisplayState state;
+    state.what = DisplayState::eSurfaceChanged;
+    state.token = display.token();
+    state.surface = surface;
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    uint32_t flags = mFlinger.setDisplayStateLocked(state);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    // The returned flags indicate a transaction is needed
+    EXPECT_EQ(eDisplayTransactionNeeded, flags);
+
+    // The current display layer stack state is set to the new value
+    EXPECT_EQ(surface.get(), display.getCurrentDisplayState().surface.get());
+}
+
+TEST_F(DisplayTransactionTest, setDisplayStateLockedDoesNothingIfLayerStackDidNotChange) {
+    using Case = SimplePrimaryDisplayCase;
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // A display is already set up
+    auto display = Case::Display::makeFakeExistingDisplayInjector(this);
+    display.inject();
+
+    // The display has a layer stack set
+    display.mutableCurrentDisplayState().layerStack = 456u;
+
+    // The incoming request sets the same layer stack
+    DisplayState state;
+    state.what = DisplayState::eLayerStackChanged;
+    state.token = display.token();
+    state.layerStack = 456u;
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    uint32_t flags = mFlinger.setDisplayStateLocked(state);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    // The returned flags are empty
+    EXPECT_EQ(0u, flags);
+
+    // The current display state is unchanged
+    EXPECT_EQ(456u, display.getCurrentDisplayState().layerStack);
+}
+
+TEST_F(DisplayTransactionTest, setDisplayStateLockedRequestsUpdateIfLayerStackChanged) {
+    using Case = SimplePrimaryDisplayCase;
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // A display is set up
+    auto display = Case::Display::makeFakeExistingDisplayInjector(this);
+    display.inject();
+
+    // The display has a layer stack set
+    display.mutableCurrentDisplayState().layerStack = 654u;
+
+    // The incoming request sets a different layer stack
+    DisplayState state;
+    state.what = DisplayState::eLayerStackChanged;
+    state.token = display.token();
+    state.layerStack = 456u;
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    uint32_t flags = mFlinger.setDisplayStateLocked(state);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    // The returned flags indicate a transaction is needed
+    EXPECT_EQ(eDisplayTransactionNeeded, flags);
+
+    // The desired display state has been set to the new value.
+    EXPECT_EQ(456u, display.getCurrentDisplayState().layerStack);
+}
+
+TEST_F(DisplayTransactionTest, setDisplayStateLockedDoesNothingIfProjectionDidNotChange) {
+    using Case = SimplePrimaryDisplayCase;
+    constexpr int initialOrientation = 180;
+    const Rect initialFrame = {1, 2, 3, 4};
+    const Rect initialViewport = {5, 6, 7, 8};
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // A display is set up
+    auto display = Case::Display::makeFakeExistingDisplayInjector(this);
+    display.inject();
+
+    // The current display state projection state is all set
+    display.mutableCurrentDisplayState().orientation = initialOrientation;
+    display.mutableCurrentDisplayState().frame = initialFrame;
+    display.mutableCurrentDisplayState().viewport = initialViewport;
+
+    // The incoming request sets the same projection state
+    DisplayState state;
+    state.what = DisplayState::eDisplayProjectionChanged;
+    state.token = display.token();
+    state.orientation = initialOrientation;
+    state.frame = initialFrame;
+    state.viewport = initialViewport;
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    uint32_t flags = mFlinger.setDisplayStateLocked(state);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    // The returned flags are empty
+    EXPECT_EQ(0u, flags);
+
+    // The current display state is unchanged
+    EXPECT_EQ(initialOrientation, display.getCurrentDisplayState().orientation);
+
+    EXPECT_EQ(initialFrame, display.getCurrentDisplayState().frame);
+    EXPECT_EQ(initialViewport, display.getCurrentDisplayState().viewport);
+}
+
+TEST_F(DisplayTransactionTest, setDisplayStateLockedRequestsUpdateIfOrientationChanged) {
+    using Case = SimplePrimaryDisplayCase;
+    constexpr int initialOrientation = 90;
+    constexpr int desiredOrientation = 180;
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // A display is set up
+    auto display = Case::Display::makeFakeExistingDisplayInjector(this);
+    display.inject();
+
+    // The current display state has an orientation set
+    display.mutableCurrentDisplayState().orientation = initialOrientation;
+
+    // The incoming request sets a different orientation
+    DisplayState state;
+    state.what = DisplayState::eDisplayProjectionChanged;
+    state.token = display.token();
+    state.orientation = desiredOrientation;
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    uint32_t flags = mFlinger.setDisplayStateLocked(state);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    // The returned flags indicate a transaction is needed
+    EXPECT_EQ(eDisplayTransactionNeeded, flags);
+
+    // The current display state has the new value.
+    EXPECT_EQ(desiredOrientation, display.getCurrentDisplayState().orientation);
+}
+
+TEST_F(DisplayTransactionTest, setDisplayStateLockedRequestsUpdateIfFrameChanged) {
+    using Case = SimplePrimaryDisplayCase;
+    const Rect initialFrame = {0, 0, 0, 0};
+    const Rect desiredFrame = {5, 6, 7, 8};
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // A display is set up
+    auto display = Case::Display::makeFakeExistingDisplayInjector(this);
+    display.inject();
+
+    // The current display state does not have a frame
+    display.mutableCurrentDisplayState().frame = initialFrame;
+
+    // The incoming request sets a frame
+    DisplayState state;
+    state.what = DisplayState::eDisplayProjectionChanged;
+    state.token = display.token();
+    state.frame = desiredFrame;
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    uint32_t flags = mFlinger.setDisplayStateLocked(state);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    // The returned flags indicate a transaction is needed
+    EXPECT_EQ(eDisplayTransactionNeeded, flags);
+
+    // The current display state has the new value.
+    EXPECT_EQ(desiredFrame, display.getCurrentDisplayState().frame);
+}
+
+TEST_F(DisplayTransactionTest, setDisplayStateLockedRequestsUpdateIfViewportChanged) {
+    using Case = SimplePrimaryDisplayCase;
+    const Rect initialViewport = {0, 0, 0, 0};
+    const Rect desiredViewport = {5, 6, 7, 8};
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // A display is set up
+    auto display = Case::Display::makeFakeExistingDisplayInjector(this);
+    display.inject();
+
+    // The current display state does not have a viewport
+    display.mutableCurrentDisplayState().viewport = initialViewport;
+
+    // The incoming request sets a viewport
+    DisplayState state;
+    state.what = DisplayState::eDisplayProjectionChanged;
+    state.token = display.token();
+    state.viewport = desiredViewport;
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    uint32_t flags = mFlinger.setDisplayStateLocked(state);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    // The returned flags indicate a transaction is needed
+    EXPECT_EQ(eDisplayTransactionNeeded, flags);
+
+    // The current display state has the new value.
+    EXPECT_EQ(desiredViewport, display.getCurrentDisplayState().viewport);
+}
+
+TEST_F(DisplayTransactionTest, setDisplayStateLockedDoesNothingIfSizeDidNotChange) {
+    using Case = SimplePrimaryDisplayCase;
+    constexpr uint32_t initialWidth = 1024;
+    constexpr uint32_t initialHeight = 768;
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // A display is set up
+    auto display = Case::Display::makeFakeExistingDisplayInjector(this);
+    display.inject();
+
+    // The current display state has a size set
+    display.mutableCurrentDisplayState().width = initialWidth;
+    display.mutableCurrentDisplayState().height = initialHeight;
+
+    // The incoming request sets the same display size
+    DisplayState state;
+    state.what = DisplayState::eDisplaySizeChanged;
+    state.token = display.token();
+    state.width = initialWidth;
+    state.height = initialHeight;
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    uint32_t flags = mFlinger.setDisplayStateLocked(state);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    // The returned flags are empty
+    EXPECT_EQ(0u, flags);
+
+    // The current display state is unchanged
+    EXPECT_EQ(initialWidth, display.getCurrentDisplayState().width);
+    EXPECT_EQ(initialHeight, display.getCurrentDisplayState().height);
+}
+
+TEST_F(DisplayTransactionTest, setDisplayStateLockedRequestsUpdateIfWidthChanged) {
+    using Case = SimplePrimaryDisplayCase;
+    constexpr uint32_t initialWidth = 0;
+    constexpr uint32_t desiredWidth = 1024;
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // A display is set up
+    auto display = Case::Display::makeFakeExistingDisplayInjector(this);
+    display.inject();
+
+    // The display does not yet have a width
+    display.mutableCurrentDisplayState().width = initialWidth;
+
+    // The incoming request sets a display width
+    DisplayState state;
+    state.what = DisplayState::eDisplaySizeChanged;
+    state.token = display.token();
+    state.width = desiredWidth;
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    uint32_t flags = mFlinger.setDisplayStateLocked(state);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    // The returned flags indicate a transaction is needed
+    EXPECT_EQ(eDisplayTransactionNeeded, flags);
+
+    // The current display state has the new value.
+    EXPECT_EQ(desiredWidth, display.getCurrentDisplayState().width);
+}
+
+TEST_F(DisplayTransactionTest, setDisplayStateLockedRequestsUpdateIfHeightChanged) {
+    using Case = SimplePrimaryDisplayCase;
+    constexpr uint32_t initialHeight = 0;
+    constexpr uint32_t desiredHeight = 768;
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // A display is set up
+    auto display = Case::Display::makeFakeExistingDisplayInjector(this);
+    display.inject();
+
+    // The display does not yet have a height
+    display.mutableCurrentDisplayState().height = initialHeight;
+
+    // The incoming request sets a display height
+    DisplayState state;
+    state.what = DisplayState::eDisplaySizeChanged;
+    state.token = display.token();
+    state.height = desiredHeight;
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    uint32_t flags = mFlinger.setDisplayStateLocked(state);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    // The returned flags indicate a transaction is needed
+    EXPECT_EQ(eDisplayTransactionNeeded, flags);
+
+    // The current display state has the new value.
+    EXPECT_EQ(desiredHeight, display.getCurrentDisplayState().height);
+}
+
+/* ------------------------------------------------------------------------
+ * SurfaceFlinger::onInitializeDisplays
+ */
+
+TEST_F(DisplayTransactionTest, onInitializeDisplaysSetsUpPrimaryDisplay) {
+    using Case = SimplePrimaryDisplayCase;
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // A primary display is set up
+    Case::Display::injectHwcDisplay(this);
+    auto primaryDisplay = Case::Display::makeFakeExistingDisplayInjector(this);
+    primaryDisplay.inject();
+
+    // --------------------------------------------------------------------
+    // Call Expectations
+
+    // We expect the surface interceptor to possibly be used, but we treat it as
+    // disabled since it is called as a side effect rather than directly by this
+    // function.
+    EXPECT_CALL(*mSurfaceInterceptor, isEnabled()).WillOnce(Return(false));
+
+    // We expect a call to get the active display config.
+    Case::Display::setupHwcGetActiveConfigCallExpectations(this);
+
+    // We expect invalidate() to be invoked once to trigger display transaction
+    // processing.
+    EXPECT_CALL(*mMessageQueue, invalidate()).Times(1);
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    mFlinger.onInitializeDisplays();
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    // The primary display should have a current state
+    ASSERT_TRUE(hasCurrentDisplayState(primaryDisplay.token()));
+    const auto& primaryDisplayState = getCurrentDisplayState(primaryDisplay.token());
+    // The layer stack state should be set to zero
+    EXPECT_EQ(0u, primaryDisplayState.layerStack);
+    // The orientation state should be set to zero
+    EXPECT_EQ(0, primaryDisplayState.orientation);
+
+    // The frame state should be set to INVALID
+    EXPECT_EQ(Rect::INVALID_RECT, primaryDisplayState.frame);
+
+    // The viewport state should be set to INVALID
+    EXPECT_EQ(Rect::INVALID_RECT, primaryDisplayState.viewport);
+
+    // The width and height should both be zero
+    EXPECT_EQ(0u, primaryDisplayState.width);
+    EXPECT_EQ(0u, primaryDisplayState.height);
+
+    // The display should be set to HWC_POWER_MODE_NORMAL
+    ASSERT_TRUE(hasDisplayDevice(primaryDisplay.token()));
+    auto displayDevice = primaryDisplay.mutableDisplayDevice();
+    EXPECT_EQ(HWC_POWER_MODE_NORMAL, displayDevice->getPowerMode());
+
+    // The display refresh period should be set in the frame tracker.
+    FrameStats stats;
+    mFlinger.getAnimFrameTracker().getStats(&stats);
+    EXPECT_EQ(DEFAULT_REFRESH_RATE, stats.refreshPeriodNano);
+
+    // The display transaction needed flag should be set.
+    EXPECT_TRUE(hasTransactionFlagSet(eDisplayTransactionNeeded));
+
+    // The compositor timing should be set to default values
+    const auto& compositorTiming = mFlinger.getCompositorTiming();
+    EXPECT_EQ(-DEFAULT_REFRESH_RATE, compositorTiming.deadline);
+    EXPECT_EQ(DEFAULT_REFRESH_RATE, compositorTiming.interval);
+    EXPECT_EQ(DEFAULT_REFRESH_RATE, compositorTiming.presentLatency);
+}
+
+/* ------------------------------------------------------------------------
+ * SurfaceFlinger::setPowerModeInternal
+ */
+
+// Used when we simulate a display that supports doze.
+struct DozeIsSupportedVariant {
+    static constexpr bool DOZE_SUPPORTED = true;
+    static constexpr IComposerClient::PowerMode ACTUAL_POWER_MODE_FOR_DOZE =
+            IComposerClient::PowerMode::DOZE;
+    static constexpr IComposerClient::PowerMode ACTUAL_POWER_MODE_FOR_DOZE_SUSPEND =
+            IComposerClient::PowerMode::DOZE_SUSPEND;
+};
+
+// Used when we simulate a display that does not support doze.
+struct DozeNotSupportedVariant {
+    static constexpr bool DOZE_SUPPORTED = false;
+    static constexpr IComposerClient::PowerMode ACTUAL_POWER_MODE_FOR_DOZE =
+            IComposerClient::PowerMode::ON;
+    static constexpr IComposerClient::PowerMode ACTUAL_POWER_MODE_FOR_DOZE_SUSPEND =
+            IComposerClient::PowerMode::ON;
+};
+
+struct EventThreadBaseSupportedVariant {
+    static void setupEventAndEventControlThreadNoCallExpectations(DisplayTransactionTest* test) {
+        // The event control thread should not be notified.
+        EXPECT_CALL(*test->mEventControlThread, setVsyncEnabled(_)).Times(0);
+
+        // The event thread should not be notified.
+        EXPECT_CALL(*test->mEventThread, onScreenReleased()).Times(0);
+        EXPECT_CALL(*test->mEventThread, onScreenAcquired()).Times(0);
+    }
+};
+
+struct EventThreadNotSupportedVariant : public EventThreadBaseSupportedVariant {
+    static void setupAcquireAndEnableVsyncCallExpectations(DisplayTransactionTest* test) {
+        // These calls are only expected for the primary display.
+
+        // Instead expect no calls.
+        setupEventAndEventControlThreadNoCallExpectations(test);
+    }
+
+    static void setupReleaseAndDisableVsyncCallExpectations(DisplayTransactionTest* test) {
+        // These calls are only expected for the primary display.
+
+        // Instead expect no calls.
+        setupEventAndEventControlThreadNoCallExpectations(test);
+    }
+};
+
+struct EventThreadIsSupportedVariant : public EventThreadBaseSupportedVariant {
+    static void setupAcquireAndEnableVsyncCallExpectations(DisplayTransactionTest* test) {
+        // The event control thread should be notified to enable vsyncs
+        EXPECT_CALL(*test->mEventControlThread, setVsyncEnabled(true)).Times(1);
+
+        // The event thread should be notified that the screen was acquired.
+        EXPECT_CALL(*test->mEventThread, onScreenAcquired()).Times(1);
+    }
+
+    static void setupReleaseAndDisableVsyncCallExpectations(DisplayTransactionTest* test) {
+        // There should be a call to setVsyncEnabled(false)
+        EXPECT_CALL(*test->mEventControlThread, setVsyncEnabled(false)).Times(1);
+
+        // The event thread should be notified that the screen was released.
+        EXPECT_CALL(*test->mEventThread, onScreenReleased()).Times(1);
+    }
+};
+
+// --------------------------------------------------------------------
+// Note:
+//
+// There are a large number of transitions we could test, however we only test a
+// selected subset which provides complete test coverage of the implementation.
+// --------------------------------------------------------------------
+
+template <int initialPowerMode, int targetPowerMode>
+struct TransitionVariantCommon {
+    static constexpr auto INITIAL_POWER_MODE = initialPowerMode;
+    static constexpr auto TARGET_POWER_MODE = targetPowerMode;
+
+    static void verifyPostconditions(DisplayTransactionTest*) {}
+};
+
+struct TransitionOffToOnVariant
+      : public TransitionVariantCommon<HWC_POWER_MODE_OFF, HWC_POWER_MODE_NORMAL> {
+    template <typename Case>
+    static void setupCallExpectations(DisplayTransactionTest* test) {
+        Case::setupComposerCallExpectations(test, IComposerClient::PowerMode::ON);
+        Case::EventThread::setupAcquireAndEnableVsyncCallExpectations(test);
+        Case::setupRepaintEverythingCallExpectations(test);
+    }
+
+    static void verifyPostconditions(DisplayTransactionTest* test) {
+        EXPECT_TRUE(test->mFlinger.getVisibleRegionsDirty());
+        EXPECT_TRUE(test->mFlinger.getHasPoweredOff());
+    }
+};
+
+struct TransitionOffToDozeSuspendVariant
+      : public TransitionVariantCommon<HWC_POWER_MODE_OFF, HWC_POWER_MODE_DOZE_SUSPEND> {
+    template <typename Case>
+    static void setupCallExpectations(DisplayTransactionTest* test) {
+        Case::setupComposerCallExpectations(test, Case::Doze::ACTUAL_POWER_MODE_FOR_DOZE_SUSPEND);
+        Case::EventThread::setupEventAndEventControlThreadNoCallExpectations(test);
+        Case::setupRepaintEverythingCallExpectations(test);
+    }
+
+    static void verifyPostconditions(DisplayTransactionTest* test) {
+        EXPECT_TRUE(test->mFlinger.getVisibleRegionsDirty());
+        EXPECT_TRUE(test->mFlinger.getHasPoweredOff());
+    }
+};
+
+struct TransitionOnToOffVariant
+      : public TransitionVariantCommon<HWC_POWER_MODE_NORMAL, HWC_POWER_MODE_OFF> {
+    template <typename Case>
+    static void setupCallExpectations(DisplayTransactionTest* test) {
+        Case::EventThread::setupReleaseAndDisableVsyncCallExpectations(test);
+        Case::setupComposerCallExpectations(test, IComposerClient::PowerMode::OFF);
+    }
+
+    static void verifyPostconditions(DisplayTransactionTest* test) {
+        EXPECT_TRUE(test->mFlinger.getVisibleRegionsDirty());
+    }
+};
+
+struct TransitionDozeSuspendToOffVariant
+      : public TransitionVariantCommon<HWC_POWER_MODE_DOZE_SUSPEND, HWC_POWER_MODE_OFF> {
+    template <typename Case>
+    static void setupCallExpectations(DisplayTransactionTest* test) {
+        Case::EventThread::setupEventAndEventControlThreadNoCallExpectations(test);
+        Case::setupComposerCallExpectations(test, IComposerClient::PowerMode::OFF);
+    }
+
+    static void verifyPostconditions(DisplayTransactionTest* test) {
+        EXPECT_TRUE(test->mFlinger.getVisibleRegionsDirty());
+    }
+};
+
+struct TransitionOnToDozeVariant
+      : public TransitionVariantCommon<HWC_POWER_MODE_NORMAL, HWC_POWER_MODE_DOZE> {
+    template <typename Case>
+    static void setupCallExpectations(DisplayTransactionTest* test) {
+        Case::EventThread::setupEventAndEventControlThreadNoCallExpectations(test);
+        Case::setupComposerCallExpectations(test, Case::Doze::ACTUAL_POWER_MODE_FOR_DOZE);
+    }
+};
+
+struct TransitionDozeSuspendToDozeVariant
+      : public TransitionVariantCommon<HWC_POWER_MODE_DOZE_SUSPEND, HWC_POWER_MODE_DOZE> {
+    template <typename Case>
+    static void setupCallExpectations(DisplayTransactionTest* test) {
+        Case::EventThread::setupAcquireAndEnableVsyncCallExpectations(test);
+        Case::setupComposerCallExpectations(test, Case::Doze::ACTUAL_POWER_MODE_FOR_DOZE);
+    }
+};
+
+struct TransitionDozeToOnVariant
+      : public TransitionVariantCommon<HWC_POWER_MODE_DOZE, HWC_POWER_MODE_NORMAL> {
+    template <typename Case>
+    static void setupCallExpectations(DisplayTransactionTest* test) {
+        Case::EventThread::setupEventAndEventControlThreadNoCallExpectations(test);
+        Case::setupComposerCallExpectations(test, IComposerClient::PowerMode::ON);
+    }
+};
+
+struct TransitionDozeSuspendToOnVariant
+      : public TransitionVariantCommon<HWC_POWER_MODE_DOZE_SUSPEND, HWC_POWER_MODE_NORMAL> {
+    template <typename Case>
+    static void setupCallExpectations(DisplayTransactionTest* test) {
+        Case::EventThread::setupAcquireAndEnableVsyncCallExpectations(test);
+        Case::setupComposerCallExpectations(test, IComposerClient::PowerMode::ON);
+    }
+};
+
+struct TransitionOnToDozeSuspendVariant
+      : public TransitionVariantCommon<HWC_POWER_MODE_NORMAL, HWC_POWER_MODE_DOZE_SUSPEND> {
+    template <typename Case>
+    static void setupCallExpectations(DisplayTransactionTest* test) {
+        Case::EventThread::setupReleaseAndDisableVsyncCallExpectations(test);
+        Case::setupComposerCallExpectations(test, Case::Doze::ACTUAL_POWER_MODE_FOR_DOZE_SUSPEND);
+    }
+};
+
+struct TransitionOnToUnknownVariant
+      : public TransitionVariantCommon<HWC_POWER_MODE_NORMAL, HWC_POWER_MODE_LEET> {
+    template <typename Case>
+    static void setupCallExpectations(DisplayTransactionTest* test) {
+        Case::EventThread::setupEventAndEventControlThreadNoCallExpectations(test);
+        Case::setupNoComposerPowerModeCallExpectations(test);
+    }
+};
+
+// --------------------------------------------------------------------
+// Note:
+//
+// Rather than testing the Cartesian product of DozeIsSupported/DozeNotSupported
+// with all other options, we pair one with one display type and the other with
+// the other display type.
+// --------------------------------------------------------------------
+
+template <typename DisplayVariant, typename DozeVariant, typename EventThreadVariant,
+          typename TransitionVariant>
+struct DisplayPowerCase {
+    using Display = DisplayVariant;
+    using Doze = DozeVariant;
+    using EventThread = EventThreadVariant;
+    using Transition = TransitionVariant;
+
+    static auto injectDisplayWithInitialPowerMode(DisplayTransactionTest* test, int mode) {
+        Display::injectHwcDisplay(test);
+        auto display = Display::makeFakeExistingDisplayInjector(test);
+        display.inject();
+        display.mutableDisplayDevice()->setPowerMode(mode);
+        return display;
+    }
+
+    static void setInitialPrimaryHWVsyncEnabled(DisplayTransactionTest* test, bool enabled) {
+        test->mFlinger.mutablePrimaryHWVsyncEnabled() = enabled;
+    }
+
+    static void setupRepaintEverythingCallExpectations(DisplayTransactionTest* test) {
+        EXPECT_CALL(*test->mMessageQueue, invalidate()).Times(1);
+    }
+
+    static void setupSurfaceInterceptorCallExpectations(DisplayTransactionTest* test, int mode) {
+        EXPECT_CALL(*test->mSurfaceInterceptor, isEnabled()).WillOnce(Return(true));
+        EXPECT_CALL(*test->mSurfaceInterceptor, savePowerModeUpdate(_, mode)).Times(1);
+    }
+
+    static void setupComposerCallExpectations(DisplayTransactionTest* test,
+                                              IComposerClient::PowerMode mode) {
+        // Any calls to get the active config will return a default value.
+        EXPECT_CALL(*test->mComposer, getActiveConfig(Display::HWC_DISPLAY_ID, _))
+                .WillRepeatedly(DoAll(SetArgPointee<1>(Display::HWC_ACTIVE_CONFIG_ID),
+                                      Return(Error::NONE)));
+
+        // Any calls to get whether the display supports dozing will return the value set by the
+        // policy variant.
+        EXPECT_CALL(*test->mComposer, getDozeSupport(Display::HWC_DISPLAY_ID, _))
+                .WillRepeatedly(DoAll(SetArgPointee<1>(Doze::DOZE_SUPPORTED), Return(Error::NONE)));
+
+        EXPECT_CALL(*test->mComposer, setPowerMode(Display::HWC_DISPLAY_ID, mode)).Times(1);
+    }
+
+    static void setupNoComposerPowerModeCallExpectations(DisplayTransactionTest* test) {
+        EXPECT_CALL(*test->mComposer, setPowerMode(Display::HWC_DISPLAY_ID, _)).Times(0);
+    }
+};
+
+// A sample configuration for the primary display.
+// In addition to having event thread support, we emulate doze support.
+template <typename TransitionVariant>
+using PrimaryDisplayPowerCase = DisplayPowerCase<PrimaryDisplayVariant, DozeIsSupportedVariant,
+                                                 EventThreadIsSupportedVariant, TransitionVariant>;
+
+// A sample configuration for the external display.
+// In addition to not having event thread support, we emulate not having doze
+// support.
+template <typename TransitionVariant>
+using ExternalDisplayPowerCase =
+        DisplayPowerCase<ExternalDisplayVariant, DozeNotSupportedVariant,
+                         EventThreadNotSupportedVariant, TransitionVariant>;
+
+class SetPowerModeInternalTest : public DisplayTransactionTest {
+public:
+    template <typename Case>
+    void transitionDisplayCommon();
+};
+
+template <int PowerMode>
+struct PowerModeInitialVSyncEnabled : public std::false_type {};
+
+template <>
+struct PowerModeInitialVSyncEnabled<HWC_POWER_MODE_NORMAL> : public std::true_type {};
+
+template <>
+struct PowerModeInitialVSyncEnabled<HWC_POWER_MODE_DOZE> : public std::true_type {};
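+
+// Illustrative sanity checks (a sketch, not required by the tests) for the
+// mapping above: NORMAL and DOZE start with HW vsync enabled; every other
+// mode falls back to std::false_type.
+static_assert(PowerModeInitialVSyncEnabled<HWC_POWER_MODE_NORMAL>::value,
+              "vsync should start enabled for HWC_POWER_MODE_NORMAL");
+static_assert(!PowerModeInitialVSyncEnabled<HWC_POWER_MODE_OFF>::value,
+              "vsync should start disabled for HWC_POWER_MODE_OFF");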
+
+template <typename Case>
+void SetPowerModeInternalTest::transitionDisplayCommon() {
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    auto display =
+            Case::injectDisplayWithInitialPowerMode(this, Case::Transition::INITIAL_POWER_MODE);
+    Case::setInitialPrimaryHWVsyncEnabled(this,
+                                          PowerModeInitialVSyncEnabled<
+                                                  Case::Transition::INITIAL_POWER_MODE>::value);
+
+    // --------------------------------------------------------------------
+    // Call Expectations
+
+    Case::setupSurfaceInterceptorCallExpectations(this, Case::Transition::TARGET_POWER_MODE);
+    Case::Transition::template setupCallExpectations<Case>(this);
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    mFlinger.setPowerModeInternal(display.mutableDisplayDevice(),
+                                  Case::Transition::TARGET_POWER_MODE);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    Case::Transition::verifyPostconditions(this);
+}
+
+TEST_F(SetPowerModeInternalTest, setPowerModeInternalDoesNothingIfNoChange) {
+    using Case = SimplePrimaryDisplayCase;
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // A primary display device is set up
+    Case::Display::injectHwcDisplay(this);
+    auto display = Case::Display::makeFakeExistingDisplayInjector(this);
+    display.inject();
+
+    // The display is already set to HWC_POWER_MODE_NORMAL
+    display.mutableDisplayDevice()->setPowerMode(HWC_POWER_MODE_NORMAL);
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    mFlinger.setPowerModeInternal(display.mutableDisplayDevice(), HWC_POWER_MODE_NORMAL);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    EXPECT_EQ(HWC_POWER_MODE_NORMAL, display.mutableDisplayDevice()->getPowerMode());
+}
+
+TEST_F(SetPowerModeInternalTest, setPowerModeInternalJustSetsInternalStateIfVirtualDisplay) {
+    using Case = HwcVirtualDisplayCase;
+
+    // --------------------------------------------------------------------
+    // Preconditions
+
+    // We need to resize this so that the HWC thinks the virtual display
+    // is something it created.
+    mFlinger.mutableHwcDisplayData().resize(3);
+
+    // A virtual display device is set up
+    Case::Display::injectHwcDisplay(this);
+    auto display = Case::Display::makeFakeExistingDisplayInjector(this);
+    display.inject();
+
+    // The display is set to HWC_POWER_MODE_OFF
+    getDisplayDevice(display.token())->setPowerMode(HWC_POWER_MODE_OFF);
+
+    // --------------------------------------------------------------------
+    // Invocation
+
+    mFlinger.setPowerModeInternal(display.mutableDisplayDevice(), HWC_POWER_MODE_NORMAL);
+
+    // --------------------------------------------------------------------
+    // Postconditions
+
+    EXPECT_EQ(HWC_POWER_MODE_NORMAL, display.mutableDisplayDevice()->getPowerMode());
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromOffToOnPrimaryDisplay) {
+    transitionDisplayCommon<PrimaryDisplayPowerCase<TransitionOffToOnVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromOffToDozeSuspendPrimaryDisplay) {
+    transitionDisplayCommon<PrimaryDisplayPowerCase<TransitionOffToDozeSuspendVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromOnToOffPrimaryDisplay) {
+    transitionDisplayCommon<PrimaryDisplayPowerCase<TransitionOnToOffVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromDozeSuspendToOffPrimaryDisplay) {
+    transitionDisplayCommon<PrimaryDisplayPowerCase<TransitionDozeSuspendToOffVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromOnToDozePrimaryDisplay) {
+    transitionDisplayCommon<PrimaryDisplayPowerCase<TransitionOnToDozeVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromDozeSuspendToDozePrimaryDisplay) {
+    transitionDisplayCommon<PrimaryDisplayPowerCase<TransitionDozeSuspendToDozeVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromDozeToOnPrimaryDisplay) {
+    transitionDisplayCommon<PrimaryDisplayPowerCase<TransitionDozeToOnVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromDozeSuspendToOnPrimaryDisplay) {
+    transitionDisplayCommon<PrimaryDisplayPowerCase<TransitionDozeSuspendToOnVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromOnToDozeSuspendPrimaryDisplay) {
+    transitionDisplayCommon<PrimaryDisplayPowerCase<TransitionOnToDozeSuspendVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromOnToUnknownPrimaryDisplay) {
+    transitionDisplayCommon<PrimaryDisplayPowerCase<TransitionOnToUnknownVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromOffToOnExternalDisplay) {
+    transitionDisplayCommon<ExternalDisplayPowerCase<TransitionOffToOnVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromOffToDozeSuspendExternalDisplay) {
+    transitionDisplayCommon<ExternalDisplayPowerCase<TransitionOffToDozeSuspendVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromOnToOffExternalDisplay) {
+    transitionDisplayCommon<ExternalDisplayPowerCase<TransitionOnToOffVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromDozeSuspendToOffExternalDisplay) {
+    transitionDisplayCommon<ExternalDisplayPowerCase<TransitionDozeSuspendToOffVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromOnToDozeExternalDisplay) {
+    transitionDisplayCommon<ExternalDisplayPowerCase<TransitionOnToDozeVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromDozeSuspendToDozeExternalDisplay) {
+    transitionDisplayCommon<ExternalDisplayPowerCase<TransitionDozeSuspendToDozeVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromDozeToOnExternalDisplay) {
+    transitionDisplayCommon<ExternalDisplayPowerCase<TransitionDozeToOnVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromDozeSuspendToOnExternalDisplay) {
+    transitionDisplayCommon<ExternalDisplayPowerCase<TransitionDozeSuspendToOnVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromOnToDozeSuspendExternalDisplay) {
+    transitionDisplayCommon<ExternalDisplayPowerCase<TransitionOnToDozeSuspendVariant>>();
+}
+
+TEST_F(SetPowerModeInternalTest, transitionsDisplayFromOnToUnknownExternalDisplay) {
+    transitionDisplayCommon<ExternalDisplayPowerCase<TransitionOnToUnknownVariant>>();
+}
+
 } // namespace
 } // namespace android
diff --git a/services/surfaceflinger/tests/unittests/EventControlThreadTest.cpp b/services/surfaceflinger/tests/unittests/EventControlThreadTest.cpp
new file mode 100644
index 0000000..b346454
--- /dev/null
+++ b/services/surfaceflinger/tests/unittests/EventControlThreadTest.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef LOG_TAG
+#define LOG_TAG "LibSurfaceFlingerUnittests"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include <log/log.h>
+
+#include "AsyncCallRecorder.h"
+#include "EventControlThread.h"
+
+namespace android {
+namespace {
+
+using namespace std::chrono_literals;
+using testing::_;
+
+class EventControlThreadTest : public testing::Test {
+protected:
+    EventControlThreadTest();
+    ~EventControlThreadTest() override;
+
+    void createThread();
+
+    void expectVSyncEnableCallbackCalled(bool enable);
+
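+    // Records the enable/disable calls the thread under test makes to its
+    // vsync callback, so each test can wait for them (or assert that no
+    // further call arrives).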
+    AsyncCallRecorder<void (*)(bool)> mVSyncSetEnabledCallRecorder;
+
+    std::unique_ptr<EventControlThread> mThread;
+};
+
+EventControlThreadTest::EventControlThreadTest() {
+    const ::testing::TestInfo* const test_info =
+            ::testing::UnitTest::GetInstance()->current_test_info();
+    ALOGD("**** Setting up for %s.%s\n", test_info->test_case_name(), test_info->name());
+}
+
+EventControlThreadTest::~EventControlThreadTest() {
+    const ::testing::TestInfo* const test_info =
+            ::testing::UnitTest::GetInstance()->current_test_info();
+    ALOGD("**** Tearing down after %s.%s\n", test_info->test_case_name(), test_info->name());
+}
+
+void EventControlThreadTest::createThread() {
+    mThread = std::make_unique<android::impl::EventControlThread>(
+            mVSyncSetEnabledCallRecorder.getInvocable());
+}
+
+void EventControlThreadTest::expectVSyncEnableCallbackCalled(bool expectedEnabled) {
+    auto args = mVSyncSetEnabledCallRecorder.waitForCall();
+    ASSERT_TRUE(args.has_value());
+    EXPECT_EQ(std::get<0>(args.value()), expectedEnabled);
+}
+
+/* ------------------------------------------------------------------------
+ * Test cases
+ */
+
+TEST_F(EventControlThreadTest, signalsVSyncDisabledOnStartup) {
+    createThread();
+
+    // On thread start, there should be an automatic, explicit call to
+    // disable vsync.
+    expectVSyncEnableCallbackCalled(false);
+}
+
+TEST_F(EventControlThreadTest, signalsVSyncDisabledOnce) {
+    createThread();
+    expectVSyncEnableCallbackCalled(false);
+
+    mThread->setVsyncEnabled(false);
+
+    EXPECT_FALSE(mVSyncSetEnabledCallRecorder.waitForUnexpectedCall().has_value());
+}
+
+TEST_F(EventControlThreadTest, signalsVSyncEnabledThenDisabled) {
+    createThread();
+    expectVSyncEnableCallbackCalled(false);
+
+    mThread->setVsyncEnabled(true);
+
+    expectVSyncEnableCallbackCalled(true);
+
+    mThread->setVsyncEnabled(false);
+
+    expectVSyncEnableCallbackCalled(false);
+}
+
+TEST_F(EventControlThreadTest, signalsVSyncEnabledOnce) {
+    createThread();
+    expectVSyncEnableCallbackCalled(false);
+
+    mThread->setVsyncEnabled(true);
+
+    expectVSyncEnableCallbackCalled(true);
+
+    mThread->setVsyncEnabled(true);
+
+    EXPECT_FALSE(mVSyncSetEnabledCallRecorder.waitForUnexpectedCall().has_value());
+}
+
+} // namespace
+} // namespace android
diff --git a/services/surfaceflinger/tests/unittests/EventThreadTest.cpp b/services/surfaceflinger/tests/unittests/EventThreadTest.cpp
new file mode 100644
index 0000000..80fdb80
--- /dev/null
+++ b/services/surfaceflinger/tests/unittests/EventThreadTest.cpp
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef LOG_TAG
+#define LOG_TAG "LibSurfaceFlingerUnittests"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include <log/log.h>
+
+#include <utils/Errors.h>
+
+#include "AsyncCallRecorder.h"
+#include "EventThread.h"
+
+using namespace std::chrono_literals;
+using namespace std::placeholders;
+
+using testing::_;
+using testing::Invoke;
+
+namespace android {
+namespace {
+
+class MockVSyncSource : public VSyncSource {
+public:
+    MOCK_METHOD1(setVSyncEnabled, void(bool));
+    MOCK_METHOD1(setCallback, void(VSyncSource::Callback*));
+    MOCK_METHOD1(setPhaseOffset, void(nsecs_t));
+};
+
+} // namespace
+
+class EventThreadTest : public testing::Test {
+protected:
+    class MockEventThreadConnection : public android::impl::EventThread::Connection {
+    public:
+        explicit MockEventThreadConnection(android::impl::EventThread* eventThread)
+              : android::impl::EventThread::Connection(eventThread) {}
+        MOCK_METHOD1(postEvent, status_t(const DisplayEventReceiver::Event& event));
+    };
+
+    using ConnectionEventRecorder =
+            AsyncCallRecorderWithCannedReturn<status_t (*)(const DisplayEventReceiver::Event&)>;
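+    // A ConnectionEventRecorder is constructed with the canned status_t that
+    // each recorded postEvent() call returns: 0 simulates successful delivery,
+    // while NO_MEMORY and WOULD_BLOCK (used in tests below) simulate fatal and
+    // non-fatal delivery errors respectively.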
+
+    EventThreadTest();
+    ~EventThreadTest() override;
+
+    void createThread();
+    sp<MockEventThreadConnection> createConnection(ConnectionEventRecorder& recorder);
+
+    void expectVSyncSetEnabledCallReceived(bool expectedState);
+    void expectVSyncSetPhaseOffsetCallReceived(nsecs_t expectedPhaseOffset);
+    VSyncSource::Callback* expectVSyncSetCallbackCallReceived();
+    void expectInterceptCallReceived(nsecs_t expectedTimestamp);
+    void expectVsyncEventReceivedByConnection(const char* name,
+                                              ConnectionEventRecorder& connectionEventRecorder,
+                                              nsecs_t expectedTimestamp, unsigned expectedCount);
+    void expectVsyncEventReceivedByConnection(nsecs_t expectedTimestamp, unsigned expectedCount);
+    void expectHotplugEventReceivedByConnection(int expectedDisplayType, bool expectedConnected);
+
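+    // Call recorders capturing the EventThread's asynchronous calls into the
+    // VSync source, the resync and intercept callbacks, and events posted to
+    // the default connection.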
+    AsyncCallRecorder<void (*)(bool)> mVSyncSetEnabledCallRecorder;
+    AsyncCallRecorder<void (*)(VSyncSource::Callback*)> mVSyncSetCallbackCallRecorder;
+    AsyncCallRecorder<void (*)(nsecs_t)> mVSyncSetPhaseOffsetCallRecorder;
+    AsyncCallRecorder<void (*)()> mResyncCallRecorder;
+    AsyncCallRecorder<void (*)(nsecs_t)> mInterceptVSyncCallRecorder;
+    ConnectionEventRecorder mConnectionEventCallRecorder{0};
+
+    MockVSyncSource mVSyncSource;
+    std::unique_ptr<android::impl::EventThread> mThread;
+    sp<MockEventThreadConnection> mConnection;
+};
+
+EventThreadTest::EventThreadTest() {
+    const ::testing::TestInfo* const test_info =
+            ::testing::UnitTest::GetInstance()->current_test_info();
+    ALOGD("**** Setting up for %s.%s\n", test_info->test_case_name(), test_info->name());
+
+    EXPECT_CALL(mVSyncSource, setVSyncEnabled(_))
+            .WillRepeatedly(Invoke(mVSyncSetEnabledCallRecorder.getInvocable()));
+
+    EXPECT_CALL(mVSyncSource, setCallback(_))
+            .WillRepeatedly(Invoke(mVSyncSetCallbackCallRecorder.getInvocable()));
+
+    EXPECT_CALL(mVSyncSource, setPhaseOffset(_))
+            .WillRepeatedly(Invoke(mVSyncSetPhaseOffsetCallRecorder.getInvocable()));
+
+    createThread();
+    mConnection = createConnection(mConnectionEventCallRecorder);
+}
+
+EventThreadTest::~EventThreadTest() {
+    const ::testing::TestInfo* const test_info =
+            ::testing::UnitTest::GetInstance()->current_test_info();
+    ALOGD("**** Tearing down after %s.%s\n", test_info->test_case_name(), test_info->name());
+}
+
+void EventThreadTest::createThread() {
+    mThread =
+            std::make_unique<android::impl::EventThread>(&mVSyncSource,
+                                                         mResyncCallRecorder.getInvocable(),
+                                                         mInterceptVSyncCallRecorder.getInvocable(),
+                                                         "unit-test-event-thread");
+}
+
+sp<EventThreadTest::MockEventThreadConnection> EventThreadTest::createConnection(
+        ConnectionEventRecorder& recorder) {
+    sp<MockEventThreadConnection> connection = new MockEventThreadConnection(mThread.get());
+    EXPECT_CALL(*connection, postEvent(_)).WillRepeatedly(Invoke(recorder.getInvocable()));
+    return connection;
+}
+
+void EventThreadTest::expectVSyncSetEnabledCallReceived(bool expectedState) {
+    auto args = mVSyncSetEnabledCallRecorder.waitForCall();
+    ASSERT_TRUE(args.has_value());
+    EXPECT_EQ(expectedState, std::get<0>(args.value()));
+}
+
+void EventThreadTest::expectVSyncSetPhaseOffsetCallReceived(nsecs_t expectedPhaseOffset) {
+    auto args = mVSyncSetPhaseOffsetCallRecorder.waitForCall();
+    ASSERT_TRUE(args.has_value());
+    EXPECT_EQ(expectedPhaseOffset, std::get<0>(args.value()));
+}
+
+VSyncSource::Callback* EventThreadTest::expectVSyncSetCallbackCallReceived() {
+    auto callbackSet = mVSyncSetCallbackCallRecorder.waitForCall();
+    return callbackSet.has_value() ? std::get<0>(callbackSet.value()) : nullptr;
+}
+
+void EventThreadTest::expectInterceptCallReceived(nsecs_t expectedTimestamp) {
+    auto args = mInterceptVSyncCallRecorder.waitForCall();
+    ASSERT_TRUE(args.has_value());
+    EXPECT_EQ(expectedTimestamp, std::get<0>(args.value()));
+}
+
+void EventThreadTest::expectVsyncEventReceivedByConnection(
+        const char* name, ConnectionEventRecorder& connectionEventRecorder,
+        nsecs_t expectedTimestamp, unsigned expectedCount) {
+    auto args = connectionEventRecorder.waitForCall();
+    ASSERT_TRUE(args.has_value()) << name << " did not receive an event for timestamp "
+                                  << expectedTimestamp;
+    const auto& event = std::get<0>(args.value());
+    EXPECT_EQ(DisplayEventReceiver::DISPLAY_EVENT_VSYNC, event.header.type)
+            << name << " did not get the correct event for timestamp " << expectedTimestamp;
+    EXPECT_EQ(expectedTimestamp, event.header.timestamp)
+            << name << " did not get the expected timestamp for timestamp " << expectedTimestamp;
+    EXPECT_EQ(expectedCount, event.vsync.count)
+            << name << " did not get the expected count for timestamp " << expectedTimestamp;
+}
+
+void EventThreadTest::expectVsyncEventReceivedByConnection(nsecs_t expectedTimestamp,
+                                                           unsigned expectedCount) {
+    expectVsyncEventReceivedByConnection("mConnectionEventCallRecorder",
+                                         mConnectionEventCallRecorder, expectedTimestamp,
+                                         expectedCount);
+}
+
+void EventThreadTest::expectHotplugEventReceivedByConnection(int expectedDisplayType,
+                                                             bool expectedConnected) {
+    auto args = mConnectionEventCallRecorder.waitForCall();
+    ASSERT_TRUE(args.has_value());
+    const auto& event = std::get<0>(args.value());
+    EXPECT_EQ(DisplayEventReceiver::DISPLAY_EVENT_HOTPLUG, event.header.type);
+    EXPECT_EQ(static_cast<unsigned>(expectedDisplayType), event.header.id);
+    EXPECT_EQ(expectedConnected, event.hotplug.connected);
+}
+
+namespace {
+
+/* ------------------------------------------------------------------------
+ * Test cases
+ */
+
+TEST_F(EventThreadTest, canCreateAndDestroyThreadWithNoEventsSent) {
+    EXPECT_FALSE(mVSyncSetEnabledCallRecorder.waitForUnexpectedCall().has_value());
+    EXPECT_FALSE(mVSyncSetCallbackCallRecorder.waitForCall(0us).has_value());
+    EXPECT_FALSE(mVSyncSetPhaseOffsetCallRecorder.waitForCall(0us).has_value());
+    EXPECT_FALSE(mResyncCallRecorder.waitForCall(0us).has_value());
+    EXPECT_FALSE(mInterceptVSyncCallRecorder.waitForCall(0us).has_value());
+    EXPECT_FALSE(mConnectionEventCallRecorder.waitForCall(0us).has_value());
+}
+
+TEST_F(EventThreadTest, requestNextVsyncPostsASingleVSyncEventToTheConnection) {
+    // Signal that we want the next vsync event to be posted to the connection
+    mThread->requestNextVsync(mConnection);
+
+    // EventThread should immediately request a resync.
+    EXPECT_TRUE(mResyncCallRecorder.waitForCall().has_value());
+
+    // EventThread should enable vsync callbacks, and set a callback interface
+    // pointer to use them with the VSync source.
+    expectVSyncSetEnabledCallReceived(true);
+    auto callback = expectVSyncSetCallbackCallReceived();
+    ASSERT_TRUE(callback);
+
+    // Use the received callback to signal a first vsync event.
+    // The interceptor should receive the event, as well as the connection.
+    callback->onVSyncEvent(123);
+    expectInterceptCallReceived(123);
+    expectVsyncEventReceivedByConnection(123, 1u);
+
+    // Use the received callback to signal a second vsync event.
+    // The interceptor should receive the event, but the connection should
+    // not, as it was only interested in the first.
+    callback->onVSyncEvent(456);
+    expectInterceptCallReceived(456);
+    EXPECT_FALSE(mConnectionEventCallRecorder.waitForUnexpectedCall().has_value());
+
+    // EventThread should also detect at this point that it does not need
+    // any more vsync events, and should disable their generation.
+    expectVSyncSetEnabledCallReceived(false);
+}
+
+TEST_F(EventThreadTest, setVsyncRateZeroPostsNoVSyncEventsToThatConnection) {
+    // Create a first connection, register it, and request a vsync rate of zero.
+    ConnectionEventRecorder firstConnectionEventRecorder{0};
+    sp<MockEventThreadConnection> firstConnection = createConnection(firstConnectionEventRecorder);
+    mThread->setVsyncRate(0, firstConnection);
+
+    // By itself, this should not enable vsync events
+    EXPECT_FALSE(mVSyncSetEnabledCallRecorder.waitForUnexpectedCall().has_value());
+    EXPECT_FALSE(mVSyncSetCallbackCallRecorder.waitForCall(0us).has_value());
+
+    // However, if there is another connection that wants events at a nonzero rate...
+    ConnectionEventRecorder secondConnectionEventRecorder{0};
+    sp<MockEventThreadConnection> secondConnection =
+            createConnection(secondConnectionEventRecorder);
+    mThread->setVsyncRate(1, secondConnection);
+
+    // EventThread should enable vsync callbacks, and set a callback interface
+    // pointer to use them with the VSync source.
+    expectVSyncSetEnabledCallReceived(true);
+    auto callback = expectVSyncSetCallbackCallReceived();
+    ASSERT_TRUE(callback);
+
+    // Send a vsync event. EventThread should then make a call to the
+    // interceptor, and the second connection. The first connection should not
+    // get the event.
+    callback->onVSyncEvent(123);
+    expectInterceptCallReceived(123);
+    EXPECT_FALSE(firstConnectionEventRecorder.waitForUnexpectedCall().has_value());
+    expectVsyncEventReceivedByConnection("secondConnection", secondConnectionEventRecorder, 123,
+                                         1u);
+}
+
+TEST_F(EventThreadTest, setVsyncRateOnePostsAllEventsToThatConnection) {
+    mThread->setVsyncRate(1, mConnection);
+
+    // EventThread should enable vsync callbacks, and set a callback interface
+    // pointer to use them with the VSync source.
+    expectVSyncSetEnabledCallReceived(true);
+    auto callback = expectVSyncSetCallbackCallReceived();
+    ASSERT_TRUE(callback);
+
+    // Send a vsync event. EventThread should then make a call to the
+    // interceptor, and the connection.
+    callback->onVSyncEvent(123);
+    expectInterceptCallReceived(123);
+    expectVsyncEventReceivedByConnection(123, 1u);
+
+    // A second event should go to the same places.
+    callback->onVSyncEvent(456);
+    expectInterceptCallReceived(456);
+    expectVsyncEventReceivedByConnection(456, 2u);
+
+    // A third event should go to the same places.
+    callback->onVSyncEvent(789);
+    expectInterceptCallReceived(789);
+    expectVsyncEventReceivedByConnection(789, 3u);
+}
+
+TEST_F(EventThreadTest, setVsyncRateTwoPostsEveryOtherEventToThatConnection) {
+    mThread->setVsyncRate(2, mConnection);
+
+    // EventThread should enable vsync callbacks, and set a callback interface
+    // pointer to use them with the VSync source.
+    expectVSyncSetEnabledCallReceived(true);
+    auto callback = expectVSyncSetCallbackCallReceived();
+    ASSERT_TRUE(callback);
+
+    // The first event will be seen by the interceptor, and not the connection.
+    callback->onVSyncEvent(123);
+    expectInterceptCallReceived(123);
+    EXPECT_FALSE(mConnectionEventCallRecorder.waitForUnexpectedCall().has_value());
+
+    // The second event will be seen by the interceptor and the connection.
+    callback->onVSyncEvent(456);
+    expectInterceptCallReceived(456);
+    expectVsyncEventReceivedByConnection(456, 2u);
+
+    // The third event will be seen by the interceptor, and not the connection.
+    callback->onVSyncEvent(789);
+    expectInterceptCallReceived(789);
+    EXPECT_FALSE(mConnectionEventCallRecorder.waitForUnexpectedCall().has_value());
+
+    // The fourth event will be seen by the interceptor and the connection.
+    callback->onVSyncEvent(101112);
+    expectInterceptCallReceived(101112);
+    expectVsyncEventReceivedByConnection(101112, 4u);
+}
+
+TEST_F(EventThreadTest, connectionsRemovedIfInstanceDestroyed) {
+    mThread->setVsyncRate(1, mConnection);
+
+    // EventThread should enable vsync callbacks, and set a callback interface
+    // pointer to use them with the VSync source.
+    expectVSyncSetEnabledCallReceived(true);
+    auto callback = expectVSyncSetCallbackCallReceived();
+    ASSERT_TRUE(callback);
+
+    // Destroy the only (strong) reference to the connection.
+    mConnection = nullptr;
+
+    // The first event will be seen by the interceptor, and not the connection.
+    callback->onVSyncEvent(123);
+    expectInterceptCallReceived(123);
+    EXPECT_FALSE(mConnectionEventCallRecorder.waitForUnexpectedCall().has_value());
+
+    // EventThread should disable vsync callbacks
+    expectVSyncSetEnabledCallReceived(false);
+}
+
+TEST_F(EventThreadTest, connectionsRemovedIfEventDeliveryError) {
+    ConnectionEventRecorder errorConnectionEventRecorder{NO_MEMORY};
+    sp<MockEventThreadConnection> errorConnection = createConnection(errorConnectionEventRecorder);
+    mThread->setVsyncRate(1, errorConnection);
+
+    // EventThread should enable vsync callbacks, and set a callback interface
+    // pointer to use them with the VSync source.
+    expectVSyncSetEnabledCallReceived(true);
+    auto callback = expectVSyncSetCallbackCallReceived();
+    ASSERT_TRUE(callback);
+
+    // The first event will be seen by the interceptor, and by the connection,
+    // which then returns an error.
+    callback->onVSyncEvent(123);
+    expectInterceptCallReceived(123);
+    expectVsyncEventReceivedByConnection("errorConnection", errorConnectionEventRecorder, 123, 1u);
+
+    // A subsequent event will be seen by the interceptor and not by the
+    // connection.
+    callback->onVSyncEvent(456);
+    expectInterceptCallReceived(456);
+    EXPECT_FALSE(errorConnectionEventRecorder.waitForUnexpectedCall().has_value());
+
+    // EventThread should disable vsync callbacks with the second event
+    expectVSyncSetEnabledCallReceived(false);
+}
+
+TEST_F(EventThreadTest, eventsDroppedIfNonfatalEventDeliveryError) {
+    ConnectionEventRecorder errorConnectionEventRecorder{WOULD_BLOCK};
+    sp<MockEventThreadConnection> errorConnection = createConnection(errorConnectionEventRecorder);
+    mThread->setVsyncRate(1, errorConnection);
+
+    // EventThread should enable vsync callbacks, and set a callback interface
+    // pointer to use them with the VSync source.
+    expectVSyncSetEnabledCallReceived(true);
+    auto callback = expectVSyncSetCallbackCallReceived();
+    ASSERT_TRUE(callback);
+
+    // The first event will be seen by the interceptor, and by the connection,
+    // which then returns a non-fatal error.
+    callback->onVSyncEvent(123);
+    expectInterceptCallReceived(123);
+    expectVsyncEventReceivedByConnection("errorConnection", errorConnectionEventRecorder, 123, 1u);
+
+    // A subsequent event will be seen by the interceptor, and by the connection,
+    // which then still returns a non-fatal error.
+    callback->onVSyncEvent(456);
+    expectInterceptCallReceived(456);
+    expectVsyncEventReceivedByConnection("errorConnection", errorConnectionEventRecorder, 456, 2u);
+
+    // EventThread will not disable vsync callbacks as the errors are non-fatal.
+    EXPECT_FALSE(mVSyncSetEnabledCallRecorder.waitForUnexpectedCall().has_value());
+}
+
+TEST_F(EventThreadTest, setPhaseOffsetForwardsToVSyncSource) {
+    mThread->setPhaseOffset(321);
+    expectVSyncSetPhaseOffsetCallReceived(321);
+}
+
+TEST_F(EventThreadTest, postHotplugPrimaryDisconnect) {
+    mThread->onHotplugReceived(DisplayDevice::DISPLAY_PRIMARY, false);
+    expectHotplugEventReceivedByConnection(DisplayDevice::DISPLAY_PRIMARY, false);
+}
+
+TEST_F(EventThreadTest, postHotplugPrimaryConnect) {
+    mThread->onHotplugReceived(DisplayDevice::DISPLAY_PRIMARY, true);
+    expectHotplugEventReceivedByConnection(DisplayDevice::DISPLAY_PRIMARY, true);
+}
+
+TEST_F(EventThreadTest, postHotplugExternalDisconnect) {
+    mThread->onHotplugReceived(DisplayDevice::DISPLAY_EXTERNAL, false);
+    expectHotplugEventReceivedByConnection(DisplayDevice::DISPLAY_EXTERNAL, false);
+}
+
+TEST_F(EventThreadTest, postHotplugExternalConnect) {
+    mThread->onHotplugReceived(DisplayDevice::DISPLAY_EXTERNAL, true);
+    expectHotplugEventReceivedByConnection(DisplayDevice::DISPLAY_EXTERNAL, true);
+}
+
+TEST_F(EventThreadTest, postHotplugVirtualDisconnectIsFilteredOut) {
+    mThread->onHotplugReceived(DisplayDevice::DISPLAY_VIRTUAL, false);
+    EXPECT_FALSE(mConnectionEventCallRecorder.waitForUnexpectedCall().has_value());
+}
+
+} // namespace
+} // namespace android
diff --git a/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h b/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
index b8a3baf..f1556d8 100644
--- a/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
+++ b/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
@@ -85,6 +85,25 @@
         return mFlinger->onHotplugReceived(sequenceId, display, connection);
     }
 
+    auto setDisplayStateLocked(const DisplayState& s) { return mFlinger->setDisplayStateLocked(s); }
+
+    auto onInitializeDisplays() { return mFlinger->onInitializeDisplays(); }
+
+    auto setPowerModeInternal(const sp<DisplayDevice>& hw, int mode, bool stateLockHeld = false) {
+        return mFlinger->setPowerModeInternal(hw, mode, stateLockHeld);
+    }
+
+    /* ------------------------------------------------------------------------
+     * Read-only access to private data to assert post-conditions.
+     */
+
+    const auto& getAnimFrameTracker() const { return mFlinger->mAnimFrameTracker; }
+    const auto& getHasPoweredOff() const { return mFlinger->mHasPoweredOff; }
+    const auto& getHWVsyncAvailable() const { return mFlinger->mHWVsyncAvailable; }
+    const auto& getVisibleRegionsDirty() const { return mFlinger->mVisibleRegionsDirty; }
+
+    const auto& getCompositorTiming() const { return mFlinger->getBE().mCompositorTiming; }
+
     /* ------------------------------------------------------------------------
      * Read-write access to private data to set up preconditions and assert
      * post-conditions.
@@ -247,6 +266,16 @@
             return mFlinger.mutableCurrentState().displays.editValueFor(mDisplayToken);
         }
 
+        const auto& getDrawingDisplayState() {
+            return mFlinger.mutableDrawingState().displays.valueFor(mDisplayToken);
+        }
+
+        const auto& getCurrentDisplayState() {
+            return mFlinger.mutableCurrentState().displays.valueFor(mDisplayToken);
+        }
+
+        auto& mutableDisplayDevice() { return mFlinger.mutableDisplays().valueFor(mDisplayToken); }
+
         auto& setNativeWindow(const sp<ANativeWindow>& nativeWindow) {
             mNativeWindow = nativeWindow;
             return *this;
@@ -268,17 +297,19 @@
         }
 
         sp<DisplayDevice> inject() {
+            std::unordered_map<ui::ColorMode, std::vector<ui::RenderIntent>> hdrAndRenderIntents;
             sp<DisplayDevice> device =
                     new DisplayDevice(mFlinger.mFlinger.get(), mType, mHwcId, mSecure, mDisplayToken,
                                       mNativeWindow, mDisplaySurface, std::move(mRenderSurface), 0,
-                                      0, false, HdrCapabilities(), 0, HWC_POWER_MODE_NORMAL);
+                                      0, false, HdrCapabilities(), 0, hdrAndRenderIntents,
+                                      HWC_POWER_MODE_NORMAL);
             mFlinger.mutableDisplays().add(mDisplayToken, device);
 
             DisplayDeviceState state(mType, mSecure);
             mFlinger.mutableCurrentState().displays.add(mDisplayToken, state);
             mFlinger.mutableDrawingState().displays.add(mDisplayToken, state);
 
-            if (mType < DisplayDevice::DISPLAY_VIRTUAL) {
+            if (mType >= DisplayDevice::DISPLAY_PRIMARY && mType < DisplayDevice::DISPLAY_VIRTUAL) {
                 mFlinger.mutableBuiltinDisplays()[mType] = mDisplayToken;
             }
 
diff --git a/services/surfaceflinger/tests/unittests/mock/RenderEngine/MockRenderEngine.h b/services/surfaceflinger/tests/unittests/mock/RenderEngine/MockRenderEngine.h
index 9bb2a3c..ac08293 100644
--- a/services/surfaceflinger/tests/unittests/mock/RenderEngine/MockRenderEngine.h
+++ b/services/surfaceflinger/tests/unittests/mock/RenderEngine/MockRenderEngine.h
@@ -60,12 +60,14 @@
     MOCK_METHOD1(setupLayerTexturing, void(const Texture&));
     MOCK_METHOD0(setupLayerBlackedOut, void());
     MOCK_METHOD4(setupFillWithColor, void(float, float, float, float));
-    MOCK_METHOD1(setupColorTransform, mat4(const mat4&));
+    MOCK_METHOD1(setupColorTransform, void(const mat4&));
+    MOCK_METHOD1(setSaturationMatrix, void(const mat4&));
     MOCK_METHOD0(disableTexturing, void());
     MOCK_METHOD0(disableBlending, void());
     MOCK_METHOD1(setSourceY410BT2020, void(bool));
     MOCK_METHOD1(setSourceDataSpace, void(ui::Dataspace));
     MOCK_METHOD1(setOutputDataSpace, void(ui::Dataspace));
+    MOCK_METHOD1(setDisplayMaxLuminance, void(const float));
     MOCK_METHOD2(bindNativeBufferAsFrameBuffer,
                  void(ANativeWindowBuffer*, RE::BindNativeBufferAsFramebuffer*));
     MOCK_METHOD1(unbindNativeBufferAsFrameBuffer, void(RE::BindNativeBufferAsFramebuffer*));
diff --git a/vulkan/libvulkan/swapchain.cpp b/vulkan/libvulkan/swapchain.cpp
index 6fb3351..c42e811 100644
--- a/vulkan/libvulkan/swapchain.cpp
+++ b/vulkan/libvulkan/swapchain.cpp
@@ -185,6 +185,7 @@
 struct Surface {
     android::sp<ANativeWindow> window;
     VkSwapchainKHR swapchain_handle;
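+    // Consumer usage bits queried from the native window when the surface is
+    // created; checked by GetPhysicalDeviceSurfaceSupportKHR below.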
+    uint64_t consumer_usage;
 };
 
 VkSurfaceKHR HandleFromSurface(Surface* surface) {
@@ -496,9 +497,18 @@
 
     surface->window = pCreateInfo->window;
     surface->swapchain_handle = VK_NULL_HANDLE;
+    int err = native_window_get_consumer_usage(surface->window.get(),
+                                               &surface->consumer_usage);
+    if (err != android::NO_ERROR) {
+        ALOGE("native_window_get_consumer_usage() failed: %s (%d)",
+              strerror(-err), err);
+        surface->~Surface();
+        allocator->pfnFree(allocator->pUserData, surface);
+        return VK_ERROR_INITIALIZATION_FAILED;
+    }
 
     // TODO(jessehall): Create and use NATIVE_WINDOW_API_VULKAN.
-    int err =
+    err =
         native_window_api_connect(surface->window.get(), NATIVE_WINDOW_API_EGL);
     if (err != 0) {
         // TODO(jessehall): Improve error reporting. Can we enumerate possible
@@ -536,9 +546,45 @@
 VKAPI_ATTR
 VkResult GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice /*pdev*/,
                                             uint32_t /*queue_family*/,
-                                            VkSurfaceKHR /*surface*/,
+                                            VkSurfaceKHR surface_handle,
                                             VkBool32* supported) {
-    *supported = VK_TRUE;
+    const Surface* surface = SurfaceFromHandle(surface_handle);
+    if (!surface) {
+        return VK_ERROR_SURFACE_LOST_KHR;
+    }
+    const ANativeWindow* window = surface->window.get();
+
+    int query_value;
+    int err = window->query(window, NATIVE_WINDOW_FORMAT, &query_value);
+    if (err != 0 || query_value < 0) {
+        ALOGE("NATIVE_WINDOW_FORMAT query failed: %s (%d) value=%d",
+              strerror(-err), err, query_value);
+        return VK_ERROR_SURFACE_LOST_KHR;
+    }
+
+    android_pixel_format native_format =
+        static_cast<android_pixel_format>(query_value);
+
+    bool format_supported = false;
+    switch (native_format) {
+        case HAL_PIXEL_FORMAT_RGBA_8888:
+        case HAL_PIXEL_FORMAT_RGB_565:
+            format_supported = true;
+            break;
+        default:
+            break;
+    }
+
+    // USAGE_CPU_READ_MASK = 0xFUL
+    // USAGE_CPU_WRITE_MASK = (0xFUL << 4)
+    // The currently used bits are as below:
+    // USAGE_CPU_READ_RARELY = 2UL
+    // USAGE_CPU_READ_OFTEN = 3UL
+    // USAGE_CPU_WRITE_RARELY = (2UL << 4)
+    // USAGE_CPU_WRITE_OFTEN = (3UL << 4)
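+    // Masking with 0xFFUL therefore covers both the CPU read and CPU write
+    // bits, so a surface with an unsupported format is still reported as
+    // supported when its consumer requests no CPU access at all.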
+    *supported = static_cast<VkBool32>(format_supported ||
+                                       (surface->consumer_usage & 0xFFUL) == 0);
+
     return VK_SUCCESS;
 }