Merge "Make android:cantSaveState an optional feature." into pi-dev
diff --git a/cmds/atrace/atrace.cpp b/cmds/atrace/atrace.cpp
index f65f4f8..70164ea 100644
--- a/cmds/atrace/atrace.cpp
+++ b/cmds/atrace/atrace.cpp
@@ -125,6 +125,7 @@
         { OPT,      "events/sched/sched_waking/enable" },
         { OPT,      "events/sched/sched_blocked_reason/enable" },
         { OPT,      "events/sched/sched_cpu_hotplug/enable" },
+        { OPT,      "events/sched/sched_pi_setprio/enable" },
         { OPT,      "events/cgroup/enable" },
     } },
     { "irq",        "IRQ Events",   0, {
diff --git a/cmds/atrace/atrace.rc b/cmds/atrace/atrace.rc
index 8421568..a9c5c82 100644
--- a/cmds/atrace/atrace.rc
+++ b/cmds/atrace/atrace.rc
@@ -1,6 +1,6 @@
 ## Permissions to allow system-wide tracing to the kernel trace buffer.
 ##
-on post-fs
+on late-init
 
 # Allow writing to the kernel trace log.
     chmod 0222 /sys/kernel/debug/tracing/trace_marker
@@ -29,6 +29,8 @@
     chmod 0666 /sys/kernel/tracing/events/sched/sched_blocked_reason/enable
     chmod 0666 /sys/kernel/debug/tracing/events/sched/sched_cpu_hotplug/enable
     chmod 0666 /sys/kernel/tracing/events/sched/sched_cpu_hotplug/enable
+    chmod 0666 /sys/kernel/debug/tracing/events/sched/sched_pi_setprio/enable
+    chmod 0666 /sys/kernel/tracing/events/sched/sched_pi_setprio/enable
     chmod 0666 /sys/kernel/debug/tracing/events/cgroup/enable
     chmod 0666 /sys/kernel/tracing/events/cgroup/enable
     chmod 0666 /sys/kernel/debug/tracing/events/power/cpu_frequency/enable
diff --git a/cmds/installd/dexopt.cpp b/cmds/installd/dexopt.cpp
index 2777662..624d7d6 100644
--- a/cmds/installd/dexopt.cpp
+++ b/cmds/installd/dexopt.cpp
@@ -740,6 +740,7 @@
 static void run_profman(const std::vector<unique_fd>& profile_fds,
                         const unique_fd& reference_profile_fd,
                         const std::vector<unique_fd>* apk_fds,
+                        const std::vector<std::string>* dex_locations,
                         bool copy_and_update) {
     const char* profman_bin = is_debug_runtime() ? "/system/bin/profmand" : "/system/bin/profman";
 
@@ -762,6 +763,13 @@
         }
     }
 
+    std::vector<std::string> dex_location_args;
+    if (dex_locations != nullptr) {
+        for (size_t k = 0; k < dex_locations->size(); k++) {
+            dex_location_args.push_back("--dex-location=" + (*dex_locations)[k]);
+        }
+    }
+
     // program name, reference profile fd, the final NULL and the profile fds
-    const char* argv[3 + profile_args.size() + apk_args.size() + (copy_and_update ? 1 : 0)];
+    const char* argv[3 + profile_args.size() + apk_args.size() + dex_location_args.size() +
+            (copy_and_update ? 1 : 0)];
     int i = 0;
@@ -773,9 +781,13 @@
     for (size_t k = 0; k < apk_args.size(); k++) {
         argv[i++] = apk_args[k].c_str();
     }
+    for (size_t k = 0; k < dex_location_args.size(); k++) {
+        argv[i++] = dex_location_args[k].c_str();
+    }
     if (copy_and_update) {
         argv[i++] = "--copy-and-update-profile-key";
     }
+
     // Do not add after dex2oat_flags, they should override others for debugging.
     argv[i] = NULL;
 
@@ -787,20 +799,26 @@
 [[ noreturn ]]
 static void run_profman_merge(const std::vector<unique_fd>& profiles_fd,
                               const unique_fd& reference_profile_fd,
-                              const std::vector<unique_fd>* apk_fds = nullptr) {
-    run_profman(profiles_fd, reference_profile_fd, apk_fds, /*copy_and_update*/false);
+                              const std::vector<unique_fd>* apk_fds = nullptr,
+                              const std::vector<std::string>* dex_locations = nullptr) {
+    run_profman(profiles_fd, reference_profile_fd, apk_fds, dex_locations,
+            /*copy_and_update*/false);
 }
 
 [[ noreturn ]]
 static void run_profman_copy_and_update(unique_fd&& profile_fd,
                                         unique_fd&& reference_profile_fd,
-                                        unique_fd&& apk_fd) {
+                                        unique_fd&& apk_fd,
+                                        const std::string& dex_location) {
     std::vector<unique_fd> profiles_fd;
     profiles_fd.push_back(std::move(profile_fd));
     std::vector<unique_fd> apk_fds;
     apk_fds.push_back(std::move(apk_fd));
+    std::vector<std::string> dex_locations;
+    dex_locations.push_back(dex_location);
 
-    run_profman(profiles_fd, reference_profile_fd, &apk_fds, /*copy_and_update*/true);
+    run_profman(profiles_fd, reference_profile_fd, &apk_fds, &dex_locations,
+            /*copy_and_update*/true);
 }
 
 // Decides if profile guided compilation is needed or not based on existing profiles.
@@ -2598,7 +2616,8 @@
     }
 }
 
-bool open_classpath_files(const std::string& classpath, std::vector<unique_fd>* apk_fds) {
+bool open_classpath_files(const std::string& classpath, std::vector<unique_fd>* apk_fds,
+        std::vector<std::string>* dex_locations) {
     std::vector<std::string> classpaths_elems = base::Split(classpath, ":");
     for (const std::string& elem : classpaths_elems) {
         unique_fd fd(TEMP_FAILURE_RETRY(open(elem.c_str(), O_RDONLY)));
@@ -2607,6 +2626,7 @@
             return false;
         } else {
             apk_fds->push_back(std::move(fd));
+            dex_locations->push_back(elem);
         }
     }
     return true;
@@ -2636,7 +2656,8 @@
     // Open the class paths elements. These will be used to filter out profile data that does
     // not belong to the classpath during merge.
     std::vector<unique_fd> apk_fds;
-    if (!open_classpath_files(classpath, &apk_fds)) {
+    std::vector<std::string> dex_locations;
+    if (!open_classpath_files(classpath, &apk_fds, &dex_locations)) {
         return false;
     }
 
@@ -2644,7 +2665,7 @@
     if (pid == 0) {
         /* child -- drop privileges before continuing */
         drop_capabilities(app_shared_gid);
-        run_profman_merge(profiles_fd, snapshot_fd, &apk_fds);
+        run_profman_merge(profiles_fd, snapshot_fd, &apk_fds, &dex_locations);
     }
 
     /* parent */
@@ -2694,7 +2715,8 @@
     // Open the classpath elements. These will be used to filter out profile data that does
     // not belong to the classpath during merge.
     std::vector<unique_fd> apk_fds;
-    if (!open_classpath_files(classpath, &apk_fds)) {
+    std::vector<std::string> dex_locations;
+    if (!open_classpath_files(classpath, &apk_fds, &dex_locations)) {
         return false;
     }
 
@@ -2721,7 +2743,9 @@
             /* child -- drop privileges before continuing */
             drop_capabilities(AID_SYSTEM);
 
-            run_profman_merge(profiles_fd, snapshot_fd, &apk_fds);
+            // The introduction of new access flags into boot jars causes them to
+            // fail dex file verification.
+            run_profman_merge(profiles_fd, snapshot_fd, &apk_fds, &dex_locations);
         }
 
         /* parent */
@@ -2784,7 +2808,8 @@
         // The copy and update takes ownership over the fds.
         run_profman_copy_and_update(std::move(dex_metadata_fd),
                                     std::move(ref_profile_fd),
-                                    std::move(apk_fd));
+                                    std::move(apk_fd),
+                                    code_path);
     }
 
     /* parent */
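
The new --dex-location arguments are generated in the same order as the classpath elements whose fds are already passed to profman, so each opened APK can be paired with the location its profile keys refer to. For illustration, a minimal sketch of that pairing (the classpath path is made up; only the --dex-location flag comes from this change, the fd-based flags are not shown here):

    #include <string>
    #include <vector>

    // Sketch of the pairing built by open_classpath_files() plus run_profman():
    // element i of dex_location_args describes the same classpath entry whose
    // open fd becomes the i-th apk argument.
    int main() {
        std::vector<std::string> classpath = {"/system/framework/services.jar"};  // illustrative
        std::vector<std::string> dex_location_args;
        for (const std::string& elem : classpath) {
            dex_location_args.push_back("--dex-location=" + elem);
        }
        return 0;
    }
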
diff --git a/include/android/hardware_buffer_jni.h b/include/android/hardware_buffer_jni.h
index 6020870..7c4be24 100644
--- a/include/android/hardware_buffer_jni.h
+++ b/include/android/hardware_buffer_jni.h
@@ -31,9 +31,11 @@
 
 /**
  * Return the AHardwareBuffer associated with a Java HardwareBuffer object,
- * for interacting with it through native code.  This acquires a reference
- * on the AHardwareBuffer that is returned; be sure to use
- * AHardwareBuffer_release() when done with it so that it doesn't leak.
+ * for interacting with it through native code. This method does not acquire any
+ * additional reference to the AHardwareBuffer that is returned. To keep the
+ * AHardwareBuffer alive after the Java HardwareBuffer object has been garbage
+ * collected, be sure to use AHardwareBuffer_acquire() to acquire an additional
+ * reference.
  */
 AHardwareBuffer* AHardwareBuffer_fromHardwareBuffer(JNIEnv* env,
         jobject hardwareBufferObj);
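
With the updated contract, native code that needs the buffer to outlive the Java HardwareBuffer must take its own reference. A minimal sketch of that pattern, assuming the NDK AHardwareBuffer_acquire()/AHardwareBuffer_release() entry points (the helper name and null handling are illustrative):

    #include <android/hardware_buffer.h>
    #include <android/hardware_buffer_jni.h>

    // Illustrative helper: keep the AHardwareBuffer alive beyond the Java object,
    // per the documentation change above.
    static AHardwareBuffer* acquireFromJava(JNIEnv* env, jobject javaHardwareBuffer) {
        AHardwareBuffer* buffer = AHardwareBuffer_fromHardwareBuffer(env, javaHardwareBuffer);
        if (buffer != nullptr) {
            AHardwareBuffer_acquire(buffer);  // the conversion itself no longer does this
        }
        return buffer;  // release later with AHardwareBuffer_release()
    }
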
diff --git a/libs/gui/Android.bp b/libs/gui/Android.bp
index 2768ad8..73f2147 100644
--- a/libs/gui/Android.bp
+++ b/libs/gui/Android.bp
@@ -142,10 +142,26 @@
         "android.hardware.configstore-utils",
     ],
 
+    // bufferhub is not used when building libgui for vendors
+    target: {
+        vendor: {
+            cflags: ["-DNO_BUFFERHUB"],
+            exclude_srcs: [
+                "BufferHubConsumer.cpp",
+                "BufferHubProducer.cpp",
+            ],
+            exclude_shared_libs: [
+                "libbufferhubqueue",
+                "libpdx_default_transport",
+            ],
+        },
+    },
+
     header_libs: [
         "libdvr_headers",
         "libnativebase_headers",
         "libgui_headers",
+        "libpdx_headers",
     ],
 
     export_shared_lib_headers: [
diff --git a/libs/gui/BufferHubProducer.cpp b/libs/gui/BufferHubProducer.cpp
index 061710a..ae5cca2 100644
--- a/libs/gui/BufferHubProducer.cpp
+++ b/libs/gui/BufferHubProducer.cpp
@@ -136,7 +136,7 @@
                                           uint32_t height, PixelFormat format, uint64_t usage,
                                           uint64_t* /*outBufferAge*/,
                                           FrameEventHistoryDelta* /* out_timestamps */) {
-    ALOGW("dequeueBuffer: w=%u, h=%u, format=%d, usage=%" PRIu64, width, height, format, usage);
+    ALOGV("dequeueBuffer: w=%u, h=%u, format=%d, usage=%" PRIu64, width, height, format, usage);
 
     status_t ret;
     std::unique_lock<std::mutex> lock(mutex_);
@@ -208,7 +208,7 @@
 
     buffers_[slot].mBufferState.freeQueued();
     buffers_[slot].mBufferState.dequeue();
-    ALOGW("dequeueBuffer: slot=%zu", slot);
+    ALOGV("dequeueBuffer: slot=%zu", slot);
 
     // TODO(jwcai) Handle fence properly. |BufferHub| has full fence support, we
     // just need to expose that through |BufferHubQueue| once we need fences.
diff --git a/libs/gui/BufferQueue.cpp b/libs/gui/BufferQueue.cpp
index 2917f45..a8da134 100644
--- a/libs/gui/BufferQueue.cpp
+++ b/libs/gui/BufferQueue.cpp
@@ -18,8 +18,11 @@
 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
 //#define LOG_NDEBUG 0
 
+#ifndef NO_BUFFERHUB
 #include <gui/BufferHubConsumer.h>
 #include <gui/BufferHubProducer.h>
+#endif
+
 #include <gui/BufferQueue.h>
 #include <gui/BufferQueueConsumer.h>
 #include <gui/BufferQueueCore.h>
@@ -103,6 +106,7 @@
     *outConsumer = consumer;
 }
 
+#ifndef NO_BUFFERHUB
 void BufferQueue::createBufferHubQueue(sp<IGraphicBufferProducer>* outProducer,
                                        sp<IGraphicBufferConsumer>* outConsumer) {
     LOG_ALWAYS_FATAL_IF(outProducer == NULL, "BufferQueue: outProducer must not be NULL");
@@ -128,5 +132,6 @@
     *outProducer = producer;
     *outConsumer = consumer;
 }
+#endif
 
 }; // namespace android
diff --git a/libs/gui/IGraphicBufferProducer.cpp b/libs/gui/IGraphicBufferProducer.cpp
index 777a3e5..0749fde 100644
--- a/libs/gui/IGraphicBufferProducer.cpp
+++ b/libs/gui/IGraphicBufferProducer.cpp
@@ -27,7 +27,9 @@
 #include <binder/Parcel.h>
 #include <binder/IInterface.h>
 
+#ifndef NO_BUFFERHUB
 #include <gui/BufferHubProducer.h>
+#endif
 #include <gui/BufferQueueDefs.h>
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/IProducerListener.h>
@@ -706,6 +708,7 @@
         }
         case USE_BUFFER_HUB: {
             ALOGE("createFromParcel: BufferHub not implemented.");
+#ifndef NO_BUFFERHUB
             dvr::ProducerQueueParcelable producerParcelable;
             res = producerParcelable.readFromParcel(parcel);
             if (res != NO_ERROR) {
@@ -713,6 +716,9 @@
                 return nullptr;
             }
             return BufferHubProducer::Create(std::move(producerParcelable));
+#else
+            return nullptr;
+#endif
         }
         default: {
             ALOGE("createFromParcel: Unexpected magic: 0x%x.", outMagic);
diff --git a/libs/gui/include/gui/BufferQueue.h b/libs/gui/include/gui/BufferQueue.h
index f175573..da95274 100644
--- a/libs/gui/include/gui/BufferQueue.h
+++ b/libs/gui/include/gui/BufferQueue.h
@@ -79,9 +79,11 @@
             sp<IGraphicBufferConsumer>* outConsumer,
             bool consumerIsSurfaceFlinger = false);
 
+#ifndef NO_BUFFERHUB
     // Creates an IGraphicBufferProducer and IGraphicBufferConsumer pair backed by BufferHub.
     static void createBufferHubQueue(sp<IGraphicBufferProducer>* outProducer,
                                      sp<IGraphicBufferConsumer>* outConsumer);
+#endif
 
     BufferQueue() = delete; // Create through createBufferQueue
 };
diff --git a/libs/ui/Android.bp b/libs/ui/Android.bp
index 1a9fb8b..ff9d19e 100644
--- a/libs/ui/Android.bp
+++ b/libs/ui/Android.bp
@@ -84,7 +84,6 @@
         "libhidlbase",
         "libhidltransport",
         "libhwbinder",
-        "libpdx_default_transport",
         "libsync",
         "libutils",
         "libutilscallstack",
@@ -106,6 +105,7 @@
         "libnativebase_headers",
         "libhardware_headers",
         "libui_headers",
+        "libpdx_headers",
     ],
 
     export_static_lib_headers: [
diff --git a/libs/ui/HdrCapabilities.cpp b/libs/ui/HdrCapabilities.cpp
index 50f9bf1..a36911d 100644
--- a/libs/ui/HdrCapabilities.cpp
+++ b/libs/ui/HdrCapabilities.cpp
@@ -27,7 +27,6 @@
 HdrCapabilities::HdrCapabilities(HdrCapabilities&& other) = default;
 HdrCapabilities& HdrCapabilities::operator=(HdrCapabilities&& other) = default;
 
-
 size_t HdrCapabilities::getFlattenedSize() const {
     return  sizeof(mMaxLuminance) +
             sizeof(mMaxAverageLuminance) +
diff --git a/libs/vr/libbufferhub/Android.bp b/libs/vr/libbufferhub/Android.bp
index b38ecc7..7b5ad44 100644
--- a/libs/vr/libbufferhub/Android.bp
+++ b/libs/vr/libbufferhub/Android.bp
@@ -56,15 +56,6 @@
     export_header_lib_headers: [
         "libnativebase_headers",
     ],
-    vendor_available: false,
-    vndk: {
-        enabled: true,
-    },
-    target: {
-        vendor: {
-            exclude_srcs: ["detached_buffer.cpp"],
-        },
-    },
 }
 
 cc_test {
diff --git a/libs/vr/libbufferhubqueue/Android.bp b/libs/vr/libbufferhubqueue/Android.bp
index eeec9ec..9f72c05 100644
--- a/libs/vr/libbufferhubqueue/Android.bp
+++ b/libs/vr/libbufferhubqueue/Android.bp
@@ -59,10 +59,6 @@
     static_libs: staticLibraries,
     shared_libs: sharedLibraries,
     header_libs: headerLibraries,
-    vendor_available: false,
-    vndk: {
-        enabled: true,
-    },
 }
 
 subdirs = ["benchmarks", "tests"]
diff --git a/libs/vr/libdvr/Android.bp b/libs/vr/libdvr/Android.bp
index d0e34ee..16906f5 100644
--- a/libs/vr/libdvr/Android.bp
+++ b/libs/vr/libdvr/Android.bp
@@ -16,10 +16,7 @@
 cc_library_headers {
     name: "libdvr_headers",
     export_include_dirs: ["include"],
-    vendor_available: false,
-    vndk: {
-        enabled: true,
-    },
+    vendor_available: true,
 }
 
 cflags = [
diff --git a/libs/vr/libpdx/Android.bp b/libs/vr/libpdx/Android.bp
index 9b84d65..1a9d727 100644
--- a/libs/vr/libpdx/Android.bp
+++ b/libs/vr/libpdx/Android.bp
@@ -1,3 +1,9 @@
+cc_library_headers {
+    name: "libpdx_headers",
+    export_include_dirs: ["private"],
+    vendor_available: true,
+}
+
 cc_library_static {
     name: "libpdx",
     clang: true,
@@ -8,8 +14,8 @@
         "-DLOG_TAG=\"libpdx\"",
         "-DTRACE=0",
     ],
-    export_include_dirs: ["private"],
-    local_include_dirs: ["private"],
+    header_libs: ["libpdx_headers"],
+    export_header_lib_headers: ["libpdx_headers"],
     srcs: [
         "client.cpp",
         "service.cpp",
@@ -22,10 +28,6 @@
         "libutils",
         "liblog",
     ],
-    vendor_available: false,
-    vndk: {
-        enabled: true,
-    },
 }
 
 cc_test {
diff --git a/libs/vr/libpdx_default_transport/Android.bp b/libs/vr/libpdx_default_transport/Android.bp
index 475eb50..74b8c8b 100644
--- a/libs/vr/libpdx_default_transport/Android.bp
+++ b/libs/vr/libpdx_default_transport/Android.bp
@@ -12,10 +12,6 @@
     name: "pdx_default_transport_lib_defaults",
     export_include_dirs: ["private"],
     whole_static_libs: ["libpdx"],
-    vendor_available: false,
-    vndk: {
-        enabled: true,
-    },
 }
 
 cc_defaults {
@@ -37,10 +33,6 @@
         "pdx_default_transport_lib_defaults",
         "pdx_use_transport_uds",
     ],
-    vendor_available: false,
-    vndk: {
-        enabled: true,
-    },
     shared_libs: [
         "libbase",
         "libbinder",
diff --git a/libs/vr/libpdx_uds/Android.bp b/libs/vr/libpdx_uds/Android.bp
index 79cfdf6..d640950 100644
--- a/libs/vr/libpdx_uds/Android.bp
+++ b/libs/vr/libpdx_uds/Android.bp
@@ -30,10 +30,6 @@
     whole_static_libs: [
         "libselinux",
     ],
-    vendor_available: false,
-    vndk: {
-        enabled: true,
-    },
 }
 
 cc_test {
diff --git a/services/surfaceflinger/Android.bp b/services/surfaceflinger/Android.bp
index 5b1e631..6a69844 100644
--- a/services/surfaceflinger/Android.bp
+++ b/services/surfaceflinger/Android.bp
@@ -45,6 +45,7 @@
         "libpdx_default_transport",
         "libprotobuf-cpp-lite",
         "libsync",
+        "libtimestats_proto",
         "libui",
         "libutils",
         "libvulkan",
@@ -124,6 +125,7 @@
         "SurfaceFlinger.cpp",
         "SurfaceInterceptor.cpp",
         "SurfaceTracing.cpp",
+        "TimeStats/TimeStats.cpp",
         "Transform.cpp",
     ],
 }
@@ -172,6 +174,7 @@
         "liblayers_proto",
         "liblog",
         "libsurfaceflinger",
+        "libtimestats_proto",
         "libutils",
     ],
     static_libs: [
@@ -213,5 +216,6 @@
 
 subdirs = [
     "layerproto",
+    "TimeStats/timestatsproto",
     "tests",
 ]
diff --git a/services/surfaceflinger/BufferLayer.cpp b/services/surfaceflinger/BufferLayer.cpp
index 7fd9d01..6feec53 100644
--- a/services/surfaceflinger/BufferLayer.cpp
+++ b/services/surfaceflinger/BufferLayer.cpp
@@ -316,6 +316,9 @@
     nsecs_t desiredPresentTime = mConsumer->getTimestamp();
     mFrameTracker.setDesiredPresentTime(desiredPresentTime);
 
+    const std::string layerName(getName().c_str());
+    mTimeStats.setDesiredTime(layerName, mCurrentFrameNumber, desiredPresentTime);
+
     std::shared_ptr<FenceTime> frameReadyFence = mConsumer->getCurrentFenceTime();
     if (frameReadyFence->isValid()) {
         mFrameTracker.setFrameReadyFence(std::move(frameReadyFence));
@@ -326,12 +329,15 @@
     }
 
     if (presentFence->isValid()) {
+        mTimeStats.setPresentFence(layerName, mCurrentFrameNumber, presentFence);
         mFrameTracker.setActualPresentFence(std::shared_ptr<FenceTime>(presentFence));
     } else {
         // The HWC doesn't support present fences, so use the refresh
         // timestamp instead.
-        mFrameTracker.setActualPresentTime(
-                mFlinger->getHwComposer().getRefreshTimestamp(HWC_DISPLAY_PRIMARY));
+        const nsecs_t actualPresentTime =
+                mFlinger->getHwComposer().getRefreshTimestamp(HWC_DISPLAY_PRIMARY);
+        mTimeStats.setPresentTime(layerName, mCurrentFrameNumber, actualPresentTime);
+        mFrameTracker.setActualPresentTime(actualPresentTime);
     }
 
     mFrameTracker.advanceFrame();
@@ -441,6 +447,7 @@
         // and return early
         if (queuedBuffer) {
             Mutex::Autolock lock(mQueueItemLock);
+            mTimeStats.removeTimeRecord(getName().c_str(), mQueueItems[0].mFrameNumber);
             mQueueItems.removeAt(0);
             android_atomic_dec(&mQueuedFrames);
         }
@@ -454,6 +461,7 @@
             Mutex::Autolock lock(mQueueItemLock);
             mQueueItems.clear();
             android_atomic_and(0, &mQueuedFrames);
+            mTimeStats.clearLayerRecord(getName().c_str());
         }
 
         // Once we have hit this state, the shadow queue may no longer
@@ -474,10 +482,15 @@
         // Remove any stale buffers that have been dropped during
         // updateTexImage
         while (mQueueItems[0].mFrameNumber != currentFrameNumber) {
+            mTimeStats.removeTimeRecord(getName().c_str(), mQueueItems[0].mFrameNumber);
             mQueueItems.removeAt(0);
             android_atomic_dec(&mQueuedFrames);
         }
 
+        const std::string layerName(getName().c_str());
+        mTimeStats.setAcquireFence(layerName, currentFrameNumber, mQueueItems[0].mFenceTime);
+        mTimeStats.setLatchTime(layerName, currentFrameNumber, latchTime);
+
         mQueueItems.removeAt(0);
     }
 
@@ -515,11 +528,9 @@
         recomputeVisibleRegions = true;
     }
 
-    // Dataspace::V0_SRGB and Dataspace::V0_SRGB_LINEAR are not legacy
-    // data space, however since framework doesn't distinguish them out of
-    // legacy SRGB, we have to treat them as the same for now.
-    // UNKNOWN is treated as legacy SRGB when the connected api is EGL.
     ui::Dataspace dataSpace = mConsumer->getCurrentDataSpace();
+    // treat modern dataspaces as legacy dataspaces whenever possible, until
+    // we can trust the buffer producers
     switch (dataSpace) {
         case ui::Dataspace::V0_SRGB:
             dataSpace = ui::Dataspace::SRGB;
@@ -527,10 +538,17 @@
         case ui::Dataspace::V0_SRGB_LINEAR:
             dataSpace = ui::Dataspace::SRGB_LINEAR;
             break;
-        case ui::Dataspace::UNKNOWN:
-            if (mConsumer->getCurrentApi() == NATIVE_WINDOW_API_EGL) {
-                dataSpace = ui::Dataspace::SRGB;
-            }
+        case ui::Dataspace::V0_JFIF:
+            dataSpace = ui::Dataspace::JFIF;
+            break;
+        case ui::Dataspace::V0_BT601_625:
+            dataSpace = ui::Dataspace::BT601_625;
+            break;
+        case ui::Dataspace::V0_BT601_525:
+            dataSpace = ui::Dataspace::BT601_525;
+            break;
+        case ui::Dataspace::V0_BT709:
+            dataSpace = ui::Dataspace::BT709;
             break;
         default:
             break;
diff --git a/services/surfaceflinger/DisplayDevice.cpp b/services/surfaceflinger/DisplayDevice.cpp
index 7c6302e..2b1e577 100644
--- a/services/surfaceflinger/DisplayDevice.cpp
+++ b/services/surfaceflinger/DisplayDevice.cpp
@@ -80,6 +80,7 @@
         bool hasWideColorGamut,
         const HdrCapabilities& hdrCapabilities,
         const int32_t supportedPerFrameMetadata,
+        const std::unordered_map<ui::ColorMode, std::vector<ui::RenderIntent>>& hdrAndRenderIntents,
         int initialPowerMode)
     : lastCompositionHadVisibleLayers(false),
       mFlinger(flinger),
@@ -105,10 +106,15 @@
       mHasHdr10(false),
       mHasHLG(false),
       mHasDolbyVision(false),
-      mSupportedPerFrameMetadata(supportedPerFrameMetadata)
+      mSupportedPerFrameMetadata(supportedPerFrameMetadata),
+      mHasBT2100PQColorimetric(false),
+      mHasBT2100PQEnhance(false),
+      mHasBT2100HLGColorimetric(false),
+      mHasBT2100HLGEnhance(false)
 {
     // clang-format on
-    for (Hdr hdrType : hdrCapabilities.getSupportedHdrTypes()) {
+    std::vector<Hdr> types = hdrCapabilities.getSupportedHdrTypes();
+    for (Hdr hdrType : types) {
         switch (hdrType) {
             case Hdr::HDR10:
                 mHasHdr10 = true;
@@ -124,6 +130,38 @@
         }
     }
 
+    float minLuminance = hdrCapabilities.getDesiredMinLuminance();
+    float maxLuminance = hdrCapabilities.getDesiredMaxLuminance();
+    float maxAverageLuminance = hdrCapabilities.getDesiredMaxAverageLuminance();
+
+    minLuminance = minLuminance <= 0.0 ? sDefaultMinLumiance : minLuminance;
+    maxLuminance = maxLuminance <= 0.0 ? sDefaultMaxLumiance : maxLuminance;
+    maxAverageLuminance = maxAverageLuminance <= 0.0 ? sDefaultMaxLumiance : maxAverageLuminance;
+    if (this->hasWideColorGamut()) {
+        // insert HDR10/HLG as we will force client composition for HDR10/HLG
+        // layers
+        if (!hasHDR10Support()) {
+          types.push_back(Hdr::HDR10);
+        }
+
+        if (!hasHLGSupport()) {
+          types.push_back(Hdr::HLG);
+        }
+    }
+    mHdrCapabilities = HdrCapabilities(types, maxLuminance, maxAverageLuminance, minLuminance);
+
+    auto iter = hdrAndRenderIntents.find(ColorMode::BT2100_PQ);
+    if (iter != hdrAndRenderIntents.end()) {
+        hasToneMapping(iter->second,
+                       &mHasBT2100PQColorimetric, &mHasBT2100PQEnhance);
+    }
+
+    iter = hdrAndRenderIntents.find(ColorMode::BT2100_HLG);
+    if (iter != hdrAndRenderIntents.end()) {
+        hasToneMapping(iter->second,
+                       &mHasBT2100HLGColorimetric, &mHasBT2100HLGEnhance);
+    }
+
     // initialize the display orientation transform.
     setProjection(DisplayState::eOrientationDefault, mViewport, mFrame);
 }
@@ -508,6 +546,22 @@
     result.append(surfaceDump);
 }
 
+void DisplayDevice::hasToneMapping(const std::vector<RenderIntent>& renderIntents,
+                                   bool* outColorimetric, bool *outEnhance) {
+    for (auto intent : renderIntents) {
+        switch (intent) {
+            case RenderIntent::TONE_MAP_COLORIMETRIC:
+                *outColorimetric = true;
+                break;
+            case RenderIntent::TONE_MAP_ENHANCE:
+                *outEnhance = true;
+                break;
+            default:
+                break;
+        }
+    }
+}
+
 std::atomic<int32_t> DisplayDeviceState::nextDisplayId(1);
 
 DisplayDeviceState::DisplayDeviceState(DisplayDevice::DisplayType type, bool isSecure)
diff --git a/services/surfaceflinger/DisplayDevice.h b/services/surfaceflinger/DisplayDevice.h
index df5d945..d051e33 100644
--- a/services/surfaceflinger/DisplayDevice.h
+++ b/services/surfaceflinger/DisplayDevice.h
@@ -20,6 +20,7 @@
 #include "Transform.h"
 
 #include <stdlib.h>
+#include <unordered_map>
 
 #include <math/mat4.h>
 
@@ -27,6 +28,7 @@
 #include <gui/ISurfaceComposer.h>
 #include <hardware/hwcomposer_defs.h>
 #include <ui/GraphicTypes.h>
+#include <ui/HdrCapabilities.h>
 #include <ui/Region.h>
 #include <utils/RefBase.h>
 #include <utils/Mutex.h>
@@ -53,6 +55,9 @@
 class DisplayDevice : public LightRefBase<DisplayDevice>
 {
 public:
+    constexpr static float sDefaultMinLumiance = 0.0;
+    constexpr static float sDefaultMaxLumiance = 500.0;
+
     // region in layer-stack space
     mutable Region dirtyRegion;
     // region in screen space
@@ -86,6 +91,7 @@
             bool hasWideColorGamut,
             const HdrCapabilities& hdrCapabilities,
             const int32_t supportedPerFrameMetadata,
+            const std::unordered_map<ui::ColorMode, std::vector<ui::RenderIntent>>& hdrAndRenderIntents,
             int initialPowerMode);
     // clang-format on
 
@@ -138,9 +144,25 @@
     status_t beginFrame(bool mustRecompose) const;
     status_t prepareFrame(HWComposer& hwc);
     bool hasWideColorGamut() const { return mHasWideColorGamut; }
+    // Whether h/w composer has native support for specific HDR type.
     bool hasHDR10Support() const { return mHasHdr10; }
     bool hasHLGSupport() const { return mHasHLG; }
     bool hasDolbyVisionSupport() const { return mHasDolbyVision; }
+    // The returned HdrCapabilities is the combination of HDR capabilities from
+    // hardware composer and RenderEngine. When the DisplayDevice supports wide
+    // color gamut, RenderEngine is able to simulate HDR support in Display P3
+    // color space for both PQ and HLG HDR contents. The minimum and maximum
+    // luminance will be set to sDefaultMinLumiance and sDefaultMaxLumiance
+    // respectively if hardware composer doesn't return meaningful values.
+    const HdrCapabilities& getHdrCapabilities() const { return mHdrCapabilities; }
+
+    // Whether h/w composer has BT2100_PQ color mode.
+    bool hasBT2100PQColorimetricSupport() const { return mHasBT2100PQColorimetric; }
+    bool hasBT2100PQEnhanceSupport() const { return mHasBT2100PQEnhance; }
+
+    // Whether h/w composer has BT2100_HLG color mode.
+    bool hasBT2100HLGColorimetricSupport() const { return mHasBT2100HLGColorimetric; }
+    bool hasBT2100HLGEnhanceSupport() const { return mHasBT2100HLGEnhance; }
 
     void swapBuffers(HWComposer& hwc) const;
 
@@ -192,6 +214,9 @@
     void dump(String8& result) const;
 
 private:
+    void hasToneMapping(const std::vector<ui::RenderIntent>& renderIntents,
+                        bool* outColorimetric, bool *outEnhance);
+
     /*
      *  Constants, set during initialization
      */
@@ -261,8 +286,14 @@
     bool mHasHdr10;
     bool mHasHLG;
     bool mHasDolbyVision;
-
+    HdrCapabilities mHdrCapabilities;
     const int32_t mSupportedPerFrameMetadata;
+    // Whether the h/w composer supports the BT2100_PQ and BT2100_HLG color modes
+    // with colorimetric tone mapping or enhanced tone mapping.
+    bool mHasBT2100PQColorimetric;
+    bool mHasBT2100PQEnhance;
+    bool mHasBT2100HLGColorimetric;
+    bool mHasBT2100HLGEnhance;
 };
 
 struct DisplayDeviceState {
@@ -309,6 +340,9 @@
     ui::Dataspace getDataSpace() const override {
         return mDevice->getCompositionDataSpace();
     }
+    float getDisplayMaxLuminance() const override {
+        return mDevice->getHdrCapabilities().getDesiredMaxLuminance();
+    }
 
 private:
     const sp<const DisplayDevice> mDevice;
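
For illustration, a worked example of the combined HdrCapabilities described above, for a wide-color-gamut display whose hardware composer reports no HDR types and no luminance values (numbers are illustrative; the defaults are sDefaultMinLumiance and sDefaultMaxLumiance):

    // HWC reports:  types = {},            max = 0.0,   maxAverage = 0.0,   min = 0.0  (nits)
    // Combined:     types = {HDR10, HLG},  max = 500.0, maxAverage = 500.0, min = 0.0
    //               (HDR10/HLG are simulated via client composition in Display P3)
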
diff --git a/services/surfaceflinger/DisplayHardware/HWComposer.cpp b/services/surfaceflinger/DisplayHardware/HWComposer.cpp
index f111f2b..96d691c 100644
--- a/services/surfaceflinger/DisplayHardware/HWComposer.cpp
+++ b/services/surfaceflinger/DisplayHardware/HWComposer.cpp
@@ -596,7 +596,10 @@
             ALOGV("setPowerMode: Calling HWC %s", to_string(mode).c_str());
             {
                 auto error = hwcDisplay->setPowerMode(mode);
-                LOG_HWC_ERROR(("setPowerMode(" + to_string(mode) + ")").c_str(), error, displayId);
+                if (error != HWC2::Error::None) {
+                    LOG_HWC_ERROR(("setPowerMode(" + to_string(mode) + ")").c_str(),
+                                  error, displayId);
+                }
             }
             break;
         case HWC2::PowerMode::Doze:
@@ -605,14 +608,19 @@
             {
                 bool supportsDoze = false;
                 auto error = hwcDisplay->supportsDoze(&supportsDoze);
-                LOG_HWC_ERROR("supportsDoze", error, displayId);
+                if (error != HWC2::Error::None) {
+                    LOG_HWC_ERROR("supportsDoze", error, displayId);
+                }
 
                 if (!supportsDoze) {
                     mode = HWC2::PowerMode::On;
                 }
 
                 error = hwcDisplay->setPowerMode(mode);
-                LOG_HWC_ERROR(("setPowerMode(" + to_string(mode) + ")").c_str(), error, displayId);
+                if (error != HWC2::Error::None) {
+                    LOG_HWC_ERROR(("setPowerMode(" + to_string(mode) + ")").c_str(),
+                                  error, displayId);
+                }
             }
             break;
         default:
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index bbc974d..9043234 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -1532,10 +1532,16 @@
 void Layer::onDisconnect() {
     Mutex::Autolock lock(mFrameEventHistoryMutex);
     mFrameEventHistory.onDisconnect();
+    mTimeStats.onDisconnect(getName().c_str());
 }
 
 void Layer::addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps,
                                      FrameEventHistoryDelta* outDelta) {
+    if (newTimestamps) {
+        mTimeStats.setPostTime(getName().c_str(), newTimestamps->frameNumber,
+                               newTimestamps->postedTime);
+    }
+
     Mutex::Autolock lock(mFrameEventHistoryMutex);
     if (newTimestamps) {
         // If there are any unsignaled fences in the aquire timeline at this
@@ -1644,9 +1650,10 @@
     return true;
 }
 
-bool Layer::isLegacySrgbDataSpace() const {
-    return mDrawingState.dataSpace == ui::Dataspace::SRGB ||
-        mDrawingState.dataSpace == ui::Dataspace::SRGB_LINEAR;
+bool Layer::isLegacyDataSpace() const {
+    // return true when no higher bits are set
+    return !(mDrawingState.dataSpace & (ui::Dataspace::STANDARD_MASK |
+                ui::Dataspace::TRANSFER_MASK | ui::Dataspace::RANGE_MASK));
 }
 
 void Layer::setParent(const sp<Layer>& layer) {
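
The new test keys off the packed dataspace fields instead of an explicit list of SRGB values. A small sketch of the rule and how it classifies two of the constants handled elsewhere in this change (assumes the AOSP ui::Dataspace definitions; the free function is illustrative):

    #include <ui/GraphicTypes.h>

    // Illustrative restatement of the rule: a dataspace is legacy when the
    // STANDARD, TRANSFER and RANGE fields are all zero.
    static bool isLegacy(android::ui::Dataspace d) {
        using android::ui::Dataspace;
        return !(d & (Dataspace::STANDARD_MASK | Dataspace::TRANSFER_MASK |
                      Dataspace::RANGE_MASK));
    }
    // isLegacy(Dataspace::SRGB)    -> true   (deprecated constant, low bits only)
    // isLegacy(Dataspace::V0_SRGB) -> false  (standard/transfer/range fields set)
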
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index be3967b..632efbe 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -41,6 +41,7 @@
 #include "LayerVector.h"
 #include "MonitoredProducer.h"
 #include "SurfaceFlinger.h"
+#include "TimeStats/TimeStats.h"
 #include "Transform.h"
 
 #include <layerproto/LayerProtoHeader.h>
@@ -302,8 +303,8 @@
     // desaturated in order to match what they appear like visually.
     // With color management, these contents will appear desaturated, thus
     // need to be saturated so that they match what they are designed for
-    // visually. When returns true, legacy SRGB data space is passed to HWC.
-    bool isLegacySrgbDataSpace() const;
+    // visually.
+    bool isLegacyDataSpace() const;
 
     // If we have received a new buffer this frame, we will pass its surface
     // damage down to hardware composer. Otherwise, we must send a region with
@@ -737,6 +738,8 @@
     FenceTimeline mAcquireTimeline;
     FenceTimeline mReleaseTimeline;
 
+    TimeStats& mTimeStats = TimeStats::getInstance();
+
     // main thread
     int mActiveBufferSlot;
     sp<GraphicBuffer> mActiveBuffer;
diff --git a/services/surfaceflinger/RenderArea.h b/services/surfaceflinger/RenderArea.h
index 3630677..4694403 100644
--- a/services/surfaceflinger/RenderArea.h
+++ b/services/surfaceflinger/RenderArea.h
@@ -32,6 +32,7 @@
     virtual Rect getSourceCrop() const = 0;
     virtual bool getWideColorSupport() const = 0;
     virtual ui::Dataspace getDataSpace() const = 0;
+    virtual float getDisplayMaxLuminance() const = 0;
 
     virtual void render(std::function<void()> drawLayers) { drawLayers(); }
 
diff --git a/services/surfaceflinger/RenderEngine/Description.cpp b/services/surfaceflinger/RenderEngine/Description.cpp
index 323bdb2..c218e4d 100644
--- a/services/surfaceflinger/RenderEngine/Description.cpp
+++ b/services/surfaceflinger/RenderEngine/Description.cpp
@@ -52,9 +52,30 @@
 }
 
 void Description::setColorMatrix(const mat4& mtx) {
-    const mat4 identity;
     mColorMatrix = mtx;
-    mColorMatrixEnabled = (mtx != identity);
+}
+
+void Description::setInputTransformMatrix(const mat3& matrix) {
+    mInputTransformMatrix = matrix;
+}
+
+void Description::setOutputTransformMatrix(const mat4& matrix) {
+    mOutputTransformMatrix = matrix;
+}
+
+bool Description::hasInputTransformMatrix() const {
+    const mat3 identity;
+    return mInputTransformMatrix != identity;
+}
+
+bool Description::hasOutputTransformMatrix() const {
+    const mat4 identity;
+    return mOutputTransformMatrix != identity;
+}
+
+bool Description::hasColorMatrix() const {
+    const mat4 identity;
+    return mColorMatrix != identity;
 }
 
 const mat4& Description::getColorMatrix() const {
@@ -73,4 +94,8 @@
     mOutputTransferFunction = transferFunction;
 }
 
+void Description::setDisplayMaxLuminance(const float maxLuminance) {
+    mDisplayMaxLuminance = maxLuminance;
+}
+
 } /* namespace android */
diff --git a/services/surfaceflinger/RenderEngine/Description.h b/services/surfaceflinger/RenderEngine/Description.h
index 5854ba4..6ebb340 100644
--- a/services/surfaceflinger/RenderEngine/Description.h
+++ b/services/surfaceflinger/RenderEngine/Description.h
@@ -43,6 +43,11 @@
     void setColor(const half4& color);
     void setProjectionMatrix(const mat4& mtx);
     void setColorMatrix(const mat4& mtx);
+    void setInputTransformMatrix(const mat3& matrix);
+    void setOutputTransformMatrix(const mat4& matrix);
+    bool hasInputTransformMatrix() const;
+    bool hasOutputTransformMatrix() const;
+    bool hasColorMatrix() const;
     const mat4& getColorMatrix() const;
 
     void setY410BT2020(bool enable);
@@ -55,6 +60,7 @@
     };
     void setInputTransferFunction(TransferFunction transferFunction);
     void setOutputTransferFunction(TransferFunction transferFunction);
+    void setDisplayMaxLuminance(const float maxLuminance);
 
 private:
     friend class Program;
@@ -71,11 +77,6 @@
 
     // color used when texturing is disabled or when setting alpha.
     half4 mColor;
-    // projection matrix
-    mat4 mProjectionMatrix;
-
-    bool mColorMatrixEnabled = false;
-    mat4 mColorMatrix;
 
     // true if the sampled pixel values are in Y410/BT2020 rather than RGBA
     bool mY410BT2020 = false;
@@ -83,6 +84,14 @@
     // transfer functions for the input/output
     TransferFunction mInputTransferFunction = TransferFunction::LINEAR;
     TransferFunction mOutputTransferFunction = TransferFunction::LINEAR;
+
+    float mDisplayMaxLuminance;
+
+    // projection matrix
+    mat4 mProjectionMatrix;
+    mat4 mColorMatrix;
+    mat3 mInputTransformMatrix;
+    mat4 mOutputTransformMatrix;
 };
 
 } /* namespace android */
diff --git a/services/surfaceflinger/RenderEngine/GLES20RenderEngine.cpp b/services/surfaceflinger/RenderEngine/GLES20RenderEngine.cpp
index 6e0fa32..64095dd 100644
--- a/services/surfaceflinger/RenderEngine/GLES20RenderEngine.cpp
+++ b/services/surfaceflinger/RenderEngine/GLES20RenderEngine.cpp
@@ -131,15 +131,24 @@
     // mColorBlindnessCorrection = M;
 
     if (mPlatformHasWideColor) {
-        // Compute sRGB to DisplayP3 color transform
-        // NOTE: For now, we are limiting wide-color support to
-        // Display-P3 only.
-        mSrgbToDisplayP3 = mat4(
-                ColorSpaceConnector(ColorSpace::sRGB(), ColorSpace::DisplayP3()).getTransform());
+        ColorSpace srgb(ColorSpace::sRGB());
+        ColorSpace displayP3(ColorSpace::DisplayP3());
+        ColorSpace bt2020(ColorSpace::BT2020());
 
-        // Compute BT2020 to DisplayP3 color transform
-        mBt2020ToDisplayP3 = mat4(
-                ColorSpaceConnector(ColorSpace::BT2020(), ColorSpace::DisplayP3()).getTransform());
+        // Compute sRGB to Display P3 transform matrix.
+        // NOTE: For now, we are limiting output wide color space support to
+        // Display-P3 only.
+        mSrgbToDisplayP3 = mat4(ColorSpaceConnector(srgb, displayP3).getTransform());
+
+        // Compute Display P3 to sRGB transform matrix.
+        mDisplayP3ToSrgb = mat4(ColorSpaceConnector(displayP3, srgb).getTransform());
+
+        // no chromatic adaptation needed since all color spaces use D65 for their white points.
+        mSrgbToXyz = srgb.getRGBtoXYZ();
+        mDisplayP3ToXyz = displayP3.getRGBtoXYZ();
+        mBt2020ToXyz = bt2020.getRGBtoXYZ();
+        mXyzToDisplayP3 = mat4(displayP3.getXYZtoRGB());
+        mXyzToBt2020 = mat4(bt2020.getXYZtoRGB());
     }
 }
 
@@ -224,6 +233,10 @@
     mOutputDataSpace = dataspace;
 }
 
+void GLES20RenderEngine::setDisplayMaxLuminance(const float maxLuminance) {
+    mState.setDisplayMaxLuminance(maxLuminance);
+}
+
 void GLES20RenderEngine::setupLayerTexturing(const Texture& texture) {
     GLuint target = texture.getTextureTarget();
     glBindTexture(target, texture.getTextureName());
@@ -246,10 +259,8 @@
     mState.setTexture(texture);
 }
 
-mat4 GLES20RenderEngine::setupColorTransform(const mat4& colorTransform) {
-    mat4 oldTransform = mState.getColorMatrix();
+void GLES20RenderEngine::setupColorTransform(const mat4& colorTransform) {
     mState.setColorMatrix(colorTransform);
-    return oldTransform;
 }
 
 void GLES20RenderEngine::disableTexturing() {
@@ -303,44 +314,96 @@
     glVertexAttribPointer(Program::position, mesh.getVertexSize(), GL_FLOAT, GL_FALSE,
                           mesh.getByteStride(), mesh.getPositions());
 
-    // TODO(b/73825729) Refactor this code block to handle BT2020 color space properly.
-    // DISPLAY_P3 is the only supported wide color output
-    if (mPlatformHasWideColor && mOutputDataSpace == Dataspace::DISPLAY_P3) {
+    // By default, DISPLAY_P3 is the only supported wide color output. However,
+    // when HDR content is present, the hardware composer may be able to handle
+    // the BT2020 data space; in that case, the output data space is set to
+    // BT2020_HLG or BT2020_PQ respectively. In the GPU fallback path we need
+    // to respect this and convert non-HDR content to HDR format.
+    if (mPlatformHasWideColor) {
         Description wideColorState = mState;
-        switch (mDataSpace) {
-            case Dataspace::DISPLAY_P3:
-                // input matches output
-                break;
-            case Dataspace::BT2020_PQ:
-            case Dataspace::BT2020_ITU_PQ:
-                wideColorState.setColorMatrix(mState.getColorMatrix() * mBt2020ToDisplayP3);
-                wideColorState.setInputTransferFunction(Description::TransferFunction::ST2084);
-                wideColorState.setOutputTransferFunction(Description::TransferFunction::SRGB);
-                break;
-            case Dataspace::BT2020_HLG:
-            case Dataspace::BT2020_ITU_HLG:
-                wideColorState.setColorMatrix(mState.getColorMatrix() * mBt2020ToDisplayP3);
-                wideColorState.setInputTransferFunction(Description::TransferFunction::HLG);
-                wideColorState.setOutputTransferFunction(Description::TransferFunction::SRGB);
-                break;
-            default:
-                // treat all other dataspaces as sRGB
-                wideColorState.setColorMatrix(mState.getColorMatrix() * mSrgbToDisplayP3);
-                switch (static_cast<Dataspace>(mDataSpace & Dataspace::TRANSFER_MASK)) {
-                    case Dataspace::TRANSFER_LINEAR:
-                        wideColorState.setInputTransferFunction(
-                                Description::TransferFunction::LINEAR);
-                        break;
-                    default:
-                        // treat all other transfer functions as sRGB
-                        wideColorState.setInputTransferFunction(
-                                Description::TransferFunction::SRGB);
-                        break;
-                }
-                wideColorState.setOutputTransferFunction(Description::TransferFunction::SRGB);
-                ALOGV("drawMesh: gamut transform applied");
-                break;
+        Dataspace inputStandard = static_cast<Dataspace>(mDataSpace & Dataspace::STANDARD_MASK);
+        Dataspace inputTransfer = static_cast<Dataspace>(mDataSpace & Dataspace::TRANSFER_MASK);
+        Dataspace outputStandard = static_cast<Dataspace>(mOutputDataSpace &
+                                                          Dataspace::STANDARD_MASK);
+        Dataspace outputTransfer = static_cast<Dataspace>(mOutputDataSpace &
+                                                          Dataspace::TRANSFER_MASK);
+        bool needsXYZConversion = needsXYZTransformMatrix();
+
+        if (needsXYZConversion) {
+            // The supported input color spaces are standard RGB, Display P3 and BT2020.
+            switch (inputStandard) {
+                case Dataspace::STANDARD_DCI_P3:
+                    wideColorState.setInputTransformMatrix(mDisplayP3ToXyz);
+                    break;
+                case Dataspace::STANDARD_BT2020:
+                    wideColorState.setInputTransformMatrix(mBt2020ToXyz);
+                    break;
+                default:
+                    wideColorState.setInputTransformMatrix(mSrgbToXyz);
+                    break;
+            }
+
+            // The supported output color spaces are Display P3 and BT2020.
+            switch (outputStandard) {
+                case Dataspace::STANDARD_BT2020:
+                    wideColorState.setOutputTransformMatrix(mXyzToBt2020);
+                    break;
+                default:
+                    wideColorState.setOutputTransformMatrix(mXyzToDisplayP3);
+                    break;
+            }
+        } else if (inputStandard != outputStandard) {
+            // At this point, the input data space and output data space could be both
+            // HDR data spaces, but they match each other, we do nothing in this case.
+            // In addition to the case above, the input data space could be
+            // - scRGB linear
+            // - scRGB non-linear
+            // - sRGB
+            // - Display P3
+            // The output data spaces could be
+            // - sRGB
+            // - Display P3
+            if (outputStandard == Dataspace::STANDARD_BT709) {
+                wideColorState.setOutputTransformMatrix(mDisplayP3ToSrgb);
+            } else if (outputStandard == Dataspace::STANDARD_DCI_P3) {
+                wideColorState.setOutputTransformMatrix(mSrgbToDisplayP3);
+            }
         }
+
+        // we need to convert the RGB value to linear space and convert it back when:
+        // - there is a color matrix that is not an identity matrix, or
+        // - there is an output transform matrix that is not an identity matrix, or
+        // - the input transfer function doesn't match the output transfer function.
+        if (wideColorState.hasColorMatrix() || wideColorState.hasOutputTransformMatrix() ||
+            inputTransfer != outputTransfer) {
+            switch (inputTransfer) {
+                case Dataspace::TRANSFER_ST2084:
+                    wideColorState.setInputTransferFunction(Description::TransferFunction::ST2084);
+                    break;
+                case Dataspace::TRANSFER_HLG:
+                    wideColorState.setInputTransferFunction(Description::TransferFunction::HLG);
+                    break;
+                case Dataspace::TRANSFER_LINEAR:
+                    wideColorState.setInputTransferFunction(Description::TransferFunction::LINEAR);
+                    break;
+                default:
+                    wideColorState.setInputTransferFunction(Description::TransferFunction::SRGB);
+                    break;
+            }
+
+            switch (outputTransfer) {
+                case Dataspace::TRANSFER_ST2084:
+                    wideColorState.setOutputTransferFunction(Description::TransferFunction::ST2084);
+                    break;
+                case Dataspace::TRANSFER_HLG:
+                    wideColorState.setOutputTransferFunction(Description::TransferFunction::HLG);
+                    break;
+                default:
+                    wideColorState.setOutputTransferFunction(Description::TransferFunction::SRGB);
+                    break;
+            }
+        }
+
         ProgramCache::getInstance().useProgram(wideColorState);
 
         glDrawArrays(mesh.getPrimitive(), 0, mesh.getVertexCount());
@@ -369,6 +432,33 @@
                         dataspaceDetails(static_cast<android_dataspace>(mOutputDataSpace)).c_str());
 }
 
+bool GLES20RenderEngine::isHdrDataSpace(const Dataspace dataSpace) const {
+    const Dataspace standard = static_cast<Dataspace>(dataSpace & Dataspace::STANDARD_MASK);
+    const Dataspace transfer = static_cast<Dataspace>(dataSpace & Dataspace::TRANSFER_MASK);
+    return standard == Dataspace::STANDARD_BT2020 &&
+        (transfer == Dataspace::TRANSFER_ST2084 || transfer == Dataspace::TRANSFER_HLG);
+}
+
+// For convenience, we want to convert the input color space to the XYZ color space first,
+// and then convert from XYZ to the output color space when
+// - SDR and HDR contents are mixed, in which case either SDR content is converted to HDR or
+//   HDR content is tone-mapped to SDR; or
+// - HDR PQ and HLG contents are presented at the same time, in which case we want to convert
+//   HLG content to PQ content.
+// In either case above, we need to operate on the Y value in the XYZ color space. Thus, when
+// either the input or the output data space is an HDR data space and the input transfer
+// function doesn't match the output transfer function, we enable an intermediate transform to
+// the XYZ color space.
+bool GLES20RenderEngine::needsXYZTransformMatrix() const {
+    const bool isInputHdrDataSpace = isHdrDataSpace(mDataSpace);
+    const bool isOutputHdrDataSpace = isHdrDataSpace(mOutputDataSpace);
+    const Dataspace inputTransfer = static_cast<Dataspace>(mDataSpace & Dataspace::TRANSFER_MASK);
+    const Dataspace outputTransfer = static_cast<Dataspace>(mOutputDataSpace &
+                                                            Dataspace::TRANSFER_MASK);
+
+    return (isInputHdrDataSpace || isOutputHdrDataSpace) && inputTransfer != outputTransfer;
+}
+
 // ---------------------------------------------------------------------------
 } // namespace impl
 } // namespace RE
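
A few concrete cases of the rule implemented by needsXYZTransformMatrix() above, i.e. route through XYZ only when at least one side is HDR (BT2020 with PQ or HLG) and the transfer functions differ (dataspace names are from ui::Dataspace; the outcomes follow the logic in this file):

    // input -> output           : decision
    // BT2020_PQ  -> DISPLAY_P3  : HDR in, SDR out, ST2084 != SRGB -> XYZ path (tone map)
    // DISPLAY_P3 -> BT2020_PQ   : SDR in, HDR out, SRGB != ST2084 -> XYZ path (inverse tone map)
    // BT2020_HLG -> BT2020_PQ   : both HDR, HLG != ST2084         -> XYZ path (HLG to PQ)
    // BT2020_PQ  -> BT2020_PQ   : transfer functions match        -> no XYZ path
    // V0_SRGB    -> DISPLAY_P3  : no HDR on either side           -> no XYZ path (gamut matrix only)
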
diff --git a/services/surfaceflinger/RenderEngine/GLES20RenderEngine.h b/services/surfaceflinger/RenderEngine/GLES20RenderEngine.h
index 99da19d..c9e402d 100644
--- a/services/surfaceflinger/RenderEngine/GLES20RenderEngine.h
+++ b/services/surfaceflinger/RenderEngine/GLES20RenderEngine.h
@@ -75,6 +75,19 @@
     void setSourceY410BT2020(bool enable) override;
     void setSourceDataSpace(ui::Dataspace source) override;
     void setOutputDataSpace(ui::Dataspace dataspace) override;
+    void setDisplayMaxLuminance(const float maxLuminance) override;
+
+    virtual void setupLayerTexturing(const Texture& texture);
+    virtual void setupLayerBlackedOut();
+    virtual void setupFillWithColor(float r, float g, float b, float a);
+    virtual void setupColorTransform(const mat4& colorTransform);
+    virtual void disableTexturing();
+    virtual void disableBlending();
+
+    virtual void drawMesh(const Mesh& mesh);
+
+    virtual size_t getMaxTextureSize() const;
+    virtual size_t getMaxViewportDims() const;
 
     // Current dataspace of layer being rendered
     ui::Dataspace mDataSpace = ui::Dataspace::UNKNOWN;
@@ -85,19 +98,18 @@
     // Currently only supporting sRGB, BT2020 and DisplayP3 color spaces
     const bool mPlatformHasWideColor = false;
     mat4 mSrgbToDisplayP3;
-    mat4 mBt2020ToDisplayP3;
+    mat4 mDisplayP3ToSrgb;
+    mat3 mSrgbToXyz;
+    mat3 mBt2020ToXyz;
+    mat3 mDisplayP3ToXyz;
+    mat4 mXyzToDisplayP3;
+    mat4 mXyzToBt2020;
 
-    virtual void setupLayerTexturing(const Texture& texture);
-    virtual void setupLayerBlackedOut();
-    virtual void setupFillWithColor(float r, float g, float b, float a);
-    virtual mat4 setupColorTransform(const mat4& colorTransform);
-    virtual void disableTexturing();
-    virtual void disableBlending();
-
-    virtual void drawMesh(const Mesh& mesh);
-
-    virtual size_t getMaxTextureSize() const;
-    virtual size_t getMaxViewportDims() const;
+private:
+    // A data space is considered HDR data space if it has BT2020 color space
+    // with PQ or HLG transfer function.
+    bool isHdrDataSpace(const ui::Dataspace dataSpace) const;
+    bool needsXYZTransformMatrix() const;
 };
 
 // ---------------------------------------------------------------------------
diff --git a/services/surfaceflinger/RenderEngine/Program.cpp b/services/surfaceflinger/RenderEngine/Program.cpp
index 225bcf0..fd2c968 100644
--- a/services/surfaceflinger/RenderEngine/Program.cpp
+++ b/services/surfaceflinger/RenderEngine/Program.cpp
@@ -58,12 +58,13 @@
         mVertexShader = vertexId;
         mFragmentShader = fragmentId;
         mInitialized = true;
-
-        mColorMatrixLoc = glGetUniformLocation(programId, "colorMatrix");
         mProjectionMatrixLoc = glGetUniformLocation(programId, "projection");
         mTextureMatrixLoc = glGetUniformLocation(programId, "texture");
         mSamplerLoc = glGetUniformLocation(programId, "sampler");
         mColorLoc = glGetUniformLocation(programId, "color");
+        mDisplayMaxLuminanceLoc = glGetUniformLocation(programId, "displayMaxLuminance");
+        mInputTransformMatrixLoc = glGetUniformLocation(programId, "inputTransformMatrix");
+        mOutputTransformMatrixLoc = glGetUniformLocation(programId, "outputTransformMatrix");
 
         // set-up the default values for our uniforms
         glUseProgram(programId);
@@ -133,8 +134,19 @@
         const float color[4] = {desc.mColor.r, desc.mColor.g, desc.mColor.b, desc.mColor.a};
         glUniform4fv(mColorLoc, 1, color);
     }
-    if (mColorMatrixLoc >= 0) {
-        glUniformMatrix4fv(mColorMatrixLoc, 1, GL_FALSE, desc.mColorMatrix.asArray());
+    if (mInputTransformMatrixLoc >= 0) {
+        glUniformMatrix3fv(mInputTransformMatrixLoc, 1, GL_FALSE,
+                           desc.mInputTransformMatrix.asArray());
+    }
+    if (mOutputTransformMatrixLoc >= 0) {
+        // The output transform matrix and color matrix can be combined as one matrix
+        // that is applied right before applying OETF.
+        mat4 outputTransformMatrix = desc.mColorMatrix * desc.mOutputTransformMatrix;
+        glUniformMatrix4fv(mOutputTransformMatrixLoc, 1, GL_FALSE,
+                           outputTransformMatrix.asArray());
+    }
+    if (mDisplayMaxLuminanceLoc >= 0) {
+        glUniform1f(mDisplayMaxLuminanceLoc, desc.mDisplayMaxLuminance);
     }
     // these uniforms are always present
     glUniformMatrix4fv(mProjectionMatrixLoc, 1, GL_FALSE, desc.mProjectionMatrix.asArray());
diff --git a/services/surfaceflinger/RenderEngine/Program.h b/services/surfaceflinger/RenderEngine/Program.h
index 6e57fdd..ae796c5 100644
--- a/services/surfaceflinger/RenderEngine/Program.h
+++ b/services/surfaceflinger/RenderEngine/Program.h
@@ -69,9 +69,6 @@
     /* location of the projection matrix uniform */
     GLint mProjectionMatrixLoc;
 
-    /* location of the color matrix uniform */
-    GLint mColorMatrixLoc;
-
     /* location of the texture matrix uniform */
     GLint mTextureMatrixLoc;
 
@@ -80,6 +77,13 @@
 
     /* location of the color uniform */
     GLint mColorLoc;
+
+    /* location of display luminance uniform */
+    GLint mDisplayMaxLuminanceLoc;
+
+    /* location of transform matrix */
+    GLint mInputTransformMatrixLoc;
+    GLint mOutputTransformMatrixLoc;
 };
 
 } /* namespace android */
diff --git a/services/surfaceflinger/RenderEngine/ProgramCache.cpp b/services/surfaceflinger/RenderEngine/ProgramCache.cpp
index fb63296..57a772b 100644
--- a/services/surfaceflinger/RenderEngine/ProgramCache.cpp
+++ b/services/surfaceflinger/RenderEngine/ProgramCache.cpp
@@ -125,13 +125,17 @@
                  description.mPremultipliedAlpha ? Key::BLEND_PREMULT : Key::BLEND_NORMAL)
             .set(Key::OPACITY_MASK,
                  description.mOpaque ? Key::OPACITY_OPAQUE : Key::OPACITY_TRANSLUCENT)
-            .set(Key::COLOR_MATRIX_MASK,
-                 description.mColorMatrixEnabled ? Key::COLOR_MATRIX_ON : Key::COLOR_MATRIX_OFF);
+            .set(Key::Key::INPUT_TRANSFORM_MATRIX_MASK,
+                 description.hasInputTransformMatrix() ?
+                     Key::INPUT_TRANSFORM_MATRIX_ON : Key::INPUT_TRANSFORM_MATRIX_OFF)
+            .set(Key::Key::OUTPUT_TRANSFORM_MATRIX_MASK,
+                 description.hasOutputTransformMatrix() || description.hasColorMatrix() ?
+                     Key::OUTPUT_TRANSFORM_MATRIX_ON : Key::OUTPUT_TRANSFORM_MATRIX_OFF);
 
     needs.set(Key::Y410_BT2020_MASK,
               description.mY410BT2020 ? Key::Y410_BT2020_ON : Key::Y410_BT2020_OFF);
 
-    if (needs.hasColorMatrix()) {
+    if (needs.hasTransformMatrix() || (needs.getInputTF() != needs.getOutputTF())) {
         switch (description.mInputTransferFunction) {
             case Description::TransferFunction::LINEAR:
             default:
@@ -229,103 +233,177 @@
 }
 
 // Generate OOTF that modifies the relative scene light to relative display light.
-void ProgramCache::generateOOTF(Formatter& fs, const Key& needs) {
-    fs << R"__SHADER__(
-        highp float CalculateY(const highp vec3 color) {
-            // BT2020 standard uses the unadjusted KR = 0.2627,
-            // KB = 0.0593 luminance interpretation for RGB conversion.
-            return color.r * 0.262700 + color.g * 0.677998 + color.b * 0.059302;
-        }
-    )__SHADER__";
+void ProgramCache::generateOOTF(Formatter& fs, const ProgramCache::Key& needs) {
+    // When HDR and non-HDR contents are mixed, or different types of HDR contents are
+    // mixed, we transcode the input content to match the output content. Currently,
+    // the following conversions are handled:
+    // * SDR -> HLG
+    // * SDR -> PQ
+    // * HLG -> PQ
 
-    // Generate OOTF that modifies the relative display light.
-    switch(needs.getInputTF()) {
+    // Convert relative light to absolute light.
+    switch (needs.getInputTF()) {
         case Key::INPUT_TF_ST2084:
             fs << R"__SHADER__(
-                highp vec3 OOTF(const highp vec3 color) {
-                    const float maxLumi = 10000.0;
-                    const float maxMasteringLumi = 1000.0;
-                    const float maxContentLumi = 1000.0;
-                    const float maxInLumi = min(maxMasteringLumi, maxContentLumi);
-                    const float maxOutLumi = 500.0;
-
-                    // Calculate Y value in XYZ color space.
-                    float colorY = CalculateY(color);
-
-                    // convert to nits first
-                    float nits = colorY * maxLumi;
-
-                    // clamp to max input luminance
-                    nits = clamp(nits, 0.0, maxInLumi);
-
-                    // scale [0.0, maxInLumi] to [0.0, maxOutLumi]
-                    if (maxInLumi <= maxOutLumi) {
-                        nits *= maxOutLumi / maxInLumi;
-                    } else {
-                        // three control points
-                        const float x0 = 10.0;
-                        const float y0 = 17.0;
-                        const float x1 = maxOutLumi * 0.75;
-                        const float y1 = x1;
-                        const float x2 = x1 + (maxInLumi - x1) / 2.0;
-                        const float y2 = y1 + (maxOutLumi - y1) * 0.75;
-
-                        // horizontal distances between the last three control points
-                        const float h12 = x2 - x1;
-                        const float h23 = maxInLumi - x2;
-                        // tangents at the last three control points
-                        const float m1 = (y2 - y1) / h12;
-                        const float m3 = (maxOutLumi - y2) / h23;
-                        const float m2 = (m1 + m3) / 2.0;
-
-                        if (nits < x0) {
-                            // scale [0.0, x0] to [0.0, y0] linearly
-                            const float slope = y0 / x0;
-                            nits *= slope;
-                        } else if (nits < x1) {
-                            // scale [x0, x1] to [y0, y1] linearly
-                            const float slope = (y1 - y0) / (x1 - x0);
-                            nits = y0 + (nits - x0) * slope;
-                        } else if (nits < x2) {
-                            // scale [x1, x2] to [y1, y2] using Hermite interp
-                            float t = (nits - x1) / h12;
-                            nits = (y1 * (1.0 + 2.0 * t) + h12 * m1 * t) * (1.0 - t) * (1.0 - t) +
-                                    (y2 * (3.0 - 2.0 * t) + h12 * m2 * (t - 1.0)) * t * t;
-                        } else {
-                            // scale [x2, maxInLumi] to [y2, maxOutLumi] using Hermite interp
-                            float t = (nits - x2) / h23;
-                            nits = (y2 * (1.0 + 2.0 * t) + h23 * m2 * t) * (1.0 - t) * (1.0 - t) +
-                                    (maxOutLumi * (3.0 - 2.0 * t) + h23 * m3 * (t - 1.0)) * t * t;
-                        }
-                    }
-
-                    // convert back to [0.0, 1.0]
-                    float targetY = nits / maxOutLumi;
-                    return color * (targetY / max(1e-6, colorY));
+                highp vec3 ScaleLuminance(const highp vec3 color) {
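+                    // The ST 2084 (PQ) EOTF output is normalized to [0, 1] but represents
+                    // absolute luminance up to 10,000 nits, so scale back to nits here.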
+                    return color * 10000.0;
                 }
             )__SHADER__";
             break;
         case Key::INPUT_TF_HLG:
             fs << R"__SHADER__(
-                highp vec3 OOTF(const highp vec3 color) {
-                    const float maxOutLumi = 500.0;
-                    const float gamma = 1.2 + 0.42 * log(maxOutLumi / 1000.0) / log(10.0);
+                highp vec3 ScaleLuminance(const highp vec3 color) {
                     // The formula is:
                     // alpha * pow(Y, gamma - 1.0) * color + beta;
-                    // where alpha is 1.0, beta is 0.0 as recommended in
-                    // Rec. ITU-R BT.2100-1 TABLE 5.
-                    return pow(CalculateY(color), gamma - 1.0) * color;
+                    // where alpha is 1000.0, gamma is 1.2, beta is 0.0.
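+                    // color.y (the green channel) is used as a cheap approximation of
+                    // luminance Y here.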
+                    return color * 1000.0 * pow(color.y, 0.2);
                 }
             )__SHADER__";
             break;
         default:
             fs << R"__SHADER__(
-                highp vec3 OOTF(const highp vec3 color) {
+                highp vec3 ScaleLuminance(const highp vec3 color) {
+                    return color * displayMaxLuminance;
+                }
+            )__SHADER__";
+            break;
+    }
+
+    // Tone map absolute light to display luminance range.
+    switch (needs.getInputTF()) {
+        case Key::INPUT_TF_ST2084:
+        case Key::INPUT_TF_HLG:
+            switch (needs.getOutputTF()) {
+                case Key::OUTPUT_TF_HLG:
+                    // Right now, when PQ and HLG content are mixed, HLG content
+                    // is always converted to PQ, so this path is not normally
+                    // taken. For completeness, simply clamp to [0.0, 1000.0].
+                    fs << R"__SHADER__(
+                        highp vec3 ToneMap(const highp vec3 color) {
+                            return clamp(color, 0.0, 1000.0);
+                        }
+                    )__SHADER__";
+                    break;
+                case Key::OUTPUT_TF_ST2084:
+                    fs << R"__SHADER__(
+                        highp vec3 ToneMap(const highp vec3 color) {
+                            return color;
+                        }
+                    )__SHADER__";
+                    break;
+                default:
+                    fs << R"__SHADER__(
+                        highp vec3 ToneMap(const highp vec3 color) {
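+                            // Tone map [0, maxInLumi] nits onto [0, maxOutLumi] nits with a
+                            // piecewise curve (two linear segments followed by two Hermite
+                            // segments), then rescale RGB by the luminance ratio to keep hue.
+                            // No HDR metadata is plumbed into the shader, so a 1000-nit
+                            // mastering/content peak is assumed below.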
+                            const float maxMasteringLumi = 1000.0;
+                            const float maxContentLumi = 1000.0;
+                            const float maxInLumi = min(maxMasteringLumi, maxContentLumi);
+                            float maxOutLumi = displayMaxLuminance;
+
+                            float nits = color.y;
+
+                            // clamp to max input luminance
+                            nits = clamp(nits, 0.0, maxInLumi);
+
+                            // scale [0.0, maxInLumi] to [0.0, maxOutLumi]
+                            if (maxInLumi <= maxOutLumi) {
+                                nits *= maxOutLumi / maxInLumi;
+                            } else {
+                                // three control points
+                                const float x0 = 10.0;
+                                const float y0 = 17.0;
+                                float x1 = maxOutLumi * 0.75;
+                                float y1 = x1;
+                                float x2 = x1 + (maxInLumi - x1) / 2.0;
+                                float y2 = y1 + (maxOutLumi - y1) * 0.75;
+
+                                // horizontal distances between the last three control points
+                                float h12 = x2 - x1;
+                                float h23 = maxInLumi - x2;
+                                // tangents at the last three control points
+                                float m1 = (y2 - y1) / h12;
+                                float m3 = (maxOutLumi - y2) / h23;
+                                float m2 = (m1 + m3) / 2.0;
+
+                                if (nits < x0) {
+                                    // scale [0.0, x0] to [0.0, y0] linearly
+                                    const float slope = y0 / x0;
+                                    nits *= slope;
+                                } else if (nits < x1) {
+                                    // scale [x0, x1] to [y0, y1] linearly
+                                    float slope = (y1 - y0) / (x1 - x0);
+                                    nits = y0 + (nits - x0) * slope;
+                                } else if (nits < x2) {
+                                    // scale [x1, x2] to [y1, y2] using Hermite interp
+                                    float t = (nits - x1) / h12;
+                                    nits = (y1 * (1.0 + 2.0 * t) + h12 * m1 * t) * (1.0 - t) * (1.0 - t) +
+                                            (y2 * (3.0 - 2.0 * t) + h12 * m2 * (t - 1.0)) * t * t;
+                                } else {
+                                    // scale [x2, maxInLumi] to [y2, maxOutLumi] using Hermite interp
+                                    float t = (nits - x2) / h23;
+                                    nits = (y2 * (1.0 + 2.0 * t) + h23 * m2 * t) * (1.0 - t) * (1.0 - t) +
+                                            (maxOutLumi * (3.0 - 2.0 * t) + h23 * m3 * (t - 1.0)) * t * t;
+                                }
+                            }
+
+                            return color * (nits / max(1e-6, color.y));
+                        }
+                    )__SHADER__";
+                    break;
+            }
+            break;
+        default:
+            // TODO(b/73825729) We need to properly revert the tone mapping
+            // applied by the hardware composer.
+            fs << R"__SHADER__(
+                highp vec3 ToneMap(const highp vec3 color) {
                     return color;
                 }
             )__SHADER__";
             break;
     }
+
+    // Convert absolute light to relative light.
+    switch (needs.getOutputTF()) {
+        case Key::OUTPUT_TF_ST2084:
+            fs << R"__SHADER__(
+                highp vec3 NormalizeLuminance(const highp vec3 color) {
+                    return color / 10000.0;
+                }
+            )__SHADER__";
+            break;
+        case Key::OUTPUT_TF_HLG:
+            fs << R"__SHADER__(
+                highp vec3 NormalizeLuminance(const highp vec3 color) {
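+                    // Apply the inverse HLG OOTF (system gamma 1.2, 1000-nit nominal peak)
+                    // and renormalize the result back to [0, 1].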
+                    return color / 1000.0 * pow(color.y / 1000.0, -0.2 / 1.2);
+                }
+            )__SHADER__";
+            break;
+        default:
+            fs << R"__SHADER__(
+                highp vec3 NormalizeLuminance(const highp vec3 color) {
+                    return color / displayMaxLuminance;
+                }
+            )__SHADER__";
+            break;
+    }
+
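+    // The OOTF is the identity when no luminance remapping is needed: the input and
+    // output transfer functions match, or they are the linear/sRGB pair, which only
+    // differ in encoding.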
+    if (needs.getInputTF() == needs.getOutputTF() ||
+        (needs.getInputTF() == Key::INPUT_TF_LINEAR &&
+         needs.getOutputTF() == Key::OUTPUT_TF_SRGB) ||
+        (needs.getInputTF() == Key::INPUT_TF_SRGB &&
+         needs.getOutputTF() == Key::OUTPUT_TF_LINEAR)) {
+        fs << R"__SHADER__(
+            highp vec3 OOTF(const highp vec3 color) {
+                return color;
+            }
+        )__SHADER__";
+    } else {
+        fs << R"__SHADER__(
+            highp vec3 OOTF(const highp vec3 color) {
+                return NormalizeLuminance(ToneMap(ScaleLuminance(color)));
+            }
+        )__SHADER__";
+    }
 }
 
 // Generate OETF that converts relative display light to signal values,
@@ -441,8 +519,43 @@
             )__SHADER__";
     }
 
-    if (needs.hasColorMatrix()) {
-        fs << "uniform mat4 colorMatrix;";
+    if (needs.hasTransformMatrix() || (needs.getInputTF() != needs.getOutputTF())) {
+        // The OOTF may reference the display's maximum luminance, so declare the
+        // uniform whenever the input and output transfer functions differ.
+        if (needs.getInputTF() != needs.getOutputTF()) {
+            fs << "uniform float displayMaxLuminance;";
+        }
+
+        if (needs.hasInputTransformMatrix()) {
+            fs << "uniform mat3 inputTransformMatrix;";
+            fs << R"__SHADER__(
+                highp vec3 InputTransform(const highp vec3 color) {
+                    return inputTransformMatrix * color;
+                }
+            )__SHADER__";
+        } else {
+            fs << R"__SHADER__(
+                highp vec3 InputTransform(const highp vec3 color) {
+                    return color;
+                }
+            )__SHADER__";
+        }
+
+        // the transformation from a wider colorspace to a narrower one can
+        // result in >1.0 or <0.0 pixel values
+        if (needs.hasOutputTransformMatrix()) {
+            fs << "uniform mat4 outputTransformMatrix;";
+            fs << R"__SHADER__(
+                highp vec3 OutputTransform(const highp vec3 color) {
+                    return clamp(vec3(outputTransformMatrix * vec4(color, 1.0)), 0.0, 1.0);
+                }
+            )__SHADER__";
+        } else {
+            fs << R"__SHADER__(
+                highp vec3 OutputTransform(const highp vec3 color) {
+                    return clamp(color, 0.0, 1.0);
+                }
+            )__SHADER__";
+        }
 
         generateEOTF(fs, needs);
         generateOOTF(fs, needs);
@@ -472,18 +585,13 @@
         }
     }
 
-    if (needs.hasColorMatrix()) {
+    if (needs.hasTransformMatrix() || (needs.getInputTF() != needs.getOutputTF())) {
         if (!needs.isOpaque() && needs.isPremultiplied()) {
             // un-premultiply if needed before linearization
             // avoid divide by 0 by adding 0.5/256 to the alpha channel
             fs << "gl_FragColor.rgb = gl_FragColor.rgb / (gl_FragColor.a + 0.0019);";
         }
-        fs << "vec4 transformed = colorMatrix * vec4(OOTF(EOTF(gl_FragColor.rgb)), 1);";
-        // the transformation from a wider colorspace to a narrower one can
-        // result in >1.0 or <0.0 pixel values
-        fs << "transformed.rgb = clamp(transformed.rgb, 0.0, 1.0);";
-        // We assume the last row is always {0,0,0,1} and we skip the division by w
-        fs << "gl_FragColor.rgb = OETF(transformed.rgb);";
+        fs << "gl_FragColor.rgb = OETF(OutputTransform(OOTF(InputTransform(EOTF(gl_FragColor.rgb)))));";
         if (!needs.isOpaque() && needs.isPremultiplied()) {
             // and re-premultiply if needed after gamma correction
             fs << "gl_FragColor.rgb = gl_FragColor.rgb * (gl_FragColor.a + 0.0019);";
diff --git a/services/surfaceflinger/RenderEngine/ProgramCache.h b/services/surfaceflinger/RenderEngine/ProgramCache.h
index d18163a..e1398eb 100644
--- a/services/surfaceflinger/RenderEngine/ProgramCache.h
+++ b/services/surfaceflinger/RenderEngine/ProgramCache.h
@@ -72,26 +72,31 @@
             TEXTURE_EXT = 1 << TEXTURE_SHIFT,
             TEXTURE_2D = 2 << TEXTURE_SHIFT,
 
-            COLOR_MATRIX_SHIFT = 5,
-            COLOR_MATRIX_MASK = 1 << COLOR_MATRIX_SHIFT,
-            COLOR_MATRIX_OFF = 0 << COLOR_MATRIX_SHIFT,
-            COLOR_MATRIX_ON = 1 << COLOR_MATRIX_SHIFT,
+            INPUT_TRANSFORM_MATRIX_SHIFT = 5,
+            INPUT_TRANSFORM_MATRIX_MASK = 1 << INPUT_TRANSFORM_MATRIX_SHIFT,
+            INPUT_TRANSFORM_MATRIX_OFF = 0 << INPUT_TRANSFORM_MATRIX_SHIFT,
+            INPUT_TRANSFORM_MATRIX_ON = 1 << INPUT_TRANSFORM_MATRIX_SHIFT,
 
-            INPUT_TF_SHIFT = 6,
+            OUTPUT_TRANSFORM_MATRIX_SHIFT = 6,
+            OUTPUT_TRANSFORM_MATRIX_MASK = 1 << OUTPUT_TRANSFORM_MATRIX_SHIFT,
+            OUTPUT_TRANSFORM_MATRIX_OFF = 0 << OUTPUT_TRANSFORM_MATRIX_SHIFT,
+            OUTPUT_TRANSFORM_MATRIX_ON = 1 << OUTPUT_TRANSFORM_MATRIX_SHIFT,
+
+            INPUT_TF_SHIFT = 7,
             INPUT_TF_MASK = 3 << INPUT_TF_SHIFT,
             INPUT_TF_LINEAR = 0 << INPUT_TF_SHIFT,
             INPUT_TF_SRGB = 1 << INPUT_TF_SHIFT,
             INPUT_TF_ST2084 = 2 << INPUT_TF_SHIFT,
             INPUT_TF_HLG = 3 << INPUT_TF_SHIFT,
 
-            OUTPUT_TF_SHIFT = 8,
+            OUTPUT_TF_SHIFT = 9,
             OUTPUT_TF_MASK = 3 << OUTPUT_TF_SHIFT,
             OUTPUT_TF_LINEAR = 0 << OUTPUT_TF_SHIFT,
             OUTPUT_TF_SRGB = 1 << OUTPUT_TF_SHIFT,
             OUTPUT_TF_ST2084 = 2 << OUTPUT_TF_SHIFT,
             OUTPUT_TF_HLG = 3 << OUTPUT_TF_SHIFT,
 
-            Y410_BT2020_SHIFT = 10,
+            Y410_BT2020_SHIFT = 11,
             Y410_BT2020_MASK = 1 << Y410_BT2020_SHIFT,
             Y410_BT2020_OFF = 0 << Y410_BT2020_SHIFT,
             Y410_BT2020_ON = 1 << Y410_BT2020_SHIFT,
@@ -110,7 +115,15 @@
         inline bool isPremultiplied() const { return (mKey & BLEND_MASK) == BLEND_PREMULT; }
         inline bool isOpaque() const { return (mKey & OPACITY_MASK) == OPACITY_OPAQUE; }
         inline bool hasAlpha() const { return (mKey & ALPHA_MASK) == ALPHA_LT_ONE; }
-        inline bool hasColorMatrix() const { return (mKey & COLOR_MATRIX_MASK) == COLOR_MATRIX_ON; }
+        inline bool hasInputTransformMatrix() const {
+            return (mKey & INPUT_TRANSFORM_MATRIX_MASK) == INPUT_TRANSFORM_MATRIX_ON;
+        }
+        inline bool hasOutputTransformMatrix() const {
+            return (mKey & OUTPUT_TRANSFORM_MATRIX_MASK) == OUTPUT_TRANSFORM_MATRIX_ON;
+        }
+        inline bool hasTransformMatrix() const {
+            return hasInputTransformMatrix() || hasOutputTransformMatrix();
+        }
         inline int getInputTF() const { return (mKey & INPUT_TF_MASK); }
         inline int getOutputTF() const { return (mKey & OUTPUT_TF_MASK); }
         inline bool isY410BT2020() const { return (mKey & Y410_BT2020_MASK) == Y410_BT2020_ON; }
diff --git a/services/surfaceflinger/RenderEngine/RenderEngine.h b/services/surfaceflinger/RenderEngine/RenderEngine.h
index f78b230..d559464 100644
--- a/services/surfaceflinger/RenderEngine/RenderEngine.h
+++ b/services/surfaceflinger/RenderEngine/RenderEngine.h
@@ -112,15 +112,16 @@
     virtual void setupLayerBlackedOut() = 0;
     virtual void setupFillWithColor(float r, float g, float b, float a) = 0;
 
-    virtual mat4 setupColorTransform(const mat4& /* colorTransform */) = 0;
+    virtual void setupColorTransform(const mat4& /* colorTransform */) = 0;
 
     virtual void disableTexturing() = 0;
     virtual void disableBlending() = 0;
 
-    // wide color support
+    // HDR and wide color gamut support
     virtual void setSourceY410BT2020(bool enable) = 0;
     virtual void setSourceDataSpace(ui::Dataspace source) = 0;
     virtual void setOutputDataSpace(ui::Dataspace dataspace) = 0;
+    virtual void setDisplayMaxLuminance(const float maxLuminance) = 0;
 
     // drawing
     virtual void drawMesh(const Mesh& mesh) = 0;
@@ -223,7 +224,7 @@
 
     void checkErrors() const override;
 
-    mat4 setupColorTransform(const mat4& /* colorTransform */) override { return mat4(); }
+    void setupColorTransform(const mat4& /* colorTransform */) override {}
 
     // internal to RenderEngine
     EGLDisplay getEGLDisplay() const;
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index e12d7ca..588d24c 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -244,7 +244,6 @@
         mPrimaryDispSync("PrimaryDispSync"),
         mPrimaryHWVsyncEnabled(false),
         mHWVsyncAvailable(false),
-        mHasColorMatrix(false),
         mHasPoweredOff(false),
         mNumLayers(0),
         mVrFlingerRequestsDisplay(false),
@@ -723,10 +722,13 @@
 }
 
 void SurfaceFlinger::readPersistentProperties() {
+    Mutex::Autolock _l(mStateLock);
+
     char value[PROPERTY_VALUE_MAX];
 
     property_get("persist.sys.sf.color_saturation", value, "1.0");
     mGlobalSaturationFactor = atof(value);
+    updateColorMatrixLocked();
     ALOGV("Saturation is set to %.2f", mGlobalSaturationFactor);
 
     property_get("persist.sys.sf.native_mode", value, "0");
@@ -1017,27 +1019,13 @@
 }
 
 void SurfaceFlinger::setActiveColorModeInternal(const sp<DisplayDevice>& hw,
-                                                ColorMode mode, Dataspace dataSpace) {
+                                                ColorMode mode, Dataspace dataSpace,
+                                                RenderIntent renderIntent) {
     int32_t type = hw->getDisplayType();
     ColorMode currentMode = hw->getActiveColorMode();
     Dataspace currentDataSpace = hw->getCompositionDataSpace();
     RenderIntent currentRenderIntent = hw->getActiveRenderIntent();
 
-    // Natural Mode means it's color managed and the color must be right,
-    // thus we pick RenderIntent::COLORIMETRIC as render intent.
-    // Native Mode means the display is not color managed, and whichever
-    // render intent is picked doesn't matter, thus return
-    // RenderIntent::COLORIMETRIC as default here.
-    RenderIntent renderIntent = RenderIntent::COLORIMETRIC;
-
-    // In Auto Color Mode, we want to strech to panel color space, right now
-    // only the built-in display supports it.
-    if (mDisplayColorSetting == DisplayColorSetting::ENHANCED &&
-        mBuiltinDisplaySupportsEnhance &&
-        hw->getDisplayType() == DisplayDevice::DISPLAY_PRIMARY) {
-        renderIntent = RenderIntent::ENHANCE;
-    }
-
     if (mode == currentMode && dataSpace == currentDataSpace &&
         renderIntent == currentRenderIntent) {
         return;
@@ -1087,7 +1075,8 @@
                 ALOGW("Attempt to set active color mode %s %d for virtual display",
                       decodeColorMode(mMode).c_str(), mMode);
             } else {
-                mFlinger.setActiveColorModeInternal(hw, mMode, Dataspace::UNKNOWN);
+                mFlinger.setActiveColorModeInternal(hw, mMode, Dataspace::UNKNOWN,
+                                                    RenderIntent::COLORIMETRIC);
             }
             return true;
         }
@@ -1119,31 +1108,14 @@
         return BAD_VALUE;
     }
 
-    HdrCapabilities capabilities;
-    int status = getBE().mHwc->getHdrCapabilities(
-        displayDevice->getHwcDisplayId(), &capabilities);
-    if (status == NO_ERROR) {
-        if (displayDevice->hasWideColorGamut()) {
-            std::vector<Hdr> types = capabilities.getSupportedHdrTypes();
-            // insert HDR10/HLG as we will force client composition for HDR10/HLG
-            // layers
-            if (!displayDevice->hasHDR10Support()) {
-                types.push_back(Hdr::HDR10);
-            }
-            if (!displayDevice->hasHLGSupport()) {
-                types.push_back(Hdr::HLG);
-            }
-
-            *outCapabilities = HdrCapabilities(types,
-                    capabilities.getDesiredMaxLuminance(),
-                    capabilities.getDesiredMaxAverageLuminance(),
-                    capabilities.getDesiredMinLuminance());
-        } else {
-            *outCapabilities = std::move(capabilities);
-        }
-    } else {
-        return BAD_VALUE;
-    }
+    // At this point the DisplayDevice should already be set up,
+    // meaning the luminance information has already been queried
+    // from the hardware composer and stored.
+    const HdrCapabilities& capabilities = displayDevice->getHdrCapabilities();
+    *outCapabilities = HdrCapabilities(capabilities.getSupportedHdrTypes(),
+                                       capabilities.getDesiredMaxLuminance(),
+                                       capabilities.getDesiredMaxAverageLuminance(),
+                                       capabilities.getDesiredMinLuminance());
 
     return NO_ERROR;
 }
@@ -1486,9 +1458,12 @@
                     (mPreviousPresentFence->getSignalTime() ==
                             Fence::SIGNAL_TIME_PENDING);
             ATRACE_INT("FrameMissed", static_cast<int>(frameMissed));
-            if (mPropagateBackpressure && frameMissed) {
-                signalLayerUpdate();
-                break;
+            if (frameMissed) {
+                mTimeStats.incrementMissedFrames();
+                if (mPropagateBackpressure) {
+                    signalLayerUpdate();
+                    break;
+                }
             }
 
             // Now that we're going to make it to the handleMessageTransaction()
@@ -1788,6 +1763,11 @@
         mAnimFrameTracker.advanceFrame();
     }
 
+    mTimeStats.incrementTotalFrames();
+    if (mHadClientComposition) {
+        mTimeStats.incrementClientCompositionFrames();
+    }
+
     if (getBE().mHwc->isConnected(HWC_DISPLAY_PRIMARY) &&
             hw->getPowerMode() == HWC_POWER_MODE_OFF) {
         return;
@@ -1876,54 +1856,41 @@
     }
 }
 
-mat4 SurfaceFlinger::computeSaturationMatrix() const {
-    if (mGlobalSaturationFactor == 1.0f) {
-        return mat4();
-    }
-
-    // Rec.709 luma coefficients
-    float3 luminance{0.213f, 0.715f, 0.072f};
-    luminance *= 1.0f - mGlobalSaturationFactor;
-    return mat4(
-        vec4{luminance.r + mGlobalSaturationFactor, luminance.r, luminance.r, 0.0f},
-        vec4{luminance.g, luminance.g + mGlobalSaturationFactor, luminance.g, 0.0f},
-        vec4{luminance.b, luminance.b, luminance.b + mGlobalSaturationFactor, 0.0f},
-        vec4{0.0f, 0.0f, 0.0f, 1.0f}
-    );
-}
-
-// Returns a dataspace that fits all visible layers.  The returned dataspace
+// Returns a data space that fits all visible layers.  The returned data space
 // can only be one of
-//
-//  - Dataspace::V0_SRGB
+//  - Dataspace::SRGB (use legacy dataspace and let HWC saturate when colors are enhanced)
 //  - Dataspace::DISPLAY_P3
 //  - Dataspace::V0_SCRGB_LINEAR
-// TODO(b/73825729) Add BT2020 data space.
-ui::Dataspace SurfaceFlinger::getBestDataspace(
-        const sp<const DisplayDevice>& displayDevice) const {
-    Dataspace bestDataspace = Dataspace::V0_SRGB;
+// The returned HDR data space is one of
+//  - Dataspace::UNKNOWN
+//  - Dataspace::BT2020_HLG
+//  - Dataspace::BT2020_PQ
+Dataspace SurfaceFlinger::getBestDataspace(
+    const sp<const DisplayDevice>& displayDevice, Dataspace* outHdrDataSpace) const {
+    Dataspace bestDataSpace = Dataspace::SRGB;
+    *outHdrDataSpace = Dataspace::UNKNOWN;
+
     for (const auto& layer : displayDevice->getVisibleLayersSortedByZ()) {
         switch (layer->getDataSpace()) {
             case Dataspace::V0_SCRGB:
             case Dataspace::V0_SCRGB_LINEAR:
-                // return immediately
-                return Dataspace::V0_SCRGB_LINEAR;
-            case Dataspace::DISPLAY_P3:
-                bestDataspace = Dataspace::DISPLAY_P3;
+                bestDataSpace = Dataspace::V0_SCRGB_LINEAR;
                 break;
-            // Historically, HDR dataspaces are ignored by SurfaceFlinger. But
-            // since SurfaceFlinger simulates HDR support now, it should honor
-            // them unless there is also native support.
+            case Dataspace::DISPLAY_P3:
+                if (bestDataSpace == Dataspace::SRGB) {
+                    bestDataSpace = Dataspace::DISPLAY_P3;
+                }
+                break;
             case Dataspace::BT2020_PQ:
             case Dataspace::BT2020_ITU_PQ:
-                if (!displayDevice->hasHDR10Support()) {
-                    return Dataspace::V0_SCRGB_LINEAR;
-                }
+                *outHdrDataSpace = Dataspace::BT2020_PQ;
                 break;
             case Dataspace::BT2020_HLG:
             case Dataspace::BT2020_ITU_HLG:
-                if (!displayDevice->hasHLGSupport()) {
-                    return Dataspace::V0_SCRGB_LINEAR;
+                // When there's mixed PQ content and HLG content, we set the HDR
+                // data space to be BT2020_PQ and convert HLG to PQ.
+                if (*outHdrDataSpace == Dataspace::UNKNOWN) {
+                    *outHdrDataSpace = Dataspace::BT2020_HLG;
                 }
                 break;
             default:
@@ -1931,29 +1898,120 @@
         }
     }
 
-    return bestDataspace;
+    return bestDataSpace;
 }
 
 // Pick the ColorMode / Dataspace for the display device.
-// TODO(b/73825729) Add BT2020 color mode.
 void SurfaceFlinger::pickColorMode(const sp<DisplayDevice>& displayDevice,
-        ColorMode* outMode, Dataspace* outDataSpace) const {
+                                   ColorMode* outMode, Dataspace* outDataSpace,
+                                   RenderIntent* outRenderIntent) const {
     if (mDisplayColorSetting == DisplayColorSetting::UNMANAGED) {
         *outMode = ColorMode::NATIVE;
         *outDataSpace = Dataspace::UNKNOWN;
+        *outRenderIntent = RenderIntent::COLORIMETRIC;
         return;
     }
 
-    switch (getBestDataspace(displayDevice)) {
-        case Dataspace::DISPLAY_P3:
-        case Dataspace::V0_SCRGB_LINEAR:
+    Dataspace hdrDataSpace;
+    Dataspace bestDataSpace = getBestDataspace(displayDevice, &hdrDataSpace);
+
+    if (hdrDataSpace == Dataspace::BT2020_PQ) {
+        // Hardware composer can handle BT2100 ColorMode only when
+        // - colorimetric tone mapping is supported, or
+        // - Auto mode is turned on and enhanced tone mapping is supported.
+        if (displayDevice->hasBT2100PQColorimetricSupport() ||
+            (mDisplayColorSetting == DisplayColorSetting::ENHANCED &&
+             displayDevice->hasBT2100PQEnhanceSupport())) {
+            *outMode = ColorMode::BT2100_PQ;
+            *outDataSpace = Dataspace::BT2020_PQ;
+        } else if (displayDevice->hasHDR10Support()) {
+            // Legacy HDR support.  HDR layers are treated as UNKNOWN layers.
+            hdrDataSpace = Dataspace::UNKNOWN;
+        } else {
+            // Simulate PQ through RenderEngine, pick DISPLAY_P3 color mode.
             *outMode = ColorMode::DISPLAY_P3;
             *outDataSpace = Dataspace::DISPLAY_P3;
-            break;
+        }
+    } else if (hdrDataSpace == Dataspace::BT2020_HLG) {
+        if (displayDevice->hasBT2100HLGColorimetricSupport() ||
+            (mDisplayColorSetting == DisplayColorSetting::ENHANCED &&
+             displayDevice->hasBT2100HLGEnhanceSupport())) {
+            *outMode = ColorMode::BT2100_HLG;
+            *outDataSpace = Dataspace::BT2020_HLG;
+        } else if (displayDevice->hasHLGSupport()) {
+            // Legacy HDR support.  HDR layers are treated as UNKNOWN layers.
+            hdrDataSpace = Dataspace::UNKNOWN;
+        } else {
+            // Simulate HLG through RenderEngine, pick DISPLAY_P3 color mode.
+            *outMode = ColorMode::DISPLAY_P3;
+            *outDataSpace = Dataspace::DISPLAY_P3;
+        }
+    }
+
+    // hdrDataSpace is UNKNOWN at this point when there are no HDR layers, or
+    // when HDR layers fall back to legacy HDR support and are treated as UNKNOWN.
+    if (hdrDataSpace == Dataspace::UNKNOWN) {
+        switch (bestDataSpace) {
+            case Dataspace::DISPLAY_P3:
+            case Dataspace::V0_SCRGB_LINEAR:
+                *outMode = ColorMode::DISPLAY_P3;
+                *outDataSpace = Dataspace::DISPLAY_P3;
+                break;
+            default:
+                *outMode = ColorMode::SRGB;
+                *outDataSpace = Dataspace::SRGB;
+                break;
+        }
+    }
+    *outRenderIntent = pickRenderIntent(displayDevice, *outMode);
+}
+
+RenderIntent SurfaceFlinger::pickRenderIntent(const sp<DisplayDevice>& displayDevice,
+                                              ColorMode colorMode) const {
+    // Native Mode means the display is not color managed, and whichever
+    // render intent is picked doesn't matter, so return
+    // RenderIntent::COLORIMETRIC as the default here.
+    if (mDisplayColorSetting == DisplayColorSetting::UNMANAGED) {
+        return RenderIntent::COLORIMETRIC;
+    }
+
+    // In Auto Color Mode, we want to stretch to the panel color space; right
+    // now only the built-in display supports it.
+    if (mDisplayColorSetting == DisplayColorSetting::ENHANCED &&
+        mBuiltinDisplaySupportsEnhance &&
+        displayDevice->getDisplayType() == DisplayDevice::DISPLAY_PRIMARY) {
+        switch (colorMode) {
+            case ColorMode::DISPLAY_P3:
+            case ColorMode::SRGB:
+                return RenderIntent::ENHANCE;
+            // In Auto Color Mode, BT2100_PQ and BT2100_HLG will only be picked
+            // when TONE_MAP_ENHANCE or TONE_MAP_COLORIMETRIC is supported.
+            // If TONE_MAP_ENHANCE is not supported, fall back to TONE_MAP_COLORIMETRIC.
+            case ColorMode::BT2100_PQ:
+                return displayDevice->hasBT2100PQEnhanceSupport() ?
+                    RenderIntent::TONE_MAP_ENHANCE : RenderIntent::TONE_MAP_COLORIMETRIC;
+            case ColorMode::BT2100_HLG:
+                return displayDevice->hasBT2100HLGEnhanceSupport() ?
+                    RenderIntent::TONE_MAP_ENHANCE : RenderIntent::TONE_MAP_COLORIMETRIC;
+            // This default case should never be reached; the cases above cover
+            // every possible ColorMode returned by pickColorMode.
+            default:
+                return RenderIntent::COLORIMETRIC;
+        }
+    }
+
+    // Either enhance is not supported or we are in natural mode.
+
+    // Natural Mode means the display is color managed and colors must be
+    // accurate, so we pick RenderIntent::COLORIMETRIC for non-HDR content
+    // and RenderIntent::TONE_MAP_COLORIMETRIC for HDR content.
+    switch (colorMode) {
+        // In Natural Color Mode, BT2100_PQ and BT2100_HLG will only be picked
+        // when TONE_MAP_COLORIMETRIC is supported.
+        case ColorMode::BT2100_PQ:
+        case ColorMode::BT2100_HLG:
+            return RenderIntent::TONE_MAP_COLORIMETRIC;
         default:
-            *outMode = ColorMode::SRGB;
-            *outDataSpace = Dataspace::V0_SRGB;
-            break;
+            return RenderIntent::COLORIMETRIC;
     }
 }
 
@@ -2017,9 +2075,6 @@
         }
     }
 
-
-    mat4 colorMatrix = mColorMatrix * computeSaturationMatrix() * mDaltonizer();
-
     // Set the per-frame data
     for (size_t displayId = 0; displayId < mDisplays.size(); ++displayId) {
         auto& displayDevice = mDisplays[displayId];
@@ -2028,9 +2083,9 @@
         if (hwcId < 0) {
             continue;
         }
-        if (colorMatrix != mPreviousColorMatrix) {
-            displayDevice->setColorTransform(colorMatrix);
-            status_t result = getBE().mHwc->setColorTransform(hwcId, colorMatrix);
+        if (mDrawingState.colorMatrixChanged) {
+            displayDevice->setColorTransform(mDrawingState.colorMatrix);
+            status_t result = getBE().mHwc->setColorTransform(hwcId, mDrawingState.colorMatrix);
             ALOGE_IF(result != NO_ERROR, "Failed to set color transform on "
                     "display %zd: %d", displayId, result);
         }
@@ -2058,12 +2113,13 @@
         if (hasWideColorDisplay) {
             ColorMode colorMode;
             Dataspace dataSpace;
-            pickColorMode(displayDevice, &colorMode, &dataSpace);
-            setActiveColorModeInternal(displayDevice, colorMode, dataSpace);
+            RenderIntent renderIntent;
+            pickColorMode(displayDevice, &colorMode, &dataSpace, &renderIntent);
+            setActiveColorModeInternal(displayDevice, colorMode, dataSpace, renderIntent);
         }
     }
 
-    mPreviousColorMatrix = colorMatrix;
+    mDrawingState.colorMatrixChanged = false;
 
     for (size_t displayId = 0; displayId < mDisplays.size(); ++displayId) {
         auto& displayDevice = mDisplays[displayId];
@@ -2268,6 +2324,8 @@
         const wp<IBinder>& display, int hwcId, const DisplayDeviceState& state,
         const sp<DisplaySurface>& dispSurface, const sp<IGraphicBufferProducer>& producer) {
     bool hasWideColorGamut = false;
+    std::unordered_map<ColorMode, std::vector<RenderIntent>> hdrAndRenderIntents;
+
     if (hasWideColorDisplay) {
         std::vector<ColorMode> modes = getHwComposer().getColorModes(hwcId);
         for (ColorMode colorMode : modes) {
@@ -2277,7 +2335,6 @@
                 case ColorMode::DCI_P3:
                     hasWideColorGamut = true;
                     break;
-                // TODO(lpy) Handle BT2020, BT2100_PQ and BT2100_HLG properly.
                 default:
                     break;
             }
@@ -2292,6 +2349,10 @@
                     }
                 }
             }
+
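+            // Remember the render intents supported by each HDR color mode; they
+            // are passed to the DisplayDevice constructor below.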
+            if (colorMode == ColorMode::BT2100_PQ || colorMode == ColorMode::BT2100_HLG) {
+                hdrAndRenderIntents.emplace(colorMode, renderIntents);
+            }
         }
     }
 
@@ -2331,7 +2392,7 @@
                               dispSurface, std::move(renderSurface), displayWidth, displayHeight,
                               hasWideColorGamut, hdrCapabilities,
                               getHwComposer().getSupportedPerFrameMetadata(hwcId),
-                              initialPowerMode);
+                              hdrAndRenderIntents, initialPowerMode);
 
     if (maxFrameBufferAcquiredBuffers >= 3) {
         nativeWindowSurface->preallocateBuffers();
@@ -2343,7 +2404,8 @@
         defaultColorMode = ColorMode::SRGB;
         defaultDataSpace = Dataspace::V0_SRGB;
     }
-    setActiveColorModeInternal(hw, defaultColorMode, defaultDataSpace);
+    setActiveColorModeInternal(hw, defaultColorMode, defaultDataSpace,
+                               RenderIntent::COLORIMETRIC);
     hw->setLayerStack(state.layerStack);
     hw->setProjection(state.orientation, state.viewport, state.frame);
     hw->setDisplayName(state.displayName);
@@ -2652,6 +2714,9 @@
     mAnimCompositionPending = mAnimTransactionPending;
 
     mDrawingState = mCurrentState;
+    // clear the "changed" flags in current state
+    mCurrentState.colorMatrixChanged = false;
+
     mDrawingState.traverseInZOrder([](Layer* layer) {
         layer->commitChildList();
     });
@@ -2895,19 +2960,13 @@
     const DisplayRenderArea renderArea(displayDevice);
     const auto hwcId = displayDevice->getHwcDisplayId();
     const bool hasClientComposition = getBE().mHwc->hasClientComposition(hwcId);
-    const bool hasDeviceComposition = getBE().mHwc->hasDeviceComposition(hwcId);
-    const bool skipClientColorTransform = getBE().mHwc->hasCapability(
-        HWC2::Capability::SkipClientColorTransform);
     ATRACE_INT("hasClientComposition", hasClientComposition);
 
-    mat4 oldColorMatrix;
-    mat4 legacySrgbSaturationMatrix = mLegacySrgbSaturationMatrix;
-    const bool applyColorMatrix = !hasDeviceComposition && !skipClientColorTransform;
-    if (applyColorMatrix) {
-        mat4 colorMatrix = mColorMatrix * computeSaturationMatrix() * mDaltonizer();
-        oldColorMatrix = getRenderEngine().setupColorTransform(colorMatrix);
-        legacySrgbSaturationMatrix = colorMatrix * legacySrgbSaturationMatrix;
-    }
+    bool applyColorMatrix = false;
+    bool applyLegacyColorMatrix = false;
+    mat4 colorMatrix;
+    mat4 legacyColorMatrix;
+    const mat4* currentColorMatrix = nullptr;
 
     if (hasClientComposition) {
         ALOGV("hasClientComposition");
@@ -2917,6 +2976,28 @@
             outputDataspace = displayDevice->getCompositionDataSpace();
         }
         getBE().mRenderEngine->setOutputDataSpace(outputDataspace);
+        getBE().mRenderEngine->setDisplayMaxLuminance(
+                displayDevice->getHdrCapabilities().getDesiredMaxLuminance());
+
+        const bool hasDeviceComposition = getBE().mHwc->hasDeviceComposition(hwcId);
+        const bool skipClientColorTransform = getBE().mHwc->hasCapability(
+            HWC2::Capability::SkipClientColorTransform);
+
+        applyColorMatrix = !hasDeviceComposition && !skipClientColorTransform;
+        if (applyColorMatrix) {
+            colorMatrix = mDrawingState.colorMatrix;
+        }
+
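+        // The legacy sRGB saturation matrix is only needed in enhanced (auto) color
+        // mode when the output data space is color managed and wider than sRGB.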
+        applyLegacyColorMatrix = (mDisplayColorSetting == DisplayColorSetting::ENHANCED &&
+                outputDataspace != Dataspace::UNKNOWN &&
+                outputDataspace != Dataspace::SRGB);
+        if (applyLegacyColorMatrix) {
+            if (applyColorMatrix) {
+                legacyColorMatrix = colorMatrix * mLegacySrgbSaturationMatrix;
+            } else {
+                legacyColorMatrix = mLegacySrgbSaturationMatrix;
+            }
+        }
 
         if (!displayDevice->makeCurrent()) {
             ALOGW("DisplayDevice::makeCurrent failed. Aborting surface composition for display %s",
@@ -2979,69 +3060,59 @@
 
     ALOGV("Rendering client layers");
     const Transform& displayTransform = displayDevice->getTransform();
-    if (hwcId >= 0) {
-        // we're using h/w composer
-        bool firstLayer = true;
-        for (auto& layer : displayDevice->getVisibleLayersSortedByZ()) {
-            const Region clip(bounds.intersect(
-                    displayTransform.transform(layer->visibleRegion)));
-            ALOGV("Layer: %s", layer->getName().string());
-            ALOGV("  Composition type: %s",
-                    to_string(layer->getCompositionType(hwcId)).c_str());
-            if (!clip.isEmpty()) {
-                switch (layer->getCompositionType(hwcId)) {
-                    case HWC2::Composition::Cursor:
-                    case HWC2::Composition::Device:
-                    case HWC2::Composition::Sideband:
-                    case HWC2::Composition::SolidColor: {
-                        const Layer::State& state(layer->getDrawingState());
-                        if (layer->getClearClientTarget(hwcId) && !firstLayer &&
-                                layer->isOpaque(state) && (state.color.a == 1.0f)
-                                && hasClientComposition) {
-                            // never clear the very first layer since we're
-                            // guaranteed the FB is already cleared
-                            layer->clearWithOpenGL(renderArea);
-                        }
-                        break;
+    bool firstLayer = true;
+    for (auto& layer : displayDevice->getVisibleLayersSortedByZ()) {
+        const Region clip(bounds.intersect(
+                displayTransform.transform(layer->visibleRegion)));
+        ALOGV("Layer: %s", layer->getName().string());
+        ALOGV("  Composition type: %s",
+                to_string(layer->getCompositionType(hwcId)).c_str());
+        if (!clip.isEmpty()) {
+            switch (layer->getCompositionType(hwcId)) {
+                case HWC2::Composition::Cursor:
+                case HWC2::Composition::Device:
+                case HWC2::Composition::Sideband:
+                case HWC2::Composition::SolidColor: {
+                    const Layer::State& state(layer->getDrawingState());
+                    if (layer->getClearClientTarget(hwcId) && !firstLayer &&
+                            layer->isOpaque(state) && (state.color.a == 1.0f)
+                            && hasClientComposition) {
+                        // never clear the very first layer since we're
+                        // guaranteed the FB is already cleared
+                        layer->clearWithOpenGL(renderArea);
                     }
-                    case HWC2::Composition::Client: {
-                        // Only apply saturation matrix layer that is legacy SRGB dataspace
-                        // when auto color mode is on.
-                        bool restore = false;
-                        mat4 savedMatrix;
-                        if (mDisplayColorSetting == DisplayColorSetting::ENHANCED &&
-                            layer->isLegacySrgbDataSpace()) {
-                            savedMatrix =
-                                getRenderEngine().setupColorTransform(legacySrgbSaturationMatrix);
-                            restore = true;
-                        }
-                        layer->draw(renderArea, clip);
-                        if (restore) {
-                            getRenderEngine().setupColorTransform(savedMatrix);
-                        }
-                        break;
-                    }
-                    default:
-                        break;
+                    break;
                 }
-            } else {
-                ALOGV("  Skipping for empty clip");
+                case HWC2::Composition::Client: {
+                    // switch color matrices lazily
+                    if (layer->isLegacyDataSpace()) {
+                        if (applyLegacyColorMatrix && currentColorMatrix != &legacyColorMatrix) {
+                            // TODO(b/78891890) Legacy sRGB saturation matrix should be set
+                            // separately.
+                            getRenderEngine().setupColorTransform(legacyColorMatrix);
+                            currentColorMatrix = &legacyColorMatrix;
+                        }
+                    } else {
+                        if (applyColorMatrix && currentColorMatrix != &colorMatrix) {
+                            getRenderEngine().setupColorTransform(colorMatrix);
+                            currentColorMatrix = &colorMatrix;
+                        }
+                    }
+
+                    layer->draw(renderArea, clip);
+                    break;
+                }
+                default:
+                    break;
             }
-            firstLayer = false;
+        } else {
+            ALOGV("  Skipping for empty clip");
         }
-    } else {
-        // we're not using h/w composer
-        for (auto& layer : displayDevice->getVisibleLayersSortedByZ()) {
-            const Region clip(bounds.intersect(
-                    displayTransform.transform(layer->visibleRegion)));
-            if (!clip.isEmpty()) {
-                layer->draw(renderArea, clip);
-            }
-        }
+        firstLayer = false;
     }
 
-    if (applyColorMatrix) {
-        getRenderEngine().setupColorTransform(oldColorMatrix);
+    if (applyColorMatrix || applyLegacyColorMatrix) {
+        getRenderEngine().setupColorTransform(mat4());
     }
 
     // disable scissor at the end of the frame
@@ -3839,12 +3910,6 @@
         size_t index = 0;
         size_t numArgs = args.size();
 
-        if (asProto) {
-            LayersProto layersProto = dumpProtoInfo(LayerVector::StateSet::Current);
-            result.append(layersProto.SerializeAsString().c_str(), layersProto.ByteSize());
-            dumpAll = false;
-        }
-
         if (numArgs) {
             if ((index < numArgs) &&
                     (args[index] == String16("--list"))) {
@@ -3921,10 +3986,21 @@
                 mLayerStats.dump(result);
                 dumpAll = false;
             }
+
+            if ((index < numArgs) && (args[index] == String16("--timestats"))) {
+                index++;
+                mTimeStats.parseArgs(asProto, args, index, result);
+                dumpAll = false;
+            }
         }
 
         if (dumpAll) {
-            dumpAllLocked(args, index, result);
+            if (asProto) {
+                LayersProto layersProto = dumpProtoInfo(LayerVector::StateSet::Current);
+                result.append(layersProto.SerializeAsString().c_str(), layersProto.ByteSize());
+            } else {
+                dumpAllLocked(args, index, result);
+            }
         }
 
         if (locked) {
@@ -4364,6 +4440,30 @@
     return true;
 }
 
+void SurfaceFlinger::updateColorMatrixLocked() {
+    mat4 colorMatrix;
+    if (mGlobalSaturationFactor != 1.0f) {
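+        // Build a saturation matrix: out = s * in + (1 - s) * dot(lumaRec709, in)
+        // per channel, so a factor of 1.0 is the identity and 0.0 is grayscale.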
+        // Rec.709 luma coefficients
+        float3 luminance{0.213f, 0.715f, 0.072f};
+        luminance *= 1.0f - mGlobalSaturationFactor;
+        mat4 saturationMatrix = mat4(
+            vec4{luminance.r + mGlobalSaturationFactor, luminance.r, luminance.r, 0.0f},
+            vec4{luminance.g, luminance.g + mGlobalSaturationFactor, luminance.g, 0.0f},
+            vec4{luminance.b, luminance.b, luminance.b + mGlobalSaturationFactor, 0.0f},
+            vec4{0.0f, 0.0f, 0.0f, 1.0f}
+        );
+        colorMatrix = mClientColorMatrix * saturationMatrix * mDaltonizer();
+    } else {
+        colorMatrix = mClientColorMatrix * mDaltonizer();
+    }
+
+    if (mCurrentState.colorMatrix != colorMatrix) {
+        mCurrentState.colorMatrix = colorMatrix;
+        mCurrentState.colorMatrixChanged = true;
+        setTransactionFlags(eTransactionNeeded);
+    }
+}
+
 status_t SurfaceFlinger::CheckTransactCodeCredentials(uint32_t code) {
     switch (code) {
         case CREATE_CONNECTION:
@@ -4498,6 +4598,7 @@
                 return NO_ERROR;
             }
             case 1014: {
+                Mutex::Autolock _l(mStateLock);
                 // daltonize
                 n = data.readInt32();
                 switch (n % 10) {
@@ -4519,33 +4620,33 @@
                 } else {
                     mDaltonizer.setMode(ColorBlindnessMode::Simulation);
                 }
-                invalidateHwcGeometry();
-                repaintEverything();
+
+                updateColorMatrixLocked();
                 return NO_ERROR;
             }
             case 1015: {
+                Mutex::Autolock _l(mStateLock);
                 // apply a color matrix
                 n = data.readInt32();
                 if (n) {
                     // color matrix is sent as a column-major mat4 matrix
                     for (size_t i = 0 ; i < 4; i++) {
                         for (size_t j = 0; j < 4; j++) {
-                            mColorMatrix[i][j] = data.readFloat();
+                            mClientColorMatrix[i][j] = data.readFloat();
                         }
                     }
                 } else {
-                    mColorMatrix = mat4();
+                    mClientColorMatrix = mat4();
                 }
 
                 // Check that supplied matrix's last row is {0,0,0,1} so we can avoid
                 // the division by w in the fragment shader
-                float4 lastRow(transpose(mColorMatrix)[3]);
+                float4 lastRow(transpose(mClientColorMatrix)[3]);
                 if (any(greaterThan(abs(lastRow - float4{0, 0, 0, 1}), float4{1e-4f}))) {
                     ALOGE("The color transform's last row must be (0, 0, 0, 1)");
                 }
 
-                invalidateHwcGeometry();
-                repaintEverything();
+                updateColorMatrixLocked();
                 return NO_ERROR;
             }
             // This is an experimental interface
@@ -4588,10 +4689,10 @@
                 return NO_ERROR;
             }
             case 1022: { // Set saturation boost
+                Mutex::Autolock _l(mStateLock);
                 mGlobalSaturationFactor = std::max(0.0f, std::min(data.readFloat(), 2.0f));
 
-                invalidateHwcGeometry();
-                repaintEverything();
+                updateColorMatrixLocked();
                 return NO_ERROR;
             }
             case 1023: { // Set native mode
@@ -4721,6 +4822,9 @@
         }
         bool getWideColorSupport() const override { return false; }
         Dataspace getDataSpace() const override { return Dataspace::UNKNOWN; }
+        float getDisplayMaxLuminance() const override {
+            return DisplayDevice::sDefaultMaxLumiance;
+        }
 
         class ReparentForDrawing {
         public:
@@ -4923,7 +5027,8 @@
     if (renderArea.getWideColorSupport()) {
         outputDataspace = renderArea.getDataSpace();
     }
-    getBE().mRenderEngine->setOutputDataSpace(outputDataspace);
+    engine.setOutputDataSpace(outputDataspace);
+    engine.setDisplayMaxLuminance(renderArea.getDisplayMaxLuminance());
 
     // make sure to clear all GL error flags
     engine.checkErrors();
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index 33706da..d9cf946 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -63,6 +63,7 @@
 #include "SurfaceInterceptor.h"
 #include "SurfaceTracing.h"
 #include "StartPropertySetThread.h"
+#include "TimeStats/TimeStats.h"
 #include "VSyncModulator.h"
 
 #include "DisplayHardware/HWC2.h"
@@ -372,6 +373,10 @@
             // always uses the Drawing StateSet.
             layersSortedByZ = other.layersSortedByZ;
             displays = other.displays;
+            colorMatrixChanged = other.colorMatrixChanged;
+            if (colorMatrixChanged) {
+                colorMatrix = other.colorMatrix;
+            }
             return *this;
         }
 
@@ -379,6 +384,9 @@
         LayerVector layersSortedByZ;
         DefaultKeyedVector< wp<IBinder>, DisplayDeviceState> displays;
 
+        bool colorMatrixChanged = true;
+        mat4 colorMatrix;
+
         void traverseInZOrder(const LayerVector::Visitor& visitor) const;
         void traverseInReverseZOrder(const LayerVector::Visitor& visitor) const;
     };
@@ -473,7 +481,8 @@
     // Called on the main thread in response to setActiveColorMode()
     void setActiveColorModeInternal(const sp<DisplayDevice>& hw,
                                     ui::ColorMode colorMode,
-                                    ui::Dataspace dataSpace);
+                                    ui::Dataspace dataSpace,
+                                    ui::RenderIntent renderIntent);
 
     // Returns whether the transaction actually modified any state
     bool handleMessageTransaction();
@@ -645,14 +654,18 @@
             nsecs_t compositeToPresentLatency);
     void rebuildLayerStacks();
 
-    // Given a dataSpace, returns the appropriate color_mode to use
-    // to display that dataSpace.
-    ui::Dataspace getBestDataspace(const sp<const DisplayDevice>& displayDevice) const;
+    ui::Dataspace getBestDataspace(const sp<const DisplayDevice>& displayDevice,
+                                   ui::Dataspace* outHdrDataSpace) const;
+
+    // Returns the appropriate ColorMode, Dataspace and RenderIntent for the
+    // DisplayDevice. The function only returns the supported ColorMode,
+    // Dataspace and RenderIntent.
     void pickColorMode(const sp<DisplayDevice>& displayDevice,
                        ui::ColorMode* outMode,
-                       ui::Dataspace* outDataSpace) const;
-
-    mat4 computeSaturationMatrix() const;
+                       ui::Dataspace* outDataSpace,
+                       ui::RenderIntent* outRenderIntent) const;
+    ui::RenderIntent pickRenderIntent(const sp<DisplayDevice>& displayDevice,
+                                      ui::ColorMode colorMode) const;
 
     void setUpHWComposer();
     void doComposition();
@@ -740,6 +753,8 @@
     // Check to see if we should handoff to vr flinger.
     void updateVrFlinger();
 
+    void updateColorMatrixLocked();
+
     /* ------------------------------------------------------------------------
      * Attributes
      */
@@ -753,6 +768,11 @@
     bool mAnimTransactionPending;
     SortedVector< sp<Layer> > mLayersPendingRemoval;
 
+    // global color transform states
+    Daltonizer mDaltonizer;
+    float mGlobalSaturationFactor = 1.0f;
+    mat4 mClientColorMatrix;
+
     // Can't be unordered_set because wp<> isn't hashable
     std::set<wp<IBinder>> mGraphicBufferProducerList;
     size_t mMaxGraphicBufferProducerListSize = MAX_LAYERS;
@@ -815,6 +835,7 @@
             std::make_unique<impl::SurfaceInterceptor>(this);
     SurfaceTracing mTracing;
     LayerStats mLayerStats;
+    TimeStats& mTimeStats = TimeStats::getInstance();
     bool mUseHwcVirtualDisplays = false;
 
     // Restrict layers to use two buffers in their bufferqueues.
@@ -842,12 +863,6 @@
 
     bool mInjectVSyncs;
 
-    Daltonizer mDaltonizer;
-
-    mat4 mPreviousColorMatrix;
-    mat4 mColorMatrix;
-    bool mHasColorMatrix;
-
     // Static screen stats
     bool mHasPoweredOff;
 
@@ -865,8 +880,6 @@
     DisplayColorSetting mDisplayColorSetting = DisplayColorSetting::MANAGED;
     // Applied on sRGB layers when the render intent is non-colorimetric.
     mat4 mLegacySrgbSaturationMatrix;
-    // Applied globally.
-    float mGlobalSaturationFactor = 1.0f;
     bool mBuiltinDisplaySupportsEnhance = false;
 
     using CreateBufferQueueFunction =
diff --git a/services/surfaceflinger/TimeStats/TimeStats.cpp b/services/surfaceflinger/TimeStats/TimeStats.cpp
new file mode 100644
index 0000000..a6833a5
--- /dev/null
+++ b/services/surfaceflinger/TimeStats/TimeStats.cpp
@@ -0,0 +1,495 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#undef LOG_TAG
+#define LOG_TAG "TimeStats"
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include "TimeStats.h"
+
+#include <android-base/stringprintf.h>
+
+#include <log/log.h>
+
+#include <utils/String8.h>
+#include <utils/Trace.h>
+
+#include <algorithm>
+#include <regex>
+
+namespace android {
+
+TimeStats& TimeStats::getInstance() {
+    static std::unique_ptr<TimeStats> sInstance;
+    static std::once_flag sOnceFlag;
+
+    std::call_once(sOnceFlag, [] { sInstance.reset(new TimeStats); });
+    return *sInstance.get();
+}
+
+void TimeStats::parseArgs(bool asProto, const Vector<String16>& args, size_t& index,
+                          String8& result) {
+    ATRACE_CALL();
+
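+    // Recognized flags: -enable, -disable, -clear, and -dump (optionally followed
+    // by "-maxlayers N"); unrecognized flags are ignored.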
+    if (args.size() > index + 10) {
+        ALOGD("Invalid args count");
+        return;
+    }
+
+    std::unordered_map<std::string, int32_t> argsMap;
+    while (index < args.size()) {
+        argsMap[std::string(String8(args[index]).c_str())] = index;
+        ++index;
+    }
+
+    if (argsMap.count("-disable")) {
+        disable();
+    }
+
+    if (argsMap.count("-dump")) {
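+        // -maxlayers caps how many layers are dumped; 0 means dump all layers.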
+        int64_t maxLayers = 0;
+        auto iter = argsMap.find("-maxlayers");
+        if (iter != argsMap.end() && iter->second + 1 < static_cast<int32_t>(args.size())) {
+            maxLayers = strtol(String8(args[iter->second + 1]).c_str(), nullptr, 10);
+            maxLayers = std::clamp(maxLayers, int64_t(0), int64_t(UINT32_MAX));
+        }
+
+        dump(asProto, static_cast<uint32_t>(maxLayers), result);
+    }
+
+    if (argsMap.count("-clear")) {
+        clear();
+    }
+
+    if (argsMap.count("-enable")) {
+        enable();
+    }
+}
+
+void TimeStats::incrementTotalFrames() {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    timeStats.totalFrames++;
+}
+
+void TimeStats::incrementMissedFrames() {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    timeStats.missedFrames++;
+}
+
+void TimeStats::incrementClientCompositionFrames() {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    timeStats.clientCompositionFrames++;
+}
+
+bool TimeStats::recordReadyLocked(const std::string& layerName, TimeRecord* timeRecord) {
+    if (!timeRecord->ready) {
+        ALOGV("[%s]-[%" PRIu64 "]-presentFence is still not received", layerName.c_str(),
+              timeRecord->frameNumber);
+        return false;
+    }
+
+    if (timeRecord->acquireFence != nullptr) {
+        if (timeRecord->acquireFence->getSignalTime() == Fence::SIGNAL_TIME_PENDING) {
+            return false;
+        }
+        if (timeRecord->acquireFence->getSignalTime() != Fence::SIGNAL_TIME_INVALID) {
+            timeRecord->acquireTime = timeRecord->acquireFence->getSignalTime();
+            timeRecord->acquireFence = nullptr;
+        } else {
+            ALOGV("[%s]-[%" PRIu64 "]-acquireFence signal time is invalid", layerName.c_str(),
+                  timeRecord->frameNumber);
+        }
+    }
+
+    if (timeRecord->presentFence != nullptr) {
+        if (timeRecord->presentFence->getSignalTime() == Fence::SIGNAL_TIME_PENDING) {
+            return false;
+        }
+        if (timeRecord->presentFence->getSignalTime() != Fence::SIGNAL_TIME_INVALID) {
+            timeRecord->presentTime = timeRecord->presentFence->getSignalTime();
+            timeRecord->presentFence = nullptr;
+        } else {
+            ALOGV("[%s]-[%" PRIu64 "]-presentFence signal time invalid", layerName.c_str(),
+                  timeRecord->frameNumber);
+        }
+    }
+
+    return true;
+}
+
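+// Converts a nanosecond interval to whole milliseconds, clamped to the int32 range.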
+static int32_t msBetween(nsecs_t start, nsecs_t end) {
+    int64_t delta = (end - start) / 1000000;
+    delta = std::clamp(delta, int64_t(INT32_MIN), int64_t(INT32_MAX));
+    return static_cast<int32_t>(delta);
+}
+
+void TimeStats::flushAvailableRecordsToStatsLocked(const std::string& layerName) {
+    ATRACE_CALL();
+
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    TimeRecord& prevTimeRecord = layerRecord.prevTimeRecord;
+    std::vector<TimeRecord>& timeRecords = layerRecord.timeRecords;
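+    // Drain completed records from the front until one is still waiting on its
+    // fences or timestamps.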
+    while (!timeRecords.empty()) {
+        if (!recordReadyLocked(layerName, &timeRecords[0])) break;
+        ALOGV("[%s]-[%" PRIu64 "]-presentFenceTime[%" PRId64 "]", layerName.c_str(),
+              timeRecords[0].frameNumber, timeRecords[0].presentTime);
+
+        if (prevTimeRecord.ready) {
+            if (!timeStats.stats.count(layerName)) {
+                timeStats.stats[layerName].layerName = layerName;
+                timeStats.stats[layerName].statsStart = static_cast<int64_t>(std::time(0));
+            }
+            TimeStatsHelper::TimeStatsLayer& timeStatsLayer = timeStats.stats[layerName];
+            timeStatsLayer.totalFrames++;
+
+            const int32_t postToPresentMs =
+                    msBetween(timeRecords[0].postTime, timeRecords[0].presentTime);
+            ALOGV("[%s]-[%" PRIu64 "]-post2present[%d]", layerName.c_str(),
+                  timeRecords[0].frameNumber, postToPresentMs);
+            timeStatsLayer.deltas["post2present"].insert(postToPresentMs);
+
+            const int32_t acquireToPresentMs =
+                    msBetween(timeRecords[0].acquireTime, timeRecords[0].presentTime);
+            ALOGV("[%s]-[%" PRIu64 "]-acquire2present[%d]", layerName.c_str(),
+                  timeRecords[0].frameNumber, acquireToPresentMs);
+            timeStatsLayer.deltas["acquire2present"].insert(acquireToPresentMs);
+
+            const int32_t latchToPresentMs =
+                    msBetween(timeRecords[0].latchTime, timeRecords[0].presentTime);
+            ALOGV("[%s]-[%" PRIu64 "]-latch2present[%d]", layerName.c_str(),
+                  timeRecords[0].frameNumber, latchToPresentMs);
+            timeStatsLayer.deltas["latch2present"].insert(latchToPresentMs);
+
+            const int32_t desiredToPresentMs =
+                    msBetween(timeRecords[0].desiredTime, timeRecords[0].presentTime);
+            ALOGV("[%s]-[%" PRIu64 "]-desired2present[%d]", layerName.c_str(),
+                  timeRecords[0].frameNumber, desiredToPresentMs);
+            timeStatsLayer.deltas["desired2present"].insert(desiredToPresentMs);
+
+            const int32_t presentToPresentMs =
+                    msBetween(prevTimeRecord.presentTime, timeRecords[0].presentTime);
+            ALOGV("[%s]-[%" PRIu64 "]-present2present[%d]", layerName.c_str(),
+                  timeRecords[0].frameNumber, presentToPresentMs);
+            timeStatsLayer.deltas["present2present"].insert(presentToPresentMs);
+
+            timeStats.stats[layerName].statsEnd = static_cast<int64_t>(std::time(0));
+        }
+        prevTimeRecord = timeRecords[0];
+        // TODO(zzyiwei): change timeRecords to use std::deque
+        timeRecords.erase(timeRecords.begin());
+        layerRecord.waitData--;
+    }
+}
+
+static bool layerNameIsValid(const std::string& layerName) {
+    // This regular expression captures, for instance, the following layer names:
+    // 1) StatusBar#0
+    // 2) NavigationBar#1
+    // 3) com.*#0
+    // 4) SurfaceView - com.*#0
+    // Using [-\s\t]+ for the separator between SurfaceView and com.* makes the
+    // match a bit more robust against slight changes in that naming.
+    // In most cases the layer name consists only of . / $ _ 0-9 a-z A-Z.
+    std::regex re("(((SurfaceView[-\\s\\t]+)?com\\.[./$\\w]+)|((Status|Navigation)Bar))#\\d+");
+    return std::regex_match(layerName.begin(), layerName.end(), re);
+}
+
+void TimeStats::setPostTime(const std::string& layerName, uint64_t frameNumber, nsecs_t postTime) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-PostTime[%" PRId64 "]", layerName.c_str(), frameNumber, postTime);
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName) && !layerNameIsValid(layerName)) {
+        return;
+    }
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    if (layerRecord.timeRecords.size() == MAX_NUM_TIME_RECORDS) {
+        ALOGV("[%s]-timeRecords is already at its maximum size[%zu]", layerName.c_str(),
+              MAX_NUM_TIME_RECORDS);
+        // TODO(zzyiwei): if this happens, there must be a present fence missing
+        // or waitData is not in the correct position. Need to think out a
+        // reasonable way to recover from this state.
+        return;
+    }
+    // For most media content, the acquireFence is invalid because the buffer is
+    // already ready at the queueBuffer stage. In that case, acquireTime defaults
+    // to postTime.
+    TimeRecord timeRecord = {
+            .frameNumber = frameNumber,
+            .postTime = postTime,
+            .acquireTime = postTime,
+    };
+    layerRecord.timeRecords.push_back(timeRecord);
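+    // Point waitData at the new record if it is not already tracking an earlier
+    // pending frame.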
+    if (layerRecord.waitData < 0 ||
+        layerRecord.waitData >= static_cast<int32_t>(layerRecord.timeRecords.size()))
+        layerRecord.waitData = layerRecord.timeRecords.size() - 1;
+}
+
+void TimeStats::setLatchTime(const std::string& layerName, uint64_t frameNumber,
+                             nsecs_t latchTime) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-LatchTime[%" PRId64 "]", layerName.c_str(), frameNumber, latchTime);
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
+    if (timeRecord.frameNumber == frameNumber) {
+        timeRecord.latchTime = latchTime;
+    }
+}
+
+void TimeStats::setDesiredTime(const std::string& layerName, uint64_t frameNumber,
+                               nsecs_t desiredTime) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-DesiredTime[%" PRId64 "]", layerName.c_str(), frameNumber,
+          desiredTime);
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
+    if (timeRecord.frameNumber == frameNumber) {
+        timeRecord.desiredTime = desiredTime;
+    }
+}
+
+void TimeStats::setAcquireTime(const std::string& layerName, uint64_t frameNumber,
+                               nsecs_t acquireTime) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-AcquireTime[%" PRId64 "]", layerName.c_str(), frameNumber,
+          acquireTime);
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
+    if (timeRecord.frameNumber == frameNumber) {
+        timeRecord.acquireTime = acquireTime;
+    }
+}
+
+void TimeStats::setAcquireFence(const std::string& layerName, uint64_t frameNumber,
+                                const std::shared_ptr<FenceTime>& acquireFence) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-AcquireFenceTime[%" PRId64 "]", layerName.c_str(), frameNumber,
+          acquireFence->getSignalTime());
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
+    if (timeRecord.frameNumber == frameNumber) {
+        timeRecord.acquireFence = acquireFence;
+    }
+}
+
+void TimeStats::setPresentTime(const std::string& layerName, uint64_t frameNumber,
+                               nsecs_t presentTime) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-PresentTime[%" PRId64 "]", layerName.c_str(), frameNumber,
+          presentTime);
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
+    if (timeRecord.frameNumber == frameNumber) {
+        timeRecord.presentTime = presentTime;
+        timeRecord.ready = true;
+        layerRecord.waitData++;
+    }
+
+    flushAvailableRecordsToStatsLocked(layerName);
+}
+
+void TimeStats::setPresentFence(const std::string& layerName, uint64_t frameNumber,
+                                const std::shared_ptr<FenceTime>& presentFence) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-PresentFenceTime[%" PRId64 "]", layerName.c_str(), frameNumber,
+          presentFence->getSignalTime());
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    TimeRecord& timeRecord = layerRecord.timeRecords[layerRecord.waitData];
+    if (timeRecord.frameNumber == frameNumber) {
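+        // The present fence is the last piece of data for this frame; mark the
+        // record ready and advance waitData to the next pending frame.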
+        timeRecord.presentFence = presentFence;
+        timeRecord.ready = true;
+        layerRecord.waitData++;
+    }
+
+    flushAvailableRecordsToStatsLocked(layerName);
+}
+
+void TimeStats::onDisconnect(const std::string& layerName) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-onDisconnect", layerName.c_str());
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    flushAvailableRecordsToStatsLocked(layerName);
+    timeStatsTracker.erase(layerName);
+}
+
+void TimeStats::clearLayerRecord(const std::string& layerName) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-clearLayerRecord", layerName.c_str());
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    layerRecord.timeRecords.clear();
+    layerRecord.prevTimeRecord.ready = false;
+    layerRecord.waitData = -1;
+}
+
+void TimeStats::removeTimeRecord(const std::string& layerName, uint64_t frameNumber) {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+    ALOGV("[%s]-[%" PRIu64 "]-removeTimeRecord", layerName.c_str(), frameNumber);
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (!timeStatsTracker.count(layerName)) return;
+    LayerRecord& layerRecord = timeStatsTracker[layerName];
+    size_t removeAt = 0;
+    for (const TimeRecord& record : layerRecord.timeRecords) {
+        if (record.frameNumber == frameNumber) break;
+        removeAt++;
+    }
+    if (removeAt == layerRecord.timeRecords.size()) return;
+    layerRecord.timeRecords.erase(layerRecord.timeRecords.begin() + removeAt);
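+    // Shift waitData down so it keeps pointing at the same logical record after
+    // the erase.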
+    if (layerRecord.waitData > static_cast<int32_t>(removeAt)) {
+        --layerRecord.waitData;
+    }
+}
+
+void TimeStats::enable() {
+    if (mEnabled.load()) return;
+
+    ATRACE_CALL();
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    ALOGD("Enabled");
+    mEnabled.store(true);
+    timeStats.statsStart = static_cast<int64_t>(std::time(0));
+}
+
+void TimeStats::disable() {
+    if (!mEnabled.load()) return;
+
+    ATRACE_CALL();
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    ALOGD("Disabled");
+    mEnabled.store(false);
+    timeStats.statsEnd = static_cast<int64_t>(std::time(0));
+}
+
+void TimeStats::clear() {
+    ATRACE_CALL();
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    ALOGD("Cleared");
+    timeStats.dumpStats.clear();
+    timeStats.stats.clear();
+    timeStats.statsStart = (mEnabled.load() ? static_cast<int64_t>(std::time(0)) : 0);
+    timeStats.statsEnd = 0;
+    timeStats.totalFrames = 0;
+    timeStats.missedFrames = 0;
+    timeStats.clientCompositionFrames = 0;
+}
+
+bool TimeStats::isEnabled() {
+    return mEnabled.load();
+}
+
+void TimeStats::dump(bool asProto, uint32_t maxLayers, String8& result) {
+    ATRACE_CALL();
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (timeStats.statsStart == 0) {
+        return;
+    }
+
+    timeStats.statsEnd = static_cast<int64_t>(std::time(0));
+
+    // TODO(zzyiwei): refactor dumpStats into TimeStatsHelper
+    timeStats.dumpStats.clear();
+    for (auto& ele : timeStats.stats) {
+        timeStats.dumpStats.push_back(&ele.second);
+    }
+
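+    // Sort layers by total frame count so the busiest layers are dumped first.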
+    std::sort(timeStats.dumpStats.begin(), timeStats.dumpStats.end(),
+              [](TimeStatsHelper::TimeStatsLayer* const& l,
+                 TimeStatsHelper::TimeStatsLayer* const& r) {
+                  return l->totalFrames > r->totalFrames;
+              });
+
+    if (maxLayers != 0 && maxLayers < timeStats.dumpStats.size()) {
+        timeStats.dumpStats.resize(maxLayers);
+    }
+
+    if (asProto) {
+        dumpAsProtoLocked(result);
+    } else {
+        dumpAsTextLocked(result);
+    }
+}
+
+void TimeStats::dumpAsTextLocked(String8& result) {
+    ALOGD("Dumping TimeStats as text");
+    result.append(timeStats.toString().c_str());
+    result.append("\n");
+}
+
+void TimeStats::dumpAsProtoLocked(String8& result) {
+    ALOGD("Dumping TimeStats as proto");
+    SFTimeStatsGlobalProto timeStatsProto = timeStats.toProto();
+    result.append(timeStatsProto.SerializeAsString().c_str(), timeStatsProto.ByteSize());
+}
+
+} // namespace android
diff --git a/services/surfaceflinger/TimeStats/TimeStats.h b/services/surfaceflinger/TimeStats/TimeStats.h
new file mode 100644
index 0000000..f76a62e
--- /dev/null
+++ b/services/surfaceflinger/TimeStats/TimeStats.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <timestatsproto/TimeStatsHelper.h>
+#include <timestatsproto/TimeStatsProtoHeader.h>
+
+#include <ui/FenceTime.h>
+
+#include <utils/String16.h>
+#include <utils/String8.h>
+#include <utils/Vector.h>
+
+#include <mutex>
+#include <unordered_map>
+#include <vector>
+
+using namespace android::surfaceflinger;
+
+namespace android {
+class String8;
+
+class TimeStats {
+    // TODO(zzyiwei): Bound the timeStatsTracker with weighted LRU
+    // static const size_t MAX_NUM_LAYER_RECORDS = 200;
+    static const size_t MAX_NUM_TIME_RECORDS = 64;
+
+    struct TimeRecord {
+        bool ready = false;
+        uint64_t frameNumber = 0;
+        nsecs_t postTime = 0;
+        nsecs_t latchTime = 0;
+        nsecs_t acquireTime = 0;
+        nsecs_t desiredTime = 0;
+        nsecs_t presentTime = 0;
+        std::shared_ptr<FenceTime> acquireFence;
+        std::shared_ptr<FenceTime> presentFence;
+    };
+
+    struct LayerRecord {
+        // Index into timeRecords of the first frame whose timestamps have not
+        // been fully received yet. This does not wait for fences to signal,
+        // only for those fences/timestamps to be delivered.
+        int32_t waitData = -1;
+        TimeRecord prevTimeRecord;
+        std::vector<TimeRecord> timeRecords;
+    };
+
+public:
+    static TimeStats& getInstance();
+    void parseArgs(bool asProto, const Vector<String16>& args, size_t& index, String8& result);
+    void incrementTotalFrames();
+    void incrementMissedFrames();
+    void incrementClientCompositionFrames();
+
+    void setPostTime(const std::string& layerName, uint64_t frameNumber, nsecs_t postTime);
+    void setLatchTime(const std::string& layerName, uint64_t frameNumber, nsecs_t latchTime);
+    void setDesiredTime(const std::string& layerName, uint64_t frameNumber, nsecs_t desiredTime);
+    void setAcquireTime(const std::string& layerName, uint64_t frameNumber, nsecs_t acquireTime);
+    void setAcquireFence(const std::string& layerName, uint64_t frameNumber,
+                         const std::shared_ptr<FenceTime>& acquireFence);
+    void setPresentTime(const std::string& layerName, uint64_t frameNumber, nsecs_t presentTime);
+    void setPresentFence(const std::string& layerName, uint64_t frameNumber,
+                         const std::shared_ptr<FenceTime>& presentFence);
+    void onDisconnect(const std::string& layerName);
+    void clearLayerRecord(const std::string& layerName);
+    void removeTimeRecord(const std::string& layerName, uint64_t frameNumber);
+
+private:
+    TimeStats() = default;
+
+    bool recordReadyLocked(const std::string& layerName, TimeRecord* timeRecord);
+    void flushAvailableRecordsToStatsLocked(const std::string& layerName);
+
+    void enable();
+    void disable();
+    void clear();
+    bool isEnabled();
+    void dump(bool asProto, uint32_t maxLayer, String8& result);
+    void dumpAsTextLocked(String8& result);
+    void dumpAsProtoLocked(String8& result);
+
+    std::atomic<bool> mEnabled = false;
+    std::mutex mMutex;
+    TimeStatsHelper::TimeStatsGlobal timeStats;
+    std::unordered_map<std::string, LayerRecord> timeStatsTracker;
+};
+
+} // namespace android
diff --git a/services/surfaceflinger/TimeStats/timestatsproto/Android.bp b/services/surfaceflinger/TimeStats/timestatsproto/Android.bp
new file mode 100644
index 0000000..66aa719
--- /dev/null
+++ b/services/surfaceflinger/TimeStats/timestatsproto/Android.bp
@@ -0,0 +1,55 @@
+cc_library_shared {
+    name: "libtimestats_proto",
+    vendor_available: true,
+    export_include_dirs: ["include"],
+
+    srcs: [
+        "TimeStatsHelper.cpp",
+        "timestats.proto",
+    ],
+
+    shared_libs: [
+        "android.hardware.graphics.common@1.1",
+        "libui",
+        "libprotobuf-cpp-lite",
+        "libbase",
+        "liblog",
+    ],
+
+    proto: {
+        export_proto_headers: true,
+    },
+
+    cppflags: [
+        "-Werror",
+        "-Wno-unused-parameter",
+        "-Wno-format",
+        "-Wno-c++98-compat-pedantic",
+        "-Wno-float-conversion",
+        "-Wno-disabled-macro-expansion",
+        "-Wno-float-equal",
+        "-Wno-sign-conversion",
+        "-Wno-padded",
+        "-Wno-old-style-cast",
+        "-Wno-undef",
+    ],
+
+}
+
+java_library_static {
+    name: "timestatsprotosnano",
+    host_supported: true,
+    proto: {
+        type: "nano",
+    },
+    srcs: ["*.proto"],
+    no_framework_libs: true,
+    target: {
+        android: {
+            jarjar_rules: "jarjar-rules.txt",
+        },
+        host: {
+            static_libs: ["libprotobuf-java-nano"],
+        },
+    },
+}
diff --git a/services/surfaceflinger/TimeStats/timestatsproto/TimeStatsHelper.cpp b/services/surfaceflinger/TimeStats/timestatsproto/TimeStatsHelper.cpp
new file mode 100644
index 0000000..3e5007c
--- /dev/null
+++ b/services/surfaceflinger/TimeStats/timestatsproto/TimeStatsHelper.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <android-base/stringprintf.h>
+#include <timestatsproto/TimeStatsHelper.h>
+
+#include <array>
+#include <regex>
+
+#define HISTOGRAM_SIZE 85
+
+using android::base::StringAppendF;
+using android::base::StringPrintf;
+
+namespace android {
+namespace surfaceflinger {
+
+// Time buckets for the histogram; each calculated time delta is binned into
+// the first bucket in this array that is not less than the delta.
+static const std::array<int32_t, HISTOGRAM_SIZE> histogramConfig =
+        {0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,  12,  13,  14,  15,  16,
+         17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,  30,  31,  32,  33,
+         34,  36,  38,  40,  42,  44,  46,  48,  50,  54,  58,  62,  66,  70,  74,  78,  82,
+         86,  90,  94,  98,  102, 106, 110, 114, 118, 122, 126, 130, 134, 138, 142, 146, 150,
+         200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000};
+
+void TimeStatsHelper::Histogram::insert(int32_t delta) {
+    if (delta < 0) return;
+    // std::lower_bound won't work on out of range values
+    if (delta > histogramConfig[HISTOGRAM_SIZE - 1]) {
+        hist[histogramConfig[HISTOGRAM_SIZE - 1]]++;
+        return;
+    }
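+    // lower_bound yields the first bucket that is not less than delta; the
+    // sample is counted in that bucket.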
+    auto iter = std::lower_bound(histogramConfig.begin(), histogramConfig.end(), delta);
+    hist[*iter]++;
+}
+
+float TimeStatsHelper::Histogram::averageTime() {
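+    // Weighted mean over bucket lower bounds, so this is an approximation of
+    // the true average since samples are already bucketed.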
+    int64_t ret = 0;
+    int64_t count = 0;
+    for (auto ele : hist) {
+        count += ele.second;
+        ret += ele.first * ele.second;
+    }
+    return static_cast<float>(ret) / count;
+}
+
+std::string TimeStatsHelper::Histogram::toString() {
+    std::string result;
+    for (int32_t i = 0; i < HISTOGRAM_SIZE; ++i) {
+        int32_t bucket = histogramConfig[i];
+        int32_t count = (hist.count(bucket) == 0) ? 0 : hist[bucket];
+        StringAppendF(&result, "%dms=%d ", bucket, count);
+    }
+    result.back() = '\n';
+    return result;
+}
+
+static std::string getPackageName(const std::string& layerName) {
+    // This regular expression captures the following for instance:
+    // StatusBar in StatusBar#0
+    // com.appname in com.appname/com.appname.activity#0
+    // com.appname in SurfaceView - com.appname/com.appname.activity#0
+    const std::regex re("(?:SurfaceView[-\\s\\t]+)?([^/]+).*#\\d+");
+    std::smatch match;
+    if (std::regex_match(layerName.begin(), layerName.end(), match, re)) {
+        // There must be a match for group 1; otherwise the whole string would
+        // not have matched and regex_match above would have returned false.
+        return match[1];
+    }
+    return "";
+}
+
+std::string TimeStatsHelper::TimeStatsLayer::toString() {
+    std::string result = "";
+    StringAppendF(&result, "layerName = %s\n", layerName.c_str());
+    packageName = getPackageName(layerName);
+    StringAppendF(&result, "packageName = %s\n", packageName.c_str());
+    StringAppendF(&result, "statsStart = %lld\n", static_cast<long long int>(statsStart));
+    StringAppendF(&result, "statsEnd = %lld\n", static_cast<long long int>(statsEnd));
+    StringAppendF(&result, "totalFrames = %d\n", totalFrames);
+    if (deltas.find("present2present") != deltas.end()) {
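+        // Average FPS is derived from the mean present-to-present interval,
+        // which is recorded in milliseconds.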
+        StringAppendF(&result, "averageFPS = %.3f\n",
+                      1000.0 / deltas["present2present"].averageTime());
+    }
+    for (auto ele : deltas) {
+        StringAppendF(&result, "%s histogram is as below:\n", ele.first.c_str());
+        StringAppendF(&result, "%s", ele.second.toString().c_str());
+    }
+
+    return result;
+}
+
+std::string TimeStatsHelper::TimeStatsGlobal::toString() {
+    std::string result = "SurfaceFlinger TimeStats:\n";
+    StringAppendF(&result, "statsStart = %lld\n", static_cast<long long int>(statsStart));
+    StringAppendF(&result, "statsEnd = %lld\n", static_cast<long long int>(statsEnd));
+    StringAppendF(&result, "totalFrames = %d\n", totalFrames);
+    StringAppendF(&result, "missedFrames = %d\n", missedFrames);
+    StringAppendF(&result, "clientCompositionFrames = %d\n", clientCompositionFrames);
+    StringAppendF(&result, "TimeStats for each layer is as below:\n");
+    for (auto ele : dumpStats) {
+        StringAppendF(&result, "%s", ele->toString().c_str());
+    }
+
+    return result;
+}
+
+SFTimeStatsLayerProto TimeStatsHelper::TimeStatsLayer::toProto() {
+    SFTimeStatsLayerProto layerProto;
+    layerProto.set_layer_name(layerName);
+    packageName = getPackageName(layerName);
+    layerProto.set_package_name(packageName);
+    layerProto.set_stats_start(statsStart);
+    layerProto.set_stats_end(statsEnd);
+    layerProto.set_total_frames(totalFrames);
+    for (auto ele : deltas) {
+        SFTimeStatsDeltaProto* deltaProto = layerProto.add_deltas();
+        deltaProto->set_delta_name(ele.first);
+        for (auto histEle : ele.second.hist) {
+            SFTimeStatsHistogramBucketProto* histProto = deltaProto->add_histograms();
+            histProto->set_render_millis(histEle.first);
+            histProto->set_frame_count(histEle.second);
+        }
+    }
+    return layerProto;
+}
+
+SFTimeStatsGlobalProto TimeStatsHelper::TimeStatsGlobal::toProto() {
+    SFTimeStatsGlobalProto globalProto;
+    globalProto.set_stats_start(statsStart);
+    globalProto.set_stats_end(statsEnd);
+    globalProto.set_total_frames(totalFrames);
+    globalProto.set_missed_frames(missedFrames);
+    globalProto.set_client_composition_frames(clientCompositionFrames);
+    for (auto ele : dumpStats) {
+        SFTimeStatsLayerProto* layerProto = globalProto.add_stats();
+        layerProto->CopyFrom(ele->toProto());
+    }
+    return globalProto;
+}
+
+} // namespace surfaceflinger
+} // namespace android
diff --git a/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsHelper.h b/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsHelper.h
new file mode 100644
index 0000000..c876f21
--- /dev/null
+++ b/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsHelper.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <timestatsproto/TimeStatsProtoHeader.h>
+
+#include <math/vec4.h>
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+namespace android {
+namespace surfaceflinger {
+
+class TimeStatsHelper {
+public:
+    class Histogram {
+    public:
+        // Key is the delta time between timestamps
+        // Value is the number of appearances of that delta
+        std::unordered_map<int32_t, int32_t> hist;
+
+        void insert(int32_t delta);
+        float averageTime();
+        std::string toString();
+    };
+
+    class TimeStatsLayer {
+    public:
+        std::string layerName;
+        std::string packageName;
+        int64_t statsStart = 0;
+        int64_t statsEnd = 0;
+        int32_t totalFrames = 0;
+        std::unordered_map<std::string, Histogram> deltas;
+
+        std::string toString();
+        SFTimeStatsLayerProto toProto();
+    };
+
+    class TimeStatsGlobal {
+    public:
+        int64_t statsStart = 0;
+        int64_t statsEnd = 0;
+        int32_t totalFrames = 0;
+        int32_t missedFrames = 0;
+        int32_t clientCompositionFrames = 0;
+        std::unordered_map<std::string, TimeStatsLayer> stats;
+        std::vector<TimeStatsLayer*> dumpStats;
+
+        std::string toString();
+        SFTimeStatsGlobalProto toProto();
+    };
+};
+
+} // namespace surfaceflinger
+} // namespace android
diff --git a/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsProtoHeader.h b/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsProtoHeader.h
new file mode 100644
index 0000000..fe0d150
--- /dev/null
+++ b/services/surfaceflinger/TimeStats/timestatsproto/include/timestatsproto/TimeStatsProtoHeader.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// The pragma below disables the warnings emitted from the protobuf headers.
+// Marking this file as a system header before including timestats.pb.h
+// suppresses the protobuf warnings while still letting the rest of the files
+// use the current flags.
+// This file should be included instead of including timestats.pb.h directly.
+#pragma GCC system_header
+#include <timestats.pb.h>
diff --git a/services/surfaceflinger/TimeStats/timestatsproto/jarjar-rules.txt b/services/surfaceflinger/TimeStats/timestatsproto/jarjar-rules.txt
new file mode 100644
index 0000000..40043a8
--- /dev/null
+++ b/services/surfaceflinger/TimeStats/timestatsproto/jarjar-rules.txt
@@ -0,0 +1 @@
+rule com.google.protobuf.nano.** com.android.framework.protobuf.nano.@1
diff --git a/services/surfaceflinger/TimeStats/timestatsproto/timestats.proto b/services/surfaceflinger/TimeStats/timestatsproto/timestats.proto
new file mode 100644
index 0000000..a8f6fa8
--- /dev/null
+++ b/services/surfaceflinger/TimeStats/timestatsproto/timestats.proto
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto2";
+
+package android.surfaceflinger;
+
+option optimize_for = LITE_RUNTIME;
+
+// frameworks/base/core/proto/android/service/sftimestats.proto is based on
+// this proto. Please only make valid protobuf changes to these messages, and
+// keep the other file in sync with this one.
+
+message SFTimeStatsGlobalProto {
+  // The start & end timestamps in UTC as
+  // milliseconds since January 1, 1970
+  optional int64 stats_start = 1;
+  optional int64 stats_end = 2;
+  // Total frames
+  optional int32 total_frames = 3;
+  // Total missed frames of SurfaceFlinger.
+  optional int32 missed_frames = 4;
+  // Total frames fallback to client composition.
+  optional int32 client_composition_frames = 5;
+
+  repeated SFTimeStatsLayerProto stats = 6;
+}
+
+message SFTimeStatsLayerProto {
+  // The layer name
+  optional string layer_name = 1;
+  // The package name
+  optional string package_name = 2;
+  // The start & end timestamps in UTC as
+  // milliseconds since January 1, 1970
+  optional int64 stats_start = 3;
+  optional int64 stats_end = 4;
+  // Distinct frame count.
+  optional int32 total_frames = 5;
+
+  repeated SFTimeStatsDeltaProto deltas = 6;
+}
+
+message SFTimeStatsDeltaProto {
+  // Name of the time interval
+  optional string delta_name = 1;
+  // Histogram of the delta time
+  repeated SFTimeStatsHistogramBucketProto histograms = 2;
+}
+
+message SFTimeStatsHistogramBucketProto {
+  // Lower bound of render time in milliseconds.
+  optional int32 render_millis = 1;
+  // Number of frames in the bucket.
+  optional int32 frame_count = 2;
+}
diff --git a/services/surfaceflinger/tests/Android.bp b/services/surfaceflinger/tests/Android.bp
index 7523399..322e8a0 100644
--- a/services/surfaceflinger/tests/Android.bp
+++ b/services/surfaceflinger/tests/Android.bp
@@ -36,6 +36,7 @@
         "liblayers_proto",
         "liblog",
         "libprotobuf-cpp-full",
+        "libtimestats_proto",
         "libui",
         "libutils",
     ]
diff --git a/services/surfaceflinger/tests/fakehwc/Android.bp b/services/surfaceflinger/tests/fakehwc/Android.bp
index 00bc621..520df2d 100644
--- a/services/surfaceflinger/tests/fakehwc/Android.bp
+++ b/services/surfaceflinger/tests/fakehwc/Android.bp
@@ -25,6 +25,7 @@
         "liblog",
         "libnativewindow",
         "libsync",
+        "libtimestats_proto",
         "libui",
         "libutils",
     ],
diff --git a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
index bb6ca39..08da1a2 100644
--- a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
+++ b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
@@ -47,10 +47,10 @@
 
 using android::hardware::graphics::common::V1_0::Hdr;
 using android::hardware::graphics::common::V1_1::ColorMode;
+using android::hardware::graphics::common::V1_1::RenderIntent;
 using android::Hwc2::Error;
 using android::Hwc2::IComposer;
 using android::Hwc2::IComposerClient;
-using android::Hwc2::RenderIntent;
 
 using FakeDisplayDeviceInjector = TestableSurfaceFlinger::FakeDisplayDeviceInjector;
 using FakeHwcDisplayInjector = TestableSurfaceFlinger::FakeHwcDisplayInjector;
@@ -494,33 +494,6 @@
     }
 };
 
-// For this variant, SurfaceFlinger should configure itself with wide color
-// display support, and the display should respond with an non-empty list of
-// supported color modes.
-template <typename Display>
-struct WideColorP3EnhanceSupportedVariant {
-    static constexpr bool WIDE_COLOR_SUPPORTED = true;
-
-    static void injectConfigChange(DisplayTransactionTest* test) {
-        test->mFlinger.mutableHasWideColorDisplay() = true;
-        test->mFlinger.mutableDisplayColorSetting() = DisplayColorSetting::ENHANCED;
-    }
-
-    static void setupComposerCallExpectations(DisplayTransactionTest* test) {
-        EXPECT_CALL(*test->mComposer, getColorModes(Display::HWC_DISPLAY_ID, _))
-                .WillOnce(DoAll(SetArgPointee<1>(std::vector<ColorMode>({ColorMode::DISPLAY_P3})),
-                                Return(Error::NONE)));
-        EXPECT_CALL(*test->mComposer,
-                    getRenderIntents(Display::HWC_DISPLAY_ID, ColorMode::DISPLAY_P3, _))
-                .WillOnce(
-                        DoAll(SetArgPointee<2>(std::vector<RenderIntent>({RenderIntent::ENHANCE})),
-                              Return(Error::NONE)));
-        EXPECT_CALL(*test->mComposer,
-                    setColorMode(Display::HWC_DISPLAY_ID, ColorMode::SRGB, RenderIntent::ENHANCE))
-                .WillOnce(Return(Error::NONE));
-    }
-};
-
 // For this variant, SurfaceFlinger should configure itself with wide display
 // support, but the display should respond with an empty list of supported color
 // modes. Wide-color support for the display should not be configured.
@@ -638,9 +611,6 @@
 using WideColorP3ColorimetricDisplayCase =
         Case<PrimaryDisplayVariant, WideColorP3ColorimetricSupportedVariant<PrimaryDisplayVariant>,
              HdrNotSupportedVariant<PrimaryDisplayVariant>>;
-using WideColorP3EnhanceDisplayCase =
-        Case<PrimaryDisplayVariant, WideColorP3EnhanceSupportedVariant<PrimaryDisplayVariant>,
-             HdrNotSupportedVariant<PrimaryDisplayVariant>>;
 using Hdr10DisplayCase =
         Case<PrimaryDisplayVariant, WideColorNotSupportedVariant<PrimaryDisplayVariant>,
              Hdr10SupportedVariant<PrimaryDisplayVariant>>;
@@ -1043,10 +1013,6 @@
     setupNewDisplayDeviceInternalTest<WideColorP3ColorimetricDisplayCase>();
 }
 
-TEST_F(SetupNewDisplayDeviceInternalTest, createWideColorP3EnhanceDisplay) {
-    setupNewDisplayDeviceInternalTest<WideColorP3EnhanceDisplayCase>();
-}
-
 TEST_F(SetupNewDisplayDeviceInternalTest, createHdr10Display) {
     setupNewDisplayDeviceInternalTest<Hdr10DisplayCase>();
 }
diff --git a/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h b/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
index f689537..a4e7361 100644
--- a/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
+++ b/services/surfaceflinger/tests/unittests/TestableSurfaceFlinger.h
@@ -289,10 +289,12 @@
         }
 
         sp<DisplayDevice> inject() {
+            std::unordered_map<ui::ColorMode, std::vector<ui::RenderIntent>> hdrAndRenderIntents;
             sp<DisplayDevice> device =
                     new DisplayDevice(mFlinger.mFlinger.get(), mType, mHwcId, mSecure, mDisplayToken,
                                       mNativeWindow, mDisplaySurface, std::move(mRenderSurface), 0,
-                                      0, false, HdrCapabilities(), 0, HWC_POWER_MODE_NORMAL);
+                                      0, false, HdrCapabilities(), 0, hdrAndRenderIntents,
+                                      HWC_POWER_MODE_NORMAL);
             mFlinger.mutableDisplays().add(mDisplayToken, device);
 
             DisplayDeviceState state(mType, mSecure);
diff --git a/services/surfaceflinger/tests/unittests/mock/RenderEngine/MockRenderEngine.h b/services/surfaceflinger/tests/unittests/mock/RenderEngine/MockRenderEngine.h
index 9bb2a3c..93769a5 100644
--- a/services/surfaceflinger/tests/unittests/mock/RenderEngine/MockRenderEngine.h
+++ b/services/surfaceflinger/tests/unittests/mock/RenderEngine/MockRenderEngine.h
@@ -60,12 +60,13 @@
     MOCK_METHOD1(setupLayerTexturing, void(const Texture&));
     MOCK_METHOD0(setupLayerBlackedOut, void());
     MOCK_METHOD4(setupFillWithColor, void(float, float, float, float));
-    MOCK_METHOD1(setupColorTransform, mat4(const mat4&));
+    MOCK_METHOD1(setupColorTransform, void(const mat4&));
     MOCK_METHOD0(disableTexturing, void());
     MOCK_METHOD0(disableBlending, void());
     MOCK_METHOD1(setSourceY410BT2020, void(bool));
     MOCK_METHOD1(setSourceDataSpace, void(ui::Dataspace));
     MOCK_METHOD1(setOutputDataSpace, void(ui::Dataspace));
+    MOCK_METHOD1(setDisplayMaxLuminance, void(const float));
     MOCK_METHOD2(bindNativeBufferAsFrameBuffer,
                  void(ANativeWindowBuffer*, RE::BindNativeBufferAsFramebuffer*));
     MOCK_METHOD1(unbindNativeBufferAsFrameBuffer, void(RE::BindNativeBufferAsFramebuffer*));